diff --git a/src/Guide_stick_system/image_recognition/manage/img/11.jpg b/src/Guide_stick_system/image_recognition/manage/img/11.jpg
new file mode 100644
index 0000000..9c9247b
Binary files /dev/null and b/src/Guide_stick_system/image_recognition/manage/img/11.jpg differ
diff --git a/src/Guide_stick_system/image_recognition/manage/img/22.jpg b/src/Guide_stick_system/image_recognition/manage/img/22.jpg
new file mode 100644
index 0000000..6b63407
Binary files /dev/null and b/src/Guide_stick_system/image_recognition/manage/img/22.jpg differ
diff --git a/src/Guide_stick_system/image_recognition/manage/itt/ReadMe b/src/Guide_stick_system/image_recognition/manage/itt/ReadMe
new file mode 100644
index 0000000..8f57aec
--- /dev/null
+++ b/src/Guide_stick_system/image_recognition/manage/itt/ReadMe
@@ -0,0 +1,2 @@
+Reads one image from the img directory every second and stores the text recognized in it in the txt directory.
+The txt directory acts as a rolling record of past recognition results: only roughly the last minute of output is kept as backup history.
diff --git a/src/Guide_stick_system/image_recognition/manage/itt/wenzhi.py b/src/Guide_stick_system/image_recognition/manage/itt/wenzhi.py
new file mode 100644
index 0000000..fc7078f
--- /dev/null
+++ b/src/Guide_stick_system/image_recognition/manage/itt/wenzhi.py
@@ -0,0 +1,99 @@
+import pytesseract
+from PIL import Image
+import sys
+import os
+import time
+import datetime
+
+
+def ensure_directories():
+    # Make sure the directory for the output text exists
+    output_dir = 'txt'
+    if not os.path.exists(output_dir):
+        os.makedirs(output_dir)
+
+def ensure_pytesseract_installed():
+    try:
+        # Try importing pytesseract to confirm it is installed
+        import pytesseract
+    except ImportError:
+        print("The pytesseract library is not installed. Run 'pip install pytesseract' to install it.")
+        sys.exit(1)
+
+def ensure_tesseract_executable_configured():
+    try:
+        # Try reading the Tesseract path to confirm it has been configured
+        pytesseract.pytesseract.tesseract_cmd
+    except AttributeError:
+        print("The path to the Tesseract executable is not configured. Set tesseract_cmd in pytesseract.")
+        sys.exit(1)
+
+def image_to_text(image_path):
+    try:
+        # Open the image file
+        img = Image.open(image_path)
+    except IOError:
+        print(f"Cannot open image file: {image_path}")
+        return None
+
+    try:
+        # Run text recognition with pytesseract
+        text = pytesseract.image_to_string(img, lang='eng')
+    except Exception as e:
+        print(f"An error occurred during text recognition: {e}")
+        return None
+
+    return text
+
+def main():
+    # Make sure the pytesseract library is installed
+    ensure_pytesseract_installed()
+
+    # Make sure the Tesseract executable path is configured
+    ensure_tesseract_executable_configured()
+
+    # Make sure the output directory exists
+    ensure_directories()
+
+    # Output directory path
+    output_dir_path = 'txt'
+
+
+    # Main loop: process one image per second
+    while True:
+        # Use the current time, formatted, as the file name
+        current_time = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
+        image_filename = f"{current_time}.jpg"
+        #image_path = os.path.join('img', image_filename)
+        image_path = 'img/22.jpg'  # NOTE: currently fixed to the sample image; the timestamped path above is kept for reference
+        output_file_path = os.path.join(output_dir_path, f"{current_time}.txt")
+
+        # Skip this round if the image does not exist
+        if not os.path.exists(image_path):
+            print(f"Image {image_path} does not exist, waiting for the next one...")
+            time.sleep(1)
+            continue
+
+        # Call image_to_text and write the result to a file
+        text = image_to_text(image_path)
+        if text:
+            with open(output_file_path, 'w', encoding='utf-8') as file:
+                file.write(text)
+            print(f"Recognition result for {image_filename} saved to {output_file_path}")
+        else:
+            print(f"No text could be recognized in {image_filename}.")
+
+        # If the txt folder holds more than 60 files, delete the oldest one
+        files = os.listdir(output_dir_path)
+        if len(files) > 60:
+            # Sort the files so the oldest comes first
+            files.sort()
+            oldest_file_path = os.path.join(output_dir_path, files[0])
+            os.remove(oldest_file_path)
+            print(f"Deleted oldest file: {oldest_file_path}")
+
+        # Wait one second
+        time.sleep(1)
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/src/Guide_stick_system/image_recognition/manage/log.txt
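The ReadMe above promises that only roughly the last minute of recognition output is kept, while wenzhi.py enforces a 60-file cap, which only matches when exactly one file is written per second. A minimal age-based cleanup sketch (assuming the timestamped .txt files live under txt/; prune_old_results is a hypothetical helper, not part of this diff):

```python
import os
import time

def prune_old_results(output_dir="txt", max_age_seconds=60):
    # Remove recognition results older than max_age_seconds, regardless of
    # how many files were written in that window.
    now = time.time()
    for name in os.listdir(output_dir):
        path = os.path.join(output_dir, name)
        if name.endswith(".txt") and now - os.path.getmtime(path) > max_age_seconds:
            os.remove(path)
```

Called once per loop iteration in place of the 60-file check, this keeps the "last minute" guarantee even if the capture interval changes.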
b/src/Guide_stick_system/image_recognition/manage/log.txt new file mode 100644 index 0000000..3342b69 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/log.txt @@ -0,0 +1,242 @@ +2024年 07月 01日 星期一 21:13:13 CST: wenzhi.py 被停止, PID: 29387 +2024年 07月 01日 星期一 21:13:13 CST: tts.py 运行失败,退出状态码 1, PID: 29405 +2024年 07月 01日 星期一 21:13:18 CST: wenzhi.py 被停止, PID: 29408 +2024年 07月 01日 星期一 21:13:18 CST: tts.py 运行失败,退出状态码 1, PID: 29435 +2024年 07月 01日 星期一 21:13:23 CST: wenzhi.py 被停止, PID: 29456 +2024年 07月 01日 星期一 21:13:23 CST: tts.py 运行失败,退出状态码 1, PID: 29474 +2024年 07月 01日 星期一 21:14:26 CST: wenzhi.py 被停止, PID: 29817 +2024年 07月 01日 星期一 21:14:26 CST: tts.py 运行失败,退出状态码 1, PID: 29835 +2024年 07月 01日 星期一 21:14:31 CST: wenzhi.py 被停止, PID: 29838 +2024年 07月 01日 星期一 21:14:31 CST: tts.py 运行失败,退出状态码 1, PID: 29865 +2024年 07月 01日 星期一 21:16:08 CST: wenzhi.py 被停止, PID: 30373 +2024年 07月 01日 星期一 21:16:08 CST: tts.py 运行失败,退出状态码 1, PID: 30391 +2024年 07月 01日 星期一 21:16:13 CST: wenzhi.py 被停止, PID: 30412 +2024年 07月 01日 星期一 21:16:13 CST: tts.py 运行失败,退出状态码 1, PID: 30466 +2024年 07月 01日 星期一 21:16:18 CST: wenzhi.py 被停止, PID: 30469 +2024年 07月 01日 星期一 21:16:18 CST: tts.py 运行失败,退出状态码 1, PID: 30550 +2024年 07月 01日 星期一 21:16:42 CST: wenzhi.py 被停止, PID: 30664 +2024年 07月 01日 星期一 21:16:42 CST: tts.py 运行失败,退出状态码 1, PID: 30691 +2024年 07月 01日 星期一 21:16:47 CST: wenzhi.py 被停止, PID: 30694 +2024年 07月 01日 星期一 21:16:47 CST: tts.py 运行失败,退出状态码 1, PID: 30721 +2024年 07月 01日 星期一 21:16:52 CST: wenzhi.py 被停止, PID: 30733 +2024年 07月 01日 星期一 21:16:52 CST: tts.py 运行失败,退出状态码 1, PID: 30769 +2024年 07月 01日 星期一 21:16:57 CST: wenzhi.py 被停止, PID: 30781 +2024年 07月 01日 星期一 21:16:57 CST: tts.py 运行失败,退出状态码 1, PID: 30835 +2024年 07月 01日 星期一 21:17:02 CST: wenzhi.py 被停止, PID: 30838 +2024年 07月 01日 星期一 21:17:02 CST: tts.py 运行失败,退出状态码 1, PID: 30895 +2024年 07月 01日 星期一 21:19:05 CST: wenzhi.py 被停止, PID: 31525 +2024年 07月 01日 星期一 21:19:05 CST: tts.py 运行失败,退出状态码 1, PID: 31543 +2024年 07月 01日 星期一 21:19:10 CST: wenzhi.py 被停止, PID: 31546 +2024年 07月 01日 星期一 21:19:10 CST: tts.py 运行失败,退出状态码 1, PID: 31573 +2024年 07月 01日 星期一 21:20:16 CST: wenzhi.py 被停止, PID: 31924 +2024年 07月 01日 星期一 21:20:17 CST: tts.py 运行失败,退出状态码 1, PID: 31960 +2024年 07月 01日 星期一 21:20:22 CST: wenzhi.py 被停止, PID: 31981 +2024年 07月 01日 星期一 21:20:22 CST: tts.py 运行失败,退出状态码 1, PID: 32008 +2024年 07月 01日 星期一 21:20:27 CST: wenzhi.py 被停止, PID: 32029 +2024年 07月 01日 星期一 21:20:27 CST: tts.py 运行失败,退出状态码 1, PID: 32056 +2024年 07月 01日 星期一 21:20:32 CST: wenzhi.py 被停止, PID: 32059 +2024年 07月 01日 星期一 21:20:32 CST: tts.py 运行失败,退出状态码 1, PID: 32086 +2024年 07月 01日 星期一 21:20:37 CST: wenzhi.py 被停止, PID: 32125 +2024年 07月 01日 星期一 21:20:37 CST: tts.py 运行失败,退出状态码 1, PID: 32161 +2024年 07月 01日 星期一 21:20:42 CST: wenzhi.py 被停止, PID: 32191 +2024年 07月 01日 星期一 21:20:42 CST: tts.py 运行失败,退出状态码 1, PID: 32264 +2024年 07月 01日 星期一 21:20:47 CST: wenzhi.py 被停止, PID: 32267 +2024年 07月 01日 星期一 21:20:47 CST: tts.py 运行失败,退出状态码 1, PID: 32303 +2024年 07月 01日 星期一 21:20:52 CST: wenzhi.py 被停止, PID: 32306 +2024年 07月 01日 星期一 21:20:52 CST: tts.py 运行失败,退出状态码 1, PID: 32351 +2024年 07月 01日 星期一 21:20:57 CST: wenzhi.py 被停止, PID: 32354 +2024年 07月 01日 星期一 21:20:57 CST: tts.py 运行失败,退出状态码 1, PID: 32426 +2024年 07月 01日 星期一 23:05:26 CST: wenzhi.py 被停止, PID: 8301 +2024年 07月 01日 星期一 23:05:26 CST: tts.py 运行失败,退出状态码 1, PID: 8318 +2024年 07月 01日 星期一 23:05:28 CST: wenzhi.py 被停止, PID: 8322 +2024年 07月 01日 星期一 23:05:28 CST: tts.py 运行失败,退出状态码 1, PID: 8339 +2024年 07月 01日 星期一 23:05:30 CST: wenzhi.py 被停止, PID: 8352 +2024年 07月 01日 星期一 23:05:30 CST: tts.py 运行失败,退出状态码 1, PID: 8369 +2024年 07月 01日 
星期一 23:05:32 CST: wenzhi.py 被停止, PID: 8373 +2024年 07月 01日 星期一 23:05:32 CST: tts.py 运行失败,退出状态码 1, PID: 8400 +2024年 07月 01日 星期一 23:05:34 CST: wenzhi.py 被停止, PID: 8422 +2024年 07月 01日 星期一 23:05:34 CST: tts.py 运行失败,退出状态码 1, PID: 8448 +2024年 07月 01日 星期一 23:09:24 CST: wenzhi.py 被停止, PID: 10042 +2024年 07月 01日 星期一 23:09:24 CST: tts.py 运行失败,退出状态码 1, PID: 10069 +2024年 07月 01日 星期一 23:09:26 CST: wenzhi.py 被停止, PID: 10073 +2024年 07月 01日 星期一 23:09:27 CST: tts.py 运行失败,退出状态码 1, PID: 10090 +2024年 07月 01日 星期一 23:09:29 CST: wenzhi.py 被停止, PID: 10121 +2024年 07月 01日 星期一 23:09:29 CST: tts.py 运行失败,退出状态码 1, PID: 10129 +2024年 07月 01日 星期一 23:09:31 CST: wenzhi.py 被停止, PID: 10178 +2024年 07月 01日 星期一 23:09:31 CST: tts.py 运行失败,退出状态码 1, PID: 10195 +2024年 07月 01日 星期一 23:09:33 CST: wenzhi.py 被停止, PID: 10208 +2024年 07月 01日 星期一 23:09:33 CST: tts.py 运行失败,退出状态码 1, PID: 10225 +2024年 07月 01日 星期一 23:09:35 CST: wenzhi.py 被停止, PID: 10229 +2024年 07月 01日 星期一 23:09:35 CST: tts.py 运行失败,退出状态码 1, PID: 10237 +2024年 07月 01日 星期一 23:09:37 CST: wenzhi.py 被停止, PID: 10241 +2024年 07月 01日 星期一 23:09:37 CST: tts.py 运行失败,退出状态码 1, PID: 10249 +2024年 07月 01日 星期一 23:09:39 CST: wenzhi.py 被停止, PID: 10289 +2024年 07月 01日 星期一 23:09:39 CST: tts.py 运行失败,退出状态码 1, PID: 10306 +2024年 07月 01日 星期一 23:09:41 CST: wenzhi.py 被停止, PID: 10310 +2024年 07月 01日 星期一 23:09:41 CST: tts.py 运行失败,退出状态码 1, PID: 10327 +2024年 07月 01日 星期一 23:09:43 CST: wenzhi.py 被停止, PID: 10340 +2024年 07月 01日 星期一 23:09:43 CST: tts.py 运行失败,退出状态码 1, PID: 10357 +2024年 07月 01日 星期一 23:09:45 CST: wenzhi.py 被停止, PID: 10379 +2024年 07月 01日 星期一 23:09:45 CST: tts.py 运行失败,退出状态码 1, PID: 10396 +2024年 07月 01日 星期一 23:13:21 CST: wenzhi.py 被停止, PID: 11686 +2024年 07月 01日 星期一 23:13:21 CST: tts.py 运行成功, PID: 11694 +2024年 07月 01日 星期一 23:13:23 CST: wenzhi.py 被停止, PID: 11698 +2024年 07月 01日 星期一 23:13:23 CST: tts.py 运行成功, PID: 11733 +2024年 07月 01日 星期一 23:14:23 CST: wenzhi.py 被停止, PID: 12133 +2024年 07月 01日 星期一 23:14:23 CST: tts.py 运行成功, PID: 12141 +2024年 07月 01日 星期一 23:14:25 CST: wenzhi.py 被停止, PID: 12154 +2024年 07月 01日 星期一 23:14:25 CST: tts.py 运行成功, PID: 12180 +2024年 07月 01日 星期一 23:16:13 CST: wenzhi.py 被停止, PID: 12910 +2024年 07月 01日 星期一 23:16:13 CST: tts.py 运行成功, PID: 12918 +2024年 07月 01日 星期一 23:16:15 CST: wenzhi.py 被停止, PID: 12940 +2024年 07月 01日 星期一 23:16:15 CST: tts.py 运行成功, PID: 12957 +2024年 07月 01日 星期一 23:16:17 CST: wenzhi.py 被停止, PID: 12961 +2024年 07月 01日 星期一 23:16:17 CST: tts.py 运行成功, PID: 12978 +2024年 07月 01日 星期一 23:16:19 CST: wenzhi.py 被停止, PID: 12982 +2024年 07月 01日 星期一 23:16:19 CST: tts.py 运行成功, PID: 12990 +2024年 07月 01日 星期一 23:16:21 CST: wenzhi.py 被停止, PID: 12994 +2024年 07月 01日 星期一 23:16:21 CST: tts.py 运行成功, PID: 13002 +2024年 07月 01日 星期一 23:16:23 CST: wenzhi.py 被停止, PID: 13024 +2024年 07月 01日 星期一 23:16:23 CST: tts.py 运行成功, PID: 13041 +2024年 07月 01日 星期一 23:16:25 CST: wenzhi.py 被停止, PID: 13054 +2024年 07月 01日 星期一 23:16:25 CST: tts.py 运行成功, PID: 13062 +2024年 07月 01日 星期一 23:16:27 CST: wenzhi.py 被停止, PID: 13075 +2024年 07月 01日 星期一 23:16:27 CST: tts.py 运行成功, PID: 13083 +2024年 07月 01日 星期一 23:17:10 CST: wenzhi.py 被停止, PID: 13388 +2024年 07月 01日 星期一 23:17:10 CST: tts.py 运行成功, PID: 13396 +2024年 07月 01日 星期一 23:17:12 CST: wenzhi.py 被停止, PID: 13400 +2024年 07月 01日 星期一 23:17:12 CST: tts.py 运行成功, PID: 13408 +2024年 07月 01日 星期一 23:17:23 CST: wenzhi.py 被停止, PID: 13502 +2024年 07月 01日 星期一 23:17:23 CST: tts.py 运行成功, PID: 13510 +2024年 07月 01日 星期一 23:17:25 CST: wenzhi.py 被停止, PID: 13514 +2024年 07月 01日 星期一 23:17:25 CST: tts.py 运行成功, PID: 13522 +2024年 07月 01日 星期一 23:17:27 CST: wenzhi.py 被停止, PID: 13526 +2024年 07月 01日 星期一 23:17:27 CST: tts.py 运行成功, 
PID: 13534 +2024年 07月 01日 星期一 23:17:59 CST: wenzhi.py 被停止, PID: 13762 +2024年 07月 01日 星期一 23:17:59 CST: tts.py 运行失败,退出状态码 1, PID: 13779 +2024年 07月 01日 星期一 23:18:01 CST: wenzhi.py 被停止, PID: 13783 +2024年 07月 01日 星期一 23:18:01 CST: tts.py 运行失败,退出状态码 1, PID: 13791 +2024年 07月 01日 星期一 23:18:03 CST: wenzhi.py 被停止, PID: 13795 +2024年 07月 01日 星期一 23:18:03 CST: tts.py 运行失败,退出状态码 1, PID: 13821 +2024年 07月 01日 星期一 23:18:05 CST: wenzhi.py 被停止, PID: 13825 +2024年 07月 01日 星期一 23:18:05 CST: tts.py 运行失败,退出状态码 1, PID: 13833 +2024年 07月 01日 星期一 23:18:07 CST: wenzhi.py 被停止, PID: 13837 +2024年 07月 01日 星期一 23:18:07 CST: tts.py 运行失败,退出状态码 1, PID: 13854 +2024年 07月 01日 星期一 23:18:09 CST: wenzhi.py 被停止, PID: 13867 +2024年 07月 01日 星期一 23:18:09 CST: tts.py 运行失败,退出状态码 1, PID: 13875 +2024年 07月 01日 星期一 23:18:11 CST: wenzhi.py 被停止, PID: 13879 +2024年 07月 01日 星期一 23:18:11 CST: tts.py 运行失败,退出状态码 1, PID: 13887 +2024年 07月 01日 星期一 23:18:13 CST: wenzhi.py 被停止, PID: 13891 +2024年 07月 01日 星期一 23:18:13 CST: tts.py 运行失败,退出状态码 1, PID: 13899 +2024年 07月 01日 星期一 23:18:15 CST: wenzhi.py 被停止, PID: 13903 +2024年 07月 01日 星期一 23:18:15 CST: tts.py 运行失败,退出状态码 1, PID: 13911 +2024年 07月 01日 星期一 23:18:17 CST: wenzhi.py 被停止, PID: 13915 +2024年 07月 01日 星期一 23:18:18 CST: tts.py 运行失败,退出状态码 1, PID: 13923 +2024年 07月 01日 星期一 23:18:20 CST: wenzhi.py 被停止, PID: 13927 +2024年 07月 01日 星期一 23:18:20 CST: tts.py 运行失败,退出状态码 1, PID: 13935 +2024年 07月 01日 星期一 23:18:22 CST: wenzhi.py 被停止, PID: 13939 +2024年 07月 01日 星期一 23:18:22 CST: tts.py 运行失败,退出状态码 1, PID: 13947 +2024年 07月 01日 星期一 23:18:24 CST: wenzhi.py 被停止, PID: 13960 +2024年 07月 01日 星期一 23:18:24 CST: tts.py 运行失败,退出状态码 1, PID: 13968 +2024年 07月 01日 星期一 23:18:26 CST: wenzhi.py 被停止, PID: 13972 +2024年 07月 01日 星期一 23:18:26 CST: tts.py 运行失败,退出状态码 1, PID: 13980 +2024年 07月 01日 星期一 23:18:28 CST: wenzhi.py 被停止, PID: 13984 +2024年 07月 01日 星期一 23:18:28 CST: tts.py 运行失败,退出状态码 1, PID: 13992 +2024年 07月 01日 星期一 23:18:30 CST: wenzhi.py 被停止, PID: 13996 +2024年 07月 01日 星期一 23:18:30 CST: tts.py 运行失败,退出状态码 1, PID: 14004 +2024年 07月 01日 星期一 23:18:32 CST: wenzhi.py 被停止, PID: 14017 +2024年 07月 01日 星期一 23:18:32 CST: tts.py 运行失败,退出状态码 1, PID: 14025 +2024年 07月 01日 星期一 23:18:34 CST: wenzhi.py 被停止, PID: 14029 +2024年 07月 01日 星期一 23:18:34 CST: tts.py 运行失败,退出状态码 1, PID: 14046 +2024年 07月 01日 星期一 23:18:36 CST: wenzhi.py 被停止, PID: 14050 +2024年 07月 01日 星期一 23:18:36 CST: tts.py 运行失败,退出状态码 1, PID: 14076 +2024年 07月 01日 星期一 23:18:38 CST: wenzhi.py 被停止, PID: 14080 +2024年 07月 01日 星期一 23:18:38 CST: tts.py 运行失败,退出状态码 1, PID: 14088 +2024年 07月 01日 星期一 23:18:40 CST: wenzhi.py 被停止, PID: 14092 +2024年 07月 01日 星期一 23:18:40 CST: tts.py 运行失败,退出状态码 1, PID: 14109 +2024年 07月 01日 星期一 23:18:42 CST: wenzhi.py 被停止, PID: 14113 +2024年 07月 01日 星期一 23:18:42 CST: tts.py 运行失败,退出状态码 1, PID: 14130 +2024年 07月 01日 星期一 23:18:44 CST: wenzhi.py 被停止, PID: 14134 +2024年 07月 01日 星期一 23:18:44 CST: tts.py 运行失败,退出状态码 1, PID: 14142 +2024年 07月 01日 星期一 23:18:46 CST: wenzhi.py 被停止, PID: 14146 +2024年 07月 01日 星期一 23:18:46 CST: tts.py 运行失败,退出状态码 1, PID: 14154 +2024年 07月 01日 星期一 23:18:48 CST: wenzhi.py 被停止, PID: 14167 +2024年 07月 01日 星期一 23:18:48 CST: tts.py 运行失败,退出状态码 1, PID: 14184 +2024年 07月 01日 星期一 23:18:50 CST: wenzhi.py 被停止, PID: 14197 +2024年 07月 01日 星期一 23:18:50 CST: tts.py 运行失败,退出状态码 1, PID: 14223 +2024年 07月 01日 星期一 23:18:52 CST: wenzhi.py 被停止, PID: 14227 +2024年 07月 01日 星期一 23:18:52 CST: tts.py 运行失败,退出状态码 1, PID: 14235 +2024年 07月 01日 星期一 23:18:54 CST: wenzhi.py 被停止, PID: 14248 +2024年 07月 01日 星期一 23:18:54 CST: tts.py 运行失败,退出状态码 1, PID: 14282 +2024年 07月 01日 星期一 23:18:56 CST: wenzhi.py 被停止, PID: 14296 +2024年 07月 
01日 星期一 23:18:56 CST: tts.py 运行失败,退出状态码 1, PID: 14313 +2024年 07月 01日 星期一 23:18:58 CST: wenzhi.py 被停止, PID: 14334 +2024年 07月 01日 星期一 23:18:58 CST: tts.py 运行失败,退出状态码 1, PID: 14361 +2024年 07月 01日 星期一 23:19:00 CST: wenzhi.py 被停止, PID: 14374 +2024年 07月 01日 星期一 23:19:00 CST: tts.py 运行失败,退出状态码 1, PID: 14391 +2024年 07月 01日 星期一 23:19:02 CST: wenzhi.py 被停止, PID: 14395 +2024年 07月 01日 星期一 23:19:02 CST: tts.py 运行失败,退出状态码 1, PID: 14403 +2024年 07月 01日 星期一 23:19:04 CST: wenzhi.py 被停止, PID: 14425 +2024年 07月 01日 星期一 23:19:04 CST: tts.py 运行失败,退出状态码 1, PID: 14433 +2024年 07月 01日 星期一 23:19:06 CST: wenzhi.py 被停止, PID: 14446 +2024年 07月 01日 星期一 23:19:07 CST: tts.py 运行失败,退出状态码 1, PID: 14463 +2024年 07月 01日 星期一 23:19:09 CST: wenzhi.py 被停止, PID: 14485 +2024年 07月 01日 星期一 23:19:09 CST: tts.py 运行失败,退出状态码 1, PID: 14502 +2024年 07月 01日 星期一 23:19:11 CST: wenzhi.py 被停止, PID: 14515 +2024年 07月 01日 星期一 23:19:11 CST: tts.py 运行失败,退出状态码 1, PID: 14532 +2024年 07月 01日 星期一 23:19:13 CST: wenzhi.py 被停止, PID: 14536 +2024年 07月 01日 星期一 23:19:13 CST: tts.py 运行失败,退出状态码 1, PID: 14544 +2024年 07月 01日 星期一 23:19:15 CST: wenzhi.py 被停止, PID: 14548 +2024年 07月 01日 星期一 23:19:15 CST: tts.py 运行失败,退出状态码 1, PID: 14574 +2024年 07月 01日 星期一 23:19:17 CST: wenzhi.py 被停止, PID: 14578 +2024年 07月 01日 星期一 23:19:17 CST: tts.py 运行失败,退出状态码 1, PID: 14586 +2024年 07月 01日 星期一 23:19:19 CST: wenzhi.py 被停止, PID: 14599 +2024年 07月 01日 星期一 23:19:19 CST: tts.py 运行失败,退出状态码 1, PID: 14616 +2024年 07月 01日 星期一 23:19:21 CST: wenzhi.py 被停止, PID: 14620 +2024年 07月 01日 星期一 23:19:21 CST: tts.py 运行失败,退出状态码 1, PID: 14646 +2024年 07月 01日 星期一 23:19:23 CST: wenzhi.py 被停止, PID: 14650 +2024年 07月 01日 星期一 23:19:23 CST: tts.py 运行失败,退出状态码 1, PID: 14658 +2024年 07月 01日 星期一 23:19:25 CST: wenzhi.py 被停止, PID: 14671 +2024年 07月 01日 星期一 23:19:25 CST: tts.py 运行失败,退出状态码 1, PID: 14679 +2024年 07月 01日 星期一 23:19:27 CST: wenzhi.py 被停止, PID: 14692 +2024年 07月 01日 星期一 23:19:27 CST: tts.py 运行失败,退出状态码 1, PID: 14709 +2024年 07月 01日 星期一 23:19:29 CST: wenzhi.py 被停止, PID: 14713 +2024年 07月 01日 星期一 23:19:29 CST: tts.py 运行失败,退出状态码 1, PID: 14721 +2024年 07月 01日 星期一 23:19:31 CST: wenzhi.py 被停止, PID: 14735 +2024年 07月 01日 星期一 23:19:31 CST: tts.py 运行失败,退出状态码 1, PID: 14743 +2024年 07月 01日 星期一 23:19:33 CST: wenzhi.py 被停止, PID: 14756 +2024年 07月 01日 星期一 23:19:33 CST: tts.py 运行失败,退出状态码 1, PID: 14773 +2024年 07月 01日 星期一 23:19:35 CST: wenzhi.py 被停止, PID: 14777 +2024年 07月 01日 星期一 23:19:35 CST: tts.py 运行失败,退出状态码 1, PID: 14785 +2024年 07月 01日 星期一 23:19:37 CST: wenzhi.py 被停止, PID: 14798 +2024年 07月 01日 星期一 23:19:37 CST: tts.py 运行失败,退出状态码 1, PID: 14815 +2024年 07月 01日 星期一 23:19:39 CST: wenzhi.py 被停止, PID: 14819 +2024年 07月 01日 星期一 23:19:39 CST: tts.py 运行失败,退出状态码 1, PID: 14836 +2024年 07月 01日 星期一 23:19:41 CST: wenzhi.py 被停止, PID: 14840 +2024年 07月 01日 星期一 23:19:41 CST: tts.py 运行失败,退出状态码 1, PID: 14848 +2024年 07月 01日 星期一 23:19:43 CST: wenzhi.py 被停止, PID: 14852 +2024年 07月 01日 星期一 23:19:43 CST: tts.py 运行失败,退出状态码 1, PID: 14860 +2024年 07月 01日 星期一 23:19:45 CST: wenzhi.py 被停止, PID: 14864 +2024年 07月 01日 星期一 23:19:45 CST: tts.py 运行失败,退出状态码 1, PID: 14872 +2024年 07月 01日 星期一 23:19:47 CST: wenzhi.py 被停止, PID: 14885 +2024年 07月 01日 星期一 23:19:47 CST: tts.py 运行失败,退出状态码 1, PID: 14893 +2024年 07月 01日 星期一 23:19:49 CST: wenzhi.py 被停止, PID: 14897 +2024年 07月 01日 星期一 23:19:49 CST: tts.py 运行失败,退出状态码 1, PID: 14905 +2024年 07月 01日 星期一 23:19:51 CST: wenzhi.py 被停止, PID: 14909 +2024年 07月 01日 星期一 23:19:51 CST: tts.py 运行失败,退出状态码 1, PID: 14926 +2024年 07月 01日 星期一 23:19:53 CST: wenzhi.py 被停止, PID: 14948 +2024年 07月 01日 星期一 23:19:53 CST: tts.py 运行失败,退出状态码 1, PID: 14956 +2024年 07月 01日 星期一 23:19:55 
CST: wenzhi.py 被停止, PID: 14969 +2024年 07月 01日 星期一 23:19:55 CST: tts.py 运行失败,退出状态码 1, PID: 14977 +2024年 07月 01日 星期一 23:19:58 CST: wenzhi.py 被停止, PID: 14981 +2024年 07月 01日 星期一 23:19:58 CST: tts.py 运行失败,退出状态码 1, PID: 15007 +2024年 07月 01日 星期一 23:20:00 CST: wenzhi.py 被停止, PID: 15020 +2024年 07月 01日 星期一 23:20:00 CST: tts.py 运行失败,退出状态码 1, PID: 15037 +2024年 07月 01日 星期一 23:20:02 CST: wenzhi.py 被停止, PID: 15041 +2024年 07月 01日 星期一 23:20:02 CST: tts.py 运行失败,退出状态码 1, PID: 15049 +2024年 07月 01日 星期一 23:20:04 CST: wenzhi.py 被停止, PID: 15053 +2024年 07月 01日 星期一 23:20:04 CST: tts.py 运行失败,退出状态码 1, PID: 15061 +2024年 07月 01日 星期一 23:20:06 CST: wenzhi.py 被停止, PID: 15067 +2024年 07月 01日 星期一 23:20:06 CST: tts.py 运行失败,退出状态码 1, PID: 15075 +2024年 07月 01日 星期一 23:20:08 CST: wenzhi.py 被停止, PID: 15079 +2024年 07月 01日 星期一 23:20:08 CST: tts.py 运行失败,退出状态码 1, PID: 15105 diff --git a/src/Guide_stick_system/image_recognition/manage/repted.sh b/src/Guide_stick_system/image_recognition/manage/repted.sh new file mode 100644 index 0000000..724dae8 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/repted.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +# repted.sh +while true; do + # 在后台运行itt目录中的wenzhi.py脚本并获取其PID + python3 itt/wenzhi.py & + wenzhi_pid=$! + + # 等待一段时间,让wenzhi.py开始执行 + sleep 1 + + # 检查wenzhi.py是否还在运行 + if kill -0 $wenzhi_pid 2>/dev/null; then + # wenzhi.py仍在运行,尝试停止它 + kill $wenzhi_pid + wait $wenzhi_pid + echo "$(date): wenzhi.py 被停止, PID: $wenzhi_pid" >> log.txt + else + # wenzhi.py已经停止,记录日志 + echo "$(date): wenzhi.py 已经停止, PID: $wenzhi_pid" >> log.txt + fi + + # 在后台运行tts目录中的tts.py脚本并获取其PID + python3 tts/tts.py & + tts_pid=$! + + # 等待tts.py脚本结束以获取其退出状态 + wait $tts_pid + tts_exit_status=$? + + # 检查tts.py脚本是否成功运行 + if [ $tts_exit_status -eq 0 ]; then + echo "$(date): tts.py 运行成功, PID: $tts_pid" >> log.txt + else + echo "$(date): tts.py 运行失败,退出状态码 $tts_exit_status, PID: $tts_pid" >> log.txt + fi + + # 等待一段时间后再重复这个过程 + sleep 1 +done diff --git a/src/Guide_stick_system/image_recognition/manage/tts/trans.py b/src/Guide_stick_system/image_recognition/manage/tts/trans.py new file mode 100644 index 0000000..b926776 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/tts/trans.py @@ -0,0 +1,69 @@ +import numpy as np +from sklearn.model_selection import train_test_split +from sklearn.preprocessing import OneHotEncoder +from keras.models import Sequential +from keras.layers import Dense, Embedding, Flatten +from keras.preprocessing.sequence import pad_sequences +from keras.utils import to_categorical +import re + +# 文本清洗函数 +def clean_text(text): + # 使用正则表达式去除非标准字符 + cleaned_text = re.sub(r'[^a-zA-Z0-9\s]', '', text.lower()) + return cleaned_text + +# 读取文本文件 +def read_text_file(file_path): + with open(file_path, 'r', encoding='utf-8') as f: + return f.read() + +# 数据准备 +def prepare_data(text, max_sequence_length=10): + words = set(clean_text(text).split()) + word2index = {word: i for i, word in enumerate(words, 1)} + index2word = {i: word for word, i in word2index.items()} + + sentences = text.split('. 
') + X, y = [], [] + for sentence in sentences: + cleaned_sentence = clean_text(sentence) + tokens = [word2index[word] for word in cleaned_sentence.split() if word in word2index] + for i in range(1, len(tokens)): + X.append(tokens[:-i]) + y.append(tokens[i]) + + # 使用pad_sequences处理变长序列 + X = pad_sequences(X, maxlen=max_sequence_length, padding='pre', truncating='post') + y = to_categorical(np.array(y), num_classes=len(word2index)) + return X, y, word2index, index2word + +# 读取文本文件并准备数据 +text = read_text_file('input.txt') # 假设输入文件名为input.txt +X, y, word2index, index2word = prepare_data(text) + +# 划分训练集和测试集 +X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) + +# 构建NNLM模型(这里使用Embedding层来捕获词汇之间的相似性) +model = Sequential() +model.add(Embedding(input_dim=len(word2index) + 1, output_dim=32, input_length=max_sequence_length)) +model.add(Flatten()) +model.add(Dense(128, activation='relu')) +model.add(Dense(len(word2index), activation='softmax')) + +model.compile(optimizer='adam', loss='categorical_crossentropy') + +# 训练模型 +model.fit(np.array([x for x in X_train]), y_train, epochs=10, batch_size=32) + +# 预测 +predictions = model.predict([x for x in X_test]) +predicted_words = [np.argmax(pred) for pred in predictions] +predicted_sentences = [' '.join(index2word[word] for word in [sentence[0]] + [predicted_words[i] for i, _ in enumerate(sentence[1:]) if i < len(predicted_words)]) + for sentence in X_test] + +# 将预测结果写入文本文件 +with open('output.txt', 'w', encoding='utf-8') as f: + for sentence in predicted_sentences: + f.write(sentence + '\n') \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/tts/tts.py b/src/Guide_stick_system/image_recognition/manage/tts/tts.py new file mode 100644 index 0000000..8ae8441 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/tts/tts.py @@ -0,0 +1,76 @@ +import subprocess +import os +import datetime +from datetime import timedelta +import re + +def speak_text_from_file(file_path, voice='zh', speed=150, pitch=50, output_file=None): + """ + 从文件中读取文本并使用espeak将其转换为语音。 + + :param file_path: 文本文件的路径 + :param voice: 使用的声音(例如 'zh' 用于中文) + :param speed: 语速(默认为 150) + :param pitch: 音调(默认为 50) + :param output_file: 如果指定,将语音输出保存为WAV文件 + """ + with open(file_path, 'r', encoding='utf-8') as file: + text = file.read() + + # 构建espeak命令 + cmd = ['espeak', '-v', voice, '-s', str(speed), '-p', str(pitch)] + if output_file: + cmd.extend(['-w', output_file]) + else: + # 如果没有指定输出文件,则直接播放语音 + pass # 这里可以添加其他选项,如音量调整等 + + # 使用stdin将文本传递给espeak + try: + proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8') + proc.communicate(input=text) + + if proc.returncode != 0: + # 获取并打印错误信息 + error_message = proc.stderr.read() + print(f"Error executing espeak: {error_message}") + except Exception as e: + print(f"An error occurred: {e}") + +# 获取当前文件夹路径 +current_dir = os.getcwd() + +# 获取父文件夹路径 +parent_dir = os.path.dirname(current_dir) + +specific_folder_name = "txt" + +specific_folder_path = os.path.join(current_dir, specific_folder_name) + +# 列出当前文件夹中的所有txt文件 +txt_files = [f for f in os.listdir(specific_folder_path) if f.endswith('.txt')] + +# 提取文件名中的数字,并找出最大的数字 +max_number = -1 +max_file = None +for txt_file in txt_files: + # 使用正则表达式提取文件名中的数字 + match = re.search(r'^(\d+)\.txt$', txt_file) + if match: + file_number = int(match.group(1)) + if file_number > max_number: + max_number = file_number + max_file = txt_file + +# 检查是否找到了数字命名最大的txt文件 +if max_file: + file_path = 
os.path.join(current_dir, max_file) + # 调用函数来朗读文本文件 + speak_text_from_file(file_path, voice='zh', speed=140, pitch=55) + + # 如果你想要将语音保存为WAV文件,可以这样做: + output_wav_file = 'output/output.wav' # WAV文件的输出路径 + speak_text_from_file(file_path, voice='zh', speed=140, pitch=55, output_file=output_wav_file) +else: + print("没有找到数字命名最大的txt文件。") + print(specific_folder_path) \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231806.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231806.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231806.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231808.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231808.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231808.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231810.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231810.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231810.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231813.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231813.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231813.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231815.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231815.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231815.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231817.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231817.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231817.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231819.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231819.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231819.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231821.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231821.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231821.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231823.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231823.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231823.txt @@ -0,0 +1,4 @@ +English + +X 
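In tts.py above, proc.stderr.read() is called after communicate(), but communicate() has already drained and closed the stderr pipe, so the espeak error text is not recoverable at that point. A sketch of the same espeak invocation that takes stderr from communicate()'s return value instead (assumes espeak is on PATH; speak() is an illustrative name, not the function in the diff):

```python
import subprocess

def speak(text, voice="zh", speed=150, pitch=50, output_file=None):
    cmd = ["espeak", "-v", voice, "-s", str(speed), "-p", str(pitch)]
    if output_file:
        cmd += ["-w", output_file]
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE,
                            encoding="utf-8")
    _, err = proc.communicate(input=text)  # communicate() returns (stdout, stderr)
    if proc.returncode != 0:
        print(f"espeak failed: {err}")
```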
is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231825.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231825.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231825.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231827.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231827.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231827.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231829.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231829.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231829.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231831.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231831.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231831.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231833.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231833.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231833.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231835.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231835.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231835.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231837.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231837.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231837.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231839.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231839.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231839.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231841.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231841.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231841.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231843.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231843.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231843.txt @@ -0,0 +1,4 @@ +English + +X is + 
\ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231845.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231845.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231845.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231847.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231847.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231847.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231849.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231849.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231849.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231851.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231851.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231851.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231853.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231853.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231853.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231855.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231855.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231855.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231857.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231857.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231857.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231859.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231859.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231859.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231901.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231901.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231901.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231904.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231904.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231904.txt @@ -0,0 +1,4 @@ +English + +X is + \ No 
newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231906.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231906.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231906.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231908.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231908.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231908.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231910.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231910.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231910.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231912.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231912.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231912.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231914.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231914.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231914.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231916.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231916.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231916.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231918.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231918.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231918.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231920.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231920.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231920.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231922.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231922.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231922.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231924.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231924.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231924.txt @@ -0,0 +1,4 @@ +English + +X is + \ No 
newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231926.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231926.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231926.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231928.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231928.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231928.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231930.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231930.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231930.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231932.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231932.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231932.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231934.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231934.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231934.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231936.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231936.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231936.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231938.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231938.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231938.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231940.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231940.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231940.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231942.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231942.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231942.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231944.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231944.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231944.txt @@ -0,0 +1,4 @@ +English + +X is + \ No 
newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231946.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231946.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231946.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231948.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231948.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231948.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231951.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231951.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231951.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231953.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231953.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231953.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231955.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231955.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231955.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231957.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231957.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231957.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701231959.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701231959.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701231959.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701232001.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701232001.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701232001.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701232003.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701232003.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701232003.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701232005.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701232005.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701232005.txt @@ -0,0 +1,4 @@ +English + +X is + \ No 
newline at end of file diff --git a/src/Guide_stick_system/image_recognition/manage/txt/20240701232007.txt b/src/Guide_stick_system/image_recognition/manage/txt/20240701232007.txt new file mode 100644 index 0000000..b7bd227 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/manage/txt/20240701232007.txt @@ -0,0 +1,4 @@ +English + +X is + \ No newline at end of file diff --git a/src/Guide_stick_system/image_recognition/yolov5_detector.py b/src/Guide_stick_system/image_recognition/yolov5_detector.py new file mode 100644 index 0000000..9d5f976 --- /dev/null +++ b/src/Guide_stick_system/image_recognition/yolov5_detector.py @@ -0,0 +1,285 @@ +# 假设这是yolo_detector.py文件的一部分 +import torch +import cv2 +import logging +import numpy as np + +from utils.general import non_max_suppression, scale_coords +from utils.augmentations import letterbox +from utils.torch_utils import select_device + +from models.experimental import attempt_load +from PIL import Image +from typing import Tuple, Union + + +# 配置日志 +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') + +class YOLO_Detector: + + def __init__(self, weights, img_size, conf_thres, iou_thres, classes=None, agnostic=False): + self.weights = weights + self.img_size = img_size + self.conf_thres = conf_thres + self.iou_thres = iou_thres + self.classes = classes + self.agnostic = agnostic + self.device = select_device('') + logging.info("YOLO_Detector initialized with configuration: weights=%s, img_size=%d, conf_thres=%.2f, iou_thres=%.2f", weights, img_size, conf_thres, iou_thres) + + def select_device(device_id='0'): + """ + 根据设备ID选择计算设备。 + + 参数: + - device_id: 设备ID,默认为'0'。可以是整数,表示CUDA设备的索引;也可以是字符串'gpu',表示优先选择CUDA设备。 + + 返回: + - torch.device对象,指向选择的计算设备,可以是CUDA设备或CPU设备。 + """ + try: + # 尝试将设备ID转换为整数,以便选择具体的CUDA设备 + device_id = int(device_id) + # 如果CUDA可用,返回指定索引的CUDA设备;否则返回CPU设备 + return torch.device(f'cuda:{device_id}' if torch.cuda.is_available() else 'cpu') + except ValueError: + # 如果设备ID不是整数,尝试将其解释为字符串指令 + # 如果CUDA可用且设备ID为'gpu',返回CUDA设备;否则返回CPU设备 + return torch.device('cuda' if torch.cuda.is_available() and device_id.lower() == 'gpu' else 'cpu') + + def attempt_load(weights, map_location=torch.device('cpu')): + """ + 尝试加载预训练模型。 + + 参数: + weights (str or Tensor): 模型权重的文件路径或Tensor。 + map_location (torch.device): 指定加载模型时的目标设备。 + + 返回: + torch.nn.Module: 加载成功的模型,如果加载失败则返回None。 + """ + # 设置日志记录级别为ERROR,只记录错误信息 + # 设置日志记录 + logging.basicConfig(level=logging.ERROR) + + try: + # 尝试加载模型 + # 使用torch.nn.Module.load_state_dict显式加载状态来提高安全性 + # 首先尝试直接加载模型 + model = torch.load(weights, map_location=map_location) + except FileNotFoundError as e: + # 记录文件找不到的错误信息并返回None + logging.error(f"模型文件找不到: {e}") + return None + except torch.serialization.SerializationError as e: + # 记录模型版本不兼容的错误信息并返回None + logging.error(f"模型版本不兼容: {e}") + return None + except Exception as e: + # 记录其他未知错误信息并返回None + logging.error(f"加载模型时发生未知错误: {e}") + return None + + # 检查加载的模型是否是字典类型,并且包含'model'键 + # 检查是否模型是一个字典,如果是,尝试从字典中加载'model'键 + if isinstance(model, dict) and 'model' in model: + try: + # 如果model不是nn.Module的实例,抛出异常 + # 确保model是torch.nn.Module的实例 + if not isinstance(model, torch.nn.Module): + raise ValueError("模型字典中的'model'键值不是torch.nn.Module的实例") + # 创建一个新的nn.Module实例,并加载字典中的模型权重 + # 加载模型状态 + actual_model = torch.nn.Module() + actual_model.load_state_dict(model['model']) + return actual_model + except Exception as e: + # 记录从字典加载模型失败的错误信息并返回None + logging.error(f"从字典加载模型时发生错误: {e}") + return None + else: + # 如果加载的模型不是字典类型,直接返回加载的模型 + return 
model + + def letterbox(img: Image.Image, new_shape: Union[int, Tuple[int, int]] = 640, color: Tuple[int, int, int] = (114, 114, 114), auto: bool = True, scaleFill: bool = False, scaleUp: bool = True) -> Image.Image: + """ + 对图像进行letterbox调整大小。 + + 参数: + - img: PIL Image对象,需要调整大小的图像。 + - new_shape: 目标图像的宽度和高度,可以是整数(表示正方形目标)或元组(表示宽高)。 + - color: 填充边框的颜色,使用RGB格式的三元组表示。 + - auto: 是否根据图像纵横比自动调整大小。 + - scaleFill: 是否按需扩展图像以填满整个目标区域。(当前未使用) + - scaleUp: 如果设置为False,则不会对小于目标尺寸的图像进行放大。(当前未使用) + + 返回: + - Image.Image: 调整大小并填充后的图像。 + """ + + # 验证输入 + if not isinstance(img, Image.Image): + raise ValueError("img 参数必须是 PIL Image 对象。") + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + if not isinstance(new_shape, tuple) or len(new_shape) != 2 or any(not isinstance(x, int) or x <= 0 for x in new_shape): + raise ValueError("new_shape 参数必须是正整数或包含两个正整数的元组。") + if not isinstance(color, tuple) or len(color) != 3 or any(not isinstance(x, int) or x < 0 or x > 255 for x in color): + raise ValueError("color 参数必须是 RGB 颜色的三元组,每个值介于 0 和 255 之间。") + + # 获取图像当前尺寸并处理边界条件 + h, w = img.size + if h == 0 or w == 0: + raise ValueError("图像的宽度或高度为0。") + + # 计算新尺寸 + new_h, new_w = new_shape + if auto: + aspect_ratio = min(new_h / h, new_w / w) + new_w, new_h = int(w * aspect_ratio), int(h * aspect_ratio) + else: + new_w, new_h = new_shape + + # 调整图像大小并在新图像中填充 + resized = img.resize((new_w, new_h), Image.BILINEAR) + new_img = Image.new('RGB', (new_shape[1], new_shape[0]), color=color) + offset = ((new_shape[1] - new_w) // 2, (new_shape[0] - new_h) // 2) + new_img.paste(resized, offset) + + return new_img + + def non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False): + """ + 对预测框进行非极大值抑制(Non-Maximum Suppression, NMS)操作。 + + 参数: + - pred: 包含检测结果的张量,形状为 (num_boxes, num_classes + 5),其中每个元素包含 [x1, y1, x2, y2, confidence, class_id] + - conf_thres: 保留的置信度阈值 + - iou_thres: IOU阈值用于去除重叠的框 + - classes: 如果不为None,则只对指定类别的预测框进行NMS + - agnostic: 是否忽略类别信息进行NMS,默认False + + 返回: + - filtered_boxes: 保留下来的预测框,形状为 (num_filtered_boxes, num_classes + 5) + """ + if pred.numel() == 0: + return pred.new_zeros((0, pred.size(-1))) + + # 获取置信度大于conf_thres的预测框 + indices = torch.where(pred[..., 4] > conf_thres)[0] + + # 将pred张量切片为仅包含这些框的张量 + pred = pred[indices] + + # 如果指定了类别,只保留这些类别的框 + if classes is not None: + class_mask = pred[..., 5].unsqueeze(-1) == classes[:, None] + pred = pred[class_mask.any(dim=1)] + + # 对预测框进行排序,按置信度降序 + sorted_scores, sorted_indices = pred[..., 4].sort(descending=True) + + # 初始化保留的框索引列表 + kept_indices = [] + + while sorted_indices.numel() > 0: + i = sorted_indices[0] + kept_indices.append(i.item()) + + # 计算与当前框IOU大于iou_thres的其他框 + iou = box_iou(pred[i.unsqueeze(0)], pred[sorted_indices[1:]]) + mask = iou < iou_thres + + # 更新排序索引 + sorted_indices = sorted_indices[mask] + sorted_scores = sorted_scores[mask] + + # 根据保留的索引筛选预测框 + kept_boxes = pred[torch.tensor(kept_indices, dtype=torch.long)] + + # 如果agnostic为True,忽略类别信息 + if agnostic: + kept_boxes[..., 5] = 0 + + return kept_boxes + + def scale_coords(img_shape, coords, new_shape): + # 确保输入参数符合预期的格式和类型 + if not (isinstance(img_shape, list) and isinstance(new_shape, list) and len(img_shape) == 2 and len(new_shape) == 2): + raise ValueError("img_shape and new_shape must be lists of length 2.") + if not (isinstance(coords, np.ndarray) and coords.shape[1] == 4): + raise ValueError("coords must be a numpy array with 4 columns.") + + # 验证形状为正 + if any(i <= 0 for i in img_shape) or any(i <= 0 for i in new_shape): + raise 
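# Note: non_max_suppression() above calls box_iou(), which is not defined anywhere in
# yolov5_detector.py. A minimal pairwise-IoU helper in the same (x1, y1, x2, y2)
# convention is sketched here under that assumption; it is not the detector's original code.
import torch

def box_iou(boxes1, boxes2):
    # Box areas; only the first four columns (coordinates) are used, so rows that
    # also carry confidence/class values can be passed in directly.
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    lt = torch.max(boxes1[:, None, :2], boxes2[None, :, :2])    # intersection top-left
    rb = torch.min(boxes1[:, None, 2:4], boxes2[None, :, 2:4])  # intersection bottom-right
    wh = (rb - lt).clamp(min=0)
    inter = wh[..., 0] * wh[..., 1]
    return inter / (area1[:, None] + area2[None, :] - inter)    # (N, M) IoU matrix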
ValueError("img_shape and new_shape must have positive elements.") + + # 计算缩放比例 + ratio_w = new_shape[0] / img_shape[1] + ratio_h = new_shape[1] / img_shape[0] + ratio = min(ratio_w, ratio_h) # 保证比例不会超过任一边界 + + if ratio < 1: + new_w = int(img_shape[1] * ratio) + new_h = int(img_shape[0] * ratio) + # 重新计算中心点 + center_x = (coords[:, 0] + coords[:, 2]) / 2 + center_y = (coords[:, 1] + coords[:, 3]) / 2 + center_x = center_x * new_w / img_shape[1] + center_y = center_y * new_h / img_shape[0] + # 计算新的宽和高 + w = (coords[:, 2] - coords[:, 0]) * ratio + h = (coords[:, 3] - coords[:, 1]) * ratio # 完善高度的更新计算 + + # 更新坐标 + scaled_coords = np.array([center_x - w / 2, center_y - h / 2, center_x + w / 2, center_y + h / 2]).T + return scaled_coords + else: + # 如果ratio >= 1,不需要缩放,直接返回原coords + return coords + + def detect_image(weights, img_path, img_size=640, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False): + device = select_device('') + + try: + device = select_device('') + logging.info("Selecting device: %s", device) + model = attempt_load(weights, map_location=device) + model.eval() + logging.info("Model loaded successfully") + except Exception as e: + logging.error(f"Error loading the model: {e}") + return [] + + try: + img = Image.open(img_path) + img = letterbox(img, new_shape=img_size)[0] + img = np.array(img) + logging.info("Image loaded and preprocessed") + except Exception as e: + logging.error(f"Error loading the image: {e}") + return [] + + img = img[:, :, ::-1].transpose(2, 0, 1) + img = np.ascontiguousarray(img) + + img_tensor = torch.from_numpy(img).to(device) + img_tensor = img_tensor.float() + img_tensor /= 255.0 + + if img_tensor.ndimension() == 3: + img_tensor = img_tensor.unsqueeze(0) + + try: + pred = model(img_tensor, augment=False)[0] + logging.info("Inference completed") + except Exception as e: + logging.error(f"Error during inference: {e}") + return [] + + pred = non_max_suppression(pred, conf_thres, iou_thres, classes=classes, agnostic=agnostic) + if len(pred): + pred[:, :4] = scale_coords(img.shape[1:], pred[:, :4], img_size).round() + logging.info("Non-maximum suppression completed") + return pred diff --git a/src/Guide_stick_system/voice_assistant/README.md b/src/Guide_stick_system/voice_assistant/README.md new file mode 100644 index 0000000..06c5c26 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/README.md @@ -0,0 +1,7 @@ +# 树莓派实现语音助手 + +### 基于: +1. [Snowboy](https://snowboy.kitt.ai/dashboard),用于实现唤醒系统 +2. 
[百度语音](https://cloud.baidu.com/),用于语音识别 + +#### 使用python编写,具体介绍前往 [树莓派实现语音助手](https://www.passerma.com/article/54) \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/__pycache__/snowboydecoder.cpython-39.pyc b/src/Guide_stick_system/voice_assistant/__pycache__/snowboydecoder.cpython-39.pyc new file mode 100644 index 0000000..3bca655 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/__pycache__/snowboydecoder.cpython-39.pyc differ diff --git a/src/Guide_stick_system/voice_assistant/__pycache__/snowboydetect.cpython-39.pyc b/src/Guide_stick_system/voice_assistant/__pycache__/snowboydetect.cpython-39.pyc new file mode 100644 index 0000000..e5b5617 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/__pycache__/snowboydetect.cpython-39.pyc differ diff --git a/src/Guide_stick_system/voice_assistant/_snowboydetect.so b/src/Guide_stick_system/voice_assistant/_snowboydetect.so new file mode 100644 index 0000000..f3c78d3 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/_snowboydetect.so differ diff --git a/src/Guide_stick_system/voice_assistant/audio/audio.wav b/src/Guide_stick_system/voice_assistant/audio/audio.wav new file mode 100644 index 0000000..a44c5fd Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/audio/audio.wav differ diff --git a/src/Guide_stick_system/voice_assistant/audio/exit.wav b/src/Guide_stick_system/voice_assistant/audio/exit.wav new file mode 100644 index 0000000..8a6a638 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/audio/exit.wav differ diff --git a/src/Guide_stick_system/voice_assistant/audio/none.wav b/src/Guide_stick_system/voice_assistant/audio/none.wav new file mode 100644 index 0000000..fda5a70 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/audio/none.wav differ diff --git a/src/Guide_stick_system/voice_assistant/audio/open.wav b/src/Guide_stick_system/voice_assistant/audio/open.wav new file mode 100644 index 0000000..1192790 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/audio/open.wav differ diff --git a/src/Guide_stick_system/voice_assistant/fetchToken.py b/src/Guide_stick_system/voice_assistant/fetchToken.py new file mode 100644 index 0000000..ba23d52 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/fetchToken.py @@ -0,0 +1,69 @@ +#从百度AI开放平台获取语音合成(TTS)服务所需的access_token +# -*- coding: utf-8 -*- + +import sys +import json + +# 保证兼容python2以及python3 +IS_PY3 = sys.version_info.major == 3 +if IS_PY3: + from urllib.request import urlopen + from urllib.request import Request + from urllib.error import URLError + from urllib.parse import urlencode + from urllib.parse import quote_plus +else: + import urllib.request + import urllib + from urllib import quote_plus + from urllib import urlopen + from urllib import Request + from urllib import URLError + from urllib import urlencode + +# 替换你的 API_KEY +API_KEY = 'P2o5IjKMXqGz80VzRuv2v9Uj' + +# 替换你的 SECRET_KEY +SECRET_KEY = 'TbvincgBwx4TfOvVXC5WgrP3WED9J2SQ' + +TOKEN_URL = 'http://openapi.baidu.com/oauth/2.0/token' + + +def fetch_token(): + # 定义请求参数 + params = {'grant_type': 'client_credentials', + 'client_id': API_KEY, + 'client_secret': SECRET_KEY} + # 将参数编码为utf-8 + post_data = urlencode(params) + if (IS_PY3): + post_data = post_data.encode('utf-8') + # 创建请求对象 + req = Request(TOKEN_URL, post_data) + try: + # 发送请求 + f = urlopen(req, timeout=5) + result_str = f.read() + except URLError as err: + # 处理异常 + print('token http response http code : ' + 
str(err.code)) + result_str = err.read() + if (IS_PY3): + result_str = result_str.decode() + + # 将结果转换为字典 + result = json.loads(result_str) + + # 检查结果中是否包含access_token和scope + if ('access_token' in result.keys() and 'scope' in result.keys()): + # 检查scope中是否包含audio_tts_post + if not 'audio_tts_post' in result['scope'].split(' '): + print('please ensure has check the tts ability') + return '' + # 返回access_token + return result['access_token'] + else: + # 否则,检查API_KEY和SECRET_KEY是否正确 + print('please overwrite the correct API_KEY and SECRET_KEY') + return '' diff --git a/src/Guide_stick_system/voice_assistant/resources/._alexa.umdl b/src/Guide_stick_system/voice_assistant/resources/._alexa.umdl new file mode 100644 index 0000000..e8c147b Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/resources/._alexa.umdl differ diff --git a/src/Guide_stick_system/voice_assistant/resources/._alexa_02092017.umdl b/src/Guide_stick_system/voice_assistant/resources/._alexa_02092017.umdl new file mode 100644 index 0000000..e8c147b Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/resources/._alexa_02092017.umdl differ diff --git a/src/Guide_stick_system/voice_assistant/resources/._common.res b/src/Guide_stick_system/voice_assistant/resources/._common.res new file mode 100644 index 0000000..e8c147b Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/resources/._common.res differ diff --git a/src/Guide_stick_system/voice_assistant/resources/._ding.wav b/src/Guide_stick_system/voice_assistant/resources/._ding.wav new file mode 100644 index 0000000..e8c147b Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/resources/._ding.wav differ diff --git a/src/Guide_stick_system/voice_assistant/resources/._dong.wav b/src/Guide_stick_system/voice_assistant/resources/._dong.wav new file mode 100644 index 0000000..e8c147b Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/resources/._dong.wav differ diff --git a/src/Guide_stick_system/voice_assistant/resources/._snowboy.umdl b/src/Guide_stick_system/voice_assistant/resources/._snowboy.umdl new file mode 100644 index 0000000..e8c147b Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/resources/._snowboy.umdl differ diff --git a/src/Guide_stick_system/voice_assistant/resources/alexa.umdl b/src/Guide_stick_system/voice_assistant/resources/alexa.umdl new file mode 100644 index 0000000..0d9db6f Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/resources/alexa.umdl differ diff --git a/src/Guide_stick_system/voice_assistant/resources/alexa_02092017.umdl b/src/Guide_stick_system/voice_assistant/resources/alexa_02092017.umdl new file mode 100644 index 0000000..c4a6094 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/resources/alexa_02092017.umdl differ diff --git a/src/Guide_stick_system/voice_assistant/resources/common.res b/src/Guide_stick_system/voice_assistant/resources/common.res new file mode 100644 index 0000000..0e267f5 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/resources/common.res differ diff --git a/src/Guide_stick_system/voice_assistant/resources/ding.wav b/src/Guide_stick_system/voice_assistant/resources/ding.wav new file mode 100644 index 0000000..79346e0 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/resources/ding.wav differ diff --git a/src/Guide_stick_system/voice_assistant/resources/dong.wav b/src/Guide_stick_system/voice_assistant/resources/dong.wav new file mode 100644 index 
0000000..426596b Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/resources/dong.wav differ diff --git a/src/Guide_stick_system/voice_assistant/resources/snowboy.umdl b/src/Guide_stick_system/voice_assistant/resources/snowboy.umdl new file mode 100644 index 0000000..bb68185 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/resources/snowboy.umdl differ diff --git a/src/Guide_stick_system/voice_assistant/snow.py b/src/Guide_stick_system/voice_assistant/snow.py new file mode 100644 index 0000000..eb3f4bb --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snow.py @@ -0,0 +1,195 @@ +import snowboydecoder +import signal +import wave +import sys +import json +import requests +import time +import os +import base64 +from pyaudio import PyAudio, paInt16 +import webbrowser +from fetchToken import fetch_token +import time + +IS_PY3 = sys.version_info.major == 3 +if IS_PY3: + from urllib.request import urlopen + from urllib.request import Request + from urllib.error import URLError + from urllib.parse import urlencode + from urllib.parse import quote_plus +else: + import urllib.request + from urllib import quote_plus + from urllib.request import urlopen + from urllib.request import Request + from urllib.request import URLError + from urllib import urlencode + +interrupted = False # snowboy监听唤醒结束标志 +endSnow = False # 程序结束标志 + +framerate = 16000 # 采样率 +num_samples = 2000 # 采样点 +channels = 1 # 声道 +sampwidth = 2 # 采样宽度2bytes + +FILEPATH = './audio/audio.wav' # 录制完成存放音频路径 +music_exit = './audio/exit.wav' # 唤醒系统退出语音 +music_open = './audio/open.wav' # 唤醒系统打开语音 +os.close(sys.stderr.fileno()) # 去掉错误警告 + +def signal_handler(signal, frame): + """ + 监听键盘结束 + """ + global interrupted + interrupted = True + +def interrupt_callback(): + """ + 监听唤醒 + """ + global interrupted + return interrupted + +def detected(): + """ + 唤醒成功 + """ + print('唤醒成功') + play(music_open) + global interrupted + interrupted = True + detector.terminate() + +def play(filename): + """ + 播放音频 + """ + wf = wave.open(filename, 'rb') # 打开audio.wav + p = PyAudio() # 实例化 pyaudio + # 打开流 + stream = p.open(format=p.get_format_from_width(wf.getsampwidth()), + channels=wf.getnchannels(), + rate=wf.getframerate(), + output=True) + data = wf.readframes(1024) + while data != b'': + data = wf.readframes(1024) + stream.write(data) + # 释放IO + stream.stop_stream() + stream.close() + p.terminate() + +def save_wave_file(filepath, data): + """ + 存储文件 + """ + # 打开文件 + wf = wave.open(filepath, 'wb') + # 设置通道数 + wf.setnchannels(channels) + # 设置采样宽度 + wf.setsampwidth(sampwidth) + # 设置帧率 + wf.setframerate(framerate) + # 将数据写入文件 + wf.writeframes(b''.join(data)) + # 关闭文件 + wf.close() + +def my_record(): + """ + 录音 + """ + pa = PyAudio() # 初始化pyaudio + stream = pa.open(format=paInt16, channels=channels, + rate=framerate, input=True, frames_per_buffer=num_samples) # 打开流 + my_buf = [] # 初始化缓冲区 + # count = 0 + t = time.time() + print('开始录音...') # 打印开始录音信息 + while time.time() < t + 4: + string_audio_data = stream.read(num_samples) # 读取数据 + my_buf.append(string_audio_data) # 添加到缓冲区 + print('录音结束!') # 打印录音结束信息 + save_wave_file(FILEPATH, my_buf) # 保存录音文件 + stream.close() # 关闭流 + + +def speech2text(speech_data, token, dev_pid=1537): + """ + 音频转文字 + """ + FORMAT = 'wav' + RATE = '16000' + CHANNEL = 1 + CUID = 'baidu_workshop' + SPEECH = base64.b64encode(speech_data).decode('utf-8') + data = { + 'format': FORMAT, + 'rate': RATE, + 'channel': CHANNEL, + 'cuid': CUID, + 'len': len(speech_data), + 'speech': SPEECH, + 'token': token, + 
'dev_pid': dev_pid + } + # 语音转文字接口 该接口可能每个人不一样,取决于你需要哪种语音识别功能,本文使用的是 语音识别极速版 + + url = 'https://vop.baidu.com/pro_api' + headers = {'Content-Type': 'application/json'} # 请求头 + print('正在识别...') + r = requests.post(url, json=data, headers=headers) + Result = r.json() + if 'result' in Result: + return Result['result'][0] + else: + return Result + +def get_audio(file): + """ + 获取音频文件 + """ + with open(file, 'rb') as f: + data = f.read() + return data + +def identifyComplete(text): + """ + 识别成功 + """ + print(text) + maps = { + '打开百度': ['打开百度。', '打开百度', '打开百度,', 'baidu'] + } + if (text == '再见。' or text == '拜拜。'): + play(music_exit) # 关闭系统播放反馈语音 + exit() + if text in maps['打开百度']: + webbrowser.open_new_tab('https://www.baidu.com') + play('./audio/openbaidu.wav') # 识别到播放反馈语音 + else: + play('./audio/none.wav') # 未匹配口令播放反馈语音 + print('操作完成') + +if __name__ == "__main__": + while endSnow == False: + interrupted = False + # 实例化snowboy,第一个参数就是唤醒识别模型位置 + detector = snowboydecoder.HotwordDetector('ma.pmdl', sensitivity=0.5) + print('等待唤醒') + # snowboy监听循环 + detector.start(detected_callback=detected, + interrupt_check=interrupt_callback, + sleep_time=0.03) + my_record() # 唤醒成功开始录音 + TOKEN = fetch_token() # 获取token + speech = get_audio(FILEPATH) + result = speech2text(speech, TOKEN, int(80001)) + if type(result) == str: + identifyComplete(result.strip(',')) \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/.gitignore b/src/Guide_stick_system/voice_assistant/snowboy/.gitignore new file mode 100644 index 0000000..a050624 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/.gitignore @@ -0,0 +1,54 @@ +/lib/libsnowboy-detect.a +snowboy-detect-swig.cc +snowboydetect.py +Snowboy.pm +.DS_Store + +*.dylib +*.pyc +*.o +*.so +*.swp +*.swo + +/examples/C/pa_stable_v19_20140130.tgz +/examples/C/pa_stable_v190600_20161030.tgz +/examples/C/portaudio +/examples/C/demo +/examples/C++/pa_stable_v19_20140130.tgz +/examples/C++/pa_stable_v190600_20161030.tgz +/examples/C++/portaudio +/examples/C++/demo +/examples/C++/demo2 +/examples/Java/Demo.class +/examples/Perl/data/ +/examples/iOS/Obj-C/Pods/Pods.xcodeproj/xcuserdata/ +/examples/iOS/Obj-C/SnowboyTest.xcodeproj/project.xcworkspace/xcuserdata/ +/examples/iOS/Obj-C/SnowboyTest.xcodeproj/xcuserdata/ +/examples/iOS/Obj-C/SnowboyTest.xcworkspace/xcuserdata/ +/examples/iOS/Swift3/SnowboyTest.xcodeproj/project.xcworkspace/xcuserdata/ +/examples/iOS/Swift3/SnowboyTest.xcodeproj/xcuserdata/ + +/swig/Android/OpenBLAS-0.2.18.tar.gz +/swig/Android/android-ndk-r11c-darwin-x86_64.zip +/swig/Android/android-ndk-r14b-darwin-x86_64.zip +/swig/Android/android-ndk-r11c-linux-x86_64.zip +/swig/Android/OpenBLAS-Android/ +/swig/Android/OpenBLAS-Android-ARM32/ +/swig/Android/android-ndk-r11c/ +/swig/Android/android-ndk-r14b/ +/swig/Android/ndk_install/ +/swig/Android/ndk_install_32bit/ +/swig/Android/java/ +/swig/Android/jniLibs/ +/swig/Java/java/ +/swig/Java/jniLibs/ + +/build +/node_modules +/lib/node/binding +/lib/node/index.js + +/dist +**/snowboy.egg-info +/.idea diff --git a/src/Guide_stick_system/voice_assistant/snowboy/.npmignore b/src/Guide_stick_system/voice_assistant/snowboy/.npmignore new file mode 100644 index 0000000..6f1b347 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/.npmignore @@ -0,0 +1,22 @@ +/lib/libsnowboy-detect.a +snowboy-detect-swig.cc +snowboydetect.py +.DS_Store + +*.pyc +*.o +*.so + +/examples/C++/* +/examples/Python/* + +/swig/Android/* +/swig/Python/* + +/build +/node_modules + 
+/lib/node/*.ts + +.npmignore +.travis.yml diff --git a/src/Guide_stick_system/voice_assistant/snowboy/.travis.yml b/src/Guide_stick_system/voice_assistant/snowboy/.travis.yml new file mode 100644 index 0000000..306bea2 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/.travis.yml @@ -0,0 +1,90 @@ +language: cpp + +# Cache node dependencies +cache: + directories: + - node_modules + +# Ubuntu 14.04 Trusty support +sudo: required +dist: trusty + +addons: + apt: + sources: + # add PPAs with more up-to-date toolchains + - ubuntu-toolchain-r-test + - llvm-toolchain-precise-3.9 + packages: + # install toolchains + - libmagic-dev + - libatlas-base-dev + - gcc-5 + - g++-5 + - clang-3.8 + +os: +- linux +- osx + +env: + global: + - secure: Hpft/SbwPrjQbHq+3DeJ8aMCpg2uW4z9MY4XaPPA5FQ80QkUdFMqALRvdBhXf/hm6bEZVLbIMXxqCImL5C4nx1SMUmsL6w/FbJjnamYEopk2MKCPZHKtZOdxsbdUwpL30WRH85DQ0KbcG9LatEr+qLwf9adRQrozhh5zhoRXzjuH8nxS/GRkYuZgTt4wxNt7xYnCVlARS9/V15OeOGcRWw/Q/r++ipINz8ylGqUnTGImZrDZ2nhlOkBSNzrPA7NhCSw1OiGvZpg4zVj/gDkSkPNFn4oDFr1nNDqg0EPFGVXDDI0KA7dpw2DhrJk1z8HgXw8PorPGP0mLnDl4i811KkCz6g6y+ETC6k1VtdB2jss0MCnD9HtxM0RS62yls6Bm5aMhoFjryOHgLHNrjiHfW2/lki421K6QlGp3a2ONkRk9zHiti3uTdtbxlz0kcu7Z8FT045lHNZX0B6QpPiLi2sy7H/dItqAGdWuY0lrGrddX1PpxCckBAZLO8/VEGGGkLQtzbxEXgF+EW0HJxURvUYUF2VCy+kaq86KrFzvSKS/evW/vj7Sq2rNbOCtnIy/rvIKAXU0bbR/1imuEiiMhKdiZku+jRfZZmpjKHoydba9SsHpuNGnR/sH40AIHv7Lv6q+z3mEI+X1YaOVAAlLYWExuHLLbWYjng2gEBIHwmuU= + - secure: RNZDzRXBhS98DMpa0QIKQjL8Nl7Pbo6cYtPyaMjEgF2nv+W+gwhcyDDRUE4psJm26Qkz3AZNfLx/kGKPhhAjBpuGFreCbAFy3uDfbDdcn2K68E+yRSdBAoTIKlxVPpQR11hfPHiAs+3s4BIwLGnuwJSK3JMisboji4ceaxVQpdo0ZcJnNKykN2zabUl+8BW8SYQ8cYp/DLg+wSeqq7eplyYD7zoT/GGnSNylkrRsJxB5zlrRQC/ngUfK7AuxhkfQ14dsdWkkrx0RyVFul5VAc85qAbrtJvLZs2Cu/J3ohNzcRZG7m8+U4diHuIlBFx0ezL3hVBfXkOf74dP8+OnL3rAr/1n+dczl5/5mQqlSsy8UAtUtfdAtd+wRNRy5d+er1YuJBWOGs2SXInjNViEY1Phgs6bY/Lu3wiIxDJH0TORan6ZVSje2/vi7aegRoiqHNrs4m2JuQDCPXu53HKh22+nWgRLLXFT2oBN3FdCz3xj04t+LyT+P5uq9q0jXxKc1nlNpvF3nDzhIuJKcfgBRNm9Wt1vz04xzSRgZEFGMTRWkYTdV+0ZVeqEQjEPo4fRNJ6PT1Tem8VqIoHEKGivGkwiAZ6FhQ/TNkVD7tv5Vhq7eK3ZPXDRakuBsLJ5Nc9QnLCpoEqbuIYqjr8ODKV2HSjS16VaGPbvtYPWzhGKU9C4= + matrix: + - NODE_VERSION="4.0.0" + - NODE_VERSION="5.0.0" + - NODE_VERSION="6.0.0" + - NODE_VERSION="7.0.0" + - NODE_VERSION="8.0.0" + - NODE_VERSION="9.0.0" + +before_install: +# use the correct version of node +- rm -rf ~/.nvm/ && git clone --depth 1 https://github.com/creationix/nvm.git ~/.nvm +- source ~/.nvm/nvm.sh +- nvm install $NODE_VERSION +- nvm use $NODE_VERSION +# get commit message +- COMMIT_MESSAGE=$(git show -s --format=%B $TRAVIS_COMMIT | tr -d '\n') +# put local node-pre-gyp on PATH +- export PATH=./node_modules/.bin/:$PATH +# put global node-gyp and nan on PATH +- npm install node-gyp -g +# install aws-sdk so it is available for publishing +- npm install aws-sdk nan typescript @types/node +# figure out if we should publish or republish +- PUBLISH_BINARY=false +- REPUBLISH_BINARY=false +# if we are building a tag then publish +# - if [[ $TRAVIS_BRANCH == `git describe --tags --always HEAD` ]]; then PUBLISH_BINARY=true; fi; +# or if we put [publish binary] in the commit message +- if test "${COMMIT_MESSAGE#*'[publish binary]'}" != "$COMMIT_MESSAGE"; then PUBLISH_BINARY=true; fi; +# alternativly we can [republish binary] which will replace any existing binary +- if test "${COMMIT_MESSAGE#*'[republish binary]'}" != "$COMMIT_MESSAGE"; then PUBLISH_BINARY=true && REPUBLISH_BINARY=true; fi; +install: +# ensure source install works +- npm install --build-from-source +# test our 
module +- node lib/node/index.js + +before_script: +# if publishing, do it +- if [[ $REPUBLISH_BINARY == true ]]; then node-pre-gyp package unpublish; fi; +- if [[ $PUBLISH_BINARY == true ]]; then node-pre-gyp package publish; fi; +# cleanup +- node-pre-gyp clean +- node-gyp clean + +script: +# if publishing, test installing from remote +- INSTALL_RESULT=0 +- if [[ $PUBLISH_BINARY == true ]]; then INSTALL_RESULT=$(npm install --fallback-to-build=false > /dev/null)$? || true; fi; +# if install returned non zero (errored) then we first unpublish and then call false so travis will bail at this line +- if [[ $INSTALL_RESULT != 0 ]]; then echo "returned $INSTALL_RESULT";node-pre-gyp unpublish;false; fi +# If success then we arrive here so lets clean up +- node-pre-gyp clean + +after_success: +# if success then query and display all published binaries +- node-pre-gyp info diff --git a/src/Guide_stick_system/voice_assistant/snowboy/LICENSE b/src/Guide_stick_system/voice_assistant/snowboy/LICENSE new file mode 100644 index 0000000..10f3579 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/LICENSE @@ -0,0 +1,206 @@ +THIS LICENSE GOVERNS THE SOURCE CODE, THE LIBRARIES, THE RESOURCE FILES, AS WELL +AS THE HOTWORD MODEL snowboy/resources/snowboy.umdl PROVIDED IN THIS REPOSITORY. +ALL OTHER HOTWORD MODELS ARE GOVERNED BY THEIR OWN LICENSES. + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/src/Guide_stick_system/voice_assistant/snowboy/MANIFEST.in b/src/Guide_stick_system/voice_assistant/snowboy/MANIFEST.in new file mode 100644 index 0000000..eed4385 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/MANIFEST.in @@ -0,0 +1,12 @@ +recursive-include include * +recursive-include lib * +recursive-include swig/Python * +recursive-include resources * +include README.md + +exclude *.txt +exclude *.pyc +global-exclude .DS_Store _snowboydetect.so +prune resources/alexa +prune lib/ios +prune lib/android \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/README.md b/src/Guide_stick_system/voice_assistant/snowboy/README.md new file mode 100644 index 0000000..c4063d4 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/README.md @@ -0,0 +1,489 @@ +*Dear KITT.AI users,* + +*We are writing this update to let you know that we plan to shut down all KITT.AI products (Snowboy, NLU and Chatflow) by Dec. 
31st, 2020.* + +*we launched our first product Snowboy in 2016, and then NLU and Chatflow later that year. Since then, we have served more than 85,000 developers, worldwide, accross all our products. It has been 4 extraordinary years in our life, and we appreciate the opportunity to be able to serve the community.* + +*The field of artificial intelligence is moving rapidly. As much as we like our products, we still see that they are getting outdated and are becoming difficult to maintain. All official websites/APIs for our products will be taken down by Dec. 31st, 2020. Our github repositories will remain open, but only community support will be available from this point beyond.* + +*Thank you all, and goodbye!* + +*The KITT.AI Team +Mar. 18th, 2020* + +# Snowboy Hotword Detection + +by [KITT.AI](http://kitt.ai). + +[Home Page](https://snowboy.kitt.ai) + +[Full Documentation](http://docs.kitt.ai/snowboy) and [FAQ](http://docs.kitt.ai/snowboy#faq) + +[Discussion Group](https://groups.google.com/a/kitt.ai/forum/#!forum/snowboy-discussion) (or send email to snowboy-discussion@kitt.ai) + +[Commercial application FAQ](README_commercial.md) + +Version: 1.3.0 (2/19/2018) + +## Alexa support + +Snowboy now brings hands-free experience to the [Alexa AVS sample app](https://github.com/alexa/avs-device-sdk/wiki/Raspberry-Pi-Quick-Start-Guide-with-Script) on Raspberry Pi! See more info below regarding the performance and how you can use other hotword models. The following instructions currently support AVS sdk Version 1.12.1. + +**Performance** + +The performance of hotword detection usually depends on the actual environment, e.g., is it used with a quality microphone, is it used on the street, in a kitchen, or is there any background noise, etc. So we feel it is best for the users to evaluate it in their real environment. For the evaluation purpose, we have prepared an Android app which can be installed and run out of box: [SnowboyAlexaDemo.apk](https://github.com/Kitt-AI/snowboy/raw/master/resources/alexa/SnowboyAlexaDemo.apk) (please uninstall any previous versions first if you have installed this app before). + +**Kittai KWD Engine** + +* Set up [Alexa AVS sample app](https://github.com/alexa/avs-device-sdk/wiki/Raspberry-Pi-Quick-Start-Guide-with-Script) following the official AVS instructions + +* Apply patch to replace the Sensory KWD engine with Kittai engine +``` +# Copy the patch file to the root directory of Alexa AVS sample app. Please replace $ALEXA_AVS_SAMPLE_APP_PATH with the actual path where you +# cloned the Alexa AVS sample app repository, and replace $SNOWBOY_ROOT_PATH with the actual path where you clone the Snowboy repository +cd $ALEXA_AVS_SAMPLE_APP_PATH +cp $SNOWBOY_PATH/resource/alexa/alexa-avs-sample-app/avs-kittai.patch ./ + +# Apply the patch, this will modify the scripts setup.sh and pi.sh +patch < avs-kittai.patch +``` + +* Re-compile the avs-device-sdk and sample app +``` +sudo bash setup.sh config.json +``` + +* Run the sample app +``` +sudo bash startsample.sh +``` + +Here is a [demo video](https://www.youtube.com/watch?v=wiLEr6TeE58) for how to use Snowboy hotword engine in Alexa Voice Service. 
+ +**Personal model** + +* Create your personal hotword model through our [website](https://snowboy.kitt.ai) or [hotword API](https://snowboy.kitt.ai/api/v1/train/) + + +* Put your personal model in [snowboy/resources](https://github.com/Kitt-AI/snowboy/tree/master/resources) +``` +# Please put YOUR_PERSONAL_MODEL.pmdl in $ALEXA_AVS_SAMPLE_APP_PATH/third-party/snowboy/resources, +# and $ALEXA_AVS_SAMPLE_APP_PATH with the actual path where you put the Alexa AVS sample app repository. + +cp YOUR_PERSONAL_MODEL.pmdl $ALEXA_AVS_SAMPLE_APP_PATH/third-party/snowboy/resources/ + +``` + +* Replace the model name 'alexa.umdl' with your personal model name, update `KITT_AI_SENSITIVITY`, set `KITT_AI_APPLY_FRONT_END_PROCESSING` to `false` in the [Alexa AVS sample app code](https://github.com/alexa/avs-device-sdk/blob/master/KWD/KWDProvider/src/KeywordDetectorProvider.cpp) and re-compile +``` +# Modify $ALEXA_AVS_SAMPLE_APP_PATH/avs-device-sdk/blob/master/KWD/KWDProvider/src/KeywordDetectorProvider.cpp: +# Replace the model name 'alexa.umdl' with your personal model name 'YOUR_PERSONAL_MODEL.pmdl' at line 52 +# Update `KITT_AI_SENSITIVITY` at line 26 +# Set `KITT_AI_APPLY_FRONT_END_PROCESSING` to `false` at line 32 +sudo bash setup.sh config.json +``` + +* Run the wake word agent with engine set to `kitt_ai`! + +Here is a [demo video](https://www.youtube.com/watch?v=9Bj8kdfwG7I) for how to use a personal model in Alexa Voice Service. + +**Universal model** + +* Put your personal model in [snowboy/resources](https://github.com/Kitt-AI/snowboy/tree/master/resources) +``` +# Please put YOUR_UNIVERSAL_MODEL.umdl in $ALEXA_AVS_SAMPLE_APP_PATH/third-party/snowboy/resources, +# and $ALEXA_AVS_SAMPLE_APP_PATH with the actual path where you put the Alexa AVS sample app repository. + +cp YOUR_UNIVERSAL_MODEL.umdl $ALEXA_AVS_SAMPLE_APP_PATH/third-party/snowboy/resources/ + +``` + +* Replace the model name 'alexa.umdl' with your universal model name, update `KITT_AI_SENSITIVITY` in the [Alexa AVS sample app code](https://github.com/alexa/avs-device-sdk/blob/master/KWD/KWDProvider/src/KeywordDetectorProvider.cpp) and re-compile +``` +# Modify $ALEXA_AVS_SAMPLE_APP_PATH/avs-device-sdk/blob/master/KWD/KWDProvider/src/KeywordDetectorProvider.cpp: +# Replace the model name 'alexa.umdl' with your universal model name 'YOUR_UNIVERSAL_MODEL.umdl' at line 52 +# Update `KITT_AI_SENSITIVITY` at line 26 +sudo bash setup.sh config.json +``` + +* Run the wake word agent with engine set to `kitt_ai`! + + +## Hotword as a Service + +Snowboy now offers **Hotword as a Service** through the ``https://snowboy.kitt.ai/api/v1/train/`` +endpoint. Check out the [Full Documentation](http://docs.kitt.ai/snowboy) and example [Python/Bash script](examples/REST_API) (other language contributions are very welcome). + +As a quick start, ``POST`` to https://snowboy.kitt.ai/api/v1/train: + + { + "name": "a word", + "language": "en", + "age_group": "10_19", + "gender": "F", + "microphone": "mic type", + "token": "", + "voice_samples": [ + {wave: ""}, + {wave: ""}, + {wave: ""} + ] + } + +then you'll get a trained personal model in return! + +## Introduction + +Snowboy is a customizable hotword detection engine for you to create your own +hotword like "OK Google" or "Alexa". It is powered by deep neural networks and +has the following properties: + +* **highly customizable**: you can freely define your own magic phrase here – +let it be “open sesame”, “garage door open”, or “hello dreamhouse”, you name it. 
+ +* **always listening** but protects your privacy: Snowboy does not use Internet +and does *not* stream your voice to the cloud. + +* light-weight and **embedded**: it even runs on a Raspberry Pi and consumes +less than 10% CPU on the weakest Pi (single-core 700MHz ARMv6). + +* Apache licensed! + +Currently Snowboy supports (look into the [lib](lib) folder): + +* all versions of Raspberry Pi (with Raspbian based on Debian Jessie 8.0) +* 64bit Mac OS X +* 64bit Ubuntu 14.04 +* iOS +* Android +* ARM64 (aarch64, Ubuntu 16.04) + +It ships in the form of a **C++ library** with language-dependent wrappers +generated by SWIG. We welcome wrappers for new languages -- feel free to send a +pull request! + +Currently we have built wrappers for: + +* C/C++ +* Java/Android +* Go (thanks to @brentnd and @deadprogram) +* Node (thanks to @evancohen and @nekuz0r) +* Perl (thanks to @iboguslavsky) +* Python2/Python3 +* iOS/Swift3 (thanks to @grimlockrocks) +* iOS/Object-C (thanks to @patrickjquinn) + +If you want support on other hardware/OS, please send your request to +[snowboy@kitt.ai](mailto:snowboy.kitt.ai) + +Note: **Snowboy does not support Windows** yet. Please build Snowboy on *nix platforms. + +## Pricing for Snowboy models + +Hackers: free + +* Personal use +* Community support + +Business: please contact us at [snowboy@kitt.ai](mailto:snowboy@kitt.ai) + +* Personal use +* Commercial license +* Technical support + +## Pretrained universal models + +We provide pretrained universal models for testing purpose. When you test those +models, bear in mind that they may not be optimized for your specific device or +environment. + +Here is the list of the models, and the parameters that you have to use for them: + +* **resources/alexa/alexa-avs-sample-app/alexa.umdl**: Universal model for the hotword "Alexa" optimized for [Alexa AVS sample app](https://github.com/alexa/alexa-avs-sample-app). Set SetSensitivity to 0.6, and set ApplyFrontend to true. This is so far the best "Alexa" model we released publicly, when ApplyFrontend is set to true. +* **resources/models/snowboy.umdl**: Universal model for the hotword "Snowboy". Set SetSensitivity to 0.5 and ApplyFrontend to false. +* **resources/models/jarvis.umdl**: Universal model for the hotword "Jarvis" (https://snowboy.kitt.ai/hotword/29). It has two different models for the hotword Jarvis, so you have to use two sensitivites. Set sensitivities to "0.8,0.80" and ApplyFrontend to true. +* **resources/models/smart_mirror.umdl**: Universal model for the hotword "Smart Mirror" (https://snowboy.kitt.ai/hotword/47). Set sensitivity to Sensitivity to 0.5, and ApplyFrontend to false. +* **resources/models/subex.umdl**: Universal model for the hotword "Subex" (https://snowboy.kitt.ai/hotword/22014). Set sensitivity to Sensitivity to 0.5, and ApplyFrontend to true. +* **resources/models/neoya.umdl**: Universal model for the hotword "Neo ya" (https://snowboy.kitt.ai/hotword/22171). It has two different models for the hotword "Neo ya", so you have to use two sensitivites. Set sensitivities to "0.7,0.7", and ApplyFrontend to true. +* **resources/models/hey_extreme.umdl**: Universal model for the hotword "Hey Extreme" (https://snowboy.kitt.ai/hotword/15428). Set sensitivity to Sensitivity to 0.6, and ApplyFrontend to true. +* **resources/models/computer.umdl**: Universal model for the hotword "Computer" (https://snowboy.kitt.ai/hotword/46). Set sensitivity to Sensitivity to 0.6, and ApplyFrontend to true. 
+* **resources/models/view_glass.umdl**: Universal model for the hotword "View Glass" (https://snowboy.kitt.ai/hotword/7868). Set Sensitivity to 0.7, and ApplyFrontend to true. + +## Precompiled node module + +Snowboy is available in the form of a native node module precompiled for: +64 bit Ubuntu, MacOS X, and the Raspberry Pi (Raspbian 8.0+). For quick +installation run: + + npm install --save snowboy + +For sample usage see the `examples/Node` folder. You may have to install +dependencies like `fs`, `wav` or `node-record-lpcm16` depending on which script +you use. + +## Precompiled Binaries with Python Demo +* 64 bit Ubuntu [14.04](https://s3-us-west-2.amazonaws.com/snowboy/snowboy-releases/ubuntu1404-x86_64-1.3.0.tar.bz2) +* [MacOS X](https://s3-us-west-2.amazonaws.com/snowboy/snowboy-releases/osx-x86_64-1.3.0.tar.bz2) +* Raspberry Pi with Raspbian 8.0, all versions + ([1/2/3/Zero](https://s3-us-west-2.amazonaws.com/snowboy/snowboy-releases/rpi-arm-raspbian-8.0-1.3.0.tar.bz2)) + +If you want to compile a version against your own environment/language, read on. + +## Dependencies + +To run the demo you will likely need the following, depending on which demo you +use and what platform you are working with: + +* SoX (audio conversion) +* PortAudio or PyAudio (audio capturing) +* SWIG 3.0.10 or above (compiling Snowboy for different languages/platforms) +* ATLAS or OpenBLAS (matrix computation) + +You can also find the exact commands you need to install the dependencies on +Mac OS X, Ubuntu or Raspberry Pi below. + +### Mac OS X + +`brew` install `swig`, `sox`, `portaudio` and its Python binding `pyaudio`: + + brew install swig portaudio sox + pip install pyaudio + +If you don't have Homebrew installed, please download it [here](http://brew.sh/). If you don't have `pip`, you can install it [here](https://pip.pypa.io/en/stable/installing/). + +Make sure that you can record audio with your microphone: + + rec t.wav + +### Ubuntu/Raspberry Pi/Pine64/Nvidia Jetson TX1/Nvidia Jetson TX2 + +First `apt-get` install `sox`, `portaudio` and its Python binding `pyaudio`: + + sudo apt-get install python-pyaudio python3-pyaudio sox + pip install pyaudio + +Compile a supported swig version (3.0.10 or above) + + wget http://downloads.sourceforge.net/swig/swig-3.0.10.tar.gz + sudo apt-get install libpcre3 libpcre3-dev + ./configure --prefix=/usr \ + --without-clisp \ + --without-maximum-compile-warnings && + make + make install && + install -v -m755 -d /usr/share/doc/swig-3.0.10 && + cp -v -R Doc/* /usr/share/doc/swig-3.0.10 + +Then install the `atlas` matrix computing library: + + sudo apt-get install libatlas-base-dev + +Make sure that you can record audio with your microphone: + + rec t.wav + +If you need extra setup on your audio (especially on a Raspberry Pi), please see the [full documentation](http://docs.kitt.ai/snowboy). + +## Compile a Node addon +Compiling a node addon for Linux and the Raspberry Pi requires the installation of the following dependencies: + + sudo apt-get install libmagic-dev libatlas-base-dev + +Then to compile the addon run the following from the root of the snowboy repository: + + npm install + ./node_modules/node-pre-gyp/bin/node-pre-gyp clean configure build + +## Compile a Java Wrapper + + # Make sure you have JDK installed. + cd swig/Java + make + +SWIG will generate a directory called `java` which contains converted Java wrappers and a directory called `jniLibs` which contains the JNI library. 
+ +To run the Java example script: + + cd examples/Java + make run + +## Compile a Python Wrapper + + cd swig/Python + make + +SWIG will generate a `_snowboydetect.so` file and a simple (but hard-to-read) python wrapper `snowboydetect.py`. We have provided a higher level python wrapper `snowboydecoder.py` on top of that. + +Feel free to adapt the `Makefile` in `swig/Python` to your own system's setting if you cannot `make` it. + +## Compile a GO Wrapper + + cd examples/Go + go get github.com/Kitt-AI/snowboy/swig/Go + go build -o snowboy main.go + ./snowboy ../../resources/snowboy.umdl ../../resources/snowboy.wav + +Expected Output: + +``` +Snowboy detecting keyword in ../../resources/snowboy.wav +Snowboy detected keyword 1 +``` + +For more, please read `examples/Go/readme.md`. + +## Compile a Perl Wrapper + + cd swig/Perl + make + +The Perl examples include training personal hotword using the KITT.AI RESTful APIs, adding Google Speech API after the hotword detection, etc. To run the examples, do the following + + cd examples/Perl + + # Install cpanm, if you don't already have it. + curl -L https://cpanmin.us | perl - --sudo App::cpanminus + + # Install the dependencies. Note, on Linux you will have to install the + # PortAudio package first, using e.g.: + # apt-get install portaudio19-dev + sudo cpanm --installdeps . + + # Run the unit test. + ./snowboy_unit_test.pl + + # Run the personal model training example. + ./snowboy_RESTful_train.pl + + # Run the Snowboy Google Speech API example. By default it uses the Snowboy + # universal hotword. + ./snowboy_googlevoice.pl [Hotword_Model] + + +## Compile an iOS Wrapper + +Using Snowboy library in Objective-C does not really require a wrapper. It is basically the same as using C++ library in Objective-C. We have compiled a "fat" static library for iOS devices, see the library here `lib/ios/libsnowboy-detect.a`. + +To initialize Snowboy detector in Objective-C: + + snowboy::SnowboyDetect* snowboyDetector = new snowboy::SnowboyDetect( + std::string([[[NSBundle mainBundle]pathForResource:@"common" ofType:@"res"] UTF8String]), + std::string([[[NSBundle mainBundle]pathForResource:@"snowboy" ofType:@"umdl"] UTF8String])); + snowboyDetector->SetSensitivity("0.45"); // Sensitivity for each hotword + snowboyDetector->SetAudioGain(2.0); // Audio gain for detection + +To run hotword detection in Objective-C: + + int result = snowboyDetector->RunDetection(buffer[0], bufferSize); // buffer[0] is a float array + +You may want to play with the frequency of the calls to `RunDetection()`, which controls the CPU usage and the detection latency. + +Thanks to @patrickjquinn and @grimlockrocks, we now have examples of using Snowboy in both Objective-C and Swift3. Check out the examples at `examples/iOS/`, and the screenshots below! + +Obj-C Example Swift3 Example + + +## Compile an Android Wrapper + +Full README and tutorial is in [Android README](examples/Android/README.md) and here's a screenshot: + +Android Alexa Demo + +We have prepared an Android app which can be installed and run out of box: [SnowboyAlexaDemo.apk](https://github.com/Kitt-AI/snowboy/raw/master/resources/alexa/SnowboyAlexaDemo.apk) (please uninstall any previous one first if you installed this app before). 
+ +## Quick Start for Python Demo + +Go to the `examples/Python` folder and open your python console: + + In [1]: import snowboydecoder + + In [2]: def detected_callback(): + ....: print "hotword detected" + ....: + + In [3]: detector = snowboydecoder.HotwordDetector("resources/snowboy.umdl", sensitivity=0.5, audio_gain=1) + + In [4]: detector.start(detected_callback) + +Then speak "snowboy" to your microphone to see whetheer Snowboy detects you. + +The `snowboy.umdl` file is a "universal" model that detect different people speaking "snowboy". If you want other hotwords, please go to [snowboy.kitt.ai](https://snowboy.kitt.ai) to record, train and downloand your own personal model (a `.pmdl` file). + +When `sensitiviy` is higher, the hotword gets more easily triggered. But you might get more false alarms. + +`audio_gain` controls whether to increase (>1) or decrease (<1) input volume. + +Two demo files `demo.py` and `demo2.py` are provided to show more usages. + +Note: if you see the following error: + + TypeError: __init__() got an unexpected keyword argument 'model_str' + +You are probably using an old version of SWIG. Please upgrade. We have tested with SWIG version 3.0.7 and 3.0.8. + +## Advanced Usages & Demos + +See [Full Documentation](http://docs.kitt.ai/snowboy). + +## Change Log + +**v1.3.0, 2/19/2018** + +* Added Frontend processing for all platforms +* Added `resources/models/smart_mirror.umdl` for https://snowboy.kitt.ai/hotword/47 +* Added `resources/models/jarvis.umdl` for https://snowboy.kitt.ai/hotword/29 +* Added README for Chinese +* Cleaned up the supported platforms +* Re-structured the model path + +**v1.2.0, 3/25/2017** + +* Added better Alexa model for [Alexa AVS sample app](https://github.com/alexa/alexa-avs-sample-app) +* New decoder that works well for short hotwords like Alexa + +**v1.1.1, 3/24/2017** + +* Added Android demo +* Added iOS demos +* Added Samsung Artik support +* Added Go support +* Added Intel Edison support +* Added Pine64 support +* Added Perl Support +* Added a more robust "Alexa" model (umdl) +* Offering Hotword as a Service through ``/api/v1/train`` endpoint. +* Decoder is not changed. + +**v1.1.0, 9/20/2016** + +* Added library for Node. +* Added support for Python3. +* Added universal model `alexa.umdl` +* Updated universal model `snowboy.umdl` so that it works in noisy environment. + +**v1.0.4, 7/13/2016** + +* Updated universal `snowboy.umdl` model to make it more robust. +* Various improvements to speed up the detection. +* Bug fixes. + +**v1.0.3, 6/4/2016** + +* Updated universal `snowboy.umdl` model to make it more robust in non-speech environment. +* Fixed bug when using float as input data. +* Added library support for Android ARMV7 architecture. +* Added library for iOS. + +**v1.0.2, 5/24/2016** + +* Updated universal `snowboy.umdl` model +* added C++ examples, docs will come in next release. 
+ +**v1.0.1, 5/16/2016** + +* VAD now returns -2 on silence, -1 on error, 0 on voice and >0 on triggered models +* added static library for Raspberry Pi in case people want to compile themselves instead of using the binary version + +**v1.0.0, 5/10/2016** + +* initial release diff --git a/src/Guide_stick_system/voice_assistant/snowboy/README_ZH_CN.md b/src/Guide_stick_system/voice_assistant/snowboy/README_ZH_CN.md new file mode 100644 index 0000000..8b94f52 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/README_ZH_CN.md @@ -0,0 +1,427 @@ +# Snowboy 唤醒词检测 + +[KITT.AI](http://kitt.ai)出品。 + +[Home Page](https://snowboy.kitt.ai) + +[Full Documentation](http://docs.kitt.ai/snowboy) 和 [FAQ](http://docs.kitt.ai/snowboy#faq) + +[Discussion Group](https://groups.google.com/a/kitt.ai/forum/#!forum/snowboy-discussion) (或者发送邮件给 snowboy-discussion@kitt.ai) + +(因为我们每天都会收到很多消息,从2016年9月开始建立了讨论组。请在这里发送一般性的讨论。关于错误,请使用Github问题标签。) + +版本:1.3.0(2/19/2018) + +## Alexa支持 + +Snowboy现在为运行在Raspberry Pi上的[Alexa AVS sample app](https://github.com/alexa/alexa-avs-sample-app)提供了hands-free的体验!有关性能以及如何使用其他唤醒词模型,请参阅下面的信息。 + +**性能** + +唤醒检测的性能通常依赖于实际的环境,例如,它是否与高质量麦克风一起使用,是否在街道上,在厨房中,是否有背景噪音等等. 所以对于性能,我们觉得最好是在使用者真实的环境中进行评估。为了方便评估,我们准备了一个可以直接安装训醒的Android应用程序:[SnowboyAlexaDemo.apk](https://github.com/Kitt-AI/snowboy/raw/master/resources/alexa/SnowboyAlexaDemo.apk) (如果您之前安装了此应用程序,请先卸载它) 。 + +**个人模型** + +* 用以下方式创建您的个人模型:[website](https://snowboy.kitt.ai) 或者 [hotword API](https://snowboy.kitt.ai/api/v1/train/) +* 将[Alexa AVS sample app](https://github.com/alexa/alexa-avs-sample-app)(安装后)的唤醒词模型替换为您的个人模型 + +``` +# Please replace YOUR_PERSONAL_MODEL.pmdl with the personal model you just +# created, and $ALEXA_AVS_SAMPLE_APP_PATH with the actual path where you +# cloned the Alexa AVS sample app repository. +cp YOUR_PERSONAL_MODEL.pmdl $ALEXA_AVS_SAMPLE_APP_PATH/samples/wakeWordAgent/ext/resources/alexa.umdl +``` + +* 在[Alexa AVS sample app code](https://github.com/alexa/alexa-avs-sample-app/blob/master/samples/wakeWordAgent/src/KittAiSnowboyWakeWordEngine.cpp)中设置 `APPLY_FRONTEND` 为 `false`,更新 `SENSITIVITY`,并重新编译 + +``` +# Please replace $ALEXA_AVS_SAMPLE_APP_PATH with the actual path where you +# cloned the Alexa AVS sample app repository. +cd $ALEXA_AVS_SAMPLE_APP_PATH/samples/wakeWordAgent/src/ + +# Modify KittAiSnowboyWakeWordEngine.cpp and update SENSITIVITY at line 28. +# Modify KittAiSnowboyWakeWordEngine.cpp and set APPLY_FRONTEND to false at +# line 30. +make +``` + +* 运行程序,并且把唤醒词引擎设置为`kitt_ai` + + +**通用模型** + +* 将[Alexa AVS sample app](https://github.com/alexa/alexa-avs-sample-app)(安装后)的唤醒词模型替换为您的通用模型 + +``` +# Please replace YOUR_UNIVERSAL_MODEL.umdl with the personal model you just +# created, and $ALEXA_AVS_SAMPLE_APP_PATH with the actual path where you +# cloned the Alexa AVS sample app repository. +cp YOUR_UNIVERSAL_MODEL.umdl $ALEXA_AVS_SAMPLE_APP_PATH/samples/wakeWordAgent/ext/resources/alexa.umdl +``` + +* 在[Alexa AVS sample app code](https://github.com/alexa/alexa-avs-sample-app/blob/master/samples/wakeWordAgent/src/KittAiSnowboyWakeWordEngine.cpp) 中更新 `SENSITIVITY`, 并重新编译 + +``` +# Please replace $ALEXA_AVS_SAMPLE_APP_PATH with the actual path where you +# cloned the Alexa AVS sample app repository. +cd $ALEXA_AVS_SAMPLE_APP_PATH/samples/wakeWordAgent/src/ + +# Modify KittAiSnowboyWakeWordEngine.cpp and update SENSITIVITY at line 28. 
+make +``` + +* 运行程序,并且把唤醒词引擎设置为`kitt_ai` + + +## 个人唤醒词训练服务 + +Snowboy现在通过 `https://snowboy.kitt.ai/api/v1/train/` 端口提供 **个人唤醒词训练服务**, 请查看[Full Documentation](http://docs.kitt.ai/snowboy)和示例[Python/Bash script](examples/REST_API)(非常欢迎贡献其他的语言)。 + +简单来说,`POST` 下面代码到https://snowboy.kitt.ai/api/v1/train: + + { + "name": "a word", + "language": "en", + "age_group": "10_19", + "gender": "F", + "microphone": "mic type", + "token": "", + "voice_samples": [ + {wave: ""}, + {wave: ""}, + {wave: ""} + ] + } + +然后您会获得一个训练好的个人模型! + + +## 介绍 + +Snowboy是一款可定制的唤醒词检测引擎,可为您创建像 "OK Google" 或 "Alexa" 这样的唤醒词。Snowboy基于神经网络,具有以下特性: + +* **高度可定制**:您可以自由定义自己的唤醒词 - +比如说“open sesame”,“garage door open”或 “hello dreamhouse”等等。 + +* **总是在监听** 但保护您的个人隐私:Snowboy不使用互联网,不会将您的声音传输到云端。 + +* **轻量级和嵌入式的**:它可以轻松在Raspberry Pi上运行,甚至在最弱的Pi(单核700MHz ARMv6)上,Snowboy占用的CPU也少于10%。 + +* Apache授权! + +目前Snowboy支持(查看lib文件夹): + +* 所有版本的Raspberry Pi(Raspbian基于Debian Jessie 8.0) +* 64位Mac OS X +* 64位Ubuntu 14.04 +* iOS +* Android +* ARM64(aarch64,Ubuntu 16.04) + +Snowboy底层库由C++写成,通过swig被封装成能在多种操作系统和语言上使用的软件库。我们欢迎新语言的封装,请随时发送你们的Pull Request! + +目前我们已经现实封装的有: + +* C/C++ +* Java / Android +* Go(thanks to @brentnd and @deadprogram) +* Node(thanks to @evancohen和@ nekuz0r) +* Perl(thanks to @iboguslavsky) +* Python2/Python3 +* iOS / Swift3(thanks to @grimlockrocks) +* iOS / Object-C(thanks to @patrickjquinn) + +如果您想要支持其他硬件或操作系统,请将您的请求发送至[snowboy@kitt.ai](mailto:snowboy.kitt.ai) + +注意:**Snowboy还不支持Windows** 。请在 *nix平台上编译Snowboy。 + +## Snowboy模型的定价 + +黑客:免费 + +* 个人使用 +* 社区支持 + +商业:请通过[snowboy@kitt.ai](mailto:snowboy@kitt.ai)与我们联系 + +* 个人使用 +* 商业许可证 +* 技术支持 + +## 预训练的通用模型 + +为了测试方便,我们提供一些事先训练好的通用模型。当您测试那些模型时,请记住他们可能没有为您的特定设备或环境进行过优化。 + +以下是模型列表和您必须使用的参数: + +* **resources/alexa/alexa-avs-sample-app/alexa.umdl**:这个是为[Alexa AVS sample app](https://github.com/alexa/alexa-avs-sample-app)优化过的唤醒词为“Alexa”的通用模型,将`SetSensitivity`设置为`0.6`,并将`ApplyFrontend`设置为true。当`ApplyFrontend`设置为`true`时,这是迄今为止我们公开发布的最好的“Alexa”的模型。 +* **resources/models/snowboy.umdl**:唤醒词为“snowboy”的通用模型。将`SetSensitivity`设置为`0.5`,`ApplyFrontend`设置为`false`。 +* **resources/models/jarvis.umdl**: 唤醒词为“Jarvis” (https://snowboy.kitt.ai/hotword/29) 的通用模型,其中包含了对应于“Jarvis”的两个唤醒词模型,所以需要设置两个`sensitivity`。将`SetSensitivity`设置为`0.8,0.8`,`ApplyFrontend`设置为`true`。 +* **resources/models/smart_mirror.umdl**: 唤醒词为“Smart Mirror” (https://snowboy.kitt.ai/hotword/47) 的通用模型。将`SetSensitivity`设置为`0.5`,`ApplyFrontend`设置为`false`。 +* **resources/models/subex.umdl**: 唤醒词为“Subex”(https://snowboy.kitt.ai/hotword/22014) 的通用模型。将`SetSensitivity`设置为`0.5`,`ApplyFrontend`设置为`true`。 +* **resources/models/neoya.umdl**: 唤醒词为“Neo ya”(https://snowboy.kitt.ai/hotword/22171) 的通用模型。其中包含了对应于“Neo ya”的两个>唤醒词模型,所以需要设置两个`sensitivity`。将`SetSensitivity`设置为`0.7,0.7`,`ApplyFrontend`设置为`true`。 +* **resources/models/hey_extreme.umdl**: 唤醒词为“Hey Extreme” (https://snowboy.kitt.ai/hotword/15428)的通用模型。将`SetSensitivity`设置为`0.6`,`ApplyFrontend`设置为`true`。 +* **resources/models/computer.umdl**: 唤醒词为“Computer” (https://snowboy.kitt.ai/hotword/46) 的通用模型。将`SetSensitivity`设置为`0.6`,`ApplyFrontend`设置为`true`。 +* **resources/models/view_glass.umdl**: 唤醒词为“View Glass” (https://snowboy.kitt.ai/hotword/7868) 的通用模型。将`SetSensitivity`设置为`0.7`,`ApplyFrontend`设置为`true`。 + +## 预编译node模块 + +Snowboy为一下平台编译了node模块:64位Ubuntu,MacOS X和Raspberry Pi(Raspbian 8.0+)。快速安装运行: + + npm install --save snowboy + +有关示例用法,请参阅examples/Node文件夹。根据您使用的脚本,可能需要安装依赖关系库例如fs,wav或node-record-lpcm16。 + +## 预编译Python Demo的二进制文件 +* 64 bit Ubuntu 
[12.04](https://s3-us-west-2.amazonaws.com/snowboy/snowboy-releases/ubuntu1204-x86_64-1.2.0.tar.bz2) + / [14.04](https://s3-us-west-2.amazonaws.com/snowboy/snowboy-releases/ubuntu1404-x86_64-1.3.0.tar.bz2) +* [MacOS X](https://s3-us-west-2.amazonaws.com/snowboy/snowboy-releases/osx-x86_64-1.3.0.tar.bz2) +* Raspberry Pi with Raspbian 8.0, all versions + ([1/2/3/Zero](https://s3-us-west-2.amazonaws.com/snowboy/snowboy-releases/rpi-arm-raspbian-8.0-1.3.0.tar.bz2)) +* Pine64 (Debian Jessie 8.5 (3.10.102)), Nvidia Jetson TX1 and Nvidia Jetson TX2 ([download](https://s3-us-west-2.amazonaws.com/snowboy/snowboy-releases/pine64-debian-jessie-1.2.0.tar.bz2)) +* Intel Edison (Ubilinux based on Debian Wheezy 7.8) ([download](https://s3-us-west-2.amazonaws.com/snowboy/snowboy-releases/edison-ubilinux-1.2.0.tar.bz2)) + +如果您要根据自己的环境/语言编译版本,请继续阅读。 + +## 依赖 + +要运行demo,您可能需要以下内容,具体取决于您使用的示例和您正在使用的平台: + +* SoX(音频转换) +* PortAudio或PyAudio(音频录音) +* SWIG 3.0.10或以上(针对不同语言/平台编译Snowboy) +* ATLAS或OpenBLAS(矩阵计算) + +在下面您还可以找到在Mac OS X,Ubuntu或Raspberry Pi上安装依赖关系所需的确切命令。 + +### Mac OS X + +`brew` 安装 `swig`,`sox`,`portaudio` 和绑定了 `pyaudio`的Python: + + brew install swig portaudio sox + pip install pyaudio + +如果您没有安装Homebrew,请在这里[here](http://brew.sh/)下载。如果没有pip,可以在这里[here](https://pip.pypa.io/en/stable/installing/)安装。 + +确保您可以用麦克风录制音频: + + rec t.wav + +### Ubuntu / Raspberry Pi / Pine64 / Nvidia Jetson TX1 / Nvidia Jetson TX2 + +首先 `apt-get` 安装 `swig`,`sox`,`portaudio`和绑定了 `pyaudio` 的 Python: + + sudo apt-get install swig3.0 python-pyaudio python3-pyaudio sox + pip install pyaudio + +然后安装 `atlas` 矩阵计算库: + + sudo apt-get install libatlas-base-dev + +确保您可以用麦克风录制音频: + + rec t.wav + +如果您需要额外设置您的音频(特别是Raspberry Pi),请参阅[full documentation](http://docs.kitt.ai/snowboy)。 + +## 编译Node插件 + +为Linux和Raspberry Pi编译node插件需要安装以下依赖项: + + sudo apt-get install libmagic-dev libatlas-base-dev + +然后编译插件,从snowboy代码库的根目录运行以下内容: + + npm install + ./node_modules/node-pre-gyp/bin/node-pre-gyp clean configure build + +## 编译Java Wrapper + + # Make sure you have JDK installed. + cd swig/Java + make + +SWIG将生成一个包含转换成Java封装的`java`目录和一个包含JNI库的`jniLibs`目录。 + +运行Java示例脚本: + + cd examples/Java + make run + +## 编译Python Wrapper + + cd swig/Python + make + +SWIG将生成一个_snowboydetect.so文件和一个简单(但难以阅读)的python 封装snowboydetect.py。我们已经提供了一个更容易读懂的python封装snowboydecoder.py。 + +如果不能make,请适配`swig/Python`中的Makefile到您自己的系统设置。 + +## 编译GO Warpper + + cd examples/Go + go get github.com/Kitt-AI/snowboy/swig/Go + go build -o snowboy main.go + ./snowboy ../../resources/snowboy.umdl ../../resources/snowboy.wav + +期望输出: + + Snowboy detecting keyword in ../../resources/snowboy.wav + Snowboy detected keyword 1 + + +更多细节,请阅读 'examples/Go/readme.md'。 + +## 编译Perl wrapper + + cd swig/Perl + make + +Perl示例包括使用KITT.AI RESTful API训练个人唤醒词,在检测到唤醒之后添加Google Speech API等。要运行示例,请执行以下操作 + + cd examples/Perl + + # Install cpanm, if you don't already have it. + curl -L https://cpanmin.us | perl - --sudo App::cpanminus + + # Install the dependencies. Note, on Linux you will have to install the + # PortAudio package first, using e.g.: + # apt-get install portaudio19-dev + sudo cpanm --installdeps . + + # Run the unit test. + ./snowboy_unit_test.pl + + # Run the personal model training example. + ./snowboy_RESTful_train.pl + + # Run the Snowboy Google Speech API example. By default it uses the Snowboy + # universal hotword. + ./snowboy_googlevoice.pl [Hotword_Model] + +## 编译iOS wrapper + +在Objective-C中使用Snowboy库不需要封装. 它与Objective-C中使用C++库基本相同. 
我们为iOS设备编写了一个 "fat" 静态库,请参阅这里的库`lib/ios/libsnowboy-detect.a`。 + +在Objective-C中初始化Snowboy检测器: + + snowboy::SnowboyDetect* snowboyDetector = new snowboy::SnowboyDetect( + std::string([[[NSBundle mainBundle]pathForResource:@"common" ofType:@"res"] UTF8String]), + std::string([[[NSBundle mainBundle]pathForResource:@"snowboy" ofType:@"umdl"] UTF8String])); + snowboyDetector->SetSensitivity("0.45"); // Sensitivity for each hotword + snowboyDetector->SetAudioGain(2.0); // Audio gain for detection + +在Objective-C中运行唤醒词检测: + + int result = snowboyDetector->RunDetection(buffer[0], bufferSize); // buffer[0] is a float array + +您可能需要按照一定的频率调用RunDetection(),从而控制CPU使用率和检测延迟。 + +感谢@patrickjquinn和@grimlockrocks,我们现在有了在Objective-C和Swift3中使用Snowboy的例子。看看下面的例子`examples/iOS/`和下面的截图! + +Obj-C Example Swift3 Example + +# 编译Android Wrapper + +完整的README和教程在[Android README](examples/Android/README.md),这里是一个截图: + +Android Alexa Demo + +我们准备了一个可以安装并运行的Android应用程序:[SnowboyAlexaDemo.apk](https://github.com/Kitt-AI/snowboy/raw/master/resources/alexa/SnowboyAlexaDemo.apk)(如果您之前安装了此应用程序,请先卸载它们)。 + +## Python demo快速入门 + +进入 `examples/Python` 文件夹并打开你的python控制台: + + In [1]: import snowboydecoder + + In [2]: def detected_callback(): + ....: print "hotword detected" + ....: + + In [3]: detector = snowboydecoder.HotwordDetector("resources/snowboy.umdl", sensitivity=0.5, audio_gain=1) + + In [4]: detector.start(detected_callback) + +然后对你的麦克风说"snowboy",看看是否Snowboy检测到你。 + +这个 `snowboy.umdl` 文件是一个 "通用" 模型,可以检测不同的人说 "snowboy" 。 如果你想要其他的唤醒词,请去[snowboy.kitt.ai](https://snowboy.kitt.ai)录音,训练和下载你自己的个人模型(一个.pmdl文件)。 + +当 `sensitiviy` 设置越高,唤醒越容易触发。但是你也可能会收到更多的误唤醒。 + +`audio_gain` 控制是否增加(> 1)或降低(<1)输入音量。 + +我们提供了两个演示文件 `demo.py`, `demo2.py` 以显示更多的用法。 + +注意:如果您看到以下错误: + + TypeError: __init__() got an unexpected keyword argument 'model_str' + +您可能正在使用旧版本的SWIG. 请升级SWIG。我们已经测试过SWIG 3.0.7和3.0.8。 + +## 高级用法与演示 + +请参阅[Full Documentation](http://docs.kitt.ai/snowboy)。 + +## 更改日志 + +**v1.3.0, 2/19/2018** + +* 添加前端处理到所有平台 +* 添加`resources/models/smart_mirror.umdl` 给 https://snowboy.kitt.ai/hotword/47 +* 添加`resources/models/jarvis.umdl` 给 https://snowboy.kitt.ai/hotword/29 +* 添加中文文档 +* 清理支持的平台 +* 重新定义了模型路径 + +**v1.2.0, 3/25/2017** + +* 为[Alexa AVS sample app](https://github.com/alexa/alexa-avs-sample-app)添加更好的Alexa模型 +* 新的解码器,适用于像Alexa这样的简短的词条 + +**v1.1.1, 3/24/2017** + +* 添加Android演示 +* 添加了iOS演示 +* 增加了三星Artik支持 +* 添加Go支持 +* 增加了英特尔爱迪生支持 +* 增加了Pine64的支持 +* 增加了Perl支持 +* 添加了更强大的“Alexa”模型(umdl) +* 通过/api/v1/train终端提供Hotword即服务。 +* 解码器没有改变 + +**v1.1.0, 9/20/2016** + +* 添加了Node的库 +* 增加了对Python3的支持 +* 增加了通用模型 alexa.umdl +* 更新通用模型snowboy.umdl,使其在嘈杂的环境中工作 + +**v1.0.4, 7/13/2016** + +* 更新通用snowboy.umdl模型,使其更加健壮 +* 各种改进加快检测 +* Bug修复 + +**v1.0.3, 6/4/2016** + +* 更新的通用snowboy.umdl模型,使其在非语音环境中更加强大 +* 修正使用float作为输入数据时的错误 +* 为Android ARMV7架构增加了库支持 +* 为iOS添加了库 + +**v1.0.2, 5/24/2016** + +* 更新通用snowboy.umdl模型 +* 添加C ++示例,文档将在下一个版本中 + +**v1.0.1, 5/16/2016** + +* VAD现在返回-2为静音,-1为错误,0为语音,大于0为触发了唤醒 +* 添加了Raspberry Pi的静态库,以防人们想自己编译而不是使用二进制版本 + +**v1.0.0, 5/10/2016** + +* 初始版本 diff --git a/src/Guide_stick_system/voice_assistant/snowboy/README_commercial.md b/src/Guide_stick_system/voice_assistant/snowboy/README_commercial.md new file mode 100644 index 0000000..b2daa37 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/README_commercial.md @@ -0,0 +1,134 @@ +# Common Questions for a Commercial Application + +You are looking for a way to put Snowboy in a commercial application. 
We have compiled a large collection of common questions from our customers all over the world in various industries. + + +## Universal models (paid) vs. personal models (free) + +Personal models: + +* are the models you downloaded from https://snowboy.kitt.ai or using our `/train` SaaS API. +* are good for quick demos +* are built with only 3 voice samples +* are not noise robust and you'll get a lot of false alarms in real environment +* only work on your own voice or a very similar voice, thus is speaker dependent +* are free + +Universal models: + +* are built using a lot more voice samples (at least thousands) +* take effort to collect those voice samples +* take a lot of GPU time to train +* are more robust against noise +* are mostly speaker independent (with challenges on children's voice and accents) +* cannot be built by yourself using the web interface or the SaaS API +* cost you money + +### FAQ for universal & personal models + +Q: **If I record multiple times on snowboy.kitt.ai, can I improve the personal models?** +A: No. Personal models only take 3 voice samples to build. Each time you record new voices, the previous samples are overwritten and not used in your current model. + + +Q: **How can I get a universal model for free?** +A: The *one and only* way: Ask 500 people to log in to snowboy.kitt.ai, contribute their voice samples to a particular hotword, then ask us to build a universal model for that hotword. + +Q: **Can I use your API to collect voices from 500 people and increment the sample counter from snowboy.kitt.ai?** +A: No. The [SaaS](https://github.com/kitt-ai/snowboy#hotword-as-a-service) API is separate from the website. + +Q: **How long does it take to get a universal model?** +A: Usually a month. + +## Licensing + + +### Explain your license again? + +Everything on Snowboy's GitHub repo is Apache licensed, including various sample applications and wrapper codes, though the Snowboy library is binary code compiled against different platforms. + +With that said, if you built an application from https://github.com/kitt-ai/snowboy or personal models downloaded from https://snowboy.kitt.ai, you don't need to pay a penny. + +If you want to use a universal model with your own customized hotword, you'll need an **evaluation license** and a **commercial license**. + +### Evaluation license + +Each hotword is different. When you train a universal model with your own hotword, nobody can guarantee that it works on your system without any flaws. Thus you'll need to get an evaluation license first to test whether your universal model works for you. + +An evaluation license: + +* gives you a 90 day window to evaluate the universal model we build for you +* costs you money + +**Warning: an evaluation license will expire after 90 days. Make sure you don't use the model with evaluation license in production systems.** Get a commercial license from us for your production system. + +#### Evaluation license FAQ + +Q: **How much does it cost?** +A: A few thousand dollars. + +Q: **Can I get a discount as a {startup, student, NGO}?** +A: No. Our pricing is already at least half of what others charge. + +Q: **How can you make sure your universal model works for me?** +A: We simply can't. However we have a few sample universal models from our GitHub [repo](https://github.com/Kitt-AI/snowboy/tree/master/resources), including "alexa.umdl", "snowboy.umdl", and "smart_mirror.umdl". The "alexa.umdl" model is enhanced with a lot more data and is not a typical case. 
So pay attention to test "snowboy.umdl" and "smart_mirror.umdl". They offer similar performance to your model. + + +### Commercial license + +After evaluation, if you feel want to go with Snowboy, you'll need a commercial license to deploy it. We usually charge a flat fee per unit of hardware you sell. + +#### Commercial license FAQ + +Q: **Is it a one-time license or subscription-based license?** +A: It's a perpetual license for each device. Since the Snowboy library runs *offline* on your device, you can run it forever without worrying about any broken and dependent web services. + +Q: **What's your pricing structure?** +A: We have tiered pricing depending on your volume. We charge less if you sell more. + +Q: **Can you give me one example?** +A: For instance, if your product is a talking robot with a $300 price tag, and you sell at least 100,000 units per year, we'll probably charge you $1 per unit once you go over 100,000 units. If your product is a smart speaker with a $30 price tag, we won't charge you $1, but you'll have to sell a lot more to make the business sense to us. + +Q: **I plan to sell 1000 units a year, can I license your software for $1 per unit?** +A: No. In that way we only make $1000 a year, which is not worth the amount of time we put on your hotword. + +Q: **I make a cellphone app, not a hardware product, what's the pricing structure?** +A: Depends on how you generate revenue. For instance, if your app is priced at $1.99, we'll collect cents per paid user, assuming you have a large user base. If you only have 2000 paid users, we'll make a revenue of less than a hundred dollars and it won't make sense to us. + + +### What's the process of getting a license? + +1. Make sure Snowboy can run on your system +2. Reach out to us with your hotword name, commercial application, and target market +3. Discuss with us about **commercial license** fee to make sure our pricing fits your budget +4. Sign an evaluation contract, pay 50% of invoice +5. We'll train a universal model for you and give you an **evaluation license** of 90 days +6. Test the model and discuss how we can improve it +7. If you decide to go with it, get a commercial license from us + +## General Questions + +### What language does Snowboy support? + +We support North American English and Chinese the best. We can deal with a bit of Indian accents as well. For other languages, we'll need to first listen to your hotword (please send us a few .wav voice samples) before we can engage. + +### How many voice samples do you need? + +Usually 1500 voice samples from 500 people to get started. The more the better. If your hotword is in English, we can collect the voice samples for you. Otherwise you'll need to collect it yourself and send to us. + +### What's the format on voice samples? + +16000Hz sample rate, 16 bit integer, mono channel, .wav files. + +### Does Snowboy do: AEC, VAD, Noise Suppression, Beam Forming? + +Snowboy has a weak support for VAD and noise suppression, as we found some customers would use Snowboy without a microphone array. Snowboy is not a audio frontend processing toolkit thus does not support AEC and beam forming. + +If your application wants to support far-field speech, i.e., verbal communication at least 3 feet away, you'll need a microphone array to enhance incoming speech and reduce noise. Please do not reply on Snowboy to do everything. + +### Can you compile Snowboy for my platform? 
+ +If your platform is not listed [here](https://github.com/Kitt-AI/snowboy/tree/master/lib), and you want to get a commercial license from us, please contact us with your toolchain, hardware chip, RAM, OS, GCC/G++ version. Depending on the effort, we might charge an NRE fee for cross compiling. + +### Contact + +If this document doesn't cover what's needed, feel free to reach out to us at snowboy@kitt.ai \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/binding.gyp b/src/Guide_stick_system/voice_assistant/snowboy/binding.gyp new file mode 100644 index 0000000..b2917ae --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/binding.gyp @@ -0,0 +1,85 @@ +{ + 'targets': [{ + 'target_name': 'snowboy', + 'sources': [ + 'swig/Node/snowboy.cc' + ], + 'conditions': [ + ['OS=="mac"', { + 'link_settings': { + 'libraries': [ + '<(module_root_dir)/lib/osx/libsnowboy-detect.a', + ] + } + }], + ['OS=="linux" and target_arch=="x64"', { + 'link_settings': { + 'ldflags': [ + '-Wl,--no-as-needed', + ], + 'libraries': [ + '<(module_root_dir)/lib/ubuntu64/libsnowboy-detect.a', + ] + } + }], + ['OS=="linux" and target_arch=="arm"', { + 'link_settings': { + 'ldflags': [ + '-Wl,--no-as-needed', + ], + 'libraries': [ + '<(module_root_dir)/lib/rpi/libsnowboy-detect.a', + ] + } + }], + ['OS=="linux" and target_arch=="arm64"', { + 'link_settings': { + 'ldflags': [ + '-Wl,--no-as-needed', + ], + 'libraries': [ + '<(module_root_dir)/lib/aarch64-ubuntu1604/libsnowboy-detect.a', + ] + } + }] + ], + 'cflags': [ + '-std=c++11', + '-fexceptions', + '-Wall', + '-D_GLIBCXX_USE_CXX11_ABI=0' + ], + 'cflags!': [ + '-fno-exceptions' + ], + 'cflags_cc!': [ + '-fno-exceptions' + ], + 'include_dirs': [ + " + + +Don't forget to disable the "debug" option when releasing your Android App! + +Note: If you need to copy the Android demo to another folder, please use the `-RL` option of `cp` to replace the relative symbol links with real files: + + cp -RL SnowboyAlexaDemo Other_Folder + +Note: The sample app will save/overwrite all audio to a file (`recording.pcm`). Make sure you do not leave it on for a long time. + +## Useful Code Snippets + + +To initialize Snowboy detector in Java: + + # Assume you put the model related files under /sdcard/snowboy/ + SnowboyDetect snowboyDetector = new SnowboyDetect("/sdcard/snowboy/common.res", + "/sdcard/snowboy/snowboy.umdl"); + snowboyDetector.SetSensitivity("0.45"); // Sensitivity for each hotword + snowboyDetector.SetAudioGain(2.0); // Audio gain for detection + +To run hotword detection in Java: + + int result = snowboyDetector.RunDetection(buffer, buffer.length); // buffer is a short array. + +You may want to play with the frequency of the calls to `RunDetection()`, which controls the CPU usage and the detection latency. + + +## Common Asks + +The following issues have been fixed pushed to `master`. 
+ +- [x] softfloating point support with OpenBlas +- [x] upgrade NDK version to newer than r11c +- [x] NDK toolchain building: remove `--stl=libc++` option + + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.classpath b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.classpath new file mode 100644 index 0000000..5176974 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.classpath @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.gitignore b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.gitignore new file mode 100644 index 0000000..dff166a --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.gitignore @@ -0,0 +1,14 @@ +*.iml +.gradle +/local.properties +/.idea/workspace.xml +/.idea/libraries +.DS_Store +/build +/captures +.externalNativeBuild +*.apk +*.ap_ +.metadata/ +.idea/workspace.xml +.idea/tasks.xml diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/.name b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/.name new file mode 100644 index 0000000..ef643d4 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/.name @@ -0,0 +1 @@ +SnowboyAlexaDemo \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/compiler.xml b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/compiler.xml new file mode 100644 index 0000000..96cc43e --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/compiler.xml @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/copyright/profiles_settings.xml b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/copyright/profiles_settings.xml new file mode 100644 index 0000000..e7bedf3 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/copyright/profiles_settings.xml @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/encodings.xml b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/encodings.xml new file mode 100644 index 0000000..240b96f --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/encodings.xml @@ -0,0 +1,9 @@ + + + + + + + + + \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/gradle.xml b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/gradle.xml new file mode 100644 index 0000000..47bd81f --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/gradle.xml @@ -0,0 +1,17 @@ + + + + + + \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/misc.xml 
b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/misc.xml new file mode 100644 index 0000000..5d19981 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/misc.xml @@ -0,0 +1,46 @@ + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/modules.xml b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/modules.xml new file mode 100644 index 0000000..fd9bf43 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/modules.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/runConfigurations.xml b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/runConfigurations.xml new file mode 100644 index 0000000..7f68460 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.idea/runConfigurations.xml @@ -0,0 +1,12 @@ + + + + + + \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.project b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.project new file mode 100644 index 0000000..8e9025f --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/.project @@ -0,0 +1,33 @@ + + + Alexa19 + + + + + + com.android.ide.eclipse.adt.ResourceManagerBuilder + + + + + com.android.ide.eclipse.adt.PreCompilerBuilder + + + + + org.eclipse.jdt.core.javabuilder + + + + + com.android.ide.eclipse.adt.ApkBuilder + + + + + + com.android.ide.eclipse.adt.AndroidNature + org.eclipse.jdt.core.javanature + + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/AndroidManifest.xml b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/AndroidManifest.xml new file mode 100644 index 0000000..11fb5c1 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/AndroidManifest.xml @@ -0,0 +1,20 @@ + + + + + + + + + + + + + + + + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/assets/snowboy/alexa.umdl b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/assets/snowboy/alexa.umdl new file mode 100644 index 0000000..03a4f52 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/assets/snowboy/alexa.umdl @@ -0,0 +1 @@ +../../../../../resources/alexa/alexa-avs-sample-app/alexa.umdl \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/assets/snowboy/common.res b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/assets/snowboy/common.res new file mode 100644 index 0000000..bb7bed5 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/assets/snowboy/common.res @@ -0,0 +1 @@ +../../../../../resources/common.res \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/assets/snowboy/ding.wav 
b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/assets/snowboy/ding.wav new file mode 100644 index 0000000..0b0c1a1 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/assets/snowboy/ding.wav @@ -0,0 +1 @@ +../../../../../resources/ding.wav \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/build.gradle b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/build.gradle new file mode 100644 index 0000000..0d82489 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/build.gradle @@ -0,0 +1,52 @@ +buildscript { + repositories { + mavenCentral() + } + dependencies { + classpath 'com.android.tools.build:gradle:2.3.0' + } +} +apply plugin: 'android' + +dependencies { + compile fileTree(include: '*.jar', dir: 'libs') +} + +android { + signingConfigs { + } + compileSdkVersion 25 + buildToolsVersion '25.0.0' + compileOptions.encoding = 'ISO-8859-1' + sourceSets { + main { + manifest.srcFile 'AndroidManifest.xml' + java.srcDirs = ['src'] + resources.srcDirs = ['src'] + aidl.srcDirs = ['src'] + renderscript.srcDirs = ['src'] + res.srcDirs = ['res'] + assets.srcDirs = ['assets'] + } + + // Move the tests to tests/java, tests/res, etc... + instrumentTest.setRoot('tests') + + // Move the build types to build-types/ + // For instance, build-types/debug/java, build-types/debug/AndroidManifest.xml, ... + // This moves them out of them default location under src//... which would + // conflict with src/ being used by the main source set. + // Adding new build types or product flavors should be accompanied + // by a similar customization. + debug.setRoot('build-types/debug') + release.setRoot('build-types/release') + } + buildTypes { + release { + } + } + defaultConfig { + } + productFlavors { + } +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/gradle/wrapper/gradle-wrapper.jar b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 0000000..8c0fb64 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/gradle/wrapper/gradle-wrapper.jar differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/gradle/wrapper/gradle-wrapper.properties b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 0000000..dff2a98 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,6 @@ +#Tue Mar 07 14:27:10 PST 2017 +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-3.3-all.zip diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/gradlew b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/gradlew new file mode 100644 index 0000000..91a7e26 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/gradlew @@ -0,0 +1,164 @@ +#!/usr/bin/env bash + +############################################################################## +## +## 
Gradle start up script for UN*X +## +############################################################################## + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS="" + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn ( ) { + echo "$*" +} + +die ( ) { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; +esac + +# For Cygwin, ensure paths are in UNIX format before anything is touched. +if $cygwin ; then + [ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"` +fi + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >&- +APP_HOME="`pwd -P`" +cd "$SAVED" >&- + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? 
-ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin, switch paths to Windows format before running java +if $cygwin ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=$((i+1)) + done + case $i in + (0) set -- ;; + (1) set -- "$args0" ;; + (2) set -- "$args0" "$args1" ;; + (3) set -- "$args0" "$args1" "$args2" ;; + (4) set -- "$args0" "$args1" "$args2" "$args3" ;; + (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules +function splitJvmOpts() { + JVM_OPTS=("$@") +} +eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS +JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME" + +exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@" diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/gradlew.bat b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/gradlew.bat new file mode 100644 index 0000000..8a0b282 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/gradlew.bat @@ -0,0 +1,90 @@ +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS= + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto init + +echo. 
+echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto init + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:init +@rem Get command-line arguments, handling Windowz variants + +if not "%OS%" == "Windows_NT" goto win9xME_args +if "%@eval[2+2]" == "4" goto 4NT_args + +:win9xME_args +@rem Slurp the command line arguments. +set CMD_LINE_ARGS= +set _SKIP=2 + +:win9xME_args_slurp +if "x%~1" == "x" goto execute + +set CMD_LINE_ARGS=%* +goto execute + +:4NT_args +@rem Get arguments from the 4NT Shell from JP Software +set CMD_LINE_ARGS=%$ + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/ic_launcher-web.png b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/ic_launcher-web.png new file mode 100644 index 0000000..8e8513d Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/ic_launcher-web.png differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/project.properties b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/project.properties new file mode 100644 index 0000000..22c1e80 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/project.properties @@ -0,0 +1,17 @@ +# This file is automatically generated by Android Tools. +# Do not modify this file -- YOUR CHANGES WILL BE ERASED! +# +# This file must be checked in Version Control Systems. +# +# To customize properties used by the Ant build system edit +# "ant.properties", and override values to adapt the script to your +# project structure. +# +# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home): +#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt + +# Indicates whether an apk should be generated for each density. +split.density=false +# Project target. 
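+# (android-17 = Android 4.2, API level 17.)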
+target=android-17 +apk-configurations= diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/drawable/icon.png b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/drawable/icon.png new file mode 100644 index 0000000..7502484 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/drawable/icon.png differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/layout/main.xml b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/layout/main.xml new file mode 100644 index 0000000..3ad80bb --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/layout/main.xml @@ -0,0 +1,44 @@ + + + + + + + + + + + + + + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/mipmap-hdpi/ic_launcher.png b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/mipmap-hdpi/ic_launcher.png new file mode 100644 index 0000000..6c76225 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/mipmap-hdpi/ic_launcher.png differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/mipmap-mdpi/ic_launcher.png b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/mipmap-mdpi/ic_launcher.png new file mode 100644 index 0000000..b589d0a Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/mipmap-mdpi/ic_launcher.png differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/mipmap-xhdpi/ic_launcher.png b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/mipmap-xhdpi/ic_launcher.png new file mode 100644 index 0000000..fe7e684 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/mipmap-xhdpi/ic_launcher.png differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/mipmap-xxhdpi/ic_launcher.png b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/mipmap-xxhdpi/ic_launcher.png new file mode 100644 index 0000000..7b04ebd Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/mipmap-xxhdpi/ic_launcher.png differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/mipmap-xxxhdpi/ic_launcher.png b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/mipmap-xxxhdpi/ic_launcher.png new file mode 100644 index 0000000..06cbec7 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/mipmap-xxxhdpi/ic_launcher.png differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/values/strings.xml b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/values/strings.xml new file mode 100644 index 0000000..03690a4 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/res/values/strings.xml @@ -0,0 +1,8 @@ + + + SnowboyAlexaDemo + Start + Stop + Play + Stop + diff --git 
a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/AppResCopy.java b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/AppResCopy.java new file mode 100644 index 0000000..461a5f2 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/AppResCopy.java @@ -0,0 +1,64 @@ +package ai.kitt.snowboy; + +import android.content.Context; +import android.util.Log; +import java.io.File; +import java.io.FileOutputStream; +import java.io.InputStream; + +public class AppResCopy { + private final static String TAG = AppResCopy.class.getSimpleName(); + private static String envWorkSpace = Constants.DEFAULT_WORK_SPACE; + + private static void copyFilesFromAssets(Context context, String assetsSrcDir, String sdcardDstDir, boolean override) { + try { + String fileNames[] = context.getAssets().list(assetsSrcDir); + if (fileNames.length > 0) { + Log.i(TAG, assetsSrcDir +" directory has "+fileNames.length+" files.\n"); + File dir = new File(sdcardDstDir); + if (!dir.exists()) { + if (!dir.mkdirs()) { + Log.e(TAG, "mkdir failed: "+sdcardDstDir); + return; + } else { + Log.i(TAG, "mkdir ok: "+sdcardDstDir); + } + } else { + Log.w(TAG, sdcardDstDir+" already exists! "); + } + for (String fileName : fileNames) { + copyFilesFromAssets(context,assetsSrcDir + "/" + fileName,sdcardDstDir+"/"+fileName, override); + } + } else { + Log.i(TAG, assetsSrcDir +" is file\n"); + File outFile = new File(sdcardDstDir); + if (outFile.exists()) { + if (override) { + outFile.delete(); + Log.e(TAG, "overriding file "+ sdcardDstDir +"\n"); + } else { + Log.e(TAG, "file "+ sdcardDstDir +" already exists. No override.\n"); + return; + } + } + InputStream is = context.getAssets().open(assetsSrcDir); + FileOutputStream fos = new FileOutputStream(outFile); + byte[] buffer = new byte[1024]; + int byteCount=0; + while ((byteCount=is.read(buffer)) != -1) { + fos.write(buffer, 0, byteCount); + } + fos.flush(); + is.close(); + fos.close(); + Log.i(TAG, "copy to "+sdcardDstDir+" ok!"); + } + } catch (Exception e) { + e.printStackTrace(); + } + } + + public static void copyResFromAssetsToSD(Context context) { + copyFilesFromAssets(context, Constants.ASSETS_RES_DIR, envWorkSpace+"/", true); + } +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/Constants.java b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/Constants.java new file mode 100644 index 0000000..6eb9971 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/Constants.java @@ -0,0 +1,12 @@ +package ai.kitt.snowboy; +import java.io.File; +import android.os.Environment; + +public class Constants { + public static final String ASSETS_RES_DIR = "snowboy"; + public static final String DEFAULT_WORK_SPACE = Environment.getExternalStorageDirectory().getAbsolutePath() + "/snowboy/"; + public static final String ACTIVE_UMDL = "alexa.umdl"; + public static final String ACTIVE_RES = "common.res"; + public static final String SAVE_AUDIO = Constants.DEFAULT_WORK_SPACE + File.separatorChar + "recording.pcm"; + public static final int SAMPLE_RATE = 16000; +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/Demo.java 
b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/Demo.java new file mode 100644 index 0000000..fd58457 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/Demo.java @@ -0,0 +1,252 @@ +package ai.kitt.snowboy; + +import ai.kitt.snowboy.audio.RecordingThread; +import ai.kitt.snowboy.audio.PlaybackThread; + +import android.app.Activity; +import android.media.AudioManager; +import android.os.Bundle; +import android.os.Handler; +import android.os.Message; +import android.text.Html; +import android.view.View; +import android.view.View.OnClickListener; +import android.widget.Button; +import android.widget.ScrollView; +import android.widget.TextView; +import android.widget.Toast; +import android.content.Context; + +import ai.kitt.snowboy.audio.AudioDataSaver; +import ai.kitt.snowboy.demo.R; + + +public class Demo extends Activity { + + private Button record_button; + private Button play_button; + private TextView log; + private ScrollView logView; + static String strLog = null; + + private int preVolume = -1; + private static long activeTimes = 0; + + private RecordingThread recordingThread; + private PlaybackThread playbackThread; + + @Override + public void onCreate(Bundle savedInstanceState) { + super.onCreate(savedInstanceState); + + setContentView(R.layout.main); + setUI(); + + setProperVolume(); + + AppResCopy.copyResFromAssetsToSD(this); + + activeTimes = 0; + recordingThread = new RecordingThread(handle, new AudioDataSaver()); + playbackThread = new PlaybackThread(); + } + + void showToast(CharSequence msg) { + Toast.makeText(this, msg, Toast.LENGTH_SHORT).show(); + } + + private void setUI() { + record_button = (Button) findViewById(R.id.btn_test1); + record_button.setOnClickListener(record_button_handle); + record_button.setEnabled(true); + + play_button = (Button) findViewById(R.id.btn_test2); + play_button.setOnClickListener(play_button_handle); + play_button.setEnabled(true); + + log = (TextView)findViewById(R.id.log); + logView = (ScrollView)findViewById(R.id.logView); + } + + private void setMaxVolume() { + AudioManager audioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE); + preVolume = audioManager.getStreamVolume(AudioManager.STREAM_MUSIC); + updateLog(" ----> preVolume = "+preVolume, "green"); + int maxVolume = audioManager.getStreamMaxVolume(AudioManager.STREAM_MUSIC); + updateLog(" ----> maxVolume = "+maxVolume, "green"); + audioManager.setStreamVolume(AudioManager.STREAM_MUSIC, maxVolume, 0); + int currentVolume = audioManager.getStreamVolume(AudioManager.STREAM_MUSIC); + updateLog(" ----> currentVolume = "+currentVolume, "green"); + } + + private void setProperVolume() { + AudioManager audioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE); + preVolume = audioManager.getStreamVolume(AudioManager.STREAM_MUSIC); + updateLog(" ----> preVolume = "+preVolume, "green"); + int maxVolume = audioManager.getStreamMaxVolume(AudioManager.STREAM_MUSIC); + updateLog(" ----> maxVolume = "+maxVolume, "green"); + int properVolume = (int) ((float) maxVolume * 0.2); + audioManager.setStreamVolume(AudioManager.STREAM_MUSIC, properVolume, 0); + int currentVolume = audioManager.getStreamVolume(AudioManager.STREAM_MUSIC); + updateLog(" ----> currentVolume = "+currentVolume, "green"); + } + + private void restoreVolume() { + if(preVolume>=0) { + AudioManager audioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE); + 
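// Restore the music-stream volume captured in setProperVolume()/setMaxVolume() before the demo adjusted it. +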
audioManager.setStreamVolume(AudioManager.STREAM_MUSIC, preVolume, 0); + updateLog(" ----> set preVolume = "+preVolume, "green"); + int currentVolume = audioManager.getStreamVolume(AudioManager.STREAM_MUSIC); + updateLog(" ----> currentVolume = "+currentVolume, "green"); + } + } + + private void startRecording() { + recordingThread.startRecording(); + updateLog(" ----> recording started ...", "green"); + record_button.setText(R.string.btn1_stop); + } + + private void stopRecording() { + recordingThread.stopRecording(); + updateLog(" ----> recording stopped ", "green"); + record_button.setText(R.string.btn1_start); + } + + private void startPlayback() { + updateLog(" ----> playback started ...", "green"); + play_button.setText(R.string.btn2_stop); + // (new PcmPlayer()).playPCM(); + playbackThread.startPlayback(); + } + + private void stopPlayback() { + updateLog(" ----> playback stopped ", "green"); + play_button.setText(R.string.btn2_start); + playbackThread.stopPlayback(); + } + + private void sleep() { + try { Thread.sleep(500); + } catch (Exception e) {} + } + + private OnClickListener record_button_handle = new OnClickListener() { + // @Override + public void onClick(View arg0) { + if(record_button.getText().equals(getResources().getString(R.string.btn1_start))) { + stopPlayback(); + sleep(); + startRecording(); + } else { + stopRecording(); + sleep(); + } + } + }; + + private OnClickListener play_button_handle = new OnClickListener() { + // @Override + public void onClick(View arg0) { + if (play_button.getText().equals(getResources().getString(R.string.btn2_start))) { + stopRecording(); + sleep(); + startPlayback(); + } else { + stopPlayback(); + } + } + }; + + public Handler handle = new Handler() { + @Override + public void handleMessage(Message msg) { + MsgEnum message = MsgEnum.getMsgEnum(msg.what); + switch(message) { + case MSG_ACTIVE: + activeTimes++; + updateLog(" ----> Detected " + activeTimes + " times", "green"); + // Toast.makeText(Demo.this, "Active "+activeTimes, Toast.LENGTH_SHORT).show(); + showToast("Active "+activeTimes); + break; + case MSG_INFO: + updateLog(" ----> "+message); + break; + case MSG_VAD_SPEECH: + updateLog(" ----> normal voice", "blue"); + break; + case MSG_VAD_NOSPEECH: + updateLog(" ----> no speech", "blue"); + break; + case MSG_ERROR: + updateLog(" ----> " + msg.toString(), "red"); + break; + default: + super.handleMessage(msg); + break; + } + } + }; + + public void updateLog(final String text) { + + log.post(new Runnable() { + @Override + public void run() { + if (currLogLineNum >= MAX_LOG_LINE_NUM) { + int st = strLog.indexOf("
"); + strLog = strLog.substring(st+4); + } else { + currLogLineNum++; + } + String str = ""+text+""+"
"; + strLog = (strLog == null || strLog.length() == 0) ? str : strLog + str; + log.setText(Html.fromHtml(strLog)); + } + }); + logView.post(new Runnable() { + @Override + public void run() { + logView.fullScroll(ScrollView.FOCUS_DOWN); + } + }); + } + + static int MAX_LOG_LINE_NUM = 200; + static int currLogLineNum = 0; + + public void updateLog(final String text, final String color) { + log.post(new Runnable() { + @Override + public void run() { + if(currLogLineNum>=MAX_LOG_LINE_NUM) { + int st = strLog.indexOf("
"); + strLog = strLog.substring(st+4); + } else { + currLogLineNum++; + } + String str = ""+text+""+"
"; + strLog = (strLog == null || strLog.length() == 0) ? str : strLog + str; + log.setText(Html.fromHtml(strLog)); + } + }); + logView.post(new Runnable() { + @Override + public void run() { + logView.fullScroll(ScrollView.FOCUS_DOWN); + } + }); + } + + private void emptyLog() { + strLog = null; + log.setText(""); + } + + @Override + public void onDestroy() { + restoreVolume(); + recordingThread.stopRecording(); + super.onDestroy(); + } +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/MsgEnum.java b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/MsgEnum.java new file mode 100644 index 0000000..06db43a --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/MsgEnum.java @@ -0,0 +1,18 @@ +package ai.kitt.snowboy; + +public enum MsgEnum { + MSG_VAD_END, + MSG_VAD_NOSPEECH, + MSG_VAD_SPEECH, + MSG_VOLUME_NOTIFY, + MSG_WAV_DATAINFO, + MSG_RECORD_START, + MSG_RECORD_STOP, + MSG_ACTIVE, + MSG_ERROR, + MSG_INFO; + + public static MsgEnum getMsgEnum(int i) { + return MsgEnum.values()[i]; + } +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/SnowboyDetect.java b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/SnowboyDetect.java new file mode 100644 index 0000000..1a09ecb --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/SnowboyDetect.java @@ -0,0 +1 @@ +../../../../../../../swig/Android/java/ai/kitt/snowboy/SnowboyDetect.java \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/SnowboyVad.java b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/SnowboyVad.java new file mode 100644 index 0000000..12df56b --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/SnowboyVad.java @@ -0,0 +1 @@ +../../../../../../../swig/Android/java/ai/kitt/snowboy/SnowboyVad.java \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/audio/AudioDataReceivedListener.java b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/audio/AudioDataReceivedListener.java new file mode 100644 index 0000000..e51fdff --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/audio/AudioDataReceivedListener.java @@ -0,0 +1,7 @@ +package ai.kitt.snowboy.audio; + +public interface AudioDataReceivedListener { + void start(); + void onAudioDataReceived(byte[] data, int length); + void stop(); +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/audio/AudioDataSaver.java b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/audio/AudioDataSaver.java new file mode 100644 index 0000000..5f46450 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/audio/AudioDataSaver.java @@ -0,0 +1,89 @@ +package ai.kitt.snowboy.audio; + +import java.io.BufferedOutputStream; +import java.io.DataOutputStream; +import 
java.io.File; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; + +import android.util.Log; + +import ai.kitt.snowboy.Constants; + +public class AudioDataSaver implements AudioDataReceivedListener { + + private static final String TAG = AudioDataSaver.class.getSimpleName(); + + // file size of when to delete and create a new recording file + private final float MAX_RECORDING_FILE_SIZE_IN_MB = 50f; + + // initial file size of recording file + private final float INITIAL_FILE_SIZE_IN_MB = 1.3f; + + // converted max file size + private final float MAX_RECORDING_FILE_SIZE_IN_BYTES + = (MAX_RECORDING_FILE_SIZE_IN_MB - INITIAL_FILE_SIZE_IN_MB) * 1024 * 1024; + + // keeps track of recording file size + private int recordingFileSizeCounterInBytes = 0; + + private File saveFile = null; + private DataOutputStream dataOutputStreamInstance = null; + + public AudioDataSaver() { + saveFile = new File(Constants.SAVE_AUDIO); + Log.e(TAG, Constants.SAVE_AUDIO); + } + + @Override + public void start() { + if (null != saveFile) { + if (saveFile.exists()) { + saveFile.delete(); + } + try { + saveFile.createNewFile(); + } catch (IOException e) { + Log.e(TAG, "IO Exception on creating audio file " + saveFile.toString(), e); + } + + try { + BufferedOutputStream bufferedStreamInstance = new BufferedOutputStream( + new FileOutputStream(this.saveFile)); + dataOutputStreamInstance = new DataOutputStream(bufferedStreamInstance); + } catch (FileNotFoundException e) { + throw new IllegalStateException("Cannot Open File", e); + } + } + } + + @Override + public void onAudioDataReceived(byte[] data, int length) { + try { + if (null != dataOutputStreamInstance) { + if (recordingFileSizeCounterInBytes >= MAX_RECORDING_FILE_SIZE_IN_BYTES) { + stop(); + start(); + recordingFileSizeCounterInBytes = 0; + } + dataOutputStreamInstance.write(data, 0, length); + recordingFileSizeCounterInBytes += length; + } + } catch (IOException e) { + Log.e(TAG, "IO Exception on saving audio file " + saveFile.toString(), e); + } + } + + @Override + public void stop() { + if (null != dataOutputStreamInstance) { + try { + dataOutputStreamInstance.close(); + } catch (IOException e) { + Log.e(TAG, "IO Exception on finishing saving audio file " + saveFile.toString(), e); + } + Log.e(TAG, "Recording saved to " + saveFile.toString()); + } + } +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/audio/PlaybackThread.java b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/audio/PlaybackThread.java new file mode 100644 index 0000000..41574bd --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/audio/PlaybackThread.java @@ -0,0 +1,115 @@ +package ai.kitt.snowboy.audio; + +import android.media.AudioFormat; +import android.media.AudioManager; +import android.media.AudioTrack; +import android.util.Log; +import java.io.BufferedInputStream; +import java.io.DataInputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.ShortBuffer; + +import ai.kitt.snowboy.Constants; + +public class PlaybackThread { + private static final String TAG = PlaybackThread.class.getSimpleName(); + + public PlaybackThread() { + } + + private Thread thread; + private boolean 
shouldContinue; + protected AudioTrack audioTrack; + + public boolean playing() { + return thread != null; + } + + public void startPlayback() { + if (thread != null) + return; + + // Start streaming in a thread + shouldContinue = true; + thread = new Thread(new Runnable() { + @Override + public void run() { + play(); + } + }); + thread.start(); + } + + public void stopPlayback() { + if (thread == null) + return; + + shouldContinue = false; + relaseAudioTrack(); + thread = null; + } + + protected void relaseAudioTrack() { + if (audioTrack != null) { + try { + audioTrack.release(); + } catch (Exception e) {} + } + } + + public short[] readPCM() { + try { + File recordFile = new File(Constants.SAVE_AUDIO); + InputStream inputStream = new FileInputStream(recordFile); + BufferedInputStream bufferedInputStream = new BufferedInputStream(inputStream); + DataInputStream dataInputStream = new DataInputStream(bufferedInputStream); + + byte[] audioData = new byte[(int)recordFile.length()]; + + dataInputStream.read(audioData); + dataInputStream.close(); + Log.v(TAG, "audioData size: " + audioData.length); + + ShortBuffer sb = ByteBuffer.wrap(audioData).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer(); + short[] samples = new short[sb.limit() - sb.position()]; + sb.get(samples); + return samples; + } catch (FileNotFoundException e) { + Log.e(TAG, "Cannot find saved audio file", e); + } catch (IOException e) { + Log.e(TAG, "IO Exception on saved audio file", e); + } + return null; + } + + private void play() { + short[] samples = this.readPCM(); + int shortSizeInBytes = Short.SIZE / Byte.SIZE; + int bufferSizeInBytes = samples.length * shortSizeInBytes; + Log.v(TAG, "shortSizeInBytes: " + shortSizeInBytes + " bufferSizeInBytes: " + bufferSizeInBytes); + + audioTrack = new AudioTrack( + AudioManager.STREAM_MUSIC, + Constants.SAMPLE_RATE, + AudioFormat.CHANNEL_OUT_MONO, + AudioFormat.ENCODING_PCM_16BIT, + bufferSizeInBytes, + AudioTrack.MODE_STREAM); + + if (audioTrack.getState() == AudioTrack.STATE_INITIALIZED) { + audioTrack.play(); + audioTrack.write(samples, 0, samples.length); + Log.v(TAG, "Audio playback started"); + } + + if (!shouldContinue) { + relaseAudioTrack(); + } + } +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/audio/RecordingThread.java b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/audio/RecordingThread.java new file mode 100644 index 0000000..850e955 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/audio/RecordingThread.java @@ -0,0 +1,152 @@ +package ai.kitt.snowboy.audio; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; + +import ai.kitt.snowboy.Constants; +import ai.kitt.snowboy.MsgEnum; +import android.media.AudioFormat; +import android.media.AudioRecord; +import android.media.MediaRecorder; +import android.media.MediaPlayer; +import android.os.Handler; +import android.os.Message; +import android.util.Log; + +import ai.kitt.snowboy.SnowboyDetect; + +public class RecordingThread { + static { System.loadLibrary("snowboy-detect-android"); } + + private static final String TAG = RecordingThread.class.getSimpleName(); + + private static final String ACTIVE_RES = Constants.ACTIVE_RES; + private static final String ACTIVE_UMDL = Constants.ACTIVE_UMDL; + + private boolean shouldContinue; + private AudioDataReceivedListener listener = null; + private Handler 
handler = null; + private Thread thread; + + private static String strEnvWorkSpace = Constants.DEFAULT_WORK_SPACE; + private String activeModel = strEnvWorkSpace+ACTIVE_UMDL; + private String commonRes = strEnvWorkSpace+ACTIVE_RES; + + private SnowboyDetect detector = new SnowboyDetect(commonRes, activeModel); + private MediaPlayer player = new MediaPlayer(); + + public RecordingThread(Handler handler, AudioDataReceivedListener listener) { + this.handler = handler; + this.listener = listener; + + detector.SetSensitivity("0.6"); + detector.SetAudioGain(1); + detector.ApplyFrontend(true); + try { + player.setDataSource(strEnvWorkSpace+"ding.wav"); + player.prepare(); + } catch (IOException e) { + Log.e(TAG, "Playing ding sound error", e); + } + } + + private void sendMessage(MsgEnum what, Object obj){ + if (null != handler) { + Message msg = handler.obtainMessage(what.ordinal(), obj); + handler.sendMessage(msg); + } + } + + public void startRecording() { + if (thread != null) + return; + + shouldContinue = true; + thread = new Thread(new Runnable() { + @Override + public void run() { + record(); + } + }); + thread.start(); + } + + public void stopRecording() { + if (thread == null) + return; + + shouldContinue = false; + thread = null; + } + + private void record() { + Log.v(TAG, "Start"); + android.os.Process.setThreadPriority(android.os.Process.THREAD_PRIORITY_AUDIO); + + // Buffer size in bytes: for 0.1 second of audio + int bufferSize = (int)(Constants.SAMPLE_RATE * 0.1 * 2); + if (bufferSize == AudioRecord.ERROR || bufferSize == AudioRecord.ERROR_BAD_VALUE) { + bufferSize = Constants.SAMPLE_RATE * 2; + } + + byte[] audioBuffer = new byte[bufferSize]; + AudioRecord record = new AudioRecord( + MediaRecorder.AudioSource.DEFAULT, + Constants.SAMPLE_RATE, + AudioFormat.CHANNEL_IN_MONO, + AudioFormat.ENCODING_PCM_16BIT, + bufferSize); + + if (record.getState() != AudioRecord.STATE_INITIALIZED) { + Log.e(TAG, "Audio Record can't initialize!"); + return; + } + record.startRecording(); + if (null != listener) { + listener.start(); + } + Log.v(TAG, "Start recording"); + + long shortsRead = 0; + detector.Reset(); + while (shouldContinue) { + record.read(audioBuffer, 0, audioBuffer.length); + + if (null != listener) { + listener.onAudioDataReceived(audioBuffer, audioBuffer.length); + } + + // Converts to short array. + short[] audioData = new short[audioBuffer.length / 2]; + ByteBuffer.wrap(audioBuffer).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().get(audioData); + + shortsRead += audioData.length; + + // Snowboy hotword detection. + int result = detector.RunDetection(audioData, audioData.length); + + if (result == -2) { + // post a higher CPU usage: + // sendMessage(MsgEnum.MSG_VAD_NOSPEECH, null); + } else if (result == -1) { + sendMessage(MsgEnum.MSG_ERROR, "Unknown Detection Error"); + } else if (result == 0) { + // post a higher CPU usage: + // sendMessage(MsgEnum.MSG_VAD_SPEECH, null); + } else if (result > 0) { + sendMessage(MsgEnum.MSG_ACTIVE, null); + Log.i("Snowboy: ", "Hotword " + Integer.toString(result) + " detected!"); + player.start(); + } + } + + record.stop(); + record.release(); + + if (null != listener) { + listener.stop(); + } + Log.v(TAG, String.format("Recording stopped. 
Samples read: %d", shortsRead)); + } +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/snowboyJNI.java b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/snowboyJNI.java new file mode 100644 index 0000000..b8ff562 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/ai/kitt/snowboy/snowboyJNI.java @@ -0,0 +1 @@ +../../../../../../../swig/Android/java/ai/kitt/snowboy/snowboyJNI.java \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/main/jniLibs b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/main/jniLibs new file mode 100644 index 0000000..76f885f --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Android/SnowboyAlexaDemo/src/main/jniLibs @@ -0,0 +1 @@ +../../../../../swig/Android/jniLibs \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/C++/Makefile b/src/Guide_stick_system/voice_assistant/snowboy/examples/C++/Makefile new file mode 100644 index 0000000..c890403 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/C++/Makefile @@ -0,0 +1,22 @@ +include demo.mk + +BINFILES = demo demo2 + +all: $(BINFILES) + +%.a: + $(MAKE) -C ${@D} ${@F} + +$(BINFILES): $(PORTAUDIOLIBS) $(SNOWBOYDETECTLIBFILE) + +$(PORTAUDIOLIBS): + @-./install_portaudio.sh + +clean: + -rm -f *.o *.a $(BINFILES) + +depend: + -$(CXX) -M $(CXXFLAGS) *.cc > .depend.mk + +# Putting "-" so no error messages. +-include .depend.mk diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/C++/demo.cc b/src/Guide_stick_system/voice_assistant/snowboy/examples/C++/demo.cc new file mode 100644 index 0000000..330e6ff --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/C++/demo.cc @@ -0,0 +1,238 @@ +// example/C++/demo.cc + +// Copyright 2016 KITT.AI (author: Guoguo Chen) + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "include/snowboy-detect.h" + +int PortAudioCallback(const void* input, + void* output, + unsigned long frame_count, + const PaStreamCallbackTimeInfo* time_info, + PaStreamCallbackFlags status_flags, + void* user_data); + +class PortAudioWrapper { + public: + // Constructor. + PortAudioWrapper(int sample_rate, int num_channels, int bits_per_sample) { + num_lost_samples_ = 0; + min_read_samples_ = sample_rate * 0.1; + Init(sample_rate, num_channels, bits_per_sample); + } + + // Reads data from ring buffer. + template + void Read(std::vector* data) { + assert(data != NULL); + + // Checks ring buffer overflow. + if (num_lost_samples_ > 0) { + std::cerr << "Lost " << num_lost_samples_ << " samples due to ring" + << " buffer overflow." << std::endl; + num_lost_samples_ = 0; + } + + ring_buffer_size_t num_available_samples = 0; + while (true) { + num_available_samples = + PaUtil_GetRingBufferReadAvailable(&pa_ringbuffer_); + if (num_available_samples >= min_read_samples_) { + break; + } + Pa_Sleep(5); + } + + // Reads data. 
+ num_available_samples = PaUtil_GetRingBufferReadAvailable(&pa_ringbuffer_); + data->resize(num_available_samples); + ring_buffer_size_t num_read_samples = PaUtil_ReadRingBuffer( + &pa_ringbuffer_, data->data(), num_available_samples); + if (num_read_samples != num_available_samples) { + std::cerr << num_available_samples << " samples were available, but " + << "only " << num_read_samples << " samples were read." << std::endl; + } + } + + int Callback(const void* input, void* output, + unsigned long frame_count, + const PaStreamCallbackTimeInfo* time_info, + PaStreamCallbackFlags status_flags) { + // Input audio. + ring_buffer_size_t num_written_samples = + PaUtil_WriteRingBuffer(&pa_ringbuffer_, input, frame_count); + num_lost_samples_ += frame_count - num_written_samples; + return paContinue; + } + + ~PortAudioWrapper() { + Pa_StopStream(pa_stream_); + Pa_CloseStream(pa_stream_); + Pa_Terminate(); + PaUtil_FreeMemory(ringbuffer_); + } + + private: + // Initialization. + bool Init(int sample_rate, int num_channels, int bits_per_sample) { + // Allocates ring buffer memory. + int ringbuffer_size = 16384; + ringbuffer_ = static_cast( + PaUtil_AllocateMemory(bits_per_sample / 8 * ringbuffer_size)); + if (ringbuffer_ == NULL) { + std::cerr << "Fail to allocate memory for ring buffer." << std::endl; + return false; + } + + // Initializes PortAudio ring buffer. + ring_buffer_size_t rb_init_ans = + PaUtil_InitializeRingBuffer(&pa_ringbuffer_, bits_per_sample / 8, + ringbuffer_size, ringbuffer_); + if (rb_init_ans == -1) { + std::cerr << "Ring buffer size is not power of 2." << std::endl; + return false; + } + + // Initializes PortAudio. + PaError pa_init_ans = Pa_Initialize(); + if (pa_init_ans != paNoError) { + std::cerr << "Fail to initialize PortAudio, error message is \"" + << Pa_GetErrorText(pa_init_ans) << "\"" << std::endl; + return false; + } + + PaError pa_open_ans; + if (bits_per_sample == 8) { + pa_open_ans = Pa_OpenDefaultStream( + &pa_stream_, num_channels, 0, paUInt8, sample_rate, + paFramesPerBufferUnspecified, PortAudioCallback, this); + } else if (bits_per_sample == 16) { + pa_open_ans = Pa_OpenDefaultStream( + &pa_stream_, num_channels, 0, paInt16, sample_rate, + paFramesPerBufferUnspecified, PortAudioCallback, this); + } else if (bits_per_sample == 32) { + pa_open_ans = Pa_OpenDefaultStream( + &pa_stream_, num_channels, 0, paInt32, sample_rate, + paFramesPerBufferUnspecified, PortAudioCallback, this); + } else { + std::cerr << "Unsupported BitsPerSample: " << bits_per_sample + << std::endl; + return false; + } + if (pa_open_ans != paNoError) { + std::cerr << "Fail to open PortAudio stream, error message is \"" + << Pa_GetErrorText(pa_open_ans) << "\"" << std::endl; + return false; + } + + PaError pa_stream_start_ans = Pa_StartStream(pa_stream_); + if (pa_stream_start_ans != paNoError) { + std::cerr << "Fail to start PortAudio stream, error message is \"" + << Pa_GetErrorText(pa_stream_start_ans) << "\"" << std::endl; + return false; + } + return true; + } + + private: + // Pointer to the ring buffer memory. + char* ringbuffer_; + + // Ring buffer wrapper used in PortAudio. + PaUtilRingBuffer pa_ringbuffer_; + + // Pointer to PortAudio stream. + PaStream* pa_stream_; + + // Number of lost samples at each Read() due to ring buffer overflow. + int num_lost_samples_; + + // Wait for this number of samples in each Read() call. 
+ int min_read_samples_; +}; + +int PortAudioCallback(const void* input, + void* output, + unsigned long frame_count, + const PaStreamCallbackTimeInfo* time_info, + PaStreamCallbackFlags status_flags, + void* user_data) { + PortAudioWrapper* pa_wrapper = reinterpret_cast(user_data); + pa_wrapper->Callback(input, output, frame_count, time_info, status_flags); + return paContinue; +} + +void SignalHandler(int signal){ + std::cerr << "Caught signal " << signal << ", terminating..." << std::endl; + exit(0); +} + +int main(int argc, char* argv[]) { + std::string usage = + "Example that shows how to use Snowboy in C++. Parameters are\n" + "hard-coded in the parameter section. Please check the source code for\n" + "more details. Audio is captured by PortAudio.\n" + "\n" + "To run the example:\n" + " ./demo\n"; + + // Checks the command. + if (argc > 1) { + std::cerr << usage; + exit(1); + } + + // Configures signal handling. + struct sigaction sig_int_handler; + sig_int_handler.sa_handler = SignalHandler; + sigemptyset(&sig_int_handler.sa_mask); + sig_int_handler.sa_flags = 0; + sigaction(SIGINT, &sig_int_handler, NULL); + + // Parameter section. + // If you have multiple hotword models (e.g., 2), you should set + // and as follows: + // model_filename = + // "resources/models/snowboy.umdl,resources/models/smart_mirror.umdl"; + // sensitivity_str = "0.5,0.5"; + std::string resource_filename = "resources/common.res"; + std::string model_filename = "resources/models/snowboy.umdl"; + std::string sensitivity_str = "0.5"; + float audio_gain = 1; + bool apply_frontend = false; + + // Initializes Snowboy detector. + snowboy::SnowboyDetect detector(resource_filename, model_filename); + detector.SetSensitivity(sensitivity_str); + detector.SetAudioGain(audio_gain); + detector.ApplyFrontend(apply_frontend); + + // Initializes PortAudio. You may use other tools to capture the audio. + PortAudioWrapper pa_wrapper(detector.SampleRate(), + detector.NumChannels(), detector.BitsPerSample()); + + // Runs the detection. + // Note: I hard-coded as data type because detector.BitsPerSample() + // returns 16. + std::cout << "Listening... Press Ctrl+C to exit" << std::endl; + std::vector data; + while (true) { + pa_wrapper.Read(&data); + if (data.size() != 0) { + int result = detector.RunDetection(data.data(), data.size()); + if (result > 0) { + std::cout << "Hotword " << result << " detected!" << std::endl; + } + } + } + + return 0; +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/C++/demo.mk b/src/Guide_stick_system/voice_assistant/snowboy/examples/C++/demo.mk new file mode 100644 index 0000000..db136d1 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/C++/demo.mk @@ -0,0 +1,50 @@ +TOPDIR := ../../ +DYNAMIC := True +CC = $(CXX) +CXX := +LDFLAGS := +LDLIBS := +PORTAUDIOINC := portaudio/install/include +PORTAUDIOLIBS := portaudio/install/lib/libportaudio.a + +CXXFLAGS += -D_GLIBCXX_USE_CXX11_ABI=0 + +ifeq ($(DYNAMIC), True) + CXXFLAGS += -fPIC +endif + +ifeq ($(shell uname -m | cut -c 1-3), x86) + CXXFLAGS += -msse -msse2 +endif + +ifeq ($(shell uname), Darwin) + # By default Mac uses clang++ as g++, but people may have changed their + # default configuration. 
+ CXX := clang++ + CXXFLAGS += -I$(TOPDIR) -Wall -Wno-sign-compare -Winit-self \ + -DHAVE_POSIX_MEMALIGN -DHAVE_CLAPACK -I$(PORTAUDIOINC) + LDLIBS += -ldl -lm -framework Accelerate -framework CoreAudio \ + -framework AudioToolbox -framework AudioUnit -framework CoreServices \ + $(PORTAUDIOLIBS) + SNOWBOYDETECTLIBFILE := $(TOPDIR)/lib/osx/libsnowboy-detect.a +else ifeq ($(shell uname), Linux) + CXX := g++ + CXXFLAGS += -I$(TOPDIR) -std=c++0x -Wall -Wno-sign-compare \ + -Wno-unused-local-typedefs -Winit-self -rdynamic \ + -DHAVE_POSIX_MEMALIGN -I$(PORTAUDIOINC) + LDLIBS += -ldl -lm -Wl,-Bstatic -Wl,-Bdynamic -lrt -lpthread $(PORTAUDIOLIBS)\ + -L/usr/lib/atlas-base -lf77blas -lcblas -llapack_atlas -latlas -lasound + SNOWBOYDETECTLIBFILE := $(TOPDIR)/lib/ubuntu64/libsnowboy-detect.a + ifneq (,$(findstring arm,$(shell uname -m))) + SNOWBOYDETECTLIBFILE := $(TOPDIR)/lib/rpi/libsnowboy-detect.a + endif +endif + +# Suppress clang warnings... +COMPILER = $(shell $(CXX) -v 2>&1 ) +ifeq ($(findstring clang,$(COMPILER)), clang) + CXXFLAGS += -Wno-mismatched-tags -Wno-c++11-extensions +endif + +# Set optimization level. +CXXFLAGS += -O3 diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/C++/demo2.cc b/src/Guide_stick_system/voice_assistant/snowboy/examples/C++/demo2.cc new file mode 100644 index 0000000..64a17e6 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/C++/demo2.cc @@ -0,0 +1,150 @@ +#include +#include "include/snowboy-detect.h" +#include "portaudio.h" + +#define resource_filename "resources/common.res" +#define model_filename "resources/models/snowboy.umdl" +#define sensitivity_str "0.5" +#define audio_gain 1.0 +#define apply_frontend false + +struct wavHeader { //44 byte HEADER only + char RIFF[4]; + int RIFFsize; + char fmt[8]; + int fmtSize; + short fmtTag; + short nchan; + int fs; + int avgBps; + short nBlockAlign; + short bps; + char data[4]; + int datasize; +}; + + +void readWavHeader(wavHeader *wavhdr, FILE *fi) { + //===================================================== + // Reads the WAV file header considering the follow restrictions: + // - format tag needs to be 1=PCM (no encoding) + // - shoud be imidiately before the databytes + // (it should not contain chunks after 'data') + // Returns a pointer pointing to the begining of the data + + char *tag = (char *)wavhdr; + fread(wavhdr, 34, 1, fi); //starting tag should be "RIFF" + if (tag[0] != 'R' || tag[1] != 'I' || tag[2] != 'F' || tag[3] != 'F') { + fclose(fi); + perror("NO 'RIFF'."); + } + if (wavhdr->fmtTag != 1) { + fclose(fi); + perror("WAV file has encoded data or it is WAVEFORMATEXTENSIBLE."); + } + if (wavhdr->fmtSize == 14) { + wavhdr->bps = 16; + } + if (wavhdr->fmtSize >= 16) { + fread(&wavhdr->bps, 2, 1, fi); + } + if (wavhdr->fmtSize == 18) { + short lixo; + fread(&lixo, 2, 1, fi); + } + tag += 36; //aponta para wavhdr->data + fread(tag, 4, 1, fi); //data chunk deve estar aqui. + while (tag[0] != 'd' || tag[1] != 'a' || tag[2] != 't' || tag[3] != 'a') { + fread(tag, 4, 1, fi); + if (ftell(fi) >= long(wavhdr->RIFFsize)) { + fclose(fi); + perror("Bad WAV header !"); + } + } + fread(&wavhdr->datasize, 4, 1, fi); //data size + // Assuming that header ends here. + // From here until the end it is audio data +} + + + +int main(int argc, char * argv[]) { + std::string usage = + "C++ demo that shows how to use snowboy. In this examle user can read\n" + "the audio data from a file.\n" + "\n" + "Atention reading from a file: this software is for simulation/test\n" + "only. 
You need to take precautions when loading a file into the\n" + "memory.\n" + "\n" + "To run the example:\n" + " ./demo2 [filename.raw || filename.wav ]\n" + "\n" + "IMPORTANT NOTE: Raw file must be 16kHz sample, mono and 16bit\n"; + + // default + char * filename; + int fsize; + short * data_buffer = NULL; + bool isRaw = true; + FILE *f = NULL; + + if (argc > 2 or argc < 2) { + std::cout << usage << std::endl; + exit(1); + } else { + filename = argv[1]; + } + + std::string str = filename; + std::string type = ".wav"; + + if (str.find(type) != std::string::npos) { + isRaw = false; + } + + + if (filename != NULL) { + f = fopen(filename,"rb"); + } + + if (f == NULL) { + perror ("Error opening file"); + return(-1); + } + + if (!isRaw) { + wavHeader *wavhdr = new wavHeader(); + readWavHeader(wavhdr, f); + + data_buffer = (short *)malloc(wavhdr->datasize); + // Consume all the audio to the buffer + fread(data_buffer, wavhdr->datasize, 1, f); + fclose(f); + fsize = wavhdr->datasize; + } else { + fseek(f,0,SEEK_END); + fsize = ftell(f); + rewind(f); + + // Consume all the audio to the buffer + data_buffer = (short *)malloc(fsize); + int aa = fread(&data_buffer[0], 1 ,fsize, f); + std::cout << "Read bytes: " << aa << std::endl; + fclose(f); + + } + + // Initializes Snowboy detector. + snowboy::SnowboyDetect detector(resource_filename, model_filename); + detector.SetSensitivity(sensitivity_str); + detector.SetAudioGain(audio_gain); + detector.ApplyFrontend(apply_frontend); + + int result = detector.RunDetection(&data_buffer[0], fsize/sizeof(short)); + std::cout << ">>>>> Result: " << result << " <<<<<" << std::endl; + std::cout << "Legend: -2: noise | -1: error | 0: silence | 1: hotword" + << std::endl; + + return 0; +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/C++/install_portaudio.sh b/src/Guide_stick_system/voice_assistant/snowboy/examples/C++/install_portaudio.sh new file mode 100644 index 0000000..ceecd2e --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/C++/install_portaudio.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +# This script attempts to install PortAudio, which can grap a live audio stream +# from the soundcard. +# +# On linux systems, we only build with ALSA, so make sure you install it using +# e.g.: +# sudo apt-get -y install libasound2-dev + +echo "Installing portaudio" + +if [ ! -e pa_stable_v190600_20161030.tgz ]; then + wget -T 10 -t 3 \ + http://www.portaudio.com/archives/pa_stable_v190600_20161030.tgz || exit 1; +fi + +tar -xovzf pa_stable_v190600_20161030.tgz || exit 1 + +cd portaudio +patch < ../patches/portaudio.patch + +MACOS=`uname 2>/dev/null | grep Darwin` +if [ -z "$MACOS" ]; then + ./configure --without-jack --without-oss \ + --with-alsa --prefix=`pwd`/install --with-pic || exit 1; + sed -i '40s:src/common/pa_ringbuffer.o::g' Makefile + sed -i '40s:$: src/common/pa_ringbuffer.o:' Makefile +else + # People may have changed OSX's default configuration -- we use clang++. + CC=clang CXX=clang++ ./configure --prefix=`pwd`/install --with-pic +fi + +make +make install + +cd .. 
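For orientation, the file-based flow in demo2.cc (load a 16 kHz, mono, 16-bit recording, run a single RunDetection pass) can also be written against the SWIG Python binding shipped in this repository's swig/ directory. The sketch below is illustrative only and is not part of this change set; it assumes the generated Python module is named `snowboydetect` and is importable, and that the `resources/` paths match the ones used by the C++/Go examples.

```python
# Hypothetical sketch: file-based hotword detection via the SWIG Python binding.
# The module name `snowboydetect` is an assumption, mirroring swig/Go usage.
import wave

import snowboydetect  # assumed SWIG-generated module

detector = snowboydetect.SnowboyDetect("resources/common.res",
                                       "resources/models/snowboy.umdl")
detector.SetSensitivity("0.5")
detector.SetAudioGain(1.0)
detector.ApplyFrontend(False)

wav = wave.open("resources/snowboy.wav", "rb")
assert wav.getframerate() == detector.SampleRate()   # 16 kHz expected
assert wav.getnchannels() == detector.NumChannels()  # mono expected
data = wav.readframes(wav.getnframes())              # 16-bit little-endian PCM bytes
wav.close()

# Return codes follow the C/C++/Go examples:
# >0 hotword index, 0 sound but no hotword, -1 error, -2 silence.
result = detector.RunDetection(data)
print("RunDetection returned", result)
```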
diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/C++/patches/portaudio.patch b/src/Guide_stick_system/voice_assistant/snowboy/examples/C++/patches/portaudio.patch new file mode 100644 index 0000000..1c61832 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/C++/patches/portaudio.patch @@ -0,0 +1,11 @@ +--- Makefile.in 2017-05-31 16:42:16.000000000 -0700 ++++ Makefile_new.in 2017-05-31 16:44:02.000000000 -0700 +@@ -193,6 +193,8 @@ + for include in $(INCLUDES); do \ + $(INSTALL_DATA) -m 644 $(top_srcdir)/include/$$include $(DESTDIR)$(includedir)/$$include; \ + done ++ $(INSTALL_DATA) -m 644 $(top_srcdir)/src/common/pa_ringbuffer.h $(DESTDIR)$(includedir)/$$include ++ $(INSTALL_DATA) -m 644 $(top_srcdir)/src/common/pa_util.h $(DESTDIR)$(includedir)/$$include + $(INSTALL) -d $(DESTDIR)$(libdir)/pkgconfig + $(INSTALL) -m 644 portaudio-2.0.pc $(DESTDIR)$(libdir)/pkgconfig/portaudio-2.0.pc + @echo "" diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/C++/resources b/src/Guide_stick_system/voice_assistant/snowboy/examples/C++/resources new file mode 100644 index 0000000..81bd1c5 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/C++/resources @@ -0,0 +1 @@ +../../resources/ \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/C/Makefile b/src/Guide_stick_system/voice_assistant/snowboy/examples/C/Makefile new file mode 100644 index 0000000..a310eb9 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/C/Makefile @@ -0,0 +1,20 @@ +include demo.mk + +BINFILE = demo + +OBJFILES = demo.o snowboy-detect-c-wrapper.o + +all: $(BINFILE) + +%.a: + $(MAKE) -C ${@D} ${@F} + +# We have to use the C++ compiler to link. +$(BINFILE): $(PORTAUDIOLIBS) $(SNOWBOYDETECTLIBFILE) $(OBJFILES) + $(CXX) $(OBJFILES) $(SNOWBOYDETECTLIBFILE) $(PORTAUDIOLIBS) $(LDLIBS) -o $(BINFILE) + +$(PORTAUDIOLIBS): + @-./install_portaudio.sh + +clean: + -rm -f *.o *.a $(BINFILE) $(OBJFILES) diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/C/demo.c b/src/Guide_stick_system/voice_assistant/snowboy/examples/C/demo.c new file mode 100644 index 0000000..f72f36b --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/C/demo.c @@ -0,0 +1,223 @@ +// example/C/demo.c + +// Copyright 2017 KITT.AI (author: Guoguo Chen) + +#include +#include +#include +#include +#include +#include +#include + +#include "snowboy-detect-c-wrapper.h" + +// Pointer to the ring buffer memory. +char* g_ringbuffer; +// Ring buffer wrapper used in PortAudio. +PaUtilRingBuffer g_pa_ringbuffer; +// Pointer to PortAudio stream. +PaStream* g_pa_stream; +// Number of lost samples at each LoadAudioData() due to ring buffer overflow. +int g_num_lost_samples; +// Wait for this number of samples in each LoadAudioData() call. +int g_min_read_samples; +// Pointer to the audio data. +int16_t* g_data; + +int PortAudioCallback(const void* input, + void* output, + unsigned long frame_count, + const PaStreamCallbackTimeInfo* time_info, + PaStreamCallbackFlags status_flags, + void* user_data) { + ring_buffer_size_t num_written_samples = + PaUtil_WriteRingBuffer(&g_pa_ringbuffer, input, frame_count); + g_num_lost_samples += frame_count - num_written_samples; + return paContinue; +} + +void StartAudioCapturing(int sample_rate, + int num_channels, int bits_per_sample) { + g_data = NULL; + g_num_lost_samples = 0; + g_min_read_samples = sample_rate * 0.1; + + // Allocates ring buffer memory. 
+ int ringbuffer_size = 16384; + g_ringbuffer = (char*)( + PaUtil_AllocateMemory(bits_per_sample / 8 * ringbuffer_size)); + if (g_ringbuffer == NULL) { + fprintf(stderr, "Fail to allocate memory for ring buffer.\n"); + exit(1); + } + + // Initializes PortAudio ring buffer. + ring_buffer_size_t rb_init_ans = + PaUtil_InitializeRingBuffer(&g_pa_ringbuffer, bits_per_sample / 8, + ringbuffer_size, g_ringbuffer); + if (rb_init_ans == -1) { + fprintf(stderr, "Ring buffer size is not power of 2.\n"); + exit(1); + } + + // Initializes PortAudio. + PaError pa_init_ans = Pa_Initialize(); + if (pa_init_ans != paNoError) { + fprintf(stderr, "Fail to initialize PortAudio, error message is %s.\n", + Pa_GetErrorText(pa_init_ans)); + exit(1); + } + + PaError pa_open_ans; + if (bits_per_sample == 8) { + pa_open_ans = Pa_OpenDefaultStream( + &g_pa_stream, num_channels, 0, paUInt8, sample_rate, + paFramesPerBufferUnspecified, PortAudioCallback, NULL); + } else if (bits_per_sample == 16) { + pa_open_ans = Pa_OpenDefaultStream( + &g_pa_stream, num_channels, 0, paInt16, sample_rate, + paFramesPerBufferUnspecified, PortAudioCallback, NULL); + } else if (bits_per_sample == 32) { + pa_open_ans = Pa_OpenDefaultStream( + &g_pa_stream, num_channels, 0, paInt32, sample_rate, + paFramesPerBufferUnspecified, PortAudioCallback, NULL); + } else { + fprintf(stderr, "Unsupported BitsPerSample: %d.\n", bits_per_sample); + exit(1); + } + if (pa_open_ans != paNoError) { + fprintf(stderr, "Fail to open PortAudio stream, error message is %s.\n", + Pa_GetErrorText(pa_open_ans)); + exit(1); + } + + PaError pa_stream_start_ans = Pa_StartStream(g_pa_stream); + if (pa_stream_start_ans != paNoError) { + fprintf(stderr, "Fail to start PortAudio stream, error message is %s.\n", + Pa_GetErrorText(pa_stream_start_ans)); + exit(1); + } +} + +void StopAudioCapturing() { + if (g_data != NULL) { + free(g_data); + g_data = NULL; + } + Pa_StopStream(g_pa_stream); + Pa_CloseStream(g_pa_stream); + Pa_Terminate(); + PaUtil_FreeMemory(g_ringbuffer); +} + +int LoadAudioData() { + if (g_data != NULL) { + free(g_data); + g_data = NULL; + } + + // Checks ring buffer overflow. + if (g_num_lost_samples > 0) { + fprintf(stderr, "Lost %d samples due to ring buffer overflow.\n", + g_num_lost_samples); + g_num_lost_samples = 0; + } + + ring_buffer_size_t num_available_samples = 0; + while (true) { + num_available_samples = + PaUtil_GetRingBufferReadAvailable(&g_pa_ringbuffer); + if (num_available_samples >= g_min_read_samples) { + break; + } + Pa_Sleep(5); + } + + // Reads data. + num_available_samples = PaUtil_GetRingBufferReadAvailable(&g_pa_ringbuffer); + g_data = malloc(num_available_samples * sizeof(int16_t)); + ring_buffer_size_t num_read_samples = PaUtil_ReadRingBuffer( + &g_pa_ringbuffer, g_data, num_available_samples); + if (num_read_samples != num_available_samples) { + fprintf(stderr, "%d samples were available, but only %d samples were read" + ".\n", num_available_samples, num_read_samples); + } + return num_read_samples; +} + +void SignalHandler(int signal) { + fprintf(stderr, "Caught signal %d, terminating...\n", signal); + exit(0); +} + +int main(int argc, char* argv[]) { + const char usage[] = + "Example that shows how to use Snowboy in pure C. Snowboy was written\n" + "in C++, so we have to write a wrapper in order to use Snowboy in pure\n" + "C. 
See snowboy-detect-c-wrapper.h and snowboy-detect-c-wrapper.cc for\n" + "more details.\n" + "\n" + "Parameters are hard-coded in the parameter section for this example.\n" + "Please check the source code for more details.\n" + "\n" + "Audio is captured by PortAudio, feel free to replace PortAudio with\n" + "your own audio capturing tool.\n" + "\n" + "To run the example:\n" + " ./demo\n"; + + // Checks the command. + if (argc > 1) { + printf("%s", usage); + exit(1); + } + + // Configures signal handling. + struct sigaction sig_int_handler; + sig_int_handler.sa_handler = SignalHandler; + sigemptyset(&sig_int_handler.sa_mask); + sig_int_handler.sa_flags = 0; + sigaction(SIGINT, &sig_int_handler, NULL); + + // Parameter section. + // If you have multiple hotword models (e.g., 2), you should set + // and as follows: + // model_filename = + // "resources/models/snowboy.umdl,resources/models/smart_mirror.umdl"; + // sensitivity_str = "0.5,0.5"; + const char resource_filename[] = "resources/common.res"; + const char model_filename[] = "resources/models/snowboy.umdl"; + const char sensitivity_str[] = "0.5"; + float audio_gain = 1; + bool apply_frontend = false; + + // Initializes Snowboy detector. + SnowboyDetect* detector = SnowboyDetectConstructor(resource_filename, + model_filename); + SnowboyDetectSetSensitivity(detector, sensitivity_str); + SnowboyDetectSetAudioGain(detector, audio_gain); + SnowboyDetectApplyFrontend(detector, apply_frontend); + + // Initializes PortAudio. You may use other tools to capture the audio. + StartAudioCapturing(SnowboyDetectSampleRate(detector), + SnowboyDetectNumChannels(detector), + SnowboyDetectBitsPerSample(detector)); + + // Runs the detection. + printf("Listening... Press Ctrl+C to exit\n"); + while (true) { + int array_length = LoadAudioData(); + if (array_length != 0) { + int result = SnowboyDetectRunDetection(detector, + g_data, array_length, false); + if (result > 0) { + printf("Hotword %d detected!\n", result); + } + } + } + + StopAudioCapturing(); + SnowboyDetectDestructor(detector); + return 0; +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/C/demo.mk b/src/Guide_stick_system/voice_assistant/snowboy/examples/C/demo.mk new file mode 100644 index 0000000..73d1140 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/C/demo.mk @@ -0,0 +1,58 @@ +TOPDIR := ../../ +DYNAMIC := True +CC := +CXX := +LDFLAGS := +LDLIBS := +PORTAUDIOINC := portaudio/install/include +PORTAUDIOLIBS := portaudio/install/lib/libportaudio.a + +CFLAGS := +CXXFLAGS += -D_GLIBCXX_USE_CXX11_ABI=0 + +ifeq ($(DYNAMIC), True) + CFLAGS += -fPIC + CXXFLAGS += -fPIC +endif + +ifeq ($(shell uname -m | cut -c 1-3), x86) + CFLAGS += -msse -msse2 + CXXFLAGS += -msse -msse2 +endif + +ifeq ($(shell uname), Darwin) + # By default Mac uses clang++ as g++, but people may have changed their + # default configuration. 
+ CC := clang + CXX := clang++ + CFLAGS += -I$(TOPDIR) -Wall -I$(PORTAUDIOINC) + CXXFLAGS += -I$(TOPDIR) -Wall -Wno-sign-compare -Winit-self \ + -DHAVE_POSIX_MEMALIGN -DHAVE_CLAPACK -I$(PORTAUDIOINC) + LDLIBS += -ldl -lm -framework Accelerate -framework CoreAudio \ + -framework AudioToolbox -framework AudioUnit -framework CoreServices \ + $(PORTAUDIOLIBS) + SNOWBOYDETECTLIBFILE := $(TOPDIR)/lib/osx/libsnowboy-detect.a +else ifeq ($(shell uname), Linux) + CC := gcc + CXX := g++ + CFLAGS += -I$(TOPDIR) -Wall -I$(PORTAUDIOINC) + CXXFLAGS += -I$(TOPDIR) -std=c++0x -Wall -Wno-sign-compare \ + -Wno-unused-local-typedefs -Winit-self -rdynamic \ + -DHAVE_POSIX_MEMALIGN -I$(PORTAUDIOINC) + LDLIBS += -ldl -lm -Wl,-Bstatic -Wl,-Bdynamic -lrt -lpthread $(PORTAUDIOLIBS)\ + -L/usr/lib/atlas-base -lf77blas -lcblas -llapack_atlas -latlas -lasound + SNOWBOYDETECTLIBFILE := $(TOPDIR)/lib/ubuntu64/libsnowboy-detect.a + ifneq (,$(findstring arm,$(shell uname -m))) + SNOWBOYDETECTLIBFILE := $(TOPDIR)/lib/rpi/libsnowboy-detect.a + endif +endif + +# Suppress clang warnings... +COMPILER = $(shell $(CXX) -v 2>&1 ) +ifeq ($(findstring clang,$(COMPILER)), clang) + CXXFLAGS += -Wno-mismatched-tags -Wno-c++11-extensions +endif + +# Set optimization level. +CFLAGS += -O3 +CXXFLAGS += -O3 diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/C/install_portaudio.sh b/src/Guide_stick_system/voice_assistant/snowboy/examples/C/install_portaudio.sh new file mode 100644 index 0000000..ceecd2e --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/C/install_portaudio.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +# This script attempts to install PortAudio, which can grap a live audio stream +# from the soundcard. +# +# On linux systems, we only build with ALSA, so make sure you install it using +# e.g.: +# sudo apt-get -y install libasound2-dev + +echo "Installing portaudio" + +if [ ! -e pa_stable_v190600_20161030.tgz ]; then + wget -T 10 -t 3 \ + http://www.portaudio.com/archives/pa_stable_v190600_20161030.tgz || exit 1; +fi + +tar -xovzf pa_stable_v190600_20161030.tgz || exit 1 + +cd portaudio +patch < ../patches/portaudio.patch + +MACOS=`uname 2>/dev/null | grep Darwin` +if [ -z "$MACOS" ]; then + ./configure --without-jack --without-oss \ + --with-alsa --prefix=`pwd`/install --with-pic || exit 1; + sed -i '40s:src/common/pa_ringbuffer.o::g' Makefile + sed -i '40s:$: src/common/pa_ringbuffer.o:' Makefile +else + # People may have changed OSX's default configuration -- we use clang++. + CC=clang CXX=clang++ ./configure --prefix=`pwd`/install --with-pic +fi + +make +make install + +cd .. 
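The C demo's main loop reads roughly 0.1 s of 16-bit mono audio per iteration and hands it to RunDetection. A rough Python equivalent is sketched below, again as an assumption-laden illustration rather than part of this diff: it presumes the third-party `pyaudio` package (Python bindings to the same PortAudio library installed by the script above) and the assumed `snowboydetect` module from the previous sketch.

```python
# Hypothetical sketch: streaming hotword detection, analogous to demo.c.
import pyaudio        # assumed to be installed; wraps PortAudio
import snowboydetect  # assumed SWIG-generated module

detector = snowboydetect.SnowboyDetect("resources/common.res",
                                       "resources/models/snowboy.umdl")
detector.SetSensitivity("0.5")
detector.SetAudioGain(1.0)
detector.ApplyFrontend(False)

chunk = detector.SampleRate() // 10          # ~0.1 s of audio per read
pa = pyaudio.PyAudio()
stream = pa.open(format=pyaudio.paInt16,
                 channels=detector.NumChannels(),
                 rate=detector.SampleRate(),
                 input=True,
                 frames_per_buffer=chunk)

print("Listening... Press Ctrl+C to exit")
try:
    while True:
        data = stream.read(chunk)            # 16-bit little-endian PCM bytes
        result = detector.RunDetection(data)
        if result > 0:
            print("Hotword", result, "detected!")
except KeyboardInterrupt:
    pass
finally:
    stream.stop_stream()
    stream.close()
    pa.terminate()
```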
diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/C/patches/portaudio.patch b/src/Guide_stick_system/voice_assistant/snowboy/examples/C/patches/portaudio.patch new file mode 100644 index 0000000..1c61832 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/C/patches/portaudio.patch @@ -0,0 +1,11 @@ +--- Makefile.in 2017-05-31 16:42:16.000000000 -0700 ++++ Makefile_new.in 2017-05-31 16:44:02.000000000 -0700 +@@ -193,6 +193,8 @@ + for include in $(INCLUDES); do \ + $(INSTALL_DATA) -m 644 $(top_srcdir)/include/$$include $(DESTDIR)$(includedir)/$$include; \ + done ++ $(INSTALL_DATA) -m 644 $(top_srcdir)/src/common/pa_ringbuffer.h $(DESTDIR)$(includedir)/$$include ++ $(INSTALL_DATA) -m 644 $(top_srcdir)/src/common/pa_util.h $(DESTDIR)$(includedir)/$$include + $(INSTALL) -d $(DESTDIR)$(libdir)/pkgconfig + $(INSTALL) -m 644 portaudio-2.0.pc $(DESTDIR)$(libdir)/pkgconfig/portaudio-2.0.pc + @echo "" diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/C/resources b/src/Guide_stick_system/voice_assistant/snowboy/examples/C/resources new file mode 100644 index 0000000..bc76415 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/C/resources @@ -0,0 +1 @@ +../../resources \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/C/snowboy-detect-c-wrapper.cc b/src/Guide_stick_system/voice_assistant/snowboy/examples/C/snowboy-detect-c-wrapper.cc new file mode 100644 index 0000000..9129271 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/C/snowboy-detect-c-wrapper.cc @@ -0,0 +1,82 @@ +// snowboy-detect-c-wrapper.cc + +// Copyright 2017 KITT.AI (author: Guoguo Chen) + +#include + +#include "snowboy-detect-c-wrapper.h" +#include "include/snowboy-detect.h" + +extern "C" { + SnowboyDetect* SnowboyDetectConstructor(const char* const resource_filename, + const char* const model_str) { + return reinterpret_cast( + new snowboy::SnowboyDetect(resource_filename, model_str)); + } + + bool SnowboyDetectReset(SnowboyDetect* detector) { + assert(detector != NULL); + return reinterpret_cast(detector)->Reset(); + } + + int SnowboyDetectRunDetection(SnowboyDetect* detector, + const int16_t* const data, + const int array_length, bool is_end) { + assert(detector != NULL); + assert(data != NULL); + return reinterpret_cast( + detector)->RunDetection(data, array_length, is_end); + } + + void SnowboyDetectSetSensitivity(SnowboyDetect* detector, + const char* const sensitivity_str) { + assert(detector != NULL); + reinterpret_cast( + detector)->SetSensitivity(sensitivity_str); + } + + void SnowboyDetectSetAudioGain(SnowboyDetect* detector, + const float audio_gain) { + assert(detector != NULL); + reinterpret_cast( + detector)->SetAudioGain(audio_gain); + } + + void SnowboyDetectUpdateModel(SnowboyDetect* detector) { + assert(detector != NULL); + reinterpret_cast(detector)->UpdateModel(); + } + + void SnowboyDetectApplyFrontend(SnowboyDetect* detector, + const bool apply_frontend) { + assert(detector != NULL); + reinterpret_cast( + detector)->ApplyFrontend(apply_frontend); + } + + int SnowboyDetectNumHotwords(SnowboyDetect* detector) { + assert(detector != NULL); + return reinterpret_cast(detector)->NumHotwords(); + } + + int SnowboyDetectSampleRate(SnowboyDetect* detector) { + assert(detector != NULL); + return reinterpret_cast(detector)->SampleRate(); + } + + int SnowboyDetectNumChannels(SnowboyDetect* detector) { + assert(detector != NULL); + return 
reinterpret_cast(detector)->NumChannels(); + } + + int SnowboyDetectBitsPerSample(SnowboyDetect* detector) { + assert(detector != NULL); + return reinterpret_cast(detector)->BitsPerSample(); + } + + void SnowboyDetectDestructor(SnowboyDetect* detector) { + assert(detector != NULL); + delete reinterpret_cast(detector); + detector = NULL; + } +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/C/snowboy-detect-c-wrapper.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/C/snowboy-detect-c-wrapper.h new file mode 100644 index 0000000..99a4e02 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/C/snowboy-detect-c-wrapper.h @@ -0,0 +1,51 @@ +// snowboy-detect-c-wrapper.h + +// Copyright 2017 KITT.AI (author: Guoguo Chen) + +#ifndef SNOWBOY_DETECT_C_WRAPPER_H_ +#define SNOWBOY_DETECT_C_WRAPPER_H_ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + + typedef struct SnowboyDetect SnowboyDetect; + + SnowboyDetect* SnowboyDetectConstructor(const char* const resource_filename, + const char* const model_str); + + bool SnowboyDetectReset(SnowboyDetect* detector); + + int SnowboyDetectRunDetection(SnowboyDetect* detector, + const int16_t* const data, + const int array_length, bool is_end); + + void SnowboyDetectSetSensitivity(SnowboyDetect* detector, + const char* const sensitivity_str); + + void SnowboyDetectSetAudioGain(SnowboyDetect* detector, + const float audio_gain); + + void SnowboyDetectUpdateModel(SnowboyDetect* detector); + + void SnowboyDetectApplyFrontend(SnowboyDetect* detector, + const bool apply_frontend); + + int SnowboyDetectNumHotwords(SnowboyDetect* detector); + + int SnowboyDetectSampleRate(SnowboyDetect* detector); + + int SnowboyDetectNumChannels(SnowboyDetect* detector); + + int SnowboyDetectBitsPerSample(SnowboyDetect* detector); + + void SnowboyDetectDestructor(SnowboyDetect* detector); + +#ifdef __cplusplus +} +#endif + +#endif // SNOWBOY_DETECT_C_WRAPPER_H_ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Go/detect/main.go b/src/Guide_stick_system/voice_assistant/snowboy/examples/Go/detect/main.go new file mode 100644 index 0000000..00ec04d --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Go/detect/main.go @@ -0,0 +1,40 @@ +package main + +import ( + "fmt" + "io/ioutil" + "unsafe" + "os" + + "github.com/Kitt-AI/snowboy/swig/Go" +) + +func main() { + if len(os.Args) < 3 { + fmt.Printf("usage: %s \n", os.Args[0]) + return + } + fmt.Printf("Snowboy detecting keyword in %s\n", os.Args[2]) + detector := snowboydetect.NewSnowboyDetect("../../../resources/common.res", os.Args[1]) + detector.SetSensitivity("0.5") + detector.SetAudioGain(1) + detector.ApplyFrontend(false) + defer snowboydetect.DeleteSnowboyDetect(detector) + + dat, err := ioutil.ReadFile(os.Args[2]) + if err != nil { + panic(err) + } + + ptr := snowboydetect.SwigcptrInt16_t(unsafe.Pointer(&dat[0])) + res := detector.RunDetection(ptr, len(dat) / 2 /* len of int16 */) + if res == -2 { + fmt.Println("Snowboy detected silence") + } else if res == -1 { + fmt.Println("Snowboy detection returned error") + } else if res == 0 { + fmt.Println("Snowboy detected nothing") + } else { + fmt.Println("Snowboy detected keyword ", res) + } +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Go/detect/readme.md b/src/Guide_stick_system/voice_assistant/snowboy/examples/Go/detect/readme.md new file mode 100644 index 0000000..1b8ea90 --- /dev/null +++ 
b/src/Guide_stick_system/voice_assistant/snowboy/examples/Go/detect/readme.md @@ -0,0 +1,40 @@ +## Dependencies + +### Swig +http://www.swig.org/ + +### Go Package +``` +go get github.com/Kitt-AI/snowboy/swig/Go +``` + +## Building + +``` +go build -o snowboy main.go +``` + +## Running + +``` +./snowboy +``` + +### Examples +Cmd: +`./snowboy ../../../resources/models/snowboy.umdl ../../../resources/snowboy.wav` + +Output: +``` +Snowboy detecting keyword in ../../resources/snowboy.wav +Snowboy detected keyword 1 +``` + +Cmd: +`./snowboy ../../resources/alexa.umdl ../../resources/snowboy.wav` + +Output: +``` +Snowboy detecting keyword in ../../resources/snowboy.wav +Snowboy detected nothing +``` diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Go/listen/README.md b/src/Guide_stick_system/voice_assistant/snowboy/examples/Go/listen/README.md new file mode 100644 index 0000000..cf8aab0 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Go/listen/README.md @@ -0,0 +1,36 @@ +## Dependencies + +### Swig +http://www.swig.org/ + +### Go Package alongside the more idiomatic wrapper `go-snowboy`, plus PortAudio +``` +github.com/brentnd/go-snowboy +github.com/gordonklaus/portaudio +``` + +## Building + +``` +go build -o listen main.go +``` + +## Running + +``` +./listen [path to snowboy resource file] [path to snowboy hotword file] +``` + +### Examples +Cmd: +`./listen ../../../resources/common.res ../../../resources/models/snowboy.umdl` + +Output: +``` +sample rate=16000, num channels=1, bit depth=16 +Silence detected. +Silence detected. +Silence detected. +You said the hotword! +Silence detected. +``` diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Go/listen/main.go b/src/Guide_stick_system/voice_assistant/snowboy/examples/Go/listen/main.go new file mode 100644 index 0000000..af772f7 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Go/listen/main.go @@ -0,0 +1,101 @@ +// This example streams the microphone thru Snowboy to listen for the hotword, +// by using the PortAudio interface. +// +// HOW TO USE: +// go run examples/Go/listen/main.go [path to snowboy resource file] [path to snowboy hotword file] +// +package main + +import ( + "bytes" + "encoding/binary" + "fmt" + "os" + "time" + + "github.com/brentnd/go-snowboy" + "github.com/gordonklaus/portaudio" +) + +// Sound represents a sound stream implementing the io.Reader interface +// that provides the microphone data. +type Sound struct { + stream *portaudio.Stream + data []int16 +} + +// Init initializes the Sound's PortAudio stream. +func (s *Sound) Init() { + inputChannels := 1 + outputChannels := 0 + sampleRate := 16000 + s.data = make([]int16, 1024) + + // initialize the audio recording interface + err := portaudio.Initialize() + if err != nil { + fmt.Errorf("Error initialize audio interface: %s", err) + return + } + + // open the sound input stream for the microphone + stream, err := portaudio.OpenDefaultStream(inputChannels, outputChannels, float64(sampleRate), len(s.data), s.data) + if err != nil { + fmt.Errorf("Error open default audio stream: %s", err) + return + } + + err = stream.Start() + if err != nil { + fmt.Errorf("Error on stream start: %s", err) + return + } + + s.stream = stream +} + +// Close closes down the Sound's PortAudio connection. +func (s *Sound) Close() { + s.stream.Close() + portaudio.Terminate() +} + +// Read is the Sound's implementation of the io.Reader interface. 
+func (s *Sound) Read(p []byte) (int, error) { + s.stream.Read() + + buf := &bytes.Buffer{} + for _, v := range s.data { + binary.Write(buf, binary.LittleEndian, v) + } + + copy(p, buf.Bytes()) + return len(p), nil +} + +func main() { + // open the mic + mic := &Sound{} + mic.Init() + defer mic.Close() + + // open the snowboy detector + d := snowboy.NewDetector(os.Args[1]) + defer d.Close() + + // set the handlers + d.HandleFunc(snowboy.NewHotword(os.Args[2], 0.5), func(string) { + fmt.Println("You said the hotword!") + }) + + d.HandleSilenceFunc(1*time.Second, func(string) { + fmt.Println("Silence detected.") + }) + + // display the detector's expected audio format + sr, nc, bd := d.AudioFormat() + fmt.Printf("sample rate=%d, num channels=%d, bit depth=%d\n", sr, nc, bd) + + // start detecting using the microphone + d.ReadAndDetect(mic) +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Java/Demo.java b/src/Guide_stick_system/voice_assistant/snowboy/examples/Java/Demo.java new file mode 100644 index 0000000..d064007 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Java/Demo.java @@ -0,0 +1,65 @@ +import ai.kitt.snowboy.*; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; + +import javax.sound.sampled.AudioFormat; +import javax.sound.sampled.AudioSystem; +import javax.sound.sampled.DataLine; +import javax.sound.sampled.SourceDataLine; +import javax.sound.sampled.TargetDataLine; + +public class Demo { + static { + System.loadLibrary("snowboy-detect-java"); + } + + public static void main(String[] args) { + // Sets up audio. + AudioFormat format = new AudioFormat(16000, 16, 1, true, false); + DataLine.Info targetInfo = new DataLine.Info(TargetDataLine.class, format); + + // Sets up Snowboy. + SnowboyDetect detector = new SnowboyDetect("resources/common.res", + "resources/models/snowboy.umdl"); + detector.SetSensitivity("0.5"); + detector.SetAudioGain(1); + detector.ApplyFrontend(false); + + try { + TargetDataLine targetLine = + (TargetDataLine) AudioSystem.getLine(targetInfo); + targetLine.open(format); + targetLine.start(); + + // Reads 0.1 second of audio in each call. + byte[] targetData = new byte[3200]; + short[] snowboyData = new short[1600]; + int numBytesRead; + + while (true) { + // Reads the audio data in the blocking mode. If you are on a very slow + // machine such that the hotword detector could not process the audio + // data in real time, this will cause problem... + numBytesRead = targetLine.read(targetData, 0, targetData.length); + + if (numBytesRead == -1) { + System.out.print("Fails to read audio data."); + break; + } + + // Converts bytes into int16 that Snowboy will read. + ByteBuffer.wrap(targetData).order( + ByteOrder.LITTLE_ENDIAN).asShortBuffer().get(snowboyData); + + // Detection. 
+ int result = detector.RunDetection(snowboyData, snowboyData.length); + if (result > 0) { + System.out.print("Hotword " + result + " detected!\n"); + } + } + } catch (Exception e) { + System.err.println(e); + } + } +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Java/Makefile b/src/Guide_stick_system/voice_assistant/snowboy/examples/Java/Makefile new file mode 100644 index 0000000..91bba7c --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Java/Makefile @@ -0,0 +1,10 @@ +all: Demo.class + +Demo.class: Demo.java + javac -classpath java Demo.java + +run: Demo.class + java -classpath .:java -Djava.library.path=jniLibs Demo + +clean: + -rm -f Demo.class diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Java/java b/src/Guide_stick_system/voice_assistant/snowboy/examples/Java/java new file mode 100644 index 0000000..5ab8548 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Java/java @@ -0,0 +1 @@ +../../swig/Java/java/ \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Java/jniLibs b/src/Guide_stick_system/voice_assistant/snowboy/examples/Java/jniLibs new file mode 100644 index 0000000..5e07dc2 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Java/jniLibs @@ -0,0 +1 @@ +../../swig/Java/jniLibs/ \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Java/resources b/src/Guide_stick_system/voice_assistant/snowboy/examples/Java/resources new file mode 100644 index 0000000..81bd1c5 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Java/resources @@ -0,0 +1 @@ +../../resources/ \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Node/file.js b/src/Guide_stick_system/voice_assistant/snowboy/examples/Node/file.js new file mode 100644 index 0000000..c97bf54 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Node/file.js @@ -0,0 +1,46 @@ +const fs = require('fs'); +const wav = require('wav'); +const Detector = require('../../').Detector; +const Models = require('../../').Models; + +const models = new Models(); + +models.add({ + file: 'resources/models/snowboy.umdl', + sensitivity: '0.5', + hotwords : 'snowboy' +}); + +const detector = new Detector({ + resource: "resources/common.res", + models: models, + audioGain: 1.0, + applyFrontend: false +}); + +detector.on('silence', function () { + console.log('silence'); +}); + +detector.on('sound', function (buffer) { + // contains the last chunk of the audio that triggers the "sound" + // event. It could be written to a wav stream. + console.log('sound'); +}); + +detector.on('error', function () { + console.log('error'); +}); + +detector.on('hotword', function (index, hotword, buffer) { + // contains the last chunk of the audio that triggers the "hotword" + // event. It could be written to a wav stream. You will have to use it + // together with the in the "sound" event if you want to get audio + // data after the hotword. 
+ console.log('hotword', index, hotword); +}); + +const file = fs.createReadStream('resources/snowboy.wav'); +const reader = new wav.Reader(); + +file.pipe(reader).pipe(detector); diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Node/microphone.js b/src/Guide_stick_system/voice_assistant/snowboy/examples/Node/microphone.js new file mode 100644 index 0000000..a0dfe2e --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Node/microphone.js @@ -0,0 +1,48 @@ +const record = require('node-record-lpcm16'); +const Detector = require('../../').Detector; +const Models = require('../../').Models; + +const models = new Models(); + +models.add({ + file: 'resources/models/snowboy.umdl', + sensitivity: '0.5', + hotwords : 'snowboy' +}); + +const detector = new Detector({ + resource: "resources/common.res", + models: models, + audioGain: 2.0, + applyFrontend: true +}); + +detector.on('silence', function () { + console.log('silence'); +}); + +detector.on('sound', function (buffer) { + // contains the last chunk of the audio that triggers the "sound" + // event. It could be written to a wav stream. + console.log('sound'); +}); + +detector.on('error', function () { + console.log('error'); +}); + +detector.on('hotword', function (index, hotword, buffer) { + // contains the last chunk of the audio that triggers the "hotword" + // event. It could be written to a wav stream. You will have to use it + // together with the in the "sound" event if you want to get audio + // data after the hotword. + console.log(buffer); + console.log('hotword', index, hotword); +}); + +const mic = record.start({ + threshold: 0, + verbose: true +}); + +mic.pipe(detector); diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Node/resources b/src/Guide_stick_system/voice_assistant/snowboy/examples/Node/resources new file mode 100644 index 0000000..81bd1c5 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Node/resources @@ -0,0 +1 @@ +../../resources/ \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Perl/cpanfile b/src/Guide_stick_system/voice_assistant/snowboy/examples/Perl/cpanfile new file mode 100644 index 0000000..fa2cc20 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Perl/cpanfile @@ -0,0 +1,5 @@ +requires 'Audio::PortAudio', '>= 0.03'; +requires 'JSON', '>= 2.00'; +requires 'Statistics::Basic', '>= 1.6611'; +requires 'LWP::UserAgent', '>= 6.0.0'; +requires 'Mozilla::CA', '>= 20160104'; diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Perl/resources b/src/Guide_stick_system/voice_assistant/snowboy/examples/Perl/resources new file mode 100644 index 0000000..81bd1c5 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Perl/resources @@ -0,0 +1 @@ +../../resources/ \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Perl/snowboy_RESTful_train.pl b/src/Guide_stick_system/voice_assistant/snowboy/examples/Perl/snowboy_RESTful_train.pl new file mode 100644 index 0000000..60f0ff2 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Perl/snowboy_RESTful_train.pl @@ -0,0 +1,251 @@ +#!/usr/bin/perl + +# This script uses PortAudio to record 3 audio samples on your computer, and +# sends them to the KITT.AI RESTful API to train the personal hotword model. 
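# Usage (arguments in the order consumed below):
#   ./snowboy_RESTful_train.pl <api_token> <hotword_name> <language>
# Overall flow, as implemented in this file:
#   1. Listen to ~1 s of ambient audio to estimate a silence threshold
#      (standard deviation with a 2x safety margin) and the DC offset.
#   2. Record three utterances, each ended by ~500 ms of trailing silence
#      (TRAILING_SILENCE_BLOCKS blocks of 20 ms).
#   3. Wrap each utterance in a WAV header, base64-encode it, and POST the
#      JSON request (name, language, token, voice_samples) to the training
#      endpoint at https://snowboy.kitt.ai/api/v1/train/.
#   4. On success, save the three samples and the returned personal model
#      (.pmdl) under the data/ directory.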
+ +use Audio::PortAudio; +use File::Path qw(make_path); +use IO::Handle; +use JSON; +use LWP::UserAgent; +use MIME::Base64; +use Statistics::Basic qw(:all); + +my $Usage = < + e.g.: ./snowboy_RESTful_train.pl \ + abcdefghijklmnopqrstuvwxyzABCD0123456789 snowboy en + +EOU + +if (@ARGV != 3) { + die $Usage; +} + +# Gets parameters. +my $api_token = shift @ARGV; +my $hotword = shift @ARGV; +my $language = shift @ARGV; + +# Turns on OUTPUT_AUTOFLUSH. +$|++; + +# Audio format +use constant RATE => 16000; +use constant NUMCHANNELS => 1; +use constant BITSPERSAMPLE => 16; + +# Calculates number of samples per chunk based on a given chunk size in +# milliseconds. +use constant CHUNK_SIZE_MS => 20; +use constant SAMPLES => RATE * CHUNK_SIZE_MS / 1000; + +# Miniumum number of non-silent chunks to count as utterance. Anything less is +# noise. +use constant MIN_SPEECH => 5; + +# Detects 500ms silence (25 blocks * 20 ms ~500ms of speech) before termiating +# recording. +use constant TRAILING_SILENCE_BLOCKS => 25; + +# Depth of FIFO buffer in blocks +use constant FIFO_DEPTH => 25; + +# REST endpoint for model training +use constant URL => 'https://snowboy.kitt.ai/api/v1/train/'; + +$trailing_silence_blocks = 0; +$speech_blocks = 0; +$buffer = ''; + +# Audio capturing. +my $api = Audio::PortAudio::default_host_api(); +my $device = $api->default_input_device; +my $stream = $device->open_read_stream( + {channel_count => NUMCHANNELS, sample_format => 'int16'}, + RATE, + SAMPLES); + +# Collects 1000 msec worth of voice data and calculates silence treshold and DC +# offset. +print "\n"; +print "Calculating statistics on silence, please be quite...\n"; +for ($i = 0; $i < (1000 / CHUNK_SIZE_MS); $i++) { + # SLN format = 2 bytes per sample. + $stream->read($buffer, SAMPLES); + + # Discards first (usually noisy) block. + next if not $i; + + # Unpacks into an array of 16-bit linear samples. + my $vec = vector(unpack('s*', $buffer)); + + my $stddev = round(stddev($vec)); + my $mean = round(mean($vec)); + + push @alldevs, $stddev; + push @allmeans, $mean; + + # printf "%.2f secs: mean: %d, stdddev: %d\r", + # $i * SAMPLES / RATE, $mean, $stddev; + + # Finds MAX mean across all data chunks. + $maxdev = $stddev if $stddev > $maxdev; +} + +my $vec = vector(@alldevs); +$stddev = round(stddev($vec)); +$mean = round(mean($vec)); + +$maxdev = $mean + $stddev; + +# Too quiet (good silence supression, like SIP phones) +$maxdev = 100 if $maxdev < 100; + +# Add margin to silence detection to be safe. +$maxdev *= 2; + +$dcoffset = round(mean(@allmeans)); + +print "Done (Silence Threshold: $maxdev, DC Offset: $dcoffset)\n"; + +@spin = (qw[/ - \ |]); + +# Collects 3 voice samples to send to KITT.AI for personal model training. +for ($samples = 0; $samples < 3; $samples++) { + $speech_blocks = 0; + $trailing_silence_blocks = 0; + @utterance_blocks = (); + $buffer = ''; + $i = 0; + + print "\n"; + printf "Now speak your sample %d:\n", $samples + 1; + while ($trailing_silence_blocks < TRAILING_SILENCE_BLOCKS) { + $stream->read($buffer, SAMPLES); + push @utterance_blocks, $buffer; + + if (isSilence($buffer)) { + if ($speech_blocks > MIN_SPEECH) { + print '.'; + $trailing_silence_blocks++; + } else { + # No good speech collected; restart. + print $spin[$i++], "\r"; + $i = 0 if $i == scalar @spin; + $speech_blocks = 0; + # FIFO - remove first block, shift array up. 
+ shift @utterance_blocks if scalar @utterance_blocks > FIFO_DEPTH; + } + } else { + print '*' if $speech_blocks > MIN_SPEECH; + $speech_blocks++; + $trailing_silence_blocks = 0; + } + } + + printf "\n"; + printf "Utterance is %.2f seconds long (%d blocks)\n", + (20 * (scalar @utterance_blocks) / 1000), scalar @utterance_blocks; + + $utterance[$samples] = join '', @utterance_blocks; +} +print "\n"; + +# Send API request to KITT.AI +$APIreq = encode_json({ + # gender => 'male', + # age_group => '40-49', + name => $hotword, + language => $language, + token => $api_token, + microphone => 'mobile', + voice_samples => [ + {wave => encode_base64(addWavHeader($utterance[0]))}, + {wave => encode_base64(addWavHeader($utterance[1]))}, + {wave => encode_base64(addWavHeader($utterance[2]))} + ] +}); + +$ua = LWP::UserAgent->new(debug => 1); +my $response = $ua->post(URL, + Content_Type => "application/json", + Content => $APIreq); + +$model_dir = "data"; +$time_str = time; +$hotword_name = $hotword; +$hotword_name =~ s/\s+/_/g; +if ($response->is_success) { + # Saves the generated models in the current working directory. + make_path($model_dir); + + # Saves samples. + for (0..2) { + $id = $_ + 1; + my $fh = IO::File->new( + ">$model_dir/${hotword_name}_${time_str}_sample${id}.wav"); + if (defined $fh) { + print $fh addWavHeader($utterance[$_]); + $fh->close; + } + } + + # Saves the generated personal model. + my $fh = IO::File->new(">$model_dir/${hotword_name}_${time_str}.pmdl"); + if (defined $fh) { + print $fh $response->content; + $fh->close; + } + + print "Model $model_dir/${hotword_name}_${time_str}.pmdl created.\n"; +} else { + print "Failed to create model:\n"; + die $response->status_line; +} + +sub isSilence { + my $samples = shift; + + # Unpack into an array of 16-bit linear samples + my $vec = vector(unpack('s*', $samples)); + my $stddev = round(stddev($vec)); + + return $stddev < $maxdev; +} + +# WAV format reference: http://soundfile.sapp.org/doc/WaveFormat/ +sub addWavHeader { + my $raw = shift; + my $header; + + my $byterate = RATE * NUMCHANNELS * BITSPERSAMPLE / 8; + my $blockalign = NUMCHANNELS * BITSPERSAMPLE / 8; + + $header = pack('A4VA4A4VvvVVvvA4V', + 'RIFF', + 36 + length $raw, + 'WAVE', + 'fmt', + 16, + 1, # PCM + 1, # Num Channels + RATE, + $byterate, + $blockalign, + BITSPERSAMPLE, + 'data', + length $raw + ); + + return $header . $raw; +} + +sub round { + my($number) = shift; + return int($number + .5); +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Perl/snowboy_googlevoice.pl b/src/Guide_stick_system/voice_assistant/snowboy/examples/Perl/snowboy_googlevoice.pl new file mode 100644 index 0000000..97b5d05 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Perl/snowboy_googlevoice.pl @@ -0,0 +1,235 @@ +#!/usr/bin/perl + +# This script first uses Snowboy to wake up, then collects audio and sends to +# Google Speech API for further recognition. It works with both personal and +# universal models. By default, it uses the Snowboy universal model at +# resources/models/snowboy.umdl, you can change it to other universal models, or +# your own personal models. You also have to provide your Google API key in +# order to use it. 
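# Usage (arguments in the order consumed below):
#   ./snowboy_googlevoice.pl [--language <code>] <google_api_key> [hotword_model]
# Overall flow, as implemented in this file:
#   1. Estimate a silence threshold and DC offset from ~1 s of ambient audio,
#      as in snowboy_RESTful_train.pl.
#   2. Feed 640-sample (40 ms) chunks to Snowboy until RunDetection reports
#      the hotword.
#   3. Buffer the following speech until ~500 ms of trailing silence
#      (12 blocks of 40 ms), POST the 16 kHz linear PCM to the Google Speech
#      API v2 endpoint, and print the top transcript with its latency.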
+ +use Snowboy; + +use Audio::PortAudio; +use Data::Dumper; +use Getopt::Long; +use IO::Handle; +use JSON; +use LWP::UserAgent; +use Statistics::Basic qw(:all); +use Time::HiRes qw(gettimeofday tv_interval); + +my $Usage = < [Hotword_Model] + e.g.: ./snowboy_googlevoice.pl \ + abcdefghijklmnopqrstuvwxyzABC0123456789 resources/models/snowboy.umdl + +Allowed options: + --language : Language for speech recognizer. (string, default="en") + +EOU + +my $language = "en"; +GetOptions('language=s' => \$language); + +if (@ARGV < 1 || @ARGV > 2) { + die $Usage; +} + +# Gets parameters. +my $api_key = shift @ARGV; +my $model = shift @ARGV || 'resources/models/snowboy.umdl'; + +if ($model eq 'resources/models/snowboy.umdl') { + $hotword = "Snowboy"; +} else { + $hotword = "your hotword"; +} + +# Output setting. +STDOUT->autoflush(1); +binmode STDOUT, ':utf8'; + +# Audio format. +use constant RATE => 16000; +use constant NUMCHANNELS => 1; +use constant BITSPERSAMPLE => 16; + +# Samples per data chunk count +use constant SAMPLES => 640; + +# Detects 500ms silence (12 blocks * 40 ms = after 500ms of speech) +use constant TRAILING_SILENCE_BLOCKS => 12; + +# Google Speech API endpoint (language-dependent). +$url = "http://www.google.com/speech-api/v2/recognize?lang=" + . $language + . "&key=" + . $api_key + . "&output=json&maxresults=1&grammar=builtin:search"; + +# Audio capturing. +my $api = Audio::PortAudio::default_host_api(); +my $device = $api->default_input_device; +my $stream = $device->open_read_stream( + {channel_count => NUMCHANNELS, sample_format => 'int16'}, + RATE, + SAMPLES); + +# Collects 1000 msec worth of voice data and calculates silence treshold and DC +# offset. +print "\n"; +print "Calculating statistics on silence, please be quite...\n"; +for ($i = 0; $i < (1 / (SAMPLES / RATE)); $i++) { + # SLN format = 2 bytes per sample. + $stream->read($buffer, SAMPLES); + + # Discards first (usually noisy) block. + next if not $i; + + # Unpacks into an array of 16-bit linear samples. + my $vec = vector(unpack('s*', $buffer)); + + my $stddev = round(stddev($vec)); + my $mean = round(mean($vec)); + + push @alldevs, $stddev; + push @allmeans, $mean; + + # printf "%.2f secs: mean: %d, stdddev: %d\n", + # $i * SAMPLES / RATE, $mean, $stddev; + + # Find AMX mean across all data chunks. + $maxdev = $stddev if $stddev > $maxdev; +} + +my $vec = vector(@alldevs); +$stddev = round(stddev($vec)); +$mean = round(mean($vec)); + +$maxdev = $mean + $stddev; + +# Too quiet (good silence supression, like SIP phones) +$maxdev = 100 if $maxdev < 100; + +# Add margin to silence detection to be safe. +$maxdev *= 2; + +$dcoffset = round(mean(@allmeans)); + +print "Done (Silence Threshold: $maxdev, DC Offset: $dcoffset)\n"; + +# Snowboy decoder. +$sb = new Snowboy::SnowboyDetect('resources/common.res', $model); +$sb->SetSensitivity('0.5'); +$sb->SetAudioGain(1.0); +$sb->ApplyFrontend(0); + +# Running the detection forever. +print "\n"; +print "Start by saying " . $hotword . "...\n"; +while (1) { + $stream->read($buffer, SAMPLES); + $processed = DSP($buffer); + + # Running the Snowboy detection. + $result = $sb->RunDetection($processed); + + $silence_blocks = 0; + $speech_blocks = 0; + $prespeech = ''; + $speechbuffer = ''; + + if ($result == 1) { + print 'Speak> '; + $sb->Reset(); + + while ($silence_blocks < TRAILING_SILENCE_BLOCKS) { + $stream->read($buffer, SAMPLES); + + # Buffer up (trim the leading silence). 
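+ # (Blocks seen before the fifth speech block are collected in $prespeech in
+ # the else-branch below instead, and the two buffers are concatenated before
+ # the request, so the leading edge of the phrase is preserved; trailing-silence
+ # counting only starts once at least 10 speech blocks have been heard.)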
+ $speechbuffer .= $buffer unless $speech_blocks < 5; + + if (isSilence($buffer)) { + # Counts blocks of 20ms silence after solid 500ms of speech. + $silence_blocks++ unless $speech_blocks < 10; + } else { + $silence_blocks = 0; + $speech_blocks++; + $prespeech .= $buffer unless $speech_blocks >= 5; + print '.'; + } + } + + print "\n"; + + $ua = LWP::UserAgent->new(debug => 1); + $t1 = [gettimeofday]; + my $response = $ua->post( + $url, + Content_Type => "audio/l16; rate=" . RATE, + Content => amp($prespeech . $speechbuffer)); + $t2 = [gettimeofday]; + + if ($response->is_success) { + my $resp = (split /\n/, $response->content)[1]; + next if not $resp; + $res = decode_json($resp); + + $result = $res->{result}[res->{result_index}] + ->{alternative}[0]->{transcript}; + } else { + delete $response->{'_request'}->{'_content'}; + print "Failed to do speech recognition from Google Speech API:\n"; + die $response->status_line; + } + + print "$result (", tv_interval ($t1, $t2), " sec)\n"; + print "\n"; + print "Start by saying " . $hotword . "...\n"; + } +} + +sub DSP { + my $mysamples = shift; + my @processed, @samples; + + # Removes DC offset. + @samples = unpack('s*', $mysamples); + + # Calculated DC offset for each voice data chunk. + # $mean = round(mean(@samples)); + + # Uses the same DC offset identified during training. + return pack('s*', map {$_ -= $dcoffset} @samples); +} + +sub isSilence { + my $samples = shift; + + # Unpacks into an array of 16-bit linear samples. + my $vec = vector(unpack('s*', $samples)); + my $stddev = round(stddev($vec)); + + return $stddev < $maxdev; +} + +sub amp { + my $samples = shift; + return pack 's*', map {$_ <<= 3} unpack('s*', $samples); +} + +sub round { + my($number) = shift; + return int($number + .5); +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Perl/snowboy_unit_test.pl b/src/Guide_stick_system/voice_assistant/snowboy/examples/Perl/snowboy_unit_test.pl new file mode 100644 index 0000000..b54cc30 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Perl/snowboy_unit_test.pl @@ -0,0 +1,31 @@ +#!/usr/bin/perl + +use Snowboy; +use Fcntl; + +# Positive test. +open WAV, 'resources/snowboy.wav'; + +# Set $INPUT_RECORD_SEPARATOR to undef so that we can read the full file. 
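+# (With $/ undefined, the <WAV> readline below slurps the entire
+# resources/snowboy.wav file into $data in a single read.)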
+local $/ = undef; +$data = ; +close WAV; + +$sb = new Snowboy::SnowboyDetect('resources/common.res', + 'resources/models/snowboy.umdl'); + +$sb->SetSensitivity ("0.5"); +$sb->SetAudioGain (1); +$sb->ApplyFrontend (0); + +print "==== SnowBoy object properties ====\n"; +print "Sample Rate : ", $sb->SampleRate(), "\n"; +print "Number of Channels : ", $sb->NumChannels(), "\n"; +print "Bits per Sample : ", $sb->BitsPerSample(), "\n"; +print "Number of hotwords : ", $sb->NumHotwords(), "\n\n"; + +if ($sb->RunDetection($data) > 0) { + print "Unit test passed!\n" +} else { + print "Unit test failed!\n" +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/__init__.py b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/demo.py b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/demo.py new file mode 100644 index 0000000..305b8b6 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/demo.py @@ -0,0 +1,35 @@ +import snowboydecoder +import sys +import signal + +interrupted = False + + +def signal_handler(signal, frame): + global interrupted + interrupted = True + + +def interrupt_callback(): + global interrupted + return interrupted + +if len(sys.argv) == 1: + print("Error: need to specify model name") + print("Usage: python demo.py your.model") + sys.exit(-1) + +model = sys.argv[1] + +# capture SIGINT signal, e.g., Ctrl+C +signal.signal(signal.SIGINT, signal_handler) + +detector = snowboydecoder.HotwordDetector(model, sensitivity=0.5) +print('Listening... Press Ctrl+C to exit') + +# main loop +detector.start(detected_callback=snowboydecoder.play_audio_file, + interrupt_check=interrupt_callback, + sleep_time=0.03) + +detector.terminate() diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/demo2.py b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/demo2.py new file mode 100644 index 0000000..fab9b98 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/demo2.py @@ -0,0 +1,41 @@ +import snowboydecoder +import sys +import signal + +# Demo code for listening to two hotwords at the same time + +interrupted = False + + +def signal_handler(signal, frame): + global interrupted + interrupted = True + + +def interrupt_callback(): + global interrupted + return interrupted + +if len(sys.argv) != 3: + print("Error: need to specify 2 model names") + print("Usage: python demo.py 1st.model 2nd.model") + sys.exit(-1) + +models = sys.argv[1:] + +# capture SIGINT signal, e.g., Ctrl+C +signal.signal(signal.SIGINT, signal_handler) + +sensitivity = [0.5]*len(models) +detector = snowboydecoder.HotwordDetector(models, sensitivity=sensitivity) +callbacks = [lambda: snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING), + lambda: snowboydecoder.play_audio_file(snowboydecoder.DETECT_DONG)] +print('Listening... 
Press Ctrl+C to exit') + +# main loop +# make sure you have the same numbers of callbacks and models +detector.start(detected_callback=callbacks, + interrupt_check=interrupt_callback, + sleep_time=0.03) + +detector.terminate() diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/demo3.py b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/demo3.py new file mode 100644 index 0000000..29ecec6 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/demo3.py @@ -0,0 +1,40 @@ +import snowboydecoder +import sys +import wave + +# Demo code for detecting hotword in a .wav file +# Example Usage: +# $ python demo3.py resources/snowboy.wav resources/models/snowboy.umdl +# Should print: +# Hotword Detected! +# +# $ python demo3.py resources/ding.wav resources/models/snowboy.umdl +# Should print: +# Hotword Not Detected! + + +if len(sys.argv) != 3: + print("Error: need to specify wave file name and model name") + print("Usage: python demo3.py wave_file model_file") + sys.exit(-1) + +wave_file = sys.argv[1] +model_file = sys.argv[2] + +f = wave.open(wave_file) +assert f.getnchannels() == 1, "Error: Snowboy only supports 1 channel of audio (mono, not stereo)" +assert f.getframerate() == 16000, "Error: Snowboy only supports 16K sampling rate" +assert f.getsampwidth() == 2, "Error: Snowboy only supports 16bit per sample" +data = f.readframes(f.getnframes()) +f.close() + +sensitivity = 0.5 +detection = snowboydecoder.HotwordDetector(model_file, sensitivity=sensitivity) + +ans = detection.detector.RunDetection(data) + +if ans == 1: + print('Hotword Detected!') +else: + print('Hotword Not Detected!') + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/demo4.py b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/demo4.py new file mode 100644 index 0000000..a59eeda --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/demo4.py @@ -0,0 +1,76 @@ +import snowboydecoder +import sys +import signal +import speech_recognition as sr +import os + +""" +This demo file shows you how to use the new_message_callback to interact with +the recorded audio after a keyword is spoken. It uses the speech recognition +library in order to convert the recorded audio into text. 
+ +Information on installing the speech recognition library can be found at: +https://pypi.python.org/pypi/SpeechRecognition/ +""" + + +interrupted = False + + +def audioRecorderCallback(fname): + print "converting audio to text" + r = sr.Recognizer() + with sr.AudioFile(fname) as source: + audio = r.record(source) # read the entire audio file + # recognize speech using Google Speech Recognition + try: + # for testing purposes, we're just using the default API key + # to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")` + # instead of `r.recognize_google(audio)` + print(r.recognize_google(audio)) + except sr.UnknownValueError: + print "Google Speech Recognition could not understand audio" + except sr.RequestError as e: + print "Could not request results from Google Speech Recognition service; {0}".format(e) + + os.remove(fname) + + + +def detectedCallback(): + sys.stdout.write("recording audio...") + sys.stdout.flush() + +def signal_handler(signal, frame): + global interrupted + interrupted = True + + +def interrupt_callback(): + global interrupted + return interrupted + +if len(sys.argv) == 1: + print "Error: need to specify model name" + print "Usage: python demo.py your.model" + sys.exit(-1) + +model = sys.argv[1] + +# capture SIGINT signal, e.g., Ctrl+C +signal.signal(signal.SIGINT, signal_handler) + +detector = snowboydecoder.HotwordDetector(model, sensitivity=0.38) +print "Listening... Press Ctrl+C to exit" + +# main loop +detector.start(detected_callback=detectedCallback, + audio_recorder_callback=audioRecorderCallback, + interrupt_check=interrupt_callback, + sleep_time=0.01) + +detector.terminate() + + + + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/demo_arecord.py b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/demo_arecord.py new file mode 100644 index 0000000..c14bf15 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/demo_arecord.py @@ -0,0 +1,35 @@ +import snowboydecoder_arecord +import sys +import signal + +interrupted = False + + +def signal_handler(signal, frame): + global interrupted + interrupted = True + + +def interrupt_callback(): + global interrupted + return interrupted + +if len(sys.argv) == 1: + print("Error: need to specify model name") + print("Usage: python demo.py your.model") + sys.exit(-1) + +model = sys.argv[1] + +# capture SIGINT signal, e.g., Ctrl+C +signal.signal(signal.SIGINT, signal_handler) + +detector = snowboydecoder_arecord.HotwordDetector(model, sensitivity=0.5) +print('Listening... 
Press Ctrl+C to exit') + +# main loop +detector.start(detected_callback=snowboydecoder_arecord.play_audio_file, + interrupt_check=interrupt_callback, + sleep_time=0.03) + +detector.terminate() diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/demo_threaded.py b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/demo_threaded.py new file mode 100644 index 0000000..ded146c --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/demo_threaded.py @@ -0,0 +1,47 @@ +import snowboythreaded +import sys +import signal +import time + +stop_program = False + +# This a demo that shows running Snowboy in another thread + + +def signal_handler(signal, frame): + global stop_program + stop_program = True + + +if len(sys.argv) == 1: + print("Error: need to specify model name") + print("Usage: python demo4.py your.model") + sys.exit(-1) + +model = sys.argv[1] + +# capture SIGINT signal, e.g., Ctrl+C +signal.signal(signal.SIGINT, signal_handler) + +# Initialize ThreadedDetector object and start the detection thread +threaded_detector = snowboythreaded.ThreadedDetector(model, sensitivity=0.5) +threaded_detector.start() + +print('Listening... Press Ctrl+C to exit') + +# main loop +threaded_detector.start_recog(sleep_time=0.03) + +# Let audio initialization happen before requesting input +time.sleep(1) + +# Do a simple task separate from the detection - addition of numbers +while not stop_program: + try: + num1 = int(raw_input("Enter the first number to add: ")) + num2 = int(raw_input("Enter the second number to add: ")) + print "Sum of number: {}".format(num1 + num2) + except ValueError: + print "You did not enter a number." + +threaded_detector.terminate() diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/requirements.txt b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/requirements.txt new file mode 100644 index 0000000..68d034a --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/requirements.txt @@ -0,0 +1 @@ +PyAudio==0.2.9 diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/resources b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/resources new file mode 100644 index 0000000..81bd1c5 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/resources @@ -0,0 +1 @@ +../../resources/ \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/snowboydecoder.py b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/snowboydecoder.py new file mode 100644 index 0000000..5da9aff --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/snowboydecoder.py @@ -0,0 +1,272 @@ +#!/usr/bin/env python + +import collections +import pyaudio +import snowboydetect +import time +import wave +import os +import logging +from ctypes import * +from contextlib import contextmanager + +logging.basicConfig() +logger = logging.getLogger("snowboy") +logger.setLevel(logging.INFO) +TOP_DIR = os.path.dirname(os.path.abspath(__file__)) + +RESOURCE_FILE = os.path.join(TOP_DIR, "resources/common.res") +DETECT_DING = os.path.join(TOP_DIR, "resources/ding.wav") +DETECT_DONG = os.path.join(TOP_DIR, "resources/dong.wav") + +def py_error_handler(filename, line, function, err, fmt): + pass + +ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p) + +c_error_handler = ERROR_HANDLER_FUNC(py_error_handler) + +@contextmanager +def 
no_alsa_error(): + try: + asound = cdll.LoadLibrary('libasound.so') + asound.snd_lib_error_set_handler(c_error_handler) + yield + asound.snd_lib_error_set_handler(None) + except: + yield + pass + +class RingBuffer(object): + """Ring buffer to hold audio from PortAudio""" + def __init__(self, size = 4096): + self._buf = collections.deque(maxlen=size) + + def extend(self, data): + """Adds data to the end of buffer""" + self._buf.extend(data) + + def get(self): + """Retrieves data from the beginning of buffer and clears it""" + tmp = bytes(bytearray(self._buf)) + self._buf.clear() + return tmp + + +def play_audio_file(fname=DETECT_DING): + """Simple callback function to play a wave file. By default it plays + a Ding sound. + + :param str fname: wave file name + :return: None + """ + ding_wav = wave.open(fname, 'rb') + ding_data = ding_wav.readframes(ding_wav.getnframes()) + with no_alsa_error(): + audio = pyaudio.PyAudio() + stream_out = audio.open( + format=audio.get_format_from_width(ding_wav.getsampwidth()), + channels=ding_wav.getnchannels(), + rate=ding_wav.getframerate(), input=False, output=True) + stream_out.start_stream() + stream_out.write(ding_data) + time.sleep(0.2) + stream_out.stop_stream() + stream_out.close() + audio.terminate() + + +class HotwordDetector(object): + """ + Snowboy decoder to detect whether a keyword specified by `decoder_model` + exists in a microphone input stream. + + :param decoder_model: decoder model file path, a string or a list of strings + :param resource: resource file path. + :param sensitivity: decoder sensitivity, a float of a list of floats. + The bigger the value, the more senstive the + decoder. If an empty list is provided, then the + default sensitivity in the model will be used. + :param audio_gain: multiply input volume by this factor. + :param apply_frontend: applies the frontend processing algorithm if True. 
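+
+    A minimal usage sketch (the model path and stop flag are illustrative; see
+    demo.py for a complete example)::
+
+        detector = HotwordDetector("resources/models/snowboy.umdl", sensitivity=0.5)
+        detector.start(detected_callback=play_audio_file,
+                       interrupt_check=lambda: should_stop,  # external stop flag
+                       sleep_time=0.03)
+        detector.terminate()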
+ """ + def __init__(self, decoder_model, + resource=RESOURCE_FILE, + sensitivity=[], + audio_gain=1, + apply_frontend=False): + + def audio_callback(in_data, frame_count, time_info, status): + self.ring_buffer.extend(in_data) + play_data = chr(0) * len(in_data) + return play_data, pyaudio.paContinue + + tm = type(decoder_model) + ts = type(sensitivity) + if tm is not list: + decoder_model = [decoder_model] + if ts is not list: + sensitivity = [sensitivity] + model_str = ",".join(decoder_model) + + self.detector = snowboydetect.SnowboyDetect( + resource_filename=resource.encode(), model_str=model_str.encode()) + self.detector.SetAudioGain(audio_gain) + self.detector.ApplyFrontend(apply_frontend) + self.num_hotwords = self.detector.NumHotwords() + + if len(decoder_model) > 1 and len(sensitivity) == 1: + sensitivity = sensitivity*self.num_hotwords + if len(sensitivity) != 0: + assert self.num_hotwords == len(sensitivity), \ + "number of hotwords in decoder_model (%d) and sensitivity " \ + "(%d) does not match" % (self.num_hotwords, len(sensitivity)) + sensitivity_str = ",".join([str(t) for t in sensitivity]) + if len(sensitivity) != 0: + self.detector.SetSensitivity(sensitivity_str.encode()) + + self.ring_buffer = RingBuffer( + self.detector.NumChannels() * self.detector.SampleRate() * 5) + with no_alsa_error(): + self.audio = pyaudio.PyAudio() + self.stream_in = self.audio.open( + input=True, output=False, + format=self.audio.get_format_from_width( + self.detector.BitsPerSample() / 8), + channels=self.detector.NumChannels(), + rate=self.detector.SampleRate(), + frames_per_buffer=2048, + stream_callback=audio_callback) + + + def start(self, detected_callback=play_audio_file, + interrupt_check=lambda: False, + sleep_time=0.03, + audio_recorder_callback=None, + silent_count_threshold=15, + recording_timeout=100): + """ + Start the voice detector. For every `sleep_time` second it checks the + audio buffer for triggering keywords. If detected, then call + corresponding function in `detected_callback`, which can be a single + function (single model) or a list of callback functions (multiple + models). Every loop it also calls `interrupt_check` -- if it returns + True, then breaks from the loop and return. + + :param detected_callback: a function or list of functions. The number of + items must match the number of models in + `decoder_model`. + :param interrupt_check: a function that returns True if the main loop + needs to stop. + :param float sleep_time: how much time in second every loop waits. + :param audio_recorder_callback: if specified, this will be called after + a keyword has been spoken and after the + phrase immediately after the keyword has + been recorded. The function will be + passed the name of the file where the + phrase was recorded. + :param silent_count_threshold: indicates how long silence must be heard + to mark the end of a phrase that is + being recorded. + :param recording_timeout: limits the maximum length of a recording. 
+ :return: None + """ + if interrupt_check(): + logger.debug("detect voice return") + return + + tc = type(detected_callback) + if tc is not list: + detected_callback = [detected_callback] + if len(detected_callback) == 1 and self.num_hotwords > 1: + detected_callback *= self.num_hotwords + + assert self.num_hotwords == len(detected_callback), \ + "Error: hotwords in your models (%d) do not match the number of " \ + "callbacks (%d)" % (self.num_hotwords, len(detected_callback)) + + logger.debug("detecting...") + + state = "PASSIVE" + while True: + if interrupt_check(): + logger.debug("detect voice break") + break + data = self.ring_buffer.get() + if len(data) == 0: + time.sleep(sleep_time) + continue + + status = self.detector.RunDetection(data) + if status == -1: + logger.warning("Error initializing streams or reading audio data") + + #small state machine to handle recording of phrase after keyword + if state == "PASSIVE": + if status > 0: #key word found + self.recordedData = [] + self.recordedData.append(data) + silentCount = 0 + recordingCount = 0 + message = "Keyword " + str(status) + " detected at time: " + message += time.strftime("%Y-%m-%d %H:%M:%S", + time.localtime(time.time())) + logger.info(message) + callback = detected_callback[status-1] + if callback is not None: + callback() + + if audio_recorder_callback is not None: + state = "ACTIVE" + continue + + elif state == "ACTIVE": + stopRecording = False + if recordingCount > recording_timeout: + stopRecording = True + elif status == -2: #silence found + if silentCount > silent_count_threshold: + stopRecording = True + else: + silentCount = silentCount + 1 + elif status == 0: #voice found + silentCount = 0 + + if stopRecording == True: + fname = self.saveMessage() + audio_recorder_callback(fname) + state = "PASSIVE" + continue + + recordingCount = recordingCount + 1 + self.recordedData.append(data) + + logger.debug("finished.") + + def saveMessage(self): + """ + Save the message stored in self.recordedData to a timestamped file. + """ + filename = 'output' + str(int(time.time())) + '.wav' + data = b''.join(self.recordedData) + + #use wave to save data + wf = wave.open(filename, 'wb') + wf.setnchannels(1) + wf.setsampwidth(self.audio.get_sample_size( + self.audio.get_format_from_width( + self.detector.BitsPerSample() / 8))) + wf.setframerate(self.detector.SampleRate()) + wf.writeframes(data) + wf.close() + logger.debug("finished saving: " + filename) + return filename + + def terminate(self): + """ + Terminate audio stream. Users cannot call start() again to detect. 
+ :return: None + """ + self.stream_in.stop_stream() + self.stream_in.close() + self.audio.terminate() diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/snowboydecoder_arecord.py b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/snowboydecoder_arecord.py new file mode 100644 index 0000000..efa3be4 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/snowboydecoder_arecord.py @@ -0,0 +1,181 @@ +#!/usr/bin/env python + +import collections +import snowboydetect +import time +import wave +import os +import logging +import subprocess +import threading + +logging.basicConfig() +logger = logging.getLogger("snowboy") +logger.setLevel(logging.INFO) +TOP_DIR = os.path.dirname(os.path.abspath(__file__)) + +RESOURCE_FILE = os.path.join(TOP_DIR, "resources/common.res") +DETECT_DING = os.path.join(TOP_DIR, "resources/ding.wav") +DETECT_DONG = os.path.join(TOP_DIR, "resources/dong.wav") + + +class RingBuffer(object): + """Ring buffer to hold audio from audio capturing tool""" + def __init__(self, size = 4096): + self._buf = collections.deque(maxlen=size) + + def extend(self, data): + """Adds data to the end of buffer""" + self._buf.extend(data) + + def get(self): + """Retrieves data from the beginning of buffer and clears it""" + tmp = bytes(bytearray(self._buf)) + self._buf.clear() + return tmp + + +def play_audio_file(fname=DETECT_DING): + """Simple callback function to play a wave file. By default it plays + a Ding sound. + + :param str fname: wave file name + :return: None + """ + os.system("aplay " + fname + " > /dev/null 2>&1") + + +class HotwordDetector(object): + """ + Snowboy decoder to detect whether a keyword specified by `decoder_model` + exists in a microphone input stream. + + :param decoder_model: decoder model file path, a string or a list of strings + :param resource: resource file path. + :param sensitivity: decoder sensitivity, a float of a list of floats. + The bigger the value, the more senstive the + decoder. If an empty list is provided, then the + default sensitivity in the model will be used. + :param audio_gain: multiply input volume by this factor. 
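+
+    A minimal usage sketch (the model path and stop flag are illustrative; audio
+    is read from an external arecord process rather than PyAudio; see
+    demo_arecord.py for a complete example)::
+
+        detector = HotwordDetector("resources/models/snowboy.umdl", sensitivity=0.5)
+        detector.start(detected_callback=play_audio_file,
+                       interrupt_check=lambda: should_stop,  # external stop flag
+                       sleep_time=0.03)
+        detector.terminate()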
+ """ + def __init__(self, decoder_model, + resource=RESOURCE_FILE, + sensitivity=[], + audio_gain=1): + + tm = type(decoder_model) + ts = type(sensitivity) + if tm is not list: + decoder_model = [decoder_model] + if ts is not list: + sensitivity = [sensitivity] + model_str = ",".join(decoder_model) + + self.detector = snowboydetect.SnowboyDetect( + resource_filename=resource.encode(), model_str=model_str.encode()) + self.detector.SetAudioGain(audio_gain) + self.num_hotwords = self.detector.NumHotwords() + + if len(decoder_model) > 1 and len(sensitivity) == 1: + sensitivity = sensitivity*self.num_hotwords + if len(sensitivity) != 0: + assert self.num_hotwords == len(sensitivity), \ + "number of hotwords in decoder_model (%d) and sensitivity " \ + "(%d) does not match" % (self.num_hotwords, len(sensitivity)) + sensitivity_str = ",".join([str(t) for t in sensitivity]) + if len(sensitivity) != 0: + self.detector.SetSensitivity(sensitivity_str.encode()) + + self.ring_buffer = RingBuffer( + self.detector.NumChannels() * self.detector.SampleRate() * 5) + + def record_proc(self): + CHUNK = 2048 + RECORD_RATE = 16000 + cmd = 'arecord -q -r %d -f S16_LE' % RECORD_RATE + process = subprocess.Popen(cmd.split(' '), + stdout = subprocess.PIPE, + stderr = subprocess.PIPE) + wav = wave.open(process.stdout, 'rb') + while self.recording: + data = wav.readframes(CHUNK) + self.ring_buffer.extend(data) + process.terminate() + + def init_recording(self): + """ + Start a thread for spawning arecord process and reading its stdout + """ + self.recording = True + self.record_thread = threading.Thread(target = self.record_proc) + self.record_thread.start() + + def start(self, detected_callback=play_audio_file, + interrupt_check=lambda: False, + sleep_time=0.03): + """ + Start the voice detector. For every `sleep_time` second it checks the + audio buffer for triggering keywords. If detected, then call + corresponding function in `detected_callback`, which can be a single + function (single model) or a list of callback functions (multiple + models). Every loop it also calls `interrupt_check` -- if it returns + True, then breaks from the loop and return. + + :param detected_callback: a function or list of functions. The number of + items must match the number of models in + `decoder_model`. + :param interrupt_check: a function that returns True if the main loop + needs to stop. + :param float sleep_time: how much time in second every loop waits. 
+ :return: None + """ + + self.init_recording() + + if interrupt_check(): + logger.debug("detect voice return") + return + + tc = type(detected_callback) + if tc is not list: + detected_callback = [detected_callback] + if len(detected_callback) == 1 and self.num_hotwords > 1: + detected_callback *= self.num_hotwords + + assert self.num_hotwords == len(detected_callback), \ + "Error: hotwords in your models (%d) do not match the number of " \ + "callbacks (%d)" % (self.num_hotwords, len(detected_callback)) + + logger.debug("detecting...") + + while True: + if interrupt_check(): + logger.debug("detect voice break") + break + data = self.ring_buffer.get() + if len(data) == 0: + time.sleep(sleep_time) + continue + + ans = self.detector.RunDetection(data) + if ans == -1: + logger.warning("Error initializing streams or reading audio data") + elif ans > 0: + message = "Keyword " + str(ans) + " detected at time: " + message += time.strftime("%Y-%m-%d %H:%M:%S", + time.localtime(time.time())) + logger.info(message) + callback = detected_callback[ans-1] + if callback is not None: + callback() + + logger.debug("finished.") + + def terminate(self): + """ + Terminate audio stream. Users cannot call start() again to detect. + :return: None + """ + self.recording = False + self.record_thread.join() + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/snowboythreaded.py b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/snowboythreaded.py new file mode 100644 index 0000000..7932d48 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python/snowboythreaded.py @@ -0,0 +1,96 @@ +import snowboydecoder +import threading +import Queue + + +class ThreadedDetector(threading.Thread): + """ + Wrapper class around detectors to run them in a separate thread + and provide methods to pause, resume, and modify detection + """ + + def __init__(self, models, **kwargs): + """ + Initialize Detectors object. **kwargs is for any __init__ keyword + arguments to be passed into HotWordDetector __init__() method. + """ + threading.Thread.__init__(self) + self.models = models + self.init_kwargs = kwargs + self.interrupted = True + self.commands = Queue.Queue() + self.vars_are_changed = True + self.detectors = None # Initialize when thread is run in self.run() + self.run_kwargs = None # Initialize when detectors start in self.start_recog() + + def initialize_detectors(self): + """ + Returns initialized Snowboy HotwordDetector objects + """ + self.detectors = snowboydecoder.HotwordDetector(self.models, **self.init_kwargs) + + def run(self): + """ + Runs in separate thread - waits on command to either run detectors + or terminate thread from commands queue + """ + try: + while True: + command = self.commands.get(True) + if command == "Start": + self.interrupted = False + if self.vars_are_changed: + # If there is an existing detector object, terminate it + if self.detectors is not None: + self.detectors.terminate() + self.initialize_detectors() + self.vars_are_changed = False + # Start detectors - blocks until interrupted by self.interrupted variable + self.detectors.start(interrupt_check=lambda: self.interrupted, **self.run_kwargs) + elif command == "Terminate": + # Program ending - terminate thread + break + finally: + if self.detectors is not None: + self.detectors.terminate() + + def start_recog(self, **kwargs): + """ + Starts recognition in thread. 
Accepts kwargs to pass into the + HotWordDetector.start() method, but does not accept interrupt_callback, + as that is already set up. + """ + assert "interrupt_check" not in kwargs, \ + "Cannot set interrupt_check argument. To interrupt detectors, use Detectors.pause_recog() instead" + self.run_kwargs = kwargs + self.commands.put("Start") + + def pause_recog(self): + """ + Halts recognition in thread. + """ + self.interrupted = True + + def terminate(self): + """ + Terminates recognition thread - called when program terminates + """ + self.pause_recog() + self.commands.put("Terminate") + + def is_running(self): + return not self.interrupted + + def change_models(self, models): + if self.is_running(): + print("Models will be changed after restarting detectors.") + if self.models != models: + self.models = models + self.vars_are_changed = True + + def change_sensitivity(self, sensitivity): + if self.is_running(): + print("Sensitivity will be changed after restarting detectors.") + if self.init_kwargs['sensitivity'] != sensitivity: + self.init_kwargs['sensitivity'] = sensitivity + self.vars_are_changed = True diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Python3/demo.py b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python3/demo.py new file mode 100644 index 0000000..305b8b6 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python3/demo.py @@ -0,0 +1,35 @@ +import snowboydecoder +import sys +import signal + +interrupted = False + + +def signal_handler(signal, frame): + global interrupted + interrupted = True + + +def interrupt_callback(): + global interrupted + return interrupted + +if len(sys.argv) == 1: + print("Error: need to specify model name") + print("Usage: python demo.py your.model") + sys.exit(-1) + +model = sys.argv[1] + +# capture SIGINT signal, e.g., Ctrl+C +signal.signal(signal.SIGINT, signal_handler) + +detector = snowboydecoder.HotwordDetector(model, sensitivity=0.5) +print('Listening... Press Ctrl+C to exit') + +# main loop +detector.start(detected_callback=snowboydecoder.play_audio_file, + interrupt_check=interrupt_callback, + sleep_time=0.03) + +detector.terminate() diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Python3/demo2.py b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python3/demo2.py new file mode 100644 index 0000000..fab9b98 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python3/demo2.py @@ -0,0 +1,41 @@ +import snowboydecoder +import sys +import signal + +# Demo code for listening to two hotwords at the same time + +interrupted = False + + +def signal_handler(signal, frame): + global interrupted + interrupted = True + + +def interrupt_callback(): + global interrupted + return interrupted + +if len(sys.argv) != 3: + print("Error: need to specify 2 model names") + print("Usage: python demo.py 1st.model 2nd.model") + sys.exit(-1) + +models = sys.argv[1:] + +# capture SIGINT signal, e.g., Ctrl+C +signal.signal(signal.SIGINT, signal_handler) + +sensitivity = [0.5]*len(models) +detector = snowboydecoder.HotwordDetector(models, sensitivity=sensitivity) +callbacks = [lambda: snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING), + lambda: snowboydecoder.play_audio_file(snowboydecoder.DETECT_DONG)] +print('Listening... 
Press Ctrl+C to exit') + +# main loop +# make sure you have the same numbers of callbacks and models +detector.start(detected_callback=callbacks, + interrupt_check=interrupt_callback, + sleep_time=0.03) + +detector.terminate() diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Python3/demo3.py b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python3/demo3.py new file mode 100644 index 0000000..29ecec6 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python3/demo3.py @@ -0,0 +1,40 @@ +import snowboydecoder +import sys +import wave + +# Demo code for detecting hotword in a .wav file +# Example Usage: +# $ python demo3.py resources/snowboy.wav resources/models/snowboy.umdl +# Should print: +# Hotword Detected! +# +# $ python demo3.py resources/ding.wav resources/models/snowboy.umdl +# Should print: +# Hotword Not Detected! + + +if len(sys.argv) != 3: + print("Error: need to specify wave file name and model name") + print("Usage: python demo3.py wave_file model_file") + sys.exit(-1) + +wave_file = sys.argv[1] +model_file = sys.argv[2] + +f = wave.open(wave_file) +assert f.getnchannels() == 1, "Error: Snowboy only supports 1 channel of audio (mono, not stereo)" +assert f.getframerate() == 16000, "Error: Snowboy only supports 16K sampling rate" +assert f.getsampwidth() == 2, "Error: Snowboy only supports 16bit per sample" +data = f.readframes(f.getnframes()) +f.close() + +sensitivity = 0.5 +detection = snowboydecoder.HotwordDetector(model_file, sensitivity=sensitivity) + +ans = detection.detector.RunDetection(data) + +if ans == 1: + print('Hotword Detected!') +else: + print('Hotword Not Detected!') + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Python3/demo4.py b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python3/demo4.py new file mode 100644 index 0000000..dc52119 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python3/demo4.py @@ -0,0 +1,75 @@ +import snowboydecoder +import sys +import signal +import speech_recognition as sr +import os + +""" +This demo file shows you how to use the new_message_callback to interact with +the recorded audio after a keyword is spoken. It uses the speech recognition +library in order to convert the recorded audio into text. 
+ +Information on installing the speech recognition library can be found at: +https://pypi.python.org/pypi/SpeechRecognition/ +""" + + +interrupted = False + + +def audioRecorderCallback(fname): + print("converting audio to text") + r = sr.Recognizer() + with sr.AudioFile(fname) as source: + audio = r.record(source) # read the entire audio file + # recognize speech using Google Speech Recognition + try: + # for testing purposes, we're just using the default API key + # to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")` + # instead of `r.recognize_google(audio)` + print(r.recognize_google(audio)) + except sr.UnknownValueError: + print("Google Speech Recognition could not understand audio") + except sr.RequestError as e: + print("Could not request results from Google Speech Recognition service; {0}".format(e)) + + os.remove(fname) + + + +def detectedCallback(): + print('recording audio...', end='', flush=True) + +def signal_handler(signal, frame): + global interrupted + interrupted = True + + +def interrupt_callback(): + global interrupted + return interrupted + +if len(sys.argv) == 1: + print("Error: need to specify model name") + print("Usage: python demo.py your.model") + sys.exit(-1) + +model = sys.argv[1] + +# capture SIGINT signal, e.g., Ctrl+C +signal.signal(signal.SIGINT, signal_handler) + +detector = snowboydecoder.HotwordDetector(model, sensitivity=0.38) +print('Listening... Press Ctrl+C to exit') + +# main loop +detector.start(detected_callback=detectedCallback, + audio_recorder_callback=audioRecorderCallback, + interrupt_check=interrupt_callback, + sleep_time=0.01) + +detector.terminate() + + + + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Python3/requirements.txt b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python3/requirements.txt new file mode 100644 index 0000000..d8614c7 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python3/requirements.txt @@ -0,0 +1 @@ +../Python/requirements.txt \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Python3/resources b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python3/resources new file mode 100644 index 0000000..81bd1c5 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python3/resources @@ -0,0 +1 @@ +../../resources/ \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/Python3/snowboydecoder.py b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python3/snowboydecoder.py new file mode 100644 index 0000000..34ee26f --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/Python3/snowboydecoder.py @@ -0,0 +1,277 @@ +#!/usr/bin/env python + +import collections +import pyaudio +from . 
import snowboydetect +import time +import wave +import os +import logging +from ctypes import * +from contextlib import contextmanager + +logging.basicConfig() +logger = logging.getLogger("snowboy") +logger.setLevel(logging.INFO) +TOP_DIR = os.path.dirname(os.path.abspath(__file__)) + +RESOURCE_FILE = os.path.join(TOP_DIR, "resources/common.res") +DETECT_DING = os.path.join(TOP_DIR, "resources/ding.wav") +DETECT_DONG = os.path.join(TOP_DIR, "resources/dong.wav") + +def py_error_handler(filename, line, function, err, fmt): + pass + +ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p) + +c_error_handler = ERROR_HANDLER_FUNC(py_error_handler) + +@contextmanager +def no_alsa_error(): + try: + asound = cdll.LoadLibrary('libasound.so') + asound.snd_lib_error_set_handler(c_error_handler) + yield + asound.snd_lib_error_set_handler(None) + except: + yield + pass + +class RingBuffer(object): + """Ring buffer to hold audio from PortAudio""" + + def __init__(self, size=4096): + self._buf = collections.deque(maxlen=size) + + def extend(self, data): + """Adds data to the end of buffer""" + self._buf.extend(data) + + def get(self): + """Retrieves data from the beginning of buffer and clears it""" + tmp = bytes(bytearray(self._buf)) + self._buf.clear() + return tmp + + +def play_audio_file(fname=DETECT_DING): + """Simple callback function to play a wave file. By default it plays + a Ding sound. + + :param str fname: wave file name + :return: None + """ + ding_wav = wave.open(fname, 'rb') + ding_data = ding_wav.readframes(ding_wav.getnframes()) + with no_alsa_error(): + audio = pyaudio.PyAudio() + stream_out = audio.open( + format=audio.get_format_from_width(ding_wav.getsampwidth()), + channels=ding_wav.getnchannels(), + rate=ding_wav.getframerate(), input=False, output=True) + stream_out.start_stream() + stream_out.write(ding_data) + time.sleep(0.2) + stream_out.stop_stream() + stream_out.close() + audio.terminate() + + +class HotwordDetector(object): + """ + Snowboy decoder to detect whether a keyword specified by `decoder_model` + exists in a microphone input stream. + + :param decoder_model: decoder model file path, a string or a list of strings + :param resource: resource file path. + :param sensitivity: decoder sensitivity, a float of a list of floats. + The bigger the value, the more senstive the + decoder. If an empty list is provided, then the + default sensitivity in the model will be used. + :param audio_gain: multiply input volume by this factor. + :param apply_frontend: applies the frontend processing algorithm if True. 
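+
+    A minimal multi-hotword sketch (model names and the stop flag are
+    illustrative; see demo2.py for a complete example)::
+
+        detector = HotwordDetector(["first.pmdl", "second.pmdl"],
+                                   sensitivity=[0.5, 0.5])
+        detector.start(detected_callback=[lambda: play_audio_file(DETECT_DING),
+                                          lambda: play_audio_file(DETECT_DONG)],
+                       interrupt_check=lambda: should_stop,  # external stop flag
+                       sleep_time=0.03)
+        detector.terminate()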
+ """ + + def __init__(self, decoder_model, + resource=RESOURCE_FILE, + sensitivity=[], + audio_gain=1, + apply_frontend=False): + + tm = type(decoder_model) + ts = type(sensitivity) + if tm is not list: + decoder_model = [decoder_model] + if ts is not list: + sensitivity = [sensitivity] + model_str = ",".join(decoder_model) + + self.detector = snowboydetect.SnowboyDetect( + resource_filename=resource.encode(), model_str=model_str.encode()) + self.detector.SetAudioGain(audio_gain) + self.detector.ApplyFrontend(apply_frontend) + self.num_hotwords = self.detector.NumHotwords() + + if len(decoder_model) > 1 and len(sensitivity) == 1: + sensitivity = sensitivity * self.num_hotwords + if len(sensitivity) != 0: + assert self.num_hotwords == len(sensitivity), \ + "number of hotwords in decoder_model (%d) and sensitivity " \ + "(%d) does not match" % (self.num_hotwords, len(sensitivity)) + sensitivity_str = ",".join([str(t) for t in sensitivity]) + if len(sensitivity) != 0: + self.detector.SetSensitivity(sensitivity_str.encode()) + + self.ring_buffer = RingBuffer( + self.detector.NumChannels() * self.detector.SampleRate() * 5) + + def start(self, detected_callback=play_audio_file, + interrupt_check=lambda: False, + sleep_time=0.03, + audio_recorder_callback=None, + silent_count_threshold=15, + recording_timeout=100): + """ + Start the voice detector. For every `sleep_time` second it checks the + audio buffer for triggering keywords. If detected, then call + corresponding function in `detected_callback`, which can be a single + function (single model) or a list of callback functions (multiple + models). Every loop it also calls `interrupt_check` -- if it returns + True, then breaks from the loop and return. + + :param detected_callback: a function or list of functions. The number of + items must match the number of models in + `decoder_model`. + :param interrupt_check: a function that returns True if the main loop + needs to stop. + :param float sleep_time: how much time in second every loop waits. + :param audio_recorder_callback: if specified, this will be called after + a keyword has been spoken and after the + phrase immediately after the keyword has + been recorded. The function will be + passed the name of the file where the + phrase was recorded. + :param silent_count_threshold: indicates how long silence must be heard + to mark the end of a phrase that is + being recorded. + :param recording_timeout: limits the maximum length of a recording. 
+ :return: None + """ + self._running = True + + def audio_callback(in_data, frame_count, time_info, status): + self.ring_buffer.extend(in_data) + play_data = chr(0) * len(in_data) + return play_data, pyaudio.paContinue + + with no_alsa_error(): + self.audio = pyaudio.PyAudio() + self.stream_in = self.audio.open( + input=True, output=False, + format=self.audio.get_format_from_width( + self.detector.BitsPerSample() / 8), + channels=self.detector.NumChannels(), + rate=self.detector.SampleRate(), + frames_per_buffer=2048, + stream_callback=audio_callback) + + if interrupt_check(): + logger.debug("detect voice return") + return + + tc = type(detected_callback) + if tc is not list: + detected_callback = [detected_callback] + if len(detected_callback) == 1 and self.num_hotwords > 1: + detected_callback *= self.num_hotwords + + assert self.num_hotwords == len(detected_callback), \ + "Error: hotwords in your models (%d) do not match the number of " \ + "callbacks (%d)" % (self.num_hotwords, len(detected_callback)) + + logger.debug("detecting...") + + state = "PASSIVE" + while self._running is True: + if interrupt_check(): + logger.debug("detect voice break") + break + data = self.ring_buffer.get() + if len(data) == 0: + time.sleep(sleep_time) + continue + + status = self.detector.RunDetection(data) + if status == -1: + logger.warning("Error initializing streams or reading audio data") + + #small state machine to handle recording of phrase after keyword + if state == "PASSIVE": + if status > 0: #key word found + self.recordedData = [] + self.recordedData.append(data) + silentCount = 0 + recordingCount = 0 + message = "Keyword " + str(status) + " detected at time: " + message += time.strftime("%Y-%m-%d %H:%M:%S", + time.localtime(time.time())) + logger.info(message) + callback = detected_callback[status-1] + if callback is not None: + callback() + + if audio_recorder_callback is not None: + state = "ACTIVE" + continue + + elif state == "ACTIVE": + stopRecording = False + if recordingCount > recording_timeout: + stopRecording = True + elif status == -2: #silence found + if silentCount > silent_count_threshold: + stopRecording = True + else: + silentCount = silentCount + 1 + elif status == 0: #voice found + silentCount = 0 + + if stopRecording == True: + fname = self.saveMessage() + audio_recorder_callback(fname) + state = "PASSIVE" + continue + + recordingCount = recordingCount + 1 + self.recordedData.append(data) + + logger.debug("finished.") + + def saveMessage(self): + """ + Save the message stored in self.recordedData to a timestamped file. + """ + filename = 'output' + str(int(time.time())) + '.wav' + data = b''.join(self.recordedData) + + #use wave to save data + wf = wave.open(filename, 'wb') + wf.setnchannels(1) + wf.setsampwidth(self.audio.get_sample_size( + self.audio.get_format_from_width( + self.detector.BitsPerSample() / 8))) + wf.setframerate(self.detector.SampleRate()) + wf.writeframes(data) + wf.close() + logger.debug("finished saving: " + filename) + return filename + + def terminate(self): + """ + Terminate audio stream. Users can call start() again to detect. 
+ :return: None + """ + self.stream_in.stop_stream() + self.stream_in.close() + self.audio.terminate() + self._running = False diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/REST_API/training_service.py b/src/Guide_stick_system/voice_assistant/snowboy/examples/REST_API/training_service.py new file mode 100644 index 0000000..4a60c7f --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/REST_API/training_service.py @@ -0,0 +1,52 @@ +#! /usr/bin/evn python + +import sys +import base64 +import requests + + +def get_wave(fname): + with open(fname) as infile: + return base64.b64encode(infile.read()) + + +endpoint = "https://snowboy.kitt.ai/api/v1/train/" + +############# MODIFY THE FOLLOWING ############# +token = "" +hotword_name = "???" +language = "en" +age_group = "20_29" +gender = "M" +microphone = "??" # e.g., macbook pro microphone +############### END OF MODIFY ################## + +if __name__ == "__main__": + try: + [_, wav1, wav2, wav3, out] = sys.argv + except ValueError: + print "Usage: %s wave_file1 wave_file2 wave_file3 out_model_name" % sys.argv[0] + sys.exit() + + data = { + "name": hotword_name, + "language": language, + "age_group": age_group, + "gender": gender, + "microphone": microphone, + "token": token, + "voice_samples": [ + {"wave": get_wave(wav1)}, + {"wave": get_wave(wav2)}, + {"wave": get_wave(wav3)} + ] + } + + response = requests.post(endpoint, json=data) + if response.ok: + with open(out, "w") as outfile: + outfile.write(response.content) + print "Saved model to '%s'." % out + else: + print "Request failed." + print response.text diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/REST_API/training_service.sh b/src/Guide_stick_system/voice_assistant/snowboy/examples/REST_API/training_service.sh new file mode 100644 index 0000000..d19c8ee --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/REST_API/training_service.sh @@ -0,0 +1,39 @@ +#! /usr/bin/env bash +ENDPOINT="https://snowboy.kitt.ai/api/v1/train/" + +############# MODIFY THE FOLLOWING ############# +TOKEN="??" +NAME="??" +LANGUAGE="en" +AGE_GROUP="20_29" +GENDER="M" +MICROPHONE="??" 
# e.g., PS3 Eye +############### END OF MODIFY ################## + +if [[ "$#" != 4 ]]; then + printf "Usage: %s wave_file1 wave_file2 wave_file3 out_model_name" $0 + exit +fi + +WAV1=`base64 $1` +WAV2=`base64 $2` +WAV3=`base64 $3` +OUTFILE="$4" + +cat <data.json +{ + "name": "$NAME", + "language": "$LANGUAGE", + "age_group": "$AGE_GROUP", + "token": "$TOKEN", + "gender": "$GENDER", + "microphone": "$MICROPHONE", + "voice_samples": [ + {"wave": "$WAV1"}, + {"wave": "$WAV2"}, + {"wave": "$WAV3"} + ] +} +EOF + +curl -H "Content-Type: application/json" -X POST -d @data.json $ENDPOINT > $OUTFILE diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Podfile b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Podfile new file mode 100644 index 0000000..a51749c --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Podfile @@ -0,0 +1,8 @@ +platform :ios, “10.0” + +target 'SnowboyTest' do + +pod "EZAudio" + + +end diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Podfile.lock b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Podfile.lock new file mode 100644 index 0000000..7838365 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Podfile.lock @@ -0,0 +1,19 @@ +PODS: + - EZAudio (1.1.5): + - EZAudio/Full (= 1.1.5) + - EZAudio/Core (1.1.5) + - EZAudio/Full (1.1.5): + - EZAudio/Core + - TPCircularBuffer (= 1.1) + - TPCircularBuffer (1.1) + +DEPENDENCIES: + - EZAudio + +SPEC CHECKSUMS: + EZAudio: 3b1cb4b6d7ebca68f0c2478576d75940aad4aa99 + TPCircularBuffer: 4960a919e667280f7f38963d771f45871b64fe62 + +PODFILE CHECKSUM: cb3967f691a7245e97aa9d41e36b4522c55ccda1 + +COCOAPODS: 1.2.0.beta.1 diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudio.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudio.h new file mode 100644 index 0000000..d552983 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudio.h @@ -0,0 +1,528 @@ +// +// EZAudio.h +// EZAudio +// +// Created by Syed Haris Ali on 11/21/13. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import + +//! Project version number for teat. +FOUNDATION_EXPORT double EZAudioVersionNumber; + +//! Project version string for teat. 
+FOUNDATION_EXPORT const unsigned char EZAudioVersionString[]; + +//------------------------------------------------------------------------------ +#pragma mark - Core Components +//------------------------------------------------------------------------------ + +#import "EZAudioDevice.h" +#import "EZAudioFile.h" +#import "EZMicrophone.h" +#import "EZOutput.h" +#import "EZRecorder.h" +#import "EZAudioPlayer.h" + +//------------------------------------------------------------------------------ +#pragma mark - Interface Components +//------------------------------------------------------------------------------ + +#import "EZPlot.h" +#import "EZAudioDisplayLink.h" +#import "EZAudioPlot.h" +#import "EZAudioPlotGL.h" + +//------------------------------------------------------------------------------ +#pragma mark - Utility Components +//------------------------------------------------------------------------------ + +#import "EZAudioFFT.h" +#import "EZAudioFloatConverter.h" +#import "EZAudioFloatData.h" +#import "EZAudioUtilities.h" + +//------------------------------------------------------------------------------ + +/** + EZAudio is a simple, intuitive framework for iOS and OSX. The goal of EZAudio was to provide a modular, cross-platform framework to simplify performing everyday audio operations like getting microphone input, creating audio waveforms, recording/playing audio files, etc. The visualization tools like the EZAudioPlot and EZAudioPlotGL were created to plug right into the framework's various components and provide highly optimized drawing routines that work in harmony with audio callback loops. All components retain the same namespace whether you're on an iOS device or a Mac computer so an EZAudioPlot understands it will subclass an UIView on an iOS device or an NSView on a Mac. + + Class methods for EZAudio are provided as utility methods used throughout the other modules within the framework. For instance, these methods help make sense of error codes (checkResult:operation:), map values betwen coordinate systems (MAP:leftMin:leftMax:rightMin:rightMax:), calculate root mean squared values for buffers (RMS:length:), etc. + + @warning As of 1.0 these methods have been moved over to `EZAudioUtilities` to allow using specific modules without requiring the whole library. + */ +@interface EZAudio : NSObject + +//------------------------------------------------------------------------------ +#pragma mark - Debugging +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Debugging EZAudio +///----------------------------------------------------------- + +/** + Globally sets whether or not the program should exit if a `checkResult:operation:` operation fails. Currently the behavior on EZAudio is to quit if a `checkResult:operation:` fails, but this is not desirable in any production environment. Internally there are a lot of `checkResult:operation:` operations used on all the core classes. This should only ever be set to NO in production environments since a `checkResult:operation:` failing means something breaking has likely happened. + @param shouldExitOnCheckResultFail A BOOL indicating whether or not the running program should exist due to a `checkResult:operation:` fail. + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. 
+ */ ++ (void)setShouldExitOnCheckResultFail:(BOOL)shouldExitOnCheckResultFail __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Provides a flag indicating whether or not the program will exit if a `checkResult:operation:` fails. + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + @return A BOOL indicating whether or not the program will exit if a `checkResult:operation:` fails. + */ ++ (BOOL)shouldExitOnCheckResultFail __attribute__((deprecated)); + +//------------------------------------------------------------------------------ +#pragma mark - AudioBufferList Utility +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name AudioBufferList Utility +///----------------------------------------------------------- + +/** + Allocates an AudioBufferList structure. Make sure to call freeBufferList when done using AudioBufferList or it will leak. + @param frames The number of frames that will be stored within each audio buffer + @param channels The number of channels (e.g. 2 for stereo, 1 for mono, etc.) + @param interleaved Whether the samples will be interleaved (if not it will be assumed to be non-interleaved and each channel will have an AudioBuffer allocated) + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + @return An AudioBufferList struct that has been allocated in memory + */ ++ (AudioBufferList *)audioBufferListWithNumberOfFrames:(UInt32)frames + numberOfChannels:(UInt32)channels + interleaved:(BOOL)interleaved __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Allocates an array of float arrays given the number of frames needed to store in each float array. + @param frames A UInt32 representing the number of frames to store in each float buffer + @param channels A UInt32 representing the number of channels (i.e. the number of float arrays to allocate) + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + @return An array of float arrays, each the length of the number of frames specified + */ ++ (float **)floatBuffersWithNumberOfFrames:(UInt32)frames + numberOfChannels:(UInt32)channels __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Deallocates an AudioBufferList structure from memory. + @param bufferList A pointer to the buffer list you would like to free + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + */ ++ (void)freeBufferList:(AudioBufferList *)bufferList __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Deallocates an array of float buffers + @param buffers An array of float arrays + @param channels A UInt32 representing the number of channels (i.e. the number of float arrays to deallocate) + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. 
+ */ ++ (void)freeFloatBuffers:(float **)buffers numberOfChannels:(UInt32)channels __attribute__((deprecated)); + +//------------------------------------------------------------------------------ +#pragma mark - AudioStreamBasicDescription Utilties +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Creating An AudioStreamBasicDescription +///----------------------------------------------------------- + +/** + Creates a signed-integer, interleaved AudioStreamBasicDescription for the number of channels specified for an AIFF format. + @param channels The desired number of channels + @param sampleRate A float representing the sample rate. + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + @return A new AudioStreamBasicDescription with the specified format. + */ ++ (AudioStreamBasicDescription)AIFFFormatWithNumberOfChannels:(UInt32)channels + sampleRate:(float)sampleRate __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Creates an AudioStreamBasicDescription for the iLBC narrow band speech codec. + @param sampleRate A float representing the sample rate. + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + @return A new AudioStreamBasicDescription with the specified format. + */ ++ (AudioStreamBasicDescription)iLBCFormatWithSampleRate:(float)sampleRate __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Creates a float-based, non-interleaved AudioStreamBasicDescription for the number of channels specified. + @param channels A UInt32 representing the number of channels. + @param sampleRate A float representing the sample rate. + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + @return A float-based AudioStreamBasicDescription with the number of channels specified. + */ ++ (AudioStreamBasicDescription)floatFormatWithNumberOfChannels:(UInt32)channels + sampleRate:(float)sampleRate __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Creates an AudioStreamBasicDescription for an M4A AAC format. + @param channels The desired number of channels + @param sampleRate A float representing the sample rate. + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + @return A new AudioStreamBasicDescription with the specified format. + */ ++ (AudioStreamBasicDescription)M4AFormatWithNumberOfChannels:(UInt32)channels + sampleRate:(float)sampleRate __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Creates a single-channel, float-based AudioStreamBasicDescription. + @param sampleRate A float representing the sample rate. + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + @return A new AudioStreamBasicDescription with the specified format. 
+ */ ++ (AudioStreamBasicDescription)monoFloatFormatWithSampleRate:(float)sampleRate __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Creates a single-channel, float-based AudioStreamBasicDescription (as of 0.0.6 this is the same as `monoFloatFormatWithSampleRate:`). + @param sampleRate A float representing the sample rate. + @return A new AudioStreamBasicDescription with the specified format. + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + */ ++ (AudioStreamBasicDescription)monoCanonicalFormatWithSampleRate:(float)sampleRate __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Creates a two-channel, non-interleaved, float-based AudioStreamBasicDescription (as of 0.0.6 this is the same as `stereoFloatNonInterleavedFormatWithSampleRate:`). + @param sampleRate A float representing the sample rate. + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + @return A new AudioStreamBasicDescription with the specified format. + */ ++ (AudioStreamBasicDescription)stereoCanonicalNonInterleavedFormatWithSampleRate:(float)sampleRate __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Creates a two-channel, interleaved, float-based AudioStreamBasicDescription. + @param sampleRate A float representing the sample rate. + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + @return A new AudioStreamBasicDescription with the specified format. + */ ++ (AudioStreamBasicDescription)stereoFloatInterleavedFormatWithSampleRate:(float)sampleRate __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Creates a two-channel, non-interleaved, float-based AudioStreamBasicDescription. + @param sampleRate A float representing the sample rate. + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + @return A new AudioStreamBasicDescription with the specified format. + */ ++ (AudioStreamBasicDescription)stereoFloatNonInterleavedFormatWithSampleRate:(float)sampleRate __attribute__((deprecated)); + +//------------------------------------------------------------------------------ +// @name AudioStreamBasicDescription Helper Functions +//------------------------------------------------------------------------------ + +/** + Checks an AudioStreamBasicDescription to see if it is a float-based format (as opposed to a signed integer based format). + @param asbd A valid AudioStreamBasicDescription + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + @return A BOOL indicating whether or not the AudioStreamBasicDescription is a float format. 
+ */ ++ (BOOL)isFloatFormat:(AudioStreamBasicDescription)asbd __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Checks an AudioStreamBasicDescription to check for an interleaved flag (samples are + stored in one buffer one after another instead of two (or n channels) parallel buffers + @param asbd A valid AudioStreamBasicDescription + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + @return A BOOL indicating whether or not the AudioStreamBasicDescription is interleaved + */ ++ (BOOL)isInterleaved:(AudioStreamBasicDescription)asbd __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Checks an AudioStreamBasicDescription to see if it is a linear PCM format (uncompressed, + 1 frame per packet) + @param asbd A valid AudioStreamBasicDescription + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + @return A BOOL indicating whether or not the AudioStreamBasicDescription is linear PCM. + */ ++ (BOOL)isLinearPCM:(AudioStreamBasicDescription)asbd __attribute__((deprecated)); + +///----------------------------------------------------------- +/// @name AudioStreamBasicDescription Utilities +///----------------------------------------------------------- + +/** + Nicely logs out the contents of an AudioStreamBasicDescription struct + @param asbd The AudioStreamBasicDescription struct with content to print out + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + */ ++ (void)printASBD:(AudioStreamBasicDescription)asbd __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Converts seconds into a string formatted as MM:SS + @param seconds An NSTimeInterval representing the number of seconds + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + @return An NSString instance formatted as MM:SS from the seconds provided. + */ ++ (NSString *)displayTimeStringFromSeconds:(NSTimeInterval)seconds __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Creates a string to use when logging out the contents of an AudioStreamBasicDescription + @param asbd A valid AudioStreamBasicDescription struct. + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + @return An NSString representing the contents of the AudioStreamBasicDescription. + */ ++ (NSString *)stringForAudioStreamBasicDescription:(AudioStreamBasicDescription)asbd __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Just a wrapper around the setCanonical function provided in the Core Audio Utility C++ class. + @param asbd The AudioStreamBasicDescription structure to modify + @param nChannels The number of expected channels on the description + @param interleaved A flag indicating whether the stereo samples should be interleaved in the buffer + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. 
+ */ ++ (void)setCanonicalAudioStreamBasicDescription:(AudioStreamBasicDescription*)asbd + numberOfChannels:(UInt32)nChannels + interleaved:(BOOL)interleaved __attribute__((deprecated)); + +//------------------------------------------------------------------------------ +#pragma mark - Math Utilities +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Math Utilities +///----------------------------------------------------------- + +/** + Appends an array of values to a history buffer and performs an internal shift to add the values to the tail and removes the same number of values from the head. + @param buffer A float array of values to append to the tail of the history buffer + @param bufferLength The length of the float array being appended to the history buffer + @param scrollHistory The target history buffer in which to append the values + @param scrollHistoryLength The length of the target history buffer + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + */ ++ (void)appendBufferAndShift:(float*)buffer + withBufferSize:(int)bufferLength + toScrollHistory:(float*)scrollHistory + withScrollHistorySize:(int)scrollHistoryLength __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Appends a value to a history buffer and performs an internal shift to add the value to the tail and remove the 0th value. + @param value The float value to append to the history array + @param scrollHistory The target history buffer in which to append the values + @param scrollHistoryLength The length of the target history buffer + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + */ ++(void) appendValue:(float)value + toScrollHistory:(float*)scrollHistory + withScrollHistorySize:(int)scrollHistoryLength __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Maps a value from one coordinate system into another one. Takes in the current value to map, the minimum and maximum values of the first coordinate system, and the minimum and maximum values of the second coordinate system and calculates the mapped value in the second coordinate system's constraints. + @param value The value expressed in the first coordinate system + @param leftMin The minimum of the first coordinate system + @param leftMax The maximum of the first coordinate system + @param rightMin The minimum of the second coordindate system + @param rightMax The maximum of the second coordinate system + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + @return The mapped value in terms of the second coordinate system + */ ++ (float)MAP:(float)value + leftMin:(float)leftMin + leftMax:(float)leftMax + rightMin:(float)rightMin + rightMax:(float)rightMax __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Calculates the root mean squared for a buffer. + @param buffer A float buffer array of values whose root mean squared to calculate + @param bufferSize The size of the float buffer + @deprecated This method is deprecated starting in version 0.1.0. 
+ @note Please use same method in EZAudioUtilities class instead. + @return The root mean squared of the buffer + */ ++ (float)RMS:(float*)buffer length:(int)bufferSize __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Calculate the sign function sgn(x) = + { -1 , x < 0, + { 0 , x = 0, + { 1 , x > 0 + @param value The float value for which to use as x + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + @return The float sign value + */ ++ (float)SGN:(float)value __attribute__((deprecated)); + +//------------------------------------------------------------------------------ +#pragma mark - OSStatus Utility +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name OSStatus Utility +///----------------------------------------------------------- + +/** + Basic check result function useful for checking each step of the audio setup process + @param result The OSStatus representing the result of an operation + @param operation A string (const char, not NSString) describing the operation taking place (will print if fails) + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + */ ++ (void)checkResult:(OSStatus)result operation:(const char *)operation __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Provides a string representation of the often cryptic Core Audio error codes + @param code A UInt32 representing an error code + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + @return An NSString with a human readable version of the error code. + */ ++ (NSString *)stringFromUInt32Code:(UInt32)code __attribute__((deprecated)); + +//------------------------------------------------------------------------------ +#pragma mark - Plot Utility +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Plot Utility +///----------------------------------------------------------- + +/** + Given a buffer representing a window of float history data this append the RMS of a buffer of incoming float data...This will likely be deprecated in a future version of EZAudio for a circular buffer based approach. + @param scrollHistory An array of float arrays being used to hold the history values for each channel. + @param scrollHistoryLength An int representing the length of the history window. + @param index An int pointer to the index of the current read index of the history buffer. + @param buffer A float array representing the incoming audio data. + @param bufferSize An int representing the length of the incoming audio data. + @param isChanging A BOOL pointer representing whether the resolution (length of the history window) is currently changing. + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. 
+ */ ++ (void)updateScrollHistory:(float **)scrollHistory + withLength:(int)scrollHistoryLength + atIndex:(int *)index + withBuffer:(float *)buffer + withBufferSize:(int)bufferSize + isResolutionChanging:(BOOL *)isChanging __attribute__((deprecated)); + +//------------------------------------------------------------------------------ +#pragma mark - TPCircularBuffer Utility +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name TPCircularBuffer Utility +///----------------------------------------------------------- + +/** + Appends the data from the audio buffer list to the circular buffer + @param circularBuffer Pointer to the instance of the TPCircularBuffer to add the audio data to + @param audioBufferList Pointer to the instance of the AudioBufferList with the audio data + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + */ ++ (void)appendDataToCircularBuffer:(TPCircularBuffer*)circularBuffer + fromAudioBufferList:(AudioBufferList*)audioBufferList __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Initializes the circular buffer (just a wrapper around the C method) + @param circularBuffer Pointer to an instance of the TPCircularBuffer + @param size The length of the TPCircularBuffer (usually 1024) + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + */ ++ (void)circularBuffer:(TPCircularBuffer*)circularBuffer + withSize:(int)size __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Frees a circular buffer + @param circularBuffer Pointer to the circular buffer to clear + @deprecated This method is deprecated starting in version 0.1.0. + @note Please use same method in EZAudioUtilities class instead. + */ ++ (void)freeCircularBuffer:(TPCircularBuffer*)circularBuffer __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +@end diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudio.m b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudio.m new file mode 100644 index 0000000..d334e6a --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudio.m @@ -0,0 +1,307 @@ +// +// EZAudio.m +// EZAudioCoreGraphicsWaveformExample +// +// Created by Syed Haris Ali on 5/13/15. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. 
+// + +#import "EZAudio.h" + +@implementation EZAudio + +//------------------------------------------------------------------------------ +#pragma mark - Debugging +//------------------------------------------------------------------------------ + ++ (void)setShouldExitOnCheckResultFail:(BOOL)shouldExitOnCheckResultFail +{ + [EZAudioUtilities setShouldExitOnCheckResultFail:shouldExitOnCheckResultFail]; +} + +//------------------------------------------------------------------------------ + ++ (BOOL)shouldExitOnCheckResultFail +{ + return [EZAudioUtilities shouldExitOnCheckResultFail]; +} + +//------------------------------------------------------------------------------ +#pragma mark - AudioBufferList Utility +//------------------------------------------------------------------------------ + ++ (AudioBufferList *)audioBufferListWithNumberOfFrames:(UInt32)frames + numberOfChannels:(UInt32)channels + interleaved:(BOOL)interleaved +{ + return [EZAudioUtilities audioBufferListWithNumberOfFrames:frames + numberOfChannels:channels + interleaved:interleaved]; +} + +//------------------------------------------------------------------------------ + ++ (float **)floatBuffersWithNumberOfFrames:(UInt32)frames + numberOfChannels:(UInt32)channels +{ + return [EZAudioUtilities floatBuffersWithNumberOfFrames:frames + numberOfChannels:channels]; +} + +//------------------------------------------------------------------------------ + ++ (void)freeBufferList:(AudioBufferList *)bufferList +{ + [EZAudioUtilities freeBufferList:bufferList]; +} + +//------------------------------------------------------------------------------ + ++ (void)freeFloatBuffers:(float **)buffers numberOfChannels:(UInt32)channels +{ + [EZAudioUtilities freeFloatBuffers:buffers numberOfChannels:channels]; +} + +//------------------------------------------------------------------------------ +#pragma mark - AudioStreamBasicDescription Utility +//------------------------------------------------------------------------------ + ++ (AudioStreamBasicDescription)AIFFFormatWithNumberOfChannels:(UInt32)channels + sampleRate:(float)sampleRate +{ + return [EZAudioUtilities AIFFFormatWithNumberOfChannels:channels + sampleRate:sampleRate]; +} + +//------------------------------------------------------------------------------ + ++ (AudioStreamBasicDescription)iLBCFormatWithSampleRate:(float)sampleRate +{ + return [EZAudioUtilities iLBCFormatWithSampleRate:sampleRate]; +} + +//------------------------------------------------------------------------------ + ++ (AudioStreamBasicDescription)floatFormatWithNumberOfChannels:(UInt32)channels + sampleRate:(float)sampleRate +{ + return [EZAudioUtilities floatFormatWithNumberOfChannels:channels + sampleRate:sampleRate]; +} + +//------------------------------------------------------------------------------ + ++ (AudioStreamBasicDescription)M4AFormatWithNumberOfChannels:(UInt32)channels + sampleRate:(float)sampleRate +{ + return [EZAudioUtilities M4AFormatWithNumberOfChannels:channels + sampleRate:sampleRate]; +} + +//------------------------------------------------------------------------------ + ++ (AudioStreamBasicDescription)monoFloatFormatWithSampleRate:(float)sampleRate +{ + return [EZAudioUtilities monoFloatFormatWithSampleRate:sampleRate]; +} + +//------------------------------------------------------------------------------ + ++ (AudioStreamBasicDescription)monoCanonicalFormatWithSampleRate:(float)sampleRate +{ + return [EZAudioUtilities monoCanonicalFormatWithSampleRate:sampleRate]; +} + 
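Every method in this facade simply forwards to EZAudioUtilities, which is also what the deprecation notes in EZAudio.h recommend calling directly. A minimal usage sketch follows (not part of the vendored pod; the sample buffer and its values are invented for illustration, and only methods shown in this file are used):

#import <Foundation/Foundation.h>
#import "EZAudioUtilities.h"

static void ExampleUtilityCalls(void)
{
    // A made-up four-sample buffer, purely for illustration.
    float samples[4] = { -0.5f, 0.25f, 0.75f, -1.0f };

    // Root mean square of the buffer; replaces the deprecated [EZAudio RMS:length:].
    float rms = [EZAudioUtilities RMS:samples length:4];

    // Map the RMS from the 0..1 range onto a 0..100 meter range; replaces the
    // deprecated [EZAudio MAP:leftMin:leftMax:rightMin:rightMax:].
    float meter = [EZAudioUtilities MAP:rms
                                leftMin:0.0f
                                leftMax:1.0f
                               rightMin:0.0f
                               rightMax:100.0f];

    NSLog(@"RMS = %f, meter = %f", rms, meter);
}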
+//------------------------------------------------------------------------------ + ++ (AudioStreamBasicDescription)stereoCanonicalNonInterleavedFormatWithSampleRate:(float)sampleRate +{ + return [EZAudioUtilities stereoCanonicalNonInterleavedFormatWithSampleRate:sampleRate]; +} + +//------------------------------------------------------------------------------ + ++ (AudioStreamBasicDescription)stereoFloatInterleavedFormatWithSampleRate:(float)sampleRate +{ + return [EZAudioUtilities stereoFloatInterleavedFormatWithSampleRate:sampleRate]; +} + +//------------------------------------------------------------------------------ + ++ (AudioStreamBasicDescription)stereoFloatNonInterleavedFormatWithSampleRate:(float)sampleRate +{ + return [EZAudioUtilities stereoFloatNonInterleavedFormatWithSampleRate:sampleRate]; +} + +//------------------------------------------------------------------------------ + ++ (BOOL)isFloatFormat:(AudioStreamBasicDescription)asbd +{ + return [EZAudioUtilities isFloatFormat:asbd]; +} + +//------------------------------------------------------------------------------ + ++ (BOOL)isInterleaved:(AudioStreamBasicDescription)asbd +{ + return [EZAudioUtilities isInterleaved:asbd]; +} + +//------------------------------------------------------------------------------ + ++ (BOOL)isLinearPCM:(AudioStreamBasicDescription)asbd +{ + return [EZAudioUtilities isLinearPCM:asbd]; +} + +//------------------------------------------------------------------------------ + ++ (void)printASBD:(AudioStreamBasicDescription)asbd +{ + [EZAudioUtilities printASBD:asbd]; +} + +//------------------------------------------------------------------------------ + ++ (NSString *)displayTimeStringFromSeconds:(NSTimeInterval)seconds +{ + return [EZAudioUtilities displayTimeStringFromSeconds:seconds]; +} + +//------------------------------------------------------------------------------ + ++ (NSString *)stringForAudioStreamBasicDescription:(AudioStreamBasicDescription)asbd +{ + return [EZAudioUtilities stringForAudioStreamBasicDescription:asbd]; +} + +//------------------------------------------------------------------------------ + ++ (void)setCanonicalAudioStreamBasicDescription:(AudioStreamBasicDescription*)asbd + numberOfChannels:(UInt32)nChannels + interleaved:(BOOL)interleaved +{ + [EZAudioUtilities setCanonicalAudioStreamBasicDescription:asbd + numberOfChannels:nChannels + interleaved:interleaved]; +} + +//------------------------------------------------------------------------------ +#pragma mark - Math Utilities +//------------------------------------------------------------------------------ + ++ (void)appendBufferAndShift:(float*)buffer + withBufferSize:(int)bufferLength + toScrollHistory:(float*)scrollHistory + withScrollHistorySize:(int)scrollHistoryLength +{ + [EZAudioUtilities appendBufferAndShift:buffer + withBufferSize:bufferLength + toScrollHistory:scrollHistory + withScrollHistorySize:scrollHistoryLength]; +} + +//------------------------------------------------------------------------------ + ++ (void) appendValue:(float)value + toScrollHistory:(float*)scrollHistory + withScrollHistorySize:(int)scrollHistoryLength +{ + [EZAudioUtilities appendValue:value + toScrollHistory:scrollHistory + withScrollHistorySize:scrollHistoryLength]; +} + +//------------------------------------------------------------------------------ + ++ (float)MAP:(float)value + leftMin:(float)leftMin + leftMax:(float)leftMax + rightMin:(float)rightMin + rightMax:(float)rightMax +{ + return [EZAudioUtilities MAP:value + 
leftMin:leftMin + leftMax:leftMax + rightMin:rightMin + rightMax:rightMax]; +} + +//------------------------------------------------------------------------------ + ++ (float)RMS:(float *)buffer length:(int)bufferSize +{ + return [EZAudioUtilities RMS:buffer length:bufferSize]; +} + +//------------------------------------------------------------------------------ + ++ (float)SGN:(float)value +{ + return [EZAudioUtilities SGN:value]; +} + +//------------------------------------------------------------------------------ +#pragma mark - OSStatus Utility +//------------------------------------------------------------------------------ + ++ (void)checkResult:(OSStatus)result operation:(const char *)operation +{ + [EZAudioUtilities checkResult:result + operation:operation]; +} + +//------------------------------------------------------------------------------ + ++ (NSString *)stringFromUInt32Code:(UInt32)code +{ + return [EZAudioUtilities stringFromUInt32Code:code]; +} + +//------------------------------------------------------------------------------ +#pragma mark - Plot Utility +//------------------------------------------------------------------------------ + ++ (void)updateScrollHistory:(float **)scrollHistory + withLength:(int)scrollHistoryLength + atIndex:(int *)index + withBuffer:(float *)buffer + withBufferSize:(int)bufferSize + isResolutionChanging:(BOOL *)isChanging +{ + [EZAudioUtilities updateScrollHistory:scrollHistory + withLength:scrollHistoryLength + atIndex:index + withBuffer:buffer + withBufferSize:bufferSize + isResolutionChanging:isChanging]; +} + +//------------------------------------------------------------------------------ +#pragma mark - TPCircularBuffer Utility +//------------------------------------------------------------------------------ + ++ (void)appendDataToCircularBuffer:(TPCircularBuffer *)circularBuffer + fromAudioBufferList:(AudioBufferList *)audioBufferList +{ + [EZAudioUtilities appendDataToCircularBuffer:circularBuffer + fromAudioBufferList:audioBufferList]; +} + +//------------------------------------------------------------------------------ + ++ (void)circularBuffer:(TPCircularBuffer *)circularBuffer withSize:(int)size +{ + [EZAudioUtilities circularBuffer:circularBuffer withSize:size]; +} + +//------------------------------------------------------------------------------ + ++ (void)freeCircularBuffer:(TPCircularBuffer *)circularBuffer +{ + [EZAudioUtilities freeCircularBuffer:circularBuffer]; +} + +//------------------------------------------------------------------------------ + +@end \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioDevice.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioDevice.h new file mode 100644 index 0000000..45d58be --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioDevice.h @@ -0,0 +1,187 @@ +// +// EZAudioDevice.h +// EZAudio +// +// Created by Syed Haris Ali on 6/25/15. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import +#import + +#if TARGET_OS_IPHONE +#import +#elif TARGET_OS_MAC +#endif + +/** + The EZAudioDevice provides an interface for getting the available input and output hardware devices on iOS and OSX. On iOS the EZAudioDevice uses the available devices found from the AVAudioSession, while on OSX the EZAudioDevice wraps the AudioHardware API to find any devices that are connected including the built-in devices (for instance, Built-In Microphone, Display Audio). Since the AVAudioSession and AudioHardware APIs are quite different the EZAudioDevice has different properties available on each platform. The EZMicrophone now supports setting any specific EZAudioDevice from the `inputDevices` function. + */ +@interface EZAudioDevice : NSObject + +//------------------------------------------------------------------------------ +#pragma mark - Class Methods +//------------------------------------------------------------------------------ + +//------------------------------------------------------------------------------ +// @name Getting The Devices +//------------------------------------------------------------------------------ + +/** + Provides the current EZAudioDevice that is being used to pull input. + @return An EZAudioDevice instance representing the currently selected input device. + */ ++ (EZAudioDevice *)currentInputDevice; + +//------------------------------------------------------------------------------ + +/** + Provides the current EZAudioDevice that is being used to output audio. + @return An EZAudioDevice instance representing the currently selected ouotput device. + */ ++ (EZAudioDevice *)currentOutputDevice; + +//------------------------------------------------------------------------------ + +/** + Enumerates all the available input devices and returns the result in an NSArray of EZAudioDevice instances. + @return An NSArray containing EZAudioDevice instances, one for each available input device. + */ ++ (NSArray *)inputDevices; + +//------------------------------------------------------------------------------ + +/** + Enumerates all the available output devices and returns the result in an NSArray of EZAudioDevice instances. + @return An NSArray of output EZAudioDevice instances. + */ ++ (NSArray *)outputDevices; + +#if TARGET_OS_IPHONE + +//------------------------------------------------------------------------------ + +/** + Enumerates all the available input devices. 
+ - iOS only + @param block When enumerating this block executes repeatedly for each EZAudioDevice found. It contains two arguments - first, the EZAudioDevice found, then a pointer to a stop BOOL to allow breaking out of the enumeration) + */ ++ (void)enumerateInputDevicesUsingBlock:(void(^)(EZAudioDevice *device, + BOOL *stop))block; + +//------------------------------------------------------------------------------ + +/** + Enumerates all the available output devices. + - iOS only + @param block When enumerating this block executes repeatedly for each EZAudioDevice found. It contains two arguments - first, the EZAudioDevice found, then a pointer to a stop BOOL to allow breaking out of the enumeration) + */ ++ (void)enumerateOutputDevicesUsingBlock:(void (^)(EZAudioDevice *device, + BOOL *stop))block; + +#elif TARGET_OS_MAC + +/** + Enumerates all the available devices and returns the result in an NSArray of EZAudioDevice instances. + - OSX only + @return An NSArray of input and output EZAudioDevice instances. + */ ++ (NSArray *)devices; + +//------------------------------------------------------------------------------ + +/** + Enumerates all the available devices. + - OSX only + @param block When enumerating this block executes repeatedly for each EZAudioDevice found. It contains two arguments - first, the EZAudioDevice found, then a pointer to a stop BOOL to allow breaking out of the enumeration) + */ ++ (void)enumerateDevicesUsingBlock:(void(^)(EZAudioDevice *device, + BOOL *stop))block; + +#endif + +//------------------------------------------------------------------------------ +#pragma mark - Properties +//------------------------------------------------------------------------------ + +/** + An NSString representing a human-reable version of the device. + */ +@property (nonatomic, copy, readonly) NSString *name; + +#if TARGET_OS_IPHONE + +/** + An AVAudioSessionPortDescription describing an input or output hardware port. + - iOS only + */ +@property (nonatomic, strong, readonly) AVAudioSessionPortDescription *port; + +//------------------------------------------------------------------------------ + +/** + An AVAudioSessionDataSourceDescription describing a specific data source for the `port` provided. + - iOS only + */ +@property (nonatomic, strong, readonly) AVAudioSessionDataSourceDescription *dataSource; + +#elif TARGET_OS_MAC + +/** + An AudioDeviceID representing the device in the AudioHardware API. + - OSX only + */ +@property (nonatomic, assign, readonly) AudioDeviceID deviceID; + +//------------------------------------------------------------------------------ + +/** + An NSString representing the name of the manufacturer of the device. + - OSX only + */ +@property (nonatomic, copy, readonly) NSString *manufacturer; + +//------------------------------------------------------------------------------ + +/** + An NSInteger representing the number of input channels available. + - OSX only + */ +@property (nonatomic, assign, readonly) NSInteger inputChannelCount; + +//------------------------------------------------------------------------------ + +/** + An NSInteger representing the number of output channels available. + - OSX only + */ +@property (nonatomic, assign, readonly) NSInteger outputChannelCount; + +//------------------------------------------------------------------------------ + +/** + An NSString representing the persistent identifier for the AudioDevice. 
+ - OSX only + */ +@property (nonatomic, copy, readonly) NSString *UID; + +#endif + +@end diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioDevice.m b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioDevice.m new file mode 100644 index 0000000..6eacc2f --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioDevice.m @@ -0,0 +1,475 @@ +// +// EZAudioDevice.m +// EZAudio +// +// Created by Syed Haris Ali on 6/25/15. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import "EZAudioDevice.h" +#import "EZAudioUtilities.h" + +@interface EZAudioDevice () + +@property (nonatomic, copy, readwrite) NSString *name; + +#if TARGET_OS_IPHONE + +@property (nonatomic, strong, readwrite) AVAudioSessionPortDescription *port; +@property (nonatomic, strong, readwrite) AVAudioSessionDataSourceDescription *dataSource; + +#elif TARGET_OS_MAC + +@property (nonatomic, assign, readwrite) AudioDeviceID deviceID; +@property (nonatomic, copy, readwrite) NSString *manufacturer; +@property (nonatomic, assign, readwrite) NSInteger inputChannelCount; +@property (nonatomic, assign, readwrite) NSInteger outputChannelCount; +@property (nonatomic, copy, readwrite) NSString *UID; + +#endif + +@end + +@implementation EZAudioDevice + +#if TARGET_OS_IPHONE + +//------------------------------------------------------------------------------ + ++ (EZAudioDevice *)currentInputDevice +{ + AVAudioSession *session = [AVAudioSession sharedInstance]; + AVAudioSessionPortDescription *port = [[[session currentRoute] inputs] firstObject]; + AVAudioSessionDataSourceDescription *dataSource = [session inputDataSource]; + EZAudioDevice *device = [[EZAudioDevice alloc] init]; + device.port = port; + device.dataSource = dataSource; + return device; +} + +//------------------------------------------------------------------------------ + ++ (EZAudioDevice *)currentOutputDevice +{ + AVAudioSession *session = [AVAudioSession sharedInstance]; + AVAudioSessionPortDescription *port = [[[session currentRoute] outputs] firstObject]; + AVAudioSessionDataSourceDescription *dataSource = [session outputDataSource]; + EZAudioDevice *device = [[EZAudioDevice alloc] init]; + device.port = port; + device.dataSource = dataSource; + return device; +} + 
+//------------------------------------------------------------------------------ + ++ (NSArray *)inputDevices +{ + __block NSMutableArray *devices = [NSMutableArray array]; + [self enumerateInputDevicesUsingBlock:^(EZAudioDevice *device, BOOL *stop) + { + [devices addObject:device]; + }]; + return devices; +} + +//------------------------------------------------------------------------------ + ++ (NSArray *)outputDevices +{ + __block NSMutableArray *devices = [NSMutableArray array]; + [self enumerateOutputDevicesUsingBlock:^(EZAudioDevice *device, BOOL *stop) + { + [devices addObject:device]; + }]; + return devices; +} + +//------------------------------------------------------------------------------ + ++ (void)enumerateInputDevicesUsingBlock:(void (^)(EZAudioDevice *, BOOL *))block +{ + if (!block) + { + return; + } + + NSArray *inputs = [[AVAudioSession sharedInstance] availableInputs]; + if (inputs == nil) + { + NSLog(@"Audio session is not active! In order to enumerate the audio devices you must set the category and set active the audio session for your iOS app before calling this function."); + return; + } + + BOOL stop; + for (AVAudioSessionPortDescription *inputDevicePortDescription in inputs) + { + // add any additional sub-devices + NSArray *dataSources = [inputDevicePortDescription dataSources]; + if (dataSources.count) + { + for (AVAudioSessionDataSourceDescription *inputDeviceDataSourceDescription in dataSources) + { + EZAudioDevice *device = [[EZAudioDevice alloc] init]; + device.port = inputDevicePortDescription; + device.dataSource = inputDeviceDataSourceDescription; + block(device, &stop); + } + } + else + { + EZAudioDevice *device = [[EZAudioDevice alloc] init]; + device.port = inputDevicePortDescription; + block(device, &stop); + } + } +} + +//------------------------------------------------------------------------------ + ++ (void)enumerateOutputDevicesUsingBlock:(void (^)(EZAudioDevice *, BOOL *))block +{ + if (!block) + { + return; + } + + AVAudioSessionRouteDescription *currentRoute = [[AVAudioSession sharedInstance] currentRoute]; + NSArray *portDescriptions = [currentRoute outputs]; + + BOOL stop; + for (AVAudioSessionPortDescription *outputDevicePortDescription in portDescriptions) + { + // add any additional sub-devices + NSArray *dataSources = [outputDevicePortDescription dataSources]; + if (dataSources.count) + { + for (AVAudioSessionDataSourceDescription *outputDeviceDataSourceDescription in dataSources) + { + EZAudioDevice *device = [[EZAudioDevice alloc] init]; + device.port = outputDevicePortDescription; + device.dataSource = outputDeviceDataSourceDescription; + block(device, &stop); + } + } + else + { + EZAudioDevice *device = [[EZAudioDevice alloc] init]; + device.port = outputDevicePortDescription; + block(device, &stop); + } + } +} + +//------------------------------------------------------------------------------ + +- (NSString *)name +{ + NSMutableString *name = [NSMutableString string]; + if (self.port) + { + [name appendString:self.port.portName]; + } + if (self.dataSource) + { + [name appendFormat:@": %@", self.dataSource.dataSourceName]; + } + return name; +} + +//------------------------------------------------------------------------------ + +- (NSString *)description +{ + return [NSString stringWithFormat:@"%@ { port: %@, data source: %@ }", + [super description], + self.port, + self.dataSource]; +} + +//------------------------------------------------------------------------------ + +- (BOOL)isEqual:(id)object +{ + if ([object 
isKindOfClass:self.class]) + { + EZAudioDevice *device = (EZAudioDevice *)object; + BOOL isPortUIDEqual = [device.port.UID isEqualToString:self.port.UID]; + BOOL isDataSourceIDEqual = device.dataSource.dataSourceID.longValue == self.dataSource.dataSourceID.longValue; + return isPortUIDEqual && isDataSourceIDEqual; + } + else + { + return [super isEqual:object]; + } +} + +#elif TARGET_OS_MAC + ++ (void)enumerateDevicesUsingBlock:(void(^)(EZAudioDevice *device, + BOOL *stop))block +{ + if (!block) + { + return; + } + + // get the present system devices + AudioObjectPropertyAddress address = [self addressForPropertySelector:kAudioHardwarePropertyDevices]; + UInt32 devicesDataSize; + [EZAudioUtilities checkResult:AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, + &address, + 0, + NULL, + &devicesDataSize) + operation:"Failed to get data size"]; + + // enumerate devices + NSInteger count = devicesDataSize / sizeof(AudioDeviceID); + AudioDeviceID *deviceIDs = (AudioDeviceID *)malloc(devicesDataSize); + + // fill in the devices + [EZAudioUtilities checkResult:AudioObjectGetPropertyData(kAudioObjectSystemObject, + &address, + 0, + NULL, + &devicesDataSize, + deviceIDs) + operation:"Failed to get device IDs for available devices on OSX"]; + + BOOL stop = NO; + for (UInt32 i = 0; i < count; i++) + { + AudioDeviceID deviceID = deviceIDs[i]; + EZAudioDevice *device = [[EZAudioDevice alloc] init]; + device.deviceID = deviceID; + device.manufacturer = [self manufacturerForDeviceID:deviceID]; + device.name = [self namePropertyForDeviceID:deviceID]; + device.UID = [self UIDPropertyForDeviceID:deviceID]; + device.inputChannelCount = [self channelCountForScope:kAudioObjectPropertyScopeInput forDeviceID:deviceID]; + device.outputChannelCount = [self channelCountForScope:kAudioObjectPropertyScopeOutput forDeviceID:deviceID]; + block(device, &stop); + if (stop) + { + break; + } + } + + free(deviceIDs); +} + +//------------------------------------------------------------------------------ + ++ (NSArray *)devices +{ + __block NSMutableArray *devices = [NSMutableArray array]; + [self enumerateDevicesUsingBlock:^(EZAudioDevice *device, BOOL *stop) + { + [devices addObject:device]; + }]; + return devices; +} + +//------------------------------------------------------------------------------ + ++ (EZAudioDevice *)deviceWithPropertySelector:(AudioObjectPropertySelector)propertySelector +{ + AudioDeviceID deviceID; + UInt32 propSize = sizeof(AudioDeviceID); + AudioObjectPropertyAddress address = [self addressForPropertySelector:propertySelector]; + [EZAudioUtilities checkResult:AudioObjectGetPropertyData(kAudioObjectSystemObject, + &address, + 0, + NULL, + &propSize, + &deviceID) + operation:"Failed to get device device on OSX"]; + EZAudioDevice *device = [[EZAudioDevice alloc] init]; + device.deviceID = deviceID; + device.manufacturer = [self manufacturerForDeviceID:deviceID]; + device.name = [self namePropertyForDeviceID:deviceID]; + device.UID = [self UIDPropertyForDeviceID:deviceID]; + device.inputChannelCount = [self channelCountForScope:kAudioObjectPropertyScopeInput forDeviceID:deviceID]; + device.outputChannelCount = [self channelCountForScope:kAudioObjectPropertyScopeOutput forDeviceID:deviceID]; + return device; +} + +//------------------------------------------------------------------------------ + ++ (EZAudioDevice *)currentInputDevice +{ + return [self deviceWithPropertySelector:kAudioHardwarePropertyDefaultInputDevice]; +} + 
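The enumeration entry points declared in EZAudioDevice.h (currentInputDevice, inputDevices, and friends) can be exercised from application code in a few lines. A minimal sketch, assuming an iOS caller that first activates its AVAudioSession (the PlayAndRecord category is an assumption chosen for illustration); as the warning logged by enumerateInputDevicesUsingBlock: above notes, the session must be configured and active before the available inputs can be listed:

#import <AVFoundation/AVFoundation.h>
#import "EZAudioDevice.h"

static void LogAvailableInputDevices(void)
{
#if TARGET_OS_IPHONE
    // The audio session must be active before the available inputs can be read.
    AVAudioSession *session = [AVAudioSession sharedInstance];
    [session setCategory:AVAudioSessionCategoryPlayAndRecord error:nil];
    [session setActive:YES error:nil];
#endif

    // Current default input, then every input device EZAudioDevice can find.
    NSLog(@"current input: %@", [EZAudioDevice currentInputDevice].name);
    for (EZAudioDevice *device in [EZAudioDevice inputDevices])
    {
        NSLog(@"available input: %@", device.name);
    }
}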
+//------------------------------------------------------------------------------ + ++ (EZAudioDevice *)currentOutputDevice +{ + return [self deviceWithPropertySelector:kAudioHardwarePropertyDefaultOutputDevice]; +} + +//------------------------------------------------------------------------------ + ++ (NSArray *)inputDevices +{ + __block NSMutableArray *devices = [NSMutableArray array]; + [self enumerateDevicesUsingBlock:^(EZAudioDevice *device, BOOL *stop) + { + if (device.inputChannelCount > 0) + { + [devices addObject:device]; + } + }]; + return devices; +} + +//------------------------------------------------------------------------------ + ++ (NSArray *)outputDevices +{ + __block NSMutableArray *devices = [NSMutableArray array]; + [self enumerateDevicesUsingBlock:^(EZAudioDevice *device, BOOL *stop) + { + if (device.outputChannelCount > 0) + { + [devices addObject:device]; + } + }]; + return devices; +} + +//------------------------------------------------------------------------------ +#pragma mark - Utility +//------------------------------------------------------------------------------ + ++ (AudioObjectPropertyAddress)addressForPropertySelector:(AudioObjectPropertySelector)selector +{ + AudioObjectPropertyAddress address; + address.mScope = kAudioObjectPropertyScopeGlobal; + address.mElement = kAudioObjectPropertyElementMaster; + address.mSelector = selector; + return address; +} + +//------------------------------------------------------------------------------ + ++ (NSString *)stringPropertyForSelector:(AudioObjectPropertySelector)selector + withDeviceID:(AudioDeviceID)deviceID +{ + AudioObjectPropertyAddress address = [self addressForPropertySelector:selector]; + CFStringRef string; + UInt32 propSize = sizeof(CFStringRef); + NSString *errorString = [NSString stringWithFormat:@"Failed to get device property (%u)",(unsigned int)selector]; + [EZAudioUtilities checkResult:AudioObjectGetPropertyData(deviceID, + &address, + 0, + NULL, + &propSize, + &string) + operation:errorString.UTF8String]; + return (__bridge_transfer NSString *)string; +} + +//------------------------------------------------------------------------------ + ++ (NSInteger)channelCountForScope:(AudioObjectPropertyScope)scope + forDeviceID:(AudioDeviceID)deviceID +{ + AudioObjectPropertyAddress address; + address.mScope = scope; + address.mElement = kAudioObjectPropertyElementMaster; + address.mSelector = kAudioDevicePropertyStreamConfiguration; + + AudioBufferList streamConfiguration; + UInt32 propSize = sizeof(streamConfiguration); + [EZAudioUtilities checkResult:AudioObjectGetPropertyData(deviceID, + &address, + 0, + NULL, + &propSize, + &streamConfiguration) + operation:"Failed to get frame size"]; + + NSInteger channelCount = 0; + for (NSInteger i = 0; i < streamConfiguration.mNumberBuffers; i++) + { + channelCount += streamConfiguration.mBuffers[i].mNumberChannels; + } + + return channelCount; +} + +//------------------------------------------------------------------------------ + ++ (NSString *)manufacturerForDeviceID:(AudioDeviceID)deviceID +{ + return [self stringPropertyForSelector:kAudioDevicePropertyDeviceManufacturerCFString + withDeviceID:deviceID]; +} + +//------------------------------------------------------------------------------ + ++ (NSString *)namePropertyForDeviceID:(AudioDeviceID)deviceID +{ + return [self stringPropertyForSelector:kAudioDevicePropertyDeviceNameCFString + withDeviceID:deviceID]; +} + +//------------------------------------------------------------------------------ + ++ 
(NSString *)UIDPropertyForDeviceID:(AudioDeviceID)deviceID +{ + return [self stringPropertyForSelector:kAudioDevicePropertyDeviceUID + withDeviceID:deviceID]; +} + +//------------------------------------------------------------------------------ + +- (NSString *)description +{ + return [NSString stringWithFormat:@"%@ { deviceID: %i, manufacturer: %@, name: %@, UID: %@, inputChannelCount: %ld, outputChannelCount: %ld }", + [super description], + self.deviceID, + self.manufacturer, + self.name, + self.UID, + self.inputChannelCount, + self.outputChannelCount]; +} + +//------------------------------------------------------------------------------ + +- (BOOL)isEqual:(id)object +{ + if ([object isKindOfClass:self.class]) + { + EZAudioDevice *device = (EZAudioDevice *)object; + return [self.UID isEqualToString:device.UID]; + } + else + { + return [super isEqual:object]; + } +} + +//------------------------------------------------------------------------------ + +#endif + +@end diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioDisplayLink.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioDisplayLink.h new file mode 100644 index 0000000..d3c6c94 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioDisplayLink.h @@ -0,0 +1,94 @@ +// +// EZAudioDisplayLink.h +// EZAudio +// +// Created by Syed Haris Ali on 6/25/15. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import +#import + +@class EZAudioDisplayLink; + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioDisplayLinkDelegate +//------------------------------------------------------------------------------ + +/** + The EZAudioDisplayLinkDelegate provides a means for an EZAudioDisplayLink instance to notify a receiver when it should redraw itself. + */ +@protocol EZAudioDisplayLinkDelegate + +@required +/** + Required method for an EZAudioDisplayLinkDelegate to implement. This fires at the screen's display rate (typically 60 fps). + @param displayLink An EZAudioDisplayLink instance used by a receiver to draw itself at the screen's refresh rate. 
+ */ +- (void)displayLinkNeedsDisplay:(EZAudioDisplayLink *)displayLink; + +@end + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioDisplayLink +//------------------------------------------------------------------------------ + +/** + The EZAudioDisplayLink provides a cross-platform (iOS and Mac) abstraction over the CADisplayLink for iOS and CVDisplayLink for Mac. The purpose of this class is to provide an accurate timer for views that need to redraw themselves at 60 fps. This class is used by the EZAudioPlot and, eventually, the EZAudioPlotGL to provide a timer mechanism to draw real-time plots. + */ +@interface EZAudioDisplayLink : NSObject + +//------------------------------------------------------------------------------ +#pragma mark - Class Methods +//------------------------------------------------------------------------------ + +/** + Class method to create an EZAudioDisplayLink. The caller should implement the EZAudioDisplayLinkDelegate protocol to receive the `displayLinkNeedsDisplay:` delegate method to know when to redraw itself. + @param delegate An instance that implements the EZAudioDisplayLinkDelegate protocol. + @return An instance of the EZAudioDisplayLink. + */ ++ (instancetype)displayLinkWithDelegate:(id)delegate; + +//------------------------------------------------------------------------------ +#pragma mark - Properties +//------------------------------------------------------------------------------ + +/** + The EZAudioDisplayLinkDelegate for which to receive the redraw calls. + */ +@property (nonatomic, weak) id delegate; + +//------------------------------------------------------------------------------ +#pragma mark - Instance Methods +//------------------------------------------------------------------------------ + +/** + Method to start the display link and provide the `displayLinkNeedsDisplay:` calls to the `delegate` + */ +- (void)start; + +/** + Method to stop the display link from providing the `displayLinkNeedsDisplay:` calls to the `delegate` + */ +- (void)stop; + +//------------------------------------------------------------------------------ + +@end \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioDisplayLink.m b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioDisplayLink.m new file mode 100644 index 0000000..d649cf5 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioDisplayLink.m @@ -0,0 +1,180 @@ +// +// EZAudioDisplayLink.m +// EZAudio +// +// Created by Syed Haris Ali on 6/25/15. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import "EZAudioDisplayLink.h" + +//------------------------------------------------------------------------------ +#pragma mark - CVDisplayLink Callback (Declaration) +//------------------------------------------------------------------------------ + +#if TARGET_OS_IPHONE +#elif TARGET_OS_MAC +static CVReturn EZAudioDisplayLinkCallback(CVDisplayLinkRef displayLinkRef, + const CVTimeStamp *now, + const CVTimeStamp *outputTime, + CVOptionFlags flagsIn, + CVOptionFlags *flagsOut, + void *displayLinkContext); +#endif + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioDisplayLink (Interface Extension) +//------------------------------------------------------------------------------ + +@interface EZAudioDisplayLink () +#if TARGET_OS_IPHONE +@property (nonatomic, strong) CADisplayLink *displayLink; +#elif TARGET_OS_MAC +@property (nonatomic, assign) CVDisplayLinkRef displayLink; +#endif +@property (nonatomic, assign) BOOL stopped; +@end + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioDisplayLink (Implementation) +//------------------------------------------------------------------------------ + +@implementation EZAudioDisplayLink + +//------------------------------------------------------------------------------ +#pragma mark - Dealloc +//------------------------------------------------------------------------------ + +- (void)dealloc +{ +#if TARGET_OS_IPHONE + [self.displayLink invalidate]; +#elif TARGET_OS_MAC + CVDisplayLinkStop(self.displayLink); + CVDisplayLinkRelease(self.displayLink); + self.displayLink = nil; +#endif +} + +//------------------------------------------------------------------------------ +#pragma mark - Class Initialization +//------------------------------------------------------------------------------ + ++ (instancetype)displayLinkWithDelegate:(id)delegate +{ + EZAudioDisplayLink *displayLink = [[self alloc] init]; + displayLink.delegate = delegate; + return displayLink; +} + +//------------------------------------------------------------------------------ +#pragma mark - Initialization +//------------------------------------------------------------------------------ + +- (instancetype) init +{ + self = [super init]; + if (self) + { + [self setup]; + } + return self; +} + +//------------------------------------------------------------------------------ +#pragma mark - Setup +//------------------------------------------------------------------------------ + +- (void)setup +{ + self.stopped = YES; +#if TARGET_OS_IPHONE + self.displayLink = [CADisplayLink displayLinkWithTarget:self selector:@selector(update)]; + [self.displayLink addToRunLoop:[NSRunLoop currentRunLoop] forMode:NSDefaultRunLoopMode]; +#elif TARGET_OS_MAC + CVDisplayLinkCreateWithActiveCGDisplays(&_displayLink); + CVDisplayLinkSetOutputCallback(self.displayLink, + EZAudioDisplayLinkCallback, + (__bridge void *)(self)); + CVDisplayLinkStart(self.displayLink); +#endif +} + 
+//------------------------------------------------------------------------------ +#pragma mark - Actions +//------------------------------------------------------------------------------ + +- (void)start +{ +#if TARGET_OS_IPHONE + self.displayLink.paused = NO; +#elif TARGET_OS_MAC + CVDisplayLinkStart(self.displayLink); +#endif + self.stopped = NO; +} + +//------------------------------------------------------------------------------ + +- (void)stop +{ +#if TARGET_OS_IPHONE + self.displayLink.paused = YES; +#elif TARGET_OS_MAC + CVDisplayLinkStop(self.displayLink); +#endif + self.stopped = YES; +} + +//------------------------------------------------------------------------------ + +- (void)update +{ + if (!self.stopped) + { + if ([self.delegate respondsToSelector:@selector(displayLinkNeedsDisplay:)]) + { + [self.delegate displayLinkNeedsDisplay:self]; + } + } +} + +//------------------------------------------------------------------------------ + +@end + +//------------------------------------------------------------------------------ +#pragma mark - CVDisplayLink Callback (Implementation) +//------------------------------------------------------------------------------ + +#if TARGET_OS_IPHONE +#elif TARGET_OS_MAC +static CVReturn EZAudioDisplayLinkCallback(CVDisplayLinkRef displayLinkRef, + const CVTimeStamp *now, + const CVTimeStamp *outputTime, + CVOptionFlags flagsIn, + CVOptionFlags *flagsOut, + void *displayLinkContext) +{ + EZAudioDisplayLink *displayLink = (__bridge EZAudioDisplayLink*)displayLinkContext; + [displayLink update]; + return kCVReturnSuccess; +} +#endif \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFFT.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFFT.h new file mode 100644 index 0000000..6085497 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFFT.h @@ -0,0 +1,392 @@ +// +// EZAudioFFT.h +// EZAudio +// +// Created by Syed Haris Ali on 7/10/15. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
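To make the EZAudioDisplayLink API above concrete, here is a minimal, hypothetical consumer: a view adopts EZAudioDisplayLinkDelegate, creates the link with `displayLinkWithDelegate:`, and redraws on every callback. The class name `MyAudioPlotView` and the iOS-only framing are assumptions; this sketch is editorial and not part of the vendored sources:

#import <UIKit/UIKit.h>
#import "EZAudioDisplayLink.h"

// Hypothetical view that redraws at the display's refresh rate. On the Mac
// the same idea would subclass NSView and call setNeedsDisplay:YES instead.
@interface MyAudioPlotView : UIView <EZAudioDisplayLinkDelegate>
@property (nonatomic, strong) EZAudioDisplayLink *displayLink;
@end

@implementation MyAudioPlotView

- (instancetype)initWithFrame:(CGRect)frame
{
    self = [super initWithFrame:frame];
    if (self)
    {
        // The delegate property on the link is weak, so the view keeps a
        // strong reference to the link itself.
        self.displayLink = [EZAudioDisplayLink displayLinkWithDelegate:self];
        [self.displayLink start];
    }
    return self;
}

- (void)displayLinkNeedsDisplay:(EZAudioDisplayLink *)displayLink
{
    // Fires at roughly the screen's refresh rate while the link is running.
    [self setNeedsDisplay];
}

- (void)dealloc
{
    [self.displayLink stop];
}

@end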
+ +#import +#import + +@class EZAudioFFT; + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioFFTDelegate +//------------------------------------------------------------------------------ + +/** + The EZAudioFFTDelegate provides event callbacks for the EZAudioFFT (and subclasses such as the EZAudioFFTRolling) whenvever the FFT is computed. + */ +@protocol EZAudioFFTDelegate + +@optional + +///----------------------------------------------------------- +/// @name Getting FFT Output Data +///----------------------------------------------------------- + +/** + Triggered when the EZAudioFFT computes an FFT from a buffer of input data. Provides an array of float data representing the computed FFT. + @param fft The EZAudioFFT instance that triggered the event. + @param fftData A float pointer representing the float array of FFT data. + @param bufferSize A vDSP_Length (unsigned long) representing the length of the float array. + */ +- (void) fft:(EZAudioFFT *)fft + updatedWithFFTData:(float *)fftData + bufferSize:(vDSP_Length)bufferSize; + +@end + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioFFT +//------------------------------------------------------------------------------ + +/** + The EZAudioFFT provides a base class to quickly calculate the FFT of incoming audio data using the Accelerate framework. In addition, the EZAudioFFT contains an EZAudioFFTDelegate to receive an event anytime an FFT is computed. + */ +@interface EZAudioFFT : NSObject + +//------------------------------------------------------------------------------ +#pragma mark - Initializers +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Initializers +///----------------------------------------------------------- + +/** + Initializes an EZAudioFFT (or subclass) instance with a maximum buffer size and sample rate. The sample rate is used specifically to calculate the `maxFrequency` property. If you don't care about the `maxFrequency` property then you can set the sample rate to 0. + @param maximumBufferSize A vDSP_Length (unsigned long) representing the maximum length of the incoming audio data. + @param sampleRate A float representing the sample rate of the incoming audio data. + + @return A newly created EZAudioFFT (or subclass) instance. + */ +- (instancetype)initWithMaximumBufferSize:(vDSP_Length)maximumBufferSize + sampleRate:(float)sampleRate; + +//------------------------------------------------------------------------------ + +/** + Initializes an EZAudioFFT (or subclass) instance with a maximum buffer size, sample rate, and EZAudioFFTDelegate. The sample rate is used specifically to calculate the `maxFrequency` property. If you don't care about the `maxFrequency` property then you can set the sample rate to 0. The EZAudioFFTDelegate will act as a receive to get an event whenever the FFT is calculated. + @param maximumBufferSize A vDSP_Length (unsigned long) representing the maximum length of the incoming audio data. + @param sampleRate A float representing the sample rate of the incoming audio data. + @param delegate An EZAudioFFTDelegate to receive an event whenever the FFT is calculated. + @return A newly created EZAudioFFT (or subclass) instance. 
+ */ +- (instancetype)initWithMaximumBufferSize:(vDSP_Length)maximumBufferSize + sampleRate:(float)sampleRate + delegate:(id)delegate; + +//------------------------------------------------------------------------------ +#pragma mark - Class Initializers +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Class Initializers +///----------------------------------------------------------- + +/** + Class method to initialize an EZAudioFFT (or subclass) instance with a maximum buffer size and sample rate. The sample rate is used specifically to calculate the `maxFrequency` property. If you don't care about the `maxFrequency` property then you can set the sample rate to 0. + @param maximumBufferSize A vDSP_Length (unsigned long) representing the maximum length of the incoming audio data. + @param sampleRate A float representing the sample rate of the incoming audio data. + @return A newly created EZAudioFFT (or subclass) instance. + */ ++ (instancetype)fftWithMaximumBufferSize:(vDSP_Length)maximumBufferSize + sampleRate:(float)sampleRate; + +//------------------------------------------------------------------------------ + +/** + Class method to initialize an EZAudioFFT (or subclass) instance with a maximum buffer size, sample rate, and EZAudioFFTDelegate. The sample rate is used specifically to calculate the `maxFrequency` property. If you don't care about the `maxFrequency` property then you can set the sample rate to 0. The EZAudioFFTDelegate will act as a receive to get an event whenever the FFT is calculated. + @param maximumBufferSize A vDSP_Length (unsigned long) representing the maximum length of the incoming audio data. + @param sampleRate A float representing the sample rate of the incoming audio data. + @param delegate An EZAudioFFTDelegate to receive an event whenever the FFT is calculated. + @return A newly created EZAudioFFT (or subclass) instance. + */ ++ (instancetype)fftWithMaximumBufferSize:(vDSP_Length)maximumBufferSize + sampleRate:(float)sampleRate + delegate:(id)delegate; + +//------------------------------------------------------------------------------ +#pragma mark - Properties +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Properties +///----------------------------------------------------------- + +/** + An EZAudioFFTDelegate to receive an event whenever the FFT is calculated. + */ +@property (weak, nonatomic) id delegate; + +//------------------------------------------------------------------------------ + +/** + A COMPLEX_SPLIT data structure used to hold the FFT's imaginary and real components. + */ +@property (readonly, nonatomic) COMPLEX_SPLIT complexSplit; + +//------------------------------------------------------------------------------ + +/** + A float array containing the last calculated FFT data. + */ +@property (readonly, nonatomic) float *fftData; + +//------------------------------------------------------------------------------ + +/** + An FFTSetup data structure used to internally calculate the FFT using Accelerate. + */ +@property (readonly, nonatomic) FFTSetup fftSetup; + +//------------------------------------------------------------------------------ + +/** + A float array containing the last calculated inverse FFT data (the time domain signal). 
+ */ +@property (readonly, nonatomic) float *inversedFFTData; + +//------------------------------------------------------------------------------ + +/** + A float representing the frequency with the highest energy is the last FFT calculation. + */ +@property (readonly, nonatomic) float maxFrequency; + +//------------------------------------------------------------------------------ + +/** + A vDSP_Length (unsigned long) representing the index of the frequency with the highest energy is the last FFT calculation. + */ +@property (readonly, nonatomic) vDSP_Length maxFrequencyIndex; + +//------------------------------------------------------------------------------ + +/** + A float representing the magnitude of the frequency with the highest energy is the last FFT calculation. + */ +@property (readonly, nonatomic) float maxFrequencyMagnitude; + +//------------------------------------------------------------------------------ + +/** + A vDSP_Length (unsigned long) representing the maximum buffer size. This is the maximum length the incoming audio data in the `computeFFTWithBuffer:withBufferSize` method can be. + */ +@property (readonly, nonatomic) vDSP_Length maximumBufferSize; + +//------------------------------------------------------------------------------ + +/** + A float representing the sample rate of the incoming audio data. + */ +@property (readwrite, nonatomic) float sampleRate; + +//------------------------------------------------------------------------------ +#pragma mark - Actions +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Computing The FFT +///----------------------------------------------------------- + +/** + Computes the FFT for a float array representing an incoming audio signal. This will trigger the EZAudioFFTDelegate method `fft:updatedWithFFTData:bufferSize:`. + @param buffer A float array representing the audio data. + @param bufferSize The length of the float array of audio data. + @return A float array containing the computed FFT data. The length of the output will be half the incoming buffer (half the `bufferSize` argument). + */ +- (float *)computeFFTWithBuffer:(float *)buffer withBufferSize:(UInt32)bufferSize; + +//------------------------------------------------------------------------------ + +/** + Provides the frequency corresponding to an index in the last computed FFT data. + @param index A vDSP_Length (unsigned integer) representing the index of the frequency bin value you'd like to get + @return A float representing the frequency value at that index. + */ +- (float)frequencyAtIndex:(vDSP_Length)index; + +//------------------------------------------------------------------------------ + +/** + Provides the magnitude of the frequenecy corresponding to an index in the last computed FFT data. + @param index A vDSP_Length (unsigned integer) representing the index of the frequency bin value you'd like to get + @return A float representing the frequency magnitude value at that index. + */ +- (float)frequencyMagnitudeAtIndex:(vDSP_Length)index; + +@end + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioFFTRolling +//------------------------------------------------------------------------------ + +/** + The EZAudioFFTRolling, a subclass of EZAudioFFT, provides a class to calculate an FFT for an incoming audio signal while maintaining a history of audio data to allow much higher resolution FFTs. 
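A minimal sketch of the base EZAudioFFT API documented above, assuming a 512-sample buffer at 44.1 kHz (both arbitrary) and a hypothetical `FFTExample` wrapper class; only selectors declared in this header are used:

#import <Foundation/Foundation.h>
#import "EZAudioFFT.h"

// Hypothetical wrapper around the base EZAudioFFT class declared above.
@interface FFTExample : NSObject <EZAudioFFTDelegate>
@property (nonatomic, strong) EZAudioFFT *fft;
@end

@implementation FFTExample

- (instancetype)init
{
    self = [super init];
    if (self)
    {
        // 512 samples at 44.1 kHz is an arbitrary but typical configuration.
        self.fft = [EZAudioFFT fftWithMaximumBufferSize:512
                                             sampleRate:44100.0f
                                               delegate:self];
    }
    return self;
}

- (void)processBuffer:(float *)buffer size:(UInt32)size
{
    // Computes the FFT and triggers fft:updatedWithFFTData:bufferSize: below.
    [self.fft computeFFTWithBuffer:buffer withBufferSize:size];
    NSLog(@"Dominant frequency: %f Hz", self.fft.maxFrequency);
}

- (void)fft:(EZAudioFFT *)fft
updatedWithFFTData:(float *)fftData
 bufferSize:(vDSP_Length)bufferSize
{
    // fftData holds bufferSize magnitude values (half the input length).
}

@end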
For instance, the EZMicrophone typically provides 512 frames at a time, but you would probably want to provide 2048 or 4096 frames for a decent looking FFT if you're trying to extract precise frequency components. You will typically be using this class for variable length FFTs instead of the EZAudioFFT base class. + */ +@interface EZAudioFFTRolling : EZAudioFFT + +//------------------------------------------------------------------------------ +#pragma mark - Initializers +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Initializers +///----------------------------------------------------------- + +/** + Initializes an EZAudioFFTRolling instance with a window size and a sample rate. The EZAudioFFTRolling has an internal EZPlotHistoryInfo data structure that writes audio data to a circular buffer and manages sliding windows of audio data to support efficient, large FFT calculations. Here you provide a window size that represents how many audio sample will be used to calculate the FFT and a float representing the sample rate of the incoming audio (can be 0 if you don't care about the `maxFrequency` property). The history buffer size in this case is the `windowSize` * 8, which is pretty good for most cases. + @param windowSize A vDSP_Length (unsigned long) representing the size of the window (i.e. the resolution) of data that should be used to calculate the FFT. A typical value for this would be something like 1024 - 4096 (or higher for an even higher resolution FFT). + @param sampleRate A float representing the sample rate of the incoming audio signal. + @return A newly created EZAudioFFTRolling instance. + */ +- (instancetype)initWithWindowSize:(vDSP_Length)windowSize + sampleRate:(float)sampleRate; + +//------------------------------------------------------------------------------ + +/** + Initializes an EZAudioFFTRolling instance with a window size, a sample rate, and an EZAudioFFTDelegate. The EZAudioFFTRolling has an internal EZPlotHistoryInfo data structure that writes audio data to a circular buffer and manages sliding windows of audio data to support efficient, large FFT calculations. Here you provide a window size that represents how many audio sample will be used to calculate the FFT, a float representing the sample rate of the incoming audio (can be 0 if you don't care about the `maxFrequency` property), and an EZAudioFFTDelegate to receive a callback anytime the FFT is calculated. The history buffer size in this case is the `windowSize` * 8, which is pretty good for most cases. + @param windowSize A vDSP_Length (unsigned long) representing the size of the window (i.e. the resolution) of data that should be used to calculate the FFT. A typical value for this would be something like 1024 - 4096 (or higher for an even higher resolution FFT). + @param sampleRate A float representing the sample rate of the incoming audio signal. + @param delegate An EZAudioFFTDelegate to receive an event whenever the FFT is calculated. + @return A newly created EZAudioFFTRolling instance. + */ +- (instancetype)initWithWindowSize:(vDSP_Length)windowSize + sampleRate:(float)sampleRate + delegate:(id)delegate; + +//------------------------------------------------------------------------------ + +/** + Initializes an EZAudioFFTRolling instance with a window size, a history buffer size, and a sample rate. 
The EZAudioFFTRolling has an internal EZPlotHistoryInfo data structure that writes audio data to a circular buffer and manages sliding windows of audio data to support efficient, large FFT calculations. Here you provide a window size that represents how many audio sample will be used to calculate the FFT, a history buffer size representing the maximum length of the sliding window's underlying circular buffer, and a float representing the sample rate of the incoming audio (can be 0 if you don't care about the `maxFrequency` property). + @param windowSize A vDSP_Length (unsigned long) representing the size of the window (i.e. the resolution) of data that should be used to calculate the FFT. A typical value for this would be something like 1024 - 4096 (or higher for an even higher resolution FFT). + @param historyBufferSize A vDSP_Length (unsigned long) representing the length of the history buffer. This should be AT LEAST the size of the window. A recommended value for this would be at least 8x greater than the `windowSize` argument. + @param sampleRate A float representing the sample rate of the incoming audio signal. + @return A newly created EZAudioFFTRolling instance. + */ +- (instancetype)initWithWindowSize:(vDSP_Length)windowSize + historyBufferSize:(vDSP_Length)historyBufferSize + sampleRate:(float)sampleRate; + +//------------------------------------------------------------------------------ + +/** + Initializes an EZAudioFFTRolling instance with a window size, a history buffer size, a sample rate, and an EZAudioFFTDelegate. The EZAudioFFTRolling has an internal EZPlotHistoryInfo data structure that writes audio data to a circular buffer and manages sliding windows of audio data to support efficient, large FFT calculations. Here you provide a window size that represents how many audio sample will be used to calculate the FFT, a history buffer size representing the maximum length of the sliding window's underlying circular buffer, a float representing the sample rate of the incoming audio (can be 0 if you don't care about the `maxFrequency` property), and an EZAudioFFTDelegate to receive a callback anytime the FFT is calculated. + @param windowSize A vDSP_Length (unsigned long) representing the size of the window (i.e. the resolution) of data that should be used to calculate the FFT. A typical value for this would be something like 1024 - 4096 (or higher for an even higher resolution FFT). + @param historyBufferSize A vDSP_Length (unsigned long) representing the length of the history buffer. This should be AT LEAST the size of the window. A recommended value for this would be at least 8x greater than the `windowSize` argument. + @param sampleRate A float representing the sample rate of the incoming audio signal. + @param delegate An EZAudioFFTDelegate to receive an event whenever the FFT is calculated. + @return A newly created EZAudioFFTRolling instance. + */ +- (instancetype)initWithWindowSize:(vDSP_Length)windowSize + historyBufferSize:(vDSP_Length)historyBufferSize + sampleRate:(float)sampleRate + delegate:(id)delegate; + +//------------------------------------------------------------------------------ +#pragma mark - Class Initializers +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Class Initializers +///----------------------------------------------------------- + +/** + Class method to initialize an EZAudioFFTRolling instance with a window size and a sample rate. 
The EZAudioFFTRolling has an internal EZPlotHistoryInfo data structure that writes audio data to a circular buffer and manages sliding windows of audio data to support efficient, large FFT calculations. Here you provide a window size that represents how many audio sample will be used to calculate the FFT and a float representing the sample rate of the incoming audio (can be 0 if you don't care about the `maxFrequency` property). The history buffer size in this case is the `windowSize` * 8, which is pretty good for most cases. + @param windowSize A vDSP_Length (unsigned long) representing the size of the window (i.e. the resolution) of data that should be used to calculate the FFT. A typical value for this would be something like 1024 - 4096 (or higher for an even higher resolution FFT). + @param sampleRate A float representing the sample rate of the incoming audio signal. + @return A newly created EZAudioFFTRolling instance. + */ ++ (instancetype)fftWithWindowSize:(vDSP_Length)windowSize + sampleRate:(float)sampleRate; + +//------------------------------------------------------------------------------ + +/** + Class method to initialize an EZAudioFFTRolling instance with a window size, a sample rate, and an EZAudioFFTDelegate. The EZAudioFFTRolling has an internal EZPlotHistoryInfo data structure that writes audio data to a circular buffer and manages sliding windows of audio data to support efficient, large FFT calculations. Here you provide a window size that represents how many audio sample will be used to calculate the FFT, a float representing the sample rate of the incoming audio (can be 0 if you don't care about the `maxFrequency` property), and an EZAudioFFTDelegate to receive a callback anytime the FFT is calculated. The history buffer size in this case is the `windowSize` * 8, which is pretty good for most cases. + @param windowSize A vDSP_Length (unsigned long) representing the size of the window (i.e. the resolution) of data that should be used to calculate the FFT. A typical value for this would be something like 1024 - 4096 (or higher for an even higher resolution FFT). + @param sampleRate A float representing the sample rate of the incoming audio signal. + @param delegate An EZAudioFFTDelegate to receive an event whenever the FFT is calculated. + @return A newly created EZAudioFFTRolling instance. + */ ++ (instancetype)fftWithWindowSize:(vDSP_Length)windowSize + sampleRate:(float)sampleRate + delegate:(id)delegate; + +//------------------------------------------------------------------------------ + +/** + Class method to initialize an EZAudioFFTRolling instance with a window size, a history buffer size, and a sample rate. The EZAudioFFTRolling has an internal EZPlotHistoryInfo data structure that writes audio data to a circular buffer and manages sliding windows of audio data to support efficient, large FFT calculations. Here you provide a window size that represents how many audio sample will be used to calculate the FFT, a history buffer size representing the maximum length of the sliding window's underlying circular buffer, and a float representing the sample rate of the incoming audio (can be 0 if you don't care about the `maxFrequency` property). + @param windowSize A vDSP_Length (unsigned long) representing the size of the window (i.e. the resolution) of data that should be used to calculate the FFT. A typical value for this would be something like 1024 - 4096 (or higher for an even higher resolution FFT). 
+ @param historyBufferSize A vDSP_Length (unsigned long) representing the length of the history buffer. This should be AT LEAST the size of the window. A recommended value for this would be at least 8x greater than the `windowSize` argument. + @param sampleRate A float representing the sample rate of the incoming audio signal. + @return A newly created EZAudioFFTRolling instance. + */ ++ (instancetype)fftWithWindowSize:(vDSP_Length)windowSize + historyBufferSize:(vDSP_Length)historyBufferSize + sampleRate:(float)sampleRate; + +//------------------------------------------------------------------------------ + +/** + Class method to initialize an EZAudioFFTRolling instance with a window size, a history buffer size, a sample rate, and an EZAudioFFTDelegate. The EZAudioFFTRolling has an internal EZPlotHistoryInfo data structure that writes audio data to a circular buffer and manages sliding windows of audio data to support efficient, large FFT calculations. Here you provide a window size that represents how many audio sample will be used to calculate the FFT, a history buffer size representing the maximum length of the sliding window's underlying circular buffer, a float representing the sample rate of the incoming audio (can be 0 if you don't care about the `maxFrequency` property), and an EZAudioFFTDelegate to receive a callback anytime the FFT is calculated. + @param windowSize A vDSP_Length (unsigned long) representing the size of the window (i.e. the resolution) of data that should be used to calculate the FFT. A typical value for this would be something like 1024 - 4096 (or higher for an even higher resolution FFT). + @param historyBufferSize A vDSP_Length (unsigned long) representing the length of the history buffer. This should be AT LEAST the size of the window. A recommended value for this would be at least 8x greater than the `windowSize` argument. + @param sampleRate A float representing the sample rate of the incoming audio signal. + @param delegate An EZAudioFFTDelegate to receive an event whenever the FFT is calculated. + @return A newly created EZAudioFFTRolling instance. + */ ++ (instancetype)fftWithWindowSize:(vDSP_Length)windowSize + historyBufferSize:(vDSP_Length)historyBufferSize + sampleRate:(float)sampleRate + delegate:(id)delegate; + +//------------------------------------------------------------------------------ +#pragma mark - Properties +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Properties +///----------------------------------------------------------- + +/** + A vDSP_Length (unsigned long) representing the length of the FFT window. + */ +@property (readonly, nonatomic) vDSP_Length windowSize; + +//------------------------------------------------------------------------------ + +/** + A float array representing the audio data in the internal circular buffer used to perform the FFT. This will increase as more data is appended to the internal circular buffer via the `computeFFTWithBuffer:withBufferSize:` method. The length of this array is the `timeDomainBufferSize` property. + */ +@property (readonly, nonatomic) float *timeDomainData; + +//------------------------------------------------------------------------------ + +/** + A UInt32 representing the length of the audio data used to perform the FFT. 
+ */ +@property (readonly, nonatomic) UInt32 timeDomainBufferSize; + +@end \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFFT.m b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFFT.m new file mode 100644 index 0000000..02e7a5e --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFFT.m @@ -0,0 +1,444 @@ +// +// EZAudioFFT.m +// EZAudio +// +// Created by Syed Haris Ali on 7/10/15. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import "EZAudioFFT.h" +#import "EZAudioUtilities.h" + +//------------------------------------------------------------------------------ +#pragma mark - Data Structures +//------------------------------------------------------------------------------ + +typedef struct EZAudioFFTInfo +{ + FFTSetup fftSetup; + COMPLEX_SPLIT complexA; + float *outFFTData; + vDSP_Length outFFTDataLength; + float *inversedFFTData; + vDSP_Length maxFrequencyIndex; + float maxFrequencyMangitude; + float maxFrequency; +} EZAudioFFTInfo; + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioFFT (Interface Extension) +//------------------------------------------------------------------------------ + +@interface EZAudioFFT () +@property (assign, nonatomic) EZAudioFFTInfo *info; +@property (readwrite, nonatomic) vDSP_Length maximumBufferSize; +@end + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioFFT (Implementation) +//------------------------------------------------------------------------------ + +@implementation EZAudioFFT + +//------------------------------------------------------------------------------ +#pragma mark - Dealloc +//------------------------------------------------------------------------------ + +- (void)dealloc +{ + vDSP_destroy_fftsetup(self.info->fftSetup); + free(self.info->complexA.realp); + free(self.info->complexA.imagp); + free(self.info->outFFTData); + free(self.info->inversedFFTData); +} + +//------------------------------------------------------------------------------ +#pragma mark - Initializers +//------------------------------------------------------------------------------ + +- (instancetype)initWithMaximumBufferSize:(vDSP_Length)maximumBufferSize + 
sampleRate:(float)sampleRate +{ + return [self initWithMaximumBufferSize:maximumBufferSize + sampleRate:sampleRate + delegate:nil]; +} + +//------------------------------------------------------------------------------ + +- (instancetype)initWithMaximumBufferSize:(vDSP_Length)maximumBufferSize + sampleRate:(float)sampleRate + delegate:(id)delegate +{ + self = [super init]; + if (self) + { + self.maximumBufferSize = (vDSP_Length)maximumBufferSize; + self.sampleRate = sampleRate; + self.delegate = delegate; + [self setup]; + } + return self; +} + +//------------------------------------------------------------------------------ +#pragma mark - Class Initializers +//------------------------------------------------------------------------------ + ++ (instancetype)fftWithMaximumBufferSize:(vDSP_Length)maximumBufferSize + sampleRate:(float)sampleRate +{ + return [[self alloc] initWithMaximumBufferSize:maximumBufferSize + sampleRate:sampleRate]; +} + +//------------------------------------------------------------------------------ + ++ (instancetype)fftWithMaximumBufferSize:(vDSP_Length)maximumBufferSize + sampleRate:(float)sampleRate + delegate:(id)delegate +{ + return [[self alloc] initWithMaximumBufferSize:maximumBufferSize + sampleRate:sampleRate + delegate:delegate]; +} + +//------------------------------------------------------------------------------ +#pragma mark - Setup +//------------------------------------------------------------------------------ + +- (void)setup +{ + NSAssert(self.maximumBufferSize > 0, @"Expected FFT buffer size to be greater than 0!"); + + // + // Initialize FFT + // + float maximumBufferSizeBytes = self.maximumBufferSize * sizeof(float); + self.info = (EZAudioFFTInfo *)calloc(1, sizeof(EZAudioFFTInfo)); + vDSP_Length log2n = log2f(self.maximumBufferSize); + self.info->fftSetup = vDSP_create_fftsetup(log2n, FFT_RADIX2); + long nOver2 = maximumBufferSizeBytes / 2; + size_t maximumSizePerComponentBytes = nOver2 * sizeof(float); + self.info->complexA.realp = (float *)malloc(maximumSizePerComponentBytes); + self.info->complexA.imagp = (float *)malloc(maximumSizePerComponentBytes); + self.info->outFFTData = (float *)malloc(maximumSizePerComponentBytes); + memset(self.info->outFFTData, 0, maximumSizePerComponentBytes); + self.info->inversedFFTData = (float *)malloc(maximumSizePerComponentBytes); +} + +//------------------------------------------------------------------------------ +#pragma mark - Actions +//------------------------------------------------------------------------------ + +- (float *)computeFFTWithBuffer:(float *)buffer withBufferSize:(UInt32)bufferSize +{ + if (buffer == NULL) + { + return NULL; + } + + // + // Calculate real + imaginary components and normalize + // + vDSP_Length log2n = log2f(bufferSize); + long nOver2 = bufferSize / 2; + float mFFTNormFactor = 10.0 / (2 * bufferSize); + vDSP_ctoz((COMPLEX*)buffer, 2, &(self.info->complexA), 1, nOver2); + vDSP_fft_zrip(self.info->fftSetup, &(self.info->complexA), 1, log2n, FFT_FORWARD); + vDSP_vsmul(self.info->complexA.realp, 1, &mFFTNormFactor, self.info->complexA.realp, 1, nOver2); + vDSP_vsmul(self.info->complexA.imagp, 1, &mFFTNormFactor, self.info->complexA.imagp, 1, nOver2); + vDSP_zvmags(&(self.info->complexA), 1, self.info->outFFTData, 1, nOver2); + vDSP_fft_zrip(self.info->fftSetup, &(self.info->complexA), 1, log2n, FFT_INVERSE); + vDSP_ztoc(&(self.info->complexA), 1, (COMPLEX *) self.info->inversedFFTData , 2, nOver2); + self.info->outFFTDataLength = nOver2; + + // + // Calculate max freq + // 
+ if (self.sampleRate > 0.0f) + { + vDSP_maxvi(self.info->outFFTData, 1, &self.info->maxFrequencyMangitude, &self.info->maxFrequencyIndex, nOver2); + self.info->maxFrequency = [self frequencyAtIndex:self.info->maxFrequencyIndex]; + } + + // + // Notify delegate + // + if ([self.delegate respondsToSelector:@selector(fft:updatedWithFFTData:bufferSize:)]) + { + [self.delegate fft:self + updatedWithFFTData:self.info->outFFTData + bufferSize:nOver2]; + } + + // + // Return the FFT + // + return self.info->outFFTData; +} + +//------------------------------------------------------------------------------ + +- (float)frequencyAtIndex:(vDSP_Length)index +{ + if (!(self.info->outFFTData == NULL || self.sampleRate == 0.0f)) + { + float nyquistMaxFreq = self.sampleRate / 2.0; + return ((float)index / (float)self.info->outFFTDataLength) * nyquistMaxFreq; + } + return NSNotFound; +} + +//------------------------------------------------------------------------------ + +- (float)frequencyMagnitudeAtIndex:(vDSP_Length)index +{ + if (self.info->outFFTData != NULL) + { + return self.info->outFFTData[index]; + } + return NSNotFound; +} + +//------------------------------------------------------------------------------ +#pragma mark - Getters +//------------------------------------------------------------------------------ + +- (COMPLEX_SPLIT)complexSplit +{ + return self.info->complexA; +} + +//------------------------------------------------------------------------------ + +- (float *)fftData +{ + return self.info->outFFTData; +} + +//------------------------------------------------------------------------------ + +- (FFTSetup)fftSetup +{ + return self.info->fftSetup; +} + +//------------------------------------------------------------------------------ + +- (float *)inversedFFTData +{ + return self.info->inversedFFTData; +} + +//------------------------------------------------------------------------------ + +- (vDSP_Length)maxFrequencyIndex +{ + return self.info->maxFrequencyIndex; +} + +//------------------------------------------------------------------------------ + +- (float)maxFrequencyMagnitude +{ + return self.info->maxFrequencyMangitude; +} + +//------------------------------------------------------------------------------ + +- (float)maxFrequency +{ + return self.info->maxFrequency; +} + +@end + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioFFTRolling +//------------------------------------------------------------------------------ + +@interface EZAudioFFTRolling () +@property (assign, nonatomic) EZPlotHistoryInfo *historyInfo; +@property (readwrite, nonatomic) vDSP_Length windowSize; + +@end + +@implementation EZAudioFFTRolling + +//------------------------------------------------------------------------------ +#pragma mark - Dealloc +//------------------------------------------------------------------------------ + +- (void)dealloc +{ + [EZAudioUtilities freeHistoryInfo:self.historyInfo]; +} + +//------------------------------------------------------------------------------ +#pragma mark - Initialization +//------------------------------------------------------------------------------ + +- (instancetype)initWithWindowSize:(vDSP_Length)windowSize + sampleRate:(float)sampleRate +{ + return [self initWithWindowSize:windowSize + historyBufferSize:windowSize * 8 + sampleRate:sampleRate + delegate:nil]; +} + +//------------------------------------------------------------------------------ + +- 
(instancetype)initWithWindowSize:(vDSP_Length)windowSize + sampleRate:(float)sampleRate + delegate:(id)delegate +{ + return [self initWithWindowSize:windowSize + historyBufferSize:windowSize * 8 + sampleRate:sampleRate + delegate:delegate]; +} + +//------------------------------------------------------------------------------ + +- (instancetype)initWithWindowSize:(vDSP_Length)windowSize + historyBufferSize:(vDSP_Length)historyBufferSize + sampleRate:(float)sampleRate +{ + return [self initWithWindowSize:windowSize + historyBufferSize:historyBufferSize + sampleRate:sampleRate + delegate:nil]; +} + +//------------------------------------------------------------------------------ + +- (instancetype)initWithWindowSize:(vDSP_Length)windowSize + historyBufferSize:(vDSP_Length)historyBufferSize + sampleRate:(float)sampleRate + delegate:(id)delegate +{ + self = [super initWithMaximumBufferSize:historyBufferSize + sampleRate:sampleRate]; + if (self) + { + self.delegate = delegate; + self.windowSize = windowSize; + + // + // Allocate an appropriately sized history buffer in bytes + // + self.historyInfo = [EZAudioUtilities historyInfoWithDefaultLength:(UInt32)windowSize + maximumLength:(UInt32)historyBufferSize]; + } + return self; +} + +//------------------------------------------------------------------------------ +#pragma mark - Class Initializers +//------------------------------------------------------------------------------ + ++ (instancetype)fftWithWindowSize:(vDSP_Length)windowSize + sampleRate:(float)sampleRate +{ + return [[self alloc] initWithWindowSize:windowSize + sampleRate:sampleRate]; +} + +//------------------------------------------------------------------------------ + ++ (instancetype)fftWithWindowSize:(vDSP_Length)windowSize + sampleRate:(float)sampleRate + delegate:(id)delegate +{ + return [[self alloc] initWithWindowSize:windowSize + sampleRate:sampleRate + delegate:delegate]; +} + +//------------------------------------------------------------------------------ + ++ (instancetype)fftWithWindowSize:(vDSP_Length)windowSize + historyBufferSize:(vDSP_Length)historyBufferSize + sampleRate:(float)sampleRate +{ + return [[self alloc] initWithWindowSize:windowSize + historyBufferSize:historyBufferSize + sampleRate:sampleRate]; +} + +//------------------------------------------------------------------------------ + ++ (instancetype)fftWithWindowSize:(vDSP_Length)windowSize + historyBufferSize:(vDSP_Length)historyBufferSize + sampleRate:(float)sampleRate + delegate:(id)delegate +{ + return [[self alloc] initWithWindowSize:windowSize + historyBufferSize:historyBufferSize + sampleRate:sampleRate + delegate:delegate]; +} + +//------------------------------------------------------------------------------ +#pragma mark - Actions +//------------------------------------------------------------------------------ + +- (float *)computeFFTWithBuffer:(float *)buffer + withBufferSize:(UInt32)bufferSize +{ + if (buffer == NULL) + { + return NULL; + } + + // + // Append buffer to history window + // + [EZAudioUtilities appendBuffer:buffer + withBufferSize:bufferSize + toHistoryInfo:self.historyInfo]; + + // + // Call super to calculate the FFT of the window + // + return [super computeFFTWithBuffer:self.historyInfo->buffer + withBufferSize:self.historyInfo->bufferSize]; +} + +//------------------------------------------------------------------------------ +#pragma mark - Getters +//------------------------------------------------------------------------------ + +- (UInt32)timeDomainBufferSize +{ + 
return self.historyInfo->bufferSize; +} + +//------------------------------------------------------------------------------ + +- (float *)timeDomainData +{ + return self.historyInfo->buffer; +} + +@end diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFile.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFile.h new file mode 100644 index 0000000..d9977ce --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFile.h @@ -0,0 +1,380 @@ +// +// EZAudioFile.h +// EZAudio +// +// Created by Syed Haris Ali on 12/1/13. +// Copyright (c) 2013 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import +#import +#import "EZAudioFloatData.h" + +//------------------------------------------------------------------------------ + +@class EZAudio; +@class EZAudioFile; + +//------------------------------------------------------------------------------ +#pragma mark - Blocks +//------------------------------------------------------------------------------ +/** + A block used when returning back the waveform data. The waveform data itself will be an array of float arrays, one for each channel, and the length indicates the total length of each float array. + @param waveformData An array of float arrays, each representing a channel of audio data from the file + @param length An int representing the length of each channel of float audio data + */ +typedef void (^EZAudioWaveformDataCompletionBlock)(float **waveformData, int length); + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioFileDelegate +//------------------------------------------------------------------------------ +/** + The EZAudioFileDelegate provides event callbacks for the EZAudioFile object. These type of events are triggered by reads and seeks on the file and gives feedback such as the audio data read as a float array for visualizations and the new seek position for UI updating. + */ +@protocol EZAudioFileDelegate + +@optional +/** + Triggered from the EZAudioFile function `readFrames:audioBufferList:bufferSize:eof:` to notify the delegate of the read audio data as a float array instead of a buffer list. Common use case of this would be to visualize the float data using an audio plot or audio data dependent OpenGL sketch. 
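A minimal sketch of the rolling FFT path implemented above: small input buffers are appended to the history window and the FFT is recomputed over the whole window. The 1024-sample window, the 512-frame input size, and the `RollingFFTExample` function are illustrative assumptions; only selectors declared in this diff are used:

#import <Foundation/Foundation.h>
#import "EZAudioFFT.h"

// Illustrative only: micBuffer is assumed to hold 512 mono float samples.
// In a real app the rolling FFT would be created once and reused per buffer.
static void RollingFFTExample(float *micBuffer)
{
    EZAudioFFTRolling *rollingFFT = [EZAudioFFTRolling fftWithWindowSize:1024
                                                              sampleRate:44100.0f
                                                                delegate:nil];

    // Appends the 512 samples to the internal circular history buffer and
    // recomputes the FFT over the current window; returns the magnitude data.
    float *spectrum = [rollingFFT computeFFTWithBuffer:micBuffer withBufferSize:512];

    // Time-domain samples backing the last FFT computation.
    float *window = rollingFFT.timeDomainData;
    UInt32 windowLength = rollingFFT.timeDomainBufferSize;
    NSLog(@"Window of %u samples, first magnitude %f",
          (unsigned int)windowLength, spectrum[0]);
    (void)window;
}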
+ @param audioFile The instance of the EZAudioFile that triggered the event. + @param buffer A float array of float arrays holding the audio data. buffer[0] would be the left channel's float array while buffer[1] would be the right channel's float array in a stereo file. + @param bufferSize The length of the buffers float arrays + @param numberOfChannels The number of channels. 2 for stereo, 1 for mono. + */ +- (void) audioFile:(EZAudioFile *)audioFile + readAudio:(float **)buffer + withBufferSize:(UInt32)bufferSize + withNumberOfChannels:(UInt32)numberOfChannels; + +//------------------------------------------------------------------------------ + +/** + Occurs when the audio file's internal seek position has been updated by the EZAudioFile functions `readFrames:audioBufferList:bufferSize:eof:` or `audioFile:updatedPosition:`. As of 0.8.0 this is the preferred method of listening for position updates on the audio file since a user may want the pull the currentTime, formattedCurrentTime, or the frame index from the EZAudioFile instance provided. + @param audioFile The instance of the EZAudio in which the change occured. + */ +- (void)audioFileUpdatedPosition:(EZAudioFile *)audioFile; + +//------------------------------------------------------------------------------ + +/** + Occurs when the audio file's internal seek position has been updated by the EZAudioFile functions `readFrames:audioBufferList:bufferSize:eof:` or `audioFile:updatedPosition:`. + @param audioFile The instance of the EZAudio in which the change occured + @param framePosition The new frame index as a 64-bit signed integer + @deprecated This property is deprecated starting in version 0.8.0. + @note Please use `audioFileUpdatedPosition:` property instead. + */ +- (void)audioFile:(EZAudioFile *)audioFile + updatedPosition:(SInt64)framePosition __attribute__((deprecated)); + +@end + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioFile +//------------------------------------------------------------------------------ +/** + The EZAudioFile provides a lightweight and intuitive way to asynchronously interact with audio files. These interactions included reading audio data, seeking within an audio file, getting information about the file, and pulling the waveform data for visualizing the contents of the audio file. The EZAudioFileDelegate provides event callbacks for when reads, seeks, and various updates happen within the audio file to allow the caller to interact with the action in meaningful ways. Common use cases here could be to read the audio file's data as AudioBufferList structures for output (see EZOutput) and visualizing the audio file's data as a float array using an audio plot (see EZAudioPlot). + */ +@interface EZAudioFile : NSObject + +//------------------------------------------------------------------------------ +#pragma mark - Properties +//------------------------------------------------------------------------------ + +/** + A EZAudioFileDelegate for the audio file that is used to return events such as new seek positions within the file and the read audio data as a float array. + */ +@property (nonatomic, weak) id delegate; + +//------------------------------------------------------------------------------ +#pragma mark - Initialization +//------------------------------------------------------------------------------ +/** + @name Initialization +*/ + +/** + Creates a new instance of the EZAudioFile using a file path URL. 
+ @param url The file path reference of the audio file as an NSURL. + @return The newly created EZAudioFile instance. nil if the file path does not exist. + */ +- (instancetype)initWithURL:(NSURL *)url; + +/** + Creates a new instance of the EZAudioFile using a file path URL with a delegate conforming to the EZAudioFileDelegate protocol. + @param delegate The audio file delegate that receives events specified by the EZAudioFileDelegate protocol + @param url The file path reference of the audio file as an NSURL. + @return The newly created EZAudioFile instance. + */ +- (instancetype)initWithURL:(NSURL *)url + delegate:(id)delegate; + +//------------------------------------------------------------------------------ + +/** + Creates a new instance of the EZAudioFile using a file path URL with a delegate conforming to the EZAudioFileDelegate protocol and a client format. + @param url The file path reference of the audio file as an NSURL. + @param delegate The audio file delegate that receives events specified by the EZAudioFileDelegate protocol + @param clientFormat An AudioStreamBasicDescription that will be used as the client format on the audio file. For instance, the audio file might be in a 22.5 kHz sample rate format in its file format, but your app wants to read the samples at a sample rate of 44.1 kHz so it can iterate with other components (like a audio processing graph) without any weird playback effects. If this initializer is not used then a non-interleaved float format will be assumed. + @return The newly created EZAudioFile instance. + */ +- (instancetype)initWithURL:(NSURL *)url + delegate:(id)delegate + clientFormat:(AudioStreamBasicDescription)clientFormat; + +//------------------------------------------------------------------------------ +#pragma mark - Class Initializers +//------------------------------------------------------------------------------ +/** + @name Class Initializers + */ + +/** + Class method that creates a new instance of the EZAudioFile using a file path URL. + @param url The file path reference of the audio file as an NSURL. + @return The newly created EZAudioFile instance. + */ ++ (instancetype)audioFileWithURL:(NSURL *)url; + +//------------------------------------------------------------------------------ + +/** + Class method that creates a new instance of the EZAudioFile using a file path URL with a delegate conforming to the EZAudioFileDelegate protocol. + @param url The file path reference of the audio file as an NSURL. + @param delegate The audio file delegate that receives events specified by the EZAudioFileDelegate protocol + @return The newly created EZAudioFile instance. + */ ++ (instancetype)audioFileWithURL:(NSURL *)url + delegate:(id)delegate; + +//------------------------------------------------------------------------------ + +/** + Class method that creates a new instance of the EZAudioFile using a file path URL with a delegate conforming to the EZAudioFileDelegate protocol and a client format. + @param url The file path reference of the audio file as an NSURL. + @param delegate The audio file delegate that receives events specified by the EZAudioFileDelegate protocol + @param clientFormat An AudioStreamBasicDescription that will be used as the client format on the audio file. 
For instance, the audio file might be in a 22.5 kHz sample rate, interleaved MP3 file format, but your app wants to read linear PCM samples at a sample rate of 44.1 kHz so it can be read in the context of other components sharing a common stream format (like an audio processing graph). If this initializer is not used then the `defaultClientFormat` will be used as the default value for the client format. + @return The newly created EZAudioFile instance. + */ ++ (instancetype)audioFileWithURL:(NSURL *)url + delegate:(id)delegate + clientFormat:(AudioStreamBasicDescription)clientFormat; + +//------------------------------------------------------------------------------ +#pragma mark - Class Methods +//------------------------------------------------------------------------------ +/** + @name Class Methods + */ + +/** + A class method that subclasses can override to specify the default client format that will be used to read audio data from this file. A client format is different from the file format in that it is the format of the other components interacting with this file. For instance, the file on disk could be a 22.5 kHz, float format, but we might have an audio processing graph that has a 44.1 kHz, signed integer format that we'd like to interact with. The client format lets us set that 44.1 kHz format on the audio file to properly read samples from it, with any interpolation or format conversion that must take place handled automatically within the EZAudioFile `readFrames:audioBufferList:bufferSize:eof:` method. Default is stereo, non-interleaved, 44.1 kHz. + @return An AudioStreamBasicDescription that serves as the audio file's client format. + */ ++ (AudioStreamBasicDescription)defaultClientFormat; + +//------------------------------------------------------------------------------ + +/** + A class method that subclasses can override to specify the default sample rate that will be used in the `defaultClientFormat` method. Default is 44100.0 (44.1 kHz). + @return A Float64 representing the sample rate that should be used in the default client format. + */ ++ (Float64)defaultClientFormatSampleRate; + +//------------------------------------------------------------------------------ + +/** + Provides an array of the supported audio file types. Each audio file type is provided as a string, e.g. @"caf". Useful for filtering lists of files in an open panel to only the types allowed. + @return An array of NSString objects representing the supported file types. + */ ++ (NSArray *)supportedAudioFileTypes; + +//------------------------------------------------------------------------------ +#pragma mark - Events +//------------------------------------------------------------------------------ +/** + @name Reading From The Audio File + */ + +/** + Reads a specified number of frames from the audio file. In addition, this will notify the EZAudioFileDelegate (if specified) of the read data as a float array with the audioFile:readAudio:withBufferSize:withNumberOfChannels: event and the new seek position within the file with the audioFile:updatedPosition: event. + @param frames The number of frames to read from the file. + @param audioBufferList An allocated AudioBufferList structure in which to store the read audio data + @param bufferSize A pointer to a UInt32 in which to store the read buffer size + @param eof A pointer to a BOOL in which to store whether the read operation reached the end of the audio file.
+ */ +- (void)readFrames:(UInt32)frames + audioBufferList:(AudioBufferList *)audioBufferList + bufferSize:(UInt32 *)bufferSize + eof:(BOOL *)eof; + +//------------------------------------------------------------------------------ + +/** + @name Seeking Through The Audio File + */ + +/** + Seeks through an audio file to a specified frame. This will notify the EZAudioFileDelegate (if specified) with the audioFile:updatedPosition: function. + @param frame The new frame position to seek to as a SInt64. + */ +- (void)seekToFrame:(SInt64)frame; + +//------------------------------------------------------------------------------ +#pragma mark - Getters +//------------------------------------------------------------------------------ +/** + @name Getting Information About The Audio File + */ + +/** + Provides the common AudioStreamBasicDescription that will be used for in-app interaction. The file's format will be converted to this format and then sent back as either a float array or an `AudioBufferList` pointer. For instance, the file on disk could be a 22.5 kHz, float format, but we might have an audio processing graph that has a 44.1 kHz, signed integer format that we'd like to interact with. The client format lets us set that 44.1 kHz format on the audio file to properly read samples from it, with any interpolation or format conversion that must take place handled automatically within the EZAudioFile `readFrames:audioBufferList:bufferSize:eof:` method. Default is stereo, non-interleaved, 44.1 kHz. + @warning This must be a linear PCM format! + @return An AudioStreamBasicDescription structure describing the format of the audio file. + */ +@property (readwrite) AudioStreamBasicDescription clientFormat; + +//------------------------------------------------------------------------------ + +/** + Provides the current offset in the audio file as an NSTimeInterval (i.e. in seconds). When setting this it will determine the correct frame offset and perform a `seekToFrame` to the new time offset. + @warning Make sure the new current time offset is less than the `duration` or you will receive an invalid seek assertion. + */ +@property (nonatomic, readwrite) NSTimeInterval currentTime; + +//------------------------------------------------------------------------------ + +/** + Provides the duration of the audio file in seconds. + */ +@property (readonly) NSTimeInterval duration; + +//------------------------------------------------------------------------------ + +/** + Provides the AudioStreamBasicDescription structure containing the format of the file. + @return An AudioStreamBasicDescription structure describing the format of the audio file. + */ +@property (readonly) AudioStreamBasicDescription fileFormat; + +//------------------------------------------------------------------------------ + +/** + Provides the current time as an NSString with the time format MM:SS. + */ +@property (readonly) NSString *formattedCurrentTime; + +//------------------------------------------------------------------------------ + +/** + Provides the duration as an NSString with the time format MM:SS. + */ +@property (readonly) NSString *formattedDuration; + +//------------------------------------------------------------------------------ + +/** + Provides the frame index (a.k.a. the seek position) within the audio file as a SInt64. This can be helpful when seeking through the audio file. + @return The current frame index within the audio file as a SInt64.
+ */ +@property (readonly) SInt64 frameIndex; + +//------------------------------------------------------------------------------ + +/** + Provides a dictionary containing the metadata (ID3) tags that are included in the header for the audio file. Typically this contains stuff like artist, title, release year, etc. + @return An NSDictionary containing the metadata for the audio file. + */ +@property (readonly) NSDictionary *metadata; + +//------------------------------------------------------------------------------ + +/** + Provides the total duration of the audio file in seconds. + @deprecated This property is deprecated starting in version 0.3.0. + @note Please use the `duration` property instead. + @return The total duration of the audio file as an NSTimeInterval. + */ +@property (readonly) NSTimeInterval totalDuration __attribute__((deprecated)); + +//------------------------------------------------------------------------------ + +/** + Provides the total frame count of the audio file in the client format. + @return The total number of frames in the audio file in the AudioStreamBasicDescription representing the client format as a SInt64. + */ +@property (readonly) SInt64 totalClientFrames; + +//------------------------------------------------------------------------------ + +/** + Provides the total frame count of the audio file in the file format. + @return The total number of frames in the audio file in the AudioStreamBasicDescription representing the file format as a SInt64. + */ +@property (readonly) SInt64 totalFrames; + +//------------------------------------------------------------------------------ + +/** + Provides the NSURL for the audio file. + @return An NSURL representing the path of the EZAudioFile instance. + */ +@property (nonatomic, copy, readonly) NSURL *url; + +//------------------------------------------------------------------------------ +#pragma mark - Helpers +//------------------------------------------------------------------------------ + +/** + Synchronously pulls the waveform amplitude data into a float array for the receiver. This returns a waveform with a default resolution of 1024, meaning there are 1024 data points to plot the waveform. + @return An EZAudioFloatData instance containing the audio data for all channels of the audio. + */ +- (EZAudioFloatData *)getWaveformData; + +//------------------------------------------------------------------------------ + +/** + Synchronously pulls the waveform amplitude data into a float array for the receiver. + @param numberOfPoints A UInt32 representing the number of data points you need. The higher the number of points the more detailed the waveform will be. + @return An EZAudioFloatData instance containing the audio data for all channels of the audio. + */ +- (EZAudioFloatData *)getWaveformDataWithNumberOfPoints:(UInt32)numberOfPoints; + +//------------------------------------------------------------------------------ + +/** + Asynchronously pulls the waveform amplitude data into a float array for the receiver. This returns a waveform with a default resolution of 1024, meaning there are 1024 data points to plot the waveform. + @param completion An EZAudioWaveformDataCompletionBlock that executes when the waveform data has been extracted. Provides an `EZAudioFloatData` instance containing the waveform data for all audio channels.
+ */ +- (void)getWaveformDataWithCompletionBlock:(EZAudioWaveformDataCompletionBlock)completion; + +//------------------------------------------------------------------------------ + +/** + Asynchronously pulls the waveform amplitude data into a float array for the receiver. + @param numberOfPoints A UInt32 representing the number of data points you need. The higher the number of points the more detailed the waveform will be. + @param completion A EZAudioWaveformDataCompletionBlock that executes when the waveform data has been extracted. Provides a `EZAudioFloatData` instance containing the waveform data for all audio channels. + */ +- (void)getWaveformDataWithNumberOfPoints:(UInt32)numberOfPoints + completion:(EZAudioWaveformDataCompletionBlock)completion; + +//------------------------------------------------------------------------------ + +@end diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFile.m b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFile.m new file mode 100644 index 0000000..447bc7b --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFile.m @@ -0,0 +1,735 @@ +// +// EZAudioFile.m +// EZAudio +// +// Created by Syed Haris Ali on 12/1/13. +// Copyright (c) 2013 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
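For reference, the EZAudioFileDelegate read callback declared in the header above might be implemented along these lines. This is only a sketch: the main-queue dispatch and the plotting call in the comment are assumptions for typical UI use, not something EZAudioFile itself requires.

- (void)audioFile:(EZAudioFile *)audioFile
        readAudio:(float **)buffer
   withBufferSize:(UInt32)bufferSize
withNumberOfChannels:(UInt32)numberOfChannels
{
    // buffer[0] is the left channel's float array, buffer[1] the right channel's in a stereo file.
    // Assume the callback may arrive off the main thread, so hop to it before touching UI.
    dispatch_async(dispatch_get_main_queue(), ^{
        // e.g. update an audio plot with buffer[0] and bufferSize here (plot API not shown).
    });
}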
+ +#import "EZAudioFile.h" + +//------------------------------------------------------------------------------ + +#import "EZAudio.h" +#import "EZAudioFloatConverter.h" +#import "EZAudioFloatData.h" +#include + +// constants +static UInt32 EZAudioFileWaveformDefaultResolution = 1024; +static NSString *EZAudioFileWaveformDataQueueIdentifier = @"com.ezaudio.waveformQueue"; + +//------------------------------------------------------------------------------ + +typedef struct +{ + AudioFileID audioFileID; + AudioStreamBasicDescription clientFormat; + NSTimeInterval duration; + ExtAudioFileRef extAudioFileRef; + AudioStreamBasicDescription fileFormat; + SInt64 frames; + CFURLRef sourceURL; +} EZAudioFileInfo; + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioFile +//------------------------------------------------------------------------------ + +@interface EZAudioFile () +@property (nonatomic, strong) EZAudioFloatConverter *floatConverter; +@property (nonatomic) float **floatData; +@property (nonatomic) EZAudioFileInfo *info; +@property (nonatomic) pthread_mutex_t lock; +@property (nonatomic) dispatch_queue_t waveformQueue; +@end + +//------------------------------------------------------------------------------ + +@implementation EZAudioFile + +//------------------------------------------------------------------------------ +#pragma mark - Dealloc +//------------------------------------------------------------------------------ + +- (void)dealloc +{ + self.floatConverter = nil; + pthread_mutex_destroy(&_lock); + [EZAudioUtilities freeFloatBuffers:self.floatData numberOfChannels:self.clientFormat.mChannelsPerFrame]; + [EZAudioUtilities checkResult:ExtAudioFileDispose(self.info->extAudioFileRef) operation:"Failed to dispose of ext audio file"]; + free(self.info); +} + +//------------------------------------------------------------------------------ +#pragma mark - Initialization +//------------------------------------------------------------------------------ + +- (instancetype)init +{ + self = [super init]; + if (self) + { + self.info = (EZAudioFileInfo *)malloc(sizeof(EZAudioFileInfo)); + _floatData = NULL; + pthread_mutex_init(&_lock, NULL); + _waveformQueue = dispatch_queue_create(EZAudioFileWaveformDataQueueIdentifier.UTF8String, DISPATCH_QUEUE_PRIORITY_DEFAULT); + } + return self; +} + +//------------------------------------------------------------------------------ + +- (instancetype)initWithURL:(NSURL *)url +{ + return [self initWithURL:url delegate:nil]; +} + +//------------------------------------------------------------------------------ + +- (instancetype)initWithURL:(NSURL *)url + delegate:(id)delegate +{ + return [self initWithURL:url + delegate:delegate + clientFormat:[self.class defaultClientFormat]]; +} + +//------------------------------------------------------------------------------ + +- (instancetype)initWithURL:(NSURL *)url + delegate:(id)delegate + clientFormat:(AudioStreamBasicDescription)clientFormat +{ + self = [self init]; + if (self) + { + self.info->sourceURL = (__bridge CFURLRef)(url); + self.info->clientFormat = clientFormat; + self.delegate = delegate; + if (![self setup]) + { + return nil; + } + } + return self; +} + +//------------------------------------------------------------------------------ +#pragma mark - Class Initializers +//------------------------------------------------------------------------------ + ++ (instancetype)audioFileWithURL:(NSURL *)url +{ + return [[self alloc] initWithURL:url]; +} 
+ +//------------------------------------------------------------------------------ + ++ (instancetype)audioFileWithURL:(NSURL *)url + delegate:(id)delegate +{ + return [[self alloc] initWithURL:url delegate:delegate]; +} + +//------------------------------------------------------------------------------ + ++ (instancetype)audioFileWithURL:(NSURL *)url + delegate:(id)delegate + clientFormat:(AudioStreamBasicDescription)clientFormat +{ + return [[self alloc] initWithURL:url + delegate:delegate + clientFormat:clientFormat]; +} + +//------------------------------------------------------------------------------ +#pragma mark - NSCopying +//------------------------------------------------------------------------------ + +- (id)copyWithZone:(NSZone *)zone +{ + return [EZAudioFile audioFileWithURL:self.url]; +} + +//------------------------------------------------------------------------------ +#pragma mark - Class Methods +//------------------------------------------------------------------------------ + ++ (AudioStreamBasicDescription)defaultClientFormat +{ + return [EZAudioUtilities stereoFloatNonInterleavedFormatWithSampleRate:[self defaultClientFormatSampleRate]]; +} + +//------------------------------------------------------------------------------ + ++ (Float64)defaultClientFormatSampleRate +{ + return 44100.0f; +} + +//------------------------------------------------------------------------------ + ++ (NSArray *)supportedAudioFileTypes +{ + return @ + [ + @"aac", + @"caf", + @"aif", + @"aiff", + @"aifc", + @"mp3", + @"mp4", + @"m4a", + @"snd", + @"au", + @"sd2", + @"wav" + ]; +} + +//------------------------------------------------------------------------------ +#pragma mark - Setup +//------------------------------------------------------------------------------ + +- (BOOL)setup +{ + // + // Try to open the file, bail if the file could not be opened + // + BOOL success = [self openAudioFile]; + if (!success) + { + return success; + } + + // + // Set the client format + // + self.clientFormat = self.info->clientFormat; + + return YES; +} + +//------------------------------------------------------------------------------ +#pragma mark - Creating/Opening Audio File +//------------------------------------------------------------------------------ + +- (BOOL)openAudioFile +{ + // + // Need a source url + // + NSAssert(self.info->sourceURL, @"EZAudioFile cannot be created without a source url!"); + + // + // Determine if the file actually exists + // + CFURLRef url = self.info->sourceURL; + NSURL *fileURL = (__bridge NSURL *)(url); + BOOL fileExists = [[NSFileManager defaultManager] fileExistsAtPath:fileURL.path]; + + // + // Create an ExtAudioFileRef for the file handle + // + if (fileExists) + { + [EZAudioUtilities checkResult:ExtAudioFileOpenURL(url, &self.info->extAudioFileRef) + operation:"Failed to create ExtAudioFileRef"]; + } + else + { + return NO; + } + + // + // Get the underlying AudioFileID + // + UInt32 propSize = sizeof(self.info->audioFileID); + [EZAudioUtilities checkResult:ExtAudioFileGetProperty(self.info->extAudioFileRef, + kExtAudioFileProperty_AudioFile, + &propSize, + &self.info->audioFileID) + operation:"Failed to get underlying AudioFileID"]; + + // + // Store the file format + // + propSize = sizeof(self.info->fileFormat); + [EZAudioUtilities checkResult:ExtAudioFileGetProperty(self.info->extAudioFileRef, + kExtAudioFileProperty_FileDataFormat, + &propSize, + &self.info->fileFormat) + operation:"Failed to get file audio format on existing audio file"]; + + // + // Get 
the total frames and duration + // + propSize = sizeof(SInt64); + [EZAudioUtilities checkResult:ExtAudioFileGetProperty(self.info->extAudioFileRef, + kExtAudioFileProperty_FileLengthFrames, + &propSize, + &self.info->frames) + operation:"Failed to get total frames"]; + self.info->duration = (NSTimeInterval) self.info->frames / self.info->fileFormat.mSampleRate; + + return YES; +} + +//------------------------------------------------------------------------------ +#pragma mark - Events +//------------------------------------------------------------------------------ + +- (void)readFrames:(UInt32)frames + audioBufferList:(AudioBufferList *)audioBufferList + bufferSize:(UInt32 *)bufferSize + eof:(BOOL *)eof +{ + if (pthread_mutex_trylock(&_lock) == 0) + { + // perform read + [EZAudioUtilities checkResult:ExtAudioFileRead(self.info->extAudioFileRef, + &frames, + audioBufferList) + operation:"Failed to read audio data from file"]; + *bufferSize = frames; + *eof = frames == 0; + + // + // Notify delegate + // + if ([self.delegate respondsToSelector:@selector(audioFileUpdatedPosition:)]) + { + [self.delegate audioFileUpdatedPosition:self]; + } + + // + // Deprecated, but supported until 1.0 + // +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + if ([self.delegate respondsToSelector:@selector(audioFile:updatedPosition:)]) + { + [self.delegate audioFile:self updatedPosition:[self frameIndex]]; + } +#pragma GCC diagnostic pop + + if ([self.delegate respondsToSelector:@selector(audioFile:readAudio:withBufferSize:withNumberOfChannels:)]) + { + // convert into float data + [self.floatConverter convertDataFromAudioBufferList:audioBufferList + withNumberOfFrames:*bufferSize + toFloatBuffers:self.floatData]; + + // notify delegate + UInt32 channels = self.clientFormat.mChannelsPerFrame; + [self.delegate audioFile:self + readAudio:self.floatData + withBufferSize:*bufferSize + withNumberOfChannels:channels]; + } + + pthread_mutex_unlock(&_lock); + + } +} + +//------------------------------------------------------------------------------ + +- (void)seekToFrame:(SInt64)frame +{ + if (pthread_mutex_trylock(&_lock) == 0) + { + [EZAudioUtilities checkResult:ExtAudioFileSeek(self.info->extAudioFileRef, + frame) + operation:"Failed to seek frame position within audio file"]; + + pthread_mutex_unlock(&_lock); + + // + // Notify delegate + // + if ([self.delegate respondsToSelector:@selector(audioFileUpdatedPosition:)]) + { + [self.delegate audioFileUpdatedPosition:self]; + } + + // + // Deprecated, but supported until 1.0 + // +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + if ([self.delegate respondsToSelector:@selector(audioFile:updatedPosition:)]) + { + [self.delegate audioFile:self updatedPosition:[self frameIndex]]; + } +#pragma GCC diagnostic pop + } +} + +//------------------------------------------------------------------------------ +#pragma mark - Getters +//------------------------------------------------------------------------------ + +- (AudioStreamBasicDescription)floatFormat +{ + return [EZAudioUtilities stereoFloatNonInterleavedFormatWithSampleRate:44100.0f]; +} + +//------------------------------------------------------------------------------ + +- (EZAudioFloatData *)getWaveformData +{ + return [self getWaveformDataWithNumberOfPoints:EZAudioFileWaveformDefaultResolution]; +} + +//------------------------------------------------------------------------------ + +- (EZAudioFloatData 
*)getWaveformDataWithNumberOfPoints:(UInt32)numberOfPoints +{ + EZAudioFloatData *waveformData; + if (pthread_mutex_trylock(&_lock) == 0) + { + // store current frame + SInt64 currentFrame = self.frameIndex; + BOOL interleaved = [EZAudioUtilities isInterleaved:self.clientFormat]; + UInt32 channels = self.clientFormat.mChannelsPerFrame; + if (channels == 0) + { + // prevent division by zero + pthread_mutex_unlock(&_lock); + return nil; + } + float **data = (float **)malloc( sizeof(float*) * channels ); + for (int i = 0; i < channels; i++) + { + data[i] = (float *)malloc( sizeof(float) * numberOfPoints ); + } + + // seek to 0 + [EZAudioUtilities checkResult:ExtAudioFileSeek(self.info->extAudioFileRef, + 0) + operation:"Failed to seek frame position within audio file"]; + + // calculate the required number of frames per buffer + SInt64 framesPerBuffer = ((SInt64) self.totalClientFrames / numberOfPoints); + SInt64 framesPerChannel = framesPerBuffer / channels; + + // allocate an audio buffer list + AudioBufferList *audioBufferList = [EZAudioUtilities audioBufferListWithNumberOfFrames:(UInt32)framesPerBuffer + numberOfChannels:self.info->clientFormat.mChannelsPerFrame + interleaved:interleaved]; + + // read through file and calculate rms at each point + for (SInt64 i = 0; i < numberOfPoints; i++) + { + UInt32 bufferSize = (UInt32) framesPerBuffer; + [EZAudioUtilities checkResult:ExtAudioFileRead(self.info->extAudioFileRef, + &bufferSize, + audioBufferList) + operation:"Failed to read audio data from file waveform"]; + if (interleaved) + { + float *buffer = (float *)audioBufferList->mBuffers[0].mData; + for (int channel = 0; channel < channels; channel++) + { + float channelData[framesPerChannel]; + for (int frame = 0; frame < framesPerChannel; frame++) + { + channelData[frame] = buffer[frame * channels + channel]; + } + float rms = [EZAudioUtilities RMS:channelData length:(UInt32)framesPerChannel]; + data[channel][i] = rms; + } + } + else + { + for (int channel = 0; channel < channels; channel++) + { + float *channelData = audioBufferList->mBuffers[channel].mData; + float rms = [EZAudioUtilities RMS:channelData length:bufferSize]; + data[channel][i] = rms; + } + } + } + + // clean up + [EZAudioUtilities freeBufferList:audioBufferList]; + + // seek back to previous position + [EZAudioUtilities checkResult:ExtAudioFileSeek(self.info->extAudioFileRef, + currentFrame) + operation:"Failed to seek frame position within audio file"]; + + pthread_mutex_unlock(&_lock); + + waveformData = [EZAudioFloatData dataWithNumberOfChannels:channels + buffers:(float **)data + bufferSize:numberOfPoints]; + + // cleanup + for (int i = 0; i < channels; i++) + { + free(data[i]); + } + free(data); + } + return waveformData; +} + +//------------------------------------------------------------------------------ + +- (void)getWaveformDataWithCompletionBlock:(EZAudioWaveformDataCompletionBlock)waveformDataCompletionBlock +{ + [self getWaveformDataWithNumberOfPoints:EZAudioFileWaveformDefaultResolution + completion:waveformDataCompletionBlock]; +} + +//------------------------------------------------------------------------------ + +- (void)getWaveformDataWithNumberOfPoints:(UInt32)numberOfPoints + completion:(EZAudioWaveformDataCompletionBlock)completion +{ + if (!completion) + { + return; + } + + // async get waveform data + __weak EZAudioFile *weakSelf = self; + dispatch_async(self.waveformQueue, ^{ + EZAudioFloatData *waveformData = [weakSelf getWaveformDataWithNumberOfPoints:numberOfPoints]; + 
dispatch_async(dispatch_get_main_queue(), ^{ + completion(waveformData.buffers, waveformData.bufferSize); + }); + }); +} + +//------------------------------------------------------------------------------ + +- (AudioStreamBasicDescription)clientFormat +{ + return self.info->clientFormat; +} + +//------------------------------------------------------------------------------ + +- (NSTimeInterval)currentTime +{ + return [EZAudioUtilities MAP:(float)[self frameIndex] + leftMin:0.0f + leftMax:(float)[self totalFrames] + rightMin:0.0f + rightMax:[self duration]]; +} + +//------------------------------------------------------------------------------ + +- (NSTimeInterval)duration +{ + return self.info->duration; +} + +//------------------------------------------------------------------------------ + +- (AudioStreamBasicDescription)fileFormat +{ + return self.info->fileFormat; +} + +//------------------------------------------------------------------------------ + +- (NSString *)formattedCurrentTime +{ + return [EZAudioUtilities displayTimeStringFromSeconds:[self currentTime]]; +} + +//------------------------------------------------------------------------------ + +- (NSString *)formattedDuration +{ + return [EZAudioUtilities displayTimeStringFromSeconds:[self duration]]; +} + +//------------------------------------------------------------------------------ + +- (SInt64)frameIndex +{ + SInt64 frameIndex; + [EZAudioUtilities checkResult:ExtAudioFileTell(self.info->extAudioFileRef, &frameIndex) + operation:"Failed to get frame index"]; + return frameIndex; +} + +//------------------------------------------------------------------------------ + +- (NSDictionary *)metadata +{ + // get size of metadata property (dictionary) + UInt32 propSize = sizeof(self.info->audioFileID); + CFDictionaryRef metadata; + UInt32 writable; + [EZAudioUtilities checkResult:AudioFileGetPropertyInfo(self.info->audioFileID, + kAudioFilePropertyInfoDictionary, + &propSize, + &writable) + operation:"Failed to get the size of the metadata dictionary"]; + + // pull metadata + [EZAudioUtilities checkResult:AudioFileGetProperty(self.info->audioFileID, + kAudioFilePropertyInfoDictionary, + &propSize, + &metadata) + operation:"Failed to get metadata dictionary"]; + + // cast to NSDictionary + return (__bridge NSDictionary*)metadata; +} + +//------------------------------------------------------------------------------ + +- (NSTimeInterval)totalDuration +{ + return self.info->duration; +} + +//------------------------------------------------------------------------------ + +- (SInt64)totalClientFrames +{ + SInt64 totalFrames = [self totalFrames]; + AudioStreamBasicDescription clientFormat = self.info->clientFormat; + AudioStreamBasicDescription fileFormat = self.info->fileFormat; + BOOL sameSampleRate = clientFormat.mSampleRate == fileFormat.mSampleRate; + if (!sameSampleRate) + { + totalFrames = self.info->duration * clientFormat.mSampleRate; + } + return totalFrames; +} + +//------------------------------------------------------------------------------ + +- (SInt64)totalFrames +{ + return self.info->frames; +} + +//------------------------------------------------------------------------------ + +- (NSURL *)url +{ + return (__bridge NSURL*)self.info->sourceURL; +} + +//------------------------------------------------------------------------------ +#pragma mark - Setters +//------------------------------------------------------------------------------ + +- (void)setClientFormat:(AudioStreamBasicDescription)clientFormat +{ + // + // 
Clear any float data currently cached + // + if (self.floatData) + { + self.floatData = nil; + } + + // + // Client format can only be linear PCM! + // + NSAssert([EZAudioUtilities isLinearPCM:clientFormat], @"Client format must be linear PCM"); + + // + // Store the client format + // + self.info->clientFormat = clientFormat; + + // + // Set the client format on the ExtAudioFileRef + // + [EZAudioUtilities checkResult:ExtAudioFileSetProperty(self.info->extAudioFileRef, + kExtAudioFileProperty_ClientDataFormat, + sizeof(clientFormat), + &clientFormat) + operation:"Couldn't set client data format on file"]; + + // + // Create a new float converter using the client format as the input format + // + self.floatConverter = [EZAudioFloatConverter converterWithInputFormat:clientFormat]; + + // + // Determine how big our float buffers need to be to hold a buffer of float + // data for the audio received callback. + // + UInt32 maxPacketSize; + UInt32 propSize = sizeof(maxPacketSize); + [EZAudioUtilities checkResult:ExtAudioFileGetProperty(self.info->extAudioFileRef, + kExtAudioFileProperty_ClientMaxPacketSize, + &propSize, + &maxPacketSize) + operation:"Failed to get max packet size"]; + + self.floatData = [EZAudioUtilities floatBuffersWithNumberOfFrames:1024 + numberOfChannels:self.clientFormat.mChannelsPerFrame]; +} + +//------------------------------------------------------------------------------ + +- (void)setCurrentTime:(NSTimeInterval)currentTime +{ + NSAssert(currentTime < [self duration], @"Invalid seek operation, expected current time to be less than duration"); + SInt64 frame = [EZAudioUtilities MAP:currentTime + leftMin:0.0f + leftMax:[self duration] + rightMin:0.0f + rightMax:[self totalFrames]]; + [self seekToFrame:frame]; +} + +//------------------------------------------------------------------------------ +#pragma mark - Description +//------------------------------------------------------------------------------ + +- (NSString *)description +{ + return [NSString stringWithFormat:@"%@ {\n" + " url: %@,\n" + " duration: %f,\n" + " totalFrames: %lld,\n" + " metadata: %@,\n" + " fileFormat: { %@ },\n" + " clientFormat: { %@ } \n" + "}", + [super description], + [self url], + [self duration], + [self totalFrames], + [self metadata], + [EZAudioUtilities stringForAudioStreamBasicDescription:[self fileFormat]], + [EZAudioUtilities stringForAudioStreamBasicDescription:[self clientFormat]]]; +} + +//------------------------------------------------------------------------------ + +@end diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFloatConverter.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFloatConverter.h new file mode 100644 index 0000000..63d5635 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFloatConverter.h @@ -0,0 +1,75 @@ +// +// EZAudioFloatConverter.h +// EZAudio +// +// Created by Syed Haris Ali on 6/23/15. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import +#import + +//------------------------------------------------------------------------------ +#pragma mark - Constants +//------------------------------------------------------------------------------ + +FOUNDATION_EXPORT UInt32 const EZAudioFloatConverterDefaultPacketSize; + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioFloatConverter +//------------------------------------------------------------------------------ + +@interface EZAudioFloatConverter : NSObject + +//------------------------------------------------------------------------------ +#pragma mark - Class Methods +//------------------------------------------------------------------------------ + ++ (instancetype)converterWithInputFormat:(AudioStreamBasicDescription)inputFormat; + +//------------------------------------------------------------------------------ +#pragma mark - Properties +//------------------------------------------------------------------------------ + +@property (nonatomic, assign, readonly) AudioStreamBasicDescription inputFormat; +@property (nonatomic, assign, readonly) AudioStreamBasicDescription floatFormat; + +//------------------------------------------------------------------------------ +#pragma mark - Instance Methods +//------------------------------------------------------------------------------ + +- (instancetype)initWithInputFormat:(AudioStreamBasicDescription)inputFormat; + +//------------------------------------------------------------------------------ + +- (void)convertDataFromAudioBufferList:(AudioBufferList *)audioBufferList + withNumberOfFrames:(UInt32)frames + toFloatBuffers:(float **)buffers; + +//------------------------------------------------------------------------------ + +- (void)convertDataFromAudioBufferList:(AudioBufferList *)audioBufferList + withNumberOfFrames:(UInt32)frames + toFloatBuffers:(float **)buffers + packetDescriptions:(AudioStreamPacketDescription *)packetDescriptions; + +//------------------------------------------------------------------------------ + +@end diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFloatConverter.m b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFloatConverter.m new file mode 100644 index 0000000..bf7bff9 --- /dev/null +++ 
b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFloatConverter.m @@ -0,0 +1,239 @@ +// +// EZAudioFloatConverter.m +// EZAudio +// +// Created by Syed Haris Ali on 6/23/15. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import "EZAudioFloatConverter.h" +#import "EZAudioUtilities.h" + +//------------------------------------------------------------------------------ +#pragma mark - Constants +//------------------------------------------------------------------------------ + +static UInt32 EZAudioFloatConverterDefaultOutputBufferSize = 128 * 32; +UInt32 const EZAudioFloatConverterDefaultPacketSize = 2048; + +//------------------------------------------------------------------------------ +#pragma mark - Data Structures +//------------------------------------------------------------------------------ + +typedef struct +{ + AudioConverterRef converterRef; + AudioBufferList *floatAudioBufferList; + AudioStreamBasicDescription inputFormat; + AudioStreamBasicDescription outputFormat; + AudioStreamPacketDescription *packetDescriptions; + UInt32 packetsPerBuffer; +} EZAudioFloatConverterInfo; + +//------------------------------------------------------------------------------ +#pragma mark - Callbacks +//------------------------------------------------------------------------------ + +OSStatus EZAudioFloatConverterCallback(AudioConverterRef inAudioConverter, + UInt32 *ioNumberDataPackets, + AudioBufferList *ioData, + AudioStreamPacketDescription **outDataPacketDescription, + void *inUserData) +{ + AudioBufferList *sourceBuffer = (AudioBufferList *)inUserData; + + memcpy(ioData, + sourceBuffer, + sizeof(AudioBufferList) + (sourceBuffer->mNumberBuffers - 1) * sizeof(AudioBuffer)); + sourceBuffer = NULL; + + return noErr; +} + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioFloatConverter (Interface Extension) +//------------------------------------------------------------------------------ + +@interface EZAudioFloatConverter () +@property (nonatomic, assign) EZAudioFloatConverterInfo *info; +@end + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioFloatConverter (Implementation) +//------------------------------------------------------------------------------ + +@implementation EZAudioFloatConverter + 
+//------------------------------------------------------------------------------ +#pragma mark - Class Methods +//------------------------------------------------------------------------------ + ++ (instancetype)converterWithInputFormat:(AudioStreamBasicDescription)inputFormat +{ + return [[self alloc] initWithInputFormat:inputFormat]; +} + +//------------------------------------------------------------------------------ +#pragma mark - Dealloc +//------------------------------------------------------------------------------ + +- (void)dealloc +{ + AudioConverterDispose(self.info->converterRef); + [EZAudioUtilities freeBufferList:self.info->floatAudioBufferList]; + free(self.info->packetDescriptions); + free(self.info); +} + +//------------------------------------------------------------------------------ +#pragma mark - Initialization +//------------------------------------------------------------------------------ + +- (instancetype)initWithInputFormat:(AudioStreamBasicDescription)inputFormat +{ + self = [super init]; + if (self) + { + self.info = (EZAudioFloatConverterInfo *)malloc(sizeof(EZAudioFloatConverterInfo)); + memset(self.info, 0, sizeof(EZAudioFloatConverterInfo)); + self.info->inputFormat = inputFormat; + [self setup]; + } + return self; +} + +//------------------------------------------------------------------------------ +#pragma mark - Setup +//------------------------------------------------------------------------------ + +- (void)setup +{ + // create output format + self.info->outputFormat = [EZAudioUtilities floatFormatWithNumberOfChannels:self.info->inputFormat.mChannelsPerFrame + sampleRate:self.info->inputFormat.mSampleRate]; + + // create a new instance of the audio converter + [EZAudioUtilities checkResult:AudioConverterNew(&self.info->inputFormat, + &self.info->outputFormat, + &self.info->converterRef) + operation:"Failed to create new audio converter"]; + + // get max packets per buffer so you can allocate a proper AudioBufferList + UInt32 packetsPerBuffer = 0; + UInt32 outputBufferSize = EZAudioFloatConverterDefaultOutputBufferSize; + UInt32 sizePerPacket = self.info->inputFormat.mBytesPerPacket; + BOOL isVBR = sizePerPacket == 0; + + // VBR + if (isVBR) + { + // determine the max output buffer size + UInt32 maxOutputPacketSize; + UInt32 propSize = sizeof(maxOutputPacketSize); + OSStatus result = AudioConverterGetProperty(self.info->converterRef, + kAudioConverterPropertyMaximumOutputPacketSize, + &propSize, + &maxOutputPacketSize); + if (result != noErr) + { + maxOutputPacketSize = EZAudioFloatConverterDefaultPacketSize; + } + + // set the output buffer size to at least the max output size + if (maxOutputPacketSize > outputBufferSize) + { + outputBufferSize = maxOutputPacketSize; + } + packetsPerBuffer = outputBufferSize / maxOutputPacketSize; + + // allocate memory for the packet descriptions + self.info->packetDescriptions = (AudioStreamPacketDescription *)malloc(sizeof(AudioStreamPacketDescription) * packetsPerBuffer); + } + else + { + packetsPerBuffer = outputBufferSize / sizePerPacket; + } + self.info->packetsPerBuffer = packetsPerBuffer; + + // allocate the AudioBufferList to hold the float values + BOOL isInterleaved = [EZAudioUtilities isInterleaved:self.info->outputFormat]; + self.info->floatAudioBufferList = [EZAudioUtilities audioBufferListWithNumberOfFrames:packetsPerBuffer + numberOfChannels:self.info->outputFormat.mChannelsPerFrame + interleaved:isInterleaved]; +} + 
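The setup method above allocates the converter's scratch buffer list and packet descriptions; after that, turning an AudioBufferList into per-channel float arrays is a single call. A hedged caller-side sketch follows (the 512-frame count and the empty buffer list are placeholder assumptions; in practice the buffer list would be filled by something like an EZAudioFile read):

UInt32 frames = 512;
AudioStreamBasicDescription inputFormat =
    [EZAudioUtilities stereoFloatNonInterleavedFormatWithSampleRate:44100.0f];
AudioBufferList *bufferList =
    [EZAudioUtilities audioBufferListWithNumberOfFrames:frames
                                       numberOfChannels:inputFormat.mChannelsPerFrame
                                            interleaved:NO];
// ... fill bufferList with audio, e.g. from an EZAudioFile read ...
EZAudioFloatConverter *converter =
    [EZAudioFloatConverter converterWithInputFormat:inputFormat];
float **floatBuffers =
    [EZAudioUtilities floatBuffersWithNumberOfFrames:frames
                                    numberOfChannels:inputFormat.mChannelsPerFrame];
[converter convertDataFromAudioBufferList:bufferList
                       withNumberOfFrames:frames
                           toFloatBuffers:floatBuffers];
// floatBuffers[channel][frame] now holds the converted float samples.
[EZAudioUtilities freeFloatBuffers:floatBuffers numberOfChannels:inputFormat.mChannelsPerFrame];
[EZAudioUtilities freeBufferList:bufferList];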
+//------------------------------------------------------------------------------ +#pragma mark - Events +//------------------------------------------------------------------------------ + +- (void)convertDataFromAudioBufferList:(AudioBufferList *)audioBufferList + withNumberOfFrames:(UInt32)frames + toFloatBuffers:(float **)buffers +{ + [self convertDataFromAudioBufferList:audioBufferList + withNumberOfFrames:frames + toFloatBuffers:buffers + packetDescriptions:self.info->packetDescriptions]; +} + +//------------------------------------------------------------------------------ + +- (void)convertDataFromAudioBufferList:(AudioBufferList *)audioBufferList + withNumberOfFrames:(UInt32)frames + toFloatBuffers:(float **)buffers + packetDescriptions:(AudioStreamPacketDescription *)packetDescriptions +{ + if (frames != 0) + { + // + // Make sure the data size coming in is consistent with the number + // of frames we're actually getting + // + for (int i = 0; i < audioBufferList->mNumberBuffers; i++) { + audioBufferList->mBuffers[i].mDataByteSize = frames * self.info->inputFormat.mBytesPerFrame; + } + + // + // Fill out the audio converter with the source buffer + // + [EZAudioUtilities checkResult:AudioConverterFillComplexBuffer(self.info->converterRef, + EZAudioFloatConverterCallback, + audioBufferList, + &frames, + self.info->floatAudioBufferList, + packetDescriptions ? packetDescriptions : self.info->packetDescriptions) + operation:"Failed to fill complex buffer in float converter"]; + + // + // Copy the converted buffers into the float buffer array stored + // in memory + // + for (int i = 0; i < self.info->floatAudioBufferList->mNumberBuffers; i++) + { + memcpy(buffers[i], + self.info->floatAudioBufferList->mBuffers[i].mData, + self.info->floatAudioBufferList->mBuffers[i].mDataByteSize); + } + } +} + +//------------------------------------------------------------------------------ + +@end \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFloatData.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFloatData.h new file mode 100644 index 0000000..547c715 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFloatData.h @@ -0,0 +1,52 @@ +// +// EZAudioFloatData.h +// EZAudio +// +// Created by Syed Haris Ali on 6/23/15. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioFloatData +//------------------------------------------------------------------------------ + +@interface EZAudioFloatData : NSObject + +//------------------------------------------------------------------------------ + ++ (instancetype)dataWithNumberOfChannels:(int)numberOfChannels + buffers:(float **)buffers + bufferSize:(UInt32)bufferSize; + +//------------------------------------------------------------------------------ + +@property (nonatomic, assign, readonly) int numberOfChannels; +@property (nonatomic, assign, readonly) float **buffers; +@property (nonatomic, assign, readonly) UInt32 bufferSize; + +//------------------------------------------------------------------------------ + +- (float *)bufferForChannel:(int)channel; + +//------------------------------------------------------------------------------ + +@end \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFloatData.m b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFloatData.m new file mode 100644 index 0000000..674517e --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioFloatData.m @@ -0,0 +1,85 @@ +// +// EZAudioFloatData.m +// EZAudio +// +// Created by Syed Haris Ali on 6/23/15. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
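EZAudioFloatData, implemented below, is the container the EZAudioFile waveform helpers hand back. A brief, hypothetical consumption sketch (it assumes audioFile is an existing EZAudioFile instance; 1024 mirrors the documented default waveform resolution):

EZAudioFloatData *waveform = [audioFile getWaveformDataWithNumberOfPoints:1024];
float *leftChannel = [waveform bufferForChannel:0];
for (UInt32 i = 0; i < waveform.bufferSize; i++)
{
    // leftChannel[i] is one RMS point of the left channel's waveform.
    // ... plot or log the value here ...
}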
+ +#import "EZAudioFloatData.h" +#import "EZAudioUtilities.h" + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioFloatData +//------------------------------------------------------------------------------ + +@interface EZAudioFloatData () +@property (nonatomic, assign, readwrite) int numberOfChannels; +@property (nonatomic, assign, readwrite) float **buffers; +@property (nonatomic, assign, readwrite) UInt32 bufferSize; +@end + +//------------------------------------------------------------------------------ + +@implementation EZAudioFloatData + +//------------------------------------------------------------------------------ + +- (void)dealloc +{ + [EZAudioUtilities freeFloatBuffers:self.buffers + numberOfChannels:self.numberOfChannels]; +} + +//------------------------------------------------------------------------------ + ++ (instancetype)dataWithNumberOfChannels:(int)numberOfChannels + buffers:(float **)buffers + bufferSize:(UInt32)bufferSize +{ + id data = [[self alloc] init]; + size_t size = sizeof(float) * bufferSize; + float **buffersCopy = [EZAudioUtilities floatBuffersWithNumberOfFrames:bufferSize + numberOfChannels:numberOfChannels]; + for (int i = 0; i < numberOfChannels; i++) + { + memcpy(buffersCopy[i], buffers[i], size); + } + ((EZAudioFloatData *)data).buffers = buffersCopy; + ((EZAudioFloatData *)data).bufferSize = bufferSize; + ((EZAudioFloatData *)data).numberOfChannels = numberOfChannels; + return data; +} + +//------------------------------------------------------------------------------ + +- (float *)bufferForChannel:(int)channel +{ + float *buffer = NULL; + if (channel < self.numberOfChannels) + { + buffer = self.buffers[channel]; + } + return buffer; +} + +//------------------------------------------------------------------------------ + +@end \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioOSX.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioOSX.h new file mode 100644 index 0000000..895b9c9 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioOSX.h @@ -0,0 +1,26 @@ +// +// EZAudioOSX.m +// EZAudio +// +// Created by Tommaso Piazza on 30/09/15. +// Copyright © 2015 Andrew Breckenridge. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +#import diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioPlayer.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioPlayer.h new file mode 100644 index 0000000..246d324 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioPlayer.h @@ -0,0 +1,444 @@ +// +// EZAudioPlayer.h +// EZAudio +// +// Created by Syed Haris Ali on 1/16/14. +// Copyright (c) 2014 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import +#import "TargetConditionals.h" +#import "EZAudioFile.h" +#import "EZOutput.h" + +@class EZAudioPlayer; + +//------------------------------------------------------------------------------ +#pragma mark - Data Structures +//------------------------------------------------------------------------------ + +typedef NS_ENUM(NSUInteger, EZAudioPlayerState) +{ + EZAudioPlayerStateEndOfFile, + EZAudioPlayerStatePaused, + EZAudioPlayerStatePlaying, + EZAudioPlayerStateReadyToPlay, + EZAudioPlayerStateSeeking, + EZAudioPlayerStateUnknown, +}; + +//------------------------------------------------------------------------------ +#pragma mark - Notifications +//------------------------------------------------------------------------------ + +/** + Notification that occurs whenever the EZAudioPlayer changes its `audioFile` property. Check the new value using the EZAudioPlayer's `audioFile` property. + */ +FOUNDATION_EXPORT NSString * const EZAudioPlayerDidChangeAudioFileNotification; + +/** + Notification that occurs whenever the EZAudioPlayer changes its `device` property. Check the new value using the EZAudioPlayer's `device` property. + */ +FOUNDATION_EXPORT NSString * const EZAudioPlayerDidChangeOutputDeviceNotification; + +/** + Notification that occurs whenever the EZAudioPlayer changes its `output` component's `pan` property. Check the new value using the EZAudioPlayer's `pan` property. + */ +FOUNDATION_EXPORT NSString * const EZAudioPlayerDidChangePanNotification; + +/** + Notification that occurs whenever the EZAudioPlayer changes its `output` component's play state. Check the new value using the EZAudioPlayer's `isPlaying` property. + */ +FOUNDATION_EXPORT NSString * const EZAudioPlayerDidChangePlayStateNotification; + +/** + Notification that occurs whenever the EZAudioPlayer changes its `output` component's `volume` property. 
Check the new value using the EZAudioPlayer's `volume` property. + */ +FOUNDATION_EXPORT NSString * const EZAudioPlayerDidChangeVolumeNotification; + +/** + Notification that occurs whenever the EZAudioPlayer has reached the end of a file and its `shouldLoop` property has been set to NO. + */ +FOUNDATION_EXPORT NSString * const EZAudioPlayerDidReachEndOfFileNotification; + +/** + Notification that occurs whenever the EZAudioPlayer performs a seek via the `seekToFrame` method or `setCurrentTime:` property setter. Check the new `currentTime` or `frameIndex` value using the EZAudioPlayer's `currentTime` or `frameIndex` property, respectively. + */ +FOUNDATION_EXPORT NSString * const EZAudioPlayerDidSeekNotification; + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioPlayerDelegate +//------------------------------------------------------------------------------ + +/** + The EZAudioPlayerDelegate provides event callbacks for the EZAudioPlayer. Since 0.5.0 the EZAudioPlayerDelegate provides a smaller set of delegate methods in favor of notifications to allow multiple receivers of the EZAudioPlayer event callbacks since only one player is typically used in an application. Specifically, these methods are provided for high frequency callbacks that wrap the EZAudioPlayer's internal EZAudioFile and EZOutput instances. + @warning These callbacks don't necessarily occur on the main thread so make sure you wrap any UI code in a GCD block like: dispatch_async(dispatch_get_main_queue(), ^{ // Update UI }); + */ +@protocol EZAudioPlayerDelegate + +@optional + +//------------------------------------------------------------------------------ + +/** + Triggered by the EZAudioPlayer's internal EZAudioFile's EZAudioFileDelegate callback and notifies the delegate of the read audio data as a float array instead of a buffer list. Common use case of this would be to visualize the float data using an audio plot or audio data dependent OpenGL sketch. + @param audioPlayer The instance of the EZAudioPlayer that triggered the event + @param buffer A float array of float arrays holding the audio data. buffer[0] would be the left channel's float array while buffer[1] would be the right channel's float array in a stereo file. + @param bufferSize The length of the buffers float arrays + @param numberOfChannels The number of channels. 2 for stereo, 1 for mono. + @param audioFile The instance of the EZAudioFile that the event was triggered from + */ +- (void) audioPlayer:(EZAudioPlayer *)audioPlayer + playedAudio:(float **)buffer + withBufferSize:(UInt32)bufferSize + withNumberOfChannels:(UInt32)numberOfChannels + inAudioFile:(EZAudioFile *)audioFile;; + +//------------------------------------------------------------------------------ + +/** + Triggered by EZAudioPlayer's internal EZAudioFile's EZAudioFileDelegate callback and notifies the delegate of the current playback position. The framePosition provides the current frame position and can be calculated against the EZAudioPlayer's total frames using the `totalFrames` function from the EZAudioPlayer. 
+ @param audioPlayer The instance of the EZAudioPlayer that triggered the event + @param framePosition The new frame index as a 64-bit signed integer + @param audioFile The instance of the EZAudioFile that the event was triggered from + */ +- (void)audioPlayer:(EZAudioPlayer *)audioPlayer + updatedPosition:(SInt64)framePosition + inAudioFile:(EZAudioFile *)audioFile; + + +/** + Triggered by EZAudioPlayer's internal EZAudioFile's EZAudioFileDelegate callback and notifies the delegate that the end of the file has been reached. + @param audioPlayer The instance of the EZAudioPlayer that triggered the event + @param audioFile The instance of the EZAudioFile that the event was triggered from + */ +- (void)audioPlayer:(EZAudioPlayer *)audioPlayer +reachedEndOfAudioFile:(EZAudioFile *)audioFile; + +@end + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioPlayer +//------------------------------------------------------------------------------ + +/** + The EZAudioPlayer provides an interface that combines the EZAudioFile and EZOutput to play local audio files. This class acts as the master delegate (the EZAudioFileDelegate) over whatever EZAudioFile instance, the `audioFile` property, it is using for playback as well as the EZOutputDelegate and EZOutputDataSource over whatever EZOutput instance is set as the `output`. Classes that want to get the EZAudioFileDelegate callbacks should implement the EZAudioPlayer's EZAudioPlayerDelegate on the EZAudioPlayer instance. Since 0.5.0 the EZAudioPlayer offers notifications over the usual delegate methods to allow multiple receivers to get the EZAudioPlayer's state changes since one player will typically be used in one application. The EZAudioPlayerDelegate, the `delegate`, provides callbacks for high frequency methods that simply wrap the EZAudioFileDelegate and EZOutputDelegate callbacks for providing the audio buffer played as well as the position updating (you will typically have one scrub bar in an application). + */ +@interface EZAudioPlayer : NSObject + +//------------------------------------------------------------------------------ +#pragma mark - Properties +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Properties +///----------------------------------------------------------- + +/** + The EZAudioPlayerDelegate that will handle the audio player callbacks + */ +@property (nonatomic, weak) id delegate; + +//------------------------------------------------------------------------------ + +/** + A BOOL indicating whether the player should loop the file + */ +@property (nonatomic, assign) BOOL shouldLoop; + +//------------------------------------------------------------------------------ + +/** + An EZAudioPlayerState value representing the current internal playback and file state of the EZAudioPlayer instance. + */ +@property (nonatomic, assign, readonly) EZAudioPlayerState state; + +//------------------------------------------------------------------------------ +#pragma mark - Initializers +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Initializers +///----------------------------------------------------------- + +/** + Initializes the EZAudioPlayer with an EZAudioFile instance. 
This does not use the EZAudioFile by reference, but instead creates a separate EZAudioFile instance with the same file at the given file path provided by the internal NSURL to use for internal seeking so it doesn't cause any locking between the caller's instance of the EZAudioFile. + @param audioFile The instance of the EZAudioFile to use for initializing the EZAudioPlayer + @return The newly created instance of the EZAudioPlayer + */ +- (instancetype)initWithAudioFile:(EZAudioFile *)audioFile; + +//------------------------------------------------------------------------------ + +/** + Initializes the EZAudioPlayer with an EZAudioFile instance and provides a way to assign the EZAudioPlayerDelegate on instantiation. This does not use the EZAudioFile by reference, but instead creates a separate EZAudioFile instance with the same file at the given file path provided by the internal NSURL to use for internal seeking so it doesn't cause any locking between the caller's instance of the EZAudioFile. + @param audioFile The instance of the EZAudioFile to use for initializing the EZAudioPlayer + @param delegate The receiver that will act as the EZAudioPlayerDelegate. Set to nil if it should have no delegate or use the initWithAudioFile: function instead. + @return The newly created instance of the EZAudioPlayer + */ +- (instancetype)initWithAudioFile:(EZAudioFile *)audioFile + delegate:(id)delegate; + +//------------------------------------------------------------------------------ + +/** + Initializes the EZAudioPlayer with an EZAudioPlayerDelegate. + @param delegate The receiver that will act as the EZAudioPlayerDelegate. Set to nil if it should have no delegate or use the initWithAudioFile: function instead. + @return The newly created instance of the EZAudioPlayer + */ +- (instancetype)initWithDelegate:(id)delegate; + +//------------------------------------------------------------------------------ + +/** + Initializes the EZAudioPlayer with an NSURL instance representing the file path of the audio file. + @param url The NSURL instance representing the file path of the audio file. + @return The newly created instance of the EZAudioPlayer + */ +- (instancetype)initWithURL:(NSURL*)url; + +//------------------------------------------------------------------------------ + +/** + Initializes the EZAudioPlayer with an NSURL instance representing the file path of the audio file and a caller to assign as the EZAudioPlayerDelegate on instantiation. + @param url The NSURL instance representing the file path of the audio file. + @param delegate The receiver that will act as the EZAudioPlayerDelegate. Set to nil if it should have no delegate or use the initWithAudioFile: function instead. + @return The newly created instance of the EZAudioPlayer + */ +- (instancetype)initWithURL:(NSURL*)url + delegate:(id)delegate; + +//------------------------------------------------------------------------------ +#pragma mark - Class Initializers +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Class Initializers +///----------------------------------------------------------- + +/** + Class initializer that creates a default EZAudioPlayer. + @return The newly created instance of the EZAudioPlayer + */ ++ (instancetype)audioPlayer; + +//------------------------------------------------------------------------------ + +/** + Class initializer that creates the EZAudioPlayer with an EZAudioFile instance. 
This does not use the EZAudioFile by reference, but instead creates a separate EZAudioFile instance with the same file at the given file path provided by the internal NSURL to use for internal seeking so it doesn't cause any locking between the caller's instance of the EZAudioFile. + @param audioFile The instance of the EZAudioFile to use for initializing the EZAudioPlayer + @return The newly created instance of the EZAudioPlayer + */ ++ (instancetype)audioPlayerWithAudioFile:(EZAudioFile *)audioFile; + +//------------------------------------------------------------------------------ + +/** + Class initializer that creates the EZAudioPlayer with an EZAudioFile instance and provides a way to assign the EZAudioPlayerDelegate on instantiation. This does not use the EZAudioFile by reference, but instead creates a separate EZAudioFile instance with the same file at the given file path provided by the internal NSURL to use for internal seeking so it doesn't cause any locking between the caller's instance of the EZAudioFile. + @param audioFile The instance of the EZAudioFile to use for initializing the EZAudioPlayer + @param delegate The receiver that will act as the EZAudioPlayerDelegate. Set to nil if it should have no delegate or use the audioPlayerWithAudioFile: function instead. + @return The newly created instance of the EZAudioPlayer + */ ++ (instancetype)audioPlayerWithAudioFile:(EZAudioFile *)audioFile + delegate:(id)delegate; + +//------------------------------------------------------------------------------ + +/** + Class initializer that creates a default EZAudioPlayer with an EZAudioPlayerDelegate.. + @return The newly created instance of the EZAudioPlayer + */ ++ (instancetype)audioPlayerWithDelegate:(id)delegate; + +//------------------------------------------------------------------------------ + +/** + Class initializer that creates the EZAudioPlayer with an NSURL instance representing the file path of the audio file. + @param url The NSURL instance representing the file path of the audio file. + @return The newly created instance of the EZAudioPlayer + */ ++ (instancetype)audioPlayerWithURL:(NSURL*)url; + +//------------------------------------------------------------------------------ + +/** + Class initializer that creates the EZAudioPlayer with an NSURL instance representing the file path of the audio file and a caller to assign as the EZAudioPlayerDelegate on instantiation. + @param url The NSURL instance representing the file path of the audio file. + @param delegate The receiver that will act as the EZAudioPlayerDelegate. Set to nil if it should have no delegate or use the audioPlayerWithURL: function instead. + @return The newly created instance of the EZAudioPlayer + */ ++ (instancetype)audioPlayerWithURL:(NSURL*)url + delegate:(id)delegate; + +//------------------------------------------------------------------------------ +#pragma mark - Singleton +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Shared Instance +///----------------------------------------------------------- + +/** + The shared instance (singleton) of the audio player. Most applications will only have one instance of the EZAudioPlayer that can be reused with multiple different audio files. + * @return The shared instance of the EZAudioPlayer. 
+ */ ++ (instancetype)sharedAudioPlayer; + +//------------------------------------------------------------------------------ +#pragma mark - Properties +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Properties +///----------------------------------------------------------- + +/** + Provides the EZAudioFile instance that is being used as the datasource for playback. When set it creates a copy of the EZAudioFile provided for internal use. This does not use the EZAudioFile by reference, but instead creates a copy of the EZAudioFile instance provided. + */ +@property (nonatomic, readwrite, copy) EZAudioFile *audioFile; + +//------------------------------------------------------------------------------ + +/** + Provides the current offset in the audio file as an NSTimeInterval (i.e. in seconds). When setting this it will determine the correct frame offset and perform a `seekToFrame` to the new time offset. + @warning Make sure the new current time offset is less than the `duration` or you will receive an invalid seek assertion. + */ +@property (nonatomic, readwrite) NSTimeInterval currentTime; + +//------------------------------------------------------------------------------ + +/** + The EZAudioDevice instance that is being used by the `output`. Similarly, setting this just sets the `device` property of the `output`. + */ +@property (readwrite) EZAudioDevice *device; + +//------------------------------------------------------------------------------ + +/** + Provides the duration of the audio file in seconds. + */ +@property (readonly) NSTimeInterval duration; + +//------------------------------------------------------------------------------ + +/** + Provides the current time as an NSString with the time format MM:SS. + */ +@property (readonly) NSString *formattedCurrentTime; + +//------------------------------------------------------------------------------ + +/** + Provides the duration as an NSString with the time format MM:SS. + */ +@property (readonly) NSString *formattedDuration; + +//------------------------------------------------------------------------------ + +/** + Provides the EZOutput that is being used to handle the actual playback of the audio data. This property is also settable, but note that the EZAudioPlayer will become the output's EZOutputDataSource and EZOutputDelegate. To listen for the EZOutput's delegate methods your view should implement the EZAudioPlayerDelegate and set itself as the EZAudioPlayer's `delegate`. + */ +@property (nonatomic, strong, readwrite) EZOutput *output; + +//------------------------------------------------------------------------------ + +/** + Provides the frame index (a.k.a the seek positon) within the audio file being used for playback. This can be helpful when seeking through the audio file. + @return An SInt64 representing the current frame index within the audio file used for playback. + */ +@property (readonly) SInt64 frameIndex; + +//------------------------------------------------------------------------------ + +/** + Provides a flag indicating whether the EZAudioPlayer is currently playing back any audio. + @return A BOOL indicating whether or not the EZAudioPlayer is performing playback, + */ +@property (readonly) BOOL isPlaying; + +//------------------------------------------------------------------------------ + +/** + Provides the current pan from the audio player's internal `output` component. 
Setting the pan adjusts the direction of the audio signal from left (0) to right (1). Default is 0.5 (middle). + */ +@property (nonatomic, assign) float pan; + +//------------------------------------------------------------------------------ + +/** + Provides the total amount of frames in the current audio file being used for playback. + @return A SInt64 representing the total amount of frames in the current audio file being used for playback. + */ +@property (readonly) SInt64 totalFrames; + +//------------------------------------------------------------------------------ + +/** + Provides the file path that's currently being used by the player for playback. + @return The NSURL representing the file path of the audio file being used for playback. + */ +@property (nonatomic, copy, readonly) NSURL *url; + +//------------------------------------------------------------------------------ + +/** + Provides the current volume from the audio player's internal `output` component. Setting the volume adjusts the gain of the output between 0 and 1. Default is 1. + */ +@property (nonatomic, assign) float volume; + +//------------------------------------------------------------------------------ +#pragma mark - Actions +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Controlling Playback +///----------------------------------------------------------- + +/** + Starts playback. + */ +- (void)play; + +//------------------------------------------------------------------------------ + +/** + Loads an EZAudioFile and immediately starts playing it. + @param audioFile An EZAudioFile to use for immediate playback. + */ +- (void)playAudioFile:(EZAudioFile *)audioFile; + +//------------------------------------------------------------------------------ + +/** + Pauses playback. + */ +- (void)pause; + +//------------------------------------------------------------------------------ + +/** + Seeks playback to a specified frame within the internal EZAudioFile. This will notify the EZAudioFileDelegate (if specified) with the audioPlayer:updatedPosition:inAudioFile: function. + @param frame The new frame position to seek to as a SInt64. + */ +- (void)seekToFrame:(SInt64)frame; + +@end diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioPlayer.m b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioPlayer.m new file mode 100644 index 0000000..d105d09 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioPlayer.m @@ -0,0 +1,459 @@ +// +// EZAudioPlayer.m +// EZAudio +// +// Created by Syed Haris Ali on 1/16/14. +// Copyright (c) 2014 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import "EZAudioPlayer.h" +#import "EZAudioUtilities.h" + +//------------------------------------------------------------------------------ +#pragma mark - Notifications +//------------------------------------------------------------------------------ + +NSString * const EZAudioPlayerDidChangeAudioFileNotification = @"EZAudioPlayerDidChangeAudioFileNotification"; +NSString * const EZAudioPlayerDidChangeOutputDeviceNotification = @"EZAudioPlayerDidChangeOutputDeviceNotification"; +NSString * const EZAudioPlayerDidChangePanNotification = @"EZAudioPlayerDidChangePanNotification"; +NSString * const EZAudioPlayerDidChangePlayStateNotification = @"EZAudioPlayerDidChangePlayStateNotification"; +NSString * const EZAudioPlayerDidChangeVolumeNotification = @"EZAudioPlayerDidChangeVolumeNotification"; +NSString * const EZAudioPlayerDidReachEndOfFileNotification = @"EZAudioPlayerDidReachEndOfFileNotification"; +NSString * const EZAudioPlayerDidSeekNotification = @"EZAudioPlayerDidSeekNotification"; + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioPlayer (Interface Extension) +//------------------------------------------------------------------------------ + +@interface EZAudioPlayer () +@property (nonatomic, assign, readwrite) EZAudioPlayerState state; +@end + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioPlayer (Implementation) +//------------------------------------------------------------------------------ + +@implementation EZAudioPlayer + +//------------------------------------------------------------------------------ +#pragma mark - Class Methods +//------------------------------------------------------------------------------ + ++ (instancetype)audioPlayer +{ + return [[self alloc] init]; +} + +//------------------------------------------------------------------------------ + ++ (instancetype)audioPlayerWithDelegate:(id)delegate +{ + return [[self alloc] initWithDelegate:delegate]; +} + +//------------------------------------------------------------------------------ + ++ (instancetype)audioPlayerWithAudioFile:(EZAudioFile *)audioFile +{ + return [[self alloc] initWithAudioFile:audioFile]; +} + +//------------------------------------------------------------------------------ + ++ (instancetype)audioPlayerWithAudioFile:(EZAudioFile *)audioFile + delegate:(id)delegate +{ + return [[self alloc] initWithAudioFile:audioFile + delegate:delegate]; +} + +//------------------------------------------------------------------------------ + ++ (instancetype)audioPlayerWithURL:(NSURL *)url +{ + return [[self alloc] initWithURL:url]; +} + +//------------------------------------------------------------------------------ + ++ (instancetype)audioPlayerWithURL:(NSURL *)url + delegate:(id)delegate +{ + return [[self alloc] initWithURL:url delegate:delegate]; +} + +//------------------------------------------------------------------------------ +#pragma mark - Initialization 
+//------------------------------------------------------------------------------ + +- (instancetype)init +{ + self = [super init]; + if (self) + { + [self setup]; + } + return self; +} + +//------------------------------------------------------------------------------ + +- (instancetype)initWithDelegate:(id)delegate +{ + self = [self init]; + if (self) + { + self.delegate = delegate; + } + return self; +} + +//------------------------------------------------------------------------------ + +- (instancetype)initWithAudioFile:(EZAudioFile *)audioFile +{ + return [self initWithAudioFile:audioFile delegate:nil]; +} + +//------------------------------------------------------------------------------ + +- (instancetype)initWithAudioFile:(EZAudioFile *)audioFile + delegate:(id)delegate +{ + self = [self initWithDelegate:delegate]; + if (self) + { + self.audioFile = audioFile; + } + return self; +} + +//------------------------------------------------------------------------------ + +- (instancetype)initWithURL:(NSURL *)url +{ + return [self initWithURL:url delegate:nil]; +} + +//------------------------------------------------------------------------------ + +- (instancetype)initWithURL:(NSURL *)url + delegate:(id)delegate +{ + self = [self initWithDelegate:delegate]; + if (self) + { + self.audioFile = [EZAudioFile audioFileWithURL:url delegate:self]; + } + return self; +} + +//------------------------------------------------------------------------------ +#pragma mark - Singleton +//------------------------------------------------------------------------------ + ++ (instancetype)sharedAudioPlayer +{ + static EZAudioPlayer *player; + static dispatch_once_t onceToken; + dispatch_once(&onceToken, ^ + { + player = [[self alloc] init]; + }); + return player; +} + +//------------------------------------------------------------------------------ +#pragma mark - Setup +//------------------------------------------------------------------------------ + +- (void)setup +{ + self.output = [EZOutput output]; + self.state = EZAudioPlayerStateReadyToPlay; +} + +//------------------------------------------------------------------------------ +#pragma mark - Getters +//------------------------------------------------------------------------------ + +- (NSTimeInterval)currentTime +{ + return [self.audioFile currentTime]; +} + +//------------------------------------------------------------------------------ + +- (EZAudioDevice *)device +{ + return [self.output device]; +} + +//------------------------------------------------------------------------------ + +- (NSTimeInterval)duration +{ + return [self.audioFile duration]; +} + +//------------------------------------------------------------------------------ + +- (NSString *)formattedCurrentTime +{ + return [self.audioFile formattedCurrentTime]; +} + +//------------------------------------------------------------------------------ + +- (NSString *)formattedDuration +{ + return [self.audioFile formattedDuration]; +} + +//------------------------------------------------------------------------------ + +- (SInt64)frameIndex +{ + return [self.audioFile frameIndex]; +} + +//------------------------------------------------------------------------------ + +- (BOOL)isPlaying +{ + return [self.output isPlaying]; +} + +//------------------------------------------------------------------------------ + +- (float)pan +{ + return [self.output pan]; +} + +//------------------------------------------------------------------------------ + +- (SInt64)totalFrames +{ + return 
[self.audioFile totalFrames]; +} + +//------------------------------------------------------------------------------ + +- (float)volume +{ + return [self.output volume]; +} + +//------------------------------------------------------------------------------ +#pragma mark - Setters +//------------------------------------------------------------------------------ + +- (void)setAudioFile:(EZAudioFile *)audioFile +{ + _audioFile = [audioFile copy]; + _audioFile.delegate = self; + AudioStreamBasicDescription inputFormat = _audioFile.clientFormat; + [self.output setInputFormat:inputFormat]; + [[NSNotificationCenter defaultCenter] postNotificationName:EZAudioPlayerDidChangeAudioFileNotification + object:self]; +} + +//------------------------------------------------------------------------------ + +- (void)setCurrentTime:(NSTimeInterval)currentTime +{ + [self.audioFile setCurrentTime:currentTime]; + [[NSNotificationCenter defaultCenter] postNotificationName:EZAudioPlayerDidSeekNotification + object:self]; +} + +//------------------------------------------------------------------------------ + +- (void)setDevice:(EZAudioDevice *)device +{ + [self.output setDevice:device]; +} + +//------------------------------------------------------------------------------ + +- (void)setOutput:(EZOutput *)output +{ + _output = output; + _output.dataSource = self; + _output.delegate = self; +} + +//------------------------------------------------------------------------------ + +- (void)setPan:(float)pan +{ + [self.output setPan:pan]; + [[NSNotificationCenter defaultCenter] postNotificationName:EZAudioPlayerDidChangePanNotification + object:self]; +} + +//------------------------------------------------------------------------------ + +- (void)setVolume:(float)volume +{ + [self.output setVolume:volume]; + [[NSNotificationCenter defaultCenter] postNotificationName:EZAudioPlayerDidChangeVolumeNotification + object:self]; +} + +//------------------------------------------------------------------------------ +#pragma mark - Actions +//------------------------------------------------------------------------------ + +- (void)play +{ + [self.output startPlayback]; + self.state = EZAudioPlayerStatePlaying; +} + +//------------------------------------------------------------------------------ + +- (void)playAudioFile:(EZAudioFile *)audioFile +{ + // + // stop playing anything that might currently be playing + // + [self pause]; + + // + // set new stream + // + self.audioFile = audioFile; + + // + // begin playback + // + [self play]; +} + +//------------------------------------------------------------------------------ + +- (void)pause +{ + [self.output stopPlayback]; + self.state = EZAudioPlayerStatePaused; +} + +//------------------------------------------------------------------------------ + +- (void)seekToFrame:(SInt64)frame +{ + self.state = EZAudioPlayerStateSeeking; + [self.audioFile seekToFrame:frame]; + self.state = self.isPlaying ? 
EZAudioPlayerStatePlaying : EZAudioPlayerStatePaused; + [[NSNotificationCenter defaultCenter] postNotificationName:EZAudioPlayerDidSeekNotification + object:self]; +} + +//------------------------------------------------------------------------------ +#pragma mark - EZOutputDataSource +//------------------------------------------------------------------------------ + +- (OSStatus) output:(EZOutput *)output + shouldFillAudioBufferList:(AudioBufferList *)audioBufferList + withNumberOfFrames:(UInt32)frames + timestamp:(const AudioTimeStamp *)timestamp +{ + if (self.audioFile) + { + UInt32 bufferSize; + BOOL eof; + [self.audioFile readFrames:frames + audioBufferList:audioBufferList + bufferSize:&bufferSize + eof:&eof]; + if (eof && [self.delegate respondsToSelector:@selector(audioPlayer:reachedEndOfAudioFile:)]) + { + [self.delegate audioPlayer:self reachedEndOfAudioFile:self.audioFile]; + } + if (eof && self.shouldLoop) + { + [self seekToFrame:0]; + } + else if (eof) + { + [self pause]; + [self seekToFrame:0]; + self.state = EZAudioPlayerStateEndOfFile; + [[NSNotificationCenter defaultCenter] postNotificationName:EZAudioPlayerDidReachEndOfFileNotification + object:self]; + } + } + return noErr; +} + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioFileDelegate +//------------------------------------------------------------------------------ + +- (void)audioFileUpdatedPosition:(EZAudioFile *)audioFile +{ + if ([self.delegate respondsToSelector:@selector(audioPlayer:updatedPosition:inAudioFile:)]) + { + [self.delegate audioPlayer:self + updatedPosition:[audioFile frameIndex] + inAudioFile:audioFile]; + } +} + +//------------------------------------------------------------------------------ +#pragma mark - EZOutputDelegate +//------------------------------------------------------------------------------ + +- (void)output:(EZOutput *)output changedDevice:(EZAudioDevice *)device +{ + [[NSNotificationCenter defaultCenter] postNotificationName:EZAudioPlayerDidChangeOutputDeviceNotification + object:self]; +} + +//------------------------------------------------------------------------------ + +- (void)output:(EZOutput *)output changedPlayingState:(BOOL)isPlaying +{ + [[NSNotificationCenter defaultCenter] postNotificationName:EZAudioPlayerDidChangePlayStateNotification + object:self]; +} + +//------------------------------------------------------------------------------ + +- (void) output:(EZOutput *)output + playedAudio:(float **)buffer + withBufferSize:(UInt32)bufferSize + withNumberOfChannels:(UInt32)numberOfChannels +{ + if ([self.delegate respondsToSelector:@selector(audioPlayer:playedAudio:withBufferSize:withNumberOfChannels:inAudioFile:)]) + { + [self.delegate audioPlayer:self + playedAudio:buffer + withBufferSize:bufferSize + withNumberOfChannels:numberOfChannels + inAudioFile:self.audioFile]; + } +} + +//------------------------------------------------------------------------------ + +@end diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioPlot.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioPlot.h new file mode 100644 index 0000000..92009a3 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioPlot.h @@ -0,0 +1,207 @@ +// +// EZAudioPlot.h +// EZAudio +// +// Created by Syed Haris Ali on 9/2/13. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import +#import "EZPlot.h" +#import "EZAudioDisplayLink.h" + +@class EZAudio; + +//------------------------------------------------------------------------------ +#pragma mark - Constants +//------------------------------------------------------------------------------ + +/** + The default value used for the maximum rolling history buffer length of any EZAudioPlot. + @deprecated This constant is deprecated starting in version 0.2.0. + @note Please use EZAudioPlotDefaultMaxHistoryBufferLength instead. + */ +FOUNDATION_EXPORT UInt32 const kEZAudioPlotMaxHistoryBufferLength __attribute__((deprecated)); + +/** + The default value used for the default rolling history buffer length of any EZAudioPlot. + @deprecated This constant is deprecated starting in version 0.2.0. + @note Please use EZAudioPlotDefaultHistoryBufferLength instead. + */ +FOUNDATION_EXPORT UInt32 const kEZAudioPlotDefaultHistoryBufferLength __attribute__((deprecated)); + +/** + The default value used for the default rolling history buffer length of any EZAudioPlot. + */ +FOUNDATION_EXPORT UInt32 const EZAudioPlotDefaultHistoryBufferLength; + +/** + The default value used for the maximum rolling history buffer length of any EZAudioPlot. + */ +FOUNDATION_EXPORT UInt32 const EZAudioPlotDefaultMaxHistoryBufferLength; + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioPlotWaveformLayer +//------------------------------------------------------------------------------ + +/** + The EZAudioPlotWaveformLayer is a lightweight subclass of the CAShapeLayer that allows implicit animations on the `path` key. + */ +@interface EZAudioPlotWaveformLayer : CAShapeLayer +@end + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioPlot +//------------------------------------------------------------------------------ + +/** + `EZAudioPlot`, a subclass of `EZPlot`, is a cross-platform (iOS and OSX) class that plots an audio waveform using Core Graphics. + + The caller provides updates a constant stream of updated audio data in the `updateBuffer:withBufferSize:` function, which in turn will be plotted in one of the plot types: + + * Buffer (`EZPlotTypeBuffer`) - A plot that only consists of the current buffer and buffer size from the last call to `updateBuffer:withBufferSize:`. This looks similar to the default openFrameworks input audio example. 
+ * Rolling (`EZPlotTypeRolling`) - A plot that consists of a rolling history of values averaged from each buffer. This is the traditional waveform look. + + #Parent Methods and Properties# + + See EZPlot for full API methods and properties (colors, plot type, update function) + + */ +@interface EZAudioPlot : EZPlot + +/** + A BOOL that allows optimizing the audio plot's drawing for real-time displays. Since the update function may be updating the plot's data very quickly (over 60 frames per second) this property will throttle the drawing calls to be 60 frames per second (or whatever the screen rate is). Specifically, it disables implicit path change animations on the `waveformLayer` and sets up a display link to render 60 fps (audio updating the plot at 44.1 kHz causes it to re-render 86 fps - far greater than what is needed for a visual display). + */ +@property (nonatomic, assign) BOOL shouldOptimizeForRealtimePlot; + +//------------------------------------------------------------------------------ + +/** + A BOOL indicating whether the plot should center itself vertically. + */ +@property (nonatomic, assign) BOOL shouldCenterYAxis; + +//------------------------------------------------------------------------------ + +/** + An EZAudioPlotWaveformLayer that is used to render the actual waveform. By switching the drawing code to Core Animation layers in version 0.2.0 most work, specifically the compositing step, is now done on the GPU. Hence, multiple EZAudioPlot instances can be used simultaneously with very low CPU overhead so these are now practical for table and collection views. + */ +@property (nonatomic, strong) EZAudioPlotWaveformLayer *waveformLayer; + +//------------------------------------------------------------------------------ +#pragma mark - Adjust Resolution +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Adjusting The Resolution +///----------------------------------------------------------- + +/** + Sets the length of the rolling history buffer (i.e. the number of points in the rolling plot's buffer). Can grow or shrink the display up to the maximum size specified by the `maximumRollingHistoryLength` method. Will return the actual set value, which will be either the given value if smaller than the `maximumRollingHistoryLength` or `maximumRollingHistoryLength` if a larger value is attempted to be set. + @param historyLength The new length of the rolling history buffer. + @return The new value equal to the historyLength or the `maximumRollingHistoryLength`. + */ +-(int)setRollingHistoryLength:(int)historyLength; + +//------------------------------------------------------------------------------ + +/** + Provides the length of the rolling history buffer (i.e. the number of points in the rolling plot's buffer). + * @return An int representing the length of the rolling history buffer + */ +-(int)rollingHistoryLength; + +//------------------------------------------------------------------------------ +#pragma mark - Subclass Methods +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Subclass Methods +///----------------------------------------------------------- + +/** + Main method that handles converting the points created from the `updatedBuffer:withBufferSize:` method into a CGPathRef to store in the `waveformLayer`. 
In this method you can create any path you'd like using the point array (for instance, maybe mapping the points to a circle instead of the standard 2D plane). + @param points An array of CGPoint structures, with the x values ranging from 0 - (pointCount - 1) and y values containing the last audio data's buffer. + @param pointCount A UInt32 of the length of the point array. + @param rect An EZRect (CGRect on iOS or NSRect on OSX) that the path should be created relative to. + @return A CGPathRef that is the path you'd like to store on the `waveformLayer` to visualize the audio data. + */ +- (CGPathRef)createPathWithPoints:(CGPoint *)points + pointCount:(UInt32)pointCount + inRect:(EZRect)rect; + +//------------------------------------------------------------------------------ + +/** + Provides the default length of the rolling history buffer when the plot is initialized. Default is `EZAudioPlotDefaultHistoryBufferLength` constant. + @return An int describing the initial length of the rolling history buffer. + */ +- (int)defaultRollingHistoryLength; + +//------------------------------------------------------------------------------ + +/** + Called after the view has been created. Subclasses should use to add any additional methods needed instead of overriding the init methods. + */ +- (void)setupPlot; + +//------------------------------------------------------------------------------ + +/** + Provides the default number of points that will be used to initialize the graph's points data structure that holds. Essentially the plot starts off as a flat line of this many points. Default is 100. + @return An int describing the initial number of points the plot should have when flat lined. + */ +- (int)initialPointCount; + +//------------------------------------------------------------------------------ + +/** + Provides the default maximum rolling history length - that is, the maximum amount of points the `setRollingHistoryLength:` method may be set to. If a length higher than this is set then the plot will likely crash because the appropriate resources are only allocated once during the plot's initialization step. Defualt is `EZAudioPlotDefaultMaxHistoryBufferLength` constant. + @return An int describing the maximum length of the absolute rolling history buffer. + */ +- (int)maximumRollingHistoryLength; + +//------------------------------------------------------------------------------ + +/** + Method to cause the waveform layer's path to get recreated and redrawn on screen using the last buffer of data provided. This is the equivalent to the drawRect: method used to normally subclass a view's drawing. This normally don't need to be overrode though - a better approach would be to override the `createPathWithPoints:pointCount:inRect:` method. + */ +- (void)redraw; + +//------------------------------------------------------------------------------ + +/** + Main method used to copy the sample data from the source buffer and update the + plot. Subclasses can overwrite this method for custom behavior. + @param data A float array of the sample data. Subclasses should copy this data to a separate array to avoid threading issues. + @param length The length of the float array as an int. 
+ */ +-(void)setSampleData:(float *)data length:(int)length; + +//------------------------------------------------------------------------------ + +@end + +@interface EZAudioPlot () +@property (nonatomic, strong) EZAudioDisplayLink *displayLink; +@property (nonatomic, assign) EZPlotHistoryInfo *historyInfo; +@property (nonatomic, assign) CGPoint *points; +@property (nonatomic, assign) UInt32 pointCount; +@end \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioPlot.m b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioPlot.m new file mode 100644 index 0000000..7603e26 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioPlot.m @@ -0,0 +1,453 @@ +// +// EZAudioPlot.m +// EZAudio +// +// Created by Syed Haris Ali on 9/2/13. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
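The header above documents EZAudioPlot's subclass hooks (`setupPlot`, `defaultRollingHistoryLength`, `createPathWithPoints:pointCount:inRect:`). A minimal sketch of a custom plot built on those hooks follows; the subclass name is hypothetical and the code is illustrative only, not part of the vendored pod.

#import "EZAudioPlot.h"

// Hypothetical subclass, for illustration only.
@interface MyRollingPlot : EZAudioPlot
@end

@implementation MyRollingPlot

- (void)setupPlot
{
    // Called once after initialization; adjust defaults here instead of overriding init.
    self.plotType = EZPlotTypeRolling;
    self.shouldFill = YES;
    self.shouldMirror = YES;
}

- (int)defaultRollingHistoryLength
{
    // Must stay at or below -maximumRollingHistoryLength, since the history buffers
    // are only allocated once during the plot's initialization.
    return 1024;
}

- (CGPathRef)createPathWithPoints:(CGPoint *)points
                       pointCount:(UInt32)pointCount
                           inRect:(EZRect)rect
{
    // Custom geometry could be built from `points` here; this sketch simply falls
    // back to the default waveform path.
    return [super createPathWithPoints:points pointCount:pointCount inRect:rect];
}

@end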
+ +#import "EZAudioPlot.h" + +//------------------------------------------------------------------------------ +#pragma mark - Constants +//------------------------------------------------------------------------------ + +UInt32 const kEZAudioPlotMaxHistoryBufferLength = 8192; +UInt32 const kEZAudioPlotDefaultHistoryBufferLength = 512; +UInt32 const EZAudioPlotDefaultHistoryBufferLength = 512; +UInt32 const EZAudioPlotDefaultMaxHistoryBufferLength = 8192; + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioPlot (Implementation) +//------------------------------------------------------------------------------ + +@implementation EZAudioPlot + +//------------------------------------------------------------------------------ +#pragma mark - Dealloc +//------------------------------------------------------------------------------ + +- (void)dealloc +{ + [EZAudioUtilities freeHistoryInfo:self.historyInfo]; + free(self.points); +} + +//------------------------------------------------------------------------------ +#pragma mark - Initialization +//------------------------------------------------------------------------------ + +- (id)init +{ + self = [super init]; + if (self) + { + [self initPlot]; + } + return self; +} + +- (id)initWithCoder:(NSCoder *)aDecoder +{ + self = [super initWithCoder:aDecoder]; + if (self) + { + [self initPlot]; + } + return self; +} + +#if TARGET_OS_IPHONE +- (id)initWithFrame:(CGRect)frameRect +#elif TARGET_OS_MAC +- (id)initWithFrame:(NSRect)frameRect +#endif +{ + self = [super initWithFrame:frameRect]; + if (self) + { + [self initPlot]; + } + return self; +} + +#if TARGET_OS_IPHONE +- (void)layoutSubviews +{ + [super layoutSubviews]; + [CATransaction begin]; + [CATransaction setDisableActions:YES]; + self.waveformLayer.frame = self.bounds; + [self redraw]; + [CATransaction commit]; +} +#elif TARGET_OS_MAC +- (void)layout +{ + [super layout]; + [CATransaction begin]; + [CATransaction setDisableActions:YES]; + self.waveformLayer.frame = self.bounds; + [self redraw]; + [CATransaction commit]; +} +#endif + +- (void)initPlot +{ + self.shouldCenterYAxis = YES; + self.shouldOptimizeForRealtimePlot = YES; + self.gain = 1.0; + self.plotType = EZPlotTypeBuffer; + self.shouldMirror = NO; + self.shouldFill = NO; + + // Setup history window + [self resetHistoryBuffers]; + + self.waveformLayer = [EZAudioPlotWaveformLayer layer]; + self.waveformLayer.frame = self.bounds; + self.waveformLayer.lineWidth = 1.0f; + self.waveformLayer.fillColor = nil; + self.waveformLayer.backgroundColor = nil; + self.waveformLayer.opaque = YES; + +#if TARGET_OS_IPHONE + self.color = [UIColor colorWithHue:0 saturation:1.0 brightness:1.0 alpha:1.0]; +#elif TARGET_OS_MAC + self.color = [NSColor colorWithCalibratedHue:0 saturation:1.0 brightness:1.0 alpha:1.0]; + self.wantsLayer = YES; + self.layerContentsRedrawPolicy = NSViewLayerContentsRedrawOnSetNeedsDisplay; +#endif + self.backgroundColor = nil; + [self.layer insertSublayer:self.waveformLayer atIndex:0]; + + // + // Allow subclass to initialize plot + // + [self setupPlot]; + + self.points = calloc(EZAudioPlotDefaultMaxHistoryBufferLength, sizeof(CGPoint)); + self.pointCount = [self initialPointCount]; + [self redraw]; +} + +//------------------------------------------------------------------------------ + +- (void)setupPlot +{ + // + // Override in subclass + // +} + +//------------------------------------------------------------------------------ +#pragma mark - Setup 
+//------------------------------------------------------------------------------ + +- (void)resetHistoryBuffers +{ + // + // Clear any existing data + // + if (self.historyInfo) + { + [EZAudioUtilities freeHistoryInfo:self.historyInfo]; + } + + self.historyInfo = [EZAudioUtilities historyInfoWithDefaultLength:[self defaultRollingHistoryLength] + maximumLength:[self maximumRollingHistoryLength]]; +} + +//------------------------------------------------------------------------------ +#pragma mark - Setters +//------------------------------------------------------------------------------ + +- (void)setBackgroundColor:(id)backgroundColor +{ + [super setBackgroundColor:backgroundColor]; + self.layer.backgroundColor = [backgroundColor CGColor]; +} + +//------------------------------------------------------------------------------ + +- (void)setColor:(id)color +{ + [super setColor:color]; + self.waveformLayer.strokeColor = [color CGColor]; + if (self.shouldFill) + { + self.waveformLayer.fillColor = [color CGColor]; + } +} + +//------------------------------------------------------------------------------ + +- (void)setShouldOptimizeForRealtimePlot:(BOOL)shouldOptimizeForRealtimePlot +{ + _shouldOptimizeForRealtimePlot = shouldOptimizeForRealtimePlot; + if (shouldOptimizeForRealtimePlot && !self.displayLink) + { + self.displayLink = [EZAudioDisplayLink displayLinkWithDelegate:self]; + [self.displayLink start]; + } + else + { + [self.displayLink stop]; + self.displayLink = nil; + } +} + +//------------------------------------------------------------------------------ + +- (void)setShouldFill:(BOOL)shouldFill +{ + [super setShouldFill:shouldFill]; + self.waveformLayer.fillColor = shouldFill ? [self.color CGColor] : nil; +} + +//------------------------------------------------------------------------------ +#pragma mark - Drawing +//------------------------------------------------------------------------------ + +- (void)clear +{ + if (self.pointCount > 0) + { + [self resetHistoryBuffers]; + float data[self.pointCount]; + memset(data, 0, self.pointCount * sizeof(float)); + [self setSampleData:data length:self.pointCount]; + [self redraw]; + } +} + +//------------------------------------------------------------------------------ + +- (void)redraw +{ + EZRect frame = [self.waveformLayer frame]; + CGPathRef path = [self createPathWithPoints:self.points + pointCount:self.pointCount + inRect:frame]; + if (self.shouldOptimizeForRealtimePlot) + { + [CATransaction begin]; + [CATransaction setDisableActions:YES]; + self.waveformLayer.path = path; + [CATransaction commit]; + } + else + { + self.waveformLayer.path = path; + } + CGPathRelease(path); +} + +//------------------------------------------------------------------------------ + +- (CGPathRef)createPathWithPoints:(CGPoint *)points + pointCount:(UInt32)pointCount + inRect:(EZRect)rect +{ + CGMutablePathRef path = NULL; + if (pointCount > 0) + { + path = CGPathCreateMutable(); + double xscale = (rect.size.width) / ((float)self.pointCount); + double halfHeight = floor(rect.size.height / 2.0); + int deviceOriginFlipped = [self isDeviceOriginFlipped] ? 
-1 : 1; + CGAffineTransform xf = CGAffineTransformIdentity; + CGFloat translateY = 0.0f; + if (!self.shouldCenterYAxis) + { +#if TARGET_OS_IPHONE + translateY = CGRectGetHeight(rect); +#elif TARGET_OS_MAC + translateY = 0.0f; +#endif + } + else + { + translateY = halfHeight + rect.origin.y; + } + xf = CGAffineTransformTranslate(xf, 0.0, translateY); + double yScaleFactor = halfHeight; + if (!self.shouldCenterYAxis) + { + yScaleFactor = 2.0 * halfHeight; + } + xf = CGAffineTransformScale(xf, xscale, deviceOriginFlipped * yScaleFactor); + CGPathAddLines(path, &xf, self.points, self.pointCount); + if (self.shouldMirror) + { + xf = CGAffineTransformScale(xf, 1.0f, -1.0f); + CGPathAddLines(path, &xf, self.points, self.pointCount); + } + if (self.shouldFill) + { + CGPathCloseSubpath(path); + } + } + return path; +} + +//------------------------------------------------------------------------------ +#pragma mark - Update +//------------------------------------------------------------------------------ + +- (void)updateBuffer:(float *)buffer withBufferSize:(UInt32)bufferSize +{ + // append the buffer to the history + [EZAudioUtilities appendBufferRMS:buffer + withBufferSize:bufferSize + toHistoryInfo:self.historyInfo]; + + // copy samples + switch (self.plotType) + { + case EZPlotTypeBuffer: + [self setSampleData:buffer + length:bufferSize]; + break; + case EZPlotTypeRolling: + + [self setSampleData:self.historyInfo->buffer + length:self.historyInfo->bufferSize]; + break; + default: + break; + } + + // update drawing + if (!self.shouldOptimizeForRealtimePlot) + { + [self redraw]; + } +} + +//------------------------------------------------------------------------------ + +- (void)setSampleData:(float *)data length:(int)length +{ + CGPoint *points = self.points; + for (int i = 0; i < length; i++) + { + points[i].x = i; + points[i].y = data[i] * self.gain; + } + points[0].y = points[length - 1].y = 0.0f; + self.pointCount = length; +} + +//------------------------------------------------------------------------------ +#pragma mark - Adjusting History Resolution +//------------------------------------------------------------------------------ + +- (int)rollingHistoryLength +{ + return self.historyInfo->bufferSize; +} + +//------------------------------------------------------------------------------ + +- (int)setRollingHistoryLength:(int)historyLength +{ + self.historyInfo->bufferSize = MIN(EZAudioPlotDefaultMaxHistoryBufferLength, historyLength); + return self.historyInfo->bufferSize; +} + +//------------------------------------------------------------------------------ +#pragma mark - Subclass +//------------------------------------------------------------------------------ + +- (int)defaultRollingHistoryLength +{ + return EZAudioPlotDefaultHistoryBufferLength; +} + +//------------------------------------------------------------------------------ + +- (int)initialPointCount +{ + return 100; +} + +//------------------------------------------------------------------------------ + +- (int)maximumRollingHistoryLength +{ + return EZAudioPlotDefaultMaxHistoryBufferLength; +} + +//------------------------------------------------------------------------------ +#pragma mark - Utility +//------------------------------------------------------------------------------ + +- (BOOL)isDeviceOriginFlipped +{ + BOOL isDeviceOriginFlipped = NO; +#if TARGET_OS_IPHONE + isDeviceOriginFlipped = YES; +#elif TARGET_OS_MAC +#endif + return isDeviceOriginFlipped; +} + 
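// Illustrative sketch, not from the upstream pod: the CGAffineTransform assembled in
// -createPathWithPoints:pointCount:inRect: above maps each stored sample point (i, y)
// to view coordinates as x' = i * rect.size.width / pointCount and
// y' = translateY + flip * yScaleFactor * y, where translateY and yScaleFactor come
// from the centered/non-centered branches and flip is -1 on iOS because the device
// origin is flipped. Mirroring re-adds the points with the y scale negated, which
// produces the classic symmetric waveform. A hypothetical helper spelling out the
// centered, iOS case:
static inline CGPoint EZAudioPlotIllustrativeMapPoint(CGPoint sample, CGRect rect, UInt32 pointCount)
{
    CGFloat halfHeight = floor(rect.size.height / 2.0);
    CGFloat xscale = rect.size.width / (CGFloat)pointCount;
    CGFloat translateY = halfHeight + rect.origin.y; // shouldCenterYAxis == YES
    int flip = -1;                                   // device origin flipped on iOS
    return CGPointMake(sample.x * xscale, translateY + flip * halfHeight * sample.y);
}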
+//------------------------------------------------------------------------------ +#pragma mark - EZAudioDisplayLinkDelegate +//------------------------------------------------------------------------------ + +- (void)displayLinkNeedsDisplay:(EZAudioDisplayLink *)displayLink +{ + [self redraw]; +} + +//------------------------------------------------------------------------------ + +@end + +////------------------------------------------------------------------------------ +#pragma mark - EZAudioPlotWaveformLayer (Implementation) +////------------------------------------------------------------------------------ + +@implementation EZAudioPlotWaveformLayer + +- (id)actionForKey:(NSString *)event +{ + if ([event isEqualToString:@"path"]) + { + if ([CATransaction disableActions]) + { + return nil; + } + else + { + CABasicAnimation *animation = [CABasicAnimation animation]; + animation.timingFunction = [CATransaction animationTimingFunction]; + animation.duration = [CATransaction animationDuration]; + return animation; + } + return nil; + } + return [super actionForKey:event]; +} + +@end \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioPlotGL.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioPlotGL.h new file mode 100644 index 0000000..70e5a00 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioPlotGL.h @@ -0,0 +1,251 @@ +// +// EZAudioPlotGL.h +// EZAudio +// +// Created by Syed Haris Ali on 11/22/13. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import +#import "EZPlot.h" +#if !TARGET_OS_IPHONE +#import +#endif + +//------------------------------------------------------------------------------ +#pragma mark - Data Structures +//------------------------------------------------------------------------------ + +typedef struct +{ + GLfloat x; + GLfloat y; +} EZAudioPlotGLPoint; + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioPlotGL +//------------------------------------------------------------------------------ + +/** + EZAudioPlotGL is a subclass of either a GLKView on iOS or an NSOpenGLView on OSX. 
As of 0.6.0 this class no longer depends on an embedded GLKViewController for iOS as the display link is just manually managed within this single view instead. The EZAudioPlotGL provides the same kind of audio plot as the EZAudioPlot, but uses OpenGL to GPU-accelerate the drawing of the points, which means you can fit a lot more points and complex geometries. + */ +#if TARGET_OS_IPHONE +@interface EZAudioPlotGL : GLKView +#elif TARGET_OS_MAC +@interface EZAudioPlotGL : NSOpenGLView +#endif + +//------------------------------------------------------------------------------ +#pragma mark - Properties +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Customizing The Plot's Appearance +///----------------------------------------------------------- + +/** + The default background color of the plot. For iOS the color is specified as a UIColor while for OSX the color is an NSColor. The default value on both platforms is a sweet looking green. + @warning On OSX, if you set the background to a value where the alpha component is 0 then the EZAudioPlotGL will automatically set its superview to be layer-backed. + */ +#if TARGET_OS_IPHONE +@property (nonatomic, strong) IBInspectable UIColor *backgroundColor; +#elif TARGET_OS_MAC +@property (nonatomic, strong) IBInspectable NSColor *backgroundColor; +#endif + +//------------------------------------------------------------------------------ + +/** + The default color of the plot's data (i.e. waveform, y-axis values). For iOS the color is specified as a UIColor while for OSX the color is an NSColor. The default value on both platforms is white. + */ +#if TARGET_OS_IPHONE +@property (nonatomic, strong) IBInspectable UIColor *color; +#elif TARGET_OS_MAC +@property (nonatomic, strong) IBInspectable NSColor *color; +#endif + +//------------------------------------------------------------------------------ + +/** + The plot's gain value, which controls the scale of the y-axis values. The default value of the gain is 1.0f and should always be greater than 0.0f. + */ +@property (nonatomic, assign) IBInspectable float gain; + +//------------------------------------------------------------------------------ + +/** + The type of plot as specified by the `EZPlotType` enumeration (i.e. a buffer or rolling plot type). Default is EZPlotTypeBuffer. + */ +@property (nonatomic, assign) EZPlotType plotType; + +//------------------------------------------------------------------------------ + +/** + A BOOL indicating whether or not to fill in the graph. A value of YES will make a filled graph (filling in the space between the x-axis and the y-value), while a value of NO will create a stroked graph (connecting the points along the y-axis). Default is NO. + */ +@property (nonatomic, assign) IBInspectable BOOL shouldFill; + +//------------------------------------------------------------------------------ + +/** + A boolean indicating whether the graph should be rotated along the x-axis to give a mirrored reflection. This is typical for audio plots to produce the classic waveform look. A value of YES will produce a mirrored reflection of the y-values about the x-axis, while a value of NO will only plot the y-values. Default is NO. 
+ */ +@property (nonatomic, assign) IBInspectable BOOL shouldMirror; + +//------------------------------------------------------------------------------ +#pragma mark - Updating The Plot +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Updating The Plot +///----------------------------------------------------------- + +/** + Updates the plot with the new buffer data and tells the view to redraw itself. Caller will provide a float array with the values they expect to see on the y-axis. The plot will internally handle mapping the x-axis and y-axis to the current view port, any interpolation for fills effects, and mirroring. + @param buffer A float array of values to map to the y-axis. + @param bufferSize The size of the float array that will be mapped to the y-axis. + */ +-(void)updateBuffer:(float *)buffer withBufferSize:(UInt32)bufferSize; + +//------------------------------------------------------------------------------ +#pragma mark - Adjust Resolution +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Adjusting The Resolution +///----------------------------------------------------------- + +/** + Sets the length of the rolling history buffer (i.e. the number of points in the rolling plot's buffer). Can grow or shrink the display up to the maximum size specified by the `maximumRollingHistoryLength` method. Will return the actual set value, which will be either the given value if smaller than the `maximumRollingHistoryLength` or `maximumRollingHistoryLength` if a larger value is attempted to be set. + @param historyLength The new length of the rolling history buffer. + @return The new value equal to the historyLength or the `maximumRollingHistoryLength`. + */ +-(int)setRollingHistoryLength:(int)historyLength; + +//------------------------------------------------------------------------------ + +/** + Provides the length of the rolling history buffer (i.e. the number of points in the rolling plot's buffer). + * @return An int representing the length of the rolling history buffer + */ +-(int)rollingHistoryLength; + +//------------------------------------------------------------------------------ +#pragma mark - Clearing The Plot +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Clearing The Plot +///----------------------------------------------------------- + +/** + Clears all data from the audio plot (includes both EZPlotTypeBuffer and EZPlotTypeRolling) + */ +-(void)clear; + +//------------------------------------------------------------------------------ +#pragma mark - Start/Stop Display Link +//------------------------------------------------------------------------------ + +/** + Call this method to tell the EZAudioDisplayLink to stop drawing temporarily. + */ +- (void)pauseDrawing; + +//------------------------------------------------------------------------------ + +/** + Call this method to manually tell the EZAudioDisplayLink to start drawing again. 
+ */ +- (void)resumeDrawing; + +//------------------------------------------------------------------------------ +#pragma mark - Subclass +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Customizing The Drawing +///----------------------------------------------------------- + +/** + This method is used to perform the actual OpenGL drawing code to clear the background and draw the lines representing the 2D audio plot. Subclasses can use the current implementation as an example and implement their own custom geometries. This is the analog of overriding the drawRect: method in an NSView or UIView. + @param points An array of EZAudioPlotGLPoint structures representing the mapped audio data to x,y coordinates. The x-axis goes from 0 to the number of points (pointCount) while the y-axis goes from -1 to 1. Check out the implementation of this method to see how the model view matrix of the base effect is transformed to map this properly to the viewport. + @param pointCount A UInt32 representing the number of points contained in the points array. + @param baseEffect An optional GLKBaseEffect to use as a default shader. Call prepareToDraw on the base effect before any glDrawArrays call. + @param vbo The Vertex Buffer Object used to buffer the point data. + @param vab The Vertex Array Buffer used to bind the Vertex Buffer Object. This is a Mac only thing, you can ignore this completely on iOS. + @param interpolated A BOOL indicating whether the data has been interpolated. This means the point data is twice as long, where every other point is 0 on the y-axis to allow drawing triangle strips for filled in waveforms. Typically if the point data is interpolated you will be using the GL_TRIANGLE_STRIP drawing mode, while non-interpolated plots will just use a GL_LINE_STRIP drawing mode. + @param mirrored A BOOL indicating whether the plot should be mirrored about the x-axis (or whatever geometry you come up with). + @param gain A float representing a gain that should be used to influence the height or intensity of your geometry's shape. A gain of 0.0 means silence, a gain of 1.0 means full volume (you're welcome to boost this to whatever you want). + */ +- (void)redrawWithPoints:(EZAudioPlotGLPoint *)points + pointCount:(UInt32)pointCount + baseEffect:(GLKBaseEffect *)baseEffect + vertexBufferObject:(GLuint)vbo + vertexArrayBuffer:(GLuint)vab + interpolated:(BOOL)interpolated + mirrored:(BOOL)mirrored + gain:(float)gain; + +//------------------------------------------------------------------------------ + +/** + Called during the OpenGL run loop to constantly update the drawing 60 fps. Callers can use this to force update the screen while subclasses can override this for complete control over their rendering. However, subclasses are more encouraged to use the `redrawWithPoints:pointCount:baseEffect:vertexBufferObject:vertexArrayBuffer:interpolated:mirrored:gain:` method instead. + */ +- (void)redraw; + +//------------------------------------------------------------------------------ + +/** + Called after the view has been created. Subclasses should use this to add any additional setup needed instead of overriding the init methods. + */ +- (void)setup; + +//------------------------------------------------------------------------------ + +/** + Main method used to copy the sample data from the source buffer and update the + plot. Subclasses can override this method for custom behavior. 
+ @param data A float array of the sample data. Subclasses should copy this data to a separate array to avoid threading issues. + @param length The length of the float array as an int. + */ +- (void)setSampleData:(float *)data length:(int)length; + +///----------------------------------------------------------- +/// @name Subclass Methods +///----------------------------------------------------------- + +/** + Provides the default length of the rolling history buffer when the plot is initialized. Default is `EZAudioPlotDefaultHistoryBufferLength` constant. + @return An int describing the initial length of the rolling history buffer. + */ +- (int)defaultRollingHistoryLength; + +//------------------------------------------------------------------------------ + +/** + Provides the default maximum rolling history length - that is, the maximum number of points the `setRollingHistoryLength:` method may be set to. If a length higher than this is set then the plot will likely crash because the appropriate resources are only allocated once during the plot's initialization step. Default is `EZAudioPlotDefaultMaxHistoryBufferLength` constant. + @return An int describing the absolute maximum length of the rolling history buffer. + */ +- (int)maximumRollingHistoryLength; + +//------------------------------------------------------------------------------ + +@end \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioPlotGL.m b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioPlotGL.m new file mode 100644 index 0000000..1672850 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioPlotGL.m @@ -0,0 +1,548 @@ +// +// EZAudioPlotGL.m +// EZAudio +// +// Created by Syed Haris Ali on 11/22/13. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
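A short usage sketch for the EZAudioPlotGL interface declared above (editor's illustration, not part of the pod): the view controller, its `glPlot` outlet, and the lifecycle hooks chosen here are assumptions; only the properties and the `clear`, `pauseDrawing`, and `resumeDrawing` calls come from the header.

#import <UIKit/UIKit.h>
#import "EZAudioPlotGL.h"

// Hypothetical iOS view controller that owns a GPU-accelerated plot.
@interface GLWaveformViewController : UIViewController
@property (nonatomic, weak) IBOutlet EZAudioPlotGL *glPlot;  // assumed outlet
@end

@implementation GLWaveformViewController

- (void)viewDidLoad
{
    [super viewDidLoad];
    self.glPlot.plotType = EZPlotTypeRolling;
    self.glPlot.shouldFill = YES;
    self.glPlot.shouldMirror = YES;
    self.glPlot.gain = 1.5f; // boost quiet signals a bit
}

- (void)viewWillAppear:(BOOL)animated
{
    [super viewWillAppear:animated];
    [self.glPlot clear];          // wipe both the buffer and the rolling history
    [self.glPlot resumeDrawing];  // restart the internal display link
}

- (void)viewDidDisappear:(BOOL)animated
{
    [super viewDidDisappear:animated];
    [self.glPlot pauseDrawing];   // stop drawing while the plot is off screen
}

@end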
+ +#import "EZAudioPlotGL.h" +#import "EZAudioDisplayLink.h" +#import "EZAudioUtilities.h" +#import "EZAudioPlot.h" + +//------------------------------------------------------------------------------ +#pragma mark - Data Structures +//------------------------------------------------------------------------------ + +typedef struct +{ + BOOL interpolated; + EZPlotHistoryInfo *historyInfo; + EZAudioPlotGLPoint *points; + UInt32 pointCount; + GLuint vbo; + GLuint vab; +} EZAudioPlotGLInfo; + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioPlotGL (Interface Extension) +//------------------------------------------------------------------------------ + +@interface EZAudioPlotGL () +@property (nonatomic, strong) GLKBaseEffect *baseEffect; +@property (nonatomic, strong) EZAudioDisplayLink *displayLink; +@property (nonatomic, assign) EZAudioPlotGLInfo *info; +@end + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioPlotGL (Implementation) +//------------------------------------------------------------------------------ + +@implementation EZAudioPlotGL + +//------------------------------------------------------------------------------ +#pragma mark - Dealloc +//------------------------------------------------------------------------------ + +- (void)dealloc +{ + [self.displayLink stop]; + self.displayLink = nil; + [EZAudioUtilities freeHistoryInfo:self.info->historyInfo]; +#if !TARGET_OS_IPHONE + glDeleteVertexArrays(1, &self.info->vab); +#endif + glDeleteBuffers(1, &self.info->vbo); + free(self.info->points); + free(self.info); + self.baseEffect = nil; +} + +//------------------------------------------------------------------------------ +#pragma mark - Initialization +//------------------------------------------------------------------------------ + +- (instancetype)init +{ + self = [super init]; + if (self) + { + [self setup]; + } + return self; +} + +//------------------------------------------------------------------------------ + +- (id)initWithCoder:(NSCoder *)aDecoder +{ + self = [super initWithCoder:aDecoder]; + if (self) + { + [self setup]; + } + return self; +} + +//------------------------------------------------------------------------------ + +- (instancetype)initWithFrame:(EZRect)frame +{ + self = [super initWithFrame:frame]; + if (self) + { + [self setup]; + } + return self; +} + +//------------------------------------------------------------------------------ + +#if TARGET_OS_IPHONE +- (instancetype)initWithFrame:(CGRect)frame + context:(EAGLContext *)context +{ + self = [super initWithFrame:frame context:context]; + if (self) + { + [self setup]; + } + return self; +} +#elif TARGET_OS_MAC +- (instancetype)initWithFrame:(NSRect)frameRect + pixelFormat:(NSOpenGLPixelFormat *)format +{ + self = [super initWithFrame:frameRect pixelFormat:format]; + if (self) + { + [self setup]; + } + return self; +} +#endif + +//------------------------------------------------------------------------------ +#pragma mark - Setup +//------------------------------------------------------------------------------ + +- (void)setup +{ + // + // Setup info data structure + // + self.info = (EZAudioPlotGLInfo *)malloc(sizeof(EZAudioPlotGLInfo)); + memset(self.info, 0, sizeof(EZAudioPlotGLInfo)); + + // + // Create points array + // + UInt32 pointCount = [self maximumRollingHistoryLength]; + self.info->points = (EZAudioPlotGLPoint *)calloc(sizeof(EZAudioPlotGLPoint), pointCount); + self.info->pointCount 
= pointCount; + + // + // Create the history data structure to hold the rolling data + // + self.info->historyInfo = [EZAudioUtilities historyInfoWithDefaultLength:[self defaultRollingHistoryLength] + maximumLength:[self maximumRollingHistoryLength]]; + + // + // Setup OpenGL specific stuff + // + [self setupOpenGL]; + + // + // Setup view properties + // + self.gain = 1.0f; +#if TARGET_OS_IPHONE + self.backgroundColor = [UIColor colorWithRed:0.569f green:0.82f blue:0.478f alpha:1.0f]; + self.color = [UIColor colorWithRed:1.0f green:1.0f blue:1.0f alpha:1.0f]; +#elif TARGET_OS_MAC + self.backgroundColor = [NSColor colorWithCalibratedRed:0.569f green:0.82f blue:0.478f alpha:1.0f]; + self.color = [NSColor colorWithCalibratedRed:1.0f green:1.0f blue:1.0f alpha:1.0f]; +#endif + + // + // Allow subclass to initialize plot + // + [self setupPlot]; + + // + // Create the display link + // + self.displayLink = [EZAudioDisplayLink displayLinkWithDelegate:self]; + [self.displayLink start]; +} + +//------------------------------------------------------------------------------ + +- (void)setupPlot +{ + // + // Override in subclass + // +} + +//------------------------------------------------------------------------------ + +- (void)setupOpenGL +{ + self.baseEffect = [[GLKBaseEffect alloc] init]; + self.baseEffect.useConstantColor = YES; +#if TARGET_OS_IPHONE + if (!self.context) + { + self.context = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2]; + } + [EAGLContext setCurrentContext:self.context]; + self.drawableColorFormat = GLKViewDrawableColorFormatRGBA8888; + self.drawableDepthFormat = GLKViewDrawableDepthFormat24; + self.drawableStencilFormat = GLKViewDrawableStencilFormat8; + self.drawableMultisample = GLKViewDrawableMultisample4X; + self.opaque = NO; + self.enableSetNeedsDisplay = NO; +#elif TARGET_OS_MAC + self.wantsBestResolutionOpenGLSurface = YES; + self.wantsLayer = YES; + self.layer.opaque = YES; + self.layer.backgroundColor = [NSColor clearColor].CGColor; + if (!self.pixelFormat) + { + NSOpenGLPixelFormatAttribute attrs[] = + { + NSOpenGLPFADoubleBuffer, + NSOpenGLPFAMultisample, + NSOpenGLPFASampleBuffers, 1, + NSOpenGLPFASamples, 4, + NSOpenGLPFADepthSize, 24, + NSOpenGLPFAOpenGLProfile, + NSOpenGLProfileVersion3_2Core, 0 + }; + self.pixelFormat = [[NSOpenGLPixelFormat alloc] initWithAttributes:attrs]; + } +#if DEBUG + NSAssert(self.pixelFormat, @"Could not create OpenGL pixel format so context is not valid"); +#endif + self.openGLContext = [[NSOpenGLContext alloc] initWithFormat:self.pixelFormat + shareContext:nil]; + GLint swapInt = 1; GLint surfaceOpacity = 0; + [self.openGLContext setValues:&swapInt forParameter:NSOpenGLCPSwapInterval]; + [self.openGLContext setValues:&surfaceOpacity forParameter:NSOpenGLCPSurfaceOpacity]; + [self.openGLContext lock]; + glGenVertexArrays(1, &self.info->vab); + glBindVertexArray(self.info->vab); +#endif + glGenBuffers(1, &self.info->vbo); + glBindBuffer(GL_ARRAY_BUFFER, self.info->vbo); + glBufferData(GL_ARRAY_BUFFER, + self.info->pointCount * sizeof(EZAudioPlotGLPoint), + self.info->points, + GL_STREAM_DRAW); +#if !TARGET_OS_IPHONE + [self.openGLContext unlock]; +#endif + self.frame = self.frame; +} + +//------------------------------------------------------------------------------ +#pragma mark - Updating The Plot +//------------------------------------------------------------------------------ + +- (void)updateBuffer:(float *)buffer withBufferSize:(UInt32)bufferSize +{ + // + // Update history + // + [EZAudioUtilities 
appendBufferRMS:buffer + withBufferSize:bufferSize + toHistoryInfo:self.info->historyInfo]; + + // + // Convert this data to point data + // + switch (self.plotType) + { + case EZPlotTypeBuffer: + [self setSampleData:buffer + length:bufferSize]; + break; + case EZPlotTypeRolling: + [self setSampleData:self.info->historyInfo->buffer + length:self.info->historyInfo->bufferSize]; + break; + default: + break; + } +} + +//------------------------------------------------------------------------------ + +- (void)setSampleData:(float *)data length:(int)length +{ + int pointCount = self.shouldFill ? length * 2 : length; + EZAudioPlotGLPoint *points = self.info->points; + for (int i = 0; i < length; i++) + { + if (self.shouldFill) + { + points[i * 2].x = points[i * 2 + 1].x = i; + points[i * 2].y = data[i]; + points[i * 2 + 1].y = 0.0f; + } + else + { + points[i].x = i; + points[i].y = data[i]; + } + } + points[0].y = points[pointCount - 1].y = 0.0f; + self.info->pointCount = pointCount; + self.info->interpolated = self.shouldFill; +#if !TARGET_OS_IPHONE + [self.openGLContext lock]; + glBindVertexArray(self.info->vab); +#endif + glBindBuffer(GL_ARRAY_BUFFER, self.info->vbo); + glBufferSubData(GL_ARRAY_BUFFER, + 0, + pointCount * sizeof(EZAudioPlotGLPoint), + self.info->points); +#if !TARGET_OS_IPHONE + [self.openGLContext unlock]; +#endif +} + +//------------------------------------------------------------------------------ +#pragma mark - Adjusting History Resolution +//------------------------------------------------------------------------------ + +- (int)rollingHistoryLength +{ + return self.info->historyInfo->bufferSize; +} + +//------------------------------------------------------------------------------ + +- (int)setRollingHistoryLength:(int)historyLength +{ + self.info->historyInfo->bufferSize = MIN(EZAudioPlotDefaultMaxHistoryBufferLength, historyLength); + return self.info->historyInfo->bufferSize; +} + +//------------------------------------------------------------------------------ +#pragma mark - Clearing The Plot +//------------------------------------------------------------------------------ + +- (void)clear +{ + float emptyBuffer[1]; + emptyBuffer[0] = 0.0f; + [self setSampleData:emptyBuffer length:1]; + [EZAudioUtilities clearHistoryInfo:self.info->historyInfo]; +#if TARGET_OS_IPHONE + [self display]; +#elif TARGET_OS_MAC + [self redraw]; +#endif +} + +//------------------------------------------------------------------------------ +#pragma mark - Start/Stop Display Link +//------------------------------------------------------------------------------ + +- (void)pauseDrawing +{ + [self.displayLink stop]; +} + +//------------------------------------------------------------------------------ + +- (void)resumeDrawing +{ + [self.displayLink start]; +} + +//------------------------------------------------------------------------------ +#pragma mark - Setters +//------------------------------------------------------------------------------ + +- (void)setBackgroundColor:(id)backgroundColor +{ + _backgroundColor = backgroundColor; + if (backgroundColor) + { + CGColorRef colorRef = [backgroundColor CGColor]; + CGFloat red; CGFloat green; CGFloat blue; CGFloat alpha; + [EZAudioUtilities getColorComponentsFromCGColor:colorRef + red:&red + green:&green + blue:&blue + alpha:&alpha]; + // + // Note! 
If you set the alpha to be 0 on mac for a transparent view + // the EZAudioPlotGL will make the superview layer-backed to make + // sure there is a surface to display itself on (or else you will get + // some pretty weird drawing glitches + // +#if !TARGET_OS_IPHONE + if (alpha == 0.0f) + { + [self.superview setWantsLayer:YES]; + } +#endif + glClearColor(red, green, blue, alpha); + } + else + { + glClearColor(0.0f, 0.0f, 0.0f, 0.0f); + } +} + +//------------------------------------------------------------------------------ + +- (void)setColor:(id)color +{ + _color = color; + if (color) + { + CGColorRef colorRef = [color CGColor]; + CGFloat red; CGFloat green; CGFloat blue; CGFloat alpha; + [EZAudioUtilities getColorComponentsFromCGColor:colorRef + red:&red + green:&green + blue:&blue + alpha:&alpha]; + self.baseEffect.constantColor = GLKVector4Make(red, green, blue, alpha); + } + else + { + self.baseEffect.constantColor = GLKVector4Make(0.0f, 0.0f, 0.0f, 0.0f); + } +} + +//------------------------------------------------------------------------------ +#pragma mark - Drawing +//------------------------------------------------------------------------------ + +- (void)drawRect:(EZRect)rect +{ + [self redraw]; +} + +//------------------------------------------------------------------------------ + +- (void)redraw +{ +#if !TARGET_OS_IPHONE + [self.openGLContext makeCurrentContext]; + [self.openGLContext lock]; +#endif + [self redrawWithPoints:self.info->points + pointCount:self.info->pointCount + baseEffect:self.baseEffect + vertexBufferObject:self.info->vbo + vertexArrayBuffer:self.info->vab + interpolated:self.info->interpolated + mirrored:self.shouldMirror + gain:self.gain]; +#if !TARGET_OS_IPHONE + [self.openGLContext flushBuffer]; + [self.openGLContext unlock]; +#endif +} + +//------------------------------------------------------------------------------ + +- (void)redrawWithPoints:(EZAudioPlotGLPoint *)points + pointCount:(UInt32)pointCount + baseEffect:(GLKBaseEffect *)baseEffect + vertexBufferObject:(GLuint)vbo + vertexArrayBuffer:(GLuint)vab + interpolated:(BOOL)interpolated + mirrored:(BOOL)mirrored + gain:(float)gain +{ + glClear(GL_COLOR_BUFFER_BIT); + GLenum mode = interpolated ? GL_TRIANGLE_STRIP : GL_LINE_STRIP; + float interpolatedFactor = interpolated ? 
2.0f : 1.0f; + float xscale = 2.0f / ((float)pointCount / interpolatedFactor); + float yscale = 1.0f * gain; + GLKMatrix4 transform = GLKMatrix4MakeTranslation(-1.0f, 0.0f, 0.0f); + transform = GLKMatrix4Scale(transform, xscale, yscale, 1.0f); + baseEffect.transform.modelviewMatrix = transform; +#if !TARGET_OS_IPHONE + glBindVertexArray(vab); +#endif + glBindBuffer(GL_ARRAY_BUFFER, vbo); + [baseEffect prepareToDraw]; + glEnableVertexAttribArray(GLKVertexAttribPosition); + glVertexAttribPointer(GLKVertexAttribPosition, + 2, + GL_FLOAT, + GL_FALSE, + sizeof(EZAudioPlotGLPoint), + NULL); + glDrawArrays(mode, 0, pointCount); + if (mirrored) + { + baseEffect.transform.modelviewMatrix = GLKMatrix4Rotate(transform, M_PI, 1.0f, 0.0f, 0.0f); + [baseEffect prepareToDraw]; + glDrawArrays(mode, 0, pointCount); + } +} + +//------------------------------------------------------------------------------ +#pragma mark - Subclass +//------------------------------------------------------------------------------ + +- (int)defaultRollingHistoryLength +{ + return EZAudioPlotDefaultHistoryBufferLength; +} + +//------------------------------------------------------------------------------ + +- (int)maximumRollingHistoryLength +{ + return EZAudioPlotDefaultMaxHistoryBufferLength; +} + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioDisplayLinkDelegate +//------------------------------------------------------------------------------ + +- (void)displayLinkNeedsDisplay:(EZAudioDisplayLink *)displayLink +{ +#if TARGET_OS_IPHONE + if ([[UIApplication sharedApplication] applicationState] == UIApplicationStateActive) + { + [self display]; + } +#elif TARGET_OS_MAC + [self redraw]; +#endif +} + +//------------------------------------------------------------------------------ + +@end \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioUtilities.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioUtilities.h new file mode 100644 index 0000000..1ae9745 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioUtilities.h @@ -0,0 +1,549 @@ +// +// EZAudioUtilities.h +// EZAudio +// +// Created by Syed Haris Ali on 6/23/15. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
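Because the EZAudioPlotGL implementation above allocates its point and history buffers once during `setup` using `maximumRollingHistoryLength`, the intended customization points are the `setupPlot` hook and the subclass methods it exposes. A minimal subclass sketch follows (editor's illustration; the class name is hypothetical, and the larger default assumes it stays within the pod's maximum history length).

#import "EZAudioPlotGL.h"

// Hypothetical subclass used only to illustrate the customization hooks.
@interface MyRollingPlotGL : EZAudioPlotGL
@end

@implementation MyRollingPlotGL

- (void)setupPlot
{
    // Called once from -setup after the base properties are configured
    // and before the display link starts.
    self.plotType = EZPlotTypeRolling;
    self.shouldFill = YES;
    self.shouldMirror = YES;
}

- (int)defaultRollingHistoryLength
{
    // Wider initial rolling window; must not exceed maximumRollingHistoryLength,
    // since the point buffer is only allocated once at init time.
    return 2048;
}

@end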
+ +#import +#import +#import +#import "TPCircularBuffer.h" +#if TARGET_OS_IPHONE +#import +#elif TARGET_OS_MAC +#endif + +//------------------------------------------------------------------------------ +#pragma mark - Data Structures +//------------------------------------------------------------------------------ + +/** + A data structure that holds information about audio data over time. It contains a circular buffer to incrementally write the audio data to and a scratch buffer to hold a window of audio data relative to the whole circular buffer. In use, this will provide a way to continuously append data while having an adjustable viewable window described by the bufferSize. + */ +typedef struct +{ + float *buffer; + int bufferSize; + TPCircularBuffer circularBuffer; +} EZPlotHistoryInfo; + +//------------------------------------------------------------------------------ + +/** + A data structure that holds information about a node in the context of an AUGraph. + */ +typedef struct +{ + AudioUnit audioUnit; + AUNode node; +} EZAudioNodeInfo; + +//------------------------------------------------------------------------------ +#pragma mark - Types +//------------------------------------------------------------------------------ + +#if TARGET_OS_IPHONE +typedef CGRect EZRect; +#elif TARGET_OS_MAC +typedef NSRect EZRect; +#endif + +//------------------------------------------------------------------------------ +#pragma mark - EZAudioUtilities +//------------------------------------------------------------------------------ + +/** + The EZAudioUtilities class provides a set of class-level utility methods used throughout EZAudio to handle common operations such as allocating audio buffers and structures, creating various types of AudioStreamBasicDescription structures, string helpers for formatting and debugging, various math utilities, a very handy check result function (used everywhere!), and helpers for dealing with circular buffers. These were previously on the EZAudio class, but as of the 0.1.0 release have been moved here so the whole EZAudio is not needed when using only certain modules. + */ +@interface EZAudioUtilities : NSObject + +//------------------------------------------------------------------------------ +#pragma mark - Debugging +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Debugging EZAudio +///----------------------------------------------------------- + +/** + Globally sets whether or not the program should exit if a `checkResult:operation:` operation fails. Currently the behavior on EZAudio is to quit if a `checkResult:operation:` fails, but this is not desirable in any production environment. Internally there are a lot of `checkResult:operation:` operations used on all the core classes. This should only ever be set to NO in production environments since a `checkResult:operation:` failing means something breaking has likely happened. + @param shouldExitOnCheckResultFail A BOOL indicating whether or not the running program should exist due to a `checkResult:operation:` fail. + */ ++ (void)setShouldExitOnCheckResultFail:(BOOL)shouldExitOnCheckResultFail; + +//------------------------------------------------------------------------------ + +/** + Provides a flag indicating whether or not the program will exit if a `checkResult:operation:` fails. + @return A BOOL indicating whether or not the program will exit if a `checkResult:operation:` fails. 
+ */ ++ (BOOL)shouldExitOnCheckResultFail; + +//------------------------------------------------------------------------------ +#pragma mark - AudioBufferList Utility +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name AudioBufferList Utility +///----------------------------------------------------------- + +/** + Allocates an AudioBufferList structure. Make sure to call freeBufferList when done using AudioBufferList or it will leak. + @param frames The number of frames that will be stored within each audio buffer + @param channels The number of channels (e.g. 2 for stereo, 1 for mono, etc.) + @param interleaved Whether the samples will be interleaved (if not it will be assumed to be non-interleaved and each channel will have an AudioBuffer allocated) + @return An AudioBufferList struct that has been allocated in memory + */ ++ (AudioBufferList *)audioBufferListWithNumberOfFrames:(UInt32)frames + numberOfChannels:(UInt32)channels + interleaved:(BOOL)interleaved; + +//------------------------------------------------------------------------------ + +/** + Allocates an array of float arrays given the number of frames needed to store in each float array. + @param frames A UInt32 representing the number of frames to store in each float buffer + @param channels A UInt32 representing the number of channels (i.e. the number of float arrays to allocate) + @return An array of float arrays, each the length of the number of frames specified + */ ++ (float **)floatBuffersWithNumberOfFrames:(UInt32)frames + numberOfChannels:(UInt32)channels; + +//------------------------------------------------------------------------------ + +/** + Deallocates an AudioBufferList structure from memory. + @param bufferList A pointer to the buffer list you would like to free + */ ++ (void)freeBufferList:(AudioBufferList *)bufferList; + +//------------------------------------------------------------------------------ + +/** + Deallocates an array of float buffers + @param buffers An array of float arrays + @param channels A UInt32 representing the number of channels (i.e. the number of float arrays to deallocate) + */ ++ (void)freeFloatBuffers:(float **)buffers numberOfChannels:(UInt32)channels; + +//------------------------------------------------------------------------------ +#pragma mark - AudioStreamBasicDescription Utilties +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Creating An AudioStreamBasicDescription +///----------------------------------------------------------- + +/** + Creates a signed-integer, interleaved AudioStreamBasicDescription for the number of channels specified for an AIFF format. + @param channels The desired number of channels + @param sampleRate A float representing the sample rate. + @return A new AudioStreamBasicDescription with the specified format. + */ ++ (AudioStreamBasicDescription)AIFFFormatWithNumberOfChannels:(UInt32)channels + sampleRate:(float)sampleRate; + +//------------------------------------------------------------------------------ + +/** + Creates an AudioStreamBasicDescription for the iLBC narrow band speech codec. + @param sampleRate A float representing the sample rate. + @return A new AudioStreamBasicDescription with the specified format. 
+ */ ++ (AudioStreamBasicDescription)iLBCFormatWithSampleRate:(float)sampleRate; + +//------------------------------------------------------------------------------ + +/** + Creates a float-based, non-interleaved AudioStreamBasicDescription for the number of channels specified. + @param channels A UInt32 representing the number of channels. + @param sampleRate A float representing the sample rate. + @return A float-based AudioStreamBasicDescription with the number of channels specified. + */ ++ (AudioStreamBasicDescription)floatFormatWithNumberOfChannels:(UInt32)channels + sampleRate:(float)sampleRate; + +//------------------------------------------------------------------------------ + +/** + Creates an AudioStreamBasicDescription for an M4A AAC format. + @param channels The desired number of channels + @param sampleRate A float representing the sample rate. + @return A new AudioStreamBasicDescription with the specified format. + */ ++ (AudioStreamBasicDescription)M4AFormatWithNumberOfChannels:(UInt32)channels + sampleRate:(float)sampleRate; + +//------------------------------------------------------------------------------ + +/** + Creates a single-channel, float-based AudioStreamBasicDescription. + @param sampleRate A float representing the sample rate. + @return A new AudioStreamBasicDescription with the specified format. + */ ++ (AudioStreamBasicDescription)monoFloatFormatWithSampleRate:(float)sampleRate; + +//------------------------------------------------------------------------------ + +/** + Creates a single-channel, float-based AudioStreamBasicDescription (as of 0.0.6 this is the same as `monoFloatFormatWithSampleRate:`). + @param sampleRate A float representing the sample rate. + @return A new AudioStreamBasicDescription with the specified format. + */ ++ (AudioStreamBasicDescription)monoCanonicalFormatWithSampleRate:(float)sampleRate; + +//------------------------------------------------------------------------------ + +/** + Creates a two-channel, non-interleaved, float-based AudioStreamBasicDescription (as of 0.0.6 this is the same as `stereoFloatNonInterleavedFormatWithSampleRate:`). + @param sampleRate A float representing the sample rate. + @return A new AudioStreamBasicDescription with the specified format. + */ ++ (AudioStreamBasicDescription)stereoCanonicalNonInterleavedFormatWithSampleRate:(float)sampleRate; + +//------------------------------------------------------------------------------ + +/** + Creates a two-channel, interleaved, float-based AudioStreamBasicDescription. + @param sampleRate A float representing the sample rate. + @return A new AudioStreamBasicDescription with the specified format. + */ ++ (AudioStreamBasicDescription)stereoFloatInterleavedFormatWithSampleRate:(float)sampleRate; + +//------------------------------------------------------------------------------ + +/** + Creates a two-channel, non-interleaved, float-based AudioStreamBasicDescription. + @param sampleRate A float representing the sample rate. + @return A new AudioStreamBasicDescription with the specified format. + */ ++ (AudioStreamBasicDescription)stereoFloatNonInterleavedFormatWithSampleRate:(float)sampleRate; + +//------------------------------------------------------------------------------ +// @name AudioStreamBasicDescription Helper Functions +//------------------------------------------------------------------------------ + +/** + Checks an AudioStreamBasicDescription to see if it is a float-based format (as opposed to a signed integer based format). 
+ @param asbd A valid AudioStreamBasicDescription + @return A BOOL indicating whether or not the AudioStreamBasicDescription is a float format. + */ ++ (BOOL)isFloatFormat:(AudioStreamBasicDescription)asbd; + +//------------------------------------------------------------------------------ + +/** + Checks an AudioStreamBasicDescription to check for an interleaved flag (samples are + stored in one buffer one after another instead of two (or n channels) parallel buffers + @param asbd A valid AudioStreamBasicDescription + @return A BOOL indicating whether or not the AudioStreamBasicDescription is interleaved + */ ++ (BOOL)isInterleaved:(AudioStreamBasicDescription)asbd; + +//------------------------------------------------------------------------------ + +/** + Checks an AudioStreamBasicDescription to see if it is a linear PCM format (uncompressed, + 1 frame per packet) + @param asbd A valid AudioStreamBasicDescription + @return A BOOL indicating whether or not the AudioStreamBasicDescription is linear PCM. + */ ++ (BOOL)isLinearPCM:(AudioStreamBasicDescription)asbd; + +///----------------------------------------------------------- +/// @name AudioStreamBasicDescription Utilities +///----------------------------------------------------------- + +/** + Nicely logs out the contents of an AudioStreamBasicDescription struct + @param asbd The AudioStreamBasicDescription struct with content to print out + */ ++ (void)printASBD:(AudioStreamBasicDescription)asbd; + +//------------------------------------------------------------------------------ + +/** + Converts seconds into a string formatted as MM:SS + @param seconds An NSTimeInterval representing the number of seconds + @return An NSString instance formatted as MM:SS from the seconds provided. + */ ++ (NSString *)displayTimeStringFromSeconds:(NSTimeInterval)seconds; + +//------------------------------------------------------------------------------ + +/** + Creates a string to use when logging out the contents of an AudioStreamBasicDescription + @param asbd A valid AudioStreamBasicDescription struct. + @return An NSString representing the contents of the AudioStreamBasicDescription. + */ ++ (NSString *)stringForAudioStreamBasicDescription:(AudioStreamBasicDescription)asbd; + +//------------------------------------------------------------------------------ + +/** + Just a wrapper around the setCanonical function provided in the Core Audio Utility C++ class. + @param asbd The AudioStreamBasicDescription structure to modify + @param nChannels The number of expected channels on the description + @param interleaved A flag indicating whether the stereo samples should be interleaved in the buffer + */ ++ (void)setCanonicalAudioStreamBasicDescription:(AudioStreamBasicDescription*)asbd + numberOfChannels:(UInt32)nChannels + interleaved:(BOOL)interleaved; + +//------------------------------------------------------------------------------ +#pragma mark - Math Utilities +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Math Utilities +///----------------------------------------------------------- + +/** + Appends an array of values to a history buffer and performs an internal shift to add the values to the tail and removes the same number of values from the head. 
+ @param buffer A float array of values to append to the tail of the history buffer + @param bufferLength The length of the float array being appended to the history buffer + @param scrollHistory The target history buffer in which to append the values + @param scrollHistoryLength The length of the target history buffer + */ ++ (void)appendBufferAndShift:(float*)buffer + withBufferSize:(int)bufferLength + toScrollHistory:(float*)scrollHistory + withScrollHistorySize:(int)scrollHistoryLength; + +//------------------------------------------------------------------------------ + +/** + Appends a value to a history buffer and performs an internal shift to add the value to the tail and remove the 0th value. + @param value The float value to append to the history array + @param scrollHistory The target history buffer in which to append the values + @param scrollHistoryLength The length of the target history buffer + */ ++(void) appendValue:(float)value + toScrollHistory:(float*)scrollHistory + withScrollHistorySize:(int)scrollHistoryLength; + +//------------------------------------------------------------------------------ + +/** + Maps a value from one coordinate system into another one. Takes in the current value to map, the minimum and maximum values of the first coordinate system, and the minimum and maximum values of the second coordinate system and calculates the mapped value in the second coordinate system's constraints. + @param value The value expressed in the first coordinate system + @param leftMin The minimum of the first coordinate system + @param leftMax The maximum of the first coordinate system + @param rightMin The minimum of the second coordindate system + @param rightMax The maximum of the second coordinate system + @return The mapped value in terms of the second coordinate system + */ ++ (float)MAP:(float)value + leftMin:(float)leftMin + leftMax:(float)leftMax + rightMin:(float)rightMin + rightMax:(float)rightMax; + +//------------------------------------------------------------------------------ + +/** + Calculates the root mean squared for a buffer. 
+ @param buffer A float buffer array of values whose root mean squared to calculate + @param bufferSize The size of the float buffer + @return The root mean squared of the buffer + */ ++ (float)RMS:(float*)buffer length:(int)bufferSize; + +//------------------------------------------------------------------------------ + +/** + Calculate the sign function sgn(x) = + { -1 , x < 0, + { 0 , x = 0, + { 1 , x > 0 + @param value The float value for which to use as x + @return The float sign value + */ ++ (float)SGN:(float)value; + +//------------------------------------------------------------------------------ +#pragma mark - Music Utilities +//------------------------------------------------------------------------------ + ++ (NSString *)noteNameStringForFrequency:(float)frequency + includeOctave:(BOOL)includeOctave; + +//------------------------------------------------------------------------------ +#pragma mark - OSStatus Utility +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name OSStatus Utility +///----------------------------------------------------------- + +/** + Basic check result function useful for checking each step of the audio setup process + @param result The OSStatus representing the result of an operation + @param operation A string (const char, not NSString) describing the operation taking place (will print if fails) + */ ++ (void)checkResult:(OSStatus)result operation:(const char *)operation; + +//------------------------------------------------------------------------------ + +/** + Provides a string representation of the often cryptic Core Audio error codes + @param code A UInt32 representing an error code + @return An NSString with a human readable version of the error code. + */ ++ (NSString *)stringFromUInt32Code:(UInt32)code; + +//------------------------------------------------------------------------------ +#pragma mark - Color Utility +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Color Utility +///----------------------------------------------------------- + +/** + Helper function to get the color components from a CGColorRef in the RGBA colorspace. + @param color A CGColorRef that represents a color. + @param red A pointer to a CGFloat to hold the value of the red component. This value will be between 0 and 1. + @param green A pointer to a CGFloat to hold the value of the green component. This value will be between 0 and 1. + @param blue A pointer to a CGFloat to hold the value of the blue component. This value will be between 0 and 1. + @param alpha A pointer to a CGFloat to hold the value of the alpha component. This value will be between 0 and 1. 
+ */ ++ (void)getColorComponentsFromCGColor:(CGColorRef)color + red:(CGFloat *)red + green:(CGFloat *)green + blue:(CGFloat *)blue + alpha:(CGFloat *)alpha; + +//------------------------------------------------------------------------------ +#pragma mark - Plot Utility +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Plot Utility +///----------------------------------------------------------- + +/** + Given a buffer representing a window of float history data this append the RMS of a buffer of incoming float data...This will likely be deprecated in a future version of EZAudio for a circular buffer based approach. + @param scrollHistory An array of float arrays being used to hold the history values for each channel. + @param scrollHistoryLength An int representing the length of the history window. + @param index An int pointer to the index of the current read index of the history buffer. + @param buffer A float array representing the incoming audio data. + @param bufferSize An int representing the length of the incoming audio data. + @param isChanging A BOOL pointer representing whether the resolution (length of the history window) is currently changing. + */ ++ (void)updateScrollHistory:(float **)scrollHistory + withLength:(int)scrollHistoryLength + atIndex:(int *)index + withBuffer:(float *)buffer + withBufferSize:(int)bufferSize + isResolutionChanging:(BOOL *)isChanging; + +//------------------------------------------------------------------------------ +#pragma mark - TPCircularBuffer Utility +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name TPCircularBuffer Utility +///----------------------------------------------------------- + +/** + Appends the data from the audio buffer list to the circular buffer + @param circularBuffer Pointer to the instance of the TPCircularBuffer to add the audio data to + @param audioBufferList Pointer to the instance of the AudioBufferList with the audio data + */ ++ (void)appendDataToCircularBuffer:(TPCircularBuffer*)circularBuffer + fromAudioBufferList:(AudioBufferList*)audioBufferList; + +//------------------------------------------------------------------------------ + +/** + Initializes the circular buffer (just a wrapper around the C method) + @param circularBuffer Pointer to an instance of the TPCircularBuffer + @param size The length of the TPCircularBuffer (usually 1024) + */ ++ (void)circularBuffer:(TPCircularBuffer*)circularBuffer + withSize:(int)size; + +//------------------------------------------------------------------------------ + +/** + Frees a circular buffer + @param circularBuffer Pointer to the circular buffer to clear + */ ++ (void)freeCircularBuffer:(TPCircularBuffer*)circularBuffer; + +//------------------------------------------------------------------------------ +#pragma mark - EZPlotHistoryInfo Utility +//------------------------------------------------------------------------------ + +/** + Calculates the RMS of a float array containing audio data and appends it to the tail of a EZPlotHistoryInfo data structure. Thread-safe. 
+ @param buffer A float array containing the incoming audio buffer to append to the history buffer + @param bufferSize A UInt32 representing the length of the incoming audio buffer + @param historyInfo A pointer to a EZPlotHistoryInfo structure to use for managing the history buffers + */ ++ (void)appendBufferRMS:(float *)buffer + withBufferSize:(UInt32)bufferSize + toHistoryInfo:(EZPlotHistoryInfo *)historyInfo; + +//------------------------------------------------------------------------------ + +/** + Appends a buffer of audio data to the tail of a EZPlotHistoryInfo data structure. Thread-safe. + @param buffer A float array containing the incoming audio buffer to append to the history buffer + @param bufferSize A UInt32 representing the length of the incoming audio buffer + @param historyInfo A pointer to a EZPlotHistoryInfo structure to use for managing the history buffers + */ ++ (void)appendBuffer:(float *)buffer + withBufferSize:(UInt32)bufferSize + toHistoryInfo:(EZPlotHistoryInfo *)historyInfo; + +//------------------------------------------------------------------------------ + +/** + Zeroes out a EZPlotHistoryInfo data structure without freeing the resources. + @param historyInfo A pointer to a EZPlotHistoryInfo data structure + */ ++ (void)clearHistoryInfo:(EZPlotHistoryInfo *)historyInfo; + +//------------------------------------------------------------------------------ + +/** + Frees a EZPlotHistoryInfo data structure + @param historyInfo A pointer to a EZPlotHistoryInfo data structure + */ ++ (void)freeHistoryInfo:(EZPlotHistoryInfo *)historyInfo; + +//------------------------------------------------------------------------------ + +/** + Creates an EZPlotHistoryInfo data structure with a default length for the window buffer and a maximum length capacity for the internal circular buffer that holds all the audio data. + @param defaultLength An int representing the default length (i.e. the number of points that will be displayed on screen) of the history window. + @param maximumLength An int representing the default maximum length that is the absolute maximum amount of values that can be held in the history's circular buffer. + @return A pointer to the EZPlotHistoryInfo created. The caller is responsible for freeing this structure using the `freeHistoryInfo` method above. + */ ++ (EZPlotHistoryInfo *)historyInfoWithDefaultLength:(int)defaultLength + maximumLength:(int)maximumLength; + +//------------------------------------------------------------------------------ + +@end diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioUtilities.m b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioUtilities.m new file mode 100644 index 0000000..82402da --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioUtilities.m @@ -0,0 +1,744 @@ +// +// EZAudioUtilities.m +// EZAudio +// +// Created by Syed Haris Ali on 6/23/15. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import "EZAudioUtilities.h" + +static float const EZAudioUtilitiesFixedNoteA = 440.0f; +static int const EZAudioUtilitiesFixedNoteAIndex = 9; +static int const EZAudioUtilitiesFixedNoteAOctave = 4; +static float const EZAudioUtilitiesEQFrequencyRatio = 1.059463094359f; +static int const EZAudioUtilitiesNotesLength = 12; +static NSString * const EZAudioUtilitiesNotes[EZAudioUtilitiesNotesLength] = +{ + @"C", @"C#", + @"D", @"D#", + @"E", + @"F", @"F#", + @"G", @"G#", + @"A", @"A#", + @"B" +}; + +BOOL __shouldExitOnCheckResultFail = YES; + +@implementation EZAudioUtilities + +//------------------------------------------------------------------------------ +#pragma mark - Debugging +//------------------------------------------------------------------------------ + ++ (void)setShouldExitOnCheckResultFail:(BOOL)shouldExitOnCheckResultFail +{ + __shouldExitOnCheckResultFail = shouldExitOnCheckResultFail; +} + +//------------------------------------------------------------------------------ + ++ (BOOL)shouldExitOnCheckResultFail +{ + return __shouldExitOnCheckResultFail; +} + +//------------------------------------------------------------------------------ +#pragma mark - AudioBufferList Utility +//------------------------------------------------------------------------------ + ++ (AudioBufferList *)audioBufferListWithNumberOfFrames:(UInt32)frames + numberOfChannels:(UInt32)channels + interleaved:(BOOL)interleaved +{ + unsigned nBuffers; + unsigned bufferSize; + unsigned channelsPerBuffer; + if (interleaved) + { + nBuffers = 1; + bufferSize = sizeof(float) * frames * channels; + channelsPerBuffer = channels; + } + else + { + nBuffers = channels; + bufferSize = sizeof(float) * frames; + channelsPerBuffer = 1; + } + + AudioBufferList *audioBufferList = (AudioBufferList *)malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer) * (channels-1)); + audioBufferList->mNumberBuffers = nBuffers; + for(unsigned i = 0; i < nBuffers; i++) + { + audioBufferList->mBuffers[i].mNumberChannels = channelsPerBuffer; + audioBufferList->mBuffers[i].mDataByteSize = bufferSize; + audioBufferList->mBuffers[i].mData = calloc(bufferSize, 1); + } + return audioBufferList; +} + +//------------------------------------------------------------------------------ + ++ (float **)floatBuffersWithNumberOfFrames:(UInt32)frames + numberOfChannels:(UInt32)channels +{ + size_t size = sizeof(float *) * channels; + float **buffers = (float **)malloc(size); + for 
(int i = 0; i < channels; i++) + { + size = sizeof(float) * frames; + buffers[i] = (float *)malloc(size); + } + return buffers; +} + +//------------------------------------------------------------------------------ + ++ (void)freeBufferList:(AudioBufferList *)bufferList +{ + if (bufferList) + { + if (bufferList->mNumberBuffers) + { + for( int i = 0; i < bufferList->mNumberBuffers; i++) + { + if (bufferList->mBuffers[i].mData) + { + free(bufferList->mBuffers[i].mData); + } + } + } + free(bufferList); + } + bufferList = NULL; +} + +//------------------------------------------------------------------------------ + ++ (void)freeFloatBuffers:(float **)buffers numberOfChannels:(UInt32)channels +{ + if (!buffers || !*buffers) + { + return; + } + + for (int i = 0; i < channels; i++) + { + free(buffers[i]); + } + free(buffers); +} + +//------------------------------------------------------------------------------ +#pragma mark - AudioStreamBasicDescription Utility +//------------------------------------------------------------------------------ + ++ (AudioStreamBasicDescription)AIFFFormatWithNumberOfChannels:(UInt32)channels + sampleRate:(float)sampleRate +{ + AudioStreamBasicDescription asbd; + memset(&asbd, 0, sizeof(asbd)); + asbd.mFormatID = kAudioFormatLinearPCM; + asbd.mFormatFlags = kAudioFormatFlagIsBigEndian|kAudioFormatFlagIsPacked|kAudioFormatFlagIsSignedInteger; + asbd.mSampleRate = sampleRate; + asbd.mChannelsPerFrame = channels; + asbd.mBitsPerChannel = 32; + asbd.mBytesPerPacket = (asbd.mBitsPerChannel / 8) * asbd.mChannelsPerFrame; + asbd.mFramesPerPacket = 1; + asbd.mBytesPerFrame = (asbd.mBitsPerChannel / 8) * asbd.mChannelsPerFrame; + return asbd; +} + +//------------------------------------------------------------------------------ + ++ (AudioStreamBasicDescription)iLBCFormatWithSampleRate:(float)sampleRate +{ + AudioStreamBasicDescription asbd; + memset(&asbd, 0, sizeof(asbd)); + asbd.mFormatID = kAudioFormatiLBC; + asbd.mChannelsPerFrame = 1; + asbd.mSampleRate = sampleRate; + + // Fill in the rest of the descriptions using the Audio Format API + UInt32 propSize = sizeof(asbd); + [EZAudioUtilities checkResult:AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, + 0, + NULL, + &propSize, + &asbd) + operation:"Failed to fill out the rest of the iLBC AudioStreamBasicDescription"]; + + return asbd; +} + +//------------------------------------------------------------------------------ + ++ (AudioStreamBasicDescription)floatFormatWithNumberOfChannels:(UInt32)channels + sampleRate:(float)sampleRate +{ + AudioStreamBasicDescription asbd; + UInt32 floatByteSize = sizeof(float); + asbd.mBitsPerChannel = 8 * floatByteSize; + asbd.mBytesPerFrame = floatByteSize; + asbd.mBytesPerPacket = floatByteSize; + asbd.mChannelsPerFrame = channels; + asbd.mFormatFlags = kAudioFormatFlagIsFloat|kAudioFormatFlagIsNonInterleaved; + asbd.mFormatID = kAudioFormatLinearPCM; + asbd.mFramesPerPacket = 1; + asbd.mSampleRate = sampleRate; + return asbd; +} + +//------------------------------------------------------------------------------ + ++ (AudioStreamBasicDescription)M4AFormatWithNumberOfChannels:(UInt32)channels + sampleRate:(float)sampleRate +{ + AudioStreamBasicDescription asbd; + memset(&asbd, 0, sizeof(asbd)); + asbd.mFormatID = kAudioFormatMPEG4AAC; + asbd.mChannelsPerFrame = channels; + asbd.mSampleRate = sampleRate; + + // Fill in the rest of the descriptions using the Audio Format API + UInt32 propSize = sizeof(asbd); + [EZAudioUtilities 
checkResult:AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, + 0, + NULL, + &propSize, + &asbd) + operation:"Failed to fill out the rest of the m4a AudioStreamBasicDescription"]; + + return asbd; +} + +//------------------------------------------------------------------------------ + ++ (AudioStreamBasicDescription)monoFloatFormatWithSampleRate:(float)sampleRate +{ + AudioStreamBasicDescription asbd; + UInt32 byteSize = sizeof(float); + asbd.mBitsPerChannel = 8 * byteSize; + asbd.mBytesPerFrame = byteSize; + asbd.mBytesPerPacket = byteSize; + asbd.mChannelsPerFrame = 1; + asbd.mFormatFlags = kAudioFormatFlagIsPacked|kAudioFormatFlagIsFloat; + asbd.mFormatID = kAudioFormatLinearPCM; + asbd.mFramesPerPacket = 1; + asbd.mSampleRate = sampleRate; + return asbd; +} + +//------------------------------------------------------------------------------ + ++ (AudioStreamBasicDescription)monoCanonicalFormatWithSampleRate:(float)sampleRate +{ + AudioStreamBasicDescription asbd; + UInt32 byteSize = sizeof(float); + asbd.mBitsPerChannel = 8 * byteSize; + asbd.mBytesPerFrame = byteSize; + asbd.mBytesPerPacket = byteSize; + asbd.mChannelsPerFrame = 1; + asbd.mFormatFlags = kAudioFormatFlagsNativeFloatPacked|kAudioFormatFlagIsNonInterleaved; + asbd.mFormatID = kAudioFormatLinearPCM; + asbd.mFramesPerPacket = 1; + asbd.mSampleRate = sampleRate; + return asbd; +} + +//------------------------------------------------------------------------------ + ++ (AudioStreamBasicDescription)stereoCanonicalNonInterleavedFormatWithSampleRate:(float)sampleRate +{ + AudioStreamBasicDescription asbd; + UInt32 byteSize = sizeof(float); + asbd.mBitsPerChannel = 8 * byteSize; + asbd.mBytesPerFrame = byteSize; + asbd.mBytesPerPacket = byteSize; + asbd.mChannelsPerFrame = 2; + asbd.mFormatFlags = kAudioFormatFlagsNativeFloatPacked|kAudioFormatFlagIsNonInterleaved; + asbd.mFormatID = kAudioFormatLinearPCM; + asbd.mFramesPerPacket = 1; + asbd.mSampleRate = sampleRate; + return asbd; +} + +//------------------------------------------------------------------------------ + ++ (AudioStreamBasicDescription)stereoFloatInterleavedFormatWithSampleRate:(float)sampleRate +{ + AudioStreamBasicDescription asbd; + UInt32 floatByteSize = sizeof(float); + asbd.mChannelsPerFrame = 2; + asbd.mBitsPerChannel = 8 * floatByteSize; + asbd.mBytesPerFrame = asbd.mChannelsPerFrame * floatByteSize; + asbd.mFramesPerPacket = 1; + asbd.mBytesPerPacket = asbd.mFramesPerPacket * asbd.mBytesPerFrame; + asbd.mFormatFlags = kAudioFormatFlagIsFloat; + asbd.mFormatID = kAudioFormatLinearPCM; + asbd.mSampleRate = sampleRate; + asbd.mReserved = 0; + return asbd; +} + +//------------------------------------------------------------------------------ + ++ (AudioStreamBasicDescription)stereoFloatNonInterleavedFormatWithSampleRate:(float)sampleRate +{ + AudioStreamBasicDescription asbd; + UInt32 floatByteSize = sizeof(float); + asbd.mBitsPerChannel = 8 * floatByteSize; + asbd.mBytesPerFrame = floatByteSize; + asbd.mChannelsPerFrame = 2; + asbd.mFormatFlags = kAudioFormatFlagIsFloat|kAudioFormatFlagIsNonInterleaved; + asbd.mFormatID = kAudioFormatLinearPCM; + asbd.mFramesPerPacket = 1; + asbd.mBytesPerPacket = asbd.mFramesPerPacket * asbd.mBytesPerFrame; + asbd.mSampleRate = sampleRate; + return asbd; +} + +//------------------------------------------------------------------------------ + ++ (BOOL)isFloatFormat:(AudioStreamBasicDescription)asbd +{ + return asbd.mFormatFlags & kAudioFormatFlagIsFloat; +} + 
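Editor's note (illustrative sketch, not part of the vendored patch): the format constructors above pair naturally with the AudioBufferList helpers defined earlier in this file. The ExampleAllocateBuffers wrapper and the 512-frame count below are assumptions made for the example; only the EZAudioUtilities calls come from this file.

#import "EZAudioUtilities.h"

// Sketch: build a non-interleaved stereo float format and allocate a matching
// AudioBufferList with the helpers above, then release it with freeBufferList:.
static void ExampleAllocateBuffers(void)
{
    AudioStreamBasicDescription asbd =
        [EZAudioUtilities stereoFloatNonInterleavedFormatWithSampleRate:44100.0f];
    UInt32 frames = 512; // arbitrary example buffer size
    AudioBufferList *bufferList =
        [EZAudioUtilities audioBufferListWithNumberOfFrames:frames
                                           numberOfChannels:asbd.mChannelsPerFrame
                                                interleaved:[EZAudioUtilities isInterleaved:asbd]];
    // ... render or copy audio into bufferList->mBuffers[i].mData here ...
    [EZAudioUtilities freeBufferList:bufferList];
}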
+//------------------------------------------------------------------------------ + ++ (BOOL)isInterleaved:(AudioStreamBasicDescription)asbd +{ + return !(asbd.mFormatFlags & kAudioFormatFlagIsNonInterleaved); +} + +//------------------------------------------------------------------------------ + ++ (BOOL)isLinearPCM:(AudioStreamBasicDescription)asbd +{ + return asbd.mFormatID == kAudioFormatLinearPCM; +} + +//------------------------------------------------------------------------------ + ++ (void)printASBD:(AudioStreamBasicDescription)asbd +{ + char formatIDString[5]; + UInt32 formatID = CFSwapInt32HostToBig(asbd.mFormatID); + bcopy (&formatID, formatIDString, 4); + formatIDString[4] = '\0'; + NSLog (@" Sample Rate: %10.0f", asbd.mSampleRate); + NSLog (@" Format ID: %10s", formatIDString); + NSLog (@" Format Flags: %10X", (unsigned int)asbd.mFormatFlags); + NSLog (@" Bytes per Packet: %10d", (unsigned int)asbd.mBytesPerPacket); + NSLog (@" Frames per Packet: %10d", (unsigned int)asbd.mFramesPerPacket); + NSLog (@" Bytes per Frame: %10d", (unsigned int)asbd.mBytesPerFrame); + NSLog (@" Channels per Frame: %10d", (unsigned int)asbd.mChannelsPerFrame); + NSLog (@" Bits per Channel: %10d", (unsigned int)asbd.mBitsPerChannel); +} + +//------------------------------------------------------------------------------ + ++ (NSString *)displayTimeStringFromSeconds:(NSTimeInterval)seconds +{ + int totalSeconds = (int)ceil(seconds); + int secondsComponent = totalSeconds % 60; + int minutesComponent = (totalSeconds / 60) % 60; + return [NSString stringWithFormat:@"%02d:%02d", minutesComponent, secondsComponent]; +} + +//------------------------------------------------------------------------------ + ++ (NSString *)stringForAudioStreamBasicDescription:(AudioStreamBasicDescription)asbd +{ + char formatIDString[5]; + UInt32 formatID = CFSwapInt32HostToBig(asbd.mFormatID); + bcopy (&formatID, formatIDString, 4); + formatIDString[4] = '\0'; + return [NSString stringWithFormat: + @"\nSample Rate: %10.0f,\n" + @"Format ID: %10s,\n" + @"Format Flags: %10X,\n" + @"Bytes per Packet: %10d,\n" + @"Frames per Packet: %10d,\n" + @"Bytes per Frame: %10d,\n" + @"Channels per Frame: %10d,\n" + @"Bits per Channel: %10d,\n" + @"IsInterleaved: %i,\n" + @"IsFloat: %i,", + asbd.mSampleRate, + formatIDString, + (unsigned int)asbd.mFormatFlags, + (unsigned int)asbd.mBytesPerPacket, + (unsigned int)asbd.mFramesPerPacket, + (unsigned int)asbd.mBytesPerFrame, + (unsigned int)asbd.mChannelsPerFrame, + (unsigned int)asbd.mBitsPerChannel, + [self isInterleaved:asbd], + [self isFloatFormat:asbd]]; +} + +//------------------------------------------------------------------------------ + ++ (void)setCanonicalAudioStreamBasicDescription:(AudioStreamBasicDescription*)asbd + numberOfChannels:(UInt32)nChannels + interleaved:(BOOL)interleaved +{ + + asbd->mFormatID = kAudioFormatLinearPCM; +#if TARGET_OS_IPHONE + int sampleSize = sizeof(float); + asbd->mFormatFlags = kAudioFormatFlagsNativeFloatPacked; +#elif TARGET_OS_MAC + int sampleSize = sizeof(Float32); + asbd->mFormatFlags = kAudioFormatFlagsNativeFloatPacked; +#endif + asbd->mBitsPerChannel = 8 * sampleSize; + asbd->mChannelsPerFrame = nChannels; + asbd->mFramesPerPacket = 1; + if (interleaved) + asbd->mBytesPerPacket = asbd->mBytesPerFrame = nChannels * sampleSize; + else { + asbd->mBytesPerPacket = asbd->mBytesPerFrame = sampleSize; + asbd->mFormatFlags |= kAudioFormatFlagIsNonInterleaved; + } +} + +//------------------------------------------------------------------------------ 
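Editor's note (illustrative sketch, not part of the vendored patch): the canonical-format helper just above fills in the format ID, flags and byte layout but does not touch mSampleRate, so callers set the sample rate themselves. The ExampleCanonicalFormat wrapper and the 44100.0 value are assumptions for the example.

#import <Foundation/Foundation.h>
#import "EZAudioUtilities.h"

// Sketch: let the helper fill in the canonical float layout, then set the
// sample rate manually and log the resulting description.
static void ExampleCanonicalFormat(void)
{
    AudioStreamBasicDescription asbd;
    memset(&asbd, 0, sizeof(asbd));
    [EZAudioUtilities setCanonicalAudioStreamBasicDescription:&asbd
                                             numberOfChannels:2
                                                  interleaved:NO];
    asbd.mSampleRate = 44100.0; // not set by the helper
    NSLog(@"%@", [EZAudioUtilities stringForAudioStreamBasicDescription:asbd]);
}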
+#pragma mark - Math Utilities +//------------------------------------------------------------------------------ + ++ (void)appendBufferAndShift:(float*)buffer + withBufferSize:(int)bufferLength + toScrollHistory:(float*)scrollHistory + withScrollHistorySize:(int)scrollHistoryLength +{ + int shiftLength = scrollHistoryLength - bufferLength; + size_t floatByteSize = sizeof(float); + size_t shiftByteSize = shiftLength * floatByteSize; + size_t bufferByteSize = bufferLength * floatByteSize; + memmove(&scrollHistory[0], + &scrollHistory[bufferLength], + shiftByteSize); + memmove(&scrollHistory[shiftLength], + &buffer[0], + bufferByteSize); +} + +//------------------------------------------------------------------------------ + ++ (void) appendValue:(float)value + toScrollHistory:(float*)scrollHistory + withScrollHistorySize:(int)scrollHistoryLength +{ + float val[1]; val[0] = value; + [self appendBufferAndShift:val + withBufferSize:1 + toScrollHistory:scrollHistory + withScrollHistorySize:scrollHistoryLength]; +} + +//------------------------------------------------------------------------------ + ++ (float)MAP:(float)value + leftMin:(float)leftMin + leftMax:(float)leftMax + rightMin:(float)rightMin + rightMax:(float)rightMax +{ + float leftSpan = leftMax - leftMin; + float rightSpan = rightMax - rightMin; + float valueScaled = ( value - leftMin) / leftSpan; + return rightMin + (valueScaled * rightSpan); +} + +//------------------------------------------------------------------------------ + ++ (float)RMS:(float *)buffer length:(int)bufferSize +{ + float sum = 0.0; + for(int i = 0; i < bufferSize; i++) + sum += buffer[i] * buffer[i]; + return sqrtf( sum / bufferSize); +} + +//------------------------------------------------------------------------------ + ++ (float)SGN:(float)value +{ + return value < 0 ? -1.0f : ( value > 0 ? 
1.0f : 0.0f); +} + +//------------------------------------------------------------------------------ +#pragma mark - Music Utilities +//------------------------------------------------------------------------------ + ++ (NSString *)noteNameStringForFrequency:(float)frequency + includeOctave:(BOOL)includeOctave +{ + NSMutableString *noteName = [NSMutableString string]; + int halfStepsFromFixedNote = roundf(log(frequency / EZAudioUtilitiesFixedNoteA) / log(EZAudioUtilitiesEQFrequencyRatio)); + int halfStepsModOctaves = halfStepsFromFixedNote % EZAudioUtilitiesNotesLength; + int indexOfNote = EZAudioUtilitiesFixedNoteAIndex + halfStepsModOctaves; + float octaves = halfStepsFromFixedNote / EZAudioUtilitiesNotesLength; + if (indexOfNote >= EZAudioUtilitiesNotesLength) + { + indexOfNote -= EZAudioUtilitiesNotesLength; + octaves += 1; + } + else if (indexOfNote < 0) + { + indexOfNote += EZAudioUtilitiesNotesLength; + octaves = -1; + } + [noteName appendString:EZAudioUtilitiesNotes[indexOfNote]]; + if (includeOctave) + { + int noteOctave = EZAudioUtilitiesFixedNoteAOctave + octaves; + [noteName appendFormat:@"%i", noteOctave]; + } + return noteName; +} + +//------------------------------------------------------------------------------ +#pragma mark - OSStatus Utility +//------------------------------------------------------------------------------ + ++ (void)checkResult:(OSStatus)result operation:(const char *)operation +{ + if (result == noErr) return; + char errorString[20]; + // see if it appears to be a 4-char-code + *(UInt32 *)(errorString + 1) = CFSwapInt32HostToBig(result); + if (isprint(errorString[1]) && isprint(errorString[2]) && isprint(errorString[3]) && isprint(errorString[4])) + { + errorString[0] = errorString[5] = '\''; + errorString[6] = '\0'; + } else + // no, format it as an integer + sprintf(errorString, "%d", (int)result); + fprintf(stderr, "Error: %s (%s)\n", operation, errorString); + if (__shouldExitOnCheckResultFail) + { + exit(-1); + } +} + +//------------------------------------------------------------------------------ + ++ (NSString *)stringFromUInt32Code:(UInt32)code +{ + char errorString[20]; + // see if it appears to be a 4-char-code + *(UInt32 *)(errorString + 1) = CFSwapInt32HostToBig(code); + if (isprint(errorString[1]) && + isprint(errorString[2]) && + isprint(errorString[3]) && + isprint(errorString[4])) + { + errorString[0] = errorString[5] = '\''; + errorString[6] = '\0'; + } + return [NSString stringWithUTF8String:errorString]; +} + +//------------------------------------------------------------------------------ +#pragma mark - Plot Utility +//------------------------------------------------------------------------------ + ++ (void)updateScrollHistory:(float **)scrollHistory + withLength:(int)scrollHistoryLength + atIndex:(int *)index + withBuffer:(float *)buffer + withBufferSize:(int)bufferSize + isResolutionChanging:(BOOL *)isChanging +{ + // + size_t floatByteSize = sizeof(float); + if(*scrollHistory == NULL) + { + // Create the history buffer + *scrollHistory = (float *)calloc(8192, floatByteSize); + } + + // + if(!*isChanging) + { + float rms = [EZAudioUtilities RMS:buffer length:bufferSize]; + if(*index < scrollHistoryLength) + { + float *hist = *scrollHistory; + hist[*index] = rms; + (*index)++; + } + else + { + [EZAudioUtilities appendValue:rms + toScrollHistory:*scrollHistory + withScrollHistorySize:scrollHistoryLength]; + } + } +} + +//------------------------------------------------------------------------------ +#pragma mark - Color Utility 
+//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Color Utility +///----------------------------------------------------------- + ++ (void)getColorComponentsFromCGColor:(CGColorRef)color + red:(CGFloat *)red + green:(CGFloat *)green + blue:(CGFloat *)blue + alpha:(CGFloat *)alpha +{ + size_t componentCount = CGColorGetNumberOfComponents(color); + if (componentCount == 4) + { + const CGFloat *components = CGColorGetComponents(color); + *red = components[0]; + *green = components[1]; + *blue = components[2]; + *alpha = components[3]; + } +} + +//------------------------------------------------------------------------------ +#pragma mark - TPCircularBuffer Utility +//------------------------------------------------------------------------------ + ++ (void)appendDataToCircularBuffer:(TPCircularBuffer *)circularBuffer + fromAudioBufferList:(AudioBufferList *)audioBufferList +{ + TPCircularBufferProduceBytes(circularBuffer, + audioBufferList->mBuffers[0].mData, + audioBufferList->mBuffers[0].mDataByteSize); +} + +//------------------------------------------------------------------------------ + ++ (void)circularBuffer:(TPCircularBuffer *)circularBuffer withSize:(int)size +{ + TPCircularBufferInit(circularBuffer, size); +} + +//------------------------------------------------------------------------------ + ++ (void)freeCircularBuffer:(TPCircularBuffer *)circularBuffer +{ + TPCircularBufferClear(circularBuffer); + TPCircularBufferCleanup(circularBuffer); +} + +//------------------------------------------------------------------------------ +#pragma mark - EZPlotHistoryInfo Utility +//------------------------------------------------------------------------------ + ++ (void)appendBufferRMS:(float *)buffer + withBufferSize:(UInt32)bufferSize + toHistoryInfo:(EZPlotHistoryInfo *)historyInfo +{ + // + // Calculate RMS and append to buffer + // + float rms = [EZAudioUtilities RMS:buffer length:bufferSize]; + float src[1]; + src[0] = isnan(rms) ? 
0.0 : rms; + [self appendBuffer:src withBufferSize:1 toHistoryInfo:historyInfo]; +} + +//------------------------------------------------------------------------------ + ++ (void)appendBuffer:(float *)buffer + withBufferSize:(UInt32)bufferSize + toHistoryInfo:(EZPlotHistoryInfo *)historyInfo +{ + // + // Do nothing if there is no buffer + // + if (bufferSize == 0) + { + return; + } + + // + // Update the scroll history datasource + // + TPCircularBufferProduceBytes(&historyInfo->circularBuffer, buffer, bufferSize * sizeof(float)); + int32_t targetBytes = historyInfo->bufferSize * sizeof(float); + int32_t availableBytes = 0; + float *historyBuffer = TPCircularBufferTail(&historyInfo->circularBuffer, &availableBytes); + int32_t bytes = MIN(targetBytes, availableBytes); + memmove(historyInfo->buffer, historyBuffer, bytes); + if (targetBytes <= availableBytes) + { + TPCircularBufferConsume(&historyInfo->circularBuffer, availableBytes - targetBytes); + } +} + +//------------------------------------------------------------------------------ + ++ (void)clearHistoryInfo:(EZPlotHistoryInfo *)historyInfo +{ + memset(historyInfo->buffer, 0, historyInfo->bufferSize * sizeof(float)); + TPCircularBufferClear(&historyInfo->circularBuffer); +} + +//------------------------------------------------------------------------------ + ++ (void)freeHistoryInfo:(EZPlotHistoryInfo *)historyInfo +{ + // Tear down the circular buffer before freeing the struct that owns it, + // otherwise the cleanup call reads freed memory. + TPCircularBufferCleanup(&historyInfo->circularBuffer); + free(historyInfo->buffer); + free(historyInfo); +} + +//------------------------------------------------------------------------------ + ++ (EZPlotHistoryInfo *)historyInfoWithDefaultLength:(int)defaultLength + maximumLength:(int)maximumLength +{ + // + // Setup buffers + // + EZPlotHistoryInfo *historyInfo = (EZPlotHistoryInfo *)malloc(sizeof(EZPlotHistoryInfo)); + historyInfo->bufferSize = defaultLength; + historyInfo->buffer = calloc(maximumLength, sizeof(float)); + TPCircularBufferInit(&historyInfo->circularBuffer, maximumLength); + + // + // Zero out circular buffer + // + float emptyBuffer[maximumLength]; + memset(emptyBuffer, 0, sizeof(emptyBuffer)); + TPCircularBufferProduceBytes(&historyInfo->circularBuffer, + emptyBuffer, + (int32_t)sizeof(emptyBuffer)); + + return historyInfo; +} + +//------------------------------------------------------------------------------ + +@end diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioiOS.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioiOS.h new file mode 100644 index 0000000..40af2ee --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZAudioiOS.h @@ -0,0 +1,26 @@ +// +// EZAudioiOS.h +// EZAudio +// +// Created by Tommaso Piazza on 30/09/15. +// Copyright © 2015 Andrew Breckenridge. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software.
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZMicrophone.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZMicrophone.h new file mode 100644 index 0000000..ddac53e --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZMicrophone.h @@ -0,0 +1,381 @@ +// +// EZMicrophone.h +// EZAudio +// +// Created by Syed Haris Ali on 9/2/13. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import +#import +#import "TargetConditionals.h" +#import "EZAudioDevice.h" +#import "EZOutput.h" + +@class EZMicrophone; + +//------------------------------------------------------------------------------ +#pragma mark - EZMicrophoneDelegate +//------------------------------------------------------------------------------ + +/** + The EZMicrophoneDelegate for the EZMicrophone provides a receiver for the incoming audio data events. When the microphone has been successfully internally configured it will try to send its delegate an AudioStreamBasicDescription describing the format of the incoming audio data. + + The audio data itself is sent back to the delegate in various forms: + + -`microphone:hasAudioReceived:withBufferSize:withNumberOfChannels:` + Provides float arrays instead of the AudioBufferList structure to hold the audio data. There could be a number of float arrays depending on the number of channels (see the function description below). These are useful for doing any visualizations that would like to make use of the raw audio data. + + -`microphone:hasBufferList:withBufferSize:withNumberOfChannels:` + Provides the AudioBufferList structures holding the audio data. These are the native structures Core Audio uses to hold the buffer information and useful for piping out directly to an output (see EZOutput). 
+ + */ +@protocol EZMicrophoneDelegate + +@optional +///----------------------------------------------------------- +/// @name Audio Data Description +///----------------------------------------------------------- + +/** + Called anytime the EZMicrophone starts or stops. + @param microphone The instance of the EZMicrophone that triggered the event. + @param isPlaying A BOOL indicating whether the EZMicrophone instance is playing or not. + */ +- (void)microphone:(EZMicrophone *)microphone changedPlayingState:(BOOL)isPlaying; + +//------------------------------------------------------------------------------ + +/** + Called anytime the input device changes on an `EZMicrophone` instance. + @param microphone The instance of the EZMicrophone that triggered the event. + @param device The instance of the new EZAudioDevice the microphone is using to pull input. + */ +- (void)microphone:(EZMicrophone *)microphone changedDevice:(EZAudioDevice *)device; + +//------------------------------------------------------------------------------ + +/** + Returns back the audio stream basic description as soon as it has been initialized. This is guaranteed to occur before the stream callbacks, `microphone:hasBufferList:withBufferSize:withNumberOfChannels:` or `microphone:hasAudioReceived:withBufferSize:withNumberOfChannels:`. + @param microphone The instance of the EZMicrophone that triggered the event. + @param audioStreamBasicDescription The AudioStreamBasicDescription that was created for the microphone instance. + */ +- (void) microphone:(EZMicrophone *)microphone + hasAudioStreamBasicDescription:(AudioStreamBasicDescription)audioStreamBasicDescription; + +///----------------------------------------------------------- +/// @name Audio Data Callbacks +///----------------------------------------------------------- + +/** + This method provides an array of float arrays of the audio received, each float array representing a channel of audio data. This occurs on a background thread, so any drawing code must explicitly perform its functions on the main thread. + @param microphone The instance of the EZMicrophone that triggered the event. + @param buffer The audio data as an array of float arrays. In a stereo signal buffer[0] represents the left channel while buffer[1] would represent the right channel. + @param bufferSize The size of each of the buffers (the length of each float array). + @param numberOfChannels The number of channels for the incoming audio. + @warning This function executes on a background thread to avoid blocking any audio operations. If operations should be performed on any other thread (like the main thread) they should be performed within a dispatch block like so: dispatch_async(dispatch_get_main_queue(), ^{ ...Your Code... }) + */ +- (void) microphone:(EZMicrophone *)microphone + hasAudioReceived:(float **)buffer + withBufferSize:(UInt32)bufferSize + withNumberOfChannels:(UInt32)numberOfChannels; + +//------------------------------------------------------------------------------ + +/** + Returns back the buffer list containing the audio received. This occurs on a background thread, so any drawing code must explicitly perform its functions on the main thread. + @param microphone The instance of the EZMicrophone that triggered the event. + @param bufferList The AudioBufferList holding the audio data. + @param bufferSize The size of each of the buffers of the AudioBufferList. + @param numberOfChannels The number of channels for the incoming audio.
+ @warning This function executes on a background thread to avoid blocking any audio operations. If operations should be performed on any other thread (like the main thread) it should be performed within a dispatch block like so: dispatch_async(dispatch_get_main_queue(), ^{ ...Your Code... }) + */ +- (void) microphone:(EZMicrophone *)microphone + hasBufferList:(AudioBufferList *)bufferList + withBufferSize:(UInt32)bufferSize + withNumberOfChannels:(UInt32)numberOfChannels; + +@end + +//------------------------------------------------------------------------------ +#pragma mark - EZMicrophone +//------------------------------------------------------------------------------ + +/** + The EZMicrophone provides a component to get audio data from the default device microphone. On OSX this is the default selected input device in the system preferences while on iOS this defaults to use the default RemoteIO audio unit. The microphone data is converted to a float buffer array and returned back to the caller via the EZMicrophoneDelegate protocol. + */ +@interface EZMicrophone : NSObject + +//------------------------------------------------------------------------------ + +/** + The EZMicrophoneDelegate for which to handle the microphone callbacks + */ +@property (nonatomic, weak) id delegate; + +//------------------------------------------------------------------------------ + +/** + The EZAudioDevice being used to pull the microphone data. + - On iOS this can be any of the available microphones on the iPhone/iPad devices (usually there are 3). Defaults to the first microphone found (bottom mic) + - On OSX this can be any of the plugged in devices that Core Audio can detect (see kAudioUnitSubType_HALOutput for more information) + System Preferences -> Sound for the available inputs) + */ +@property (nonatomic, strong) EZAudioDevice *device; + +//------------------------------------------------------------------------------ + +/** + A BOOL describing whether the microphone is on and passing back audio data to its delegate. + */ +@property (nonatomic, assign) BOOL microphoneOn; + +//------------------------------------------------------------------------------ + +/** + An EZOutput to use for porting the microphone input out (passthrough). + */ +@property (nonatomic, strong) EZOutput *output; + +//------------------------------------------------------------------------------ +#pragma mark - Initializers +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Initializers +///----------------------------------------------------------- + +/** + Creates an instance of the EZMicrophone with a delegate to respond to the audioReceived callback. This will not start fetching the audio until startFetchingAudio has been called. Use initWithMicrophoneDelegate:startsImmediately: to instantiate this class and immediately start fetching audio data. + @param delegate A EZMicrophoneDelegate delegate that will receive the audioReceived callback. + @return An instance of the EZMicrophone class. This should be strongly retained. + */ +- (EZMicrophone *)initWithMicrophoneDelegate:(id)delegate; + +//------------------------------------------------------------------------------ + +/** + Creates an instance of the EZMicrophone with a custom AudioStreamBasicDescription and provides the caller to specify a delegate to respond to the audioReceived callback. 
This will not start fetching the audio until startFetchingAudio has been called. Use initWithMicrophoneDelegate:startsImmediately: to instantiate this class and immediately start fetching audio data. + @param microphoneDelegate A EZMicrophoneDelegate delegate that will receive the audioReceived callback. + @param audioStreamBasicDescription A custom AudioStreamBasicFormat for the microphone input. + @return An instance of the EZMicrophone class. This should be strongly retained. + */ +-(EZMicrophone *)initWithMicrophoneDelegate:(id)delegate + withAudioStreamBasicDescription:(AudioStreamBasicDescription)audioStreamBasicDescription; + +//------------------------------------------------------------------------------ + +/** + Creates an instance of the EZMicrophone with a delegate to respond to the audioReceived callback and allows the caller to specify whether they'd immediately like to start fetching the audio data. + @param delegate A EZMicrophoneDelegate delegate that will receive the audioReceived callback. + @param startsImmediately A boolean indicating whether to start fetching the data immediately. IF YES, the delegate's audioReceived callback will immediately start getting called. + @return An instance of the EZMicrophone class. This should be strongly retained. + */ +- (EZMicrophone *)initWithMicrophoneDelegate:(id)delegate + startsImmediately:(BOOL)startsImmediately; + +//------------------------------------------------------------------------------ + +/** + Creates an instance of the EZMicrophone with a custom AudioStreamBasicDescription and provides the caller with a delegate to respond to the audioReceived callback and allows the caller to specify whether they'd immediately like to start fetching the audio data. + @param delegate A EZMicrophoneDelegate delegate that will receive the audioReceived callback. + @param audioStreamBasicDescription A custom AudioStreamBasicFormat for the microphone input. + @param startsImmediately A boolean indicating whether to start fetching the data immediately. IF YES, the delegate's audioReceived callback will immediately start getting called. + @return An instance of the EZMicrophone class. This should be strongly retained. + */ +- (EZMicrophone *)initWithMicrophoneDelegate:(id)delegate + withAudioStreamBasicDescription:(AudioStreamBasicDescription)audioStreamBasicDescription + startsImmediately:(BOOL)startsImmediately; + +//------------------------------------------------------------------------------ +#pragma mark - Class Initializers +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Class Initializers +///----------------------------------------------------------- + +/** + Creates an instance of the EZMicrophone with a delegate to respond to the audioReceived callback. This will not start fetching the audio until startFetchingAudio has been called. Use microphoneWithDelegate:startsImmediately: to instantiate this class and immediately start fetching audio data. + @param delegate A EZMicrophoneDelegate delegate that will receive the audioReceived callback. + @return An instance of the EZMicrophone class. This should be declared as a strong property! + */ ++ (EZMicrophone *)microphoneWithDelegate:(id)delegate; + +//------------------------------------------------------------------------------ + +/** + Creates an instance of the EZMicrophone with a delegate to respond to the audioReceived callback. 
This will not start fetching the audio until startFetchingAudio has been called. Use microphoneWithDelegate:startsImmediately: to instantiate this class and immediately start fetching audio data. + @param delegate A EZMicrophoneDelegate delegate that will receive the audioReceived callback. + @param audioStreamBasicDescription A custom AudioStreamBasicFormat for the microphone input. + @return An instance of the EZMicrophone class. This should be declared as a strong property! + */ ++ (EZMicrophone *)microphoneWithDelegate:(id)delegate + withAudioStreamBasicDescription:(AudioStreamBasicDescription)audioStreamBasicDescription; + +//------------------------------------------------------------------------------ + +/** + Creates an instance of the EZMicrophone with a delegate to respond to the audioReceived callback and allows the caller to specify whether they'd immediately like to start fetching the audio data. + + @param microphoneDelegate A EZMicrophoneDelegate delegate that will receive the audioReceived callback. + @param startsImmediately A boolean indicating whether to start fetching the data immediately. IF YES, the delegate's audioReceived callback will immediately start getting called. + @return An instance of the EZMicrophone class. This should be strongly retained. + */ ++ (EZMicrophone *)microphoneWithDelegate:(id)delegate + startsImmediately:(BOOL)startsImmediately; + +//------------------------------------------------------------------------------ + +/** + Creates an instance of the EZMicrophone with a delegate to respond to the audioReceived callback and allows the caller to specify whether they'd immediately like to start fetching the audio data. + + @param microphoneDelegate A EZMicrophoneDelegate delegate that will receive the audioReceived callback. + @param audioStreamBasicDescription A custom AudioStreamBasicFormat for the microphone input. + @param startsImmediately A boolean indicating whether to start fetching the data immediately. IF YES, the delegate's audioReceived callback will immediately start getting called. + @return An instance of the EZMicrophone class. This should be strongly retained. + */ ++ (EZMicrophone *)microphoneWithDelegate:(id)delegate + withAudioStreamBasicDescription:(AudioStreamBasicDescription)audioStreamBasicDescription + startsImmediately:(BOOL)startsImmediately; + +//------------------------------------------------------------------------------ +#pragma mark - Shared Instance +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Shared Instance +///----------------------------------------------------------- + +/** + A shared instance of the microphone component. Most applications will only need to use one instance of the microphone component across multiple views. Make sure to call the `startFetchingAudio` method to receive the audio data in the microphone delegate. + @return A shared instance of the `EZAudioMicrophone` component. + */ ++ (EZMicrophone *)sharedMicrophone; + +//------------------------------------------------------------------------------ +#pragma mark - Events +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Starting/Stopping The Microphone +///----------------------------------------------------------- + +/** + Starts fetching audio from the default microphone. Will notify delegate with audioReceived callback. 
+ */ +- (void)startFetchingAudio; + +//------------------------------------------------------------------------------ + +/** + Stops fetching audio. Will stop notifying the delegate's audioReceived callback. + */ +- (void)stopFetchingAudio; + +//------------------------------------------------------------------------------ +#pragma mark - Getters +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Getting The Microphone's Audio Format +///----------------------------------------------------------- + +/** + Provides the AudioStreamBasicDescription structure containing the format of the microphone's audio. + @return An AudioStreamBasicDescription structure describing the format of the microphone's audio. + */ +- (AudioStreamBasicDescription)audioStreamBasicDescription; + +//------------------------------------------------------------------------------ + +/** + Provides the underlying Audio Unit that is being used to fetch the audio. + @return The AudioUnit used for the microphone + */ +- (AudioUnit *)audioUnit; + +//------------------------------------------------------------------------------ +#pragma mark - Setters +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Customizing The Microphone Stream Format +///----------------------------------------------------------- + +/** + Sets the AudioStreamBasicDescription on the microphone input. Must be linear PCM and must be the same sample rate as the stream format coming in (check the current `audioStreamBasicDescription` before setting). + @warning Do not set this while fetching audio (startFetchingAudio) + @param asbd The new AudioStreamBasicDescription to use in place of the current audio format description. + */ +- (void)setAudioStreamBasicDescription:(AudioStreamBasicDescription)asbd; + +///----------------------------------------------------------- +/// @name Setting The Microphone's Hardware Device +///----------------------------------------------------------- + +/** + Sets the EZAudioDevice being used to pull the microphone data. + - On iOS this can be any of the available microphones on the iPhone/iPad devices (usually there are 3). Defaults to the first microphone found (bottom mic) + - On OSX this can be any of the plugged in devices that Core Audio can detect (see kAudioUnitSubType_HALOutput for more information) + System Preferences -> Sound for the available inputs) + @param device An EZAudioDevice instance that should be used to fetch the microphone data. + */ +- (void)setDevice:(EZAudioDevice *)device; + +//------------------------------------------------------------------------------ +#pragma mark - Direct Output +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Setting The Microphone's Output (Direct Out) +///----------------------------------------------------------- + +/** + When set this will pipe out the contents of the microphone into an EZOutput. This is known as a passthrough or direct out that will simply pipe the microphone input to an output. + @param output An EZOutput instance that the microphone will use to output its audio data to the speaker. 
+ */ +- (void)setOutput:(EZOutput *)output; + +//------------------------------------------------------------------------------ +#pragma mark - Subclass Methods +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Subclass +///----------------------------------------------------------- + +/** + The default AudioStreamBasicDescription set as the stream format of the microphone if no custom description is set. Defaults to a non-interleaved float format with the number of channels specified by the `numberOfChannels` method. + @return An AudioStreamBasicDescription that will be used as the default stream format. + */ +- (AudioStreamBasicDescription)defaultStreamFormat; + +//------------------------------------------------------------------------------ + +/** + The number of channels the input microphone is expected to have. Defaults to 1 (assumes microphone is mono). + @return A UInt32 representing the number of channels expected for the microphone. + */ +- (UInt32)numberOfChannels; + +//------------------------------------------------------------------------------ + +@end diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZMicrophone.m b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZMicrophone.m new file mode 100644 index 0000000..df6db13 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZMicrophone.m @@ -0,0 +1,662 @@ +// +// EZMicrophone.m +// EZAudio +// +// Created by Syed Haris Ali on 9/2/13. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
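Editor's note (illustrative sketch, not part of the vendored patch): a minimal consumer of the EZMicrophone API documented in the header above might look like the following. The MyRecorder class is hypothetical; only the EZMicrophone and EZMicrophoneDelegate calls are taken from EZMicrophone.h.

#import "EZMicrophone.h"

// Sketch: create a microphone with a delegate, start fetching audio, and handle
// the float-buffer callback on the main queue as the header's warning suggests.
@interface MyRecorder : NSObject <EZMicrophoneDelegate>
@property (nonatomic, strong) EZMicrophone *microphone;
@end

@implementation MyRecorder

- (instancetype)init
{
    self = [super init];
    if (self)
    {
        // Keep a strong reference, as the header advises, then start pulling audio.
        self.microphone = [EZMicrophone microphoneWithDelegate:self];
        [self.microphone startFetchingAudio];
    }
    return self;
}

- (void)    microphone:(EZMicrophone *)microphone
      hasAudioReceived:(float **)buffer
        withBufferSize:(UInt32)bufferSize
  withNumberOfChannels:(UInt32)numberOfChannels
{
    // Delivered on a background thread; hop to the main queue before touching UI.
    dispatch_async(dispatch_get_main_queue(), ^{
        // e.g. feed buffer[0] (the first channel) into a plot or level meter here
    });
}

@end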
+ +#import "EZMicrophone.h" +#import "EZAudioFloatConverter.h" +#import "EZAudioUtilities.h" +#import "EZAudioDevice.h" + +//------------------------------------------------------------------------------ +#pragma mark - Data Structures +//------------------------------------------------------------------------------ + +typedef struct EZMicrophoneInfo +{ + AudioUnit audioUnit; + AudioBufferList *audioBufferList; + float **floatData; + AudioStreamBasicDescription inputFormat; + AudioStreamBasicDescription streamFormat; +} EZMicrophoneInfo; + +//------------------------------------------------------------------------------ +#pragma mark - Callbacks +//------------------------------------------------------------------------------ + +static OSStatus EZAudioMicrophoneCallback(void *inRefCon, + AudioUnitRenderActionFlags *ioActionFlags, + const AudioTimeStamp *inTimeStamp, + UInt32 inBusNumber, + UInt32 inNumberFrames, + AudioBufferList *ioData); + +//------------------------------------------------------------------------------ +#pragma mark - EZMicrophone (Interface Extension) +//------------------------------------------------------------------------------ + +@interface EZMicrophone () +@property (nonatomic, strong) EZAudioFloatConverter *floatConverter; +@property (nonatomic, assign) EZMicrophoneInfo *info; +@end + +@implementation EZMicrophone + +//------------------------------------------------------------------------------ +#pragma mark - Dealloc +//------------------------------------------------------------------------------ + +- (void)dealloc +{ + [[NSNotificationCenter defaultCenter] removeObserver:self]; + [EZAudioUtilities checkResult:AudioUnitUninitialize(self.info->audioUnit) + operation:"Failed to unintialize audio unit for microphone"]; + [EZAudioUtilities freeBufferList:self.info->audioBufferList]; + [EZAudioUtilities freeFloatBuffers:self.info->floatData + numberOfChannels:self.info->streamFormat.mChannelsPerFrame]; + free(self.info); +} + +//------------------------------------------------------------------------------ +#pragma mark - Initialization +//------------------------------------------------------------------------------ + +- (id)init +{ + self = [super init]; + if(self) + { + self.info = (EZMicrophoneInfo *)malloc(sizeof(EZMicrophoneInfo)); + memset(self.info, 0, sizeof(EZMicrophoneInfo)); + [self setup]; + } + return self; +} + +//------------------------------------------------------------------------------ + +- (EZMicrophone *)initWithMicrophoneDelegate:(id)delegate +{ + self = [super init]; + if(self) + { + self.info = (EZMicrophoneInfo *)malloc(sizeof(EZMicrophoneInfo)); + memset(self.info, 0, sizeof(EZMicrophoneInfo)); + _delegate = delegate; + [self setup]; + } + return self; +} + +//------------------------------------------------------------------------------ + +-(EZMicrophone *)initWithMicrophoneDelegate:(id)delegate + withAudioStreamBasicDescription:(AudioStreamBasicDescription)audioStreamBasicDescription +{ + self = [self initWithMicrophoneDelegate:delegate]; + if(self) + { + [self setAudioStreamBasicDescription:audioStreamBasicDescription]; + } + return self; +} + +//------------------------------------------------------------------------------ + +- (EZMicrophone *)initWithMicrophoneDelegate:(id)delegate + startsImmediately:(BOOL)startsImmediately +{ + self = [self initWithMicrophoneDelegate:delegate]; + if(self) + { + startsImmediately ? 
[self startFetchingAudio] : -1; + } + return self; +} + +//------------------------------------------------------------------------------ + +-(EZMicrophone *)initWithMicrophoneDelegate:(id)delegate + withAudioStreamBasicDescription:(AudioStreamBasicDescription)audioStreamBasicDescription + startsImmediately:(BOOL)startsImmediately +{ + self = [self initWithMicrophoneDelegate:delegate + withAudioStreamBasicDescription:audioStreamBasicDescription]; + if(self) + { + startsImmediately ? [self startFetchingAudio] : -1; + } + return self; +} + +//------------------------------------------------------------------------------ +#pragma mark - Class Initializers +//------------------------------------------------------------------------------ + ++ (EZMicrophone *)microphoneWithDelegate:(id)delegate +{ + return [[EZMicrophone alloc] initWithMicrophoneDelegate:delegate]; +} + +//------------------------------------------------------------------------------ + ++ (EZMicrophone *)microphoneWithDelegate:(id)delegate + withAudioStreamBasicDescription:(AudioStreamBasicDescription)audioStreamBasicDescription +{ + return [[EZMicrophone alloc] initWithMicrophoneDelegate:delegate + withAudioStreamBasicDescription:audioStreamBasicDescription]; +} + +//------------------------------------------------------------------------------ + ++ (EZMicrophone *)microphoneWithDelegate:(id)delegate + startsImmediately:(BOOL)startsImmediately +{ + return [[EZMicrophone alloc] initWithMicrophoneDelegate:delegate + startsImmediately:startsImmediately]; +} + +//------------------------------------------------------------------------------ + ++ (EZMicrophone *)microphoneWithDelegate:(id)delegate + withAudioStreamBasicDescription:(AudioStreamBasicDescription)audioStreamBasicDescription + startsImmediately:(BOOL)startsImmediately +{ + return [[EZMicrophone alloc] initWithMicrophoneDelegate:delegate + withAudioStreamBasicDescription:audioStreamBasicDescription + startsImmediately:startsImmediately]; +} + +//------------------------------------------------------------------------------ +#pragma mark - Singleton +//------------------------------------------------------------------------------ + ++ (EZMicrophone *)sharedMicrophone +{ + static EZMicrophone *_sharedMicrophone = nil; + static dispatch_once_t onceToken; + dispatch_once(&onceToken, ^{ + _sharedMicrophone = [[EZMicrophone alloc] init]; + }); + return _sharedMicrophone; +} + +//------------------------------------------------------------------------------ +#pragma mark - Setup +//------------------------------------------------------------------------------ + +- (void)setup +{ + // Create an input component description for mic input + AudioComponentDescription inputComponentDescription; + inputComponentDescription.componentType = kAudioUnitType_Output; + inputComponentDescription.componentManufacturer = kAudioUnitManufacturer_Apple; +#if TARGET_OS_IPHONE + inputComponentDescription.componentSubType = kAudioUnitSubType_RemoteIO; +#elif TARGET_OS_MAC + inputComponentDescription.componentSubType = kAudioUnitSubType_HALOutput; +#endif + + // get the first matching component + AudioComponent inputComponent = AudioComponentFindNext( NULL , &inputComponentDescription); + NSAssert(inputComponent, @"Couldn't get input component unit!"); + + // create new instance of component + [EZAudioUtilities checkResult:AudioComponentInstanceNew(inputComponent, &self.info->audioUnit) + operation:"Failed to get audio component instance"]; + +#if TARGET_OS_IPHONE + // must enable input scope for 
remote IO unit + UInt32 flag = 1; + [EZAudioUtilities checkResult:AudioUnitSetProperty(self.info->audioUnit, + kAudioOutputUnitProperty_EnableIO, + kAudioUnitScope_Input, + 1, + &flag, + sizeof(flag)) + operation:"Couldn't enable input on remote IO unit."]; +#endif + [self setDevice:[EZAudioDevice currentInputDevice]]; + + UInt32 propSize = sizeof(self.info->inputFormat); + [EZAudioUtilities checkResult:AudioUnitGetProperty(self.info->audioUnit, + kAudioUnitProperty_StreamFormat, + kAudioUnitScope_Input, + 1, + &self.info->inputFormat, + &propSize) + operation:"Failed to get stream format of microphone input scope"]; +#if TARGET_OS_IPHONE + self.info->inputFormat.mSampleRate = [[AVAudioSession sharedInstance] sampleRate]; + NSAssert(self.info->inputFormat.mSampleRate, @"Expected AVAudioSession sample rate to be greater than 0.0. Did you setup the audio session?"); +#elif TARGET_OS_MAC +#endif + [self setAudioStreamBasicDescription:[self defaultStreamFormat]]; + + // render callback + AURenderCallbackStruct renderCallbackStruct; + renderCallbackStruct.inputProc = EZAudioMicrophoneCallback; + renderCallbackStruct.inputProcRefCon = (__bridge void *)(self); + [EZAudioUtilities checkResult:AudioUnitSetProperty(self.info->audioUnit, + kAudioOutputUnitProperty_SetInputCallback, + kAudioUnitScope_Global, + 1, + &renderCallbackStruct, + sizeof(renderCallbackStruct)) + operation:"Failed to set render callback"]; + + [EZAudioUtilities checkResult:AudioUnitInitialize(self.info->audioUnit) + operation:"Failed to initialize input unit"]; + + // setup notifications + [self setupNotifications]; +} + +- (void)setupNotifications +{ +#if TARGET_OS_IPHONE + [[NSNotificationCenter defaultCenter] addObserver:self + selector:@selector(microphoneWasInterrupted:) + name:AVAudioSessionInterruptionNotification + object:nil]; + [[NSNotificationCenter defaultCenter] addObserver:self + selector:@selector(microphoneRouteChanged:) + name:AVAudioSessionRouteChangeNotification + object:nil]; +#elif TARGET_OS_MAC +#endif +} + +//------------------------------------------------------------------------------ +#pragma mark - Notifications +//------------------------------------------------------------------------------ + +#if TARGET_OS_IPHONE + +- (void)microphoneWasInterrupted:(NSNotification *)notification +{ + AVAudioSessionInterruptionType type = [notification.userInfo[AVAudioSessionInterruptionTypeKey] unsignedIntegerValue]; + switch (type) + { + case AVAudioSessionInterruptionTypeBegan: + { + [self stopFetchingAudio]; + break; + } + case AVAudioSessionInterruptionTypeEnded: + { + AVAudioSessionInterruptionOptions option = [notification.userInfo[AVAudioSessionInterruptionOptionKey] unsignedIntegerValue]; + if (option == AVAudioSessionInterruptionOptionShouldResume) + { + [self startFetchingAudio]; + } + break; + } + default: + { + break; + } + } +} + +//------------------------------------------------------------------------------ + +- (void)microphoneRouteChanged:(NSNotification *)notification +{ + EZAudioDevice *device = [EZAudioDevice currentInputDevice]; + [self setDevice:device]; +} + +#elif TARGET_OS_MAC +#endif + +//------------------------------------------------------------------------------ +#pragma mark - Events +//------------------------------------------------------------------------------ + +-(void)startFetchingAudio +{ + // + // Start output unit + // + [EZAudioUtilities checkResult:AudioOutputUnitStart(self.info->audioUnit) + operation:"Failed to start microphone audio unit"]; + + // + // Notify delegate 
+ // + if ([self.delegate respondsToSelector:@selector(microphone:changedPlayingState:)]) + { + [self.delegate microphone:self changedPlayingState:YES]; + } +} + +//------------------------------------------------------------------------------ + +-(void)stopFetchingAudio +{ + // + // Stop output unit + // + [EZAudioUtilities checkResult:AudioOutputUnitStop(self.info->audioUnit) + operation:"Failed to stop microphone audio unit"]; + + // + // Notify delegate + // + if ([self.delegate respondsToSelector:@selector(microphone:changedPlayingState:)]) + { + [self.delegate microphone:self changedPlayingState:NO]; + } +} + +//------------------------------------------------------------------------------ +#pragma mark - Getters +//------------------------------------------------------------------------------ + +-(AudioStreamBasicDescription)audioStreamBasicDescription +{ + return self.info->streamFormat; +} + +//------------------------------------------------------------------------------ + +-(AudioUnit *)audioUnit +{ + return &self.info->audioUnit; +} + +//------------------------------------------------------------------------------ + +- (UInt32)maximumBufferSize +{ + UInt32 maximumBufferSize; + UInt32 propSize = sizeof(maximumBufferSize); + [EZAudioUtilities checkResult:AudioUnitGetProperty(self.info->audioUnit, + kAudioUnitProperty_MaximumFramesPerSlice, + kAudioUnitScope_Global, + 0, + &maximumBufferSize, + &propSize) + operation:"Failed to get maximum number of frames per slice"]; + return maximumBufferSize; +} + +//------------------------------------------------------------------------------ +#pragma mark - Setters +//------------------------------------------------------------------------------ + +- (void)setMicrophoneOn:(BOOL)microphoneOn +{ + _microphoneOn = microphoneOn; + if (microphoneOn) + { + [self startFetchingAudio]; + } + else { + [self stopFetchingAudio]; + } +} + +//------------------------------------------------------------------------------ + +- (void)setAudioStreamBasicDescription:(AudioStreamBasicDescription)asbd +{ + if (self.floatConverter) + { + [EZAudioUtilities freeBufferList:self.info->audioBufferList]; + [EZAudioUtilities freeFloatBuffers:self.info->floatData + numberOfChannels:self.info->streamFormat.mChannelsPerFrame]; + } + + // + // Set new stream format + // + self.info->streamFormat = asbd; + [EZAudioUtilities checkResult:AudioUnitSetProperty(self.info->audioUnit, + kAudioUnitProperty_StreamFormat, + kAudioUnitScope_Input, + 0, + &asbd, + sizeof(asbd)) + operation:"Failed to set stream format on input scope"]; + [EZAudioUtilities checkResult:AudioUnitSetProperty(self.info->audioUnit, + kAudioUnitProperty_StreamFormat, + kAudioUnitScope_Output, + 1, + &asbd, + sizeof(asbd)) + operation:"Failed to set stream format on output scope"]; + + // + // Allocate scratch buffers + // + UInt32 maximumBufferSize = [self maximumBufferSize]; + BOOL isInterleaved = [EZAudioUtilities isInterleaved:asbd]; + UInt32 channels = asbd.mChannelsPerFrame; + self.floatConverter = [[EZAudioFloatConverter alloc] initWithInputFormat:asbd]; + self.info->floatData = [EZAudioUtilities floatBuffersWithNumberOfFrames:maximumBufferSize + numberOfChannels:channels]; + self.info->audioBufferList = [EZAudioUtilities audioBufferListWithNumberOfFrames:maximumBufferSize + numberOfChannels:channels + interleaved:isInterleaved]; + // + // Notify delegate + // + if ([self.delegate respondsToSelector:@selector(microphone:hasAudioStreamBasicDescription:)]) + { + [self.delegate microphone:self 
hasAudioStreamBasicDescription:asbd]; + } +} + +//------------------------------------------------------------------------------ + +- (void)setDevice:(EZAudioDevice *)device +{ +#if TARGET_OS_IPHONE + + // if the devices are equal then ignore + if ([device isEqual:self.device]) + { + return; + } + + NSError *error; + [[AVAudioSession sharedInstance] setPreferredInput:device.port error:&error]; + if (error) + { + NSLog(@"Error setting input device port (%@), reason: %@", + device.port, + error.localizedDescription); + } + else + { + if (device.dataSource) + { + [[AVAudioSession sharedInstance] setInputDataSource:device.dataSource error:&error]; + if (error) + { + NSLog(@"Error setting input data source (%@), reason: %@", + device.dataSource, + error.localizedDescription); + } + } + } + +#elif TARGET_OS_MAC + UInt32 inputEnabled = device.inputChannelCount > 0; + [EZAudioUtilities checkResult:AudioUnitSetProperty(self.info->audioUnit, + kAudioOutputUnitProperty_EnableIO, + kAudioUnitScope_Input, + 1, + &inputEnabled, + sizeof(inputEnabled)) + operation:"Failed to set flag on device input"]; + + UInt32 outputEnabled = device.outputChannelCount > 0; + [EZAudioUtilities checkResult:AudioUnitSetProperty(self.info->audioUnit, + kAudioOutputUnitProperty_EnableIO, + kAudioUnitScope_Output, + 0, + &outputEnabled, + sizeof(outputEnabled)) + operation:"Failed to set flag on device output"]; + + AudioDeviceID deviceId = device.deviceID; + [EZAudioUtilities checkResult:AudioUnitSetProperty(self.info->audioUnit, + kAudioOutputUnitProperty_CurrentDevice, + kAudioUnitScope_Global, + 0, + &deviceId, + sizeof(AudioDeviceID)) + operation:"Couldn't set default device on I/O unit"]; +#endif + + // + // Store device + // + _device = device; + + // + // Notify delegate + // + if ([self.delegate respondsToSelector:@selector(microphone:changedDevice:)]) + { + [self.delegate microphone:self changedDevice:device]; + } +} + +//------------------------------------------------------------------------------ +#pragma mark - Output +//------------------------------------------------------------------------------ + +- (void)setOutput:(EZOutput *)output +{ + _output = output; + _output.inputFormat = self.audioStreamBasicDescription; + _output.dataSource = self; +} + +//------------------------------------------------------------------------------ +#pragma mark - EZOutputDataSource +//------------------------------------------------------------------------------ + +- (OSStatus) output:(EZOutput *)output + shouldFillAudioBufferList:(AudioBufferList *)audioBufferList + withNumberOfFrames:(UInt32)frames + timestamp:(const AudioTimeStamp *)timestamp +{ + memcpy(audioBufferList, + self.info->audioBufferList, + sizeof(AudioBufferList) + (self.info->audioBufferList->mNumberBuffers - 1)*sizeof(AudioBuffer)); + return noErr; +} + +//------------------------------------------------------------------------------ +#pragma mark - Subclass +//------------------------------------------------------------------------------ + +- (AudioStreamBasicDescription)defaultStreamFormat +{ + return [EZAudioUtilities floatFormatWithNumberOfChannels:[self numberOfChannels] + sampleRate:self.info->inputFormat.mSampleRate]; +} + +//------------------------------------------------------------------------------ + +- (UInt32)numberOfChannels +{ +#if TARGET_OS_IPHONE + return 1; +#elif TARGET_OS_MAC + return (UInt32)self.device.inputChannelCount; +#endif +} + +//------------------------------------------------------------------------------ + +@end + 
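// Usage sketch (illustrative only; not part of EZMicrophone.m). Assuming a
// view controller that adopts EZMicrophoneDelegate, has configured the shared
// AVAudioSession, and keeps hypothetical `microphone` and `audioPlot`
// properties, capture can be started and the float samples observed like so:
//
//    self.microphone = [EZMicrophone microphoneWithDelegate:self];
//    [self.microphone startFetchingAudio];
//
//    - (void)    microphone:(EZMicrophone *)microphone
//          hasAudioReceived:(float **)buffer
//            withBufferSize:(UInt32)bufferSize
//      withNumberOfChannels:(UInt32)numberOfChannels
//    {
//        // buffer[0] holds bufferSize float samples for the first channel.
//        // This fires on a background thread, so any UI work (for example
//        // updating an EZAudioPlot) should hop back to the main queue.
//        __weak typeof (self) weakSelf = self;
//        dispatch_async(dispatch_get_main_queue(), ^{
//            [weakSelf.audioPlot updateBuffer:buffer[0] withBufferSize:bufferSize];
//        });
//    }
//
// To route the captured audio straight to the speaker, the microphone can also
// be handed an EZOutput through the `output` property defined above, e.g.
// `self.microphone.output = [EZOutput sharedOutput];` followed by
// `[self.microphone.output startPlayback];`.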
+//------------------------------------------------------------------------------ +#pragma mark - Callbacks +//------------------------------------------------------------------------------ + +static OSStatus EZAudioMicrophoneCallback(void *inRefCon, + AudioUnitRenderActionFlags *ioActionFlags, + const AudioTimeStamp *inTimeStamp, + UInt32 inBusNumber, + UInt32 inNumberFrames, + AudioBufferList *ioData) +{ + EZMicrophone *microphone = (__bridge EZMicrophone *)inRefCon; + EZMicrophoneInfo *info = (EZMicrophoneInfo *)microphone.info; + + // + // Make sure the size of each buffer in the stored buffer array + // is properly set using the actual number of frames coming in! + // + for (int i = 0; i < info->audioBufferList->mNumberBuffers; i++) { + info->audioBufferList->mBuffers[i].mDataByteSize = inNumberFrames * info->streamFormat.mBytesPerFrame; + } + + // + // Render audio into buffer + // + OSStatus result = AudioUnitRender(info->audioUnit, + ioActionFlags, + inTimeStamp, + inBusNumber, + inNumberFrames, + info->audioBufferList); + + // + // Notify delegate of new buffer list to process + // + if ([microphone.delegate respondsToSelector:@selector(microphone:hasBufferList:withBufferSize:withNumberOfChannels:)]) + { + [microphone.delegate microphone:microphone + hasBufferList:info->audioBufferList + withBufferSize:inNumberFrames + withNumberOfChannels:info->streamFormat.mChannelsPerFrame]; + } + + // + // Notify delegate of new float data processed + // + if ([microphone.delegate respondsToSelector:@selector(microphone:hasAudioReceived:withBufferSize:withNumberOfChannels:)]) + { + // + // Convert to float + // + [microphone.floatConverter convertDataFromAudioBufferList:info->audioBufferList + withNumberOfFrames:inNumberFrames + toFloatBuffers:info->floatData]; + [microphone.delegate microphone:microphone + hasAudioReceived:info->floatData + withBufferSize:inNumberFrames + withNumberOfChannels:info->streamFormat.mChannelsPerFrame]; + } + + return result; +} \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZOutput.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZOutput.h new file mode 100644 index 0000000..701894e --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZOutput.h @@ -0,0 +1,376 @@ +// +// EZOutput.h +// EZAudio +// +// Created by Syed Haris Ali on 12/2/13. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import +#import +#if TARGET_OS_IPHONE +#elif TARGET_OS_MAC +#import +#endif + +@class EZAudioDevice; +@class EZOutput; + +//------------------------------------------------------------------------------ +#pragma mark - Constants +//------------------------------------------------------------------------------ + +FOUNDATION_EXPORT UInt32 const EZOutputMaximumFramesPerSlice; +FOUNDATION_EXPORT Float64 const EZOutputDefaultSampleRate; + +//------------------------------------------------------------------------------ +#pragma mark - EZOutputDataSource +//------------------------------------------------------------------------------ + +/** + The EZOutputDataSource specifies a receiver to provide audio data when the EZOutput is started. Since the 0.4.0 release this has been simplified to only one data source method. + */ +@protocol EZOutputDataSource + +@optional +///----------------------------------------------------------- +/// @name Providing Audio Data +///----------------------------------------------------------- + +@required + +/** + Provides a way to provide output with data anytime the EZOutput needs audio data to play. This function provides an already allocated AudioBufferList to use for providing audio data into the output buffer. The expected format of the audio data provided here is specified by the EZOutput `inputFormat` property. This audio data will be converted into the client format specified by the EZOutput `clientFormat` property. + @param output The instance of the EZOutput that asked for the data. + @param audioBufferList The AudioBufferList structure pointer that needs to be filled with audio data + @param frames The amount of frames as a UInt32 that output will need to properly fill its output buffer. + @param timestamp A AudioTimeStamp pointer to use if you need the current host time. + @return An OSStatus code. If there was no error then use the noErr status code. + */ +- (OSStatus) output:(EZOutput *)output + shouldFillAudioBufferList:(AudioBufferList *)audioBufferList + withNumberOfFrames:(UInt32)frames + timestamp:(const AudioTimeStamp *)timestamp; + +@end + +//------------------------------------------------------------------------------ +#pragma mark - EZOutputDelegate +//------------------------------------------------------------------------------ + +/** + The EZOutputDelegate for the EZOutput component provides a receiver to handle play state, device, and audio data change events. This is very similar to the EZMicrophoneDelegate for the EZMicrophone and the EZAudioFileDelegate for the EZAudioFile. + */ +@protocol EZOutputDelegate + +@optional + +/** + Called anytime the EZOutput starts or stops. + @param output The instance of the EZOutput that triggered the event. + @param isPlaying A BOOL indicating whether the EZOutput instance is playing or not. + */ +- (void)output:(EZOutput *)output changedPlayingState:(BOOL)isPlaying; + +//------------------------------------------------------------------------------ + +/** + Called anytime the `device` changes on an EZOutput instance. + @param output The instance of the EZOutput that triggered the event. + @param device The instance of the new EZAudioDevice the output is using to play audio data. 
+ */ +- (void)output:(EZOutput *)output changedDevice:(EZAudioDevice *)device; + +//------------------------------------------------------------------------------ + +/** + Like the EZMicrophoneDelegate, for the EZOutput this method provides an array of float arrays of the audio received, each float array representing a channel of audio data. This occurs on the background thread so any drawing code must explicity perform its functions on the main thread. + @param output The instance of the EZOutput that triggered the event. + @param buffer The audio data as an array of float arrays. In a stereo signal buffer[0] represents the left channel while buffer[1] would represent the right channel. + @param bufferSize A UInt32 representing the size of each of the buffers (the length of each float array). + @param numberOfChannels A UInt32 representing the number of channels (you can use this to know how many float arrays are in the `buffer` parameter. + @warning This function executes on a background thread to avoid blocking any audio operations. If operations should be performed on any other thread (like the main thread) it should be performed within a dispatch block like so: dispatch_async(dispatch_get_main_queue(), ^{ ...Your Code... }) + */ +- (void) output:(EZOutput *)output + playedAudio:(float **)buffer + withBufferSize:(UInt32)bufferSize + withNumberOfChannels:(UInt32)numberOfChannels; + +//------------------------------------------------------------------------------ + +@end + +/** + The EZOutput component provides a generic output to glue all the other EZAudio components together and push whatever sound you've created to the default output device (think opposite of the microphone). The EZOutputDataSource provides the required AudioBufferList needed to populate the output buffer while the EZOutputDelegate provides the same kind of mechanism as the EZMicrophoneDelegate or EZAudioFileDelegate in that you will receive a callback that provides non-interleaved, float data for visualizing the output (done using an internal float converter). As of 0.4.0 the EZOutput has been simplified to a single EZOutputDataSource method and now uses an AUGraph to provide format conversion from the `inputFormat` to the playback graph's `clientFormat` linear PCM formats, mixer controls for setting volume and pan settings, hooks to add in any number of effect audio units (see the `connectOutputOfSourceNode:sourceNodeOutputBus:toDestinationNode:destinationNodeInputBus:inGraph:` subclass method), and hardware device toggling (via EZAudioDevice). + */ +@interface EZOutput : NSObject + +//------------------------------------------------------------------------------ +#pragma mark - Initializers +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Initializers +///----------------------------------------------------------- + +/** + Creates a new instance of the EZOutput and allows the caller to specify an EZOutputDataSource. + @param dataSource The EZOutputDataSource that will be used to pull the audio data for the output callback. + @return A newly created instance of the EZOutput class. + */ +- (instancetype)initWithDataSource:(id)dataSource; + +/** + Creates a new instance of the EZOutput and allows the caller to specify an EZOutputDataSource. + @param dataSource The EZOutputDataSource that will be used to pull the audio data for the output callback. 
+ @param inputFormat The AudioStreamBasicDescription of the EZOutput. + @warning AudioStreamBasicDescription input formats must be linear PCM! + @return A newly created instance of the EZOutput class. + */ +- (instancetype)initWithDataSource:(id)dataSource + inputFormat:(AudioStreamBasicDescription)inputFormat; + +//------------------------------------------------------------------------------ +#pragma mark - Class Initializers +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Class Initializers +///----------------------------------------------------------- + +/** + Class method to create a new instance of the EZOutput + @return A newly created instance of the EZOutput class. + */ ++ (instancetype)output; + +/** + Class method to create a new instance of the EZOutput and allows the caller to specify an EZOutputDataSource. + @param dataSource The EZOutputDataSource that will be used to pull the audio data for the output callback. + @return A newly created instance of the EZOutput class. + */ ++ (instancetype)outputWithDataSource:(id)dataSource; + +/** + Class method to create a new instance of the EZOutput and allows the caller to specify an EZOutputDataSource. + @param dataSource The EZOutputDataSource that will be used to pull the audio data for the output callback. + @param audioStreamBasicDescription The AudioStreamBasicDescription of the EZOutput. + @warning AudioStreamBasicDescriptions that are invalid will cause the EZOutput to fail to initialize + @return A newly created instance of the EZOutput class. + */ ++ (instancetype)outputWithDataSource:(id)dataSource + inputFormat:(AudioStreamBasicDescription)inputFormat; + +//------------------------------------------------------------------------------ +#pragma mark - Singleton +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Shared Instance +///----------------------------------------------------------- + +/** + Creates a shared instance of the EZOutput (one app will usually only need one output and share the role of the EZOutputDataSource). + @return The shared instance of the EZOutput class. + */ ++ (instancetype)sharedOutput; + +//------------------------------------------------------------------------------ +#pragma mark - Properties +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Setting/Getting The Stream Formats +///----------------------------------------------------------- + +/** + Provides the AudioStreamBasicDescription structure used at the beginning of the playback graph which is then converted into the `clientFormat` using the AUConverter audio unit. + @warning The AudioStreamBasicDescription set here must be linear PCM. Compressed formats are not supported...the EZAudioFile's clientFormat performs the audio conversion on the fly from compressed to linear PCM so there is no additional work to be done there. 
+ @return An AudioStreamBasicDescription structure describing + */ +@property (nonatomic, readwrite) AudioStreamBasicDescription inputFormat; + +//------------------------------------------------------------------------------ + +/** + Provides the AudioStreamBasicDescription structure that serves as the common format used throughout the playback graph (similar to how the EZAudioFile as a clientFormat that is linear PCM to be shared amongst other components). The `inputFormat` is converted into this format at the beginning of the playback graph using an AUConverter audio unit. Defaults to the whatever the `defaultClientFormat` method returns is if a custom one isn't explicitly set. + @warning The AudioStreamBasicDescription set here must be linear PCM. Compressed formats are not supported by Audio Units. + @return An AudioStreamBasicDescription structure describing the common client format for the playback graph. + */ +@property (nonatomic, readwrite) AudioStreamBasicDescription clientFormat; + +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Setting/Getting The Data Source and Delegate +///----------------------------------------------------------- + +/** + The EZOutputDataSource that provides the audio data in the `inputFormat` for the EZOutput to play. If an EZOutputDataSource is not specified then the EZOutput will just output silence. + */ +@property (nonatomic, weak) id dataSource; + +//------------------------------------------------------------------------------ + +/** + The EZOutputDelegate for which to handle the output callbacks + */ +@property (nonatomic, weak) id delegate; + +//------------------------------------------------------------------------------ + +/** + Provides a flag indicating whether the EZOutput is pulling audio data from the EZOutputDataSource for playback. + @return YES if the EZOutput is running, NO if it is stopped + */ +@property (readonly) BOOL isPlaying; + +//------------------------------------------------------------------------------ + +/** + Provides the current pan from the audio player's mixer audio unit in the playback graph. Setting the pan adjusts the direction of the audio signal from left (0) to right (1). Default is 0.5 (middle). + */ +@property (nonatomic, assign) float pan; + +//------------------------------------------------------------------------------ + +/** + Provides the current volume from the audio player's mixer audio unit in the playback graph. Setting the volume adjusts the gain of the output between 0 and 1. Default is 1. + */ +@property (nonatomic, assign) float volume; + +//------------------------------------------------------------------------------ +#pragma mark - Core Audio Properties +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Core Audio Properties +///----------------------------------------------------------- + +/** + The AUGraph used to chain together the converter, mixer, and output audio units. + */ +@property (readonly) AUGraph graph; + +//------------------------------------------------------------------------------ + +/** + The AudioUnit that is being used to convert the audio data coming into the output's playback graph. 
+ */ +@property (readonly) AudioUnit converterAudioUnit; + +//------------------------------------------------------------------------------ + +/** + The AudioUnit that is being used as the mixer to adjust the volume on the output's playback graph. + */ +@property (readonly) AudioUnit mixerAudioUnit; + +//------------------------------------------------------------------------------ + +/** + The AudioUnit that is being used as the hardware output for the output's playback graph. + */ +@property (readonly) AudioUnit outputAudioUnit; + +//------------------------------------------------------------------------------ +#pragma mark - Setters +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Getting/Setting The Output's Hardware Device +///----------------------------------------------------------- + +/** + An EZAudioDevice instance that is used to route the audio data out to the speaker. To find a list of available output devices see the EZAudioDevice `outputDevices` method. + */ +@property (nonatomic, strong, readwrite) EZAudioDevice *device; + +//------------------------------------------------------------------------------ +#pragma mark - Actions +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Starting/Stopping The Output +///----------------------------------------------------------- + +/** + Starts pulling audio data from the EZOutputDataSource to the default device output. + */ +- (void)startPlayback; + +///----------------------------------------------------------- + +/** + Stops pulling audio data from the EZOutputDataSource to the default device output. + */ +- (void)stopPlayback; + +//------------------------------------------------------------------------------ +#pragma mark - Subclass +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Subclass +///----------------------------------------------------------- + +/** + This method handles connecting the converter node to the mixer node within the AUGraph that is being used as the playback graph. Subclasses can override this method and insert their custom nodes to perform effects processing on the audio data being rendered. + + This was inspired by Daniel Kennett's blog post on how to add a custom equalizer to a CocoaLibSpotify SPCoreAudioController's AUGraph. For more information see Daniel's post and example code here: http://ikennd.ac/blog/2012/04/augraph-basics-in-cocoalibspotify/. + @param sourceNode An AUNode representing the node the audio data is coming from. + @param sourceNodeOutputBus A UInt32 representing the output bus from the source node that should be connected into the next node's input bus. + @param destinationNode An AUNode representing the node the audio data should be connected to. + @param destinationNodeInputBus A UInt32 representing the input bus the source node's output bus should be connecting to. + @param graph The AUGraph that is being used to hold the playback graph. Same as from the `graph` property. + @return An OSStatus code. For no error return back `noErr`. 
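 
 As an illustrative sketch (not the shipping implementation), a subclass that had already added a hypothetical `effectNode` AUNode to the graph could override this method to splice the effect between the converter and the mixer:
 
    - (OSStatus)connectOutputOfSourceNode:(AUNode)sourceNode
                      sourceNodeOutputBus:(UInt32)sourceNodeOutputBus
                        toDestinationNode:(AUNode)destinationNode
                  destinationNodeInputBus:(UInt32)destinationNodeInputBus
                                  inGraph:(AUGraph)graph
    {
        // converter -> effect -> mixer instead of converter -> mixer
        OSStatus status = AUGraphConnectNodeInput(graph,
                                                  sourceNode,
                                                  sourceNodeOutputBus,
                                                  self.effectNode,
                                                  0);
        if (status != noErr)
        {
            return status;
        }
        return AUGraphConnectNodeInput(graph,
                                       self.effectNode,
                                       0,
                                       destinationNode,
                                       destinationNodeInputBus);
    }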
+ */ +- (OSStatus)connectOutputOfSourceNode:(AUNode)sourceNode + sourceNodeOutputBus:(UInt32)sourceNodeOutputBus + toDestinationNode:(AUNode)destinationNode + destinationNodeInputBus:(UInt32)destinationNodeInputBus + inGraph:(AUGraph)graph; + +//------------------------------------------------------------------------------ + +/** + The default AudioStreamBasicDescription set as the client format of the output if no custom `clientFormat` is set. Defaults to a 44.1 kHz stereo, non-interleaved, float format. + @return An AudioStreamBasicDescription that will be used as the default stream format. + */ +- (AudioStreamBasicDescription)defaultClientFormat; + +//------------------------------------------------------------------------------ + +/** + The default AudioStreamBasicDescription set as the `inputFormat` of the output if no custom `inputFormat` is set. Defaults to a 44.1 kHz stereo, non-interleaved, float format. + @return An AudioStreamBasicDescription that will be used as the default stream format. + */ +- (AudioStreamBasicDescription)defaultInputFormat; + +//------------------------------------------------------------------------------ + +/** + The default value used as the AudioUnit subtype when creating the hardware output component. By default this is kAudioUnitSubType_RemoteIO for iOS and kAudioUnitSubType_HALOutput for OSX. + @warning If you change this to anything other than kAudioUnitSubType_HALOutput for OSX you will get a failed assertion because devices can only be set when using the HAL audio unit. + @return An OSType that represents the AudioUnit subtype for the hardware output component. + */ +- (OSType)outputAudioUnitSubType; + +@end \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZOutput.m b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZOutput.m new file mode 100644 index 0000000..0d660d6 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZOutput.m @@ -0,0 +1,753 @@ +// +// EZOutput.m +// EZAudio +// +// Created by Syed Haris Ali on 12/2/13. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +#import "EZOutput.h" +#import "EZAudioDevice.h" +#import "EZAudioFloatConverter.h" +#import "EZAudioUtilities.h" + +//------------------------------------------------------------------------------ +#pragma mark - Constants +//------------------------------------------------------------------------------ + +UInt32 const EZOutputMaximumFramesPerSlice = 4096; +Float64 const EZOutputDefaultSampleRate = 44100.0f; + +//------------------------------------------------------------------------------ +#pragma mark - Data Structures +//------------------------------------------------------------------------------ + +typedef struct +{ + // stream format params + AudioStreamBasicDescription inputFormat; + AudioStreamBasicDescription clientFormat; + + // float converted data + float **floatData; + + // nodes + EZAudioNodeInfo converterNodeInfo; + EZAudioNodeInfo mixerNodeInfo; + EZAudioNodeInfo outputNodeInfo; + + // audio graph + AUGraph graph; +} EZOutputInfo; + +//------------------------------------------------------------------------------ +#pragma mark - Callbacks (Declaration) +//------------------------------------------------------------------------------ + +OSStatus EZOutputConverterInputCallback(void *inRefCon, + AudioUnitRenderActionFlags *ioActionFlags, + const AudioTimeStamp *inTimeStamp, + UInt32 inBusNumber, + UInt32 inNumberFrames, + AudioBufferList *ioData); + +//------------------------------------------------------------------------------ + +OSStatus EZOutputGraphRenderCallback(void *inRefCon, + AudioUnitRenderActionFlags *ioActionFlags, + const AudioTimeStamp *inTimeStamp, + UInt32 inBusNumber, + UInt32 inNumberFrames, + AudioBufferList *ioData); + +//------------------------------------------------------------------------------ +#pragma mark - EZOutput (Interface Extension) +//------------------------------------------------------------------------------ + +@interface EZOutput () +@property (nonatomic, strong) EZAudioFloatConverter *floatConverter; +@property (nonatomic, assign) EZOutputInfo *info; +@end + +//------------------------------------------------------------------------------ +#pragma mark - EZOutput (Implementation) +//------------------------------------------------------------------------------ + +@implementation EZOutput + +//------------------------------------------------------------------------------ +#pragma mark - Dealloc +//------------------------------------------------------------------------------ + +- (void)dealloc +{ + if (self.floatConverter) + { + self.floatConverter = nil; + [EZAudioUtilities freeFloatBuffers:self.info->floatData + numberOfChannels:self.info->clientFormat.mChannelsPerFrame]; + } + [EZAudioUtilities checkResult:AUGraphStop(self.info->graph) + operation:"Failed to stop graph"]; + [EZAudioUtilities checkResult:AUGraphClose(self.info->graph) + operation:"Failed to close graph"]; + [EZAudioUtilities checkResult:DisposeAUGraph(self.info->graph) + operation:"Failed to dispose of graph"]; + free(self.info); +} + +//------------------------------------------------------------------------------ +#pragma mark - Initialization +//------------------------------------------------------------------------------ + +- (instancetype) init +{ + self = [super init]; + if (self) + { + [self setup]; + } + return self; +} + +//------------------------------------------------------------------------------ + +- (instancetype)initWithDataSource:(id)dataSource +{ + self = [self init]; + if (self) + { + self.dataSource = dataSource; + } + return self; +} + 
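// Illustrative sketch (not part of this file): when a data source produces
// audio in something other than the default stereo, non-interleaved float
// format, the `inputFormat` variants can be handed an explicit linear PCM
// description, for example a mono float format built with EZAudioUtilities:
//
//    AudioStreamBasicDescription monoFloat =
//        [EZAudioUtilities floatFormatWithNumberOfChannels:1
//                                               sampleRate:44100.0f];
//    EZOutput *output = [EZOutput outputWithDataSource:self
//                                          inputFormat:monoFloat];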
+//------------------------------------------------------------------------------ + +- (instancetype)initWithDataSource:(id)dataSource + inputFormat:(AudioStreamBasicDescription)inputFormat +{ + self = [self initWithDataSource:dataSource]; + if (self) + { + self.inputFormat = inputFormat; + } + return self; +} + +//------------------------------------------------------------------------------ +#pragma mark - Class Initializers +//------------------------------------------------------------------------------ + ++ (instancetype)output +{ + return [[self alloc] init]; +} + +//------------------------------------------------------------------------------ + ++ (instancetype)outputWithDataSource:(id)dataSource +{ + return [[self alloc] initWithDataSource:dataSource]; +} + +//------------------------------------------------------------------------------ + ++ (instancetype)outputWithDataSource:(id)dataSource + inputFormat:(AudioStreamBasicDescription)inputFormat +{ + return [[self alloc] initWithDataSource:dataSource + inputFormat:inputFormat]; +} + +//------------------------------------------------------------------------------ +#pragma mark - Singleton +//------------------------------------------------------------------------------ + ++ (instancetype)sharedOutput +{ + static EZOutput *output; + static dispatch_once_t onceToken; + dispatch_once(&onceToken, ^ + { + output = [[self alloc] init]; + }); + return output; +} + +//------------------------------------------------------------------------------ +#pragma mark - Setup +//------------------------------------------------------------------------------ + +- (void)setup +{ + // + // Create structure to hold state data + // + self.info = (EZOutputInfo *)malloc(sizeof(EZOutputInfo)); + memset(self.info, 0, sizeof(EZOutputInfo)); + + // + // Setup the audio graph + // + [EZAudioUtilities checkResult:NewAUGraph(&self.info->graph) + operation:"Failed to create graph"]; + + // + // Add converter node + // + AudioComponentDescription converterDescription; + converterDescription.componentType = kAudioUnitType_FormatConverter; + converterDescription.componentSubType = kAudioUnitSubType_AUConverter; + converterDescription.componentManufacturer = kAudioUnitManufacturer_Apple; + [EZAudioUtilities checkResult:AUGraphAddNode(self.info->graph, + &converterDescription, + &self.info->converterNodeInfo.node) + operation:"Failed to add converter node to audio graph"]; + + // + // Add mixer node + // + AudioComponentDescription mixerDescription; + mixerDescription.componentType = kAudioUnitType_Mixer; +#if TARGET_OS_IPHONE + mixerDescription.componentSubType = kAudioUnitSubType_MultiChannelMixer; +#elif TARGET_OS_MAC + mixerDescription.componentSubType = kAudioUnitSubType_StereoMixer; +#endif + mixerDescription.componentManufacturer = kAudioUnitManufacturer_Apple; + [EZAudioUtilities checkResult:AUGraphAddNode(self.info->graph, + &mixerDescription, + &self.info->mixerNodeInfo.node) + operation:"Failed to add mixer node to audio graph"]; + + // + // Add output node + // + AudioComponentDescription outputDescription; + outputDescription.componentType = kAudioUnitType_Output; + outputDescription.componentSubType = [self outputAudioUnitSubType]; + outputDescription.componentManufacturer = kAudioUnitManufacturer_Apple; + [EZAudioUtilities checkResult:AUGraphAddNode(self.info->graph, + &outputDescription, + &self.info->outputNodeInfo.node) + operation:"Failed to add output node to audio graph"]; + + // + // Open the graph + // + [EZAudioUtilities 
checkResult:AUGraphOpen(self.info->graph) + operation:"Failed to open graph"]; + + // + // Make node connections + // + OSStatus status = [self connectOutputOfSourceNode:self.info->converterNodeInfo.node + sourceNodeOutputBus:0 + toDestinationNode:self.info->mixerNodeInfo.node + destinationNodeInputBus:0 + inGraph:self.info->graph]; + [EZAudioUtilities checkResult:status + operation:"Failed to connect output of source node to destination node in graph"]; + + // + // Connect mixer to output + // + [EZAudioUtilities checkResult:AUGraphConnectNodeInput(self.info->graph, + self.info->mixerNodeInfo.node, + 0, + self.info->outputNodeInfo.node, + 0) + operation:"Failed to connect mixer node to output node"]; + + // + // Get the audio units + // + [EZAudioUtilities checkResult:AUGraphNodeInfo(self.info->graph, + self.info->converterNodeInfo.node, + &converterDescription, + &self.info->converterNodeInfo.audioUnit) + operation:"Failed to get converter audio unit"]; + [EZAudioUtilities checkResult:AUGraphNodeInfo(self.info->graph, + self.info->mixerNodeInfo.node, + &mixerDescription, + &self.info->mixerNodeInfo.audioUnit) + operation:"Failed to get mixer audio unit"]; + [EZAudioUtilities checkResult:AUGraphNodeInfo(self.info->graph, + self.info->outputNodeInfo.node, + &outputDescription, + &self.info->outputNodeInfo.audioUnit) + operation:"Failed to get output audio unit"]; + + // + // Add a node input callback for the converter node + // + AURenderCallbackStruct converterCallback; + converterCallback.inputProc = EZOutputConverterInputCallback; + converterCallback.inputProcRefCon = (__bridge void *)(self); + [EZAudioUtilities checkResult:AUGraphSetNodeInputCallback(self.info->graph, + self.info->converterNodeInfo.node, + 0, + &converterCallback) + operation:"Failed to set render callback on converter node"]; + + // + // Set stream formats + // + [self setClientFormat:[self defaultClientFormat]]; + [self setInputFormat:[self defaultInputFormat]]; + + // + // Use the default device + // + EZAudioDevice *currentOutputDevice = [EZAudioDevice currentOutputDevice]; + [self setDevice:currentOutputDevice]; + + // + // Set maximum frames per slice to 4096 to allow playback during + // lock screen (iOS only?) 
+ // + UInt32 maximumFramesPerSlice = EZOutputMaximumFramesPerSlice; + [EZAudioUtilities checkResult:AudioUnitSetProperty(self.info->mixerNodeInfo.audioUnit, + kAudioUnitProperty_MaximumFramesPerSlice, + kAudioUnitScope_Global, + 0, + &maximumFramesPerSlice, + sizeof(maximumFramesPerSlice)) + operation:"Failed to set maximum frames per slice on mixer node"]; + + // + // Initialize all the audio units in the graph + // + [EZAudioUtilities checkResult:AUGraphInitialize(self.info->graph) + operation:"Failed to initialize graph"]; + + // + // Add render callback + // + [EZAudioUtilities checkResult:AudioUnitAddRenderNotify(self.info->mixerNodeInfo.audioUnit, + EZOutputGraphRenderCallback, + (__bridge void *)(self)) + operation:"Failed to add render callback"]; +} + +//------------------------------------------------------------------------------ +#pragma mark - Actions +//------------------------------------------------------------------------------ + +- (void)startPlayback +{ + // + // Start the AUGraph + // + [EZAudioUtilities checkResult:AUGraphStart(self.info->graph) + operation:"Failed to start graph"]; + + // + // Notify delegate + // + if ([self.delegate respondsToSelector:@selector(output:changedPlayingState:)]) + { + [self.delegate output:self changedPlayingState:[self isPlaying]]; + } +} + +//------------------------------------------------------------------------------ + +- (void)stopPlayback +{ + // + // Stop the AUGraph + // + [EZAudioUtilities checkResult:AUGraphStop(self.info->graph) + operation:"Failed to stop graph"]; + + // + // Notify delegate + // + if ([self.delegate respondsToSelector:@selector(output:changedPlayingState:)]) + { + [self.delegate output:self changedPlayingState:[self isPlaying]]; + } +} + +//------------------------------------------------------------------------------ +#pragma mark - Getters +//------------------------------------------------------------------------------ + +- (AudioStreamBasicDescription)clientFormat +{ + return self.info->clientFormat; +} + +//------------------------------------------------------------------------------ + +- (AudioStreamBasicDescription)inputFormat +{ + return self.info->inputFormat; +} + +//------------------------------------------------------------------------------ + +- (BOOL)isPlaying +{ + Boolean isPlaying; + [EZAudioUtilities checkResult:AUGraphIsRunning(self.info->graph, + &isPlaying) + operation:"Failed to check if graph is running"]; + return isPlaying; +} + +//------------------------------------------------------------------------------ + +- (float)pan +{ + AudioUnitParameterID param; +#if TARGET_OS_IPHONE + param = kMultiChannelMixerParam_Pan; +#elif TARGET_OS_MAC + param = kStereoMixerParam_Pan; +#endif + AudioUnitParameterValue pan; + [EZAudioUtilities checkResult:AudioUnitGetParameter(self.info->mixerNodeInfo.audioUnit, + param, + kAudioUnitScope_Input, + 0, + &pan) operation:"Failed to get pan from mixer unit"]; + return pan; +} + +//------------------------------------------------------------------------------ + +- (float)volume +{ + AudioUnitParameterID param; +#if TARGET_OS_IPHONE + param = kMultiChannelMixerParam_Volume; +#elif TARGET_OS_MAC + param = kStereoMixerParam_Volume; +#endif + AudioUnitParameterValue volume; + [EZAudioUtilities checkResult:AudioUnitGetParameter(self.info->mixerNodeInfo.audioUnit, + param, + kAudioUnitScope_Input, + 0, + &volume) + operation:"Failed to get volume from mixer unit"]; + return volume; +} + 
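// Usage sketch (illustrative only; not part of EZOutput.m): because `volume`
// and `pan` are backed by the mixer audio unit, they can be adjusted while the
// graph is running, e.g. from a slider action. Volume ranges from 0.0 to 1.0
// (default 1.0); pan ranges from 0.0 (left) to 1.0 (right) with 0.5 centered.
//
//    [EZOutput sharedOutput].volume = 0.5f;
//    [EZOutput sharedOutput].pan = 0.25f;   // favor the left channel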
+//------------------------------------------------------------------------------ +#pragma mark - Setters +//------------------------------------------------------------------------------ + +- (void)setClientFormat:(AudioStreamBasicDescription)clientFormat +{ + if (self.floatConverter) + { + self.floatConverter = nil; + [EZAudioUtilities freeFloatBuffers:self.info->floatData + numberOfChannels:self.clientFormat.mChannelsPerFrame]; + } + + self.info->clientFormat = clientFormat; + [EZAudioUtilities checkResult:AudioUnitSetProperty(self.info->converterNodeInfo.audioUnit, + kAudioUnitProperty_StreamFormat, + kAudioUnitScope_Output, + 0, + &self.info->clientFormat, + sizeof(self.info->clientFormat)) + operation:"Failed to set output client format on converter audio unit"]; + [EZAudioUtilities checkResult:AudioUnitSetProperty(self.info->mixerNodeInfo.audioUnit, + kAudioUnitProperty_StreamFormat, + kAudioUnitScope_Input, + 0, + &self.info->clientFormat, + sizeof(self.info->clientFormat)) + operation:"Failed to set input client format on mixer audio unit"]; + [EZAudioUtilities checkResult:AudioUnitSetProperty(self.info->mixerNodeInfo.audioUnit, + kAudioUnitProperty_StreamFormat, + kAudioUnitScope_Output, + 0, + &self.info->clientFormat, + sizeof(self.info->clientFormat)) + operation:"Failed to set output client format on mixer audio unit"]; + + self.floatConverter = [[EZAudioFloatConverter alloc] initWithInputFormat:clientFormat]; + self.info->floatData = [EZAudioUtilities floatBuffersWithNumberOfFrames:EZOutputMaximumFramesPerSlice + numberOfChannels:clientFormat.mChannelsPerFrame]; +} + +//------------------------------------------------------------------------------ + +- (void)setDevice:(EZAudioDevice *)device +{ +#if TARGET_OS_IPHONE + + // if the devices are equal then ignore + if ([device isEqual:self.device]) + { + return; + } + + NSError *error; + [[AVAudioSession sharedInstance] setOutputDataSource:device.dataSource error:&error]; + if (error) + { + NSLog(@"Error setting output device data source (%@), reason: %@", + device.dataSource, + error.localizedDescription); + } + +#elif TARGET_OS_MAC + UInt32 outputEnabled = device.outputChannelCount > 0; + NSAssert(outputEnabled, @"Selected EZAudioDevice does not have any output channels"); + NSAssert([self outputAudioUnitSubType] == kAudioUnitSubType_HALOutput, + @"Audio device selection on OSX is only available when using the kAudioUnitSubType_HALOutput output unit subtype"); + [EZAudioUtilities checkResult:AudioUnitSetProperty(self.info->outputNodeInfo.audioUnit, + kAudioOutputUnitProperty_EnableIO, + kAudioUnitScope_Output, + 0, + &outputEnabled, + sizeof(outputEnabled)) + operation:"Failed to set flag on device output"]; + + AudioDeviceID deviceId = device.deviceID; + [EZAudioUtilities checkResult:AudioUnitSetProperty(self.info->outputNodeInfo.audioUnit, + kAudioOutputUnitProperty_CurrentDevice, + kAudioUnitScope_Global, + 0, + &deviceId, + sizeof(AudioDeviceID)) + operation:"Couldn't set default device on I/O unit"]; +#endif + + // store device + _device = device; + + // notify delegate + if ([self.delegate respondsToSelector:@selector(output:changedDevice:)]) + { + [self.delegate output:self changedDevice:device]; + } +} + +//------------------------------------------------------------------------------ + +- (void)setInputFormat:(AudioStreamBasicDescription)inputFormat +{ + self.info->inputFormat = inputFormat; + [EZAudioUtilities checkResult:AudioUnitSetProperty(self.info->converterNodeInfo.audioUnit, + kAudioUnitProperty_StreamFormat, 
+ kAudioUnitScope_Input, + 0, + &inputFormat, + sizeof(inputFormat)) + operation:"Failed to set input format on converter audio unit"]; +} + +//------------------------------------------------------------------------------ + +- (void)setPan:(float)pan +{ + AudioUnitParameterID param; +#if TARGET_OS_IPHONE + param = kMultiChannelMixerParam_Pan; +#elif TARGET_OS_MAC + param = kStereoMixerParam_Pan; +#endif + [EZAudioUtilities checkResult:AudioUnitSetParameter(self.info->mixerNodeInfo.audioUnit, + param, + kAudioUnitScope_Input, + 0, + pan, + 0) + operation:"Failed to set volume on mixer unit"]; +} + +//------------------------------------------------------------------------------ + +- (void)setVolume:(float)volume +{ + AudioUnitParameterID param; +#if TARGET_OS_IPHONE + param = kMultiChannelMixerParam_Volume; +#elif TARGET_OS_MAC + param = kStereoMixerParam_Volume; +#endif + [EZAudioUtilities checkResult:AudioUnitSetParameter(self.info->mixerNodeInfo.audioUnit, + param, + kAudioUnitScope_Input, + 0, + volume, + 0) + operation:"Failed to set volume on mixer unit"]; +} + +//------------------------------------------------------------------------------ +#pragma mark - Core Audio Properties +//------------------------------------------------------------------------------ + +- (AUGraph)graph +{ + return self.info->graph; +} + +//------------------------------------------------------------------------------ + +- (AudioUnit)converterAudioUnit +{ + return self.info->converterNodeInfo.audioUnit; +} + +//------------------------------------------------------------------------------ + +- (AudioUnit)mixerAudioUnit +{ + return self.info->mixerNodeInfo.audioUnit; +} + +//------------------------------------------------------------------------------ + +- (AudioUnit)outputAudioUnit +{ + return self.info->outputNodeInfo.audioUnit; +} + +//------------------------------------------------------------------------------ +#pragma mark - Subclass +//------------------------------------------------------------------------------ + +- (OSStatus)connectOutputOfSourceNode:(AUNode)sourceNode + sourceNodeOutputBus:(UInt32)sourceNodeOutputBus + toDestinationNode:(AUNode)destinationNode + destinationNodeInputBus:(UInt32)destinationNodeInputBus + inGraph:(AUGraph)graph +{ + // + // Default implementation is to just connect the source to destination + // + [EZAudioUtilities checkResult:AUGraphConnectNodeInput(graph, + sourceNode, + sourceNodeOutputBus, + destinationNode, + destinationNodeInputBus) + operation:"Failed to connect converter node to mixer node"]; + return noErr; +} + +//------------------------------------------------------------------------------ + +- (AudioStreamBasicDescription)defaultClientFormat +{ + return [EZAudioUtilities stereoFloatNonInterleavedFormatWithSampleRate:EZOutputDefaultSampleRate]; +} + +//------------------------------------------------------------------------------ + +- (AudioStreamBasicDescription)defaultInputFormat +{ + return [EZAudioUtilities stereoFloatNonInterleavedFormatWithSampleRate:EZOutputDefaultSampleRate]; +} + +//------------------------------------------------------------------------------ + +- (OSType)outputAudioUnitSubType +{ +#if TARGET_OS_IPHONE + return kAudioUnitSubType_RemoteIO; +#elif TARGET_OS_MAC + return kAudioUnitSubType_HALOutput; +#endif +} + +//------------------------------------------------------------------------------ + +@end + +//------------------------------------------------------------------------------ +#pragma mark - Callbacks (Implementation) 
+//------------------------------------------------------------------------------ + +OSStatus EZOutputConverterInputCallback(void *inRefCon, + AudioUnitRenderActionFlags *ioActionFlags, + const AudioTimeStamp *inTimeStamp, + UInt32 inBusNumber, + UInt32 inNumberFrames, + AudioBufferList *ioData) +{ + EZOutput *output = (__bridge EZOutput *)inRefCon; + + // + // Try to ask the data source for audio data to fill out the output's + // buffer list + // + if ([output.dataSource respondsToSelector:@selector(output:shouldFillAudioBufferList:withNumberOfFrames:timestamp:)]) + { + return [output.dataSource output:output + shouldFillAudioBufferList:ioData + withNumberOfFrames:inNumberFrames + timestamp:inTimeStamp]; + } + else + { + // + // Silence if there is nothing to output + // + for (int i = 0; i < ioData->mNumberBuffers; i++) + { + memset(ioData->mBuffers[i].mData, + 0, + ioData->mBuffers[i].mDataByteSize); + } + } + return noErr; +} + +//------------------------------------------------------------------------------ + +OSStatus EZOutputGraphRenderCallback(void *inRefCon, + AudioUnitRenderActionFlags *ioActionFlags, + const AudioTimeStamp *inTimeStamp, + UInt32 inBusNumber, + UInt32 inNumberFrames, + AudioBufferList *ioData) +{ + EZOutput *output = (__bridge EZOutput *)inRefCon; + + // + // provide the audio received delegate callback + // + if (*ioActionFlags & kAudioUnitRenderAction_PostRender) + { + if ([output.delegate respondsToSelector:@selector(output:playedAudio:withBufferSize:withNumberOfChannels:)]) + { + UInt32 frames = ioData->mBuffers[0].mDataByteSize / output.info->clientFormat.mBytesPerFrame; + [output.floatConverter convertDataFromAudioBufferList:ioData + withNumberOfFrames:frames + toFloatBuffers:output.info->floatData]; + [output.delegate output:output + playedAudio:output.info->floatData + withBufferSize:inNumberFrames + withNumberOfChannels:output.info->clientFormat.mChannelsPerFrame]; + } + } + return noErr; +} \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZPlot.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZPlot.h new file mode 100644 index 0000000..bbde1f5 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZPlot.h @@ -0,0 +1,142 @@ +// +// EZPlot.h +// EZAudio +// +// Created by Syed Haris Ali on 11/24/13. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import +#import "EZAudioUtilities.h" + +//------------------------------------------------------------------------------ +#pragma mark - Enumerations +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Plot Types +///----------------------------------------------------------- + +/** + The types of plots that can be displayed in the view using the data. + */ +typedef NS_ENUM(NSInteger, EZPlotType) +{ + /** + Plot that displays only the samples of the current buffer + */ + EZPlotTypeBuffer, + + /** + Plot that displays a rolling history of values using the RMS calculated for each incoming buffer + */ + EZPlotTypeRolling +}; + +/** + EZPlot is a cross-platform (iOS and OSX) class used to subclass the default view type (either UIView or NSView, respectively). + + ## Subclassing Notes + + This class isn't meant to be directly used in practice, but instead establishes the default properties and behaviors subclasses should obey to provide consistent behavior accross multiple types of graphs (i.e. set background color, plot type, should fill in, etc.). Subclasses should make use of the inherited properties from this class to allow all child plots to benefit from the same + */ +#if TARGET_OS_IPHONE +#import +@interface EZPlot : UIView +#elif TARGET_OS_MAC +#import +@interface EZPlot : NSView +#endif + +//------------------------------------------------------------------------------ +#pragma mark - Properties +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Customizing The Plot's Appearance +///----------------------------------------------------------- +/** + The default background color of the plot. For iOS the color is specified as a UIColor while for OSX the color is an NSColor. The default value on both platforms is black. + */ +#if TARGET_OS_IPHONE +@property (nonatomic, strong) IBInspectable UIColor *backgroundColor; +#elif TARGET_OS_MAC +@property (nonatomic, strong) IBInspectable NSColor *backgroundColor; +#endif + +/** + The default color of the plot's data (i.e. waveform, y-axis values). For iOS the color is specified as a UIColor while for OSX the color is an NSColor. The default value on both platforms is red. + */ +#if TARGET_OS_IPHONE +@property (nonatomic, strong) IBInspectable UIColor *color; +#elif TARGET_OS_MAC +@property (nonatomic, strong) IBInspectable NSColor *color; +#endif + +/** + The plot's gain value, which controls the scale of the y-axis values. The default value of the gain is 1.0f and should always be greater than 0.0f. + */ +@property (nonatomic, assign) IBInspectable float gain; + +/** + The type of plot as specified by the `EZPlotType` enumeration (i.e. a buffer or rolling plot type). + */ +@property (nonatomic, assign) IBInspectable EZPlotType plotType; + +/** + A boolean indicating whether or not to fill in the graph. A value of YES will make a filled graph (filling in the space between the x-axis and the y-value), while a value of NO will create a stroked graph (connecting the points along the y-axis). 
+ */ +@property (nonatomic, assign) IBInspectable BOOL shouldFill; + +/** + A boolean indicating whether the graph should be rotated along the x-axis to give a mirrored reflection. This is typical for audio plots to produce the classic waveform look. A value of YES will produce a mirrored reflection of the y-values about the x-axis, while a value of NO will only plot the y-values. + */ +@property (nonatomic, assign) IBInspectable BOOL shouldMirror; + +//------------------------------------------------------------------------------ +#pragma mark - Clearing +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Clearing The Plot +///----------------------------------------------------------- + +/** + Clears all data from the audio plot (includes both EZPlotTypeBuffer and EZPlotTypeRolling) + */ +-(void)clear; + +//------------------------------------------------------------------------------ +#pragma mark - Get Samples +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Updating The Plot +///----------------------------------------------------------- + +/** + Updates the plot with the new buffer data and tells the view to redraw itself. Caller will provide a float array with the values they expect to see on the y-axis. The plot will internally handle mapping the x-axis and y-axis to the current view port, any interpolation for fills effects, and mirroring. + @param buffer A float array of values to map to the y-axis. + @param bufferSize The size of the float array that will be mapped to the y-axis. + @warning The bufferSize is expected to be the same, constant value once initial triggered. For plots using OpenGL a vertex buffer object will be allocated with a maximum buffersize of (2 * the initial given buffer size) to account for any interpolation necessary for filling in the graph. Updates use the glBufferSubData(...) function, which will crash if the buffersize exceeds the initial maximum allocated size. + */ +-(void)updateBuffer:(float *)buffer withBufferSize:(UInt32)bufferSize; + +@end diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZPlot.m b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZPlot.m new file mode 100644 index 0000000..f8ac3b0 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZPlot.m @@ -0,0 +1,43 @@ +// +// EZPlot.m +// EZAudio +// +// Created by Syed Haris Ali on 11/24/13. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import "EZPlot.h" + +@implementation EZPlot + +#pragma mark - Clearing +-(void)clear +{ + // Override in subclass +} + +#pragma mark - Get Samples +-(void)updateBuffer:(float *)buffer + withBufferSize:(UInt32)bufferSize +{ + // Override in subclass +} + +@end diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZRecorder.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZRecorder.h new file mode 100644 index 0000000..95d5c05 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZRecorder.h @@ -0,0 +1,364 @@ +// +// EZRecorder.h +// EZAudio +// +// Created by Syed Haris Ali on 12/1/13. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import <Foundation/Foundation.h> +#import <AudioToolbox/AudioToolbox.h> + +@class EZRecorder; + +//------------------------------------------------------------------------------ +#pragma mark - Data Structures +//------------------------------------------------------------------------------ + +/** + To ensure valid recording formats are used when recording to a file the EZRecorderFileType describes the most common file types that a file can be encoded in. Each of these types can be used to output recordings as such: + + EZRecorderFileTypeAIFF - .aif, .aiff, .aifc, .aac + EZRecorderFileTypeM4A - .m4a, .mp4 + EZRecorderFileTypeWAV - .wav + + */ +typedef NS_ENUM(NSInteger, EZRecorderFileType) +{ + /** + Recording format that describes AIFF file types. These are uncompressed, LPCM files that are completely lossless, but are large in file size. + */ + EZRecorderFileTypeAIFF, + /** + Recording format that describes M4A file types. These are compressed, but yield great results especially when file size is an issue. + */ + EZRecorderFileTypeM4A, + /** + Recording format that describes WAV file types. These are uncompressed, LPCM files that are completely lossless, but are large in file size.
+ */ + EZRecorderFileTypeWAV +}; + +//------------------------------------------------------------------------------ +#pragma mark - EZRecorderDelegate +//------------------------------------------------------------------------------ + +/** + The EZRecorderDelegate for the EZRecorder provides a receiver for write events, `recorderUpdatedCurrentTime:`, and the close event, `recorderDidClose:`. + */ +@protocol EZRecorderDelegate + +@optional + +/** + Triggers when the EZRecorder is explicitly closed with the `closeAudioFile` method. + @param recorder The EZRecorder instance that triggered the action + */ +- (void)recorderDidClose:(EZRecorder *)recorder; + +/** + Triggers after the EZRecorder has successfully written audio data from the `appendDataFromBufferList:withBufferSize:` method. + @param recorder The EZRecorder instance that triggered the action + */ +- (void)recorderUpdatedCurrentTime:(EZRecorder *)recorder; + +@end + +//------------------------------------------------------------------------------ +#pragma mark - EZRecorder +//------------------------------------------------------------------------------ + +/** + The EZRecorder provides a flexible way to create an audio file and append raw audio data to it. The EZRecorder will convert the incoming audio on the fly to the destination format so no conversion is needed between this and any other component. Right now the only supported output format is 'caf'. Each output file should have its own EZRecorder instance (think 1 EZRecorder = 1 audio file). + */ +@interface EZRecorder : NSObject + +//------------------------------------------------------------------------------ +#pragma mark - Properties +//------------------------------------------------------------------------------ + +/** + An EZRecorderDelegate to listen for the write and close events. + */ +@property (nonatomic, weak) id delegate; + +//------------------------------------------------------------------------------ +#pragma mark - Initializers +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Initializers +///----------------------------------------------------------- + +/** + Creates an instance of the EZRecorder with a file path URL to write out the file to, a client format describing the in-application common format (see `clientFormat` for more info), and a file type (see `EZRecorderFileType`) that will automatically create an internal `fileFormat` and audio file type hint. + @param url An NSURL representing the file path the output file should be written + @param clientFormat An AudioStreamBasicDescription describing the in-applciation common format (always linear PCM) + @param fileType A constant described by the EZRecorderFileType that corresponds to the type of destination file that should be written. For instance, an AAC file written using an '.m4a' extension would correspond to EZRecorderFileTypeM4A. See EZRecorderFileType for all the constants and mapping combinations. + @return A newly created EZRecorder instance. 
+ */ +- (instancetype)initWithURL:(NSURL *)url + clientFormat:(AudioStreamBasicDescription)clientFormat + fileType:(EZRecorderFileType)fileType; + +//------------------------------------------------------------------------------ + +/** + Creates an instance of the EZRecorder with a file path URL to write out the file to, a client format describing the in-application common format (see `clientFormat` for more info), and a file type (see `EZRecorderFileType`) that will automatically create an internal `fileFormat` and audio file type hint, as well as a delegate to respond to the recorder's write and close events. + @param url An NSURL representing the file path the output file should be written + @param clientFormat An AudioStreamBasicDescription describing the in-applciation common format (always linear PCM) + @param fileType A constant described by the EZRecorderFileType that corresponds to the type of destination file that should be written. For instance, an AAC file written using an '.m4a' extension would correspond to EZRecorderFileTypeM4A. See EZRecorderFileType for all the constants and mapping combinations. + @param delegate An EZRecorderDelegate to listen for the recorder's write and close events. + @return A newly created EZRecorder instance. + */ +- (instancetype)initWithURL:(NSURL *)url + clientFormat:(AudioStreamBasicDescription)clientFormat + fileType:(EZRecorderFileType)fileType + delegate:(id)delegate; + +//------------------------------------------------------------------------------ + +/** + Creates an instance of the EZRecorder with a file path URL to write out the file to, a client format describing the in-application common format (see `clientFormat` for more info), a file format describing the destination format on disk (see `fileFormat` for more info), and an audio file type (an AudioFileTypeID for Core Audio, not a EZRecorderFileType). + @param url An NSURL representing the file path the output file should be written + @param clientFormat An AudioStreamBasicDescription describing the in-applciation common format (always linear PCM) + @param fileFormat An AudioStreamBasicDescription describing the format of the audio being written to disk (MP3, AAC, WAV, etc) + @param audioFileTypeID An AudioFileTypeID that matches your fileFormat (i.e. kAudioFileM4AType for an M4A format) + @return A newly created EZRecorder instance. + */ +- (instancetype)initWithURL:(NSURL *)url + clientFormat:(AudioStreamBasicDescription)clientFormat + fileFormat:(AudioStreamBasicDescription)fileFormat + audioFileTypeID:(AudioFileTypeID)audioFileTypeID; + +//------------------------------------------------------------------------------ + +/** + Creates an instance of the EZRecorder with a file path URL to write out the file to, a client format describing the in-application common format (see `clientFormat` for more info), a file format describing the destination format on disk (see `fileFormat` for more info), an audio file type (an AudioFileTypeID for Core Audio, not a EZRecorderFileType), and delegate to respond to the recorder's write and close events. + @param url An NSURL representing the file path the output file should be written + @param clientFormat An AudioStreamBasicDescription describing the in-applciation common format (always linear PCM) + @param fileFormat An AudioStreamBasicDescription describing the format of the audio being written to disk (MP3, AAC, WAV, etc) + @param audioFileTypeID An AudioFileTypeID that matches your fileFormat (i.e. 
kAudioFileM4AType for an M4A format) + @param delegate An EZRecorderDelegate to listen for the recorder's write and close events. + @return A newly created EZRecorder instance. + */ +- (instancetype)initWithURL:(NSURL *)url + clientFormat:(AudioStreamBasicDescription)clientFormat + fileFormat:(AudioStreamBasicDescription)fileFormat + audioFileTypeID:(AudioFileTypeID)audioFileTypeID + delegate:(id)delegate; + +//------------------------------------------------------------------------------ + +/** + Creates a new instance of an EZRecorder using a destination file path URL and the source format of the incoming audio. + @param url An NSURL specifying the file path location of where the audio file should be written to. + @param sourceFormat The AudioStreamBasicDescription for the incoming audio that will be written to the file. + @param destinationFileType A constant described by the EZRecorderFileType that corresponds to the type of destination file that should be written. For instance, an AAC file written using an '.m4a' extension would correspond to EZRecorderFileTypeM4A. See EZRecorderFileType for all the constants and mapping combinations. + @deprecated This property is deprecated starting in version 0.8.0. + @note Please use `initWithURL:clientFormat:fileType:` initializer instead. + @return The newly created EZRecorder instance. + */ +- (instancetype)initWithDestinationURL:(NSURL*)url + sourceFormat:(AudioStreamBasicDescription)sourceFormat + destinationFileType:(EZRecorderFileType)destinationFileType __attribute__((deprecated)); + +//------------------------------------------------------------------------------ +#pragma mark - Class Initializers +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Class Initializers +///----------------------------------------------------------- + +/** + Class method to create an instance of the EZRecorder with a file path URL to write out the file to, a client format describing the in-application common format (see `clientFormat` for more info), and a file type (see `EZRecorderFileType`) that will automatically create an internal `fileFormat` and audio file type hint. + @param url An NSURL representing the file path the output file should be written + @param clientFormat An AudioStreamBasicDescription describing the in-applciation common format (always linear PCM) + @param fileType A constant described by the EZRecorderFileType that corresponds to the type of destination file that should be written. For instance, an AAC file written using an '.m4a' extension would correspond to EZRecorderFileTypeM4A. See EZRecorderFileType for all the constants and mapping combinations. + @return A newly created EZRecorder instance. + */ ++ (instancetype)recorderWithURL:(NSURL *)url + clientFormat:(AudioStreamBasicDescription)clientFormat + fileType:(EZRecorderFileType)fileType; + +//------------------------------------------------------------------------------ + +/** + Class method to create an instance of the EZRecorder with a file path URL to write out the file to, a client format describing the in-application common format (see `clientFormat` for more info), and a file type (see `EZRecorderFileType`) that will automatically create an internal `fileFormat` and audio file type hint, as well as a delegate to respond to the recorder's write and close events. 
+ @param url An NSURL representing the file path the output file should be written + @param clientFormat An AudioStreamBasicDescription describing the in-applciation common format (always linear PCM) + @param fileType A constant described by the EZRecorderFileType that corresponds to the type of destination file that should be written. For instance, an AAC file written using an '.m4a' extension would correspond to EZRecorderFileTypeM4A. See EZRecorderFileType for all the constants and mapping combinations. + @param delegate An EZRecorderDelegate to listen for the recorder's write and close events. + @return A newly created EZRecorder instance. + */ ++ (instancetype)recorderWithURL:(NSURL *)url + clientFormat:(AudioStreamBasicDescription)clientFormat + fileType:(EZRecorderFileType)fileType + delegate:(id)delegate; + +//------------------------------------------------------------------------------ + +/** + Class method to create an instance of the EZRecorder with a file path URL to write out the file to, a client format describing the in-application common format (see `clientFormat` for more info), a file format describing the destination format on disk (see `fileFormat` for more info), and an audio file type (an AudioFileTypeID for Core Audio, not a EZRecorderFileType). + @param url An NSURL representing the file path the output file should be written + @param clientFormat An AudioStreamBasicDescription describing the in-applciation common format (always linear PCM) + @param fileFormat An AudioStreamBasicDescription describing the format of the audio being written to disk (MP3, AAC, WAV, etc) + @param audioFileTypeID An AudioFileTypeID that matches your fileFormat (i.e. kAudioFileM4AType for an M4A format) + @return A newly created EZRecorder instance. + */ ++ (instancetype)recorderWithURL:(NSURL *)url + clientFormat:(AudioStreamBasicDescription)clientFormat + fileFormat:(AudioStreamBasicDescription)fileFormat + audioFileTypeID:(AudioFileTypeID)audioFileTypeID; + +//------------------------------------------------------------------------------ + +/** + Class method to create an instance of the EZRecorder with a file path URL to write out the file to, a client format describing the in-application common format (see `clientFormat` for more info), a file format describing the destination format on disk (see `fileFormat` for more info), an audio file type (an AudioFileTypeID for Core Audio, not a EZRecorderFileType), and delegate to respond to the recorder's write and close events. + @param url An NSURL representing the file path the output file should be written + @param clientFormat An AudioStreamBasicDescription describing the in-applciation common format (always linear PCM) + @param fileFormat An AudioStreamBasicDescription describing the format of the audio being written to disk (MP3, AAC, WAV, etc) + @param audioFileTypeID An AudioFileTypeID that matches your fileFormat (i.e. kAudioFileM4AType for an M4A format) + @param delegate An EZRecorderDelegate to listen for the recorder's write and close events. + @return A newly created EZRecorder instance. 
+ */ ++ (instancetype)recorderWithURL:(NSURL *)url + clientFormat:(AudioStreamBasicDescription)clientFormat + fileFormat:(AudioStreamBasicDescription)fileFormat + audioFileTypeID:(AudioFileTypeID)audioFileTypeID + delegate:(id)delegate; + +//------------------------------------------------------------------------------ + +/** + Class method to create a new instance of an EZRecorder using a destination file path URL and the source format of the incoming audio. + @param url An NSURL specifying the file path location of where the audio file should be written to. + @param sourceFormat The AudioStreamBasicDescription for the incoming audio that will be written to the file (also called the `clientFormat`). + @param destinationFileType A constant described by the EZRecorderFileType that corresponds to the type of destination file that should be written. For instance, an AAC file written using an '.m4a' extension would correspond to EZRecorderFileTypeM4A. See EZRecorderFileType for all the constants and mapping combinations. + @return The newly created EZRecorder instance. + */ ++ (instancetype)recorderWithDestinationURL:(NSURL*)url + sourceFormat:(AudioStreamBasicDescription)sourceFormat + destinationFileType:(EZRecorderFileType)destinationFileType __attribute__((deprecated)); + +//------------------------------------------------------------------------------ +#pragma mark - Properties +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Getting The Recorder's Properties +///----------------------------------------------------------- + +/** + Provides the common AudioStreamBasicDescription that will be used for in-app interaction. The recorder's format will be converted from this format to the `fileFormat`. For instance, the file on disk could be a 22.5 kHz, float format, but we might have an audio processing graph that has a 44.1 kHz, signed integer format that we'd like to interact with. The client format lets us set that 44.1 kHz format on the recorder to properly write samples from the graph out to the file in the desired destination format. + @warning This must be a linear PCM format! + @return An AudioStreamBasicDescription structure describing the format of the audio file. + */ +@property (readwrite) AudioStreamBasicDescription clientFormat; +//------------------------------------------------------------------------------ + +/** + Provides the current write offset in the audio file as an NSTimeInterval (i.e. in seconds). When setting this it will determine the correct frame offset and perform a `seekToFrame` to the new time offset. + @warning Make sure the new current time offset is less than the `duration` or you will receive an invalid seek assertion. + */ +@property (readonly) NSTimeInterval currentTime; + +//------------------------------------------------------------------------------ + +/** + Provides the duration of the audio file in seconds. + */ +@property (readonly) NSTimeInterval duration; + +//------------------------------------------------------------------------------ + +/** + Provides the AudioStreamBasicDescription structure containing the format of the recorder's audio file. + @return An AudioStreamBasicDescription structure describing the format of the audio file. 
+ */ +@property (readonly) AudioStreamBasicDescription fileFormat; + +//------------------------------------------------------------------------------ + +/** + Provides the current time as an NSString with the time format MM:SS. + */ +@property (readonly) NSString *formattedCurrentTime; + +//------------------------------------------------------------------------------ + +/** + Provides the duration as an NSString with the time format MM:SS. + */ +@property (readonly) NSString *formattedDuration; + +//------------------------------------------------------------------------------ + +/** + Provides the frame index (a.k.a the write positon) within the audio file as SInt64. This can be helpful when seeking through the audio file. + @return The current frame index within the audio file as a SInt64. + */ +@property (readonly) SInt64 frameIndex; + +//------------------------------------------------------------------------------ + +/** + Provides the total frame count of the recorder's audio file in the file format. + @return The total number of frames in the recorder in the AudioStreamBasicDescription representing the file format as a SInt64. + */ +@property (readonly) SInt64 totalFrames; + +//------------------------------------------------------------------------------ + +/** + Provides the file path that's currently being used by the recorder. + @return The NSURL representing the file path of the recorder path being used for recording. + */ +- (NSURL *)url; + +//------------------------------------------------------------------------------ +#pragma mark - Events +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Appending Data To The Recorder +///----------------------------------------------------------- + +/** + Appends audio data to the tail of the output file from an AudioBufferList. + @param bufferList The AudioBufferList holding the audio data to append + @param bufferSize The size of each of the buffers in the buffer list. + */ +- (void)appendDataFromBufferList:(AudioBufferList *)bufferList + withBufferSize:(UInt32)bufferSize; + +//------------------------------------------------------------------------------ + +///----------------------------------------------------------- +/// @name Closing The Recorder +///----------------------------------------------------------- + +/** + Finishes writes to the recorder's audio file and closes it. + */ +- (void)closeAudioFile; + +@end \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZRecorder.m b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZRecorder.m new file mode 100644 index 0000000..494943e --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/EZRecorder.m @@ -0,0 +1,456 @@ +// +// EZRecorder.m +// EZAudio +// +// Created by Syed Haris Ali on 12/1/13. +// Copyright (c) 2015 Syed Haris Ali. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import "EZRecorder.h" +#import "EZAudioUtilities.h" + +//------------------------------------------------------------------------------ +#pragma mark - Data Structures +//------------------------------------------------------------------------------ + +typedef struct +{ + AudioFileTypeID audioFileTypeID; + ExtAudioFileRef extAudioFileRef; + AudioStreamBasicDescription clientFormat; + BOOL closed; + CFURLRef fileURL; + AudioStreamBasicDescription fileFormat; +} EZRecorderInfo; + +//------------------------------------------------------------------------------ +#pragma mark - EZRecorder (Interface Extension) +//------------------------------------------------------------------------------ + +@interface EZRecorder () +@property (nonatomic, assign) EZRecorderInfo *info; +@end + +//------------------------------------------------------------------------------ +#pragma mark - EZRecorder (Implementation) +//------------------------------------------------------------------------------ + +@implementation EZRecorder + +//------------------------------------------------------------------------------ +#pragma mark - Dealloc +//------------------------------------------------------------------------------ + +- (void)dealloc +{ + if (!self.info->closed) + { + [self closeAudioFile]; + } + free(self.info); +} + +//------------------------------------------------------------------------------ +#pragma mark - Initializers +//------------------------------------------------------------------------------ + +- (instancetype)initWithURL:(NSURL *)url + clientFormat:(AudioStreamBasicDescription)clientFormat + fileType:(EZRecorderFileType)fileType +{ + return [self initWithURL:url + clientFormat:clientFormat + fileType:fileType + delegate:nil]; +} + +//------------------------------------------------------------------------------ + +- (instancetype)initWithURL:(NSURL *)url + clientFormat:(AudioStreamBasicDescription)clientFormat + fileType:(EZRecorderFileType)fileType + delegate:(id)delegate +{ + AudioStreamBasicDescription fileFormat = [EZRecorder formatForFileType:fileType + withSourceFormat:clientFormat]; + AudioFileTypeID audioFileTypeID = [EZRecorder fileTypeIdForFileType:fileType + withSourceFormat:clientFormat]; + return [self initWithURL:url + clientFormat:clientFormat + fileFormat:fileFormat + audioFileTypeID:audioFileTypeID + delegate:delegate]; +} + 
+//------------------------------------------------------------------------------ + +- (instancetype)initWithURL:(NSURL *)url + clientFormat:(AudioStreamBasicDescription)clientFormat + fileFormat:(AudioStreamBasicDescription)fileFormat + audioFileTypeID:(AudioFileTypeID)audioFileTypeID +{ + return [self initWithURL:url + clientFormat:clientFormat + fileFormat:fileFormat + audioFileTypeID:audioFileTypeID + delegate:nil]; +} + +//------------------------------------------------------------------------------ + +- (instancetype)initWithURL:(NSURL *)url + clientFormat:(AudioStreamBasicDescription)clientFormat + fileFormat:(AudioStreamBasicDescription)fileFormat + audioFileTypeID:(AudioFileTypeID)audioFileTypeID + delegate:(id)delegate +{ + + self = [super init]; + if (self) + { + // Set defaults + self.info = (EZRecorderInfo *)calloc(1, sizeof(EZRecorderInfo)); + self.info->audioFileTypeID = audioFileTypeID; + self.info->fileURL = (__bridge CFURLRef)url; + self.info->clientFormat = clientFormat; + self.info->fileFormat = fileFormat; + self.delegate = delegate; + [self setup]; + } + return self; +} + +//------------------------------------------------------------------------------ + +- (instancetype)initWithDestinationURL:(NSURL*)url + sourceFormat:(AudioStreamBasicDescription)sourceFormat + destinationFileType:(EZRecorderFileType)destinationFileType +{ + return [self initWithURL:url + clientFormat:sourceFormat + fileType:destinationFileType]; +} + +//------------------------------------------------------------------------------ +#pragma mark - Class Initializers +//------------------------------------------------------------------------------ + ++ (instancetype)recorderWithURL:(NSURL *)url + clientFormat:(AudioStreamBasicDescription)clientFormat + fileType:(EZRecorderFileType)fileType +{ + return [[self alloc] initWithURL:url + clientFormat:clientFormat + fileType:fileType]; +} + +//------------------------------------------------------------------------------ + ++ (instancetype)recorderWithURL:(NSURL *)url + clientFormat:(AudioStreamBasicDescription)clientFormat + fileType:(EZRecorderFileType)fileType + delegate:(id)delegate +{ + return [[self alloc] initWithURL:url + clientFormat:clientFormat + fileType:fileType + delegate:delegate]; +} + +//------------------------------------------------------------------------------ + ++ (instancetype)recorderWithURL:(NSURL *)url + clientFormat:(AudioStreamBasicDescription)clientFormat + fileFormat:(AudioStreamBasicDescription)fileFormat + audioFileTypeID:(AudioFileTypeID)audioFileTypeID +{ + return [[self alloc] initWithURL:url + clientFormat:clientFormat + fileFormat:fileFormat + audioFileTypeID:audioFileTypeID]; +} + +//------------------------------------------------------------------------------ + ++ (instancetype)recorderWithURL:(NSURL *)url + clientFormat:(AudioStreamBasicDescription)clientFormat + fileFormat:(AudioStreamBasicDescription)fileFormat + audioFileTypeID:(AudioFileTypeID)audioFileTypeID + delegate:(id)delegate +{ + return [[self alloc] initWithURL:url + clientFormat:clientFormat + fileFormat:fileFormat + audioFileTypeID:audioFileTypeID + delegate:delegate]; +} + +//------------------------------------------------------------------------------ + ++ (instancetype)recorderWithDestinationURL:(NSURL*)url + sourceFormat:(AudioStreamBasicDescription)sourceFormat + destinationFileType:(EZRecorderFileType)destinationFileType +{ + return [[EZRecorder alloc] initWithDestinationURL:url + sourceFormat:sourceFormat + 
destinationFileType:destinationFileType]; +} + +//------------------------------------------------------------------------------ +#pragma mark - Class Methods +//------------------------------------------------------------------------------ + ++ (AudioStreamBasicDescription)formatForFileType:(EZRecorderFileType)fileType + withSourceFormat:(AudioStreamBasicDescription)sourceFormat +{ + AudioStreamBasicDescription asbd; + switch (fileType) + { + case EZRecorderFileTypeAIFF: + asbd = [EZAudioUtilities AIFFFormatWithNumberOfChannels:sourceFormat.mChannelsPerFrame + sampleRate:sourceFormat.mSampleRate]; + break; + case EZRecorderFileTypeM4A: + asbd = [EZAudioUtilities M4AFormatWithNumberOfChannels:sourceFormat.mChannelsPerFrame + sampleRate:sourceFormat.mSampleRate]; + break; + + case EZRecorderFileTypeWAV: + asbd = [EZAudioUtilities stereoFloatInterleavedFormatWithSampleRate:sourceFormat.mSampleRate]; + break; + + default: + asbd = [EZAudioUtilities stereoCanonicalNonInterleavedFormatWithSampleRate:sourceFormat.mSampleRate]; + break; + } + return asbd; +} + +//------------------------------------------------------------------------------ + ++ (AudioFileTypeID)fileTypeIdForFileType:(EZRecorderFileType)fileType + withSourceFormat:(AudioStreamBasicDescription)sourceFormat +{ + AudioFileTypeID audioFileTypeID; + switch (fileType) + { + case EZRecorderFileTypeAIFF: + audioFileTypeID = kAudioFileAIFFType; + break; + + case EZRecorderFileTypeM4A: + audioFileTypeID = kAudioFileM4AType; + break; + + case EZRecorderFileTypeWAV: + audioFileTypeID = kAudioFileWAVEType; + break; + + default: + audioFileTypeID = kAudioFileWAVEType; + break; + } + return audioFileTypeID; +} + +//------------------------------------------------------------------------------ + +- (void)setup +{ + // Finish filling out the destination format description + UInt32 propSize = sizeof(self.info->fileFormat); + [EZAudioUtilities checkResult:AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, + 0, + NULL, + &propSize, + &self.info->fileFormat) + operation:"Failed to fill out rest of destination format"]; + + // + // Create the audio file + // + [EZAudioUtilities checkResult:ExtAudioFileCreateWithURL(self.info->fileURL, + self.info->audioFileTypeID, + &self.info->fileFormat, + NULL, + kAudioFileFlags_EraseFile, + &self.info->extAudioFileRef) + operation:"Failed to create audio file"]; + + // + // Set the client format + // + [self setClientFormat:self.info->clientFormat]; +} + +//------------------------------------------------------------------------------ +#pragma mark - Events +//------------------------------------------------------------------------------ + +- (void)appendDataFromBufferList:(AudioBufferList *)bufferList + withBufferSize:(UInt32)bufferSize +{ + // + // Make sure the audio file is not closed + // + NSAssert(!self.info->closed, @"Cannot append data when EZRecorder has been closed. 
You must create a new instance.;"); + + // + // Perform the write + // + [EZAudioUtilities checkResult:ExtAudioFileWrite(self.info->extAudioFileRef, + bufferSize, + bufferList) + operation:"Failed to write audio data to recorded audio file"]; + + // + // Notify delegate + // + if ([self.delegate respondsToSelector:@selector(recorderUpdatedCurrentTime:)]) + { + [self.delegate recorderUpdatedCurrentTime:self]; + } +} + +//------------------------------------------------------------------------------ + +- (void)closeAudioFile +{ + if (!self.info->closed) + { + // + // Close, audio file can no longer be written to + // + [EZAudioUtilities checkResult:ExtAudioFileDispose(self.info->extAudioFileRef) + operation:"Failed to close audio file"]; + self.info->closed = YES; + + // + // Notify delegate + // + if ([self.delegate respondsToSelector:@selector(recorderDidClose:)]) + { + [self.delegate recorderDidClose:self]; + } + } +} + +//------------------------------------------------------------------------------ +#pragma mark - Getters +//------------------------------------------------------------------------------ + +- (AudioStreamBasicDescription)clientFormat +{ + return self.info->clientFormat; +} + +//----------------------------------------------------------------------------- + +- (NSTimeInterval)currentTime +{ + NSTimeInterval currentTime = 0.0; + NSTimeInterval duration = [self duration]; + if (duration != 0.0) + { + currentTime = (NSTimeInterval)[EZAudioUtilities MAP:(float)[self frameIndex] + leftMin:0.0f + leftMax:(float)[self totalFrames] + rightMin:0.0f + rightMax:duration]; + } + return currentTime; +} + +//------------------------------------------------------------------------------ + +- (NSTimeInterval)duration +{ + NSTimeInterval frames = (NSTimeInterval)[self totalFrames]; + return (NSTimeInterval) frames / self.info->fileFormat.mSampleRate; +} + +//------------------------------------------------------------------------------ + +- (AudioStreamBasicDescription)fileFormat +{ + return self.info->fileFormat; +} + +//------------------------------------------------------------------------------ + +- (NSString *)formattedCurrentTime +{ + return [EZAudioUtilities displayTimeStringFromSeconds:[self currentTime]]; +} + +//------------------------------------------------------------------------------ + +- (NSString *)formattedDuration +{ + return [EZAudioUtilities displayTimeStringFromSeconds:[self duration]]; +} + +//------------------------------------------------------------------------------ + +- (SInt64)frameIndex +{ + SInt64 frameIndex; + [EZAudioUtilities checkResult:ExtAudioFileTell(self.info->extAudioFileRef, + &frameIndex) + operation:"Failed to get frame index"]; + return frameIndex; +} + +//------------------------------------------------------------------------------ + +- (SInt64)totalFrames +{ + SInt64 totalFrames; + UInt32 propSize = sizeof(SInt64); + [EZAudioUtilities checkResult:ExtAudioFileGetProperty(self.info->extAudioFileRef, + kExtAudioFileProperty_FileLengthFrames, + &propSize, + &totalFrames) + operation:"Recorder failed to get total frames."]; + return totalFrames; +} + +//------------------------------------------------------------------------------ + +- (NSURL *)url +{ + return (__bridge NSURL*)self.info->fileURL; +} + +//------------------------------------------------------------------------------ +#pragma mark - Setters +//------------------------------------------------------------------------------ + +- 
(void)setClientFormat:(AudioStreamBasicDescription)clientFormat +{ + [EZAudioUtilities checkResult:ExtAudioFileSetProperty(self.info->extAudioFileRef, + kExtAudioFileProperty_ClientDataFormat, + sizeof(clientFormat), + &clientFormat) + operation:"Failed to set client format on recorded audio file"]; + self.info->clientFormat = clientFormat; +} + +@end \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/TPCircularBuffer.c b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/TPCircularBuffer.c new file mode 100644 index 0000000..1d85d6c --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/TPCircularBuffer.c @@ -0,0 +1,149 @@ +// +// TPCircularBuffer.c +// Circular/Ring buffer implementation +// +// https://github.com/michaeltyson/TPCircularBuffer +// +// Created by Michael Tyson on 10/12/2011. +// +// Copyright (C) 2012-2013 A Tasty Pixel +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// +// 3. This notice may not be removed or altered from any source distribution. +// + +#include "TPCircularBuffer.h" +#include +#include +#include + +#define reportResult(result,operation) (_reportResult((result),(operation),strrchr(__FILE__, '/')+1,__LINE__)) +static inline bool _reportResult(kern_return_t result, const char *operation, const char* file, int line) { + if ( result != ERR_SUCCESS ) { + printf("%s:%d: %s: %s\n", file, line, operation, mach_error_string(result)); + return false; + } + return true; +} + +bool _TPCircularBufferInit(TPCircularBuffer *buffer, int32_t length, size_t structSize) { + + assert(length > 0); + + if ( structSize != sizeof(TPCircularBuffer) ) { + fprintf(stderr, "TPCircularBuffer: Header version mismatch. Check for old versions of TPCircularBuffer in your project\n"); + abort(); + } + + // Keep trying until we get our buffer, needed to handle race conditions + int retries = 3; + while ( true ) { + + buffer->length = (int32_t)round_page(length); // We need whole page sizes + + // Temporarily allocate twice the length, so we have the contiguous address space to + // support a second instance of the buffer directly after + vm_address_t bufferAddress; + kern_return_t result = vm_allocate(mach_task_self(), + &bufferAddress, + buffer->length * 2, + VM_FLAGS_ANYWHERE); // allocate anywhere it'll fit + if ( result != ERR_SUCCESS ) { + if ( retries-- == 0 ) { + reportResult(result, "Buffer allocation"); + return false; + } + // Try again if we fail + continue; + } + + // Now replace the second half of the allocation with a virtual copy of the first half. Deallocate the second half... 
+ result = vm_deallocate(mach_task_self(), + bufferAddress + buffer->length, + buffer->length); + if ( result != ERR_SUCCESS ) { + if ( retries-- == 0 ) { + reportResult(result, "Buffer deallocation"); + return false; + } + // If this fails somehow, deallocate the whole region and try again + vm_deallocate(mach_task_self(), bufferAddress, buffer->length); + continue; + } + + // Re-map the buffer to the address space immediately after the buffer + vm_address_t virtualAddress = bufferAddress + buffer->length; + vm_prot_t cur_prot, max_prot; + result = vm_remap(mach_task_self(), + &virtualAddress, // mirror target + buffer->length, // size of mirror + 0, // auto alignment + 0, // force remapping to virtualAddress + mach_task_self(), // same task + bufferAddress, // mirror source + 0, // MAP READ-WRITE, NOT COPY + &cur_prot, // unused protection struct + &max_prot, // unused protection struct + VM_INHERIT_DEFAULT); + if ( result != ERR_SUCCESS ) { + if ( retries-- == 0 ) { + reportResult(result, "Remap buffer memory"); + return false; + } + // If this remap failed, we hit a race condition, so deallocate and try again + vm_deallocate(mach_task_self(), bufferAddress, buffer->length); + continue; + } + + if ( virtualAddress != bufferAddress+buffer->length ) { + // If the memory is not contiguous, clean up both allocated buffers and try again + if ( retries-- == 0 ) { + printf("Couldn't map buffer memory to end of buffer\n"); + return false; + } + + vm_deallocate(mach_task_self(), virtualAddress, buffer->length); + vm_deallocate(mach_task_self(), bufferAddress, buffer->length); + continue; + } + + buffer->buffer = (void*)bufferAddress; + buffer->fillCount = 0; + buffer->head = buffer->tail = 0; + buffer->atomic = true; + + return true; + } + return false; +} + +void TPCircularBufferCleanup(TPCircularBuffer *buffer) { + vm_deallocate(mach_task_self(), (vm_address_t)buffer->buffer, buffer->length * 2); + memset(buffer, 0, sizeof(TPCircularBuffer)); +} + +void TPCircularBufferClear(TPCircularBuffer *buffer) { + int32_t fillCount; + if ( TPCircularBufferTail(buffer, &fillCount) ) { + TPCircularBufferConsume(buffer, fillCount); + } +} + +void TPCircularBufferSetAtomic(TPCircularBuffer *buffer, bool atomic) { + buffer->atomic = atomic; +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/TPCircularBuffer.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/TPCircularBuffer.h new file mode 100644 index 0000000..b19bc6a --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/EZAudio/TPCircularBuffer.h @@ -0,0 +1,224 @@ +// +// TPCircularBuffer.h +// Circular/Ring buffer implementation +// +// https://github.com/michaeltyson/TPCircularBuffer +// +// Created by Michael Tyson on 10/12/2011. +// +// +// This implementation makes use of a virtual memory mapping technique that inserts a virtual copy +// of the buffer memory directly after the buffer's end, negating the need for any buffer wrap-around +// logic. Clients can simply use the returned memory address as if it were contiguous space. +// +// The implementation is thread-safe in the case of a single producer and single consumer. 
+// +// Virtual memory technique originally proposed by Philip Howard (http://vrb.slashusr.org/), and +// adapted to Darwin by Kurt Revis (http://www.snoize.com, +// http://www.snoize.com/Code/PlayBufferedSoundFile.tar.gz) +// +// +// Copyright (C) 2012-2013 A Tasty Pixel +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// +// 3. This notice may not be removed or altered from any source distribution. +// + +#ifndef TPCircularBuffer_h +#define TPCircularBuffer_h + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + void *buffer; + int32_t length; + int32_t tail; + int32_t head; + volatile int32_t fillCount; + bool atomic; +} TPCircularBuffer; + +/*! + * Initialise buffer + * + * Note that the length is advisory only: Because of the way the + * memory mirroring technique works, the true buffer length will + * be multiples of the device page size (e.g. 4096 bytes) + * + * @param buffer Circular buffer + * @param length Length of buffer + */ +#define TPCircularBufferInit(buffer, length) \ + _TPCircularBufferInit(buffer, length, sizeof(*buffer)) +bool _TPCircularBufferInit(TPCircularBuffer *buffer, int32_t length, size_t structSize); + +/*! + * Cleanup buffer + * + * Releases buffer resources. + */ +void TPCircularBufferCleanup(TPCircularBuffer *buffer); + +/*! + * Clear buffer + * + * Resets buffer to original, empty state. + * + * This is safe for use by consumer while producer is accessing + * buffer. + */ +void TPCircularBufferClear(TPCircularBuffer *buffer); + +/*! + * Set the atomicity + * + * If you set the atomiticy to false using this method, the buffer will + * not use atomic operations. This can be used to give the compiler a little + * more optimisation opportunities when the buffer is only used on one thread. + * + * Important note: Only set this to false if you know what you're doing! + * + * The default value is true (the buffer will use atomic operations) + * + * @param buffer Circular buffer + * @param atomic Whether the buffer is atomic (default true) + */ +void TPCircularBufferSetAtomic(TPCircularBuffer *buffer, bool atomic); + +// Reading (consuming) + +/*! + * Access end of buffer + * + * This gives you a pointer to the end of the buffer, ready + * for reading, and the number of available bytes to read. + * + * @param buffer Circular buffer + * @param availableBytes On output, the number of bytes ready for reading + * @return Pointer to the first bytes ready for reading, or NULL if buffer is empty + */ +static __inline__ __attribute__((always_inline)) void* TPCircularBufferTail(TPCircularBuffer *buffer, int32_t* availableBytes) { + *availableBytes = buffer->fillCount; + if ( *availableBytes == 0 ) return NULL; + return (void*)((char*)buffer->buffer + buffer->tail); +} + +/*! 
+ * Consume bytes in buffer + * + * This frees up the just-read bytes, ready for writing again. + * + * @param buffer Circular buffer + * @param amount Number of bytes to consume + */ +static __inline__ __attribute__((always_inline)) void TPCircularBufferConsume(TPCircularBuffer *buffer, int32_t amount) { + buffer->tail = (buffer->tail + amount) % buffer->length; + if ( buffer->atomic ) { + OSAtomicAdd32Barrier(-amount, &buffer->fillCount); + } else { + buffer->fillCount -= amount; + } + assert(buffer->fillCount >= 0); +} + +/*! + * Access front of buffer + * + * This gives you a pointer to the front of the buffer, ready + * for writing, and the number of available bytes to write. + * + * @param buffer Circular buffer + * @param availableBytes On output, the number of bytes ready for writing + * @return Pointer to the first bytes ready for writing, or NULL if buffer is full + */ +static __inline__ __attribute__((always_inline)) void* TPCircularBufferHead(TPCircularBuffer *buffer, int32_t* availableBytes) { + *availableBytes = (buffer->length - buffer->fillCount); + if ( *availableBytes == 0 ) return NULL; + return (void*)((char*)buffer->buffer + buffer->head); +} + +// Writing (producing) + +/*! + * Produce bytes in buffer + * + * This marks the given section of the buffer ready for reading. + * + * @param buffer Circular buffer + * @param amount Number of bytes to produce + */ +static __inline__ __attribute__((always_inline)) void TPCircularBufferProduce(TPCircularBuffer *buffer, int32_t amount) { + buffer->head = (buffer->head + amount) % buffer->length; + if ( buffer->atomic ) { + OSAtomicAdd32Barrier(amount, &buffer->fillCount); + } else { + buffer->fillCount += amount; + } + assert(buffer->fillCount <= buffer->length); +} + +/*! + * Helper routine to copy bytes to buffer + * + * This copies the given bytes to the buffer, and marks them ready for reading. + * + * @param buffer Circular buffer + * @param src Source buffer + * @param len Number of bytes in source buffer + * @return true if bytes copied, false if there was insufficient space + */ +static __inline__ __attribute__((always_inline)) bool TPCircularBufferProduceBytes(TPCircularBuffer *buffer, const void* src, int32_t len) { + int32_t space; + void *ptr = TPCircularBufferHead(buffer, &space); + if ( space < len ) return false; + memcpy(ptr, src, len); + TPCircularBufferProduce(buffer, len); + return true; +} + +/*! + * Deprecated method + */ +static __inline__ __attribute__((always_inline)) __deprecated_msg("use TPCircularBufferSetAtomic(false) and TPCircularBufferConsume instead") +void TPCircularBufferConsumeNoBarrier(TPCircularBuffer *buffer, int32_t amount) { + buffer->tail = (buffer->tail + amount) % buffer->length; + buffer->fillCount -= amount; + assert(buffer->fillCount >= 0); +} + +/*! 
+ * Deprecated method + */ +static __inline__ __attribute__((always_inline)) __deprecated_msg("use TPCircularBufferSetAtomic(false) and TPCircularBufferProduce instead") +void TPCircularBufferProduceNoBarrier(TPCircularBuffer *buffer, int32_t amount) { + buffer->head = (buffer->head + amount) % buffer->length; + buffer->fillCount += amount; + assert(buffer->fillCount <= buffer->length); +} + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/LICENSE b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/LICENSE new file mode 100644 index 0000000..34eac18 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +EZAudio +Copyright (c) 2013 Syed Haris Ali + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/README.md b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/README.md new file mode 100644 index 0000000..7734e67 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/EZAudio/README.md @@ -0,0 +1,1117 @@ +![alt text](http://i.imgur.com/ll5q68r.png "EZAudioLogo") + +A simple, intuitive audio framework for iOS and OSX. + +## Welcome to 1.0.0! +Thank you guys for being so patient over the last year - I've been working like crazy the last few weeks rewriting and extending the EZAudio core and interface components and squashing bugs. Finally, EZAudio is now at its 1.0.0 release with all new updated components, examples, and documentation. Happy coding! + +## Apps Using EZAudio +I'd really like to start creating a list of projects made using EZAudio. If you've used EZAudio to make something cool, whether it's an app or open source visualization or whatever, please email me at syedhali07[at]gmail.com and I'll add it to our wall of fame! +To start it off: +- [Detour](https://www.detour.com/) - Gorgeous location-aware audio walks +- [Jumpshare](https://jumpshare.com/) - Incredibly fast, real-time file sharing + +##Features + +**Awesome Components** + +I've designed six audio components and two interface components to allow you to immediately get your hands dirty recording, playing, and visualizing audio data. 
These components simply plug into each other and build on top of the high-performance, low-latency AudioUnits API and give you an easy-to-use API written in Objective-C instead of pure C. + +[EZAudioDevice](#EZAudioDevice) + +A useful class for getting all the current and available inputs/outputs on any Apple device. The `EZMicrophone` and `EZOutput` use this to direct sound in/out from different hardware components. + +[EZMicrophone](#EZMicrophone) + +A microphone class that provides its delegate audio data from the default device microphone with one line of code. + +[EZOutput](#EZOutput) + +An output class that will play back any audio it is provided by its datasource. + +[EZAudioFile](#EZAudioFile) + +An audio file class that reads/seeks through audio files and provides useful delegate callbacks. + +[EZAudioPlayer](#EZAudioPlayer) + +A replacement for `AVAudioPlayer` that combines an `EZAudioFile` and an `EZOutput` to perform robust playback of any file on any piece of hardware. + +[EZRecorder](#EZRecorder) + +A recorder class that provides a quick and easy way to write audio files from any datasource. + +[EZAudioPlot](#EZAudioPlot) + +A Core Graphics-based audio waveform plot capable of visualizing any float array as a buffer or rolling plot. + +[EZAudioPlotGL](#EZAudioPlotGL) + +An OpenGL-based, GPU-accelerated audio waveform plot capable of visualizing any float array as a buffer or rolling plot. + +**Cross Platform** + +`EZAudio` was designed to work transparently across all iOS and OSX devices. This means one universal API whether you're building for Mac or iOS. For instance, under the hood an `EZAudioPlot` knows that it will subclass a UIView for iOS or an NSView for OSX and the `EZMicrophone` knows to build on top of the RemoteIO AudioUnit for iOS, but defaults to the system defaults for input and output for OSX. + +##Examples & Docs + +Within this repo you'll find the examples for iOS and OSX to get you up to speed using each component and plugging them into each other. With just a few lines of code you'll be recording from the microphone, generating audio waveforms, and playing audio files like a boss. See the full Getting Started guide for an interactive look into each of the components. + +### Example Projects + +**_EZAudioCoreGraphicsWaveformExample_** + +![CoreGraphicsWaveformExampleGif](https://cloud.githubusercontent.com/assets/1275640/8516226/1eb885ec-2366-11e5-8d76-3a4b4d982eb0.gif) + +Shows how to use the `EZMicrophone` and `EZAudioPlot` to visualize the audio data from the microphone in real-time. The waveform can be displayed as a buffer or a rolling waveform plot (traditional waveform look). + +**_EZAudioOpenGLWaveformExample_** + +![OpenGLWaveformExampleGif](https://cloud.githubusercontent.com/assets/1275640/8516234/499f6fd2-2366-11e5-9771-7d0afae59391.gif) + +Shows how to use the `EZMicrophone` and `EZAudioPlotGL` to visualize the audio data from the microphone in real-time. The drawing uses OpenGL, so the performance is much better for plots needing a lot of points. + +**_EZAudioPlayFileExample_** + +![PlayFileExample](https://cloud.githubusercontent.com/assets/1275640/8516245/711ca232-2366-11e5-8d20-2538164f3307.gif) + +Shows how to use the `EZAudioPlayer` and `EZAudioPlotGL` to play back, pause, and seek through an audio file while displaying its waveform as a buffer or a rolling waveform plot.
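For a flavor of what that example wires up, here is a minimal, hypothetical playback sketch using the `EZAudioPlayer` and `EZAudioFile` APIs described under Core Components below; the view controller, resource name, and delegate wiring are placeholders rather than code taken from the example project:

```objectivec
#import "EZAudio.h"

// A stripped-down version of the play-file flow; the real example also drives an
// EZAudioPlotGL and adds pause/seek controls.
@interface PlayFileViewController : UIViewController <EZAudioPlayerDelegate>
@property (nonatomic, strong) EZAudioPlayer *player;
@end

@implementation PlayFileViewController

- (void)viewDidLoad
{
    [super viewDidLoad];

    // Open an audio file bundled with the app (placeholder resource name)
    NSURL *url = [[NSBundle mainBundle] URLForResource:@"example" withExtension:@"wav"];
    EZAudioFile *audioFile = [EZAudioFile audioFileWithURL:url];

    // Create the player with a delegate and start playback
    self.player = [EZAudioPlayer audioPlayerWithDelegate:self];
    [self.player playAudioFile:audioFile];
}

@end
```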
+ +**_EZAudioRecordWaveformExample_** + +![RecordWaveformExample](https://cloud.githubusercontent.com/assets/1275640/8516310/86da80f2-2367-11e5-84aa-aea25a439a76.gif) + +Shows how to use the `EZMicrophone`, `EZRecorder`, and `EZAudioPlotGL` to record the audio from the microphone input to a file while displaying the audio waveform of the incoming data. You can then playback the newly recorded audio file using AVFoundation and keep adding more audio data to the tail of the file. + +**_EZAudioWaveformFromFileExample_** + +![WaveformExample](https://cloud.githubusercontent.com/assets/1275640/8516597/f27240ea-236a-11e5-8ecd-68cf05b7ce40.gif) + +Shows how to use the `EZAudioFile` and `EZAudioPlot` to animate in an audio waveform for an entire audio file. + +**_EZAudioPassThroughExample_** + +![PassthroughExample](https://cloud.githubusercontent.com/assets/1275640/8516692/7abfbe36-236c-11e5-9d69-4f82956177b3.gif) + +Shows how to use the `EZMicrophone`, `EZOutput`, and the `EZAudioPlotGL` to pass the microphone input to the output for playback while displaying the audio waveform (as a buffer or rolling plot) in real-time. + +**_EZAudioFFTExample_** + +![FFTExample](https://cloud.githubusercontent.com/assets/1275640/8662077/5621705a-2971-11e5-88ed-9a865e422ade.gif) + +Shows how to calculate the real-time FFT of the audio data coming from the `EZMicrophone` and the Accelerate framework. The audio data is plotted using two `EZAudioPlots` for the time and frequency displays. + +### Documentation +The official documentation for EZAudio can be found here: http://cocoadocs.org/docsets/EZAudio/1.1.4/ +
You can also generate the docset yourself using appledoc by running appledoc on the EZAudio source folder. + +##
Getting Started +To begin using `EZAudio` you must first make sure you have the proper build requirements and frameworks. Below you'll find explanations of each component and code snippets to show how to use each to perform common tasks like getting microphone data, updating audio waveform plots, reading/seeking through audio files, and performing playback. + +###Build Requirements +**iOS** +- 6.0+ + +**OSX** +- 10.8+ + +###Frameworks +**iOS** +- Accelerate +- AudioToolbox +- AVFoundation +- GLKit + +**OSX** +- Accelerate +- AudioToolbox +- AudioUnit +- CoreAudio +- QuartzCore +- OpenGL +- GLKit + +###Adding To Project +You can add EZAudio to your project in a few ways:

1.) The easiest way to use EZAudio is via
Cocoapods. Simply add EZAudio to your Podfile like so: + +` +pod 'EZAudio', '~> 1.1.4' +` + +####Using EZAudio & The Amazing Audio Engine +If you're also using the Amazing Audio Engine then use the `EZAudio/Core` subspec like so: + +` +pod 'EZAudio/Core', '~> 1.1.4' +` + +2.) EZAudio now supports Carthage (thanks Andrew and Tommaso!). You can refer to Carthage's installation instructions for a how-to guide: +https://github.com/Carthage/Carthage + +3.) Alternatively, you can check out the iOS/Mac examples for how to set up a project using the EZAudio project as an embedded project and utilizing the frameworks. Be sure to set your header search path to the folder containing the EZAudio source. + +##Core Components + +`EZAudio` currently offers six audio components that encompass a wide range of functionality. In addition to the functional aspects of these components, such as pulling audio data, reading/writing from files, and performing playback, they also take special care to hook into the interface components to allow developers to display visual feedback (see the [Interface Components](#InterfaceComponents) below). + +###EZAudioDevice +Provides a simple interface for obtaining the current and all available inputs and outputs for any Apple device. For instance, the iPhone 6 has three microphones available for input, while on OSX you can choose the Built-In Microphone or any available HAL device on your system. Similarly, for iOS you can choose between a pair of connected headphones or the speaker, while on OSX you can choose from the Built-In Output, any available HAL device, or AirPlay. + +![EZAudioDeviceInputsExample](https://cloud.githubusercontent.com/assets/1275640/8535722/51e8f702-23fd-11e5-9f1c-8c45e80d19ef.gif) + +####Getting Input Devices +To get all the available input devices use the `inputDevices` class method: +```objectivec +NSArray *inputDevices = [EZAudioDevice inputDevices]; +``` + +or to just get the currently selected input device use the `currentInputDevice` method: +```objectivec +// On iOS this will default to the headset device or bottom microphone, while on OSX this will +// be your selected input device from the Sound preferences +EZAudioDevice *currentInputDevice = [EZAudioDevice currentInputDevice]; +``` + +####Getting Output Devices +Similarly, to get all the available output devices use the `outputDevices` class method: +```objectivec +NSArray *outputDevices = [EZAudioDevice outputDevices]; +``` + +or to just get the currently selected output device use the `currentOutputDevice` method: +```objectivec +// On iOS this will default to the headset speaker, while on OSX this will be your selected +// output device from the Sound preferences +EZAudioDevice *currentOutputDevice = [EZAudioDevice currentOutputDevice]; +``` + +###EZMicrophone
Provides access to the default device microphone in one line of code and provides delegate callbacks to receive the audio data as an AudioBufferList and float arrays.
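+ +Whichever object receives these delegate callbacks needs to adopt the `EZMicrophoneDelegate` protocol (the snippets below assume `self` does). A minimal sketch, assuming a hypothetical iOS view controller named `MyViewController` (the class name is illustrative only, not part of EZAudio; on OSX the base class would differ): +```objectivec +// Adopt EZMicrophoneDelegate so the microphone can hand this object its audio buffers. +@interface MyViewController : UIViewController <EZMicrophoneDelegate> +@property (nonatomic, strong) EZMicrophone *microphone; +@end +```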
+ +**_Relevant Example Projects_** +- EZAudioCoreGraphicsWaveformExample (iOS) +- EZAudioCoreGraphicsWaveformExample (OSX) +- EZAudioOpenGLWaveformExample (iOS) +- EZAudioOpenGLWaveformExample (OSX) +- EZAudioRecordExample (iOS) +- EZAudioRecordExample (OSX) +- EZAudioPassThroughExample (iOS) +- EZAudioPassThroughExample (OSX) +- EZAudioFFTExample (iOS) +- EZAudioFFTExample (OSX) + +####Creating A Microphone + +Create an `EZMicrophone` instance by declaring a property and initializing it like so: + +```objectivec +// Declare the EZMicrophone as a strong property +@property (nonatomic, strong) EZMicrophone *microphone; + +... + +// Initialize the microphone instance and assign it a delegate to receive the audio data +// callbacks +self.microphone = [EZMicrophone microphoneWithDelegate:self]; +``` +Alternatively, you could also use the shared `EZMicrophone` instance and just assign its `EZMicrophoneDelegate`. +```objectivec +// Assign a delegate to the shared instance of the microphone to receive the audio data +// callbacks +[EZMicrophone sharedMicrophone].delegate = self; +``` + +####Setting The Device +The `EZMicrophone` uses an `EZAudioDevice` instance to select what specific hardware destination it will use to pull audio data. You'd use this if you wanted to change the input device like in the EZAudioCoreGraphicsWaveformExample for [iOS](https://github.com/syedhali/EZAudio/tree/master/EZAudioExamples/iOS/EZAudioCoreGraphicsWaveformExample) or [OSX](https://github.com/syedhali/EZAudio/tree/master/EZAudioExamples/OSX/EZAudioCoreGraphicsWaveformExample). At any time you can change which input device is used by setting the device property: +```objectivec +NSArray *inputs = [EZAudioDevice inputDevices]; +[self.microphone setDevice:[inputs lastObject]]; +``` + +Anytime the `EZMicrophone` changes its device it will trigger the `EZMicrophoneDelegate` event: +```objectivec + +- (void)microphone:(EZMicrophone *)microphone changedDevice:(EZAudioDevice *)device +{ + // This is not always guaranteed to occur on the main thread so make sure you + // wrap it in a GCD block + dispatch_async(dispatch_get_main_queue(), ^{ + // Update UI here + NSLog(@"Changed input device: %@", device); + }); +} +``` +**Note: For iOS this can happen automatically if the AVAudioSession changes the current device.** + +####Getting Microphone Data + +To tell the microphone to start fetching audio use the `startFetchingAudio` function. + +```objectivec +// Starts fetching audio from the default device microphone and sends data to EZMicrophoneDelegate +[self.microphone startFetchingAudio]; +``` +Once the `EZMicrophone` has started it will send the `EZMicrophoneDelegate` the audio back in a few ways. +An array of float arrays: +```objectivec +/** + The microphone data represented as non-interleaved float arrays useful for: + - Creating real-time waveforms using EZAudioPlot or EZAudioPlotGL + - Creating any number of custom visualizations that utilize audio! + */ +-(void) microphone:(EZMicrophone *)microphone + hasAudioReceived:(float **)buffer + withBufferSize:(UInt32)bufferSize +withNumberOfChannels:(UInt32)numberOfChannels +{ + __weak typeof (self) weakSelf = self; + // Getting audio data as an array of float buffer arrays that can be fed into the + // EZAudioPlot, EZAudioPlotGL, or whatever visualization you would like to do with + // the microphone data. 
+ dispatch_async(dispatch_get_main_queue(),^{ + // Visualize this data brah, buffer[0] = left channel, buffer[1] = right channel + [weakSelf.audioPlot updateBuffer:buffer[0] withBufferSize:bufferSize]; + }); +} +``` +or the AudioBufferList representation: +```objectivec +/** + The microphone data represented as CoreAudio's AudioBufferList useful for: + - Appending data to an audio file via the EZRecorder + - Playback via the EZOutput + + */ +-(void) microphone:(EZMicrophone *)microphone + hasBufferList:(AudioBufferList *)bufferList + withBufferSize:(UInt32)bufferSize + withNumberOfChannels:(UInt32)numberOfChannels +{ + // Getting audio data as an AudioBufferList that can be directly fed into the EZRecorder + // or EZOutput. Say whattt... +} +``` +####Pausing/Resuming The Microphone + +Pause or resume fetching audio at any time like so: +```objectivec +// Stop fetching audio +[self.microphone stopFetchingAudio]; + +// Resume fetching audio +[self.microphone startFetchingAudio]; +``` + +Alternatively, you could also toggle the `microphoneOn` property (safe to use with Cocoa Bindings) +```objectivec +// Stop fetching audio +self.microphone.microphoneOn = NO; + +// Start fetching audio +self.microphone.microphoneOn = YES; +``` + +###EZOutput +Provides flexible playback to the default output device by asking the `EZOutputDataSource` for audio data to play. Doesn't care where the buffers come from (microphone, audio file, streaming audio, etc). As of 1.0.0 the `EZOutputDataSource` has been simplified to have only one method to provide audio data to your `EZOutput` instance. +```objectivec +// The EZOutputDataSource should fill out the audioBufferList with the given frame count. +// The timestamp is provided for sample accurate calculation, but for basic use cases can +// be ignored. +- (OSStatus) output:(EZOutput *)output + shouldFillAudioBufferList:(AudioBufferList *)audioBufferList + withNumberOfFrames:(UInt32)frames + timestamp:(const AudioTimeStamp *)timestamp; +``` + +**_Relevant Example Projects_** +- EZAudioPlayFileExample (iOS) +- EZAudioPlayFileExample (OSX) +- EZAudioPassThroughExample (iOS) +- EZAudioPassThroughExample (OSX) + +####Creating An Output + +Create an `EZOutput` by declaring a property and initializing it like so: + +```objectivec +// Declare the EZOutput as a strong property +@property (nonatomic, strong) EZOutput *output; +... + +// Initialize the EZOutput instance and assign it a delegate to provide the output audio data +self.output = [EZOutput outputWithDataSource:self]; +``` +Alternatively, you could also use the shared output instance and just assign it an `EZOutputDataSource` if you will only have one `EZOutput` instance for your application. +```objectivec +// Assign a delegate to the shared instance of the output to provide the output audio data +[EZOutput sharedOutput].delegate = self; +``` +####Setting The Device +The `EZOutput` uses an `EZAudioDevice` instance to select what specific hardware destination it will output audio to. You'd use this if you wanted to change the output device like in the [EZAudioPlayFileExample](https://github.com/syedhali/EZAudio/tree/master/EZAudioExamples/OSX/EZAudioPlayFileExample) for OSX. 
At any time you can change which output device is used by setting the `device` property: +```objectivec +// By default the EZOutput uses the default output device, but you can change this at any time +EZAudioDevice *currentOutputDevice = [EZAudioDevice currentOutputDevice]; +[self.output setDevice:currentOutputDevice]; +``` + +Anytime the `EZOutput` changes its device it will trigger the `EZOutputDelegate` event: +```objectivec +- (void)output:(EZOutput *)output changedDevice:(EZAudioDevice *)device +{ + NSLog(@"Change output device to: %@", device); +} +``` + +####Playing Audio + +#####Setting The Input Format + +When providing audio data the `EZOutputDataSource` will expect you to fill out the AudioBufferList provided with whatever `inputFormat` that is set on the `EZOutput`. By default the input format is a stereo, non-interleaved, float format (see [defaultInputFormat](http://cocoadocs.org/docsets/EZAudio/1.1.2/Classes/EZOutput.html#//api/name/defaultInputFormat) for more information). If you're dealing with a different input format (which is typically the case), just set the `inputFormat` property. For instance: +```objectivec +// Set a mono, float format with a sample rate of 44.1 kHz +AudioStreamBasicDescription monoFloatFormat = [EZAudioUtilities monoFloatFormatWithSampleRate:44100.0f]; +[self.output setInputFormat:monoFloatFormat]; +``` +#####Implementing the EZOutputDataSource + +An example of implementing the `EZOutputDataSource` is done internally in the `EZAudioPlayer` using an `EZAudioFile` to read audio from an audio file on disk like so: +```objectivec +- (OSStatus) output:(EZOutput *)output + shouldFillAudioBufferList:(AudioBufferList *)audioBufferList + withNumberOfFrames:(UInt32)frames + timestamp:(const AudioTimeStamp *)timestamp +{ + if (self.audioFile) + { + UInt32 bufferSize; // amount of frames actually read + BOOL eof; // end of file + [self.audioFile readFrames:frames + audioBufferList:audioBufferList + bufferSize:&bufferSize + eof:&eof]; + if (eof && [self.delegate respondsToSelector:@selector(audioPlayer:reachedEndOfAudioFile:)]) + { + [self.delegate audioPlayer:self reachedEndOfAudioFile:self.audioFile]; + } + if (eof && self.shouldLoop) + { + [self seekToFrame:0]; + } + else if (eof) + { + [self pause]; + [self seekToFrame:0]; + [[NSNotificationCenter defaultCenter] postNotificationName:EZAudioPlayerDidReachEndOfFileNotification + object:self]; + } + } + return noErr; +} +``` + +I created a sample project that uses the `EZOutput` to act as a signal generator to play sine, square, triangle, sawtooth, and noise waveforms. **Here's a snippet of code to generate a sine tone**: +```objectivec +... +double const SAMPLE_RATE = 44100.0; + +- (void)awakeFromNib +{ + // + // Create EZOutput to play audio data with mono format (EZOutput will convert + // this mono, float "inputFormat" to a clientFormat, i.e. the stereo output format). 
+ // + AudioStreamBasicDescription inputFormat = [EZAudioUtilities monoFloatFormatWithSampleRate:SAMPLE_RATE]; + self.output = [EZOutput outputWithDataSource:self inputFormat:inputFormat]; + [self.output setDelegate:self]; + self.frequency = 200.0; + self.sampleRate = SAMPLE_RATE; + self.amplitude = 0.80; +} + +- (OSStatus) output:(EZOutput *)output + shouldFillAudioBufferList:(AudioBufferList *)audioBufferList + withNumberOfFrames:(UInt32)frames + timestamp:(const AudioTimeStamp *)timestamp +{ + Float32 *buffer = (Float32 *)audioBufferList->mBuffers[0].mData; + size_t bufferByteSize = (size_t)audioBufferList->mBuffers[0].mDataByteSize; + double theta = self.theta; + double frequency = self.frequency; + double thetaIncrement = 2.0 * M_PI * frequency / SAMPLE_RATE; + if (self.type == GeneratorTypeSine) + { + for (UInt32 frame = 0; frame < frames; frame++) + { + buffer[frame] = self.amplitude * sin(theta); + theta += thetaIncrement; + if (theta > 2.0 * M_PI) + { + theta -= 2.0 * M_PI; + } + } + self.theta = theta; + } + else if (... other shapes in full source) +} +``` + +For the full implementation of the square, triangle, sawtooth, and noise functions here: (https://github.com/syedhali/SineExample/blob/master/SineExample/GeneratorViewController.m#L220-L305) + +Once the `EZOutput` has started it will send the `EZOutputDelegate` the audio back as float arrays for visualizing. These are converted inside the `EZOutput` component from whatever input format you may have provided. For instance, if you provide an interleaved, signed integer AudioStreamBasicDescription for the `inputFormat` property then that will be automatically converted to a stereo, non-interleaved, float format when sent back in the delegate `playedAudio:...` method below: +An array of float arrays: +```objectivec +/** + The output data represented as non-interleaved float arrays useful for: + - Creating real-time waveforms using EZAudioPlot or EZAudioPlotGL + - Creating any number of custom visualizations that utilize audio! + */ +- (void) output:(EZOutput *)output + playedAudio:(float **)buffer + withBufferSize:(UInt32)bufferSize + withNumberOfChannels:(UInt32)numberOfChannels +{ + __weak typeof (self) weakSelf = self; + dispatch_async(dispatch_get_main_queue(), ^{ + // Update plot, buffer[0] = left channel, buffer[1] = right channel + }); +} +``` + +####Pausing/Resuming The Output +Pause or resume the output component at any time like so: +```objectivec +// Stop fetching audio +[self.output stopPlayback]; + +// Resume fetching audio +[self.output startPlayback]; +``` + +####Chaining Audio Unit Effects +Internally the `EZOutput` is using an AUGraph to chain together a converter, mixer, and output audio units. You can hook into this graph by subclassing `EZOutput` and implementing the method: +```objectivec +// By default this method connects the AUNode representing the input format converter to +// the mixer node. In subclasses you can add effects in the chain between the converter +// and mixer by creating additional AUNodes, adding them to the AUGraph provided below, +// and then connecting them together. 
+- (OSStatus)connectOutputOfSourceNode:(AUNode)sourceNode + sourceNodeOutputBus:(UInt32)sourceNodeOutputBus + toDestinationNode:(AUNode)destinationNode + destinationNodeInputBus:(UInt32)destinationNodeInputBus + inGraph:(AUGraph)graph; +``` + +This was inspired by the audio processing graph from CocoaLibSpotify (Daniel Kennett of Spotify has [an excellent blog post](http://ikennd.ac/blog/2012/04/augraph-basics-in-cocoalibspotify/) explaining how to add an EQ to the CocoaLibSpotify AUGraph). + +Here's an example of how to add a delay audio unit (`kAudioUnitSubType_Delay`): +```objectivec +// In interface, declare delay node info property +@property (nonatomic, assign) EZAudioNodeInfo *delayNodeInfo; + +// In implementation, overwrite the connection method +- (OSStatus)connectOutputOfSourceNode:(AUNode)sourceNode + sourceNodeOutputBus:(UInt32)sourceNodeOutputBus + toDestinationNode:(AUNode)destinationNode + destinationNodeInputBus:(UInt32)destinationNodeInputBus + inGraph:(AUGraph)graph +{ + self.delayNodeInfo = (EZAudioNodeInfo *)malloc(sizeof(EZAudioNodeInfo)); + + // A description for the time/pitch shifter Device + AudioComponentDescription delayComponentDescription; + delayComponentDescription.componentType = kAudioUnitType_Effect; + delayComponentDescription.componentSubType = kAudioUnitSubType_Delay; + delayComponentDescription.componentManufacturer = kAudioUnitManufacturer_Apple; + delayComponentDescription.componentFlags = 0; + delayComponentDescription.componentFlagsMask = 0; + + [EZAudioUtilities checkResult:AUGraphAddNode(graph, + &delayComponentDescription, + &self.delayNodeInfo->node) + operation:"Failed to add node for time shift"]; + + // Get the time/pitch shifter Audio Unit from the node + [EZAudioUtilities checkResult:AUGraphNodeInfo(graph, + self.delayNodeInfo->node, + NULL, + &self.delayNodeInfo->audioUnit) + operation:"Failed to get audio unit for delay node"]; + + // connect the output of the input source node to the input of the time/pitch shifter node + [EZAudioUtilities checkResult:AUGraphConnectNodeInput(graph, + sourceNode, + sourceNodeOutputBus, + self.delayNodeInfo->node, + 0) + operation:"Failed to connect source node into delay node"]; + + // connect the output of the time/pitch shifter node to the input of the destination node, thus completing the chain. + [EZAudioUtilities checkResult:AUGraphConnectNodeInput(graph, + self.delayNodeInfo->node, + 0, + destinationNode, + destinationNodeInputBus) + operation:"Failed to connect delay to destination node"]; + return noErr; +} + +// Clean up +- (void)dealloc +{ + free(self.delayNodeInfo); +} +``` + +###EZAudioFile +Provides simple read/seek operations, pulls waveform amplitude data, and provides the `EZAudioFileDelegate` to notify of any read/seek action occuring on the `EZAudioFile`. This can be thought of as the NSImage/UIImage equivalent of the audio world. + +**_Relevant Example Projects_** +- EZAudioWaveformFromFileExample (iOS) +- EZAudioWaveformFromFileExample (OSX) + +####Opening An Audio File +To open an audio file create a new instance of the `EZAudioFile` class. +```objectivec +// Declare the EZAudioFile as a strong property +@property (nonatomic, strong) EZAudioFile *audioFile; + +... 
+ +// Initialize the EZAudioFile instance and assign it a delegate to receive the read/seek callbacks +self.audioFile = [EZAudioFile audioFileWithURL:[NSURL fileURLWithPath:@"/path/to/your/file"] delegate:self]; +``` + +####Getting Waveform Data + +The EZAudioFile allows you to quickly fetch waveform data from an audio file with as much or as little detail as you'd like. +```objectivec +__weak typeof (self) weakSelf = self; +// Get a waveform with 1024 points of data. We can adjust the number of points to whatever level +// of detail is needed by the application +[self.audioFile getWaveformDataWithNumberOfPoints:1024 + completionBlock:^(float **waveformData, + int length) +{ + [weakSelf.audioPlot updateBuffer:waveformData[0] + withBufferSize:length]; +}]; +``` + +####Reading From An Audio File + +Reading audio data from a file requires you to create an AudioBufferList to hold the data. The `EZAudioUtilities` method `audioBufferListWithNumberOfFrames:numberOfChannels:interleaved:` provides a convenient way to get an allocated AudioBufferList to use. There is also a utility method, `freeBufferList:`, to use to free (or release) the AudioBufferList when you are done using that audio data. + +**Note: You have to free the AudioBufferList, even in ARC.** +```objectivec +// Allocate an AudioBufferList to hold the audio data (the client format is the non-compressed +// in-app format that is used for reading, it's different from the file format, which is usually +// something compressed like an mp3 or m4a) +AudioStreamBasicDescription clientFormat = [self.audioFile clientFormat]; +UInt32 numberOfFramesToRead = 512; +UInt32 channels = clientFormat.mChannelsPerFrame; +BOOL isInterleaved = [EZAudioUtilities isInterleaved:clientFormat]; +AudioBufferList *bufferList = [EZAudioUtilities audioBufferListWithNumberOfFrames:numberOfFramesToRead + numberOfChannels:channels + interleaved:isInterleaved]; + +// Read the frames from the EZAudioFile into the AudioBufferList +UInt32 framesRead; +BOOL isEndOfFile; +[self.audioFile readFrames:numberOfFramesToRead + audioBufferList:bufferList + bufferSize:&framesRead + eof:&isEndOfFile]; +``` + +When a read occurs the `EZAudioFileDelegate` receives two events. + +An event notifying the delegate of the read audio data as float arrays: +```objectivec +-(void) audioFile:(EZAudioFile *)audioFile + readAudio:(float **)buffer + withBufferSize:(UInt32)bufferSize + withNumberOfChannels:(UInt32)numberOfChannels +{ + __weak typeof (self) weakSelf = self; + dispatch_async(dispatch_get_main_queue(), ^{ + [weakSelf.audioPlot updateBuffer:buffer[0] + withBufferSize:bufferSize]; + }); +} +``` +and an event notifying the delegate of the new frame position within the `EZAudioFile`: +```objectivec +-(void)audioFile:(EZAudioFile *)audioFile updatedPosition:(SInt64)framePosition +{ + __weak typeof (self) weakSelf = self; + dispatch_async(dispatch_get_main_queue(), ^{ + // Update UI + }); +} +``` + +####Seeking Through An Audio File + +You can seek very easily through an audio file using the `EZAudioFile`'s `seekToFrame:` method. The `EZAudioFile` also provides a `totalFrames` method to give you the total number of frames in an audio file so you can calculate a proper offset.
+```objectivec +// Get the total number of frames for the audio file +SInt64 totalFrames = [self.audioFile totalFrames]; + +// Seeks halfway through the audio file +[self.audioFile seekToFrame:(totalFrames/2)]; + +// Alternatively, you can seek using seconds +NSTimeInterval duration = [self.audioFile duration]; +[self.audioFile setCurrentTime:duration/2.0]; +``` +When a seek occurs the `EZAudioFileDelegate` receives the seek event: +```objectivec +-(void)audioFile:(EZAudioFile *)audioFile updatedPosition:(SInt64)framePosition +{ + __weak typeof (self) weakSelf = self; + dispatch_async(dispatch_get_main_queue(), ^{ + // Update UI + }); +} +``` + +###EZAudioPlayer +Provides a class that combines the `EZAudioFile` and `EZOutput` for file playback of all Core Audio supported formats to any hardware device. Because the `EZAudioPlayer` internally hooks into the `EZAudioFileDelegate` and `EZOutputDelegate`, you should implement the `EZAudioPlayerDelegate` to receive the `playedAudio:...` and `updatedPosition:` events. The EZAudioPlayFileExample projects for [iOS](https://github.com/syedhali/EZAudio/tree/master/EZAudioExamples/iOS/EZAudioPlayFileExample) and [OSX](https://github.com/syedhali/EZAudio/tree/master/EZAudioExamples/OSX/EZAudioPlayFileExample) show how to use the `EZAudioPlayer` to play audio files, visualize the samples with an audio plot, adjust the volume, and change the output device using the `EZAudioDevice` class. The `EZAudioPlayer` primarily uses `NSNotificationCenter` to post notifications because you often have one audio player and multiple UI elements that need to listen for player events to properly update. + +####Creating An Audio Player +```objectivec +// Declare the EZAudioPlayer as a strong property +@property (nonatomic, strong) EZAudioPlayer *player; + +... + +// Create an EZAudioPlayer with a delegate that conforms to EZAudioPlayerDelegate +self.player = [EZAudioPlayer audioPlayerWithDelegate:self]; +``` + +####Playing An Audio File +The `EZAudioPlayer` uses an internal `EZAudioFile` to provide data to its `EZOutput` for output via the `EZOutputDataSource`. You can provide an `EZAudioFile` by just setting the `audioFile` property on the `EZAudioPlayer`, which will make a copy of the `EZAudioFile` at that file path URL for its own use. +```objectivec +// Set the EZAudioFile for playback by setting the `audioFile` property +EZAudioFile *audioFile = [EZAudioFile audioFileWithURL:[NSURL fileURLWithPath:@"/path/to/your/file"]]; +[self.player setAudioFile:audioFile]; + +// This, however, will not pause playback if a current file is playing. Instead +// it's encouraged to use `playAudioFile:` if you're swapping in a new +// audio file while playback is already running +EZAudioFile *audioFile = [EZAudioFile audioFileWithURL:[NSURL fileURLWithPath:@"/path/to/your/file"]]; +[self.player playAudioFile:audioFile]; +``` + +As audio is played the `EZAudioPlayerDelegate` will receive the `playedAudio:...`, `updatedPosition:...`, and, if the audio file reaches the end of the file, the `reachedEndOfAudioFile:` events.
A typical implementation of the `EZAudioPlayerDelegate` would be something like: +```objectivec +- (void) audioPlayer:(EZAudioPlayer *)audioPlayer + playedAudio:(float **)buffer + withBufferSize:(UInt32)bufferSize + withNumberOfChannels:(UInt32)numberOfChannels + inAudioFile:(EZAudioFile *)audioFile +{ + __weak typeof (self) weakSelf = self; + // Update an EZAudioPlot or EZAudioPlotGL to reflect the audio data coming out + // of the EZAudioPlayer (post volume and pan) + dispatch_async(dispatch_get_main_queue(), ^{ + [weakSelf.audioPlot updateBuffer:buffer[0] + withBufferSize:bufferSize]; + }); +} + +//------------------------------------------------------------------------------ + +- (void)audioPlayer:(EZAudioPlayer *)audioPlayer + updatedPosition:(SInt64)framePosition + inAudioFile:(EZAudioFile *)audioFile +{ + __weak typeof (self) weakSelf = self; + // Update any UI controls including sliders and labels + // display current time/duration + dispatch_async(dispatch_get_main_queue(), ^{ + if (!weakSelf.positionSlider.highlighted) + { + weakSelf.positionSlider.floatValue = (float)framePosition; + weakSelf.positionLabel.integerValue = framePosition; + } + }); +} +``` + +####Seeking +You can seek through the audio file in a similar fashion as with the `EZAudioFile`. That is, using the `seekToFrame:` or `currentTime` property. +```objectivec +// Get the total number of frames and seek halfway +SInt64 totalFrames = [self.player totalFrames]; +[self.player seekToFrame:(totalFrames/2)]; + +// Alternatively, you can seek using seconds +NSTimeInterval duration = [self.player duration]; +[self.player setCurrentTime:duration/2.0]; +``` + +####Setting Playback Parameters +Because the `EZAudioPlayer` wraps the `EZOutput` you can adjust the volume and pan parameters for playback. +```objectivec +// Make it half as loud, 0 = silence, 1 = full volume. Default is 1. +[self.player setVolume:0.5]; + +// Make it only play on the left, -1 = left, 1 = right. Default is 0.0 (center) +[self.player setPan:-1.0]; +``` + +####Getting Audio File Parameters +The `EZAudioPlayer` wraps the `EZAudioFile` and provides a high level interface for pulling values like current time, duration, the frame index, total frames, etc. +```objectivec +NSTimeInterval currentTime = [self.player currentTime]; +NSTimeInterval duration = [self.player duration]; +NSString *formattedCurrentTime = [self.player formattedCurrentTime]; // MM:SS formatted +NSString *formattedDuration = [self.player formattedDuration]; // MM:SS formatted +SInt64 frameIndex = [self.player frameIndex]; +SInt64 totalFrames = [self.player totalFrames]; +``` + +In addition, the `EZOutput` properties are also offered at a high level as well: +```objectivec +EZAudioDevice *outputDevice = [self.player device]; +BOOL isPlaying = [self.player isPlaying]; +float pan = [self.player pan]; +float volume = [self.player volume]; +``` + +####Notifications +The `EZAudioPlayer` provides the following notifications (as of 1.1.2): +```objectivec +/** + Notification that occurs whenever the EZAudioPlayer changes its `audioFile` property. Check the new value using the EZAudioPlayer's `audioFile` property. + */ +FOUNDATION_EXPORT NSString * const EZAudioPlayerDidChangeAudioFileNotification; + +/** + Notification that occurs whenever the EZAudioPlayer changes its `device` property. Check the new value using the EZAudioPlayer's `device` property. 
+ */ +FOUNDATION_EXPORT NSString * const EZAudioPlayerDidChangeOutputDeviceNotification; + +/** + Notification that occurs whenever the EZAudioPlayer changes its `output` component's `pan` property. Check the new value using the EZAudioPlayer's `pan` property. + */ +FOUNDATION_EXPORT NSString * const EZAudioPlayerDidChangePanNotification; + +/** + Notification that occurs whenever the EZAudioPlayer changes its `output` component's play state. Check the new value using the EZAudioPlayer's `isPlaying` property. + */ +FOUNDATION_EXPORT NSString * const EZAudioPlayerDidChangePlayStateNotification; + +/** + Notification that occurs whenever the EZAudioPlayer changes its `output` component's `volume` property. Check the new value using the EZAudioPlayer's `volume` property. + */ +FOUNDATION_EXPORT NSString * const EZAudioPlayerDidChangeVolumeNotification; + +/** + Notification that occurs whenever the EZAudioPlayer has reached the end of a file and its `shouldLoop` property has been set to NO. + */ +FOUNDATION_EXPORT NSString * const EZAudioPlayerDidReachEndOfFileNotification; + +/** + Notification that occurs whenever the EZAudioPlayer performs a seek via the `seekToFrame` method or `setCurrentTime:` property setter. Check the new `currentTime` or `frameIndex` value using the EZAudioPlayer's `currentTime` or `frameIndex` property, respectively. + */ +FOUNDATION_EXPORT NSString * const EZAudioPlayerDidSeekNotification; +``` + +###EZRecorder +Provides a way to record any audio source to an audio file. This hooks into the other components quite nicely to do something like plot the audio waveform while recording to give visual feedback as to what is happening. The `EZRecorderDelegate` provides methods to listen to write events and a final close event on the `EZRecorder` (explained [below](#EZRecorderDelegateExplanation)). + +*Relevant Example Projects* +- EZAudioRecordExample (iOS) +- EZAudioRecordExample (OSX) + +####Creating A Recorder + +To create an `EZRecorder` you must provide at least 3 things: an NSURL representing the file path of where the audio file should be written to (an existing file will be overwritten), a `clientFormat` representing the format in which you will be providing the audio data, and either an `EZRecorderFileType` or an `AudioStreamBasicDescription` representing the file format of the audio data on disk. + +```objectivec +// Provide a file path url to write to, a client format (always linear PCM, this is the format +// coming from another component like the EZMicrophone's audioStreamBasicDescription property), +// and a EZRecorderFileType constant representing either a wav (EZRecorderFileTypeWAV), +// aiff (EZRecorderFileTypeAIFF), or m4a (EZRecorderFileTypeM4A) file format. The advantage of +// this is that the `fileFormat` property will be automatically filled out for you. ++ (instancetype)recorderWithURL:(NSURL *)url + clientFormat:(AudioStreamBasicDescription)clientFormat + fileType:(EZRecorderFileType)fileType; + +// Alternatively, you can provide a file path url to write to, a client format (always linear +// PCM, this is the format coming from another component like the EZMicrophone's +// audioStreamBasicDescription property), a `fileFormat` representing your custom +// AudioStreamBasicDescription, and an AudioFileTypeID that corresponds with your `fileFormat`. 
++ (instancetype)recorderWithURL:(NSURL *)url + clientFormat:(AudioStreamBasicDescription)clientFormat + fileFormat:(AudioStreamBasicDescription)fileFormat + audioFileTypeID:(AudioFileTypeID)audioFileTypeID; + +``` + +Start by declaring an instance of the EZRecorder (you will have one of these per audio file written out) +```objectivec +// Declare the EZRecorder as a strong property +@property (nonatomic, strong) EZRecorder *recorder; +``` + +and initialize it using one of the two initializers from above. For instance, using the `EZRecorderFileType` shortcut initializer you could create an instance like so: +```objectivec +// Example using an EZMicrophone and a file path location on your computer to write out an M4A file. +self.recorder = [EZRecorder recorderWithURL:[NSURL fileURLWithPath:@"/path/to/your/file.m4a"] + clientFormat:[self.microphone audioStreamBasicDescription] + fileType:EZRecorderFileTypeM4A]; +``` + +or to configure your own custom file format, say to write out an 8000 Hz iLBC file: +```objectivec +// Example using an EZMicrophone, a file path location on your computer, and an iLBC file format. +AudioStreamBasicDescription iLBCFormat = [EZAudioUtilities iLBCFormatWithSampleRate:8000]; +self.recorder = [EZRecorder recorderWithURL:[NSURL fileURLWithPath:@"/path/to/your/file.caf"] + clientFormat:[self.microphone audioStreamBasicDescription] + fileFormat:iLBCFormat + audioFileTypeID:kAudioFileCAFType]; +``` + +####Recording Some Audio + +Once you've initialized your `EZRecorder` you can append data by passing in an AudioBufferList and its buffer size like so: +```objectivec +// Append the microphone data coming as an AudioBufferList with the specified buffer size +// to the recorder +-(void) microphone:(EZMicrophone *)microphone + hasBufferList:(AudioBufferList *)bufferList + withBufferSize:(UInt32)bufferSize + withNumberOfChannels:(UInt32)numberOfChannels +{ + // Getting audio data as a buffer list that can be directly fed into the EZRecorder. This is + // happening on the audio thread - any UI updating needs a GCD main queue block. + if (self.isRecording) + { + // Since we set the recorder's client format to be that of the EZMicrophone instance, + // the audio data coming in represented by the AudioBufferList can directly be provided + // to the EZRecorder. The EZRecorder will internally convert the audio data from the + // `clientFormat` to `fileFormat`. + [self.recorder appendDataFromBufferList:bufferList + withBufferSize:bufferSize]; + } +} +``` + +#####Responding to an EZRecorder after it has written audio data + +Once audio data has been successfully written with the `EZRecorder` it will notify the `EZRecorderDelegate` of the event so it can respond via: +```objectivec +// Triggers after the EZRecorder's `appendDataFromBufferList:withBufferSize:` method is called +// so you can update your interface accordingly. +- (void)recorderUpdatedCurrentTime:(EZRecorder *)recorder +{ + __weak typeof (self) weakSelf = self; + // This will get triggered on the thread that the write occurred on so be sure to wrap your UI + // updates in a GCD main queue block! However, I highly recommend you first pull the values + // you'd like to update the interface with before entering the GCD block to avoid trying to + // fetch a value after the audio file has been closed.
+ NSString *formattedCurrentTime = [recorder formattedCurrentTime]; // MM:SS formatted + dispatch_async(dispatch_get_main_queue(), ^{ + // Update label + weakSelf.currentTimeLabel.stringValue = formattedCurrentTime; + }); +} +``` + +####Closing An Audio File +When your recording is done, be sure to call the `closeAudioFile` method to make sure the audio file written to disk is properly closed before you attempt to read it again. + +```objectivec +// Close the EZRecorder's audio file BEFORE reading +[self.recorder closeAudioFile]; +``` + +This will trigger the EZRecorder's delegate method: +```objectivec +- (void)recorderDidClose:(EZRecorder *)recorder +{ + recorder.delegate = nil; +} +``` + +##Interface Components +`EZAudio` currently offers two drop-in audio waveform components that help simplify the process of visualizing audio. + +###EZAudioPlot +Provides an audio waveform plot that uses CoreGraphics to perform the drawing. On iOS this is a subclass of UIView while on OSX this is a subclass of NSView. As of the 1.0.0 release, the waveforms are drawn using CALayers where compositing is done on the GPU. As a result, there have been some huge performance gains and CPU usage per real-time (i.e. 60 frames per second redrawing) plot is now about 2-3% CPU as opposed to the 20-30% we were experiencing before. + +*Relevant Example Projects* +- EZAudioCoreGraphicsWaveformExample (iOS) +- EZAudioCoreGraphicsWaveformExample (OSX) +- EZAudioRecordExample (iOS) +- EZAudioRecordExample (OSX) +- EZAudioWaveformFromFileExample (iOS) +- EZAudioWaveformFromFileExample (OSX) +- EZAudioFFTExample (iOS) +- EZAudioFFTExample (OSX) + +####Creating An Audio Plot + +You can create an audio plot in the interface builder by dragging a UIView on iOS or an NSView on OSX onto your content area. Then change the custom class of the UIView/NSView to `EZAudioPlot`. + +![EZAudioPlotInterfaceBuilder](https://cloud.githubusercontent.com/assets/1275640/8532901/47d6f9ce-23e6-11e5-9766-d9969e630338.gif) + +Alternatively, you could create the audio plot programmatically + +```objectivec +// Programmatically create an audio plot +EZAudioPlot *audioPlot = [[EZAudioPlot alloc] initWithFrame:self.view.frame]; +[self.view addSubview:audioPlot]; +``` + +####Customizing The Audio Plot + +All plots offer the ability to change the background color, waveform color, plot type (buffer or rolling), toggle between filled and stroked, and toggle between mirrored and unmirrored (about the x-axis). For iOS, colors are of the type UIColor, while on OSX colors are of the type NSColor. + +```objectivec +// Background color (use UIColor for iOS) +audioPlot.backgroundColor = [NSColor colorWithCalibratedRed:0.816 + green:0.349 + blue:0.255 + alpha:1]; +// Waveform color (use UIColor for iOS) +audioPlot.color = [NSColor colorWithCalibratedRed:1.000 + green:1.000 + blue:1.000 + alpha:1]; +// Plot type +audioPlot.plotType = EZPlotTypeBuffer; +// Fill +audioPlot.shouldFill = YES; +// Mirror +audioPlot.shouldMirror = YES; +``` + +####IBInspectable Attributes + +Also, as of iOS 8 you can adjust the background color, color, gain, shouldFill, and shouldMirror parameters directly in the Interface Builder via the IBInspectable attributes: + +![EZAudioPlotInspectableAttributes](https://cloud.githubusercontent.com/assets/1275640/8530670/288840c8-23d7-11e5-954b-644ed4ed67b4.png) + +####Updating The Audio Plot + +All plots have only one update function, `updateBuffer:withBufferSize:`, which expects a float array and its length.
+```objectivec +// The microphone component provides audio data to its delegate as an array of float buffer arrays. +- (void) microphone:(EZMicrophone *)microphone + hasAudioReceived:(float **)buffer + withBufferSize:(UInt32)bufferSize + withNumberOfChannels:(UInt32)numberOfChannels +{ + /** + Update the audio plot using the float array provided by the microphone: + buffer[0] = left channel + buffer[1] = right channel + Note: Audio updates happen asynchronously so we need to make sure + to update the plot on the main thread + */ + __weak typeof (self) weakSelf = self; + dispatch_async(dispatch_get_main_queue(), ^{ + [weakSelf.audioPlot updateBuffer:buffer[0] withBufferSize:bufferSize]; + }); +} +``` + +###EZAudioPlotGL +Provides an audio waveform plot that uses OpenGL to perform the drawing. The API for this class is exactly the same as that of the EZAudioPlot above. On iOS this is a subclass of the GLKView while on OSX this is a subclass of the NSOpenGLView. In most cases this is the plot you want to use: it's GPU-accelerated, can handle lots of points while displaying 60 frames per second (the EZAudioPlot starts to choke on anything greater than 1024), and performs amazingly on all devices. The only downside is that you can only have one OpenGL plot onscreen at a time. However, you can combine OpenGL plots with Core Graphics plots in the view hierarchy (see the EZAudioRecordExample for an example of how to do this). + +*Relevant Example Projects* +- EZAudioOpenGLWaveformExample (iOS) +- EZAudioOpenGLWaveformExample (OSX) +- EZAudioPlayFileExample (iOS) +- EZAudioPlayFileExample (OSX) +- EZAudioRecordExample (iOS) +- EZAudioRecordExample (OSX) +- EZAudioPassThroughExample (iOS) +- EZAudioPassThroughExample (OSX) + +####Creating An OpenGL Audio Plot + +You can create an audio plot in the interface builder by dragging a UIView on iOS or an NSView on OSX onto your content area. Then change the custom class of the UIView/NSView to `EZAudioPlotGL`. + +![EZAudioPlotGLInterfaceBuilder](https://cloud.githubusercontent.com/assets/1275640/8532900/47d62346-23e6-11e5-8128-07c6641f4af8.gif) + +Alternatively, you could create the `EZAudioPlotGL` programmatically +```objectivec +// Programmatically create an audio plot +EZAudioPlotGL *audioPlotGL = [[EZAudioPlotGL alloc] initWithFrame:self.view.frame]; +[self.view addSubview:audioPlotGL]; +``` + +####Customizing The OpenGL Audio Plot + +All plots offer the ability to change the background color, waveform color, plot type (buffer or rolling), toggle between filled and stroked, and toggle between mirrored and unmirrored (about the x-axis). For iOS, colors are of the type UIColor, while on OSX colors are of the type NSColor.
+```objectivec +// Background color (use UIColor for iOS) +audioPlotGL.backgroundColor = [NSColor colorWithCalibratedRed:0.816 + green:0.349 + blue:0.255 + alpha:1]; +// Waveform color (use UIColor for iOS) +audioPlotGL.color = [NSColor colorWithCalibratedRed:1.000 + green:1.000 + blue:1.000 + alpha:1]; +// Plot type +audioPlotGL.plotType = EZPlotTypeBuffer; +// Fill +audioPlotGL.shouldFill = YES; +// Mirror +audioPlotGL.shouldMirror = YES; +``` + +####IBInspectable Attributes + +Also, as of iOS 8 you can adjust the background color, color, gain, shouldFill, and shouldMirror parameters directly in the Interface Builder via the IBInspectable attributes: + +![EZAudioPlotGLInspectableAttributes](https://cloud.githubusercontent.com/assets/1275640/8530670/288840c8-23d7-11e5-954b-644ed4ed67b4.png) + +####Updating The OpenGL Audio Plot + +All plots have only one update function, `updateBuffer:withBufferSize:`, which expects a float array and its length. +```objectivec +// The microphone component provides audio data to its delegate as an array of float buffer arrays. +- (void) microphone:(EZMicrophone *)microphone + hasAudioReceived:(float **)buffer + withBufferSize:(UInt32)bufferSize + withNumberOfChannels:(UInt32)numberOfChannels +{ + /** + Update the audio plot using the float array provided by the microphone: + buffer[0] = left channel + buffer[1] = right channel + Note: Audio updates happen asynchronously so we need to make sure + to update the plot on the main thread + */ + __weak typeof (self) weakSelf = self; + dispatch_async(dispatch_get_main_queue(), ^{ + [weakSelf.audioPlotGL updateBuffer:buffer[0] withBufferSize:bufferSize]; + }); +} +``` + +##License +EZAudio is available under the MIT license. See the LICENSE file for more info. + +##Contact & Contributors +Syed Haris Ali
+www.syedharisali.com
+syedhali07[at]gmail.com + +##Acknowledgements +The following people rock: +- My brother, [Reza Ali](http://www.syedrezaali.com/), for walking me through all the gritty details of OpenGL and his constant encouragement through this journey to 1.0.0. +- [Aure Prochazka](http://aure.com/) for his amazing work on [AudioKit](http://audiokit.io/) and his encouragement to bring EZAudio to 1.0.0 +- [Daniel Kennett](http://ikennd.ac/) for writing [this great blog post](http://ikennd.ac/blog/2012/04/augraph-basics-in-cocoalibspotify/) that inspired the rewrite of the `EZOutput` in 1.0.0. +- [Michael Tyson](http://atastypixel.com/blog/) for creating the [TPCircularBuffer](http://atastypixel.com/blog/a-simple-fast-circular-buffer-implementation-for-audio-processing/) and all his contributions to the community including the Amazing Audio Engine, Audiobus, and all the tasty pixel blog posts. +- Chris Adamson and Kevin Avila for writing the amazing [Learning Core Audio](http://www.amazon.com/Learning-Core-Audio-Hands-On-Programming/dp/0321636848) book. diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudio.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudio.h new file mode 100644 index 0000000..598d66d --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudio.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudio.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioDevice.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioDevice.h new file mode 100644 index 0000000..4e74ae3 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioDevice.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioDevice.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioDisplayLink.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioDisplayLink.h new file mode 100644 index 0000000..9181b8c --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioDisplayLink.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioDisplayLink.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioFFT.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioFFT.h new file mode 100644 index 0000000..cd26060 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioFFT.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioFFT.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioFile.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioFile.h new file mode 100644 index 0000000..647862b --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioFile.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioFile.h \ No newline at end of file diff --git 
a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioFloatConverter.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioFloatConverter.h new file mode 100644 index 0000000..a2f129c --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioFloatConverter.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioFloatConverter.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioFloatData.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioFloatData.h new file mode 100644 index 0000000..011d800 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioFloatData.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioFloatData.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioOSX.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioOSX.h new file mode 100644 index 0000000..c4d8e8e --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioOSX.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioOSX.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioPlayer.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioPlayer.h new file mode 100644 index 0000000..6630f2d --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioPlayer.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioPlayer.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioPlot.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioPlot.h new file mode 100644 index 0000000..7ac6330 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioPlot.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioPlot.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioPlotGL.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioPlotGL.h new file mode 100644 index 0000000..29803ca --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioPlotGL.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioPlotGL.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioUtilities.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioUtilities.h new file mode 100644 index 0000000..f673e52 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioUtilities.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioUtilities.h \ No newline at end of file diff --git 
a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioiOS.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioiOS.h new file mode 100644 index 0000000..810a8e7 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZAudioiOS.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioiOS.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZMicrophone.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZMicrophone.h new file mode 100644 index 0000000..b7c30b2 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZMicrophone.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZMicrophone.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZOutput.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZOutput.h new file mode 100644 index 0000000..756c0ca --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZOutput.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZOutput.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZPlot.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZPlot.h new file mode 100644 index 0000000..79ecc6a --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZPlot.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZPlot.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZRecorder.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZRecorder.h new file mode 100644 index 0000000..a9dd341 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/EZRecorder.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZRecorder.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/TPCircularBuffer.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/TPCircularBuffer.h new file mode 100644 index 0000000..044cbe5 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/EZAudio/TPCircularBuffer.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/TPCircularBuffer.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/TPCircularBuffer/TPCircularBuffer+AudioBufferList.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/TPCircularBuffer/TPCircularBuffer+AudioBufferList.h new file mode 100644 index 0000000..f3fd2d1 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/TPCircularBuffer/TPCircularBuffer+AudioBufferList.h @@ -0,0 +1 @@ +../../../TPCircularBuffer/TPCircularBuffer+AudioBufferList.h \ No newline at end of file diff --git 
a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/TPCircularBuffer/TPCircularBuffer.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/TPCircularBuffer/TPCircularBuffer.h new file mode 100644 index 0000000..bff0736 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Private/TPCircularBuffer/TPCircularBuffer.h @@ -0,0 +1 @@ +../../../TPCircularBuffer/TPCircularBuffer.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudio.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudio.h new file mode 100644 index 0000000..598d66d --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudio.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudio.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioDevice.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioDevice.h new file mode 100644 index 0000000..4e74ae3 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioDevice.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioDevice.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioDisplayLink.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioDisplayLink.h new file mode 100644 index 0000000..9181b8c --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioDisplayLink.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioDisplayLink.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioFFT.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioFFT.h new file mode 100644 index 0000000..cd26060 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioFFT.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioFFT.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioFile.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioFile.h new file mode 100644 index 0000000..647862b --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioFile.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioFile.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioFloatConverter.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioFloatConverter.h new file mode 100644 index 0000000..a2f129c --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioFloatConverter.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioFloatConverter.h \ No newline at end of file diff --git 
a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioFloatData.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioFloatData.h new file mode 100644 index 0000000..011d800 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioFloatData.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioFloatData.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioOSX.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioOSX.h new file mode 100644 index 0000000..c4d8e8e --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioOSX.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioOSX.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioPlayer.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioPlayer.h new file mode 100644 index 0000000..6630f2d --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioPlayer.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioPlayer.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioPlot.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioPlot.h new file mode 100644 index 0000000..7ac6330 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioPlot.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioPlot.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioPlotGL.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioPlotGL.h new file mode 100644 index 0000000..29803ca --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioPlotGL.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioPlotGL.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioUtilities.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioUtilities.h new file mode 100644 index 0000000..f673e52 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioUtilities.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioUtilities.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioiOS.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioiOS.h new file mode 100644 index 0000000..810a8e7 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZAudioiOS.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZAudioiOS.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZMicrophone.h 
b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZMicrophone.h new file mode 100644 index 0000000..b7c30b2 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZMicrophone.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZMicrophone.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZOutput.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZOutput.h new file mode 100644 index 0000000..756c0ca --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZOutput.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZOutput.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZPlot.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZPlot.h new file mode 100644 index 0000000..79ecc6a --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZPlot.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZPlot.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZRecorder.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZRecorder.h new file mode 100644 index 0000000..a9dd341 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/EZRecorder.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/EZRecorder.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/TPCircularBuffer.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/TPCircularBuffer.h new file mode 100644 index 0000000..044cbe5 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/EZAudio/TPCircularBuffer.h @@ -0,0 +1 @@ +../../../EZAudio/EZAudio/TPCircularBuffer.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/TPCircularBuffer/TPCircularBuffer+AudioBufferList.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/TPCircularBuffer/TPCircularBuffer+AudioBufferList.h new file mode 100644 index 0000000..f3fd2d1 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/TPCircularBuffer/TPCircularBuffer+AudioBufferList.h @@ -0,0 +1 @@ +../../../TPCircularBuffer/TPCircularBuffer+AudioBufferList.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/TPCircularBuffer/TPCircularBuffer.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/TPCircularBuffer/TPCircularBuffer.h new file mode 100644 index 0000000..bff0736 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Headers/Public/TPCircularBuffer/TPCircularBuffer.h @@ -0,0 +1 @@ +../../../TPCircularBuffer/TPCircularBuffer.h \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Manifest.lock 
b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Manifest.lock new file mode 100644 index 0000000..7838365 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Manifest.lock @@ -0,0 +1,19 @@ +PODS: + - EZAudio (1.1.5): + - EZAudio/Full (= 1.1.5) + - EZAudio/Core (1.1.5) + - EZAudio/Full (1.1.5): + - EZAudio/Core + - TPCircularBuffer (= 1.1) + - TPCircularBuffer (1.1) + +DEPENDENCIES: + - EZAudio + +SPEC CHECKSUMS: + EZAudio: 3b1cb4b6d7ebca68f0c2478576d75940aad4aa99 + TPCircularBuffer: 4960a919e667280f7f38963d771f45871b64fe62 + +PODFILE CHECKSUM: cb3967f691a7245e97aa9d41e36b4522c55ccda1 + +COCOAPODS: 1.2.0.beta.1 diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Pods.xcodeproj/project.pbxproj b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Pods.xcodeproj/project.pbxproj new file mode 100644 index 0000000..15b16d6 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Pods.xcodeproj/project.pbxproj @@ -0,0 +1,792 @@ +// !$*UTF8*$! +{ + archiveVersion = 1; + classes = { + }; + objectVersion = 46; + objects = { + +/* Begin PBXBuildFile section */ + 07F29CF3D34E836E7F9460EBEBF7BF8C /* Pods-SnowboyTest-dummy.m in Sources */ = {isa = PBXBuildFile; fileRef = E1FB4B74578D130C3D7B4BFECCB46B4E /* Pods-SnowboyTest-dummy.m */; }; + 0940511CD9BBA8E066867DEE5D19951A /* AudioToolbox.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = F0567477682A34B5E21CF00B41F7A2B6 /* AudioToolbox.framework */; }; + 0AE36BA7AF8D18E53D78A59CAB41B933 /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = CB09A32D53C5CC4566A43E3BC6FC9574 /* Foundation.framework */; }; + 16229C9991DA2AEE20B4DA3D157269EE /* EZAudio.m in Sources */ = {isa = PBXBuildFile; fileRef = B5DEB394313E661485818ADA6A27BAA8 /* EZAudio.m */; }; + 188ECC220A830F71839B51119CE53E3E /* EZOutput.h in Headers */ = {isa = PBXBuildFile; fileRef = 3E0A9CF8AA533CB2ABE320F534C76EC5 /* EZOutput.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 217C83871CA6E6F17F96968FBFB8739D /* EZAudioFloatData.h in Headers */ = {isa = PBXBuildFile; fileRef = AF4451C471C29CC93876006B444C76CB /* EZAudioFloatData.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 29D283714C693DA2DBD4006A2CD8EC65 /* EZAudioPlot.h in Headers */ = {isa = PBXBuildFile; fileRef = A3E2D32122C0A2C2EAF3C0701CCA6615 /* EZAudioPlot.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 344EA22F4A4253BB6AE104EAED14C847 /* EZAudioOSX.h in Headers */ = {isa = PBXBuildFile; fileRef = C599E1E3C2A68B6EB423598726C1FA8E /* EZAudioOSX.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 42B2FEC0908CE57C4962666D38904569 /* EZAudioPlotGL.m in Sources */ = {isa = PBXBuildFile; fileRef = B7A80CC847216377B72356FC424D14C4 /* EZAudioPlotGL.m */; }; + 42EB22017F35B0994169438AD0A96466 /* Accelerate.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 343439D2EF4DE8E37F41D1BB45132871 /* Accelerate.framework */; }; + 49242DD143BC68CBFB4EAD74D9B44800 /* EZPlot.m in Sources */ = {isa = PBXBuildFile; fileRef = 363D794756A137A9FDE4586EC90C2DAB /* EZPlot.m */; }; + 4F8C2429D2F098A6D04FB10A94944E79 /* TPCircularBuffer+AudioBufferList.c in Sources */ = {isa = PBXBuildFile; fileRef = F382D83EF9D6E8752D4F09D23E849BF7 /* TPCircularBuffer+AudioBufferList.c */; settings = {COMPILER_FLAGS = "-fno-objc-arc"; }; }; + 52F73BBF1B2A6F7A37582EDF511276F3 /* EZAudioUtilities.h in Headers */ = {isa = PBXBuildFile; fileRef = 1DAF4FF6E5140B8F925C40BCE74D85D4 /* 
EZAudioUtilities.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 536641F4E01F326750008F2B697B1255 /* EZAudio.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E148565F8848992280486BDF6EA90AE /* EZAudio.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 5882B4E4E3908A171BF7348450662B1B /* EZAudio-dummy.m in Sources */ = {isa = PBXBuildFile; fileRef = 158BFE1E54251DAA0C63986FC7EE0E35 /* EZAudio-dummy.m */; }; + 5E1304B6F257E8B3389202594AB02801 /* EZAudioFile.m in Sources */ = {isa = PBXBuildFile; fileRef = CE33E0A96962E208B0D7253C1B8EF5F3 /* EZAudioFile.m */; }; + 63A61D86CDEBCCD323C692759CF298C4 /* EZAudioDevice.h in Headers */ = {isa = PBXBuildFile; fileRef = AAD00DA57191C15B8640E91446B12D61 /* EZAudioDevice.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 651C18CDE375C9E707B66804DF3699B5 /* EZAudioFFT.m in Sources */ = {isa = PBXBuildFile; fileRef = 75095EA3208866FFA47AC88622E02395 /* EZAudioFFT.m */; }; + 65A83C084CFCD9663D65369A552553A2 /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = CB09A32D53C5CC4566A43E3BC6FC9574 /* Foundation.framework */; }; + 66090E8837B0679C80F579CC7C404146 /* EZAudioDevice.m in Sources */ = {isa = PBXBuildFile; fileRef = A836127C384E5BC8102CDB2D8730C3F1 /* EZAudioDevice.m */; }; + 71F7DEEB2722AC4078625B0B6B3D626E /* EZRecorder.m in Sources */ = {isa = PBXBuildFile; fileRef = 38B2ADFC30D2F0CCC36821E69FCFC5E6 /* EZRecorder.m */; }; + 7ABFB95CA4DF2C3698512E3D43FEA168 /* EZAudioFloatData.m in Sources */ = {isa = PBXBuildFile; fileRef = 418EC50AF5D0002A1A2B7D597CC6840E /* EZAudioFloatData.m */; }; + 7E6B1F12BFBEB7A367230F60B69509AC /* TPCircularBuffer.h in Headers */ = {isa = PBXBuildFile; fileRef = 806A418E439E6C4ACF40A3B5210611DD /* TPCircularBuffer.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 846D0F02CB7E0A987DF9EDB31AFAB254 /* EZAudioFloatConverter.h in Headers */ = {isa = PBXBuildFile; fileRef = 17AC9E99CD2469349DDDE0F24E40193E /* EZAudioFloatConverter.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 879899E75E618F4A4CEFBBF613E10C91 /* AVFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = C8DE1DA5969D82E0DE0F195C0039DC98 /* AVFoundation.framework */; }; + 8945B805F96603C5678149FDAF64256F /* EZAudioiOS.h in Headers */ = {isa = PBXBuildFile; fileRef = 67FCC68BAB9FF27C2300D3D36A6A1D3E /* EZAudioiOS.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 8DD84868E1D267F6A81EDB61ED81B4BE /* AudioToolbox.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = F0567477682A34B5E21CF00B41F7A2B6 /* AudioToolbox.framework */; }; + 9EC49104A123C532375190099750B7C0 /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = CB09A32D53C5CC4566A43E3BC6FC9574 /* Foundation.framework */; }; + A084252A9D12F1E9F4EAD3FA0DFC9F8C /* EZAudioFloatConverter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8C5F6DD69EB4CDB7D81805AE349DFA4D /* EZAudioFloatConverter.m */; }; + A26E6655077ABC61E7E651ADEC6C0D1D /* EZRecorder.h in Headers */ = {isa = PBXBuildFile; fileRef = 706734D4D9A29E686EDC1B548ED6A211 /* EZRecorder.h */; settings = {ATTRIBUTES = (Public, ); }; }; + A49FF2033A08D538C5BBA31D5B4C967F /* TPCircularBuffer-dummy.m in Sources */ = {isa = PBXBuildFile; fileRef = 803DE2A1BF839898425F5FA70577672D /* TPCircularBuffer-dummy.m */; }; + A4D879C3AF84EC21E85B97AC0F3D8D38 /* EZAudioPlayer.h in Headers */ = {isa = PBXBuildFile; fileRef = CA66AD5D6B569771170AED24642F061B /* EZAudioPlayer.h */; settings = {ATTRIBUTES = (Public, ); }; }; + A82AE70024A461EBE39DDECADA975BAD /* TPCircularBuffer.h in Headers */ = {isa = 
PBXBuildFile; fileRef = 2423A256198CC5653F913CC5D36BCFF0 /* TPCircularBuffer.h */; settings = {ATTRIBUTES = (Public, ); }; }; + AFFED0CC7D73A3CC0D59A056BFFDE916 /* GLKit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 60A87CC95A9CDF7101EAC257737F5472 /* GLKit.framework */; }; + BA003BA7D581D2673B0A66C8AA02122F /* EZAudioPlotGL.h in Headers */ = {isa = PBXBuildFile; fileRef = 3AE7EB4FD578A9D62731F043738266EA /* EZAudioPlotGL.h */; settings = {ATTRIBUTES = (Public, ); }; }; + BAB4E2DB6F46F1DF7F1C86867E426DC5 /* EZAudioDisplayLink.m in Sources */ = {isa = PBXBuildFile; fileRef = A93D30946CE1C15DFFBA01AFC61BE90A /* EZAudioDisplayLink.m */; }; + BF4471DDFD0DA894723B8785B2AEEE98 /* EZAudioPlayer.m in Sources */ = {isa = PBXBuildFile; fileRef = 54F7C38BC933D46E0A8FB6E5470B2113 /* EZAudioPlayer.m */; }; + C140586AF9D82CF9952CABF1F015FAE0 /* TPCircularBuffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 6B2874B0C4B92DAEF47AB3CF4BD132F9 /* TPCircularBuffer.c */; }; + CA87E1B7AD09E29BE9DF08BE779DB1BD /* EZAudioPlot.m in Sources */ = {isa = PBXBuildFile; fileRef = E53C53E9100D6955C8E0738767A91C3D /* EZAudioPlot.m */; }; + D8A779803A80CCCB28B4158880E637B5 /* EZPlot.h in Headers */ = {isa = PBXBuildFile; fileRef = 93AE37673C56CFAC627EDE6B69AF1E71 /* EZPlot.h */; settings = {ATTRIBUTES = (Public, ); }; }; + D8DED64FAABEA87BD4885DBE808C7BBF /* EZAudioFFT.h in Headers */ = {isa = PBXBuildFile; fileRef = F671D6C069951BA4505C98AF2975B4C3 /* EZAudioFFT.h */; settings = {ATTRIBUTES = (Public, ); }; }; + DC8AF0099977E7476B7A367BD8B18827 /* EZAudioDisplayLink.h in Headers */ = {isa = PBXBuildFile; fileRef = 9A9DE543722DE21C60A47B4BF57C09D8 /* EZAudioDisplayLink.h */; settings = {ATTRIBUTES = (Public, ); }; }; + DCD085DD7420B13E60BD9D8D314F853A /* TPCircularBuffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 21350F9999AF016D74CB6F44362E69AE /* TPCircularBuffer.c */; settings = {COMPILER_FLAGS = "-fno-objc-arc"; }; }; + E6229BEDE9C40ECE6A3A1A966EF121AD /* EZOutput.m in Sources */ = {isa = PBXBuildFile; fileRef = 1CE39A72FB74B7F4F0FCC68E0E9C207F /* EZOutput.m */; }; + E665243991FDDBCD732BC1F74C1B9269 /* EZAudioFile.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A87EDF18F6432EDE1278A1CDEB64E64 /* EZAudioFile.h */; settings = {ATTRIBUTES = (Public, ); }; }; + EB7E87E27D93F81BA61B285AE855E4EC /* EZMicrophone.h in Headers */ = {isa = PBXBuildFile; fileRef = 8E621BE6F2028937BE3C74DE5A6392E2 /* EZMicrophone.h */; settings = {ATTRIBUTES = (Public, ); }; }; + ECB9A462311597D9D6990EF5E7DE01A4 /* TPCircularBuffer+AudioBufferList.h in Headers */ = {isa = PBXBuildFile; fileRef = 2188B5CF086A239ECF7F1C3E3EDAD882 /* TPCircularBuffer+AudioBufferList.h */; settings = {ATTRIBUTES = (Public, ); }; }; + F9E6E1413E8BFD84C2142E6F6EECF1BF /* EZAudioUtilities.m in Sources */ = {isa = PBXBuildFile; fileRef = 28A6965FA687E427DC6D93DB8A6E0FDA /* EZAudioUtilities.m */; }; + FE0B354D25F78475C5A8845B1B230BBC /* EZMicrophone.m in Sources */ = {isa = PBXBuildFile; fileRef = 5823A221FF2BA8E0C26C5BBADF8799A2 /* EZMicrophone.m */; }; +/* End PBXBuildFile section */ + +/* Begin PBXContainerItemProxy section */ + 1C20AB3D60DAAA242C664F9F8DCF455D /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = D41D8CD98F00B204E9800998ECF8427E /* Project object */; + proxyType = 1; + remoteGlobalIDString = 0FF42FAF2F1385A18E8DD9E261EA8764; + remoteInfo = TPCircularBuffer; + }; + D02E1C0ACC1715BE24A2B189CB17234F /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 
D41D8CD98F00B204E9800998ECF8427E /* Project object */; + proxyType = 1; + remoteGlobalIDString = 6807B53894B5AFB5835D307C300B635E; + remoteInfo = EZAudio; + }; + F810117CBF5D1996E7D83453D4E09141 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = D41D8CD98F00B204E9800998ECF8427E /* Project object */; + proxyType = 1; + remoteGlobalIDString = 0FF42FAF2F1385A18E8DD9E261EA8764; + remoteInfo = TPCircularBuffer; + }; +/* End PBXContainerItemProxy section */ + +/* Begin PBXFileReference section */ + 10C27612DD4FA1A9A433F85DB433D932 /* libPods-SnowboyTest.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; name = "libPods-SnowboyTest.a"; path = "libPods-SnowboyTest.a"; sourceTree = BUILT_PRODUCTS_DIR; }; + 158BFE1E54251DAA0C63986FC7EE0E35 /* EZAudio-dummy.m */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.objc; path = "EZAudio-dummy.m"; sourceTree = ""; }; + 17AC9E99CD2469349DDDE0F24E40193E /* EZAudioFloatConverter.h */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.h; name = EZAudioFloatConverter.h; path = EZAudio/EZAudioFloatConverter.h; sourceTree = ""; }; + 1CE39A72FB74B7F4F0FCC68E0E9C207F /* EZOutput.m */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.objc; name = EZOutput.m; path = EZAudio/EZOutput.m; sourceTree = ""; }; + 1DAF4FF6E5140B8F925C40BCE74D85D4 /* EZAudioUtilities.h */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.h; name = EZAudioUtilities.h; path = EZAudio/EZAudioUtilities.h; sourceTree = ""; }; + 2017CC445AAC2D2C07670B49B7CBBE9A /* EZAudio.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; path = EZAudio.xcconfig; sourceTree = ""; }; + 20478486CC93167E8FC41315364F17A0 /* EZAudio-prefix.pch */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.h; path = "EZAudio-prefix.pch"; sourceTree = ""; }; + 21350F9999AF016D74CB6F44362E69AE /* TPCircularBuffer.c */ = {isa = PBXFileReference; includeInIndex = 1; path = TPCircularBuffer.c; sourceTree = ""; }; + 2188B5CF086A239ECF7F1C3E3EDAD882 /* TPCircularBuffer+AudioBufferList.h */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.h; path = "TPCircularBuffer+AudioBufferList.h"; sourceTree = ""; }; + 2423A256198CC5653F913CC5D36BCFF0 /* TPCircularBuffer.h */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.h; path = TPCircularBuffer.h; sourceTree = ""; }; + 28A6965FA687E427DC6D93DB8A6E0FDA /* EZAudioUtilities.m */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.objc; name = EZAudioUtilities.m; path = EZAudio/EZAudioUtilities.m; sourceTree = ""; }; + 343439D2EF4DE8E37F41D1BB45132871 /* Accelerate.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Accelerate.framework; path = Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS10.0.sdk/System/Library/Frameworks/Accelerate.framework; sourceTree = DEVELOPER_DIR; }; + 363D794756A137A9FDE4586EC90C2DAB /* EZPlot.m */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.objc; name = EZPlot.m; path = EZAudio/EZPlot.m; sourceTree = ""; }; + 36ED0F88715C80521A9547961EB8C175 /* libTPCircularBuffer.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; name = libTPCircularBuffer.a; path = libTPCircularBuffer.a; sourceTree = BUILT_PRODUCTS_DIR; }; + 
38B2ADFC30D2F0CCC36821E69FCFC5E6 /* EZRecorder.m */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.objc; name = EZRecorder.m; path = EZAudio/EZRecorder.m; sourceTree = ""; }; + 3AE7EB4FD578A9D62731F043738266EA /* EZAudioPlotGL.h */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.h; name = EZAudioPlotGL.h; path = EZAudio/EZAudioPlotGL.h; sourceTree = ""; }; + 3E0A9CF8AA533CB2ABE320F534C76EC5 /* EZOutput.h */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.h; name = EZOutput.h; path = EZAudio/EZOutput.h; sourceTree = ""; }; + 3E420239C094C79DDD6A443C4D8EDF29 /* Pods-SnowboyTest.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; path = "Pods-SnowboyTest.release.xcconfig"; sourceTree = ""; }; + 418EC50AF5D0002A1A2B7D597CC6840E /* EZAudioFloatData.m */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.objc; name = EZAudioFloatData.m; path = EZAudio/EZAudioFloatData.m; sourceTree = ""; }; + 54F7C38BC933D46E0A8FB6E5470B2113 /* EZAudioPlayer.m */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.objc; name = EZAudioPlayer.m; path = EZAudio/EZAudioPlayer.m; sourceTree = ""; }; + 5823A221FF2BA8E0C26C5BBADF8799A2 /* EZMicrophone.m */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.objc; name = EZMicrophone.m; path = EZAudio/EZMicrophone.m; sourceTree = ""; }; + 5A87EDF18F6432EDE1278A1CDEB64E64 /* EZAudioFile.h */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.h; name = EZAudioFile.h; path = EZAudio/EZAudioFile.h; sourceTree = ""; }; + 60A87CC95A9CDF7101EAC257737F5472 /* GLKit.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = GLKit.framework; path = Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS10.0.sdk/System/Library/Frameworks/GLKit.framework; sourceTree = DEVELOPER_DIR; }; + 67FCC68BAB9FF27C2300D3D36A6A1D3E /* EZAudioiOS.h */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.h; name = EZAudioiOS.h; path = EZAudio/EZAudioiOS.h; sourceTree = ""; }; + 689BE553E57C3C0DC1E047F239D7C3C9 /* libEZAudio.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; name = libEZAudio.a; path = libEZAudio.a; sourceTree = BUILT_PRODUCTS_DIR; }; + 6B2874B0C4B92DAEF47AB3CF4BD132F9 /* TPCircularBuffer.c */ = {isa = PBXFileReference; includeInIndex = 1; name = TPCircularBuffer.c; path = EZAudio/TPCircularBuffer.c; sourceTree = ""; }; + 6E148565F8848992280486BDF6EA90AE /* EZAudio.h */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.h; name = EZAudio.h; path = EZAudio/EZAudio.h; sourceTree = ""; }; + 706734D4D9A29E686EDC1B548ED6A211 /* EZRecorder.h */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.h; name = EZRecorder.h; path = EZAudio/EZRecorder.h; sourceTree = ""; }; + 75095EA3208866FFA47AC88622E02395 /* EZAudioFFT.m */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.objc; name = EZAudioFFT.m; path = EZAudio/EZAudioFFT.m; sourceTree = ""; }; + 803DE2A1BF839898425F5FA70577672D /* TPCircularBuffer-dummy.m */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.objc; path = "TPCircularBuffer-dummy.m"; sourceTree = ""; }; + 806A418E439E6C4ACF40A3B5210611DD /* TPCircularBuffer.h */ = {isa = PBXFileReference; includeInIndex = 1; 
lastKnownFileType = sourcecode.c.h; name = TPCircularBuffer.h; path = EZAudio/TPCircularBuffer.h; sourceTree = ""; }; + 806B490E431794819B2EC84C866D27ED /* Pods-SnowboyTest-acknowledgements.plist */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.plist.xml; path = "Pods-SnowboyTest-acknowledgements.plist"; sourceTree = ""; }; + 80E76C60B91C211A76324C26B637CC08 /* Pods-SnowboyTest-frameworks.sh */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.script.sh; path = "Pods-SnowboyTest-frameworks.sh"; sourceTree = ""; }; + 8C5F6DD69EB4CDB7D81805AE349DFA4D /* EZAudioFloatConverter.m */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.objc; name = EZAudioFloatConverter.m; path = EZAudio/EZAudioFloatConverter.m; sourceTree = ""; }; + 8E621BE6F2028937BE3C74DE5A6392E2 /* EZMicrophone.h */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.h; name = EZMicrophone.h; path = EZAudio/EZMicrophone.h; sourceTree = ""; }; + 93A4A3777CF96A4AAC1D13BA6DCCEA73 /* Podfile */ = {isa = PBXFileReference; explicitFileType = text.script.ruby; includeInIndex = 1; lastKnownFileType = text; name = Podfile; path = ../Podfile; sourceTree = SOURCE_ROOT; xcLanguageSpecificationIdentifier = xcode.lang.ruby; }; + 93AE37673C56CFAC627EDE6B69AF1E71 /* EZPlot.h */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.h; name = EZPlot.h; path = EZAudio/EZPlot.h; sourceTree = ""; }; + 9A9DE543722DE21C60A47B4BF57C09D8 /* EZAudioDisplayLink.h */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.h; name = EZAudioDisplayLink.h; path = EZAudio/EZAudioDisplayLink.h; sourceTree = ""; }; + A323F4C2A68B4614DAE0C6FD4EA41774 /* TPCircularBuffer.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; path = TPCircularBuffer.xcconfig; sourceTree = ""; }; + A3E2D32122C0A2C2EAF3C0701CCA6615 /* EZAudioPlot.h */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.h; name = EZAudioPlot.h; path = EZAudio/EZAudioPlot.h; sourceTree = ""; }; + A7F6890EB9C4CD291EBF6964027445D3 /* TPCircularBuffer-prefix.pch */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.h; path = "TPCircularBuffer-prefix.pch"; sourceTree = ""; }; + A836127C384E5BC8102CDB2D8730C3F1 /* EZAudioDevice.m */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.objc; name = EZAudioDevice.m; path = EZAudio/EZAudioDevice.m; sourceTree = ""; }; + A93D30946CE1C15DFFBA01AFC61BE90A /* EZAudioDisplayLink.m */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.objc; name = EZAudioDisplayLink.m; path = EZAudio/EZAudioDisplayLink.m; sourceTree = ""; }; + AAD00DA57191C15B8640E91446B12D61 /* EZAudioDevice.h */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.h; name = EZAudioDevice.h; path = EZAudio/EZAudioDevice.h; sourceTree = ""; }; + AF4451C471C29CC93876006B444C76CB /* EZAudioFloatData.h */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.h; name = EZAudioFloatData.h; path = EZAudio/EZAudioFloatData.h; sourceTree = ""; }; + B33BD692A7747F468AD8FB357AFF9BB4 /* Pods-SnowboyTest-acknowledgements.markdown */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text; path = "Pods-SnowboyTest-acknowledgements.markdown"; sourceTree = ""; }; + B5DEB394313E661485818ADA6A27BAA8 /* EZAudio.m */ = {isa = PBXFileReference; 
includeInIndex = 1; lastKnownFileType = sourcecode.c.objc; name = EZAudio.m; path = EZAudio/EZAudio.m; sourceTree = ""; }; + B7A80CC847216377B72356FC424D14C4 /* EZAudioPlotGL.m */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.objc; name = EZAudioPlotGL.m; path = EZAudio/EZAudioPlotGL.m; sourceTree = ""; }; + C599E1E3C2A68B6EB423598726C1FA8E /* EZAudioOSX.h */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.h; name = EZAudioOSX.h; path = EZAudio/EZAudioOSX.h; sourceTree = ""; }; + C8DE1DA5969D82E0DE0F195C0039DC98 /* AVFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AVFoundation.framework; path = Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS10.0.sdk/System/Library/Frameworks/AVFoundation.framework; sourceTree = DEVELOPER_DIR; }; + CA66AD5D6B569771170AED24642F061B /* EZAudioPlayer.h */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.h; name = EZAudioPlayer.h; path = EZAudio/EZAudioPlayer.h; sourceTree = ""; }; + CB09A32D53C5CC4566A43E3BC6FC9574 /* Foundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Foundation.framework; path = Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS10.0.sdk/System/Library/Frameworks/Foundation.framework; sourceTree = DEVELOPER_DIR; }; + CE33E0A96962E208B0D7253C1B8EF5F3 /* EZAudioFile.m */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.objc; name = EZAudioFile.m; path = EZAudio/EZAudioFile.m; sourceTree = ""; }; + E1FB4B74578D130C3D7B4BFECCB46B4E /* Pods-SnowboyTest-dummy.m */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.objc; path = "Pods-SnowboyTest-dummy.m"; sourceTree = ""; }; + E53C53E9100D6955C8E0738767A91C3D /* EZAudioPlot.m */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.objc; name = EZAudioPlot.m; path = EZAudio/EZAudioPlot.m; sourceTree = ""; }; + F0567477682A34B5E21CF00B41F7A2B6 /* AudioToolbox.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AudioToolbox.framework; path = Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS10.0.sdk/System/Library/Frameworks/AudioToolbox.framework; sourceTree = DEVELOPER_DIR; }; + F382D83EF9D6E8752D4F09D23E849BF7 /* TPCircularBuffer+AudioBufferList.c */ = {isa = PBXFileReference; includeInIndex = 1; path = "TPCircularBuffer+AudioBufferList.c"; sourceTree = ""; }; + F615958B4706FACBFC7069C1307D6756 /* Pods-SnowboyTest.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; path = "Pods-SnowboyTest.debug.xcconfig"; sourceTree = ""; }; + F671D6C069951BA4505C98AF2975B4C3 /* EZAudioFFT.h */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.h; name = EZAudioFFT.h; path = EZAudio/EZAudioFFT.h; sourceTree = ""; }; + FAECDE6EA6013B9B85104D34F96C3D2E /* Pods-SnowboyTest-resources.sh */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.script.sh; path = "Pods-SnowboyTest-resources.sh"; sourceTree = ""; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + 82C9118003BADEEA8C30BE713905DEB5 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 42EB22017F35B0994169438AD0A96466 /* Accelerate.framework in Frameworks */, + 8DD84868E1D267F6A81EDB61ED81B4BE /* AudioToolbox.framework in Frameworks */, + 879899E75E618F4A4CEFBBF613E10C91 /* 
AVFoundation.framework in Frameworks */, + 0AE36BA7AF8D18E53D78A59CAB41B933 /* Foundation.framework in Frameworks */, + AFFED0CC7D73A3CC0D59A056BFFDE916 /* GLKit.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + E1E441B643EA73FAF0771178B3E6FBA6 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 65A83C084CFCD9663D65369A552553A2 /* Foundation.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + EDDEACF5FA8633046F839F5A8D1FDDFD /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 0940511CD9BBA8E066867DEE5D19951A /* AudioToolbox.framework in Frameworks */, + 9EC49104A123C532375190099750B7C0 /* Foundation.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + 18A3284CC0019C4A416F5E45BB2CB750 /* Support Files */ = { + isa = PBXGroup; + children = ( + 2017CC445AAC2D2C07670B49B7CBBE9A /* EZAudio.xcconfig */, + 158BFE1E54251DAA0C63986FC7EE0E35 /* EZAudio-dummy.m */, + 20478486CC93167E8FC41315364F17A0 /* EZAudio-prefix.pch */, + ); + name = "Support Files"; + path = "../Target Support Files/EZAudio"; + sourceTree = ""; + }; + 20B56609144CE204DFA8221F742B2D76 /* Frameworks */ = { + isa = PBXGroup; + children = ( + BAB67F7BAF975E63A852CDA3A51C6D70 /* iOS */, + ); + name = Frameworks; + sourceTree = ""; + }; + 3AF97BAF61E2E647A3910274364949FB /* EZAudio */ = { + isa = PBXGroup; + children = ( + 898E0226A2F20121B26D081C06490F39 /* Core */, + 18A3284CC0019C4A416F5E45BB2CB750 /* Support Files */, + ); + name = EZAudio; + path = EZAudio; + sourceTree = ""; + }; + 559EC26ECA1C3E30A34C2BE40D6FDA49 /* Support Files */ = { + isa = PBXGroup; + children = ( + A323F4C2A68B4614DAE0C6FD4EA41774 /* TPCircularBuffer.xcconfig */, + 803DE2A1BF839898425F5FA70577672D /* TPCircularBuffer-dummy.m */, + A7F6890EB9C4CD291EBF6964027445D3 /* TPCircularBuffer-prefix.pch */, + ); + name = "Support Files"; + path = "../Target Support Files/TPCircularBuffer"; + sourceTree = ""; + }; + 7C57FA91A4188381F5A04A6F83E77D76 /* Pods */ = { + isa = PBXGroup; + children = ( + 3AF97BAF61E2E647A3910274364949FB /* EZAudio */, + C6323540F1BCEFEA7D6FECF6E378276D /* TPCircularBuffer */, + ); + name = Pods; + sourceTree = ""; + }; + 7DB346D0F39D3F0E887471402A8071AB = { + isa = PBXGroup; + children = ( + 93A4A3777CF96A4AAC1D13BA6DCCEA73 /* Podfile */, + 20B56609144CE204DFA8221F742B2D76 /* Frameworks */, + 7C57FA91A4188381F5A04A6F83E77D76 /* Pods */, + C927ACBDE95E1F42974AAB6C9B9DC270 /* Products */, + 846A5D6846D58762D58C22F675D8AA39 /* Targets Support Files */, + ); + sourceTree = ""; + }; + 846A5D6846D58762D58C22F675D8AA39 /* Targets Support Files */ = { + isa = PBXGroup; + children = ( + C1353F6EDE9CE6DC8BB4D46E21AC5B58 /* Pods-SnowboyTest */, + ); + name = "Targets Support Files"; + sourceTree = ""; + }; + 898E0226A2F20121B26D081C06490F39 /* Core */ = { + isa = PBXGroup; + children = ( + 6E148565F8848992280486BDF6EA90AE /* EZAudio.h */, + B5DEB394313E661485818ADA6A27BAA8 /* EZAudio.m */, + AAD00DA57191C15B8640E91446B12D61 /* EZAudioDevice.h */, + A836127C384E5BC8102CDB2D8730C3F1 /* EZAudioDevice.m */, + 9A9DE543722DE21C60A47B4BF57C09D8 /* EZAudioDisplayLink.h */, + A93D30946CE1C15DFFBA01AFC61BE90A /* EZAudioDisplayLink.m */, + F671D6C069951BA4505C98AF2975B4C3 /* EZAudioFFT.h */, + 75095EA3208866FFA47AC88622E02395 /* EZAudioFFT.m */, + 5A87EDF18F6432EDE1278A1CDEB64E64 /* EZAudioFile.h */, + 
CE33E0A96962E208B0D7253C1B8EF5F3 /* EZAudioFile.m */, + 17AC9E99CD2469349DDDE0F24E40193E /* EZAudioFloatConverter.h */, + 8C5F6DD69EB4CDB7D81805AE349DFA4D /* EZAudioFloatConverter.m */, + AF4451C471C29CC93876006B444C76CB /* EZAudioFloatData.h */, + 418EC50AF5D0002A1A2B7D597CC6840E /* EZAudioFloatData.m */, + 67FCC68BAB9FF27C2300D3D36A6A1D3E /* EZAudioiOS.h */, + C599E1E3C2A68B6EB423598726C1FA8E /* EZAudioOSX.h */, + CA66AD5D6B569771170AED24642F061B /* EZAudioPlayer.h */, + 54F7C38BC933D46E0A8FB6E5470B2113 /* EZAudioPlayer.m */, + A3E2D32122C0A2C2EAF3C0701CCA6615 /* EZAudioPlot.h */, + E53C53E9100D6955C8E0738767A91C3D /* EZAudioPlot.m */, + 3AE7EB4FD578A9D62731F043738266EA /* EZAudioPlotGL.h */, + B7A80CC847216377B72356FC424D14C4 /* EZAudioPlotGL.m */, + 1DAF4FF6E5140B8F925C40BCE74D85D4 /* EZAudioUtilities.h */, + 28A6965FA687E427DC6D93DB8A6E0FDA /* EZAudioUtilities.m */, + 8E621BE6F2028937BE3C74DE5A6392E2 /* EZMicrophone.h */, + 5823A221FF2BA8E0C26C5BBADF8799A2 /* EZMicrophone.m */, + 3E0A9CF8AA533CB2ABE320F534C76EC5 /* EZOutput.h */, + 1CE39A72FB74B7F4F0FCC68E0E9C207F /* EZOutput.m */, + 93AE37673C56CFAC627EDE6B69AF1E71 /* EZPlot.h */, + 363D794756A137A9FDE4586EC90C2DAB /* EZPlot.m */, + 706734D4D9A29E686EDC1B548ED6A211 /* EZRecorder.h */, + 38B2ADFC30D2F0CCC36821E69FCFC5E6 /* EZRecorder.m */, + 6B2874B0C4B92DAEF47AB3CF4BD132F9 /* TPCircularBuffer.c */, + 806A418E439E6C4ACF40A3B5210611DD /* TPCircularBuffer.h */, + ); + name = Core; + sourceTree = ""; + }; + BAB67F7BAF975E63A852CDA3A51C6D70 /* iOS */ = { + isa = PBXGroup; + children = ( + 343439D2EF4DE8E37F41D1BB45132871 /* Accelerate.framework */, + F0567477682A34B5E21CF00B41F7A2B6 /* AudioToolbox.framework */, + C8DE1DA5969D82E0DE0F195C0039DC98 /* AVFoundation.framework */, + CB09A32D53C5CC4566A43E3BC6FC9574 /* Foundation.framework */, + 60A87CC95A9CDF7101EAC257737F5472 /* GLKit.framework */, + ); + name = iOS; + sourceTree = ""; + }; + C1353F6EDE9CE6DC8BB4D46E21AC5B58 /* Pods-SnowboyTest */ = { + isa = PBXGroup; + children = ( + B33BD692A7747F468AD8FB357AFF9BB4 /* Pods-SnowboyTest-acknowledgements.markdown */, + 806B490E431794819B2EC84C866D27ED /* Pods-SnowboyTest-acknowledgements.plist */, + E1FB4B74578D130C3D7B4BFECCB46B4E /* Pods-SnowboyTest-dummy.m */, + 80E76C60B91C211A76324C26B637CC08 /* Pods-SnowboyTest-frameworks.sh */, + FAECDE6EA6013B9B85104D34F96C3D2E /* Pods-SnowboyTest-resources.sh */, + F615958B4706FACBFC7069C1307D6756 /* Pods-SnowboyTest.debug.xcconfig */, + 3E420239C094C79DDD6A443C4D8EDF29 /* Pods-SnowboyTest.release.xcconfig */, + ); + name = "Pods-SnowboyTest"; + path = "Target Support Files/Pods-SnowboyTest"; + sourceTree = ""; + }; + C6323540F1BCEFEA7D6FECF6E378276D /* TPCircularBuffer */ = { + isa = PBXGroup; + children = ( + 21350F9999AF016D74CB6F44362E69AE /* TPCircularBuffer.c */, + 2423A256198CC5653F913CC5D36BCFF0 /* TPCircularBuffer.h */, + F382D83EF9D6E8752D4F09D23E849BF7 /* TPCircularBuffer+AudioBufferList.c */, + 2188B5CF086A239ECF7F1C3E3EDAD882 /* TPCircularBuffer+AudioBufferList.h */, + 559EC26ECA1C3E30A34C2BE40D6FDA49 /* Support Files */, + ); + name = TPCircularBuffer; + path = TPCircularBuffer; + sourceTree = ""; + }; + C927ACBDE95E1F42974AAB6C9B9DC270 /* Products */ = { + isa = PBXGroup; + children = ( + 689BE553E57C3C0DC1E047F239D7C3C9 /* libEZAudio.a */, + 10C27612DD4FA1A9A433F85DB433D932 /* libPods-SnowboyTest.a */, + 36ED0F88715C80521A9547961EB8C175 /* libTPCircularBuffer.a */, + ); + name = Products; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXHeadersBuildPhase section */ + 
5534EAAE3B6950267D8C2065F5DA64C2 /* Headers */ = { + isa = PBXHeadersBuildPhase; + buildActionMask = 2147483647; + files = ( + ECB9A462311597D9D6990EF5E7DE01A4 /* TPCircularBuffer+AudioBufferList.h in Headers */, + A82AE70024A461EBE39DDECADA975BAD /* TPCircularBuffer.h in Headers */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + EE967369BCC86CAA278A67A2C596E8A6 /* Headers */ = { + isa = PBXHeadersBuildPhase; + buildActionMask = 2147483647; + files = ( + 536641F4E01F326750008F2B697B1255 /* EZAudio.h in Headers */, + 63A61D86CDEBCCD323C692759CF298C4 /* EZAudioDevice.h in Headers */, + DC8AF0099977E7476B7A367BD8B18827 /* EZAudioDisplayLink.h in Headers */, + D8DED64FAABEA87BD4885DBE808C7BBF /* EZAudioFFT.h in Headers */, + E665243991FDDBCD732BC1F74C1B9269 /* EZAudioFile.h in Headers */, + 846D0F02CB7E0A987DF9EDB31AFAB254 /* EZAudioFloatConverter.h in Headers */, + 217C83871CA6E6F17F96968FBFB8739D /* EZAudioFloatData.h in Headers */, + 8945B805F96603C5678149FDAF64256F /* EZAudioiOS.h in Headers */, + 344EA22F4A4253BB6AE104EAED14C847 /* EZAudioOSX.h in Headers */, + A4D879C3AF84EC21E85B97AC0F3D8D38 /* EZAudioPlayer.h in Headers */, + 29D283714C693DA2DBD4006A2CD8EC65 /* EZAudioPlot.h in Headers */, + BA003BA7D581D2673B0A66C8AA02122F /* EZAudioPlotGL.h in Headers */, + 52F73BBF1B2A6F7A37582EDF511276F3 /* EZAudioUtilities.h in Headers */, + EB7E87E27D93F81BA61B285AE855E4EC /* EZMicrophone.h in Headers */, + 188ECC220A830F71839B51119CE53E3E /* EZOutput.h in Headers */, + D8A779803A80CCCB28B4158880E637B5 /* EZPlot.h in Headers */, + A26E6655077ABC61E7E651ADEC6C0D1D /* EZRecorder.h in Headers */, + 7E6B1F12BFBEB7A367230F60B69509AC /* TPCircularBuffer.h in Headers */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXHeadersBuildPhase section */ + +/* Begin PBXNativeTarget section */ + 0FF42FAF2F1385A18E8DD9E261EA8764 /* TPCircularBuffer */ = { + isa = PBXNativeTarget; + buildConfigurationList = AC9212CB82F1B6976817E207E3B40956 /* Build configuration list for PBXNativeTarget "TPCircularBuffer" */; + buildPhases = ( + FDA3A9E9E978EB0CCBA34C64A6AB27CB /* Sources */, + EDDEACF5FA8633046F839F5A8D1FDDFD /* Frameworks */, + 5534EAAE3B6950267D8C2065F5DA64C2 /* Headers */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = TPCircularBuffer; + productName = TPCircularBuffer; + productReference = 36ED0F88715C80521A9547961EB8C175 /* libTPCircularBuffer.a */; + productType = "com.apple.product-type.library.static"; + }; + 6807B53894B5AFB5835D307C300B635E /* EZAudio */ = { + isa = PBXNativeTarget; + buildConfigurationList = 4D7986FB6D90BECC9BC510F417F2FF18 /* Build configuration list for PBXNativeTarget "EZAudio" */; + buildPhases = ( + F6A614DAF5DF3A1B88FF6969A2784FD7 /* Sources */, + 82C9118003BADEEA8C30BE713905DEB5 /* Frameworks */, + EE967369BCC86CAA278A67A2C596E8A6 /* Headers */, + ); + buildRules = ( + ); + dependencies = ( + 421629A4EDD75C4DCB7EC9E9A46DDD9B /* PBXTargetDependency */, + ); + name = EZAudio; + productName = EZAudio; + productReference = 689BE553E57C3C0DC1E047F239D7C3C9 /* libEZAudio.a */; + productType = "com.apple.product-type.library.static"; + }; + 9EF53FF579F7F81F240A497C4607304A /* Pods-SnowboyTest */ = { + isa = PBXNativeTarget; + buildConfigurationList = 40AE2823239424B271D95C0906972F80 /* Build configuration list for PBXNativeTarget "Pods-SnowboyTest" */; + buildPhases = ( + DBACCF0B12E6D7D560FA5635BBA71635 /* Sources */, + E1E441B643EA73FAF0771178B3E6FBA6 /* Frameworks */, + ); + buildRules = ( + ); + dependencies = ( + 6C35EEF98FE36A410DBDEF44C1185635 
/* PBXTargetDependency */, + 769FCF9776DC208F6EA1586C1D0FA00B /* PBXTargetDependency */, + ); + name = "Pods-SnowboyTest"; + productName = "Pods-SnowboyTest"; + productReference = 10C27612DD4FA1A9A433F85DB433D932 /* libPods-SnowboyTest.a */; + productType = "com.apple.product-type.library.static"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + D41D8CD98F00B204E9800998ECF8427E /* Project object */ = { + isa = PBXProject; + attributes = { + LastSwiftUpdateCheck = 0730; + LastUpgradeCheck = 0700; + }; + buildConfigurationList = 2D8E8EC45A3A1A1D94AE762CB5028504 /* Build configuration list for PBXProject "Pods" */; + compatibilityVersion = "Xcode 3.2"; + developmentRegion = English; + hasScannedForEncodings = 0; + knownRegions = ( + en, + ); + mainGroup = 7DB346D0F39D3F0E887471402A8071AB; + productRefGroup = C927ACBDE95E1F42974AAB6C9B9DC270 /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + 6807B53894B5AFB5835D307C300B635E /* EZAudio */, + 9EF53FF579F7F81F240A497C4607304A /* Pods-SnowboyTest */, + 0FF42FAF2F1385A18E8DD9E261EA8764 /* TPCircularBuffer */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXSourcesBuildPhase section */ + DBACCF0B12E6D7D560FA5635BBA71635 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 07F29CF3D34E836E7F9460EBEBF7BF8C /* Pods-SnowboyTest-dummy.m in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + F6A614DAF5DF3A1B88FF6969A2784FD7 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 5882B4E4E3908A171BF7348450662B1B /* EZAudio-dummy.m in Sources */, + 16229C9991DA2AEE20B4DA3D157269EE /* EZAudio.m in Sources */, + 66090E8837B0679C80F579CC7C404146 /* EZAudioDevice.m in Sources */, + BAB4E2DB6F46F1DF7F1C86867E426DC5 /* EZAudioDisplayLink.m in Sources */, + 651C18CDE375C9E707B66804DF3699B5 /* EZAudioFFT.m in Sources */, + 5E1304B6F257E8B3389202594AB02801 /* EZAudioFile.m in Sources */, + A084252A9D12F1E9F4EAD3FA0DFC9F8C /* EZAudioFloatConverter.m in Sources */, + 7ABFB95CA4DF2C3698512E3D43FEA168 /* EZAudioFloatData.m in Sources */, + BF4471DDFD0DA894723B8785B2AEEE98 /* EZAudioPlayer.m in Sources */, + CA87E1B7AD09E29BE9DF08BE779DB1BD /* EZAudioPlot.m in Sources */, + 42B2FEC0908CE57C4962666D38904569 /* EZAudioPlotGL.m in Sources */, + F9E6E1413E8BFD84C2142E6F6EECF1BF /* EZAudioUtilities.m in Sources */, + FE0B354D25F78475C5A8845B1B230BBC /* EZMicrophone.m in Sources */, + E6229BEDE9C40ECE6A3A1A966EF121AD /* EZOutput.m in Sources */, + 49242DD143BC68CBFB4EAD74D9B44800 /* EZPlot.m in Sources */, + 71F7DEEB2722AC4078625B0B6B3D626E /* EZRecorder.m in Sources */, + C140586AF9D82CF9952CABF1F015FAE0 /* TPCircularBuffer.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + FDA3A9E9E978EB0CCBA34C64A6AB27CB /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 4F8C2429D2F098A6D04FB10A94944E79 /* TPCircularBuffer+AudioBufferList.c in Sources */, + A49FF2033A08D538C5BBA31D5B4C967F /* TPCircularBuffer-dummy.m in Sources */, + DCD085DD7420B13E60BD9D8D314F853A /* TPCircularBuffer.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin PBXTargetDependency section */ + 421629A4EDD75C4DCB7EC9E9A46DDD9B /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + name = TPCircularBuffer; + target = 0FF42FAF2F1385A18E8DD9E261EA8764 /* TPCircularBuffer */; + targetProxy = F810117CBF5D1996E7D83453D4E09141 /* 
PBXContainerItemProxy */; + }; + 6C35EEF98FE36A410DBDEF44C1185635 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + name = EZAudio; + target = 6807B53894B5AFB5835D307C300B635E /* EZAudio */; + targetProxy = D02E1C0ACC1715BE24A2B189CB17234F /* PBXContainerItemProxy */; + }; + 769FCF9776DC208F6EA1586C1D0FA00B /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + name = TPCircularBuffer; + target = 0FF42FAF2F1385A18E8DD9E261EA8764 /* TPCircularBuffer */; + targetProxy = 1C20AB3D60DAAA242C664F9F8DCF455D /* PBXContainerItemProxy */; + }; +/* End PBXTargetDependency section */ + +/* Begin XCBuildConfiguration section */ + 01FEB15E1B9B00AE2FF3B95776B10A57 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 2017CC445AAC2D2C07670B49B7CBBE9A /* EZAudio.xcconfig */; + buildSettings = { + "CODE_SIGN_IDENTITY[sdk=appletvos*]" = ""; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = ""; + "CODE_SIGN_IDENTITY[sdk=watchos*]" = ""; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_NO_COMMON_BLOCKS = YES; + GCC_PREFIX_HEADER = "Target Support Files/EZAudio/EZAudio-prefix.pch"; + IPHONEOS_DEPLOYMENT_TARGET = 8.0; + MTL_ENABLE_DEBUG_INFO = YES; + OTHER_LDFLAGS = ""; + OTHER_LIBTOOLFLAGS = ""; + PRIVATE_HEADERS_FOLDER_PATH = ""; + PRODUCT_NAME = "$(TARGET_NAME)"; + PUBLIC_HEADERS_FOLDER_PATH = ""; + SDKROOT = iphoneos; + SKIP_INSTALL = YES; + }; + name = Debug; + }; + 12914D756594D15C6F2CA12FE5F89F1B /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + CODE_SIGNING_REQUIRED = NO; + COPY_PHASE_STRIP = NO; + ENABLE_TESTABILITY = YES; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_DYNAMIC_NO_PIC = NO; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "POD_CONFIGURATION_DEBUG=1", + "DEBUG=1", + "$(inherited)", + ); + GCC_SYMBOLS_PRIVATE_EXTERN = NO; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 10.0; + ONLY_ACTIVE_ARCH = YES; + PROVISIONING_PROFILE_SPECIFIER = NO_SIGNING/; + STRIP_INSTALLED_PRODUCT = NO; + SYMROOT = "${SRCROOT}/../build"; + }; + name = Debug; + }; + 4C2CD730E8CA773857922DB097225A4D /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 2017CC445AAC2D2C07670B49B7CBBE9A /* EZAudio.xcconfig */; + buildSettings = { + "CODE_SIGN_IDENTITY[sdk=appletvos*]" = ""; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = ""; + "CODE_SIGN_IDENTITY[sdk=watchos*]" = ""; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_NO_COMMON_BLOCKS = YES; + GCC_PREFIX_HEADER = "Target Support Files/EZAudio/EZAudio-prefix.pch"; + IPHONEOS_DEPLOYMENT_TARGET = 8.0; + MTL_ENABLE_DEBUG_INFO = NO; + OTHER_LDFLAGS = ""; + OTHER_LIBTOOLFLAGS = ""; + PRIVATE_HEADERS_FOLDER_PATH = ""; + PRODUCT_NAME = "$(TARGET_NAME)"; + PUBLIC_HEADERS_FOLDER_PATH = ""; + SDKROOT = 
iphoneos; + SKIP_INSTALL = YES; + }; + name = Release; + }; + 91482A0F582041E621C09281B8D90521 /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 3E420239C094C79DDD6A443C4D8EDF29 /* Pods-SnowboyTest.release.xcconfig */; + buildSettings = { + "CODE_SIGN_IDENTITY[sdk=appletvos*]" = ""; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = ""; + "CODE_SIGN_IDENTITY[sdk=watchos*]" = ""; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_NO_COMMON_BLOCKS = YES; + IPHONEOS_DEPLOYMENT_TARGET = 10.0; + MACH_O_TYPE = staticlib; + MTL_ENABLE_DEBUG_INFO = NO; + OTHER_LDFLAGS = ""; + OTHER_LIBTOOLFLAGS = ""; + PODS_ROOT = "$(SRCROOT)"; + PRODUCT_BUNDLE_IDENTIFIER = "org.cocoapods.${PRODUCT_NAME:rfc1034identifier}"; + PRODUCT_NAME = "$(TARGET_NAME)"; + SDKROOT = iphoneos; + SKIP_INSTALL = YES; + }; + name = Release; + }; + 919909115E453F3EC961355E92AB8A9A /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = A323F4C2A68B4614DAE0C6FD4EA41774 /* TPCircularBuffer.xcconfig */; + buildSettings = { + "CODE_SIGN_IDENTITY[sdk=appletvos*]" = ""; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = ""; + "CODE_SIGN_IDENTITY[sdk=watchos*]" = ""; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_NO_COMMON_BLOCKS = YES; + GCC_PREFIX_HEADER = "Target Support Files/TPCircularBuffer/TPCircularBuffer-prefix.pch"; + IPHONEOS_DEPLOYMENT_TARGET = 4.3; + MTL_ENABLE_DEBUG_INFO = NO; + OTHER_LDFLAGS = ""; + OTHER_LIBTOOLFLAGS = ""; + PRIVATE_HEADERS_FOLDER_PATH = ""; + PRODUCT_NAME = "$(TARGET_NAME)"; + PUBLIC_HEADERS_FOLDER_PATH = ""; + SDKROOT = iphoneos; + SKIP_INSTALL = YES; + }; + name = Release; + }; + 94B57A0A850B52191AB4594569B007F4 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = F615958B4706FACBFC7069C1307D6756 /* Pods-SnowboyTest.debug.xcconfig */; + buildSettings = { + "CODE_SIGN_IDENTITY[sdk=appletvos*]" = ""; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = ""; + "CODE_SIGN_IDENTITY[sdk=watchos*]" = ""; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_NO_COMMON_BLOCKS = YES; + IPHONEOS_DEPLOYMENT_TARGET = 10.0; + MACH_O_TYPE = staticlib; + MTL_ENABLE_DEBUG_INFO = YES; + OTHER_LDFLAGS = ""; + OTHER_LIBTOOLFLAGS = ""; + PODS_ROOT = "$(SRCROOT)"; + PRODUCT_BUNDLE_IDENTIFIER = "org.cocoapods.${PRODUCT_NAME:rfc1034identifier}"; + PRODUCT_NAME = "$(TARGET_NAME)"; + SDKROOT = iphoneos; + SKIP_INSTALL = YES; + }; + name = Debug; + }; + E72E7977875C2D251FC62736BBDDC389 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + CODE_SIGNING_REQUIRED = NO; + COPY_PHASE_STRIP = YES; + ENABLE_NS_ASSERTIONS = NO; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_PREPROCESSOR_DEFINITIONS = ( + "POD_CONFIGURATION_RELEASE=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES; + GCC_WARN_UNUSED_FUNCTION = YES; + 
GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 10.0; + PROVISIONING_PROFILE_SPECIFIER = NO_SIGNING/; + STRIP_INSTALLED_PRODUCT = NO; + SYMROOT = "${SRCROOT}/../build"; + VALIDATE_PRODUCT = YES; + }; + name = Release; + }; + FCAEF9B27F366672F58887F7BD335FEE /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = A323F4C2A68B4614DAE0C6FD4EA41774 /* TPCircularBuffer.xcconfig */; + buildSettings = { + "CODE_SIGN_IDENTITY[sdk=appletvos*]" = ""; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = ""; + "CODE_SIGN_IDENTITY[sdk=watchos*]" = ""; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_NO_COMMON_BLOCKS = YES; + GCC_PREFIX_HEADER = "Target Support Files/TPCircularBuffer/TPCircularBuffer-prefix.pch"; + IPHONEOS_DEPLOYMENT_TARGET = 4.3; + MTL_ENABLE_DEBUG_INFO = YES; + OTHER_LDFLAGS = ""; + OTHER_LIBTOOLFLAGS = ""; + PRIVATE_HEADERS_FOLDER_PATH = ""; + PRODUCT_NAME = "$(TARGET_NAME)"; + PUBLIC_HEADERS_FOLDER_PATH = ""; + SDKROOT = iphoneos; + SKIP_INSTALL = YES; + }; + name = Debug; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + 2D8E8EC45A3A1A1D94AE762CB5028504 /* Build configuration list for PBXProject "Pods" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 12914D756594D15C6F2CA12FE5F89F1B /* Debug */, + E72E7977875C2D251FC62736BBDDC389 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 40AE2823239424B271D95C0906972F80 /* Build configuration list for PBXNativeTarget "Pods-SnowboyTest" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 94B57A0A850B52191AB4594569B007F4 /* Debug */, + 91482A0F582041E621C09281B8D90521 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 4D7986FB6D90BECC9BC510F417F2FF18 /* Build configuration list for PBXNativeTarget "EZAudio" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 01FEB15E1B9B00AE2FF3B95776B10A57 /* Debug */, + 4C2CD730E8CA773857922DB097225A4D /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + AC9212CB82F1B6976817E207E3B40956 /* Build configuration list for PBXNativeTarget "TPCircularBuffer" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + FCAEF9B27F366672F58887F7BD335FEE /* Debug */, + 919909115E453F3EC961355E92AB8A9A /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = D41D8CD98F00B204E9800998ECF8427E /* Project object */; +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/TPCircularBuffer/README.markdown b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/TPCircularBuffer/README.markdown new file mode 100644 index 0000000..04056d0 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/TPCircularBuffer/README.markdown @@ -0,0 +1,55 @@ +A simple, fast circular buffer implementation for audio processing +================================================================== + +A simple C implementation for a circular (ring) buffer. Thread-safe with a single producer and a single consumer, using OSAtomic.h primitives, and avoids any need for buffer wrapping logic by using a virtual memory map technique to place a virtual copy of the buffer straight after the end of the real buffer. 
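For orientation, the single-producer/single-consumer flow that the Usage section below describes might look roughly like the following sketch. It is illustrative only and not part of the vendored sources; it assumes just the public TPCircularBuffer calls named in this README (TPCircularBufferInit/Cleanup, ProduceBytes, Tail, Consume).

```c
// Hypothetical standalone example of the producer/consumer pattern from the README.
// Assumes the vendored TPCircularBuffer.h/.c are on the include and link path.
#include <stdint.h>
#include <stdio.h>
#include "TPCircularBuffer.h"

int main(void) {
    TPCircularBuffer buffer;
    TPCircularBufferInit(&buffer, 16384);            // allocate the ring buffer

    // Producer side: copy bytes straight into the buffer.
    const char message[] = "hello from the producer";
    TPCircularBufferProduceBytes(&buffer, message, sizeof(message));

    // Consumer side: read whatever is available, then release the space.
    int32_t availableBytes = 0;
    const char *tail = (const char *)TPCircularBufferTail(&buffer, &availableBytes);
    if (tail && availableBytes > 0) {
        printf("consumed %d bytes: %s\n", availableBytes, tail);
        TPCircularBufferConsume(&buffer, availableBytes);
    }

    TPCircularBufferCleanup(&buffer);                 // release the buffer's resources
    return 0;
}
```

Because the virtual-memory technique places a mirror of the buffer directly after the real one, the region returned by TPCircularBufferTail is always contiguous, so the consumer never needs explicit wrap-around handling.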
+ +Usage +----- + +Initialisation and cleanup: `TPCircularBufferInit` and `TPCircularBufferCleanup` to allocate and free resources. + +Producing: Use `TPCircularBufferHead` to get a pointer to write to the buffer, followed by `TPCircularBufferProduce` to submit the written data. `TPCircularBufferProduceBytes` is a convenience routine for writing data straight to the buffer. + +Consuming: Use `TPCircularBufferTail` to get a pointer to the next data to read, followed by `TPCircularBufferConsume` to free up the space once processed. + +TPCircularBuffer+AudioBufferList.(c,h) contain helper functions to queue and dequeue AudioBufferList +structures. These will automatically adjust the mData fields of each buffer to point to 16-byte aligned +regions within the circular buffer. + +Thread safety +------------- + +As long as you restrict multithreaded access to just one producer, and just one consumer, this utility should be thread safe. + +Only one shared variable is used (the buffer fill count), and OSAtomic primitives are used to write to this value to ensure atomicity. + +License +------- + +Copyright (C) 2012-2013 A Tasty Pixel + +This software is provided 'as-is', without any express or implied +warranty. In no event will the authors be held liable for any damages +arising from the use of this software. + +Permission is granted to anyone to use this software for any purpose, +including commercial applications, and to alter it and redistribute it +freely, subject to the following restrictions: + +1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + +2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + +3. This notice may not be removed or altered from any source distribution. + + +----------------------------------------------------- + +Virtual memory technique originally proposed by [Philip Howard](http://vrb.slashusr.org/), and [adapted to Darwin](http://www.snoize.com/Code/PlayBufferedSoundFile.tar.gz) by [Kurt Revis](http://www.snoize.com) + +See more info at [atastypixel.com](http://atastypixel.com/blog/a-simple-fast-circular-buffer-implementation-for-audio-processing/) + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/TPCircularBuffer/TPCircularBuffer+AudioBufferList.c b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/TPCircularBuffer/TPCircularBuffer+AudioBufferList.c new file mode 100644 index 0000000..71e10fc --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/TPCircularBuffer/TPCircularBuffer+AudioBufferList.c @@ -0,0 +1,287 @@ +// +// TPCircularBuffer+AudioBufferList.c +// Circular/Ring buffer implementation +// +// https://github.com/michaeltyson/TPCircularBuffer +// +// Created by Michael Tyson on 20/03/2012. +// +// Copyright (C) 2012-2013 A Tasty Pixel +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. 
The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// +// 3. This notice may not be removed or altered from any source distribution. +// + +#include "TPCircularBuffer+AudioBufferList.h" +#import + +static double __secondsToHostTicks = 0.0; + +static inline long align16byte(long val) { + if ( val & (16-1) ) { + return val + (16 - (val & (16-1))); + } + return val; +} + +static inline long min(long a, long b) { + return a > b ? b : a; +} + +AudioBufferList *TPCircularBufferPrepareEmptyAudioBufferList(TPCircularBuffer *buffer, int numberOfBuffers, int bytesPerBuffer, const AudioTimeStamp *inTimestamp) { + int32_t availableBytes; + TPCircularBufferABLBlockHeader *block = (TPCircularBufferABLBlockHeader*)TPCircularBufferHead(buffer, &availableBytes); + if ( !block || availableBytes < sizeof(TPCircularBufferABLBlockHeader)+((numberOfBuffers-1)*sizeof(AudioBuffer))+(numberOfBuffers*bytesPerBuffer) ) return NULL; + + assert(!((unsigned long)block & 0xF) /* Beware unaligned accesses */); + + if ( inTimestamp ) { + memcpy(&block->timestamp, inTimestamp, sizeof(AudioTimeStamp)); + } else { + memset(&block->timestamp, 0, sizeof(AudioTimeStamp)); + } + + memset(&block->bufferList, 0, sizeof(AudioBufferList)+((numberOfBuffers-1)*sizeof(AudioBuffer))); + block->bufferList.mNumberBuffers = numberOfBuffers; + + char *dataPtr = (char*)&block->bufferList + sizeof(AudioBufferList)+((numberOfBuffers-1)*sizeof(AudioBuffer)); + for ( int i=0; i availableBytes ) { + return NULL; + } + + block->bufferList.mBuffers[i].mData = dataPtr; + block->bufferList.mBuffers[i].mDataByteSize = bytesPerBuffer; + block->bufferList.mBuffers[i].mNumberChannels = 1; + + dataPtr += bytesPerBuffer; + } + + // Make sure whole buffer (including timestamp and length value) is 16-byte aligned in length + block->totalLength = (UInt32)align16byte(dataPtr - (char*)block); + if ( block->totalLength > availableBytes ) { + return NULL; + } + + return &block->bufferList; +} + +AudioBufferList *TPCircularBufferPrepareEmptyAudioBufferListWithAudioFormat(TPCircularBuffer *buffer, const AudioStreamBasicDescription *audioFormat, UInt32 frameCount, const AudioTimeStamp *timestamp) { + return TPCircularBufferPrepareEmptyAudioBufferList(buffer, + (audioFormat->mFormatFlags & kAudioFormatFlagIsNonInterleaved) ? 
audioFormat->mChannelsPerFrame : 1, + audioFormat->mBytesPerFrame * frameCount, + timestamp); +} + +void TPCircularBufferProduceAudioBufferList(TPCircularBuffer *buffer, const AudioTimeStamp *inTimestamp) { + int32_t availableBytes; + TPCircularBufferABLBlockHeader *block = (TPCircularBufferABLBlockHeader*)TPCircularBufferHead(buffer, &availableBytes); + + assert(block); + + assert(!((unsigned long)block & 0xF) /* Beware unaligned accesses */); + + if ( inTimestamp ) { + memcpy(&block->timestamp, inTimestamp, sizeof(AudioTimeStamp)); + } + + UInt32 calculatedLength = (UInt32)(((char*)block->bufferList.mBuffers[block->bufferList.mNumberBuffers-1].mData + block->bufferList.mBuffers[block->bufferList.mNumberBuffers-1].mDataByteSize) - (char*)block); + + // Make sure whole buffer (including timestamp and length value) is 16-byte aligned in length + calculatedLength = (UInt32)align16byte(calculatedLength); + + assert(calculatedLength <= block->totalLength && calculatedLength <= availableBytes); + + block->totalLength = calculatedLength; + + TPCircularBufferProduce(buffer, block->totalLength); +} + +bool TPCircularBufferCopyAudioBufferList(TPCircularBuffer *buffer, const AudioBufferList *inBufferList, const AudioTimeStamp *inTimestamp, UInt32 frames, const AudioStreamBasicDescription *audioDescription) { + if ( frames == 0 ) return true; + + int byteCount = inBufferList->mBuffers[0].mDataByteSize; + if ( frames != kTPCircularBufferCopyAll ) { + byteCount = frames * audioDescription->mBytesPerFrame; + assert(byteCount <= inBufferList->mBuffers[0].mDataByteSize); + } + + if ( byteCount == 0 ) return true; + + AudioBufferList *bufferList = TPCircularBufferPrepareEmptyAudioBufferList(buffer, inBufferList->mNumberBuffers, byteCount, inTimestamp); + if ( !bufferList ) return false; + + for ( int i=0; imNumberBuffers; i++ ) { + memcpy(bufferList->mBuffers[i].mData, inBufferList->mBuffers[i].mData, byteCount); + } + + TPCircularBufferProduceAudioBufferList(buffer, NULL); + + return true; +} + +AudioBufferList *TPCircularBufferNextBufferListAfter(TPCircularBuffer *buffer, AudioBufferList *bufferList, AudioTimeStamp *outTimestamp) { + int32_t availableBytes; + void *tail = TPCircularBufferTail(buffer, &availableBytes); + void *end = (char*)tail + availableBytes; + assert((void*)bufferList > (void*)tail && (void*)bufferList < end); + + TPCircularBufferABLBlockHeader *originalBlock = (TPCircularBufferABLBlockHeader*)((char*)bufferList - offsetof(TPCircularBufferABLBlockHeader, bufferList)); + assert(!((unsigned long)originalBlock & 0xF) /* Beware unaligned accesses */); + + + TPCircularBufferABLBlockHeader *nextBlock = (TPCircularBufferABLBlockHeader*)((char*)originalBlock + originalBlock->totalLength); + if ( (void*)nextBlock >= end ) return NULL; + assert(!((unsigned long)nextBlock & 0xF) /* Beware unaligned accesses */); + + if ( outTimestamp ) { + memcpy(outTimestamp, &nextBlock->timestamp, sizeof(AudioTimeStamp)); + } + + return &nextBlock->bufferList; +} + +void TPCircularBufferConsumeNextBufferListPartial(TPCircularBuffer *buffer, int framesToConsume, const AudioStreamBasicDescription *audioFormat) { + assert(framesToConsume >= 0); + + int32_t dontcare; + TPCircularBufferABLBlockHeader *block = (TPCircularBufferABLBlockHeader*)TPCircularBufferTail(buffer, &dontcare); + if ( !block ) return; + assert(!((unsigned long)block & 0xF)); // Beware unaligned accesses + + int bytesToConsume = (int)min(framesToConsume * audioFormat->mBytesPerFrame, block->bufferList.mBuffers[0].mDataByteSize); + + if ( 
bytesToConsume == block->bufferList.mBuffers[0].mDataByteSize ) { + TPCircularBufferConsumeNextBufferList(buffer); + return; + } + + for ( int i=0; ibufferList.mNumberBuffers; i++ ) { + assert(bytesToConsume <= block->bufferList.mBuffers[i].mDataByteSize); + + block->bufferList.mBuffers[i].mData = (char*)block->bufferList.mBuffers[i].mData + bytesToConsume; + block->bufferList.mBuffers[i].mDataByteSize -= bytesToConsume; + } + + if ( block->timestamp.mFlags & kAudioTimeStampSampleTimeValid ) { + block->timestamp.mSampleTime += framesToConsume; + } + if ( block->timestamp.mFlags & kAudioTimeStampHostTimeValid ) { + if ( !__secondsToHostTicks ) { + mach_timebase_info_data_t tinfo; + mach_timebase_info(&tinfo); + __secondsToHostTicks = 1.0 / (((double)tinfo.numer / tinfo.denom) * 1.0e-9); + } + + block->timestamp.mHostTime += ((double)framesToConsume / audioFormat->mSampleRate) * __secondsToHostTicks; + } + + // Reposition block forward, just before the audio data, ensuring 16-byte alignment + TPCircularBufferABLBlockHeader *newBlock = (TPCircularBufferABLBlockHeader*)(((unsigned long)block + bytesToConsume) & ~0xFul); + memmove(newBlock, block, sizeof(TPCircularBufferABLBlockHeader) + (block->bufferList.mNumberBuffers-1)*sizeof(AudioBuffer)); + intptr_t bytesFreed = (intptr_t)newBlock - (intptr_t)block; + newBlock->totalLength -= bytesFreed; + TPCircularBufferConsume(buffer, (int32_t)bytesFreed); +} + +void TPCircularBufferDequeueBufferListFrames(TPCircularBuffer *buffer, UInt32 *ioLengthInFrames, AudioBufferList *outputBufferList, AudioTimeStamp *outTimestamp, const AudioStreamBasicDescription *audioFormat) { + bool hasTimestamp = false; + UInt32 bytesToGo = *ioLengthInFrames * audioFormat->mBytesPerFrame; + UInt32 bytesCopied = 0; + while ( bytesToGo > 0 ) { + AudioBufferList *bufferList = TPCircularBufferNextBufferList(buffer, !hasTimestamp ? 
outTimestamp : NULL); + if ( !bufferList ) break; + + hasTimestamp = true; + long bytesToCopy = min(bytesToGo, bufferList->mBuffers[0].mDataByteSize); + + if ( outputBufferList ) { + for ( int i=0; imNumberBuffers; i++ ) { + assert(bytesCopied + bytesToCopy <= outputBufferList->mBuffers[i].mDataByteSize); + memcpy((char*)outputBufferList->mBuffers[i].mData + bytesCopied, bufferList->mBuffers[i].mData, bytesToCopy); + } + } + + TPCircularBufferConsumeNextBufferListPartial(buffer, (int)bytesToCopy/audioFormat->mBytesPerFrame, audioFormat); + + bytesToGo -= bytesToCopy; + bytesCopied += bytesToCopy; + } + + *ioLengthInFrames -= bytesToGo / audioFormat->mBytesPerFrame; +} + +static UInt32 _TPCircularBufferPeek(TPCircularBuffer *buffer, AudioTimeStamp *outTimestamp, const AudioStreamBasicDescription *audioFormat, UInt32 contiguousToleranceSampleTime) { + int32_t availableBytes; + TPCircularBufferABLBlockHeader *block = (TPCircularBufferABLBlockHeader*)TPCircularBufferTail(buffer, &availableBytes); + if ( !block ) return 0; + assert(!((unsigned long)block & 0xF) /* Beware unaligned accesses */); + + if ( outTimestamp ) { + memcpy(outTimestamp, &block->timestamp, sizeof(AudioTimeStamp)); + } + + void *end = (char*)block + availableBytes; + + UInt32 byteCount = 0; + + while ( 1 ) { + byteCount += block->bufferList.mBuffers[0].mDataByteSize; + TPCircularBufferABLBlockHeader *nextBlock = (TPCircularBufferABLBlockHeader*)((char*)block + block->totalLength); + if ( (void*)nextBlock >= end || + (contiguousToleranceSampleTime != UINT32_MAX + && fabs(nextBlock->timestamp.mSampleTime - (block->timestamp.mSampleTime + (block->bufferList.mBuffers[0].mDataByteSize / audioFormat->mBytesPerFrame))) > contiguousToleranceSampleTime) ) { + break; + } + assert(!((unsigned long)nextBlock & 0xF) /* Beware unaligned accesses */); + block = nextBlock; + } + + return byteCount / audioFormat->mBytesPerFrame; +} + +UInt32 TPCircularBufferPeek(TPCircularBuffer *buffer, AudioTimeStamp *outTimestamp, const AudioStreamBasicDescription *audioFormat) { + return _TPCircularBufferPeek(buffer, outTimestamp, audioFormat, UINT32_MAX); +} + +UInt32 TPCircularBufferPeekContiguous(TPCircularBuffer *buffer, AudioTimeStamp *outTimestamp, const AudioStreamBasicDescription *audioFormat, UInt32 contiguousToleranceSampleTime) { + return _TPCircularBufferPeek(buffer, outTimestamp, audioFormat, contiguousToleranceSampleTime); +} + +UInt32 TPCircularBufferGetAvailableSpace(TPCircularBuffer *buffer, const AudioStreamBasicDescription *audioFormat) { + // Look at buffer head; make sure there's space for the block metadata + int32_t availableBytes; + TPCircularBufferABLBlockHeader *block = (TPCircularBufferABLBlockHeader*)TPCircularBufferHead(buffer, &availableBytes); + if ( !block ) return 0; + assert(!((unsigned long)block & 0xF) /* Beware unaligned accesses */); + + // Now find out how much 16-byte aligned audio we can store in the space available + int numberOfBuffers = audioFormat->mFormatFlags & kAudioFormatFlagIsNonInterleaved ? 
audioFormat->mChannelsPerFrame : 1; + char * endOfBuffer = (char*)block + availableBytes; + char * dataPtr = (char*)align16byte((long)(&block->bufferList + sizeof(AudioBufferList)+((numberOfBuffers-1)*sizeof(AudioBuffer)))); + if ( dataPtr >= endOfBuffer ) return 0; + int32_t availableAudioBytes = (int)(endOfBuffer - dataPtr); + + int32_t availableAudioBytesPerBuffer = availableAudioBytes / numberOfBuffers; + availableAudioBytesPerBuffer -= (availableAudioBytesPerBuffer % (16-1)); + + return availableAudioBytesPerBuffer > 0 ? availableAudioBytesPerBuffer / audioFormat->mBytesPerFrame : 0; +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/TPCircularBuffer/TPCircularBuffer+AudioBufferList.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/TPCircularBuffer/TPCircularBuffer+AudioBufferList.h new file mode 100644 index 0000000..6c83d8b --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/TPCircularBuffer/TPCircularBuffer+AudioBufferList.h @@ -0,0 +1,214 @@ +// +// TPCircularBuffer+AudioBufferList.h +// Circular/Ring buffer implementation +// +// https://github.com/michaeltyson/TPCircularBuffer +// +// Created by Michael Tyson on 20/03/2012. +// +// Copyright (C) 2012-2013 A Tasty Pixel +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// +// 3. This notice may not be removed or altered from any source distribution. +// + +#ifndef TPCircularBuffer_AudioBufferList_h +#define TPCircularBuffer_AudioBufferList_h + +#ifdef __cplusplus +extern "C" { +#endif + +#include "TPCircularBuffer.h" +#include + +#define kTPCircularBufferCopyAll UINT32_MAX + +typedef struct { + AudioTimeStamp timestamp; + UInt32 totalLength; + AudioBufferList bufferList; +} TPCircularBufferABLBlockHeader; + + +/*! + * Prepare an empty buffer list, stored on the circular buffer + * + * @param buffer Circular buffer + * @param numberOfBuffers The number of buffers to be contained within the buffer list + * @param bytesPerBuffer The number of bytes to store for each buffer + * @param timestamp The timestamp associated with the buffer, or NULL. Note that you can also pass a timestamp into TPCircularBufferProduceAudioBufferList, to set it there instead. + * @return The empty buffer list, or NULL if circular buffer has insufficient space + */ +AudioBufferList *TPCircularBufferPrepareEmptyAudioBufferList(TPCircularBuffer *buffer, int numberOfBuffers, int bytesPerBuffer, const AudioTimeStamp *timestamp); + +/*! 
+ * Prepare an empty buffer list, stored on the circular buffer, using an audio description to automatically configure buffer + * + * @param buffer Circular buffer + * @param audioFormat The kind of audio that will be stored + * @param frameCount The number of frames that will be stored + * @param timestamp The timestamp associated with the buffer, or NULL. Note that you can also pass a timestamp into TPCircularBufferProduceAudioBufferList, to set it there instead. + * @return The empty buffer list, or NULL if circular buffer has insufficient space + */ +AudioBufferList *TPCircularBufferPrepareEmptyAudioBufferListWithAudioFormat(TPCircularBuffer *buffer, const AudioStreamBasicDescription *audioFormat, UInt32 frameCount, const AudioTimeStamp *timestamp); + +/*! + * Mark next audio buffer list as ready for reading + * + * This marks the audio buffer list prepared using TPCircularBufferPrepareEmptyAudioBufferList + * as ready for reading. You must not call this function without first calling + * TPCircularBufferPrepareEmptyAudioBufferList. + * + * @param buffer Circular buffer + * @param timestamp The timestamp associated with the buffer, or NULL to leave as-is. Note that you can also pass a timestamp into TPCircularBufferPrepareEmptyAudioBufferList, to set it there instead. + */ +void TPCircularBufferProduceAudioBufferList(TPCircularBuffer *buffer, const AudioTimeStamp *inTimestamp); + +/*! + * Copy the audio buffer list onto the buffer + * + * @param buffer Circular buffer + * @param bufferList Buffer list containing audio to copy to buffer + * @param timestamp The timestamp associated with the buffer, or NULL + * @param frames Length of audio in frames. Specify kTPCircularBufferCopyAll to copy the whole buffer (audioFormat can be NULL, in this case) + * @param audioFormat The AudioStreamBasicDescription describing the audio, or NULL if you specify kTPCircularBufferCopyAll to the `frames` argument + * @return YES if buffer list was successfully copied; NO if there was insufficient space + */ +bool TPCircularBufferCopyAudioBufferList(TPCircularBuffer *buffer, const AudioBufferList *bufferList, const AudioTimeStamp *timestamp, UInt32 frames, const AudioStreamBasicDescription *audioFormat); + +/*! + * Get a pointer to the next stored buffer list + * + * @param buffer Circular buffer + * @param outTimestamp On output, if not NULL, the timestamp corresponding to the buffer + * @return Pointer to the next buffer list in the buffer + */ +static __inline__ __attribute__((always_inline)) AudioBufferList *TPCircularBufferNextBufferList(TPCircularBuffer *buffer, AudioTimeStamp *outTimestamp) { + int32_t dontcare; // Length of segment is contained within buffer list, so we can ignore this + TPCircularBufferABLBlockHeader *block = (TPCircularBufferABLBlockHeader*)TPCircularBufferTail(buffer, &dontcare); + if ( !block ) { + if ( outTimestamp ) { + memset(outTimestamp, 0, sizeof(AudioTimeStamp)); + } + return NULL; + } + if ( outTimestamp ) { + memcpy(outTimestamp, &block->timestamp, sizeof(AudioTimeStamp)); + } + return &block->bufferList; +} + +/*! + * Get a pointer to the next stored buffer list after the given one + * + * @param buffer Circular buffer + * @param bufferList Preceding buffer list + * @param outTimestamp On output, if not NULL, the timestamp corresponding to the buffer + * @return Pointer to the next buffer list in the buffer, or NULL + */ +AudioBufferList *TPCircularBufferNextBufferListAfter(TPCircularBuffer *buffer, AudioBufferList *bufferList, AudioTimeStamp *outTimestamp); + +/*! 
+ * Consume the next buffer list + * + * @param buffer Circular buffer + */ +static __inline__ __attribute__((always_inline)) void TPCircularBufferConsumeNextBufferList(TPCircularBuffer *buffer) { + int32_t dontcare; + TPCircularBufferABLBlockHeader *block = (TPCircularBufferABLBlockHeader*)TPCircularBufferTail(buffer, &dontcare); + if ( !block ) return; + TPCircularBufferConsume(buffer, block->totalLength); +} + +/*! + * Consume a portion of the next buffer list + * + * This will also increment the sample time and host time portions of the timestamp of + * the buffer list, if present. + * + * @param buffer Circular buffer + * @param framesToConsume The number of frames to consume from the buffer list + * @param audioFormat The AudioStreamBasicDescription describing the audio + */ +void TPCircularBufferConsumeNextBufferListPartial(TPCircularBuffer *buffer, int framesToConsume, const AudioStreamBasicDescription *audioFormat); + +/*! + * Consume a certain number of frames from the buffer, possibly from multiple queued buffer lists + * + * Copies the given number of frames from the buffer into outputBufferList, of the + * given audio description, then consumes the audio buffers. If an audio buffer has + * not been entirely consumed, then updates the queued buffer list structure to point + * to the unconsumed data only. + * + * @param buffer Circular buffer + * @param ioLengthInFrames On input, the number of frames in the given audio format to consume; on output, the number of frames provided + * @param outputBufferList The buffer list to copy audio to, or NULL to discard audio. If not NULL, the structure must be initialised properly, and the mData pointers must not be NULL. + * @param outTimestamp On output, if not NULL, the timestamp corresponding to the first audio frame returned + * @param audioFormat The format of the audio stored in the buffer + */ +void TPCircularBufferDequeueBufferListFrames(TPCircularBuffer *buffer, UInt32 *ioLengthInFrames, AudioBufferList *outputBufferList, AudioTimeStamp *outTimestamp, const AudioStreamBasicDescription *audioFormat); + +/*! + * Determine how many frames of audio are buffered + * + * Given the provided audio format, determines the frame count of all queued buffers + * + * Note: This function should only be used on the consumer thread, not the producer thread. + * + * @param buffer Circular buffer + * @param outTimestamp On output, if not NULL, the timestamp corresponding to the first audio frame returned + * @param audioFormat The format of the audio stored in the buffer + * @return The number of frames in the given audio format that are in the buffer + */ +UInt32 TPCircularBufferPeek(TPCircularBuffer *buffer, AudioTimeStamp *outTimestamp, const AudioStreamBasicDescription *audioFormat); + +/*! + * Determine how many contiguous frames of audio are buffered + * + * Given the provided audio format, determines the frame count of all queued buffers that are contiguous, + * given their corresponding timestamps (sample time). + * + * Note: This function should only be used on the consumer thread, not the producer thread. 
+ * + * @param buffer Circular buffer + * @param outTimestamp On output, if not NULL, the timestamp corresponding to the first audio frame returned + * @param audioFormat The format of the audio stored in the buffer + * @param contiguousToleranceSampleTime The number of samples of discrepancy to tolerate + * @return The number of frames in the given audio format that are in the buffer + */ +UInt32 TPCircularBufferPeekContiguous(TPCircularBuffer *buffer, AudioTimeStamp *outTimestamp, const AudioStreamBasicDescription *audioFormat, UInt32 contiguousToleranceSampleTime); + +/*! + * Determine how many much space there is in the buffer + * + * Given the provided audio format, determines the number of frames of audio that can be buffered. + * + * Note: This function should only be used on the producer thread, not the consumer thread. + * + * @param buffer Circular buffer + * @param audioFormat The format of the audio stored in the buffer + * @return The number of frames in the given audio format that can be stored in the buffer + */ +UInt32 TPCircularBufferGetAvailableSpace(TPCircularBuffer *buffer, const AudioStreamBasicDescription *audioFormat); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/TPCircularBuffer/TPCircularBuffer.c b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/TPCircularBuffer/TPCircularBuffer.c new file mode 100644 index 0000000..cfdba2b --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/TPCircularBuffer/TPCircularBuffer.c @@ -0,0 +1,136 @@ +// +// TPCircularBuffer.c +// Circular/Ring buffer implementation +// +// https://github.com/michaeltyson/TPCircularBuffer +// +// Created by Michael Tyson on 10/12/2011. +// +// Copyright (C) 2012-2013 A Tasty Pixel +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// +// 3. This notice may not be removed or altered from any source distribution. 
+// + +#include "TPCircularBuffer.h" +#include +#include + +#define reportResult(result,operation) (_reportResult((result),(operation),strrchr(__FILE__, '/')+1,__LINE__)) +static inline bool _reportResult(kern_return_t result, const char *operation, const char* file, int line) { + if ( result != ERR_SUCCESS ) { + printf("%s:%d: %s: %s\n", file, line, operation, mach_error_string(result)); + return false; + } + return true; +} + +bool TPCircularBufferInit(TPCircularBuffer *buffer, int32_t length) { + + // Keep trying until we get our buffer, needed to handle race conditions + int retries = 3; + while ( true ) { + + buffer->length = (int32_t)round_page(length); // We need whole page sizes + + // Temporarily allocate twice the length, so we have the contiguous address space to + // support a second instance of the buffer directly after + vm_address_t bufferAddress; + kern_return_t result = vm_allocate(mach_task_self(), + &bufferAddress, + buffer->length * 2, + VM_FLAGS_ANYWHERE); // allocate anywhere it'll fit + if ( result != ERR_SUCCESS ) { + if ( retries-- == 0 ) { + reportResult(result, "Buffer allocation"); + return false; + } + // Try again if we fail + continue; + } + + // Now replace the second half of the allocation with a virtual copy of the first half. Deallocate the second half... + result = vm_deallocate(mach_task_self(), + bufferAddress + buffer->length, + buffer->length); + if ( result != ERR_SUCCESS ) { + if ( retries-- == 0 ) { + reportResult(result, "Buffer deallocation"); + return false; + } + // If this fails somehow, deallocate the whole region and try again + vm_deallocate(mach_task_self(), bufferAddress, buffer->length); + continue; + } + + // Re-map the buffer to the address space immediately after the buffer + vm_address_t virtualAddress = bufferAddress + buffer->length; + vm_prot_t cur_prot, max_prot; + result = vm_remap(mach_task_self(), + &virtualAddress, // mirror target + buffer->length, // size of mirror + 0, // auto alignment + 0, // force remapping to virtualAddress + mach_task_self(), // same task + bufferAddress, // mirror source + 0, // MAP READ-WRITE, NOT COPY + &cur_prot, // unused protection struct + &max_prot, // unused protection struct + VM_INHERIT_DEFAULT); + if ( result != ERR_SUCCESS ) { + if ( retries-- == 0 ) { + reportResult(result, "Remap buffer memory"); + return false; + } + // If this remap failed, we hit a race condition, so deallocate and try again + vm_deallocate(mach_task_self(), bufferAddress, buffer->length); + continue; + } + + if ( virtualAddress != bufferAddress+buffer->length ) { + // If the memory is not contiguous, clean up both allocated buffers and try again + if ( retries-- == 0 ) { + printf("Couldn't map buffer memory to end of buffer\n"); + return false; + } + + vm_deallocate(mach_task_self(), virtualAddress, buffer->length); + vm_deallocate(mach_task_self(), bufferAddress, buffer->length); + continue; + } + + buffer->buffer = (void*)bufferAddress; + buffer->fillCount = 0; + buffer->head = buffer->tail = 0; + + return true; + } + return false; +} + +void TPCircularBufferCleanup(TPCircularBuffer *buffer) { + vm_deallocate(mach_task_self(), (vm_address_t)buffer->buffer, buffer->length * 2); + memset(buffer, 0, sizeof(TPCircularBuffer)); +} + +void TPCircularBufferClear(TPCircularBuffer *buffer) { + int32_t fillCount; + if ( TPCircularBufferTail(buffer, &fillCount) ) { + TPCircularBufferConsume(buffer, fillCount); + } +} diff --git 
a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/TPCircularBuffer/TPCircularBuffer.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/TPCircularBuffer/TPCircularBuffer.h new file mode 100644 index 0000000..b475670 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/TPCircularBuffer/TPCircularBuffer.h @@ -0,0 +1,195 @@ +// +// TPCircularBuffer.h +// Circular/Ring buffer implementation +// +// https://github.com/michaeltyson/TPCircularBuffer +// +// Created by Michael Tyson on 10/12/2011. +// +// +// This implementation makes use of a virtual memory mapping technique that inserts a virtual copy +// of the buffer memory directly after the buffer's end, negating the need for any buffer wrap-around +// logic. Clients can simply use the returned memory address as if it were contiguous space. +// +// The implementation is thread-safe in the case of a single producer and single consumer. +// +// Virtual memory technique originally proposed by Philip Howard (http://vrb.slashusr.org/), and +// adapted to Darwin by Kurt Revis (http://www.snoize.com, +// http://www.snoize.com/Code/PlayBufferedSoundFile.tar.gz) +// +// +// Copyright (C) 2012-2013 A Tasty Pixel +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// +// 3. This notice may not be removed or altered from any source distribution. +// + +#ifndef TPCircularBuffer_h +#define TPCircularBuffer_h + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + void *buffer; + int32_t length; + int32_t tail; + int32_t head; + volatile int32_t fillCount; +} TPCircularBuffer; + +/*! + * Initialise buffer + * + * Note that the length is advisory only: Because of the way the + * memory mirroring technique works, the true buffer length will + * be multiples of the device page size (e.g. 4096 bytes) + * + * @param buffer Circular buffer + * @param length Length of buffer + */ +bool TPCircularBufferInit(TPCircularBuffer *buffer, int32_t length); + +/*! + * Cleanup buffer + * + * Releases buffer resources. + */ +void TPCircularBufferCleanup(TPCircularBuffer *buffer); + +/*! + * Clear buffer + * + * Resets buffer to original, empty state. + * + * This is safe for use by consumer while producer is accessing + * buffer. + */ +void TPCircularBufferClear(TPCircularBuffer *buffer); + +// Reading (consuming) + +/*! + * Access end of buffer + * + * This gives you a pointer to the end of the buffer, ready + * for reading, and the number of available bytes to read. 
+ * + * @param buffer Circular buffer + * @param availableBytes On output, the number of bytes ready for reading + * @return Pointer to the first bytes ready for reading, or NULL if buffer is empty + */ +static __inline__ __attribute__((always_inline)) void* TPCircularBufferTail(TPCircularBuffer *buffer, int32_t* availableBytes) { + *availableBytes = buffer->fillCount; + if ( *availableBytes == 0 ) return NULL; + return (void*)((char*)buffer->buffer + buffer->tail); +} + +/*! + * Consume bytes in buffer + * + * This frees up the just-read bytes, ready for writing again. + * + * @param buffer Circular buffer + * @param amount Number of bytes to consume + */ +static __inline__ __attribute__((always_inline)) void TPCircularBufferConsume(TPCircularBuffer *buffer, int32_t amount) { + buffer->tail = (buffer->tail + amount) % buffer->length; + OSAtomicAdd32Barrier(-amount, &buffer->fillCount); + assert(buffer->fillCount >= 0); +} + +/*! + * Version of TPCircularBufferConsume without the memory barrier, for more optimal use in single-threaded contexts + */ +static __inline__ __attribute__((always_inline)) void TPCircularBufferConsumeNoBarrier(TPCircularBuffer *buffer, int32_t amount) { + buffer->tail = (buffer->tail + amount) % buffer->length; + buffer->fillCount -= amount; + assert(buffer->fillCount >= 0); +} + +/*! + * Access front of buffer + * + * This gives you a pointer to the front of the buffer, ready + * for writing, and the number of available bytes to write. + * + * @param buffer Circular buffer + * @param availableBytes On output, the number of bytes ready for writing + * @return Pointer to the first bytes ready for writing, or NULL if buffer is full + */ +static __inline__ __attribute__((always_inline)) void* TPCircularBufferHead(TPCircularBuffer *buffer, int32_t* availableBytes) { + *availableBytes = (buffer->length - buffer->fillCount); + if ( *availableBytes == 0 ) return NULL; + return (void*)((char*)buffer->buffer + buffer->head); +} + +// Writing (producing) + +/*! + * Produce bytes in buffer + * + * This marks the given section of the buffer ready for reading. + * + * @param buffer Circular buffer + * @param amount Number of bytes to produce + */ +static __inline__ __attribute__((always_inline)) void TPCircularBufferProduce(TPCircularBuffer *buffer, int32_t amount) { + buffer->head = (buffer->head + amount) % buffer->length; + OSAtomicAdd32Barrier(amount, &buffer->fillCount); + assert(buffer->fillCount <= buffer->length); +} + +/*! + * Version of TPCircularBufferProduce without the memory barrier, for more optimal use in single-threaded contexts + */ +static __inline__ __attribute__((always_inline)) void TPCircularBufferProduceNoBarrier(TPCircularBuffer *buffer, int32_t amount) { + buffer->head = (buffer->head + amount) % buffer->length; + buffer->fillCount += amount; + assert(buffer->fillCount <= buffer->length); +} + +/*! + * Helper routine to copy bytes to buffer + * + * This copies the given bytes to the buffer, and marks them ready for writing. 
+ * + * @param buffer Circular buffer + * @param src Source buffer + * @param len Number of bytes in source buffer + * @return true if bytes copied, false if there was insufficient space + */ +static __inline__ __attribute__((always_inline)) bool TPCircularBufferProduceBytes(TPCircularBuffer *buffer, const void* src, int32_t len) { + int32_t space; + void *ptr = TPCircularBufferHead(buffer, &space); + if ( space < len ) return false; + memcpy(ptr, src, len); + TPCircularBufferProduce(buffer, len); + return true; +} + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/EZAudio/EZAudio-dummy.m b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/EZAudio/EZAudio-dummy.m new file mode 100644 index 0000000..3f9b622 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/EZAudio/EZAudio-dummy.m @@ -0,0 +1,5 @@ +#import +@interface PodsDummy_EZAudio : NSObject +@end +@implementation PodsDummy_EZAudio +@end diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/EZAudio/EZAudio-prefix.pch b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/EZAudio/EZAudio-prefix.pch new file mode 100644 index 0000000..beb2a24 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/EZAudio/EZAudio-prefix.pch @@ -0,0 +1,12 @@ +#ifdef __OBJC__ +#import +#else +#ifndef FOUNDATION_EXPORT +#if defined(__cplusplus) +#define FOUNDATION_EXPORT extern "C" +#else +#define FOUNDATION_EXPORT extern +#endif +#endif +#endif + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/EZAudio/EZAudio.xcconfig b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/EZAudio/EZAudio.xcconfig new file mode 100644 index 0000000..363df95 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/EZAudio/EZAudio.xcconfig @@ -0,0 +1,11 @@ +CONFIGURATION_BUILD_DIR = $PODS_CONFIGURATION_BUILD_DIR/EZAudio +GCC_PREPROCESSOR_DEFINITIONS = $(inherited) COCOAPODS=1 +HEADER_SEARCH_PATHS = "${PODS_ROOT}/Headers/Private" "${PODS_ROOT}/Headers/Private/EZAudio" "${PODS_ROOT}/Headers/Public" "${PODS_ROOT}/Headers/Public/EZAudio" "${PODS_ROOT}/Headers/Public/TPCircularBuffer" +LIBRARY_SEARCH_PATHS = $(inherited) "$PODS_CONFIGURATION_BUILD_DIR/TPCircularBuffer" +OTHER_LDFLAGS = -framework "AVFoundation" -framework "Accelerate" -framework "AudioToolbox" -framework "GLKit" +PODS_BUILD_DIR = $BUILD_DIR +PODS_CONFIGURATION_BUILD_DIR = $PODS_BUILD_DIR/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME) +PODS_ROOT = ${SRCROOT} +PODS_TARGET_SRCROOT = ${PODS_ROOT}/EZAudio +PRODUCT_BUNDLE_IDENTIFIER = org.cocoapods.${PRODUCT_NAME:rfc1034identifier} +SKIP_INSTALL = YES diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest-acknowledgements.markdown b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest-acknowledgements.markdown new file mode 100644 index 0000000..b27220f --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest-acknowledgements.markdown @@ -0,0 
+1,35 @@ +# Acknowledgements +This application makes use of the following third party libraries: + +## EZAudio + +The MIT License (MIT) + +EZAudio +Copyright (c) 2013 Syed Haris Ali + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +## TPCircularBuffer + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE." 
+ +Generated by CocoaPods - https://cocoapods.org diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest-acknowledgements.plist b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest-acknowledgements.plist new file mode 100644 index 0000000..f89e84e --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest-acknowledgements.plist @@ -0,0 +1,73 @@ + + + + + PreferenceSpecifiers + + + FooterText + This application makes use of the following third party libraries: + Title + Acknowledgements + Type + PSGroupSpecifier + + + FooterText + The MIT License (MIT) + +EZAudio +Copyright (c) 2013 Syed Haris Ali + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + License + MIT + Title + EZAudio + Type + PSGroupSpecifier + + + FooterText + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE." 
+ + License + MIT + Title + TPCircularBuffer + Type + PSGroupSpecifier + + + FooterText + Generated by CocoaPods - https://cocoapods.org + Title + + Type + PSGroupSpecifier + + + StringsTable + Acknowledgements + Title + Acknowledgements + + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest-dummy.m b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest-dummy.m new file mode 100644 index 0000000..45d6643 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest-dummy.m @@ -0,0 +1,5 @@ +#import +@interface PodsDummy_Pods_SnowboyTest : NSObject +@end +@implementation PodsDummy_Pods_SnowboyTest +@end diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest-frameworks.sh b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest-frameworks.sh new file mode 100644 index 0000000..d839f60 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest-frameworks.sh @@ -0,0 +1,92 @@ +#!/bin/sh +set -e + +echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" +mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" + +SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}" + +install_framework() +{ + if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then + local source="${BUILT_PRODUCTS_DIR}/$1" + elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then + local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")" + elif [ -r "$1" ]; then + local source="$1" + fi + + local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" + + if [ -L "${source}" ]; then + echo "Symlinked..." + source="$(readlink "${source}")" + fi + + # use filter instead of exclude so missing patterns dont' throw errors + echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\"" + rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}" + + local basename + basename="$(basename -s .framework "$1")" + binary="${destination}/${basename}.framework/${basename}" + if ! [ -r "$binary" ]; then + binary="${destination}/${basename}" + fi + + # Strip invalid architectures so "fat" simulator / device frameworks work on device + if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then + strip_invalid_archs "$binary" + fi + + # Resign the code if required by the build settings to avoid unstable apps + code_sign_if_enabled "${destination}/$(basename "$1")" + + # Embed linked Swift runtime libraries. No longer necessary as of Xcode 7. 
+ if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then + local swift_runtime_libs + swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]}) + for lib in $swift_runtime_libs; do + echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\"" + rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}" + code_sign_if_enabled "${destination}/${lib}" + done + fi +} + +# Signs a framework with the provided identity +code_sign_if_enabled() { + if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then + # Use the current code_sign_identitiy + echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}" + local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements "$1"" + + if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then + code_sign_cmd="$code_sign_cmd &" + fi + echo "$code_sign_cmd" + eval "$code_sign_cmd" + fi +} + +# Strip invalid architectures +strip_invalid_archs() { + binary="$1" + # Get architectures for current file + archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)" + stripped="" + for arch in $archs; do + if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then + # Strip non-valid architectures in-place + lipo -remove "$arch" -output "$binary" "$binary" || exit 1 + stripped="$stripped $arch" + fi + done + if [[ "$stripped" ]]; then + echo "Stripped $binary of architectures:$stripped" + fi +} + +if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then + wait +fi diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest-resources.sh b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest-resources.sh new file mode 100644 index 0000000..4602c68 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest-resources.sh @@ -0,0 +1,99 @@ +#!/bin/sh +set -e + +mkdir -p "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}" + +RESOURCES_TO_COPY=${PODS_ROOT}/resources-to-copy-${TARGETNAME}.txt +> "$RESOURCES_TO_COPY" + +XCASSET_FILES=() + +case "${TARGETED_DEVICE_FAMILY}" in + 1,2) + TARGET_DEVICE_ARGS="--target-device ipad --target-device iphone" + ;; + 1) + TARGET_DEVICE_ARGS="--target-device iphone" + ;; + 2) + TARGET_DEVICE_ARGS="--target-device ipad" + ;; + 3) + TARGET_DEVICE_ARGS="--target-device tv" + ;; + *) + TARGET_DEVICE_ARGS="--target-device mac" + ;; +esac + +install_resource() +{ + if [[ "$1" = /* ]] ; then + RESOURCE_PATH="$1" + else + RESOURCE_PATH="${PODS_ROOT}/$1" + fi + if [[ ! -e "$RESOURCE_PATH" ]] ; then + cat << EOM +error: Resource "$RESOURCE_PATH" not found. Run 'pod install' to update the copy resources script. 
+EOM + exit 1 + fi + case $RESOURCE_PATH in + *.storyboard) + echo "ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile ${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .storyboard`.storyboardc $RESOURCE_PATH --sdk ${SDKROOT} ${TARGET_DEVICE_ARGS}" + ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .storyboard`.storyboardc" "$RESOURCE_PATH" --sdk "${SDKROOT}" ${TARGET_DEVICE_ARGS} + ;; + *.xib) + echo "ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile ${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .xib`.nib $RESOURCE_PATH --sdk ${SDKROOT} ${TARGET_DEVICE_ARGS}" + ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .xib`.nib" "$RESOURCE_PATH" --sdk "${SDKROOT}" ${TARGET_DEVICE_ARGS} + ;; + *.framework) + echo "mkdir -p ${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" + mkdir -p "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" + echo "rsync -av $RESOURCE_PATH ${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" + rsync -av "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" + ;; + *.xcdatamodel) + echo "xcrun momc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH"`.mom\"" + xcrun momc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodel`.mom" + ;; + *.xcdatamodeld) + echo "xcrun momc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodeld`.momd\"" + xcrun momc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodeld`.momd" + ;; + *.xcmappingmodel) + echo "xcrun mapc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcmappingmodel`.cdm\"" + xcrun mapc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcmappingmodel`.cdm" + ;; + *.xcassets) + ABSOLUTE_XCASSET_FILE="$RESOURCE_PATH" + XCASSET_FILES+=("$ABSOLUTE_XCASSET_FILE") + ;; + *) + echo "$RESOURCE_PATH" + echo "$RESOURCE_PATH" >> "$RESOURCES_TO_COPY" + ;; + esac +} + +mkdir -p "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}" +rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}" +if [[ "${ACTION}" == "install" ]] && [[ "${SKIP_INSTALL}" == "NO" ]]; then + mkdir -p "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}" + rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}" +fi +rm -f "$RESOURCES_TO_COPY" + +if [[ -n "${WRAPPER_EXTENSION}" ]] && [ "`xcrun --find actool`" ] && [ -n "$XCASSET_FILES" ] +then + # Find all other xcassets (this 
unfortunately includes those of path pods and other targets). + OTHER_XCASSETS=$(find "$PWD" -iname "*.xcassets" -type d) + while read line; do + if [[ $line != "${PODS_ROOT}*" ]]; then + XCASSET_FILES+=("$line") + fi + done <<<"$OTHER_XCASSETS" + + printf "%s\0" "${XCASSET_FILES[@]}" | xargs -0 xcrun actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${!DEPLOYMENT_TARGET_SETTING_NAME}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}" +fi diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest.debug.xcconfig b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest.debug.xcconfig new file mode 100644 index 0000000..3627a1c --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest.debug.xcconfig @@ -0,0 +1,8 @@ +GCC_PREPROCESSOR_DEFINITIONS = $(inherited) COCOAPODS=1 +HEADER_SEARCH_PATHS = $(inherited) "${PODS_ROOT}/Headers/Public" "${PODS_ROOT}/Headers/Public/EZAudio" "${PODS_ROOT}/Headers/Public/TPCircularBuffer" +LIBRARY_SEARCH_PATHS = $(inherited) "$PODS_CONFIGURATION_BUILD_DIR/EZAudio" "$PODS_CONFIGURATION_BUILD_DIR/TPCircularBuffer" +OTHER_CFLAGS = $(inherited) -isystem "${PODS_ROOT}/Headers/Public" -isystem "${PODS_ROOT}/Headers/Public/EZAudio" -isystem "${PODS_ROOT}/Headers/Public/TPCircularBuffer" +OTHER_LDFLAGS = $(inherited) -ObjC -l"EZAudio" -l"TPCircularBuffer" -framework "AVFoundation" -framework "Accelerate" -framework "AudioToolbox" -framework "GLKit" +PODS_BUILD_DIR = $BUILD_DIR +PODS_CONFIGURATION_BUILD_DIR = $PODS_BUILD_DIR/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME) +PODS_ROOT = ${SRCROOT}/Pods diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest.release.xcconfig b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest.release.xcconfig new file mode 100644 index 0000000..3627a1c --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest.release.xcconfig @@ -0,0 +1,8 @@ +GCC_PREPROCESSOR_DEFINITIONS = $(inherited) COCOAPODS=1 +HEADER_SEARCH_PATHS = $(inherited) "${PODS_ROOT}/Headers/Public" "${PODS_ROOT}/Headers/Public/EZAudio" "${PODS_ROOT}/Headers/Public/TPCircularBuffer" +LIBRARY_SEARCH_PATHS = $(inherited) "$PODS_CONFIGURATION_BUILD_DIR/EZAudio" "$PODS_CONFIGURATION_BUILD_DIR/TPCircularBuffer" +OTHER_CFLAGS = $(inherited) -isystem "${PODS_ROOT}/Headers/Public" -isystem "${PODS_ROOT}/Headers/Public/EZAudio" -isystem "${PODS_ROOT}/Headers/Public/TPCircularBuffer" +OTHER_LDFLAGS = $(inherited) -ObjC -l"EZAudio" -l"TPCircularBuffer" -framework "AVFoundation" -framework "Accelerate" -framework "AudioToolbox" -framework "GLKit" +PODS_BUILD_DIR = $BUILD_DIR +PODS_CONFIGURATION_BUILD_DIR = $PODS_BUILD_DIR/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME) +PODS_ROOT = ${SRCROOT}/Pods diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/TPCircularBuffer/TPCircularBuffer-dummy.m b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/TPCircularBuffer/TPCircularBuffer-dummy.m new file mode 
100644 index 0000000..dd59cd0 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/TPCircularBuffer/TPCircularBuffer-dummy.m @@ -0,0 +1,5 @@ +#import +@interface PodsDummy_TPCircularBuffer : NSObject +@end +@implementation PodsDummy_TPCircularBuffer +@end diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/TPCircularBuffer/TPCircularBuffer-prefix.pch b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/TPCircularBuffer/TPCircularBuffer-prefix.pch new file mode 100644 index 0000000..beb2a24 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/TPCircularBuffer/TPCircularBuffer-prefix.pch @@ -0,0 +1,12 @@ +#ifdef __OBJC__ +#import +#else +#ifndef FOUNDATION_EXPORT +#if defined(__cplusplus) +#define FOUNDATION_EXPORT extern "C" +#else +#define FOUNDATION_EXPORT extern +#endif +#endif +#endif + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/TPCircularBuffer/TPCircularBuffer.xcconfig b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/TPCircularBuffer/TPCircularBuffer.xcconfig new file mode 100644 index 0000000..5629b7c --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/Pods/Target Support Files/TPCircularBuffer/TPCircularBuffer.xcconfig @@ -0,0 +1,10 @@ +CONFIGURATION_BUILD_DIR = $PODS_CONFIGURATION_BUILD_DIR/TPCircularBuffer +GCC_PREPROCESSOR_DEFINITIONS = $(inherited) COCOAPODS=1 +HEADER_SEARCH_PATHS = "${PODS_ROOT}/Headers/Private" "${PODS_ROOT}/Headers/Private/TPCircularBuffer" "${PODS_ROOT}/Headers/Public" "${PODS_ROOT}/Headers/Public/EZAudio" "${PODS_ROOT}/Headers/Public/TPCircularBuffer" +OTHER_LDFLAGS = -framework "AudioToolbox" +PODS_BUILD_DIR = $BUILD_DIR +PODS_CONFIGURATION_BUILD_DIR = $PODS_BUILD_DIR/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME) +PODS_ROOT = ${SRCROOT} +PODS_TARGET_SRCROOT = ${PODS_ROOT}/TPCircularBuffer +PRODUCT_BUNDLE_IDENTIFIER = org.cocoapods.${PRODUCT_NAME:rfc1034identifier} +SKIP_INSTALL = YES diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest.xcodeproj/project.pbxproj b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest.xcodeproj/project.pbxproj new file mode 100644 index 0000000..6890353 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest.xcodeproj/project.pbxproj @@ -0,0 +1,652 @@ +// !$*UTF8*$! 
+{ + archiveVersion = 1; + classes = { + }; + objectVersion = 46; + objects = { + +/* Begin PBXBuildFile section */ + 2244BF6407642EFDC31DAD72 /* libPods-SnowboyTest.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 9825F267A840425D1E4CCAC4 /* libPods-SnowboyTest.a */; }; + EAD06BE81E55992900C23345 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = EAD06BE71E55992900C23345 /* main.m */; }; + EAD06BEB1E55992900C23345 /* AppDelegate.m in Sources */ = {isa = PBXBuildFile; fileRef = EAD06BEA1E55992900C23345 /* AppDelegate.m */; }; + EAD06BEE1E55992900C23345 /* ViewController.mm in Sources */ = {isa = PBXBuildFile; fileRef = EAD06BED1E55992900C23345 /* ViewController.mm */; }; + EAD06BF11E55992A00C23345 /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = EAD06BEF1E55992A00C23345 /* Main.storyboard */; }; + EAD06BF31E55992A00C23345 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = EAD06BF21E55992A00C23345 /* Assets.xcassets */; }; + EAD06BF61E55992A00C23345 /* LaunchScreen.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = EAD06BF41E55992A00C23345 /* LaunchScreen.storyboard */; }; + EAD06C011E55992A00C23345 /* SnowboyTestTests.m in Sources */ = {isa = PBXBuildFile; fileRef = EAD06C001E55992A00C23345 /* SnowboyTestTests.m */; }; + EAD06C0C1E55992A00C23345 /* SnowboyTestUITests.m in Sources */ = {isa = PBXBuildFile; fileRef = EAD06C0B1E55992A00C23345 /* SnowboyTestUITests.m */; }; + EAD06C1A1E559BAA00C23345 /* libsnowboy-detect.a in Frameworks */ = {isa = PBXBuildFile; fileRef = EAD06C191E559BAA00C23345 /* libsnowboy-detect.a */; }; + EAD06C1F1E55A0E000C23345 /* alexa.umdl in Resources */ = {isa = PBXBuildFile; fileRef = EAD06C1D1E559F7E00C23345 /* alexa.umdl */; }; + EAD06C201E55A0E000C23345 /* common.res in Resources */ = {isa = PBXBuildFile; fileRef = EAD06C1B1E559BCE00C23345 /* common.res */; }; +/* End PBXBuildFile section */ + +/* Begin PBXContainerItemProxy section */ + EAD06BFD1E55992A00C23345 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = EAD06BDB1E55992900C23345 /* Project object */; + proxyType = 1; + remoteGlobalIDString = EAD06BE21E55992900C23345; + remoteInfo = SnowboyTest; + }; + EAD06C081E55992A00C23345 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = EAD06BDB1E55992900C23345 /* Project object */; + proxyType = 1; + remoteGlobalIDString = EAD06BE21E55992900C23345; + remoteInfo = SnowboyTest; + }; +/* End PBXContainerItemProxy section */ + +/* Begin PBXFileReference section */ + 5CBB35CAB788AC29D83325B4 /* Pods-SnowboyTest.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-SnowboyTest.debug.xcconfig"; path = "Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest.debug.xcconfig"; sourceTree = ""; }; + 9825F267A840425D1E4CCAC4 /* libPods-SnowboyTest.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-SnowboyTest.a"; sourceTree = BUILT_PRODUCTS_DIR; }; + AD21A54DF38F3AEB7BFAD0F3 /* Pods-SnowboyTest.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-SnowboyTest.release.xcconfig"; path = "Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest.release.xcconfig"; sourceTree = ""; }; + EAD06BE31E55992900C23345 /* SnowboyTest.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = SnowboyTest.app; sourceTree = BUILT_PRODUCTS_DIR; }; + 
EAD06BE71E55992900C23345 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = ""; }; + EAD06BE91E55992900C23345 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + EAD06BEA1E55992900C23345 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + EAD06BEC1E55992900C23345 /* ViewController.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ViewController.h; sourceTree = ""; }; + EAD06BED1E55992900C23345 /* ViewController.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = ViewController.mm; sourceTree = ""; }; + EAD06BF01E55992A00C23345 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = ""; }; + EAD06BF21E55992A00C23345 /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = ""; }; + EAD06BF51E55992A00C23345 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/LaunchScreen.storyboard; sourceTree = ""; }; + EAD06BF71E55992A00C23345 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; + EAD06BFC1E55992A00C23345 /* SnowboyTestTests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = SnowboyTestTests.xctest; sourceTree = BUILT_PRODUCTS_DIR; }; + EAD06C001E55992A00C23345 /* SnowboyTestTests.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = SnowboyTestTests.m; sourceTree = ""; }; + EAD06C021E55992A00C23345 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; + EAD06C071E55992A00C23345 /* SnowboyTestUITests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = SnowboyTestUITests.xctest; sourceTree = BUILT_PRODUCTS_DIR; }; + EAD06C0B1E55992A00C23345 /* SnowboyTestUITests.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = SnowboyTestUITests.m; sourceTree = ""; }; + EAD06C0D1E55992A00C23345 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; + EAD06C191E559BAA00C23345 /* libsnowboy-detect.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = "libsnowboy-detect.a"; sourceTree = ""; }; + EAD06C1B1E559BCE00C23345 /* common.res */ = {isa = PBXFileReference; lastKnownFileType = file; path = common.res; sourceTree = ""; }; + EAD06C1C1E559C4B00C23345 /* snowboy-detect.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "snowboy-detect.h"; sourceTree = ""; }; + EAD06C1D1E559F7E00C23345 /* alexa.umdl */ = {isa = PBXFileReference; lastKnownFileType = file; path = alexa.umdl; sourceTree = ""; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + EAD06BE01E55992900C23345 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + EAD06C1A1E559BAA00C23345 /* libsnowboy-detect.a in Frameworks */, + 2244BF6407642EFDC31DAD72 /* libPods-SnowboyTest.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + EAD06BF91E55992A00C23345 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + 
buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; + EAD06C041E55992A00C23345 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + 3E63D7F6A0B9B21F74052E4A /* Pods */ = { + isa = PBXGroup; + children = ( + 5CBB35CAB788AC29D83325B4 /* Pods-SnowboyTest.debug.xcconfig */, + AD21A54DF38F3AEB7BFAD0F3 /* Pods-SnowboyTest.release.xcconfig */, + ); + name = Pods; + sourceTree = ""; + }; + 50CB785434B4CFC6D30EAB3C /* Frameworks */ = { + isa = PBXGroup; + children = ( + 9825F267A840425D1E4CCAC4 /* libPods-SnowboyTest.a */, + ); + name = Frameworks; + sourceTree = ""; + }; + EAD06BDA1E55992900C23345 = { + isa = PBXGroup; + children = ( + EAD06BE51E55992900C23345 /* SnowboyTest */, + EAD06BFF1E55992A00C23345 /* SnowboyTestTests */, + EAD06C0A1E55992A00C23345 /* SnowboyTestUITests */, + EAD06BE41E55992900C23345 /* Products */, + 3E63D7F6A0B9B21F74052E4A /* Pods */, + 50CB785434B4CFC6D30EAB3C /* Frameworks */, + ); + sourceTree = ""; + }; + EAD06BE41E55992900C23345 /* Products */ = { + isa = PBXGroup; + children = ( + EAD06BE31E55992900C23345 /* SnowboyTest.app */, + EAD06BFC1E55992A00C23345 /* SnowboyTestTests.xctest */, + EAD06C071E55992A00C23345 /* SnowboyTestUITests.xctest */, + ); + name = Products; + sourceTree = ""; + }; + EAD06BE51E55992900C23345 /* SnowboyTest */ = { + isa = PBXGroup; + children = ( + EAD06C1E1E559F8200C23345 /* snowboy */, + EAD06BE91E55992900C23345 /* AppDelegate.h */, + EAD06BEA1E55992900C23345 /* AppDelegate.m */, + EAD06BEC1E55992900C23345 /* ViewController.h */, + EAD06BED1E55992900C23345 /* ViewController.mm */, + EAD06BEF1E55992A00C23345 /* Main.storyboard */, + EAD06BF21E55992A00C23345 /* Assets.xcassets */, + EAD06BF41E55992A00C23345 /* LaunchScreen.storyboard */, + EAD06BF71E55992A00C23345 /* Info.plist */, + EAD06BE61E55992900C23345 /* Supporting Files */, + ); + path = SnowboyTest; + sourceTree = ""; + }; + EAD06BE61E55992900C23345 /* Supporting Files */ = { + isa = PBXGroup; + children = ( + EAD06BE71E55992900C23345 /* main.m */, + ); + name = "Supporting Files"; + sourceTree = ""; + }; + EAD06BFF1E55992A00C23345 /* SnowboyTestTests */ = { + isa = PBXGroup; + children = ( + EAD06C001E55992A00C23345 /* SnowboyTestTests.m */, + EAD06C021E55992A00C23345 /* Info.plist */, + ); + path = SnowboyTestTests; + sourceTree = ""; + }; + EAD06C0A1E55992A00C23345 /* SnowboyTestUITests */ = { + isa = PBXGroup; + children = ( + EAD06C0B1E55992A00C23345 /* SnowboyTestUITests.m */, + EAD06C0D1E55992A00C23345 /* Info.plist */, + ); + path = SnowboyTestUITests; + sourceTree = ""; + }; + EAD06C1E1E559F8200C23345 /* snowboy */ = { + isa = PBXGroup; + children = ( + EAD06C1D1E559F7E00C23345 /* alexa.umdl */, + EAD06C1C1E559C4B00C23345 /* snowboy-detect.h */, + EAD06C191E559BAA00C23345 /* libsnowboy-detect.a */, + EAD06C1B1E559BCE00C23345 /* common.res */, + ); + name = snowboy; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXNativeTarget section */ + EAD06BE21E55992900C23345 /* SnowboyTest */ = { + isa = PBXNativeTarget; + buildConfigurationList = EAD06C101E55992A00C23345 /* Build configuration list for PBXNativeTarget "SnowboyTest" */; + buildPhases = ( + E09B2FE3EA5E4C1DA30DCA20 /* [CP] Check Pods Manifest.lock */, + EAD06BDF1E55992900C23345 /* Sources */, + EAD06BE01E55992900C23345 /* Frameworks */, + EAD06BE11E55992900C23345 /* Resources */, + 
A587E7E5B4851FF18745B26F /* [CP] Embed Pods Frameworks */, + 64C96E859D56C813CF65A6D2 /* [CP] Copy Pods Resources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = SnowboyTest; + productName = SnowboyTest; + productReference = EAD06BE31E55992900C23345 /* SnowboyTest.app */; + productType = "com.apple.product-type.application"; + }; + EAD06BFB1E55992A00C23345 /* SnowboyTestTests */ = { + isa = PBXNativeTarget; + buildConfigurationList = EAD06C131E55992A00C23345 /* Build configuration list for PBXNativeTarget "SnowboyTestTests" */; + buildPhases = ( + EAD06BF81E55992A00C23345 /* Sources */, + EAD06BF91E55992A00C23345 /* Frameworks */, + EAD06BFA1E55992A00C23345 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + EAD06BFE1E55992A00C23345 /* PBXTargetDependency */, + ); + name = SnowboyTestTests; + productName = SnowboyTestTests; + productReference = EAD06BFC1E55992A00C23345 /* SnowboyTestTests.xctest */; + productType = "com.apple.product-type.bundle.unit-test"; + }; + EAD06C061E55992A00C23345 /* SnowboyTestUITests */ = { + isa = PBXNativeTarget; + buildConfigurationList = EAD06C161E55992A00C23345 /* Build configuration list for PBXNativeTarget "SnowboyTestUITests" */; + buildPhases = ( + EAD06C031E55992A00C23345 /* Sources */, + EAD06C041E55992A00C23345 /* Frameworks */, + EAD06C051E55992A00C23345 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + EAD06C091E55992A00C23345 /* PBXTargetDependency */, + ); + name = SnowboyTestUITests; + productName = SnowboyTestUITests; + productReference = EAD06C071E55992A00C23345 /* SnowboyTestUITests.xctest */; + productType = "com.apple.product-type.bundle.ui-testing"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + EAD06BDB1E55992900C23345 /* Project object */ = { + isa = PBXProject; + attributes = { + LastUpgradeCheck = 0820; + ORGANIZATIONNAME = Kitt.ai; + TargetAttributes = { + EAD06BE21E55992900C23345 = { + CreatedOnToolsVersion = 8.2.1; + DevelopmentTeam = DMZDNM92BE; + ProvisioningStyle = Automatic; + }; + EAD06BFB1E55992A00C23345 = { + CreatedOnToolsVersion = 8.2.1; + DevelopmentTeam = DMZDNM92BE; + ProvisioningStyle = Automatic; + TestTargetID = EAD06BE21E55992900C23345; + }; + EAD06C061E55992A00C23345 = { + CreatedOnToolsVersion = 8.2.1; + DevelopmentTeam = DMZDNM92BE; + ProvisioningStyle = Automatic; + TestTargetID = EAD06BE21E55992900C23345; + }; + }; + }; + buildConfigurationList = EAD06BDE1E55992900C23345 /* Build configuration list for PBXProject "SnowboyTest" */; + compatibilityVersion = "Xcode 3.2"; + developmentRegion = English; + hasScannedForEncodings = 0; + knownRegions = ( + en, + Base, + ); + mainGroup = EAD06BDA1E55992900C23345; + productRefGroup = EAD06BE41E55992900C23345 /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + EAD06BE21E55992900C23345 /* SnowboyTest */, + EAD06BFB1E55992A00C23345 /* SnowboyTestTests */, + EAD06C061E55992A00C23345 /* SnowboyTestUITests */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXResourcesBuildPhase section */ + EAD06BE11E55992900C23345 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + EAD06C1F1E55A0E000C23345 /* alexa.umdl in Resources */, + EAD06C201E55A0E000C23345 /* common.res in Resources */, + EAD06BF61E55992A00C23345 /* LaunchScreen.storyboard in Resources */, + EAD06BF31E55992A00C23345 /* Assets.xcassets in Resources */, + EAD06BF11E55992A00C23345 /* Main.storyboard in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 
EAD06BFA1E55992A00C23345 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; + EAD06C051E55992A00C23345 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXResourcesBuildPhase section */ + +/* Begin PBXShellScriptBuildPhase section */ + 64C96E859D56C813CF65A6D2 /* [CP] Copy Pods Resources */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "[CP] Copy Pods Resources"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest-resources.sh\"\n"; + showEnvVarsInLog = 0; + }; + A587E7E5B4851FF18745B26F /* [CP] Embed Pods Frameworks */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "[CP] Embed Pods Frameworks"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-SnowboyTest/Pods-SnowboyTest-frameworks.sh\"\n"; + showEnvVarsInLog = 0; + }; + E09B2FE3EA5E4C1DA30DCA20 /* [CP] Check Pods Manifest.lock */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "[CP] Check Pods Manifest.lock"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. 
Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n"; + showEnvVarsInLog = 0; + }; +/* End PBXShellScriptBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + EAD06BDF1E55992900C23345 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + EAD06BEE1E55992900C23345 /* ViewController.mm in Sources */, + EAD06BEB1E55992900C23345 /* AppDelegate.m in Sources */, + EAD06BE81E55992900C23345 /* main.m in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + EAD06BF81E55992A00C23345 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + EAD06C011E55992A00C23345 /* SnowboyTestTests.m in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + EAD06C031E55992A00C23345 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + EAD06C0C1E55992A00C23345 /* SnowboyTestUITests.m in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin PBXTargetDependency section */ + EAD06BFE1E55992A00C23345 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = EAD06BE21E55992900C23345 /* SnowboyTest */; + targetProxy = EAD06BFD1E55992A00C23345 /* PBXContainerItemProxy */; + }; + EAD06C091E55992A00C23345 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = EAD06BE21E55992900C23345 /* SnowboyTest */; + targetProxy = EAD06C081E55992A00C23345 /* PBXContainerItemProxy */; + }; +/* End PBXTargetDependency section */ + +/* Begin PBXVariantGroup section */ + EAD06BEF1E55992A00C23345 /* Main.storyboard */ = { + isa = PBXVariantGroup; + children = ( + EAD06BF01E55992A00C23345 /* Base */, + ); + name = Main.storyboard; + sourceTree = ""; + }; + EAD06BF41E55992A00C23345 /* LaunchScreen.storyboard */ = { + isa = PBXVariantGroup; + children = ( + EAD06BF51E55992A00C23345 /* Base */, + ); + name = LaunchScreen.storyboard; + sourceTree = ""; + }; +/* End PBXVariantGroup section */ + +/* Begin XCBuildConfiguration section */ + EAD06C0E1E55992A00C23345 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_DYNAMIC_NO_PIC = NO; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 10.2; + MTL_ENABLE_DEBUG_INFO = YES; + ONLY_ACTIVE_ARCH = YES; + 
SDKROOT = iphoneos; + }; + name = Debug; + }; + EAD06C0F1E55992A00C23345 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 10.2; + MTL_ENABLE_DEBUG_INFO = NO; + SDKROOT = iphoneos; + VALIDATE_PRODUCT = YES; + }; + name = Release; + }; + EAD06C111E55992A00C23345 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 5CBB35CAB788AC29D83325B4 /* Pods-SnowboyTest.debug.xcconfig */; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + DEVELOPMENT_TEAM = DMZDNM92BE; + ENABLE_BITCODE = NO; + GCC_OPTIMIZATION_LEVEL = fast; + INFOPLIST_FILE = SnowboyTest/Info.plist; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks"; + LIBRARY_SEARCH_PATHS = ( + "$(inherited)", + "$(PROJECT_DIR)/SnowboyTest", + ); + LLVM_LTO = YES; + PRODUCT_BUNDLE_IDENTIFIER = com.snowboy.SnowboyTest; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + EAD06C121E55992A00C23345 /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = AD21A54DF38F3AEB7BFAD0F3 /* Pods-SnowboyTest.release.xcconfig */; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + DEVELOPMENT_TEAM = DMZDNM92BE; + ENABLE_BITCODE = NO; + INFOPLIST_FILE = SnowboyTest/Info.plist; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks"; + LIBRARY_SEARCH_PATHS = ( + "$(inherited)", + "$(PROJECT_DIR)/SnowboyTest", + ); + LLVM_LTO = YES; + PRODUCT_BUNDLE_IDENTIFIER = com.snowboy.SnowboyTest; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + EAD06C141E55992A00C23345 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + BUNDLE_LOADER = "$(TEST_HOST)"; + DEVELOPMENT_TEAM = DMZDNM92BE; + INFOPLIST_FILE = SnowboyTestTests/Info.plist; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + PRODUCT_BUNDLE_IDENTIFIER = com.snowboy.SnowboyTestTests; + PRODUCT_NAME = "$(TARGET_NAME)"; + TEST_HOST = "$(BUILT_PRODUCTS_DIR)/SnowboyTest.app/SnowboyTest"; + }; + name = Debug; + }; + EAD06C151E55992A00C23345 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + BUNDLE_LOADER = "$(TEST_HOST)"; + DEVELOPMENT_TEAM = DMZDNM92BE; + INFOPLIST_FILE = SnowboyTestTests/Info.plist; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + 
PRODUCT_BUNDLE_IDENTIFIER = com.snowboy.SnowboyTestTests; + PRODUCT_NAME = "$(TARGET_NAME)"; + TEST_HOST = "$(BUILT_PRODUCTS_DIR)/SnowboyTest.app/SnowboyTest"; + }; + name = Release; + }; + EAD06C171E55992A00C23345 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + DEVELOPMENT_TEAM = DMZDNM92BE; + INFOPLIST_FILE = SnowboyTestUITests/Info.plist; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + PRODUCT_BUNDLE_IDENTIFIER = com.snowboy.SnowboyTestUITests; + PRODUCT_NAME = "$(TARGET_NAME)"; + TEST_TARGET_NAME = SnowboyTest; + }; + name = Debug; + }; + EAD06C181E55992A00C23345 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + DEVELOPMENT_TEAM = DMZDNM92BE; + INFOPLIST_FILE = SnowboyTestUITests/Info.plist; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + PRODUCT_BUNDLE_IDENTIFIER = com.snowboy.SnowboyTestUITests; + PRODUCT_NAME = "$(TARGET_NAME)"; + TEST_TARGET_NAME = SnowboyTest; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + EAD06BDE1E55992900C23345 /* Build configuration list for PBXProject "SnowboyTest" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + EAD06C0E1E55992A00C23345 /* Debug */, + EAD06C0F1E55992A00C23345 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + EAD06C101E55992A00C23345 /* Build configuration list for PBXNativeTarget "SnowboyTest" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + EAD06C111E55992A00C23345 /* Debug */, + EAD06C121E55992A00C23345 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + EAD06C131E55992A00C23345 /* Build configuration list for PBXNativeTarget "SnowboyTestTests" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + EAD06C141E55992A00C23345 /* Debug */, + EAD06C151E55992A00C23345 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + EAD06C161E55992A00C23345 /* Build configuration list for PBXNativeTarget "SnowboyTestUITests" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + EAD06C171E55992A00C23345 /* Debug */, + EAD06C181E55992A00C23345 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = EAD06BDB1E55992900C23345 /* Project object */; +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest.xcodeproj/project.xcworkspace/contents.xcworkspacedata b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest.xcodeproj/project.xcworkspace/contents.xcworkspacedata new file mode 100644 index 0000000..6274ae4 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest.xcodeproj/project.xcworkspace/contents.xcworkspacedata @@ -0,0 +1,7 @@ + + + + + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest.xcworkspace/contents.xcworkspacedata b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest.xcworkspace/contents.xcworkspacedata new file mode 100644 index 0000000..68b0d85 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest.xcworkspace/contents.xcworkspacedata @@ -0,0 +1,10 @@ + + + + + + + diff --git 
a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/AppDelegate.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/AppDelegate.h new file mode 100644 index 0000000..356ec0c --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/AppDelegate.h @@ -0,0 +1,19 @@ +// +// AppDelegate.h +// SnowboyTest +// +// Created by Patrick Quinn on 16/02/2017. +// Copyright © 2017 Kitt.ai. All rights reserved. +// + +#import +#import + + +@interface AppDelegate : UIResponder + +@property (strong, nonatomic) UIWindow *window; + + +@end + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/AppDelegate.m b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/AppDelegate.m new file mode 100644 index 0000000..a53d542 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/AppDelegate.m @@ -0,0 +1,64 @@ +// +// AppDelegate.m +// SnowboyTest +// +// Created by Patrick Quinn on 16/02/2017. +// Copyright © 2017 Kitt.ai. All rights reserved. +// + +#import "AppDelegate.h" + +@interface AppDelegate () + +@end + +@implementation AppDelegate + + +- (BOOL)application:(UIApplication *)application didFinishLaunchingWithOptions:(NSDictionary *)launchOptions { + // Override point for customization after application launch. + AVAudioSession *session = [AVAudioSession sharedInstance]; + NSError *error; + [session setCategory:AVAudioSessionCategoryPlayAndRecord error:&error]; + [session setMode:AVAudioSessionModeMeasurement error:&error]; + + UInt32 doChangeDefaultRoute = 1; + AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryDefaultToSpeaker, sizeof(doChangeDefaultRoute), &doChangeDefaultRoute); + + [session setActive:YES withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation error:&error]; + if (error) { + NSLog(@"ERROR%@", error); + } + + return YES; +} + + +- (void)applicationWillResignActive:(UIApplication *)application { + // Sent when the application is about to move from active to inactive state. This can occur for certain types of temporary interruptions (such as an incoming phone call or SMS message) or when the user quits the application and it begins the transition to the background state. + // Use this method to pause ongoing tasks, disable timers, and invalidate graphics rendering callbacks. Games should use this method to pause the game. +} + + +- (void)applicationDidEnterBackground:(UIApplication *)application { + // Use this method to release shared resources, save user data, invalidate timers, and store enough application state information to restore your application to its current state in case it is terminated later. + // If your application supports background execution, this method is called instead of applicationWillTerminate: when the user quits. +} + + +- (void)applicationWillEnterForeground:(UIApplication *)application { + // Called as part of the transition from the background to the active state; here you can undo many of the changes made on entering the background. +} + + +- (void)applicationDidBecomeActive:(UIApplication *)application { + // Restart any tasks that were paused (or not yet started) while the application was inactive. If the application was previously in the background, optionally refresh the user interface. +} + + +- (void)applicationWillTerminate:(UIApplication *)application { + // Called when the application is about to terminate. 
Save data if appropriate. See also applicationDidEnterBackground:. +} + + +@end diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Assets.xcassets/AppIcon.appiconset/Contents.json b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Assets.xcassets/AppIcon.appiconset/Contents.json new file mode 100644 index 0000000..b8236c6 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Assets.xcassets/AppIcon.appiconset/Contents.json @@ -0,0 +1,48 @@ +{ + "images" : [ + { + "idiom" : "iphone", + "size" : "20x20", + "scale" : "2x" + }, + { + "idiom" : "iphone", + "size" : "20x20", + "scale" : "3x" + }, + { + "idiom" : "iphone", + "size" : "29x29", + "scale" : "2x" + }, + { + "idiom" : "iphone", + "size" : "29x29", + "scale" : "3x" + }, + { + "idiom" : "iphone", + "size" : "40x40", + "scale" : "2x" + }, + { + "idiom" : "iphone", + "size" : "40x40", + "scale" : "3x" + }, + { + "idiom" : "iphone", + "size" : "60x60", + "scale" : "2x" + }, + { + "idiom" : "iphone", + "size" : "60x60", + "scale" : "3x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Assets.xcassets/Contents.json b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Assets.xcassets/Contents.json new file mode 100644 index 0000000..da4a164 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Assets.xcassets/Contents.json @@ -0,0 +1,6 @@ +{ + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Assets.xcassets/banner.imageset/Contents.json b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Assets.xcassets/banner.imageset/Contents.json new file mode 100644 index 0000000..b3e3d07 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Assets.xcassets/banner.imageset/Contents.json @@ -0,0 +1,23 @@ +{ + "images" : [ + { + "idiom" : "universal", + "filename" : "snowboy.png", + "scale" : "1x" + }, + { + "idiom" : "universal", + "filename" : "snowboy-1.png", + "scale" : "2x" + }, + { + "idiom" : "universal", + "filename" : "snowboy-2.png", + "scale" : "3x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Assets.xcassets/banner.imageset/snowboy-1.png b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Assets.xcassets/banner.imageset/snowboy-1.png new file mode 100644 index 0000000..2bcdb32 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Assets.xcassets/banner.imageset/snowboy-1.png differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Assets.xcassets/banner.imageset/snowboy-2.png b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Assets.xcassets/banner.imageset/snowboy-2.png new file mode 100644 index 0000000..2bcdb32 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Assets.xcassets/banner.imageset/snowboy-2.png differ diff --git 
a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Assets.xcassets/banner.imageset/snowboy.png b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Assets.xcassets/banner.imageset/snowboy.png new file mode 100644 index 0000000..2bcdb32 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Assets.xcassets/banner.imageset/snowboy.png differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Base.lproj/LaunchScreen.storyboard b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Base.lproj/LaunchScreen.storyboard new file mode 100644 index 0000000..eaa631f --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Base.lproj/LaunchScreen.storyboard @@ -0,0 +1,40 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Base.lproj/Main.storyboard b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Base.lproj/Main.storyboard new file mode 100644 index 0000000..c6a0701 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Base.lproj/Main.storyboard @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Info.plist b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Info.plist new file mode 100644 index 0000000..6b8850a --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/Info.plist @@ -0,0 +1,38 @@ + + + + + CFBundleDevelopmentRegion + en + CFBundleExecutable + $(EXECUTABLE_NAME) + CFBundleIdentifier + $(PRODUCT_BUNDLE_IDENTIFIER) + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + $(PRODUCT_NAME) + CFBundlePackageType + APPL + CFBundleShortVersionString + 1.0 + CFBundleVersion + 1 + LSRequiresIPhoneOS + + NSMicrophoneUsageDescription + Makes use of Microphone to detect hotwords + UILaunchStoryboardName + LaunchScreen + UIMainStoryboardFile + Main + UIRequiredDeviceCapabilities + + armv7 + + UISupportedInterfaceOrientations + + UIInterfaceOrientationPortrait + + + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/ViewController.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/ViewController.h new file mode 100644 index 0000000..1bbb82b --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/ViewController.h @@ -0,0 +1,29 @@ +// +// ViewController.h +// SnowboyTest +// +// Created by Patrick Quinn on 16/02/2017. +// Copyright © 2017 Kitt.ai. All rights reserved. 
+// + +#import +#import +#import +#import + + +#import "snowboy-detect.h" + +@interface ViewController : UIViewController { + snowboy::SnowboyDetect* _snowboyDetect; + int detection_countdown; +} + +@property (strong, nonatomic) IBOutlet UILabel *detected; + +@property (nonatomic, strong) EZMicrophone *microphone; + + + +@end + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/ViewController.mm b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/ViewController.mm new file mode 100644 index 0000000..16467f6 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/ViewController.mm @@ -0,0 +1,80 @@ +// +// ViewController.m +// SnowboyTest +// +// Created by Patrick Quinn on 16/02/2017. +// Copyright © 2017 Kitt.ai. All rights reserved. +// + +#import "ViewController.h" + +@interface ViewController () + +@end + +@implementation ViewController + +- (void)viewDidLoad { + [super viewDidLoad]; + [self initPermissions]; + [self initSnowboy]; + [self initMic]; +} + +- (void) initPermissions { + [AVCaptureDevice requestAccessForMediaType:AVMediaTypeAudio completionHandler:nil]; +} + +- (void)initSnowboy { + _snowboyDetect = NULL; + _snowboyDetect = new snowboy::SnowboyDetect(std::string([[[NSBundle mainBundle]pathForResource:@"common" ofType:@"res"] UTF8String]), + std::string([[[NSBundle mainBundle]pathForResource:@"alexa" ofType:@"umdl"] UTF8String])); + _snowboyDetect->SetSensitivity("0.5"); + _snowboyDetect->SetAudioGain(1.0); + _snowboyDetect->ApplyFrontend(false); +} + +- (void) initMic { + AudioStreamBasicDescription audioStreamBasicDescription = [EZAudioUtilities monoFloatFormatWithSampleRate:16000]; + audioStreamBasicDescription.mFormatID = kAudioFormatLinearPCM; + audioStreamBasicDescription.mSampleRate = 16000; + audioStreamBasicDescription.mFramesPerPacket = 1; + audioStreamBasicDescription.mBytesPerPacket = 2; + audioStreamBasicDescription.mBytesPerFrame = 2; + audioStreamBasicDescription.mChannelsPerFrame = 1; + audioStreamBasicDescription.mBitsPerChannel = 16; + audioStreamBasicDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked; + audioStreamBasicDescription.mReserved = 0; + + NSArray *inputs = [EZAudioDevice inputDevices]; + [self.microphone setDevice:[inputs lastObject]]; + self.microphone = [EZMicrophone microphoneWithDelegate:self withAudioStreamBasicDescription:audioStreamBasicDescription]; + [self.microphone startFetchingAudio]; +} + +-(void) microphone:(EZMicrophone *)microphone + hasAudioReceived:(float **)buffer + withBufferSize:(UInt32)bufferSize +withNumberOfChannels:(UInt32)numberOfChannels { + dispatch_async(dispatch_get_main_queue(),^{ + int result = _snowboyDetect->RunDetection(buffer[0], bufferSize); + if (result == 1) { + self.detected.text = @"Hotword Detected"; + detection_countdown = 30; + } else { + if (detection_countdown == 0){ + self.detected.text = @"No Hotword Detected"; + } else { + detection_countdown--; + } + } + }); +} + +- (void)didReceiveMemoryWarning { + [super didReceiveMemoryWarning]; + [self initSnowboy]; +} + + +@end diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/alexa.umdl b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/alexa.umdl new file mode 100644 index 0000000..c4a6094 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/alexa.umdl differ 
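ViewController.mm above is the whole detection path of the Obj-C example: it constructs a snowboy::SnowboyDetect from the bundled common.res and alexa.umdl, applies SetSensitivity/SetAudioGain/ApplyFrontend, and passes every EZMicrophone buffer to RunDetection on the main queue. For orientation, the sketch below drives the same C++ API (declared in the snowboy-detect.h header added further down in this diff) outside of iOS. The file names common.res, alexa.umdl and audio.raw, and the raw 16 kHz / 16-bit / mono PCM input, are placeholder assumptions and not part of this commit.

```cpp
// Hedged stand-alone sketch: feed 16-bit mono PCM chunks to snowboy::SnowboyDetect.
// All file names are placeholders; only the API shown in snowboy-detect.h is used.
#include <cstdint>
#include <fstream>
#include <iostream>
#include <vector>

#include "snowboy-detect.h"

int main() {
  // Same resource/model pair that the iOS example bundles.
  snowboy::SnowboyDetect detector("common.res", "alexa.umdl");
  detector.SetSensitivity("0.5");   // one value per hotword, comma separated
  detector.SetAudioGain(1.0);       // boost quiet microphones if needed
  detector.ApplyFrontend(false);    // frontend processing off, as in ViewController.mm

  // Feed roughly 0.1 s per call; at 16 kHz mono that is 1600 samples.
  const int chunk = detector.SampleRate() / 10;
  std::vector<int16_t> samples(chunk);

  std::ifstream pcm("audio.raw", std::ios::binary);
  while (pcm.read(reinterpret_cast<char*>(samples.data()),
                  samples.size() * sizeof(int16_t))) {
    int result = detector.RunDetection(samples.data(),
                                       static_cast<int>(samples.size()));
    if (result > 0) {
      std::cout << "Hotword " << result << " detected\n";  // 1-based hotword index
    } else if (result == -1) {
      std::cerr << "Detection error\n";
    }
    // result == 0: no event; result == -2: silence.
  }
  return 0;
}
```

Per the header's documentation, RunDetection returns -2 for silence, -1 on error, 0 for no event, and the 1-based index of the triggered hotword otherwise, which is why the iOS ViewController only checks for a result of 1 with its single alexa.umdl model.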
diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/common.res b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/common.res new file mode 100644 index 0000000..0e267f5 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/common.res differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/libsnowboy-detect.a b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/libsnowboy-detect.a new file mode 100644 index 0000000..42703b2 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/libsnowboy-detect.a differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/main.m b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/main.m new file mode 100644 index 0000000..1a197fc --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/main.m @@ -0,0 +1,16 @@ +// +// main.m +// SnowboyTest +// +// Created by Patrick Quinn on 16/02/2017. +// Copyright © 2017 Kitt.ai. All rights reserved. +// + +#import +#import "AppDelegate.h" + +int main(int argc, char * argv[]) { + @autoreleasepool { + return UIApplicationMain(argc, argv, nil, NSStringFromClass([AppDelegate class])); + } +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/snowboy-detect.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/snowboy-detect.h new file mode 100644 index 0000000..54e4cbe --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTest/snowboy-detect.h @@ -0,0 +1,131 @@ +// include/snowboy-detect.h + +// Copyright 2016 KITT.AI (author: Guoguo Chen) + +#ifndef SNOWBOY_INCLUDE_SNOWBOY_DETECT_H_ +#define SNOWBOY_INCLUDE_SNOWBOY_DETECT_H_ + +#include +#include + +namespace snowboy { + +// Forward declaration. +struct WaveHeader; +class PipelineDetect; + +//////////////////////////////////////////////////////////////////////////////// +// +// SnowboyDetect class interface. +// +//////////////////////////////////////////////////////////////////////////////// +class SnowboyDetect { + public: + // Constructor that takes a resource file, and a list of hotword models which + // are separated by comma. In the case that more than one hotword exist in the + // provided models, RunDetection() will return the index of the hotword, if + // the corresponding hotword is triggered. + // + // CAVEAT: a personal model only contain one hotword, but an universal model + // may contain multiple hotwords. It is your responsibility to figure + // out the index of the hotword. For example, if your model string is + // "foo.pmdl,bar.umdl", where foo.pmdl contains hotword x, bar.umdl + // has two hotwords y and z, the indices of different hotwords are as + // follows: + // x 1 + // y 2 + // z 3 + // + // @param [in] resource_filename Filename of resource file. + // @param [in] model_str A string of multiple hotword models, + // separated by comma. + SnowboyDetect(const std::string& resource_filename, + const std::string& model_str); + + // Resets the detection. This class handles voice activity detection (VAD) + // internally. But if you have an external VAD, you should call Reset() + // whenever you see segment end from your VAD. + bool Reset(); + + // Runs hotword detection. 
Supported audio format is WAVE (with linear PCM, + // 8-bits unsigned integer, 16-bits signed integer or 32-bits signed integer). + // See SampleRate(), NumChannels() and BitsPerSample() for the required + // sampling rate, number of channels and bits per sample values. You are + // supposed to provide a small chunk of data (e.g., 0.1 second) each time you + // call RunDetection(). Larger chunk usually leads to longer delay, but less + // CPU usage. + // + // Definition of return values: + // -2: Silence. + // -1: Error. + // 0: No event. + // 1: Hotword 1 triggered. + // 2: Hotword 2 triggered. + // ... + // + // @param [in] data Small chunk of data to be detected. See + // above for the supported data format. + int RunDetection(const std::string& data); + + // Various versions of RunDetection() that take different format of audio. If + // NumChannels() > 1, e.g., NumChannels() == 2, then the array is as follows: + // + // d1c1, d1c2, d2c1, d2c2, d3c1, d3c2, ..., dNc1, dNc2 + // + // where d1c1 means data point 1 of channel 1. + // + // @param [in] data Small chunk of data to be detected. See + // above for the supported data format. + // @param [in] array_length Length of the data array. + int RunDetection(const float* const data, const int array_length); + int RunDetection(const int16_t* const data, const int array_length); + int RunDetection(const int32_t* const data, const int array_length); + + // Sets the sensitivity string for the loaded hotwords. A is + // a list of floating numbers between 0 and 1, and separated by comma. For + // example, if there are 3 loaded hotwords, your string should looks something + // like this: + // 0.4,0.5,0.8 + // Make sure you properly align the sensitivity value to the corresponding + // hotword. + void SetSensitivity(const std::string& sensitivity_str); + + // Returns the sensitivity string for the current hotwords. + std::string GetSensitivity() const; + + // Applied a fixed gain to the input audio. In case you have a very weak + // microphone, you can use this function to boost input audio level. + void SetAudioGain(const float audio_gain); + + // Writes the models to the model filenames specified in in the + // constructor. This overwrites the original model with the latest parameter + // setting. You are supposed to call this function if you have updated the + // hotword sensitivities through SetSensitivity(), and you would like to store + // those values in the model as the default value. + void UpdateModel() const; + + // Returns the number of the loaded hotwords. This helps you to figure the + // index of the hotwords. + int NumHotwords() const; + + // If is true, then apply frontend audio processing; + // otherwise turns the audio processing off. + void ApplyFrontend(const bool apply_frontend); + + // Returns the required sampling rate, number of channels and bits per sample + // values for the audio data. You should use this information to set up your + // audio capturing interface. 
+ int SampleRate() const; + int NumChannels() const; + int BitsPerSample() const; + + ~SnowboyDetect(); + + private: + std::unique_ptr wave_header_; + std::unique_ptr detect_pipeline_; +}; + +} // namespace snowboy + +#endif // SNOWBOY_INCLUDE_SNOWBOY_DETECT_H_ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTestTests/Info.plist b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTestTests/Info.plist new file mode 100644 index 0000000..6c6c23c --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTestTests/Info.plist @@ -0,0 +1,22 @@ + + + + + CFBundleDevelopmentRegion + en + CFBundleExecutable + $(EXECUTABLE_NAME) + CFBundleIdentifier + $(PRODUCT_BUNDLE_IDENTIFIER) + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + $(PRODUCT_NAME) + CFBundlePackageType + BNDL + CFBundleShortVersionString + 1.0 + CFBundleVersion + 1 + + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTestTests/SnowboyTestTests.m b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTestTests/SnowboyTestTests.m new file mode 100644 index 0000000..cbefd07 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTestTests/SnowboyTestTests.m @@ -0,0 +1,39 @@ +// +// SnowboyTestTests.m +// SnowboyTestTests +// +// Created by Patrick Quinn on 16/02/2017. +// Copyright © 2017 Kitt.ai. All rights reserved. +// + +#import + +@interface SnowboyTestTests : XCTestCase + +@end + +@implementation SnowboyTestTests + +- (void)setUp { + [super setUp]; + // Put setup code here. This method is called before the invocation of each test method in the class. +} + +- (void)tearDown { + // Put teardown code here. This method is called after the invocation of each test method in the class. + [super tearDown]; +} + +- (void)testExample { + // This is an example of a functional test case. + // Use XCTAssert and related functions to verify your tests produce the correct results. +} + +- (void)testPerformanceExample { + // This is an example of a performance test case. + [self measureBlock:^{ + // Put the code you want to measure the time of here. + }]; +} + +@end diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTestUITests/Info.plist b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTestUITests/Info.plist new file mode 100644 index 0000000..6c6c23c --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTestUITests/Info.plist @@ -0,0 +1,22 @@ + + + + + CFBundleDevelopmentRegion + en + CFBundleExecutable + $(EXECUTABLE_NAME) + CFBundleIdentifier + $(PRODUCT_BUNDLE_IDENTIFIER) + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + $(PRODUCT_NAME) + CFBundlePackageType + BNDL + CFBundleShortVersionString + 1.0 + CFBundleVersion + 1 + + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTestUITests/SnowboyTestUITests.m b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTestUITests/SnowboyTestUITests.m new file mode 100644 index 0000000..9dc70eb --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Obj-C/SnowboyTestUITests/SnowboyTestUITests.m @@ -0,0 +1,40 @@ +// +// SnowboyTestUITests.m +// SnowboyTestUITests +// +// Created by Patrick Quinn on 16/02/2017. +// Copyright © 2017 Kitt.ai. All rights reserved. 
+// + +#import + +@interface SnowboyTestUITests : XCTestCase + +@end + +@implementation SnowboyTestUITests + +- (void)setUp { + [super setUp]; + + // Put setup code here. This method is called before the invocation of each test method in the class. + + // In UI tests it is usually best to stop immediately when a failure occurs. + self.continueAfterFailure = NO; + // UI tests must launch the application that they test. Doing this in setup will make sure it happens for each test method. + [[[XCUIApplication alloc] init] launch]; + + // In UI tests it’s important to set the initial state - such as interface orientation - required for your tests before they run. The setUp method is a good place to do this. +} + +- (void)tearDown { + // Put teardown code here. This method is called after the invocation of each test method in the class. + [super tearDown]; +} + +- (void)testExample { + // Use recording to get started writing UI tests. + // Use XCTAssert and related functions to verify your tests produce the correct results. +} + +@end diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/README.md b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/README.md new file mode 100644 index 0000000..70d67c0 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/README.md @@ -0,0 +1,2 @@ +# kitt-snowboy-swift3-sample-app +This is a sample iOS-Swift3 app that demonstrate the capabilities of KITT.AI Snowboy diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest.xcodeproj/project.pbxproj b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest.xcodeproj/project.pbxproj new file mode 100644 index 0000000..7467303 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest.xcodeproj/project.pbxproj @@ -0,0 +1,628 @@ +// !$*UTF8*$! 
+{ + archiveVersion = 1; + classes = { + }; + objectVersion = 46; + objects = { + +/* Begin PBXBuildFile section */ + 045F549D1E58A8FD00F8F4A4 /* libsnowboy-detect.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 045F549C1E58A8FD00F8F4A4 /* libsnowboy-detect.a */; }; + 045F54A01E58AAEC00F8F4A4 /* common.res in Resources */ = {isa = PBXBuildFile; fileRef = 045F549F1E58AAEC00F8F4A4 /* common.res */; }; + 045F54A21E58AAF900F8F4A4 /* alexa_02092017.umdl in Resources */ = {isa = PBXBuildFile; fileRef = 045F54A11E58AAF900F8F4A4 /* alexa_02092017.umdl */; }; + 04D8BB8B1E539066001D71B9 /* SnowboyWrapper.mm in Sources */ = {isa = PBXBuildFile; fileRef = 04D8BB8A1E539066001D71B9 /* SnowboyWrapper.mm */; }; + 04D8BB8D1E53916F001D71B9 /* Accelerate.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 04D8BB8C1E53916F001D71B9 /* Accelerate.framework */; }; + 04F91B9A1E522B6F00BEF6E3 /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = 04F91B991E522B6F00BEF6E3 /* AppDelegate.swift */; }; + 04F91B9C1E522B6F00BEF6E3 /* ViewController.swift in Sources */ = {isa = PBXBuildFile; fileRef = 04F91B9B1E522B6F00BEF6E3 /* ViewController.swift */; }; + 04F91B9F1E522B6F00BEF6E3 /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 04F91B9D1E522B6F00BEF6E3 /* Main.storyboard */; }; + 04F91BA21E522B6F00BEF6E3 /* SnowboyTest.xcdatamodeld in Sources */ = {isa = PBXBuildFile; fileRef = 04F91BA01E522B6F00BEF6E3 /* SnowboyTest.xcdatamodeld */; }; + 04F91BA41E522B6F00BEF6E3 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 04F91BA31E522B6F00BEF6E3 /* Assets.xcassets */; }; + 04F91BA71E522B6F00BEF6E3 /* LaunchScreen.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 04F91BA51E522B6F00BEF6E3 /* LaunchScreen.storyboard */; }; + 04F91BB21E522B6F00BEF6E3 /* SnowboyTestTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 04F91BB11E522B6F00BEF6E3 /* SnowboyTestTests.swift */; }; + 04F91BBD1E522B6F00BEF6E3 /* SnowboyTestUITests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 04F91BBC1E522B6F00BEF6E3 /* SnowboyTestUITests.swift */; }; +/* End PBXBuildFile section */ + +/* Begin PBXContainerItemProxy section */ + 04F91BAE1E522B6F00BEF6E3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 04F91B8E1E522B6F00BEF6E3 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 04F91B951E522B6F00BEF6E3; + remoteInfo = SnowboyTest; + }; + 04F91BB91E522B6F00BEF6E3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 04F91B8E1E522B6F00BEF6E3 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 04F91B951E522B6F00BEF6E3; + remoteInfo = SnowboyTest; + }; +/* End PBXContainerItemProxy section */ + +/* Begin PBXFileReference section */ + 045F549A1E58A7EA00F8F4A4 /* snowboy-detect.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "snowboy-detect.h"; path = "../../../../include/snowboy-detect.h"; sourceTree = ""; }; + 045F549C1E58A8FD00F8F4A4 /* libsnowboy-detect.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = "libsnowboy-detect.a"; path = "../../../lib/ios/libsnowboy-detect.a"; sourceTree = ""; }; + 045F549F1E58AAEC00F8F4A4 /* common.res */ = {isa = PBXFileReference; lastKnownFileType = file; name = common.res; path = ../../../../resources/common.res; sourceTree = ""; }; + 045F54A11E58AAF900F8F4A4 /* alexa_02092017.umdl */ = {isa = PBXFileReference; lastKnownFileType = file; name = alexa_02092017.umdl; path = 
../../../../resources/alexa/alexa_02092017.umdl; sourceTree = ""; }; + 04D8BB881E52C242001D71B9 /* SnowboyTest-Bridging-Header.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "SnowboyTest-Bridging-Header.h"; sourceTree = ""; }; + 04D8BB891E5382BB001D71B9 /* SnowboyWrapper.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SnowboyWrapper.h; sourceTree = ""; }; + 04D8BB8A1E539066001D71B9 /* SnowboyWrapper.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = SnowboyWrapper.mm; sourceTree = ""; }; + 04D8BB8C1E53916F001D71B9 /* Accelerate.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Accelerate.framework; path = System/Library/Frameworks/Accelerate.framework; sourceTree = SDKROOT; }; + 04F91B961E522B6F00BEF6E3 /* SnowboyTest.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = SnowboyTest.app; sourceTree = BUILT_PRODUCTS_DIR; }; + 04F91B991E522B6F00BEF6E3 /* AppDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AppDelegate.swift; sourceTree = ""; }; + 04F91B9B1E522B6F00BEF6E3 /* ViewController.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ViewController.swift; sourceTree = ""; }; + 04F91B9E1E522B6F00BEF6E3 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = ""; }; + 04F91BA11E522B6F00BEF6E3 /* SnowboyTest.xcdatamodel */ = {isa = PBXFileReference; lastKnownFileType = wrapper.xcdatamodel; path = SnowboyTest.xcdatamodel; sourceTree = ""; }; + 04F91BA31E522B6F00BEF6E3 /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = ""; }; + 04F91BA61E522B6F00BEF6E3 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/LaunchScreen.storyboard; sourceTree = ""; }; + 04F91BA81E522B6F00BEF6E3 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; + 04F91BAD1E522B6F00BEF6E3 /* SnowboyTestTests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = SnowboyTestTests.xctest; sourceTree = BUILT_PRODUCTS_DIR; }; + 04F91BB11E522B6F00BEF6E3 /* SnowboyTestTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SnowboyTestTests.swift; sourceTree = ""; }; + 04F91BB31E522B6F00BEF6E3 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; + 04F91BB81E522B6F00BEF6E3 /* SnowboyTestUITests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = SnowboyTestUITests.xctest; sourceTree = BUILT_PRODUCTS_DIR; }; + 04F91BBC1E522B6F00BEF6E3 /* SnowboyTestUITests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SnowboyTestUITests.swift; sourceTree = ""; }; + 04F91BBE1E522B6F00BEF6E3 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + 04F91B931E522B6F00BEF6E3 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 04D8BB8D1E53916F001D71B9 /* Accelerate.framework in 
Frameworks */, + 045F549D1E58A8FD00F8F4A4 /* libsnowboy-detect.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 04F91BAA1E522B6F00BEF6E3 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 04F91BB51E522B6F00BEF6E3 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + 04D8BB911E53C41D001D71B9 /* Resources */ = { + isa = PBXGroup; + children = ( + 045F549F1E58AAEC00F8F4A4 /* common.res */, + 045F54A11E58AAF900F8F4A4 /* alexa_02092017.umdl */, + ); + name = Resources; + sourceTree = ""; + }; + 04F91B8D1E522B6F00BEF6E3 = { + isa = PBXGroup; + children = ( + 04F91B981E522B6F00BEF6E3 /* SnowboyTest */, + 04F91BB01E522B6F00BEF6E3 /* SnowboyTestTests */, + 04F91BBB1E522B6F00BEF6E3 /* SnowboyTestUITests */, + 04F91B971E522B6F00BEF6E3 /* Products */, + 04F91BCB1E522BE300BEF6E3 /* Frameworks */, + ); + sourceTree = ""; + }; + 04F91B971E522B6F00BEF6E3 /* Products */ = { + isa = PBXGroup; + children = ( + 04F91B961E522B6F00BEF6E3 /* SnowboyTest.app */, + 04F91BAD1E522B6F00BEF6E3 /* SnowboyTestTests.xctest */, + 04F91BB81E522B6F00BEF6E3 /* SnowboyTestUITests.xctest */, + ); + name = Products; + sourceTree = ""; + }; + 04F91B981E522B6F00BEF6E3 /* SnowboyTest */ = { + isa = PBXGroup; + children = ( + 04D8BB911E53C41D001D71B9 /* Resources */, + 04F91B991E522B6F00BEF6E3 /* AppDelegate.swift */, + 04D8BB881E52C242001D71B9 /* SnowboyTest-Bridging-Header.h */, + 04F91B9B1E522B6F00BEF6E3 /* ViewController.swift */, + 045F549A1E58A7EA00F8F4A4 /* snowboy-detect.h */, + 04F91B9D1E522B6F00BEF6E3 /* Main.storyboard */, + 04F91BA31E522B6F00BEF6E3 /* Assets.xcassets */, + 04F91BA51E522B6F00BEF6E3 /* LaunchScreen.storyboard */, + 04F91BA81E522B6F00BEF6E3 /* Info.plist */, + 04F91BA01E522B6F00BEF6E3 /* SnowboyTest.xcdatamodeld */, + 04D8BB891E5382BB001D71B9 /* SnowboyWrapper.h */, + 04D8BB8A1E539066001D71B9 /* SnowboyWrapper.mm */, + ); + path = SnowboyTest; + sourceTree = ""; + }; + 04F91BB01E522B6F00BEF6E3 /* SnowboyTestTests */ = { + isa = PBXGroup; + children = ( + 04F91BB11E522B6F00BEF6E3 /* SnowboyTestTests.swift */, + 04F91BB31E522B6F00BEF6E3 /* Info.plist */, + ); + path = SnowboyTestTests; + sourceTree = ""; + }; + 04F91BBB1E522B6F00BEF6E3 /* SnowboyTestUITests */ = { + isa = PBXGroup; + children = ( + 04F91BBC1E522B6F00BEF6E3 /* SnowboyTestUITests.swift */, + 04F91BBE1E522B6F00BEF6E3 /* Info.plist */, + ); + path = SnowboyTestUITests; + sourceTree = ""; + }; + 04F91BCB1E522BE300BEF6E3 /* Frameworks */ = { + isa = PBXGroup; + children = ( + 045F549C1E58A8FD00F8F4A4 /* libsnowboy-detect.a */, + 04D8BB8C1E53916F001D71B9 /* Accelerate.framework */, + ); + name = Frameworks; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXNativeTarget section */ + 04F91B951E522B6F00BEF6E3 /* SnowboyTest */ = { + isa = PBXNativeTarget; + buildConfigurationList = 04F91BC11E522B6F00BEF6E3 /* Build configuration list for PBXNativeTarget "SnowboyTest" */; + buildPhases = ( + 04F91B921E522B6F00BEF6E3 /* Sources */, + 04F91B931E522B6F00BEF6E3 /* Frameworks */, + 04F91B941E522B6F00BEF6E3 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = SnowboyTest; + productName = SnowboyTest; + productReference = 04F91B961E522B6F00BEF6E3 /* SnowboyTest.app */; + productType = "com.apple.product-type.application"; + 
}; + 04F91BAC1E522B6F00BEF6E3 /* SnowboyTestTests */ = { + isa = PBXNativeTarget; + buildConfigurationList = 04F91BC41E522B6F00BEF6E3 /* Build configuration list for PBXNativeTarget "SnowboyTestTests" */; + buildPhases = ( + 04F91BA91E522B6F00BEF6E3 /* Sources */, + 04F91BAA1E522B6F00BEF6E3 /* Frameworks */, + 04F91BAB1E522B6F00BEF6E3 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + 04F91BAF1E522B6F00BEF6E3 /* PBXTargetDependency */, + ); + name = SnowboyTestTests; + productName = SnowboyTestTests; + productReference = 04F91BAD1E522B6F00BEF6E3 /* SnowboyTestTests.xctest */; + productType = "com.apple.product-type.bundle.unit-test"; + }; + 04F91BB71E522B6F00BEF6E3 /* SnowboyTestUITests */ = { + isa = PBXNativeTarget; + buildConfigurationList = 04F91BC71E522B6F00BEF6E3 /* Build configuration list for PBXNativeTarget "SnowboyTestUITests" */; + buildPhases = ( + 04F91BB41E522B6F00BEF6E3 /* Sources */, + 04F91BB51E522B6F00BEF6E3 /* Frameworks */, + 04F91BB61E522B6F00BEF6E3 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + 04F91BBA1E522B6F00BEF6E3 /* PBXTargetDependency */, + ); + name = SnowboyTestUITests; + productName = SnowboyTestUITests; + productReference = 04F91BB81E522B6F00BEF6E3 /* SnowboyTestUITests.xctest */; + productType = "com.apple.product-type.bundle.ui-testing"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + 04F91B8E1E522B6F00BEF6E3 /* Project object */ = { + isa = PBXProject; + attributes = { + LastSwiftUpdateCheck = 0820; + LastUpgradeCheck = 0820; + ORGANIZATIONNAME = "Bi, Sheng"; + TargetAttributes = { + 04F91B951E522B6F00BEF6E3 = { + CreatedOnToolsVersion = 8.2.1; + ProvisioningStyle = Automatic; + }; + 04F91BAC1E522B6F00BEF6E3 = { + CreatedOnToolsVersion = 8.2.1; + ProvisioningStyle = Automatic; + TestTargetID = 04F91B951E522B6F00BEF6E3; + }; + 04F91BB71E522B6F00BEF6E3 = { + CreatedOnToolsVersion = 8.2.1; + ProvisioningStyle = Automatic; + TestTargetID = 04F91B951E522B6F00BEF6E3; + }; + }; + }; + buildConfigurationList = 04F91B911E522B6F00BEF6E3 /* Build configuration list for PBXProject "SnowboyTest" */; + compatibilityVersion = "Xcode 3.2"; + developmentRegion = English; + hasScannedForEncodings = 0; + knownRegions = ( + en, + Base, + ); + mainGroup = 04F91B8D1E522B6F00BEF6E3; + productRefGroup = 04F91B971E522B6F00BEF6E3 /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + 04F91B951E522B6F00BEF6E3 /* SnowboyTest */, + 04F91BAC1E522B6F00BEF6E3 /* SnowboyTestTests */, + 04F91BB71E522B6F00BEF6E3 /* SnowboyTestUITests */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXResourcesBuildPhase section */ + 04F91B941E522B6F00BEF6E3 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 04F91BA71E522B6F00BEF6E3 /* LaunchScreen.storyboard in Resources */, + 045F54A21E58AAF900F8F4A4 /* alexa_02092017.umdl in Resources */, + 04F91BA41E522B6F00BEF6E3 /* Assets.xcassets in Resources */, + 04F91B9F1E522B6F00BEF6E3 /* Main.storyboard in Resources */, + 045F54A01E58AAEC00F8F4A4 /* common.res in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 04F91BAB1E522B6F00BEF6E3 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 04F91BB61E522B6F00BEF6E3 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXResourcesBuildPhase section */ + +/* Begin 
PBXSourcesBuildPhase section */ + 04F91B921E522B6F00BEF6E3 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 04D8BB8B1E539066001D71B9 /* SnowboyWrapper.mm in Sources */, + 04F91B9C1E522B6F00BEF6E3 /* ViewController.swift in Sources */, + 04F91B9A1E522B6F00BEF6E3 /* AppDelegate.swift in Sources */, + 04F91BA21E522B6F00BEF6E3 /* SnowboyTest.xcdatamodeld in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 04F91BA91E522B6F00BEF6E3 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 04F91BB21E522B6F00BEF6E3 /* SnowboyTestTests.swift in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 04F91BB41E522B6F00BEF6E3 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 04F91BBD1E522B6F00BEF6E3 /* SnowboyTestUITests.swift in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin PBXTargetDependency section */ + 04F91BAF1E522B6F00BEF6E3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 04F91B951E522B6F00BEF6E3 /* SnowboyTest */; + targetProxy = 04F91BAE1E522B6F00BEF6E3 /* PBXContainerItemProxy */; + }; + 04F91BBA1E522B6F00BEF6E3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 04F91B951E522B6F00BEF6E3 /* SnowboyTest */; + targetProxy = 04F91BB91E522B6F00BEF6E3 /* PBXContainerItemProxy */; + }; +/* End PBXTargetDependency section */ + +/* Begin PBXVariantGroup section */ + 04F91B9D1E522B6F00BEF6E3 /* Main.storyboard */ = { + isa = PBXVariantGroup; + children = ( + 04F91B9E1E522B6F00BEF6E3 /* Base */, + ); + name = Main.storyboard; + sourceTree = ""; + }; + 04F91BA51E522B6F00BEF6E3 /* LaunchScreen.storyboard */ = { + isa = PBXVariantGroup; + children = ( + 04F91BA61E522B6F00BEF6E3 /* Base */, + ); + name = LaunchScreen.storyboard; + sourceTree = ""; + }; +/* End PBXVariantGroup section */ + +/* Begin XCBuildConfiguration section */ + 04F91BBF1E522B6F00BEF6E3 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_DYNAMIC_NO_PIC = NO; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 10.2; + MTL_ENABLE_DEBUG_INFO = YES; + ONLY_ACTIVE_ARCH = YES; + SDKROOT = iphoneos; + SWIFT_ACTIVE_COMPILATION_CONDITIONS = DEBUG; + 
SWIFT_OPTIMIZATION_LEVEL = "-Onone"; + TARGETED_DEVICE_FAMILY = "1,2"; + }; + name = Debug; + }; + 04F91BC01E522B6F00BEF6E3 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 10.2; + MTL_ENABLE_DEBUG_INFO = NO; + SDKROOT = iphoneos; + SWIFT_OPTIMIZATION_LEVEL = "-Owholemodule"; + TARGETED_DEVICE_FAMILY = "1,2"; + VALIDATE_PRODUCT = YES; + }; + name = Release; + }; + 04F91BC21E522B6F00BEF6E3 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + CLANG_CXX_LIBRARY = "libc++"; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + DEVELOPMENT_TEAM = ""; + ENABLE_BITCODE = NO; + FRAMEWORK_SEARCH_PATHS = "$(PROJECT_DIR)"; + GCC_INPUT_FILETYPE = automatic; + HEADER_SEARCH_PATHS = ( + "$(PROJECT_DIR)", + "$(PROJECT_DIR)/SnowboyTest", + ); + INFOPLIST_FILE = SnowboyTest/Info.plist; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks"; + LIBRARY_SEARCH_PATHS = ( + "$(inherited)", + "$(PROJECT_DIR)", + "$(PROJECT_DIR)/../../../lib/ios", + ); + PRODUCT_BUNDLE_IDENTIFIER = com.shengbi.SnowboyTest; + PRODUCT_NAME = "$(TARGET_NAME)"; + SCAN_ALL_SOURCE_FILES_FOR_INCLUDES = YES; + SWIFT_INSTALL_OBJC_HEADER = YES; + SWIFT_OBJC_BRIDGING_HEADER = "SnowboyTest/SnowboyTest-Bridging-Header.h"; + SWIFT_VERSION = 3.0; + USER_HEADER_SEARCH_PATHS = "$(PROJECT_DIR) $(PROJECT_DIR)/SnowboyTest"; + }; + name = Debug; + }; + 04F91BC31E522B6F00BEF6E3 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + CLANG_CXX_LIBRARY = "libc++"; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + DEVELOPMENT_TEAM = ""; + ENABLE_BITCODE = NO; + FRAMEWORK_SEARCH_PATHS = "$(PROJECT_DIR)"; + GCC_INPUT_FILETYPE = automatic; + HEADER_SEARCH_PATHS = ( + "$(PROJECT_DIR)", + "$(PROJECT_DIR)/SnowboyTest", + ); + INFOPLIST_FILE = SnowboyTest/Info.plist; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks"; + LIBRARY_SEARCH_PATHS = ( + "$(inherited)", + "$(PROJECT_DIR)", + "$(PROJECT_DIR)/../../../lib/ios", + ); + PRODUCT_BUNDLE_IDENTIFIER = com.shengbi.SnowboyTest; + PRODUCT_NAME = "$(TARGET_NAME)"; + SCAN_ALL_SOURCE_FILES_FOR_INCLUDES = YES; + SWIFT_INSTALL_OBJC_HEADER = YES; + SWIFT_OBJC_BRIDGING_HEADER = "SnowboyTest/SnowboyTest-Bridging-Header.h"; + SWIFT_VERSION = 3.0; + 
USER_HEADER_SEARCH_PATHS = "$(PROJECT_DIR) $(PROJECT_DIR)/SnowboyTest"; + }; + name = Release; + }; + 04F91BC51E522B6F00BEF6E3 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES = YES; + BUNDLE_LOADER = "$(TEST_HOST)"; + INFOPLIST_FILE = SnowboyTestTests/Info.plist; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + PRODUCT_BUNDLE_IDENTIFIER = com.shengbi.SnowboyTestTests; + PRODUCT_NAME = "$(TARGET_NAME)"; + SWIFT_VERSION = 3.0; + TEST_HOST = "$(BUILT_PRODUCTS_DIR)/SnowboyTest.app/SnowboyTest"; + }; + name = Debug; + }; + 04F91BC61E522B6F00BEF6E3 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES = YES; + BUNDLE_LOADER = "$(TEST_HOST)"; + INFOPLIST_FILE = SnowboyTestTests/Info.plist; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + PRODUCT_BUNDLE_IDENTIFIER = com.shengbi.SnowboyTestTests; + PRODUCT_NAME = "$(TARGET_NAME)"; + SWIFT_VERSION = 3.0; + TEST_HOST = "$(BUILT_PRODUCTS_DIR)/SnowboyTest.app/SnowboyTest"; + }; + name = Release; + }; + 04F91BC81E522B6F00BEF6E3 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES = YES; + INFOPLIST_FILE = SnowboyTestUITests/Info.plist; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + PRODUCT_BUNDLE_IDENTIFIER = com.shengbi.SnowboyTestUITests; + PRODUCT_NAME = "$(TARGET_NAME)"; + SWIFT_VERSION = 3.0; + TEST_TARGET_NAME = SnowboyTest; + }; + name = Debug; + }; + 04F91BC91E522B6F00BEF6E3 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES = YES; + INFOPLIST_FILE = SnowboyTestUITests/Info.plist; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + PRODUCT_BUNDLE_IDENTIFIER = com.shengbi.SnowboyTestUITests; + PRODUCT_NAME = "$(TARGET_NAME)"; + SWIFT_VERSION = 3.0; + TEST_TARGET_NAME = SnowboyTest; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + 04F91B911E522B6F00BEF6E3 /* Build configuration list for PBXProject "SnowboyTest" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 04F91BBF1E522B6F00BEF6E3 /* Debug */, + 04F91BC01E522B6F00BEF6E3 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 04F91BC11E522B6F00BEF6E3 /* Build configuration list for PBXNativeTarget "SnowboyTest" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 04F91BC21E522B6F00BEF6E3 /* Debug */, + 04F91BC31E522B6F00BEF6E3 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 04F91BC41E522B6F00BEF6E3 /* Build configuration list for PBXNativeTarget "SnowboyTestTests" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 04F91BC51E522B6F00BEF6E3 /* Debug */, + 04F91BC61E522B6F00BEF6E3 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 04F91BC71E522B6F00BEF6E3 /* Build configuration list for PBXNativeTarget "SnowboyTestUITests" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 04F91BC81E522B6F00BEF6E3 /* Debug */, + 04F91BC91E522B6F00BEF6E3 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + +/* Begin XCVersionGroup section */ + 
04F91BA01E522B6F00BEF6E3 /* SnowboyTest.xcdatamodeld */ = { + isa = XCVersionGroup; + children = ( + 04F91BA11E522B6F00BEF6E3 /* SnowboyTest.xcdatamodel */, + ); + currentVersion = 04F91BA11E522B6F00BEF6E3 /* SnowboyTest.xcdatamodel */; + path = SnowboyTest.xcdatamodeld; + sourceTree = ""; + versionGroupType = wrapper.xcdatamodel; + }; +/* End XCVersionGroup section */ + }; + rootObject = 04F91B8E1E522B6F00BEF6E3 /* Project object */; +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest.xcodeproj/project.xcworkspace/contents.xcworkspacedata b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest.xcodeproj/project.xcworkspace/contents.xcworkspacedata new file mode 100644 index 0000000..6274ae4 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest.xcodeproj/project.xcworkspace/contents.xcworkspacedata @@ -0,0 +1,7 @@ + + + + + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/AppDelegate.swift b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/AppDelegate.swift new file mode 100644 index 0000000..f5853ba --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/AppDelegate.swift @@ -0,0 +1,94 @@ +// +// AppDelegate.swift +// SnowboyTest +// +// Created by Bi, Sheng on 2/13/17. +// Copyright © 2017 Bi, Sheng. All rights reserved. +// + +import UIKit +import CoreData + +@UIApplicationMain +class AppDelegate: UIResponder, UIApplicationDelegate { + + var window: UIWindow? + + + func application(_ application: UIApplication, didFinishLaunchingWithOptions launchOptions: [UIApplicationLaunchOptionsKey: Any]?) -> Bool { + // Override point for customization after application launch. + + return true + } + + func applicationWillResignActive(_ application: UIApplication) { + // Sent when the application is about to move from active to inactive state. This can occur for certain types of temporary interruptions (such as an incoming phone call or SMS message) or when the user quits the application and it begins the transition to the background state. + // Use this method to pause ongoing tasks, disable timers, and invalidate graphics rendering callbacks. Games should use this method to pause the game. + } + + func applicationDidEnterBackground(_ application: UIApplication) { + // Use this method to release shared resources, save user data, invalidate timers, and store enough application state information to restore your application to its current state in case it is terminated later. + // If your application supports background execution, this method is called instead of applicationWillTerminate: when the user quits. + } + + func applicationWillEnterForeground(_ application: UIApplication) { + // Called as part of the transition from the background to the active state; here you can undo many of the changes made on entering the background. + } + + func applicationDidBecomeActive(_ application: UIApplication) { + // Restart any tasks that were paused (or not yet started) while the application was inactive. If the application was previously in the background, optionally refresh the user interface. + } + + func applicationWillTerminate(_ application: UIApplication) { + // Called when the application is about to terminate. Save data if appropriate. See also applicationDidEnterBackground:. + // Saves changes in the application's managed object context before the application terminates. 
+ self.saveContext() + } + + // MARK: - Core Data stack + + lazy var persistentContainer: NSPersistentContainer = { + /* + The persistent container for the application. This implementation + creates and returns a container, having loaded the store for the + application to it. This property is optional since there are legitimate + error conditions that could cause the creation of the store to fail. + */ + let container = NSPersistentContainer(name: "SnowboyTest") + container.loadPersistentStores(completionHandler: { (storeDescription, error) in + if let error = error as NSError? { + // Replace this implementation with code to handle the error appropriately. + // fatalError() causes the application to generate a crash log and terminate. You should not use this function in a shipping application, although it may be useful during development. + + /* + Typical reasons for an error here include: + * The parent directory does not exist, cannot be created, or disallows writing. + * The persistent store is not accessible, due to permissions or data protection when the device is locked. + * The device is out of space. + * The store could not be migrated to the current model version. + Check the error message to determine what the actual problem was. + */ + fatalError("Unresolved error \(error), \(error.userInfo)") + } + }) + return container + }() + + // MARK: - Core Data Saving support + + func saveContext () { + let context = persistentContainer.viewContext + if context.hasChanges { + do { + try context.save() + } catch { + // Replace this implementation with code to handle the error appropriately. + // fatalError() causes the application to generate a crash log and terminate. You should not use this function in a shipping application, although it may be useful during development. 
+ let nserror = error as NSError + fatalError("Unresolved error \(nserror), \(nserror.userInfo)") + } + } + } + +} + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/Assets.xcassets/AppIcon.appiconset/Contents.json b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/Assets.xcassets/AppIcon.appiconset/Contents.json new file mode 100644 index 0000000..1d060ed --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/Assets.xcassets/AppIcon.appiconset/Contents.json @@ -0,0 +1,93 @@ +{ + "images" : [ + { + "idiom" : "iphone", + "size" : "20x20", + "scale" : "2x" + }, + { + "idiom" : "iphone", + "size" : "20x20", + "scale" : "3x" + }, + { + "idiom" : "iphone", + "size" : "29x29", + "scale" : "2x" + }, + { + "idiom" : "iphone", + "size" : "29x29", + "scale" : "3x" + }, + { + "idiom" : "iphone", + "size" : "40x40", + "scale" : "2x" + }, + { + "idiom" : "iphone", + "size" : "40x40", + "scale" : "3x" + }, + { + "idiom" : "iphone", + "size" : "60x60", + "scale" : "2x" + }, + { + "idiom" : "iphone", + "size" : "60x60", + "scale" : "3x" + }, + { + "idiom" : "ipad", + "size" : "20x20", + "scale" : "1x" + }, + { + "idiom" : "ipad", + "size" : "20x20", + "scale" : "2x" + }, + { + "idiom" : "ipad", + "size" : "29x29", + "scale" : "1x" + }, + { + "idiom" : "ipad", + "size" : "29x29", + "scale" : "2x" + }, + { + "idiom" : "ipad", + "size" : "40x40", + "scale" : "1x" + }, + { + "idiom" : "ipad", + "size" : "40x40", + "scale" : "2x" + }, + { + "idiom" : "ipad", + "size" : "76x76", + "scale" : "1x" + }, + { + "idiom" : "ipad", + "size" : "76x76", + "scale" : "2x" + }, + { + "idiom" : "ipad", + "size" : "83.5x83.5", + "scale" : "2x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/Base.lproj/LaunchScreen.storyboard b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/Base.lproj/LaunchScreen.storyboard new file mode 100644 index 0000000..fdf3f97 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/Base.lproj/LaunchScreen.storyboard @@ -0,0 +1,27 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/Base.lproj/Main.storyboard b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/Base.lproj/Main.storyboard new file mode 100644 index 0000000..3b44219 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/Base.lproj/Main.storyboard @@ -0,0 +1,60 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/Info.plist b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/Info.plist new file mode 100644 index 0000000..57b1dce --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/Info.plist @@ -0,0 +1,47 @@ + + + + + CFBundleDevelopmentRegion + en + CFBundleExecutable + $(EXECUTABLE_NAME) + CFBundleIdentifier + $(PRODUCT_BUNDLE_IDENTIFIER) + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + $(PRODUCT_NAME) + CFBundlePackageType + APPL + CFBundleShortVersionString + 1.0 + NSMicrophoneUsageDescription + Need to use microphone + CFBundleVersion + 1 + 
LSRequiresIPhoneOS + + UILaunchStoryboardName + LaunchScreen + UIMainStoryboardFile + Main + UIRequiredDeviceCapabilities + + armv7 + + UISupportedInterfaceOrientations + + UIInterfaceOrientationPortrait + UIInterfaceOrientationLandscapeLeft + UIInterfaceOrientationLandscapeRight + + UISupportedInterfaceOrientations~ipad + + UIInterfaceOrientationPortrait + UIInterfaceOrientationPortraitUpsideDown + UIInterfaceOrientationLandscapeLeft + UIInterfaceOrientationLandscapeRight + + + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/SnowboyTest-Bridging-Header.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/SnowboyTest-Bridging-Header.h new file mode 100644 index 0000000..88c82d5 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/SnowboyTest-Bridging-Header.h @@ -0,0 +1,14 @@ +// +// SnowboyTest-Bridging-Header.h +// SnowboyTest +// +// Created by Bi, Sheng on 2/13/17. +// Copyright © 2017 Bi, Sheng. All rights reserved. +// + +#ifndef SnowboyTest_Bridging_Header_h +#define SnowboyTest_Bridging_Header_h + +#import "SnowboyWrapper.h" + +#endif /* SnowboyTest_Bridging_Header_h */ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/SnowboyTest.xcdatamodeld/.xccurrentversion b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/SnowboyTest.xcdatamodeld/.xccurrentversion new file mode 100644 index 0000000..920f673 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/SnowboyTest.xcdatamodeld/.xccurrentversion @@ -0,0 +1,8 @@ + + + + + _XCCurrentVersionName + SnowboyTest.xcdatamodel + + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/SnowboyTest.xcdatamodeld/SnowboyTest.xcdatamodel/contents b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/SnowboyTest.xcdatamodeld/SnowboyTest.xcdatamodel/contents new file mode 100644 index 0000000..476e5b6 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/SnowboyTest.xcdatamodeld/SnowboyTest.xcdatamodel/contents @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/SnowboyWrapper.h b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/SnowboyWrapper.h new file mode 100644 index 0000000..506b4fb --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/SnowboyWrapper.h @@ -0,0 +1,33 @@ +// +// SnowboyWrapper.h +// SnowboyTest +// +// Created by Bi, Sheng on 2/14/17. +// Copyright © 2017 Bi, Sheng. All rights reserved. +// + +#ifndef SnowboyWrapper_h +#define SnowboyWrapper_h + +#import + +// This is a wrapper Objective-C++ class around the C++ class snowboy-detect.h +// +// "You cannot import C++ code directly into Swift. Instead, create an Objective-C or C wrapper for C++ code." 
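+// In this example the wrapper is what SnowboyTest-Bridging-Header.h exposes to Swift;
+// its methods map one-to-one onto the C++ API in snowboy-detect.h
+// (initWithResources:modelStr: wraps the SnowboyDetect constructor,
+// runDetection:length: wraps RunDetection, setSensitivity: wraps SetSensitivity, and so on).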
+// See: https://developer.apple.com/library/content/documentation/Swift/Conceptual/BuildingCocoaApps/index.html#//apple_ref/doc/uid/TP40014216-CH2-ID0 +@interface SnowboyWrapper : NSObject + +-(id)initWithResources:(NSString*)resourceFileName modelStr:(NSString*)modelStr; +-(int)runDetection:(NSString*)data; +-(int)runDetection:(NSArray*)data length:(int)length; +-(void)setSensitivity:(NSString*)sensitivity; +-(bool)reset; +-(void)setAudioGain:(float)audioGain; +-(int)sampleRate; +-(int)numChannels; +-(int)bitsPerSample; + +@end + + +#endif /* SnowboyWrapper_h */ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/SnowboyWrapper.mm b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/SnowboyWrapper.mm new file mode 100644 index 0000000..9ac5363 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/SnowboyWrapper.mm @@ -0,0 +1,76 @@ +// +// SnowboyWrapper.m +// SnowboyTest +// +// Created by Bi, Sheng on 2/14/17. +// Copyright © 2017 Bi, Sheng. All rights reserved. +// + +#import "SnowboyWrapper.h" +#import "snowboy-detect.h" + +@interface SnowboyWrapper() +{ + snowboy::SnowboyDetect* snowboy; +} +@end + +@implementation SnowboyWrapper + +-(id)initWithResources:(NSString*)resourceFileName modelStr:(NSString*)modelStr +{ + std::string resource = [resourceFileName cStringUsingEncoding:[NSString defaultCStringEncoding]]; + std::string model = [modelStr cStringUsingEncoding:[NSString defaultCStringEncoding]]; + snowboy = new snowboy::SnowboyDetect(resource, model); + return self; +} + +-(void)setSensitivity:(NSString*)sensitivity +{ + snowboy->SetSensitivity([sensitivity cStringUsingEncoding:[NSString defaultCStringEncoding]]); +} + +-(int)runDetection:(NSString*)data +{ + return snowboy->RunDetection([data cStringUsingEncoding:[NSString defaultCStringEncoding]]); +} + +-(int)runDetection:(NSArray*)data length:(int)length +{ + long count = [data count]; + float* dataArray = (float*) malloc(sizeof(float*) * count); + for (int i = 0; i < count; ++i) { + dataArray[i] = [[data objectAtIndex:i] floatValue]; + } + + int detected = snowboy->RunDetection(dataArray, length); + free(dataArray); + return detected; +} + +-(bool)reset +{ + return snowboy->Reset(); +} + +-(void)setAudioGain:(float)audioGain +{ + return snowboy->SetAudioGain(audioGain); +} + +-(int)sampleRate +{ + return snowboy->SampleRate(); +} + +-(int)numChannels +{ + return snowboy->NumChannels(); +} + +-(int)bitsPerSample +{ + return snowboy->BitsPerSample(); +} + +@end diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/ViewController.swift b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/ViewController.swift new file mode 100644 index 0000000..1e14f7e --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTest/ViewController.swift @@ -0,0 +1,134 @@ +// +// ViewController.swift +// SnowboyTest +// +// Created by Bi, Sheng on 2/13/17. +// Copyright © 2017 Bi, Sheng. All rights reserved. +// + +import UIKit +import AVFoundation + +class ViewController: UIViewController, AVAudioRecorderDelegate, AVAudioPlayerDelegate { + + @IBOutlet weak var instructionLabel: UILabel! + @IBOutlet weak var resultLabel: UILabel! + @IBOutlet weak var btn: UIButton! 
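+    // Detection flow in this controller: tapping the button schedules a Timer that calls
+    // startRecording() every 4 s; each pass records ~2 s of 16 kHz mono audio to temp.wav,
+    // and audioRecorderDidFinishRecording(_:successfully:) then calls runSnowboy(), which
+    // reads the file into Float32 samples and feeds them to wrapper.runDetection(_:length:).
+    // A result of 1 or more means the wake word ("Alexa") was heard; 0 is no event,
+    // -1 an error and -2 silence (see snowboy-detect.h).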
+ + let WAKE_WORD = "Alexa" + let RESOURCE = Bundle.main.path(forResource: "common", ofType: "res") + let MODEL = Bundle.main.path(forResource: "alexa_02092017", ofType: "umdl") + + var wrapper: SnowboyWrapper! = nil + + var audioRecorder: AVAudioRecorder! + var audioPlayer: AVAudioPlayer! + var soundFileURL: URL! + + var timer: Timer! + var isStarted = false + + override func viewDidLoad() { + super.viewDidLoad() + + wrapper = SnowboyWrapper(resources: RESOURCE, modelStr: MODEL) + wrapper.setSensitivity("0.5") + wrapper.setAudioGain(1.0) + print("Sample rate: \(wrapper?.sampleRate()); channels: \(wrapper?.numChannels()); bits: \(wrapper?.bitsPerSample())") + } + + @IBAction func onClickBtn(_ sender: Any) { + if (isStarted) { + stopRecording() + timer.invalidate() + btn.setTitle("Start", for: .normal) + isStarted = false + } else { + timer = Timer.scheduledTimer(timeInterval: 4, target: self, selector: #selector(startRecording), userInfo: nil, repeats: true) + timer.fire() + btn.setTitle("Stop", for: .normal) + isStarted = true + } + } + + func runSnowboy() { + + let file = try! AVAudioFile(forReading: soundFileURL) + let format = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 16000.0, channels: 1, interleaved: false) + let buffer = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: AVAudioFrameCount(file.length)) + try! file.read(into: buffer) + let array = Array(UnsafeBufferPointer(start: buffer.floatChannelData![0], count:Int(buffer.frameLength))) + + print("Frame capacity: \(AVAudioFrameCount(file.length))") + print("Int16 channel array: \(array)") + print("Buffer frame length: \(buffer.frameLength)") + + let result = wrapper.runDetection(array, length: Int32(buffer.frameLength)) + resultLabel.text = "Snowboy result: \(result)" + print("Result: \(result)") + } + + func startRecording() { + do { + let fileMgr = FileManager.default + let dirPaths = fileMgr.urls(for: .documentDirectory, + in: .userDomainMask) + soundFileURL = dirPaths[0].appendingPathComponent("temp.wav") + let recordSettings = + [AVEncoderAudioQualityKey: AVAudioQuality.high.rawValue, + AVEncoderBitRateKey: 128000, + AVNumberOfChannelsKey: 1, + AVSampleRateKey: 16000.0] as [String : Any] + let audioSession = AVAudioSession.sharedInstance() + try audioSession.setCategory(AVAudioSessionCategoryRecord) + try audioRecorder = AVAudioRecorder(url: soundFileURL, + settings: recordSettings as [String : AnyObject]) + audioRecorder.delegate = self + audioRecorder.prepareToRecord() + audioRecorder.record(forDuration: 2.0) + instructionLabel.text = "Speak wake word: \(WAKE_WORD)" + + print("Started recording...") + } catch let error { + print("Audio session error: \(error.localizedDescription)") + } + } + + func stopRecording() { + if (audioRecorder != nil && audioRecorder.isRecording) { + audioRecorder.stop() + } + instructionLabel.text = "Stop" + print("Stopped recording...") + } + + func playAudio() { + do { + try audioPlayer = AVAudioPlayer(contentsOf:(soundFileURL)) + audioPlayer!.delegate = self + audioPlayer!.prepareToPlay() + audioPlayer!.play() + } catch let error { + print("Audio player error: \(error.localizedDescription)") + } + } + + func audioRecorderDidFinishRecording(_ recorder: AVAudioRecorder, successfully flag: Bool) { + print("Audio Recorder did finish recording.") + stopRecording() + runSnowboy() + } + + func audioRecorderEncodeErrorDidOccur(_ recorder: AVAudioRecorder, error: Error?) 
{ + print("Audio Recorder encode error.") + } + + func audioPlayerDidFinishPlaying(_ player: AVAudioPlayer, successfully flag: Bool) { + print("Audio player did finish playing.") + } + + func audioPlayerDecodeErrorDidOccur(_ player: AVAudioPlayer, error: Error?) { + print("Audio player decode error.") + } +} + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTestTests/Info.plist b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTestTests/Info.plist new file mode 100644 index 0000000..6c6c23c --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTestTests/Info.plist @@ -0,0 +1,22 @@ + + + + + CFBundleDevelopmentRegion + en + CFBundleExecutable + $(EXECUTABLE_NAME) + CFBundleIdentifier + $(PRODUCT_BUNDLE_IDENTIFIER) + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + $(PRODUCT_NAME) + CFBundlePackageType + BNDL + CFBundleShortVersionString + 1.0 + CFBundleVersion + 1 + + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTestTests/SnowboyTestTests.swift b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTestTests/SnowboyTestTests.swift new file mode 100644 index 0000000..c7760cd --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTestTests/SnowboyTestTests.swift @@ -0,0 +1,36 @@ +// +// SnowboyTestTests.swift +// SnowboyTestTests +// +// Created by Bi, Sheng on 2/13/17. +// Copyright © 2017 Bi, Sheng. All rights reserved. +// + +import XCTest +@testable import SnowboyTest + +class SnowboyTestTests: XCTestCase { + + override func setUp() { + super.setUp() + // Put setup code here. This method is called before the invocation of each test method in the class. + } + + override func tearDown() { + // Put teardown code here. This method is called after the invocation of each test method in the class. + super.tearDown() + } + + func testExample() { + // This is an example of a functional test case. + // Use XCTAssert and related functions to verify your tests produce the correct results. + } + + func testPerformanceExample() { + // This is an example of a performance test case. + self.measure { + // Put the code you want to measure the time of here. + } + } + +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTestUITests/Info.plist b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTestUITests/Info.plist new file mode 100644 index 0000000..6c6c23c --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTestUITests/Info.plist @@ -0,0 +1,22 @@ + + + + + CFBundleDevelopmentRegion + en + CFBundleExecutable + $(EXECUTABLE_NAME) + CFBundleIdentifier + $(PRODUCT_BUNDLE_IDENTIFIER) + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + $(PRODUCT_NAME) + CFBundlePackageType + BNDL + CFBundleShortVersionString + 1.0 + CFBundleVersion + 1 + + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTestUITests/SnowboyTestUITests.swift b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTestUITests/SnowboyTestUITests.swift new file mode 100644 index 0000000..b44004d --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/examples/iOS/Swift3/SnowboyTestUITests/SnowboyTestUITests.swift @@ -0,0 +1,36 @@ +// +// SnowboyTestUITests.swift +// SnowboyTestUITests +// +// Created by Bi, Sheng on 2/13/17. +// Copyright © 2017 Bi, Sheng. 
All rights reserved. +// + +import XCTest + +class SnowboyTestUITests: XCTestCase { + + override func setUp() { + super.setUp() + + // Put setup code here. This method is called before the invocation of each test method in the class. + + // In UI tests it is usually best to stop immediately when a failure occurs. + continueAfterFailure = false + // UI tests must launch the application that they test. Doing this in setup will make sure it happens for each test method. + XCUIApplication().launch() + + // In UI tests it’s important to set the initial state - such as interface orientation - required for your tests before they run. The setUp method is a good place to do this. + } + + override func tearDown() { + // Put teardown code here. This method is called after the invocation of each test method in the class. + super.tearDown() + } + + func testExample() { + // Use recording to get started writing UI tests. + // Use XCTAssert and related functions to verify your tests produce the correct results. + } + +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/include/snowboy-detect.h b/src/Guide_stick_system/voice_assistant/snowboy/include/snowboy-detect.h new file mode 100644 index 0000000..95e06c5 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/include/snowboy-detect.h @@ -0,0 +1,226 @@ +// include/snowboy-detect.h + +// Copyright 2016 KITT.AI (author: Guoguo Chen) + +#ifndef SNOWBOY_INCLUDE_SNOWBOY_DETECT_H_ +#define SNOWBOY_INCLUDE_SNOWBOY_DETECT_H_ + +#include +#include + +namespace snowboy { + +// Forward declaration. +struct WaveHeader; +class PipelineDetect; +class PipelineVad; + +//////////////////////////////////////////////////////////////////////////////// +// +// SnowboyDetect class interface. +// +//////////////////////////////////////////////////////////////////////////////// +class SnowboyDetect { + public: + // Constructor that takes a resource file, and a list of hotword models which + // are separated by comma. In the case that more than one hotword exist in the + // provided models, RunDetection() will return the index of the hotword, if + // the corresponding hotword is triggered. + // + // CAVEAT: a personal model only contain one hotword, but an universal model + // may contain multiple hotwords. It is your responsibility to figure + // out the index of the hotword. For example, if your model string is + // "foo.pmdl,bar.umdl", where foo.pmdl contains hotword x, bar.umdl + // has two hotwords y and z, the indices of different hotwords are as + // follows: + // x 1 + // y 2 + // z 3 + // + // @param [in] resource_filename Filename of resource file. + // @param [in] model_str A string of multiple hotword models, + // separated by comma. + SnowboyDetect(const std::string& resource_filename, + const std::string& model_str); + + // Resets the detection. This class handles voice activity detection (VAD) + // internally. But if you have an external VAD, you should call Reset() + // whenever you see segment end from your VAD. + bool Reset(); + + // Runs hotword detection. Supported audio format is WAVE (with linear PCM, + // 8-bits unsigned integer, 16-bits signed integer or 32-bits signed integer). + // See SampleRate(), NumChannels() and BitsPerSample() for the required + // sampling rate, number of channels and bits per sample values. You are + // supposed to provide a small chunk of data (e.g., 0.1 second) each time you + // call RunDetection(). Larger chunk usually leads to longer delay, but less + // CPU usage. 
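+  // A minimal calling sketch (file names taken from resources/ in this repository; the
+  // capture() helper is only a placeholder for your audio source, not part of this API):
+  //
+  //   snowboy::SnowboyDetect detector("resources/common.res",
+  //                                   "resources/alexa/alexa_02092017.umdl");
+  //   detector.SetSensitivity("0.5");
+  //   std::vector<int16_t> chunk;
+  //   while (capture(chunk)) {
+  //     int result = detector.RunDetection(chunk.data(), chunk.size(), false);
+  //     if (result > 0) { /* hotword with index `result` triggered */ }
+  //   }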
+ // + // Definition of return values: + // -2: Silence. + // -1: Error. + // 0: No event. + // 1: Hotword 1 triggered. + // 2: Hotword 2 triggered. + // ... + // + // @param [in] data Small chunk of data to be detected. See + // above for the supported data format. + // @param [in] is_end Set it to true if it is the end of a + // utterance or file. + int RunDetection(const std::string& data, bool is_end = false); + + // Various versions of RunDetection() that take different format of audio. If + // NumChannels() > 1, e.g., NumChannels() == 2, then the array is as follows: + // + // d1c1, d1c2, d2c1, d2c2, d3c1, d3c2, ..., dNc1, dNc2 + // + // where d1c1 means data point 1 of channel 1. + // + // @param [in] data Small chunk of data to be detected. See + // above for the supported data format. + // @param [in] array_length Length of the data array. + // @param [in] is_end Set it to true if it is the end of a + // utterance or file. + int RunDetection(const float* const data, + const int array_length, bool is_end = false); + int RunDetection(const int16_t* const data, + const int array_length, bool is_end = false); + int RunDetection(const int32_t* const data, + const int array_length, bool is_end = false); + + // Sets the sensitivity string for the loaded hotwords. A is + // a list of floating numbers between 0 and 1, and separated by comma. For + // example, if there are 3 loaded hotwords, your string should looks something + // like this: + // 0.4,0.5,0.8 + // Make sure you properly align the sensitivity value to the corresponding + // hotword. + void SetSensitivity(const std::string& sensitivity_str); + + // Similar to the sensitivity setting above. When set higher than the above + // sensitivity, the algorithm automatically chooses between the normal + // sensitivity set above and the higher sensitivity set here, to maximize the + // performance. By default, it is not set, which means the algorithm will + // stick with the sensitivity set above. + void SetHighSensitivity(const std::string& high_sensitivity_str); + + // Returns the sensitivity string for the current hotwords. + std::string GetSensitivity() const; + + // Applied a fixed gain to the input audio. In case you have a very weak + // microphone, you can use this function to boost input audio level. + void SetAudioGain(const float audio_gain); + + // Writes the models to the model filenames specified in in the + // constructor. This overwrites the original model with the latest parameter + // setting. You are supposed to call this function if you have updated the + // hotword sensitivities through SetSensitivity(), and you would like to store + // those values in the model as the default value. + void UpdateModel() const; + + // Returns the number of the loaded hotwords. This helps you to figure the + // index of the hotwords. + int NumHotwords() const; + + // If is true, then apply frontend audio processing; + // otherwise turns the audio processing off. Frontend audio processing + // includes algorithms such as automatic gain control (AGC), noise suppression + // (NS) and so on. Generally adding frontend audio processing helps the + // performance, but if the model is not trained with frontend audio + // processing, it may decrease the performance. The general rule of thumb is: + // 1. For personal models, set it to false. + // 2. 
For universal models, follow the instruction of each published model + void ApplyFrontend(const bool apply_frontend); + + // Returns the required sampling rate, number of channels and bits per sample + // values for the audio data. You should use this information to set up your + // audio capturing interface. + int SampleRate() const; + int NumChannels() const; + int BitsPerSample() const; + + ~SnowboyDetect(); + + private: + std::unique_ptr wave_header_; + std::unique_ptr detect_pipeline_; +}; + +//////////////////////////////////////////////////////////////////////////////// +// +// SnowboyVad class interface. +// +//////////////////////////////////////////////////////////////////////////////// +class SnowboyVad { + public: + // Constructor that takes a resource file. It shares the same resource file + // with SnowboyDetect. + SnowboyVad(const std::string& resource_filename); + + // Resets the VAD. + bool Reset(); + + // Runs the VAD algorithm. Supported audio format is WAVE (with linear PCM, + // 8-bits unsigned integer, 16-bits signed integer or 32-bits signed integer). + // See SampleRate(), NumChannels() and BitsPerSample() for the required + // sampling rate, number of channels and bits per sample values. You are + // supposed to provide a small chunk of data (e.g., 0.1 second) each time you + // call RunDetection(). Larger chunk usually leads to longer delay, but less + // CPU usage. + // + // Definition of return values: + // -2: Silence. + // -1: Error. + // 0: Non-silence. + // + // @param [in] data Small chunk of data to be detected. See + // above for the supported data format. + // @param [in] is_end Set it to true if it is the end of a + // utterance or file. + int RunVad(const std::string& data, bool is_end = false); + + // Various versions of RunVad() that take different format of audio. If + // NumChannels() > 1, e.g., NumChannels() == 2, then the array is as follows: + // + // d1c1, d1c2, d2c1, d2c2, d3c1, d3c2, ..., dNc1, dNc2 + // + // where d1c1 means data point 1 of channel 1. + // + // @param [in] data Small chunk of data to be detected. See + // above for the supported data format. + // @param [in] array_length Length of the data array. + // @param [in] is_end Set it to true if it is the end of a + // utterance or file. + int RunVad(const float* const data, + const int array_length, bool is_end = false); + int RunVad(const int16_t* const data, + const int array_length, bool is_end = false); + int RunVad(const int32_t* const data, + const int array_length, bool is_end = false); + + // Applied a fixed gain to the input audio. In case you have a very weak + // microphone, you can use this function to boost input audio level. + void SetAudioGain(const float audio_gain); + + // If is true, then apply frontend audio processing; + // otherwise turns the audio processing off. + void ApplyFrontend(const bool apply_frontend); + + // Returns the required sampling rate, number of channels and bits per sample + // values for the audio data. You should use this information to set up your + // audio capturing interface. 
+ int SampleRate() const; + int NumChannels() const; + int BitsPerSample() const; + + ~SnowboyVad(); + + private: + std::unique_ptr wave_header_; + std::unique_ptr vad_pipeline_; +}; + +} // namespace snowboy + +#endif // SNOWBOY_INCLUDE_SNOWBOY_DETECT_H_ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/lib/aarch64-ubuntu1604/libsnowboy-detect.a b/src/Guide_stick_system/voice_assistant/snowboy/lib/aarch64-ubuntu1604/libsnowboy-detect.a new file mode 100644 index 0000000..6b8ec97 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/lib/aarch64-ubuntu1604/libsnowboy-detect.a differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/lib/android/armv7a/libsnowboy-detect.a b/src/Guide_stick_system/voice_assistant/snowboy/lib/android/armv7a/libsnowboy-detect.a new file mode 100644 index 0000000..112bc12 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/lib/android/armv7a/libsnowboy-detect.a differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/lib/android/armv8-aarch64/libsnowboy-detect.a b/src/Guide_stick_system/voice_assistant/snowboy/lib/android/armv8-aarch64/libsnowboy-detect.a new file mode 100644 index 0000000..e8c4f19 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/lib/android/armv8-aarch64/libsnowboy-detect.a differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/lib/ios/libsnowboy-detect.a b/src/Guide_stick_system/voice_assistant/snowboy/lib/ios/libsnowboy-detect.a new file mode 100644 index 0000000..345832c Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/lib/ios/libsnowboy-detect.a differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/lib/node/SnowboyDetectNative.d.ts b/src/Guide_stick_system/voice_assistant/snowboy/lib/node/SnowboyDetectNative.d.ts new file mode 100644 index 0000000..d071441 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/lib/node/SnowboyDetectNative.d.ts @@ -0,0 +1,15 @@ +interface SnowboyDetectNativeInterface { + new (resource: string, models: string): SnowboyDetectNativeInterface; + Reset(): boolean; + RunDetection(audioData: Buffer): number; + SetSensitivity(sensitivity: string): void; + SetHighSensitivity(highSensitivity: string): void; + GetSensitivity(): string; + SetAudioGain(audioGain: number): void; + UpdateModel(): void; + NumHotwords(): number; + SampleRate(): number; + NumChannels(): number; + BitsPerSample(): number; + ApplyFrontend(applyFrontend: boolean): void; +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/lib/node/index.ts b/src/Guide_stick_system/voice_assistant/snowboy/lib/node/index.ts new file mode 100644 index 0000000..409e469 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/lib/node/index.ts @@ -0,0 +1,208 @@ +import * as stream from 'stream'; +import * as path from 'path'; +import * as fs from 'fs'; +import * as binary from 'node-pre-gyp'; + +const bindingPath: string = binary.find(path.resolve(path.join(__dirname, '../../package.json'))); +const SnowboyDetectNative: SnowboyDetectNativeInterface = require(bindingPath).SnowboyDetect; + +enum DetectionResult { + SILENCE = -2, + ERROR = -1, + SOUND = 0 +} + +enum ModelType { + PMDL, + UMDL +} + +export interface HotwordModel { + file: string; + sensitivity?: string; + hotwords: string | Array; +} + +interface HotwordModelsInterface { + add(model: HotwordModel): void; + lookup(index: number): string; + numHotwords(): number; +} + +export interface DetectorOptions { + resource: string; 
+ models: HotwordModels; + audioGain?: number; + applyFrontend?: boolean; + highSensitivity?: string; +} + +export interface SnowboyDetectInterface { + reset(): boolean; + runDetection(buffer: Buffer): number; + setSensitivity(sensitivity: string): void; + setHighSensitivity(highSensitivity: string): void; + getSensitivity(): string; + setAudioGain(gain: number): void; + updateModel(): void; + numHotwords(): number; + sampleRate(): number; + numChannels(): number; + bitsPerSample(): number; +} + +export class HotwordModels implements HotwordModels { + private models: Array = []; + private lookupTable: Array; + + add(model: HotwordModel) { + model.hotwords = [].concat(model.hotwords); + model.sensitivity = model.sensitivity || "0.5"; + + if (fs.existsSync(model.file) === false) { + throw new Error(`Model ${model.file} does not exists.`); + } + + const type = path.extname(model.file).toUpperCase(); + + if (ModelType[type] === ModelType.PMDL && model.hotwords.length > 1) { + throw new Error('Personal models can define only one hotword.'); + } + + this.models.push(model); + this.lookupTable = this.generateHotwordsLookupTable(); + } + + get modelString(): string { + return this.models.map((model) => model.file).join(); + } + + get sensitivityString(): string { + return this.models.map((model) => model.sensitivity).join(); + } + + lookup(index: number): string { + const lookupIndex = index - 1; + if (lookupIndex < 0 || lookupIndex >= this.lookupTable.length) { + throw new Error('Index out of bounds.'); + } + return this.lookupTable[lookupIndex]; + } + + numHotwords(): number { + return this.lookupTable.length; + } + + private generateHotwordsLookupTable(): Array { + return this.models.reduce((hotwords, model) => { + return hotwords.concat(model.hotwords); + }, new Array()); + } +} + +export class SnowboyDetect extends stream.Writable implements SnowboyDetectInterface { + nativeInstance: SnowboyDetectNativeInterface; + private models: HotwordModels; + + constructor(options: DetectorOptions) { + super(); + + this.models = options.models; + this.nativeInstance = new SnowboyDetectNative(options.resource, options.models.modelString); + + if (this.nativeInstance.NumHotwords() !== options.models.numHotwords()) { + throw new Error('Loaded hotwords count does not match number of hotwords defined.'); + } + + this.nativeInstance.SetSensitivity(options.models.sensitivityString); + + if (options.audioGain) { + this.nativeInstance.SetAudioGain(options.audioGain); + } + + if (options.applyFrontend) { + this.nativeInstance.ApplyFrontend(options.applyFrontend); + } + + if (options.highSensitivity) { + this.nativeInstance.SetHighSensitivity(options.highSensitivity); + } + } + + reset(): boolean { + return this.nativeInstance.Reset(); + } + + runDetection(buffer: Buffer): number { + const index = this.nativeInstance.RunDetection(buffer); + this.processDetectionResult(index, buffer); + return index; + } + + setSensitivity(sensitivity: string): void { + this.nativeInstance.SetSensitivity(sensitivity); + } + + setHighSensitivity(highSensitivity: string): void { + this.nativeInstance.SetHighSensitivity(highSensitivity); + } + + getSensitivity(): string { + return this.nativeInstance.GetSensitivity(); + } + + setAudioGain(gain: number): void { + this.nativeInstance.SetAudioGain(gain); + } + + updateModel(): void { + this.nativeInstance.UpdateModel(); + } + + numHotwords(): number { + return this.nativeInstance.NumHotwords(); + } + + sampleRate(): number { + return this.nativeInstance.SampleRate(); + } + + 
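+  // Typical wiring of this class (a sketch; `micStream` stands for any Readable stream of
+  // raw PCM audio in the format reported by sampleRate()/numChannels()/bitsPerSample(),
+  // and is not provided by this module):
+  //
+  //   const models = new Models();
+  //   models.add({ file: 'resources/alexa/alexa_02092017.umdl', sensitivity: '0.5', hotwords: 'alexa' });
+  //   const detector = new Detector({ resource: 'resources/common.res', models, audioGain: 1.0 });
+  //   detector.on('hotword', (index, hotword) => console.log('hotword', index, hotword));
+  //   detector.on('silence', () => console.log('silence'));
+  //   micStream.pipe(detector);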
numChannels(): number { + return this.nativeInstance.NumChannels(); + } + + bitsPerSample(): number { + return this.nativeInstance.BitsPerSample(); + } + + // Stream implementation + _write(chunk: Buffer, encoding: string, callback: Function) { + const index = this.nativeInstance.RunDetection(chunk); + this.processDetectionResult(index, chunk); + return callback(); + } + + private processDetectionResult(index: number, buffer: Buffer): void { + switch (index) { + case DetectionResult.ERROR: + this.emit('error'); + break; + + case DetectionResult.SILENCE: + this.emit('silence'); + break; + + case DetectionResult.SOUND: + this.emit('sound', buffer); + break; + + default: + const hotword = this.models.lookup(index); + this.emit('hotword', index, hotword, buffer); + break; + } + } +} + +export const Detector = SnowboyDetect; +export const Models = HotwordModels; diff --git a/src/Guide_stick_system/voice_assistant/snowboy/lib/node/node-pre-gyp.d.ts b/src/Guide_stick_system/voice_assistant/snowboy/lib/node/node-pre-gyp.d.ts new file mode 100644 index 0000000..22b07a7 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/lib/node/node-pre-gyp.d.ts @@ -0,0 +1,3 @@ +declare module "node-pre-gyp" { + export function find(path:string):string; +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/lib/osx/libsnowboy-detect.a b/src/Guide_stick_system/voice_assistant/snowboy/lib/osx/libsnowboy-detect.a new file mode 100644 index 0000000..b0736ad Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/lib/osx/libsnowboy-detect.a differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/lib/rpi/libsnowboy-detect.a b/src/Guide_stick_system/voice_assistant/snowboy/lib/rpi/libsnowboy-detect.a new file mode 100644 index 0000000..df04d29 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/lib/rpi/libsnowboy-detect.a differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/lib/ubuntu64/libsnowboy-detect.a b/src/Guide_stick_system/voice_assistant/snowboy/lib/ubuntu64/libsnowboy-detect.a new file mode 100644 index 0000000..6aaad1a Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/lib/ubuntu64/libsnowboy-detect.a differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/package.json b/src/Guide_stick_system/voice_assistant/snowboy/package.json new file mode 100644 index 0000000..4248979 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/package.json @@ -0,0 +1,42 @@ +{ + "name": "snowboy", + "version": "1.3.1", + "description": "Snowboy is a customizable hotword detection engine", + "main": "lib/node/index.js", + "typings": "lib/node/index.d.ts", + "binary": { + "module_name": "snowboy", + "module_path": "./lib/node/binding/{configuration}/{node_abi}-{platform}-{arch}/", + "remote_path": "./{module_name}/v{version}/{configuration}/", + "package_name": "{module_name}-v{version}-{node_abi}-{platform}-{arch}.tar.gz", + "host": "https://snowboy-release-node.s3-us-west-2.amazonaws.com" + }, + "scripts": { + "install": "node-pre-gyp install --fallback-to-build", + "test": "node index.js", + "prepublish": "tsc --listFiles" + }, + "author": "KITT.AI ", + "contributors": [ + "Leandre Gohy ", + "Evan Cohen " + ], + "repository": { + "type": "git", + "url": "git+https://github.com/Kitt-AI/snowboy.git" + }, + "gypfile": true, + "license": "Apache-2.0", + "dependencies": { + "node-pre-gyp": "^0.6.30" + }, + "devDependencies": { + "@types/node": "^6.0.38", + "aws-sdk": "2.x", + "nan": "^2.4.0", + 
"typescript": "^2.0.2" + }, + "bugs": { + "url": "https://github.com/Kitt-AI/snowboy/issues" + } +} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/resources/alexa/SnowboyAlexaDemo.apk b/src/Guide_stick_system/voice_assistant/snowboy/resources/alexa/SnowboyAlexaDemo.apk new file mode 100644 index 0000000..df55e42 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/resources/alexa/SnowboyAlexaDemo.apk differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/resources/alexa/alexa-avs-sample-app/alexa.umdl b/src/Guide_stick_system/voice_assistant/snowboy/resources/alexa/alexa-avs-sample-app/alexa.umdl new file mode 100644 index 0000000..f916312 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/resources/alexa/alexa-avs-sample-app/alexa.umdl differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/resources/alexa/alexa-avs-sample-app/avs-kittai.patch b/src/Guide_stick_system/voice_assistant/snowboy/resources/alexa/alexa-avs-sample-app/avs-kittai.patch new file mode 100644 index 0000000..d1c2b2b --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/resources/alexa/alexa-avs-sample-app/avs-kittai.patch @@ -0,0 +1,72 @@ +diff -Naur avs-sensory/pi.sh avs-kitt/pi.sh +--- pi.sh 2019-05-03 18:09:18.063849909 -0700 ++++ pi.sh 2019-05-03 18:09:39.273744305 -0700 +@@ -20,18 +20,19 @@ + + SOUND_CONFIG="$HOME/.asoundrc" + START_SCRIPT="$INSTALL_BASE/startsample.sh" +-CMAKE_PLATFORM_SPECIFIC=(-DSENSORY_KEY_WORD_DETECTOR=ON \ ++CMAKE_PLATFORM_SPECIFIC=(-DKITTAI_KEY_WORD_DETECTOR=ON \ + -DGSTREAMER_MEDIA_PLAYER=ON -DPORTAUDIO=ON \ + -DPORTAUDIO_LIB_PATH="$THIRD_PARTY_PATH/portaudio/lib/.libs/libportaudio.$LIB_SUFFIX" \ + -DPORTAUDIO_INCLUDE_DIR="$THIRD_PARTY_PATH/portaudio/include" \ +- -DSENSORY_KEY_WORD_DETECTOR_LIB_PATH=$THIRD_PARTY_PATH/alexa-rpi/lib/libsnsr.a \ +- -DSENSORY_KEY_WORD_DETECTOR_INCLUDE_DIR=$THIRD_PARTY_PATH/alexa-rpi/include) ++ -DKITTAI_KEY_WORD_DETECTOR_LIB_PATH=$THIRD_PARTY_PATH/snowboy/lib/rpi/libsnowboy-detect.a \ ++ -DKITTAI_KEY_WORD_DETECTOR_INCLUDE_DIR=$THIRD_PARTY_PATH/snowboy/include \ ++ -DCMAKE_BUILD_TYPE=MINSIZEREL) + + GSTREAMER_AUDIO_SINK="alsasink" + + install_dependencies() { + sudo apt-get update +- sudo apt-get -y install git gcc cmake build-essential libsqlite3-dev libcurl4-openssl-dev libssl-dev libfaad-dev libsoup2.4-dev libgcrypt20-dev libgstreamer-plugins-bad1.0-dev gstreamer1.0-plugins-good libasound2-dev sox gedit vim python3-pip ++ sudo apt-get -y install git gcc cmake build-essential libsqlite3-dev libcurl4-openssl-dev libssl-dev libfaad-dev libsoup2.4-dev libgcrypt20-dev libgstreamer-plugins-bad1.0-dev gstreamer1.0-plugins-good libasound2-dev sox gedit vim python3-pip python-pip libatlas-base-dev + pip install flask commentjson + } + +@@ -64,18 +65,18 @@ + build_kwd_engine() { +- #get sensory and build ++ #get kittai and build + echo +- echo "==============> CLONING AND BUILDING SENSORY ==============" ++ echo "==============> CLONING AND BUILDING KITTAI ==============" + echo + + cd $THIRD_PARTY_PATH +- git clone git://github.com/Sensory/alexa-rpi.git +- bash ./alexa-rpi/bin/license.sh ++ git clone https://github.com/Kitt-AI/snowboy.git ++ cp snowboy/resources/alexa/alexa-avs-sample-app/alexa.umdl snowboy/resources/alexa.umdl + } + + generate_start_script() { + cat << EOF > "$START_SCRIPT" + cd "$BUILD_PATH/SampleApp/src" + +- ./SampleApp "$OUTPUT_CONFIG_FILE" "$THIRD_PARTY_PATH/alexa-rpi/models" DEBUG9 ++ ./SampleApp "$OUTPUT_CONFIG_FILE" 
"$THIRD_PARTY_PATH/snowboy/resources" DEBUG9 + EOF + } +diff -Naur avs-sensory/setup.sh avs-kitt/setup.sh +--- setup.sh 2019-05-03 18:09:24.383818365 -0700 ++++ setup.sh 2019-05-03 18:09:35.193764563 -0700 +@@ -50,6 +50,8 @@ + LIB_SUFFIX="a" + ANDROID_CONFIG_FILE="" + ++BUILDTYPE="MINSIZEREL" ++ + # Default device serial number if nothing is specified + DEVICE_SERIAL_NUMBER="123456" + +@@ -242,7 +244,7 @@ + cmake "$SOURCE_PATH/avs-device-sdk" \ + -DCMAKE_BUILD_TYPE=DEBUG \ + "${CMAKE_PLATFORM_SPECIFIC[@]}" +- ++ sed -E -i "s:CXX_PLATFORM_DEPENDENT_FLAGS_"$BUILDTYPE"\s+\"(.*)\":CXX_PLATFORM_DEPENDENT_FLAGS_"$BUILDTYPE" \"\1 -D_GLIBCXX_USE_CXX11_ABI=0 -pg\":" ../avs-device-sdk/build/cmake/BuildOptions.cmake + cd $BUILD_PATH + make SampleApp -j1 + diff --git a/src/Guide_stick_system/voice_assistant/snowboy/resources/alexa/alexa_02092017.umdl b/src/Guide_stick_system/voice_assistant/snowboy/resources/alexa/alexa_02092017.umdl new file mode 100644 index 0000000..c4a6094 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/resources/alexa/alexa_02092017.umdl differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/resources/common.res b/src/Guide_stick_system/voice_assistant/snowboy/resources/common.res new file mode 100644 index 0000000..fe6fa95 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/resources/common.res differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/resources/ding.wav b/src/Guide_stick_system/voice_assistant/snowboy/resources/ding.wav new file mode 100644 index 0000000..79346e0 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/resources/ding.wav differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/resources/dong.wav b/src/Guide_stick_system/voice_assistant/snowboy/resources/dong.wav new file mode 100644 index 0000000..426596b Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/resources/dong.wav differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/resources/models/computer.umdl b/src/Guide_stick_system/voice_assistant/snowboy/resources/models/computer.umdl new file mode 100644 index 0000000..3760688 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/resources/models/computer.umdl differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/resources/models/hey_extreme.umdl b/src/Guide_stick_system/voice_assistant/snowboy/resources/models/hey_extreme.umdl new file mode 100644 index 0000000..4af46fd Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/resources/models/hey_extreme.umdl differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/resources/models/jarvis.umdl b/src/Guide_stick_system/voice_assistant/snowboy/resources/models/jarvis.umdl new file mode 100644 index 0000000..793d253 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/resources/models/jarvis.umdl differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/resources/models/neoya.umdl b/src/Guide_stick_system/voice_assistant/snowboy/resources/models/neoya.umdl new file mode 100644 index 0000000..90dba54 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/resources/models/neoya.umdl differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/resources/models/smart_mirror.umdl b/src/Guide_stick_system/voice_assistant/snowboy/resources/models/smart_mirror.umdl new file mode 100644 index 0000000..497dc5b Binary files /dev/null and 
b/src/Guide_stick_system/voice_assistant/snowboy/resources/models/smart_mirror.umdl differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/resources/models/snowboy.umdl b/src/Guide_stick_system/voice_assistant/snowboy/resources/models/snowboy.umdl new file mode 100644 index 0000000..bb68185 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/resources/models/snowboy.umdl differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/resources/models/subex.umdl b/src/Guide_stick_system/voice_assistant/snowboy/resources/models/subex.umdl new file mode 100644 index 0000000..e9c261e Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/resources/models/subex.umdl differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/resources/models/view_glass.umdl b/src/Guide_stick_system/voice_assistant/snowboy/resources/models/view_glass.umdl new file mode 100644 index 0000000..e367dfb Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/resources/models/view_glass.umdl differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/resources/snowboy.raw b/src/Guide_stick_system/voice_assistant/snowboy/resources/snowboy.raw new file mode 100644 index 0000000..93149a7 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/resources/snowboy.raw differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/resources/snowboy.wav b/src/Guide_stick_system/voice_assistant/snowboy/resources/snowboy.wav new file mode 100644 index 0000000..6cf6fa0 Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/snowboy/resources/snowboy.wav differ diff --git a/src/Guide_stick_system/voice_assistant/snowboy/scripts/install_swig.sh b/src/Guide_stick_system/voice_assistant/snowboy/scripts/install_swig.sh new file mode 100644 index 0000000..46208db --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/scripts/install_swig.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +# SWIG is a tool to compile c++ code into Python. + +echo "Installing SWIG" + +if [ ! -e swig-3.0.10.tar.gz ]; then + cp exteral_tools/swig-3.0.10.tar.gz ./ || \ + wget -T 10 -t 3 \ + http://prdownloads.sourceforge.net/swig/swig-3.0.10.tar.gz || exit 1; +fi + +tar -xovzf swig-3.0.10.tar.gz || exit 1 +ln -s swig-3.0.10 swig + +cd swig + +# We first have to install PCRE. +if [ ! -e pcre-8.37.tar.gz ]; then + cp ../exteral_tools/pcre-8.37.tar.gz ./ || \ + wget -T 10 -t 3 \ + https://sourceforge.net/projects/pcre/files/pcre/8.37/pcre-8.37.tar.gz || exit 1; +fi +Tools/pcre-build.sh + +./configure --prefix=`pwd` --with-pic +make +make install + +cd .. diff --git a/src/Guide_stick_system/voice_assistant/snowboy/scripts/publish-node.sh b/src/Guide_stick_system/voice_assistant/snowboy/scripts/publish-node.sh new file mode 100644 index 0000000..8b7304a --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/scripts/publish-node.sh @@ -0,0 +1,23 @@ +#!/bin/bash + + +NODE_VERSIONS=( "4.0.0" "5.0.0" "6.0.0" "7.0.0" "8.0.0" "9.0.0") + +# Makes sure nvm is installed. +if ! 
which nvm > /dev/null; then + rm -rf ~/.nvm/ &&\ + git clone --depth 1 https://github.com/creationix/nvm.git ~/.nvm + source ~/.nvm/nvm.sh +fi + +for i in "${NODE_VERSIONS[@]}"; do + # Installs and use the correct version of node + nvm install $i + nvm use $i + + # build, package and publish for the current package version + npm install nan + npm install aws-sdk + npm install node-pre-gyp + ./node_modules/.bin/node-pre-gyp clean configure build package publish +done diff --git a/src/Guide_stick_system/voice_assistant/snowboy/setup.py b/src/Guide_stick_system/voice_assistant/snowboy/setup.py new file mode 100644 index 0000000..3ce2c9d --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/setup.py @@ -0,0 +1,61 @@ +import os +import sys +from setuptools import setup, find_packages +from distutils.command.build import build +from distutils.dir_util import copy_tree +from subprocess import call + + +py_dir = 'Python' if sys.version_info[0] < 3 else 'Python3' + +class SnowboyBuild(build): + + def run(self): + + cmd = ['make'] + swig_dir = os.path.join('swig', py_dir) + def compile(): + call(cmd, cwd=swig_dir) + + self.execute(compile, [], 'Compiling snowboy...') + + # copy generated .so to build folder + self.mkpath(self.build_lib) + snowboy_build_lib = os.path.join(self.build_lib, 'snowboy') + self.mkpath(snowboy_build_lib) + target_file = os.path.join(swig_dir, '_snowboydetect.so') + if not self.dry_run: + self.copy_file(target_file, + snowboy_build_lib) + + # copy resources too since it is a symlink + resources_dir = 'resources' + resources_dir_on_build = os.path.join(snowboy_build_lib, + 'resources') + copy_tree(resources_dir, resources_dir_on_build) + + build.run(self) + + +setup( + name='snowboy', + version='1.3.0', + description='Snowboy is a customizable hotword detection engine', + maintainer='KITT.AI', + maintainer_email='snowboy@kitt.ai', + license='Apache-2.0', + url='https://snowboy.kitt.ai', + packages=find_packages(os.path.join('examples', py_dir)), + package_dir={'snowboy': os.path.join('examples', py_dir)}, + py_modules=['snowboy.snowboydecoder', 'snowboy.snowboydetect'], + package_data={'snowboy': ['resources/*']}, + zip_safe=False, + long_description="", + classifiers=[], + install_requires=[ + 'PyAudio', + ], + cmdclass={ + 'build': SnowboyBuild + } +) diff --git a/src/Guide_stick_system/voice_assistant/snowboy/swig/Android/Makefile b/src/Guide_stick_system/voice_assistant/snowboy/swig/Android/Makefile new file mode 100644 index 0000000..35f5486 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/swig/Android/Makefile @@ -0,0 +1,114 @@ +# Example Makefile that wrappers snowboy c++ library (snowboy-detect.a) through +# JNI interface, using swig. + +# This Makefile is optimized for armv7-a/armv8 architecture. Also, please make sure +# "unzip" is installed. +# Usage: +# make # for 32bit ARM +# make BIT=64 # for 64bit ARM + +# Please use swig-3.0.10 or up. 
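Each of the SWIG Makefiles that follow (Android, Java, Perl, Python, Python3) opens with the same guard: it squashes the output of `swig -version` into a five-digit number with `sed` and refuses to build unless the result is at least 30010, i.e. SWIG 3.0.10. For readers who find the sed pipeline opaque, here is a rough Python sketch of the equivalent check; the function is illustrative and not part of the repository:

```python
# Rough Python equivalent of the SWIG_VERSION guard in the SWIG Makefiles.
import re
import subprocess

def swig_version_ok(swig="swig", minimum=(3, 0, 10)):
    """Return True if `swig -version` reports at least `minimum`."""
    try:
        out = subprocess.run([swig, "-version"],
                             capture_output=True, text=True).stdout
    except FileNotFoundError:
        return False
    match = re.search(r"version\s+(\d+(?:\.\d+)*)", out, re.IGNORECASE)
    if not match:
        return False
    parts = tuple(int(p) for p in match.group(1).split("."))
    # Pad to three components, mirroring the sed padding to a 5-digit number.
    return (parts + (0, 0, 0))[:3] >= minimum

if __name__ == "__main__":
    print("SWIG is new enough:", swig_version_ok())
```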
+SWIG := swig + +SWIG_VERSION := $(shell expr `$(SWIG) -version | grep -i Version | \ + sed "s/^.* //g" | sed -e "s/\.\([0-9][0-9]\)/\1/g" -e "s/\.\([0-9]\)/0\1/g" \ + -e "s/^[0-9]\{3,4\}$$/&00/"` \>= 30010) + +ifeq ($(SWIG_VERSION), 0) +checkversion: + $(info You need at least Swig 3.0.10 to run) + $(info Your current version is $(shell $(SWIG) -version | grep -i Version)) + @exit -1 +endif + + +NDK_VERSION=r14b +BIT = 32 +ifeq ($(BIT), 64) + NDKINSTALLEDROOT := $(PWD)/ndk_install_64bit + OPENBLASTARGET := ARMV8 + OPENBLASDIR := OpenBLAS-Android-ARM64 +else + NDKINSTALLEDROOT := $(PWD)/ndk_install_32bit + OPENBLASTARGET := ARMV7 + OPENBLASDIR := OpenBLAS-Android-ARM32 +endif +NDKROOT := $(PWD)/android-ndk-${NDK_VERSION} + +SNOWBOYDETECTSWIGITF = snowboy-detect-swig.i +SNOWBOYDETECTSWIGOBJ = snowboy-detect-swig.o +SNOWBOYDETECTSWIGCC = snowboy-detect-swig.cc +SNOWBOYDETECTJAVAPKG = ai.kitt.snowboy +SNOWBOYDETECTJAVAPKGDIR = java/ai/kitt/snowboy/ +SNOWBOYDETECTSWIGLIBFILE = libsnowboy-detect-android.so +OPENBLASLIBFILE = $(OPENBLASDIR)/install/lib/libopenblas.a + +ARCH := arm +TOPDIR := ../../ +LDFLAGS := + +CXXFLAGS := -O3 --sysroot=$(NDKINSTALLEDROOT)/sysroot +LDLIBS := -L$(NDKINSTALLEDROOT)/sysroot/usr/lib + +ifeq ($(ARCH), arm) + ifeq ($(BIT), 64) + AR := $(NDKINSTALLEDROOT)/bin/aarch64-linux-android-ar + CC := $(NDKINSTALLEDROOT)/bin/aarch64-linux-android-gcc + CXX := $(NDKINSTALLEDROOT)/bin/aarch64-linux-android-g++ + STRIP := $(NDKINSTALLEDROOT)/bin/aarch64-linux-android-strip + SNOWBOYDETECTLIBFILE = $(TOPDIR)/lib/android/armv8-aarch64/libsnowboy-detect.a + CXXFLAGS += -std=c++0x -rdynamic -I$(TOPDIR) -Werror -Wall \ + -fsigned-char -fpic -fPIC -march=armv8-a \ + -DNDEBUG -ffast-math -fomit-frame-pointer -O3 -pie -fPIE -DHAVE_NEON=1 \ + -fno-strict-aliasing -Wno-unused-function -shared + LDLIBS += \ + -L$(NDKROOT)/sources/cxx-stl/gnu-libstdc++/4.9/libs/arm64-v8a \ + -lgnustl_static -lsupc++ -lgcc -ldl -lc -lm -llog -pthread + SNOWBOYDETECTSWIGLIBFILE := jniLibs/arm64-v8a/$(SNOWBOYDETECTSWIGLIBFILE) + SNOWBOYDETECTSWIGLIBNAME := $(shell basename $(SNOWBOYDETECTSWIGLIBFILE)) + else + AR := $(NDKINSTALLEDROOT)/bin/arm-linux-androideabi-ar + CC := $(NDKINSTALLEDROOT)/bin/arm-linux-androideabi-gcc + CXX := $(NDKINSTALLEDROOT)/bin/arm-linux-androideabi-g++ + STRIP := $(NDKINSTALLEDROOT)/bin/arm-linux-androideabi-strip + SNOWBOYDETECTLIBFILE = $(TOPDIR)/lib/android/armv7a/libsnowboy-detect.a + CXXFLAGS += -std=c++0x -rdynamic -I$(TOPDIR) -Werror -Wall \ + -fsigned-char -fpic -fPIC -mfloat-abi=softfp -march=armv7-a -mfpu=neon \ + -DNDEBUG -ffast-math -fomit-frame-pointer -O3 -pie -fPIE -DHAVE_NEON=1 \ + -fno-strict-aliasing -Wno-unused-function -shared + LDLIBS += \ + -L$(NDKROOT)/sources/cxx-stl/gnu-libstdc++/4.9/libs/armeabi-v7a \ + -lgnustl_static -lsupc++ -lgcc -ldl -lc -lm -llog -pthread + SNOWBOYDETECTSWIGLIBFILE := jniLibs/armeabi-v7a/$(SNOWBOYDETECTSWIGLIBFILE) + SNOWBOYDETECTSWIGLIBNAME := $(shell basename $(SNOWBOYDETECTSWIGLIBFILE)) + endif +endif + +all: $(SNOWBOYSWIGLIBFILE) $(SNOWBOYDETECTSWIGLIBFILE) + +%.a: + $(MAKE) -C ${@D} ${@F} + +$(NDKINSTALLEDROOT): + @-./install_ndk.sh ${NDK_VERSION} ${BIT} + +$(OPENBLASLIBFILE): $(NDKINSTALLEDROOT) + @-./install_openblas.sh $(CC) $(AR) $(OPENBLASTARGET) $(OPENBLASDIR) + +$(SNOWBOYDETECTSWIGCC): $(SNOWBOYDETECTSWIGITF) + @-mkdir -p $(SNOWBOYDETECTJAVAPKGDIR) + $(SWIG) -I$(TOPDIR) -c++ -java -package $(SNOWBOYDETECTJAVAPKG) -outdir \ + $(SNOWBOYDETECTJAVAPKGDIR) -o $(SNOWBOYDETECTSWIGCC) $(SNOWBOYDETECTSWIGITF) + 
+$(SNOWBOYDETECTSWIGOBJ): $(SNOWBOYDETECTSWIGCC) + $(CXX) $(CXXFLAGS) -c $(SNOWBOYDETECTSWIGCC) -o $(SNOWBOYDETECTSWIGOBJ) + +$(SNOWBOYDETECTSWIGLIBFILE): $(OPENBLASLIBFILE) $(SNOWBOYDETECTSWIGOBJ) $(SNOWBOYDETECTLIBFILE) + @-mkdir -p `dirname $(SNOWBOYDETECTSWIGLIBFILE)` + $(CXX) -Wl,-soname,$(SNOWBOYDETECTSWIGLIBNAME) $(CXXFLAGS) $(LDFLAGS) \ + $(SNOWBOYDETECTSWIGOBJ) $(SNOWBOYDETECTLIBFILE) $(OPENBLASLIBFILE) \ + $(LDLIBS) -o $(SNOWBOYDETECTSWIGLIBFILE) + $(STRIP) --strip-unneeded $(SNOWBOYDETECTSWIGLIBFILE) + +clean: + -rm -rf *.o *.a *.so java jniLibs $(SNOWBOYDETECTSWIGCC) diff --git a/src/Guide_stick_system/voice_assistant/snowboy/swig/Android/install_ndk.sh b/src/Guide_stick_system/voice_assistant/snowboy/swig/Android/install_ndk.sh new file mode 100644 index 0000000..266b497 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/swig/Android/install_ndk.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# This script installs NDK with version number as parameter +# Usage: ./install_ndk.sh [ndk version] (such as "r14b") + +UNAME_INFO=`uname -a` +NDK_REPOSITORY_URL="https://dl.google.com/android/repository/" +NDK_VERSION=$1 +BIT=$2 + +if [[ $UNAME_INFO == *"Darwin"* ]]; then + if [ ! -d "android-ndk-${NDK_VERSION}" ]; then + wget -T 10 -t 3 ${NDK_REPOSITORY_URL}/android-ndk-${NDK_VERSION}-darwin-x86_64.zip \ + -O android-ndk-${NDK_VERSION}-darwin-x86_64.zip || exit 1; + unzip android-ndk-${NDK_VERSION}-darwin-x86_64.zip 1>/dev/null || exit 1; + fi +elif [[ $UNAME_INFO == *"Linux"* ]]; then + if [ ! -d "android-ndk-${NDK_VERSION}" ]; then + wget -T 10 -t 3 ${NDK_REPOSITORY_URL}/android-ndk-${NDK_VERSION}-linux-x86_64.zip \ + -O android-ndk-${NDK_VERSION}-linux-x86_64.zip || exit 1; + unzip android-ndk-${NDK_VERSION}-linux-x86_64.zip 1>/dev/null || exit 1; + fi +else + echo "Your platform is not supported yet." || exit 1; +fi + +if [[ $BIT == *"64"* ]]; then + ./android-ndk-${NDK_VERSION}/build/tools/make-standalone-toolchain.sh --verbose \ + --arch=arm64 --platform=android-21 --install-dir=`pwd`/ndk_install_64bit || exit 1; +else + ./android-ndk-${NDK_VERSION}/build/tools/make-standalone-toolchain.sh --verbose \ + --arch=arm --platform=android-14 --install-dir=`pwd`/ndk_install_32bit || exit 1; +fi diff --git a/src/Guide_stick_system/voice_assistant/snowboy/swig/Android/install_openblas.sh b/src/Guide_stick_system/voice_assistant/snowboy/swig/Android/install_openblas.sh new file mode 100644 index 0000000..d914558 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/swig/Android/install_openblas.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# This script compiles OpenBLAS for Android with ARM architecture. The prebuilt +# Android NDK toolchains do not include Fortran, hence parts like LAPACK will +# not be built. + +CC=$1 +AR=$2 +TARGET=$3 +DIR=$4 + +if [ ! -d $DIR ]; then + git clone https://github.com/xianyi/OpenBLAS.git $DIR + cd $DIR + git checkout arm_soft_fp_abi || exit 1; + git reset --hard b5c96fcfcdc82945502a2303116a64d89985daf5 || exit 1; + cd .. 
+fi + +cd $DIR +make USE_THREAD=0 TARGET=${TARGET} HOSTCC=gcc CC=${CC} AR=${AR} \ + NOFORTRAN=1 ARM_SOFTFP_ABI=1 libs || exit 1; +make PREFIX=`pwd`/install install || exit 1; diff --git a/src/Guide_stick_system/voice_assistant/snowboy/swig/Android/snowboy-detect-swig.i b/src/Guide_stick_system/voice_assistant/snowboy/swig/Android/snowboy-detect-swig.i new file mode 100644 index 0000000..250ec37 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/swig/Android/snowboy-detect-swig.i @@ -0,0 +1,20 @@ +// swig/Android/snowboy-detect-swig.i + +// Copyright 2016 KITT.AI (author: Guoguo Chen) + +%module snowboy + +// Suppress SWIG warnings. +#pragma SWIG nowarn=SWIGWARN_PARSE_NESTED_CLASS +%include "arrays_java.i" +%include "std_string.i" + +%apply float[] {float*}; +%apply short[] {int16_t*}; +%apply int[] {int32_t*}; + +%{ +#include "include/snowboy-detect.h" +%} + +%include "include/snowboy-detect.h" diff --git a/src/Guide_stick_system/voice_assistant/snowboy/swig/Go/snowboy.go b/src/Guide_stick_system/voice_assistant/snowboy/swig/Go/snowboy.go new file mode 100644 index 0000000..b03774a --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/swig/Go/snowboy.go @@ -0,0 +1,9 @@ +package snowboydetect + +/* +#cgo CXXFLAGS: -std=c++11 -D_GLIBCXX_USE_CXX11_ABI=0 +#cgo linux,amd64 LDFLAGS: -L${SRCDIR}/../../lib/ubuntu64 -lsnowboy-detect -lcblas +#cgo linux,arm LDFLAGS: -L${SRCDIR}/../../lib/rpi -lsnowboy-detect -lcblas +#cgo darwin LDFLAGS: -L${SRCDIR}/../../lib/osx -lsnowboy-detect -lcblas + */ +import "C" diff --git a/src/Guide_stick_system/voice_assistant/snowboy/swig/Go/snowboydetect.swigcxx b/src/Guide_stick_system/voice_assistant/snowboy/swig/Go/snowboydetect.swigcxx new file mode 100644 index 0000000..e166d91 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/swig/Go/snowboydetect.swigcxx @@ -0,0 +1,13 @@ +// swig/Go/snowboydetect.swigcxx + +%module snowboydetect + +// Suppress SWIG warnings. +#pragma SWIG nowarn=SWIGWARN_PARSE_NESTED_CLASS +%include "std_string.i" + +%{ +#include "../../include/snowboy-detect.h" +%} + +%include "../../include/snowboy-detect.h" diff --git a/src/Guide_stick_system/voice_assistant/snowboy/swig/Java/Makefile b/src/Guide_stick_system/voice_assistant/snowboy/swig/Java/Makefile new file mode 100644 index 0000000..7e58bb2 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/swig/Java/Makefile @@ -0,0 +1,79 @@ +# Example Makefile that wrappers snowboy c++ library (snowboy-detect.a) through +# JNI interface, using swig. +# +# Make sure you have jdk installed. + +# Please use swig-3.0.10 or up. 
+SWIG := swig + +SWIG_VERSION := $(shell expr `$(SWIG) -version | grep -i Version | \ + sed "s/^.* //g" | sed -e "s/\.\([0-9][0-9]\)/\1/g" -e "s/\.\([0-9]\)/0\1/g" \ + -e "s/^[0-9]\{3,4\}$$/&00/"` \>= 30010) + +ifeq ($(SWIG_VERSION), 0) +checkversion: + $(info You need at least Swig 3.0.10 to run) + $(info Your current version is $(shell $(SWIG) -version | grep -i Version)) + @exit -1 +endif + + +SNOWBOYDETECTSWIGITF = snowboy-detect-swig.i +SNOWBOYDETECTSWIGOBJ = snowboy-detect-swig.o +SNOWBOYDETECTSWIGCC = snowboy-detect-swig.cc +SNOWBOYDETECTJAVAPKG = ai.kitt.snowboy +SNOWBOYDETECTJAVAPKGDIR = java/ai/kitt/snowboy/ +SNOWBOYDETECTSWIGLIBFILE = jniLibs/libsnowboy-detect-java.so + +TOPDIR := ../../ +CXXFLAGS := -I$(TOPDIR) -O3 -fPIC -D_GLIBCXX_USE_CXX11_ABI=0 +LDFLAGS := + +ifeq ($(shell uname), Darwin) + CXX := clang++ + JAVAHOME := $(shell /usr/libexec/java_home) + JAVAINC := -I$(JAVAHOME)/include -I$(JAVAHOME)/include/darwin + SWIGFLAGS := -bundle -flat_namespace -undefined suppress + LDLIBS := -lm -ldl -framework Accelerate + SNOWBOYDETECTLIBFILE = $(TOPDIR)/lib/osx/libsnowboy-detect.a + SNOWBOYDETECTSWIGLIBFILE := $(SNOWBOYDETECTSWIGLIBFILE:.so=.dylib) +else + CXX := g++ + JAVAHOME := $(shell readlink -f $(shell which javac)) + JAVAHOME := $(shell dirname $(shell dirname $(JAVAHOME))) + JAVAINC := -I$(JAVAHOME)/include -I$(JAVAHOME)/include/linux + SWIGFLAGS := -shared + CXXFLAGS += -std=c++0x + # Make sure you have Atlas installed. You can statically link Atlas if you + # would like to be able to move the library to a machine without Atlas. + LDLIBS := -lm -ldl -lf77blas -lcblas -llapack_atlas -latlas + SNOWBOYDETECTLIBFILE = $(TOPDIR)/lib/ubuntu64/libsnowboy-detect.a + ifneq (,$(findstring arm,$(shell uname -m))) + SNOWBOYDETECTLIBFILE = $(TOPDIR)/lib/rpi/libsnowboy-detect.a + ifeq ($(findstring fc,$(shell uname -r)), fc) + SNOWBOYDETECTLIBFILE = $(TOPDIR)/lib/fedora25-armv7/libsnowboy-detect.a + LDLIBS := -L/usr/lib/atlas -lm -ldl -lsatlas + endif + endif +endif + +all: $(SNOWBOYSWIGLIBFILE) $(SNOWBOYDETECTSWIGLIBFILE) + +%.a: + $(MAKE) -C ${@D} ${@F} + +$(SNOWBOYDETECTSWIGCC): $(SNOWBOYDETECTSWIGITF) + @-mkdir -p $(SNOWBOYDETECTJAVAPKGDIR) + $(SWIG) -I$(TOPDIR) -c++ -java -package $(SNOWBOYDETECTJAVAPKG) -outdir \ + $(SNOWBOYDETECTJAVAPKGDIR) -o $(SNOWBOYDETECTSWIGCC) $(SNOWBOYDETECTSWIGITF) + +$(SNOWBOYDETECTSWIGOBJ): $(SNOWBOYDETECTSWIGCC) + $(CXX) $(JAVAINC) $(CXXFLAGS) -c $(SNOWBOYDETECTSWIGCC) -o $(SNOWBOYDETECTSWIGOBJ) + +$(SNOWBOYDETECTSWIGLIBFILE): $(SNOWBOYDETECTSWIGOBJ) $(SNOWBOYDETECTLIBFILE) + @-mkdir -p `dirname $(SNOWBOYDETECTSWIGLIBFILE)` + $(CXX) $(CXXFLAGS) $(SWIGFLAGS) $(LDFLAGS) $(SNOWBOYDETECTSWIGOBJ) \ + $(SNOWBOYDETECTLIBFILE) $(LDLIBS) -o $(SNOWBOYDETECTSWIGLIBFILE) + +clean: + -rm -rf *.o *.a *.so java jniLibs $(SNOWBOYDETECTSWIGCC) diff --git a/src/Guide_stick_system/voice_assistant/snowboy/swig/Java/snowboy-detect-swig.i b/src/Guide_stick_system/voice_assistant/snowboy/swig/Java/snowboy-detect-swig.i new file mode 100644 index 0000000..a4c30e6 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/swig/Java/snowboy-detect-swig.i @@ -0,0 +1,20 @@ +// swig/Java/snowboy-detect-swig.i + +// Copyright 2016 KITT.AI (author: Guoguo Chen) + +%module snowboy + +// Suppress SWIG warnings. 
+#pragma SWIG nowarn=SWIGWARN_PARSE_NESTED_CLASS +%include "arrays_java.i" +%include "std_string.i" + +%apply float[] {float*}; +%apply short[] {int16_t*}; +%apply int[] {int32_t*}; + +%{ +#include "include/snowboy-detect.h" +%} + +%include "include/snowboy-detect.h" diff --git a/src/Guide_stick_system/voice_assistant/snowboy/swig/Node/snowboy.cc b/src/Guide_stick_system/voice_assistant/snowboy/swig/Node/snowboy.cc new file mode 100644 index 0000000..e0aa543 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/swig/Node/snowboy.cc @@ -0,0 +1,183 @@ +#include +#include +#include + +class SnowboyDetect : public Nan::ObjectWrap { + public: + static NAN_MODULE_INIT(Init); + + private: + explicit SnowboyDetect(const std::string& resource_filename, + const std::string& model_str); + ~SnowboyDetect(); + + static NAN_METHOD(New); + static NAN_METHOD(Reset); + static NAN_METHOD(RunDetection); + static NAN_METHOD(SetSensitivity); + static NAN_METHOD(GetSensitivity); + static NAN_METHOD(SetAudioGain); + static NAN_METHOD(UpdateModel); + static NAN_METHOD(NumHotwords); + static NAN_METHOD(SampleRate); + static NAN_METHOD(NumChannels); + static NAN_METHOD(BitsPerSample); + static NAN_METHOD(ApplyFrontend); + + static Nan::Persistent constructor; + + snowboy::SnowboyDetect* detector; +}; + +Nan::Persistent SnowboyDetect::constructor; + +SnowboyDetect::SnowboyDetect(const std::string& resource_filename, + const std::string& model_str) { + try { + this->detector = new snowboy::SnowboyDetect(resource_filename, model_str); + } catch (std::runtime_error e) { + Nan::ThrowError(e.what()); + } +} +SnowboyDetect::~SnowboyDetect() { + if (this->detector) { + delete this->detector; + } +} + +NAN_MODULE_INIT(SnowboyDetect::Init) { + v8::Local tpl = Nan::New(New); + tpl->SetClassName(Nan::New("SnowboyDetect").ToLocalChecked()); + tpl->InstanceTemplate()->SetInternalFieldCount(1); + + SetPrototypeMethod(tpl, "Reset", Reset); + SetPrototypeMethod(tpl, "RunDetection", RunDetection); + SetPrototypeMethod(tpl, "SetSensitivity", SetSensitivity); + SetPrototypeMethod(tpl, "GetSensitivity", GetSensitivity); + SetPrototypeMethod(tpl, "SetAudioGain", SetAudioGain); + SetPrototypeMethod(tpl, "UpdateModel", UpdateModel); + SetPrototypeMethod(tpl, "NumHotwords", NumHotwords); + SetPrototypeMethod(tpl, "SampleRate", SampleRate); + SetPrototypeMethod(tpl, "NumChannels", NumChannels); + SetPrototypeMethod(tpl, "BitsPerSample", BitsPerSample); + SetPrototypeMethod(tpl, "ApplyFrontend", ApplyFrontend); + + constructor.Reset(Nan::GetFunction(tpl).ToLocalChecked()); + Nan::Set(target, Nan::New("SnowboyDetect").ToLocalChecked(), + Nan::GetFunction(tpl).ToLocalChecked()); +} + +NAN_METHOD(SnowboyDetect::New) { + if (!info.IsConstructCall()) { + Nan::ThrowError("Cannot call constructor as function, you need to use " + "'new' keyword"); + return; + } else if (!info[0]->IsString()) { + Nan::ThrowTypeError("resource must be a string"); + return; + } else if (!info[1]->IsString()) { + Nan::ThrowTypeError("model must be a string"); + return; + } + + Nan::MaybeLocal resource = Nan::To(info[0]); + Nan::MaybeLocal model = Nan::To(info[1]); + Nan::Utf8String resourceString(resource.ToLocalChecked()); + Nan::Utf8String modelString(model.ToLocalChecked()); + SnowboyDetect* obj = new SnowboyDetect(*resourceString, *modelString); + obj->Wrap(info.This()); + info.GetReturnValue().Set(info.This()); +} + +NAN_METHOD(SnowboyDetect::Reset) { + SnowboyDetect* ptr = Nan::ObjectWrap::Unwrap(info.Holder()); + bool ret = ptr->detector->Reset(); + 
info.GetReturnValue().Set(Nan::New(ret)); +} + +NAN_METHOD(SnowboyDetect::RunDetection) { + if (!info[0]->IsObject()) { + Nan::ThrowTypeError("data must be a buffer"); + return; + } + + Nan::MaybeLocal buffer = Nan::To(info[0]); + char* bufferData = node::Buffer::Data(buffer.ToLocalChecked()); + size_t bufferLength = node::Buffer::Length(buffer.ToLocalChecked()); + + std::string data(bufferData, bufferLength); + + SnowboyDetect* ptr = Nan::ObjectWrap::Unwrap(info.Holder()); + int ret = ptr->detector->RunDetection(data); + info.GetReturnValue().Set(Nan::New(ret)); +} + +NAN_METHOD(SnowboyDetect::SetSensitivity) { + if (!info[0]->IsString()) { + Nan::ThrowTypeError("sensitivity must be a string"); + return; + } + + Nan::MaybeLocal sensitivity = Nan::To(info[0]); + Nan::Utf8String sensitivityString(sensitivity.ToLocalChecked()); + + SnowboyDetect* ptr = Nan::ObjectWrap::Unwrap(info.Holder()); + ptr->detector->SetSensitivity(*sensitivityString); +} + +NAN_METHOD(SnowboyDetect::ApplyFrontend) { + Nan::Maybe applyFrontend= Nan::To(info[0]); + bool applyFrontendBool=applyFrontend.FromJust(); + + SnowboyDetect* ptr = Nan::ObjectWrap::Unwrap(info.Holder()); + ptr->detector->ApplyFrontend(applyFrontendBool); +} + +NAN_METHOD(SnowboyDetect::GetSensitivity) { + SnowboyDetect* ptr = Nan::ObjectWrap::Unwrap(info.Holder()); + std::string sensitivity = ptr->detector->GetSensitivity(); + info.GetReturnValue().Set(Nan::New(sensitivity).ToLocalChecked()); +} + +NAN_METHOD(SnowboyDetect::SetAudioGain) { + if (!info[0]->IsNumber()) { + Nan::ThrowTypeError("gain must be a number"); + return; + } + + Nan::MaybeLocal gain = Nan::To(info[0]); + SnowboyDetect* ptr = Nan::ObjectWrap::Unwrap(info.Holder()); + ptr->detector->SetAudioGain(gain.ToLocalChecked()->Value()); +} + +NAN_METHOD(SnowboyDetect::UpdateModel) { + SnowboyDetect* ptr = Nan::ObjectWrap::Unwrap(info.Holder()); + ptr->detector->UpdateModel(); +} + +NAN_METHOD(SnowboyDetect::NumHotwords) { + SnowboyDetect* ptr = Nan::ObjectWrap::Unwrap(info.Holder()); + int numHotwords = ptr->detector->NumHotwords(); + info.GetReturnValue().Set(Nan::New(numHotwords)); +} + +NAN_METHOD(SnowboyDetect::SampleRate) { + SnowboyDetect* ptr = Nan::ObjectWrap::Unwrap(info.Holder()); + int sampleRate = ptr->detector->SampleRate(); + info.GetReturnValue().Set(Nan::New(sampleRate)); +} + +NAN_METHOD(SnowboyDetect::NumChannels) { + SnowboyDetect* ptr = Nan::ObjectWrap::Unwrap(info.Holder()); + int numChannels = ptr->detector->NumChannels(); + info.GetReturnValue().Set(Nan::New(numChannels)); +} + +NAN_METHOD(SnowboyDetect::BitsPerSample) { + SnowboyDetect* ptr = Nan::ObjectWrap::Unwrap(info.Holder()); + int bitsPerSample = ptr->detector->BitsPerSample(); + info.GetReturnValue().Set(Nan::New(bitsPerSample)); +} + + +NODE_MODULE(SnowboyDetect, SnowboyDetect::Init) diff --git a/src/Guide_stick_system/voice_assistant/snowboy/swig/Perl/Makefile b/src/Guide_stick_system/voice_assistant/snowboy/swig/Perl/Makefile new file mode 100644 index 0000000..9b2b17b --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/swig/Perl/Makefile @@ -0,0 +1,70 @@ +# Example Makefile that wraps snowboy c++ library (snowboy-detect.a) into +# a Perl module (Snowboy.pm) using swig. + +# Please use swig-3.0.10 or up. 
+SWIG := swig + +SWIG_VERSION := $(shell expr `$(SWIG) -version | grep -i Version | \ + sed "s/^.* //g" | sed -e "s/\.\([0-9][0-9]\)/\1/g" -e "s/\.\([0-9]\)/0\1/g" \ + -e "s/^[0-9]\{3,4\}$$/&00/"` \>= 30010) + +ifeq ($(SWIG_VERSION), 0) +checkversion: + $(info You need at least Swig 3.0.10 to run) + $(info Your current version is $(shell $(SWIG) -version | grep -i Version)) + @exit -1 +endif + + +SNOWBOYDETECTSWIGITF = snowboy-detect.i +SNOWBOYDETECTSWIGOBJ = snowboy-detect-swig.o +SNOWBOYDETECTSWIGCC = snowboy-detect-swig.cc +SNOWBOYDETECTSWIGLIBFILE = Snowboy.so + +TOPDIR := ../../ +CXXFLAGS := -I$(TOPDIR) -O3 -fPIC -D_GLIBCXX_USE_CXX11_ABI=0 +LDFLAGS := + +ifeq ($(shell uname), Darwin) + CXX := clang++ + PERLARCH := $(shell perl -e 'use Config; print $$Config{archlib}') + SWIGFLAGS := -bundle -flat_namespace -undefined suppress + CXXFLAGS += -I$(PERLARCH)/CORE + LDLIBS := -lm -ldl -framework Accelerate + SNOWBOYDETECTLIBFILE = $(TOPDIR)/lib/osx/libsnowboy-detect.a + SNOWBOYDETECTSWIGLIBFILE := Snowboy.dylib +else + CXX := g++ + PERLARCH := $(shell perl -e 'use Config; print $$Config{archlib}') + SWIGFLAGS := -shared + CXXFLAGS += -std=gnu++11 -I$(PERLARCH)/CORE + # Make sure you have Atlas installed. You can statically link Atlas if you + # would like to be able to move the library to a machine without Atlas. + LDLIBS := -lm -ldl -lf77blas -lcblas -llapack_atlas -latlas + SNOWBOYDETECTLIBFILE = $(TOPDIR)/lib/ubuntu64/libsnowboy-detect.a + ifneq (,$(findstring arm,$(shell uname -m))) + SNOWBOYDETECTLIBFILE = $(TOPDIR)/lib/rpi/libsnowboy-detect.a + ifeq ($(findstring fc,$(shell uname -r)), fc) + SNOWBOYDETECTLIBFILE = $(TOPDIR)/lib/fedora25-armv7/libsnowboy-detect.a + LDLIBS := -L/usr/lib/atlas -lm -ldl -lsatlas + endif + endif +endif + +all: $(SNOWBOYSWIGLIBFILE) $(SNOWBOYDETECTSWIGLIBFILE) + +%.a: + $(MAKE) -C ${@D} ${@F} + +$(SNOWBOYDETECTSWIGCC): $(SNOWBOYDETECTSWIGITF) + $(SWIG) -I$(TOPDIR) -c++ -perl -o $(SNOWBOYDETECTSWIGCC) $(SNOWBOYDETECTSWIGITF) + +$(SNOWBOYDETECTSWIGOBJ): $(SNOWBOYDETECTSWIGCC) + $(CXX) $(CXXFLAGS) -c $(SNOWBOYDETECTSWIGCC) + +$(SNOWBOYDETECTSWIGLIBFILE): $(SNOWBOYDETECTSWIGOBJ) $(SNOWBOYDETECTLIBFILE) + $(CXX) $(CXXFLAGS) $(LDFLAGS) $(SWIGFLAGS) $(SNOWBOYDETECTSWIGOBJ) \ + $(SNOWBOYDETECTLIBFILE) $(LDLIBS) -o $(SNOWBOYDETECTSWIGLIBFILE) + +clean: + -rm -f *.o *.a *.so *.dylib Snowboy.pm $(SNOWBOYDETECTSWIGCC) diff --git a/src/Guide_stick_system/voice_assistant/snowboy/swig/Perl/snowboy-detect.i b/src/Guide_stick_system/voice_assistant/snowboy/swig/Perl/snowboy-detect.i new file mode 100644 index 0000000..16a5005 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/swig/Perl/snowboy-detect.i @@ -0,0 +1,10 @@ +%module Snowboy + +%include "std_string.i" +%include "typemaps.i" + +%{ +#include "include/snowboy-detect.h" +%} + +%include "include/snowboy-detect.h" diff --git a/src/Guide_stick_system/voice_assistant/snowboy/swig/Python/Makefile b/src/Guide_stick_system/voice_assistant/snowboy/swig/Python/Makefile new file mode 100644 index 0000000..c4aa5cb --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/swig/Python/Makefile @@ -0,0 +1,73 @@ +# Example Makefile that converts snowboy c++ library (snowboy-detect.a) to +# python library (_snowboydetect.so, snowboydetect.py), using swig. + +# Please use swig-3.0.10 or up. 
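The Makefile that follows turns the static C++ library into a Python extension (_snowboydetect.so plus the generated snowboydetect.py). Once it has been built, a quick smoke test such as the sketch below confirms the wrapper loads and reports sane audio parameters; run it from swig/Python with the same interpreter the module was built against, and note that the relative resource paths are assumptions about this repository's layout:

```python
# Hedged smoke test for the freshly built SWIG wrapper (illustrative only).
import snowboydetect

detector = snowboydetect.SnowboyDetect(
    resource_filename=b"../../resources/common.res",
    model_str=b"../../resources/models/snowboy.umdl")

print("sample rate :", detector.SampleRate())     # 16000 for the bundled models
print("channels    :", detector.NumChannels())    # 1
print("bit depth   :", detector.BitsPerSample())  # 16
print("hotwords    :", detector.NumHotwords())

# Feed one second of silent 16-bit PCM. RunDetection returns -2 for silence,
# -1 on error, 0 for sound without a hotword, and a 1-based hotword index.
bytes_per_sample = detector.BitsPerSample() // 8
silence = b"\x00" * (detector.SampleRate() * detector.NumChannels() * bytes_per_sample)
print("RunDetection:", detector.RunDetection(silence))
```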
+SWIG := swig + +SWIG_VERSION := $(shell expr `$(SWIG) -version | grep -i Version | \ + sed "s/^.* //g" | sed -e "s/\.\([0-9][0-9]\)/\1/g" -e "s/\.\([0-9]\)/0\1/g" \ + -e "s/^[0-9]\{3,4\}$$/&00/"` \>= 30010) + +ifeq ($(SWIG_VERSION), 0) +checkversion: + $(info You need at least Swig 3.0.10 to run) + $(info Your current version is $(shell $(SWIG) -version | grep -i Version)) + @exit -1 +endif + +SNOWBOYDETECTSWIGITF = snowboy-detect-swig.i +SNOWBOYDETECTSWIGOBJ = snowboy-detect-swig.o +SNOWBOYDETECTSWIGCC = snowboy-detect-swig.cc +SNOWBOYDETECTSWIGLIBFILE = _snowboydetect.so + +TOPDIR := ../../ +CXXFLAGS := -I$(TOPDIR) -O3 -fPIC -D_GLIBCXX_USE_CXX11_ABI=0 +LDFLAGS := + +ifeq ($(shell uname), Darwin) + CXX := clang++ + PYINC := $(shell python-config --includes) + PYLIBS := $(shell python-config --ldflags) + SWIGFLAGS := -bundle -flat_namespace -undefined suppress + LDLIBS := -lm -ldl -framework Accelerate + SNOWBOYDETECTLIBFILE = $(TOPDIR)/lib/osx/libsnowboy-detect.a +else + CXX := g++ + PYINC := $(shell python-config --cflags) + PYLIBS := $(shell python-config --ldflags) + SWIGFLAGS := -shared + CXXFLAGS += -std=c++0x + # Make sure you have Atlas installed. You can statically link Atlas if you + # would like to be able to move the library to a machine without Atlas. + ifneq ("$(ldconfig -p | grep lapack_atlas)","") + LDLIBS := -lm -ldl -lf77blas -lcblas -llapack_atlas -latlas + else + LDLIBS := -lm -ldl -lf77blas -lcblas -llapack -latlas + endif + SNOWBOYDETECTLIBFILE = $(TOPDIR)/lib/ubuntu64/libsnowboy-detect.a + ifneq (,$(findstring arm,$(shell uname -m))) + SNOWBOYDETECTLIBFILE = $(TOPDIR)/lib/rpi/libsnowboy-detect.a + ifeq ($(findstring fc,$(shell uname -r)), fc) + SNOWBOYDETECTLIBFILE = $(TOPDIR)/lib/fedora25-armv7/libsnowboy-detect.a + LDLIBS := -L/usr/lib/atlas -lm -ldl -lsatlas + endif + endif +endif + +all: $(SNOWBOYSWIGLIBFILE) $(SNOWBOYDETECTSWIGLIBFILE) + +%.a: + $(MAKE) -C ${@D} ${@F} + +$(SNOWBOYDETECTSWIGCC): $(SNOWBOYDETECTSWIGITF) + $(SWIG) -I$(TOPDIR) -c++ -python -o $(SNOWBOYDETECTSWIGCC) $(SNOWBOYDETECTSWIGITF) + +$(SNOWBOYDETECTSWIGOBJ): $(SNOWBOYDETECTSWIGCC) + $(CXX) $(PYINC) $(CXXFLAGS) -c $(SNOWBOYDETECTSWIGCC) + +$(SNOWBOYDETECTSWIGLIBFILE): $(SNOWBOYDETECTSWIGOBJ) $(SNOWBOYDETECTLIBFILE) + $(CXX) $(CXXFLAGS) $(LDFLAGS) $(SWIGFLAGS) $(SNOWBOYDETECTSWIGOBJ) \ + $(SNOWBOYDETECTLIBFILE) $(PYLIBS) $(LDLIBS) -o $(SNOWBOYDETECTSWIGLIBFILE) + +clean: + -rm -f *.o *.a *.so snowboydetect.py *.pyc $(SNOWBOYDETECTSWIGCC) diff --git a/src/Guide_stick_system/voice_assistant/snowboy/swig/Python/snowboy-detect-swig.i b/src/Guide_stick_system/voice_assistant/snowboy/swig/Python/snowboy-detect-swig.i new file mode 100644 index 0000000..d383fed --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/swig/Python/snowboy-detect-swig.i @@ -0,0 +1,24 @@ +// swig/Python/snowboy-detect-swig.i + +// Copyright 2016 KITT.AI (author: Guoguo Chen) + +%module snowboydetect + +// Suppress SWIG warnings. +#pragma SWIG nowarn=SWIGWARN_PARSE_NESTED_CLASS +%include "std_string.i" + +%{ +#include "include/snowboy-detect.h" +%} + +%include "include/snowboy-detect.h" + +// below is Python 3 support, however, +// adding it will generate wrong .so file +// for Fedora 25 on ARMv7. So be sure to +// comment them when you compile for +// Fedora 25 on ARMv7. 
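The `%begin` block just below defines SWIG_PYTHON_STRICT_BYTE_CHAR, which makes the generated wrapper map std::string arguments to Python bytes rather than str. That is the reason snowboydecoder.py, added later in this patch, calls .encode() on the resource path, the comma-joined model list and the comma-joined sensitivity list. A short illustration of that calling convention (paths and sensitivities are placeholders; this assumes snowboy.umdl exposes one hotword and jarvis.umdl two):

```python
# How the byte-string arguments for the wrapper are assembled (illustrative).
models = ["resources/models/snowboy.umdl", "resources/models/jarvis.umdl"]
sensitivities = ["0.5", "0.45", "0.45"]     # one value per hotword, in model order

model_str = ",".join(models).encode()
sensitivity_str = ",".join(sensitivities).encode()
resource = "resources/common.res".encode()

print(model_str)        # b'resources/models/snowboy.umdl,resources/models/jarvis.umdl'
print(sensitivity_str)  # b'0.5,0.45,0.45'
# These are the values SnowboyDetect(resource, model_str) and
# SetSensitivity(sensitivity_str) expect once SWIG_PYTHON_STRICT_BYTE_CHAR is set.
```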
+%begin %{ +#define SWIG_PYTHON_STRICT_BYTE_CHAR +%} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/swig/Python3/Makefile b/src/Guide_stick_system/voice_assistant/snowboy/swig/Python3/Makefile new file mode 100644 index 0000000..5bf8b15 --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/swig/Python3/Makefile @@ -0,0 +1,77 @@ +# Example Makefile that converts snowboy c++ library (snowboy-detect.a) to +# python3 library (_snowboydetect.so, snowboydetect.py), using swig. + +# Please use swig-3.0.10 or up. +SWIG := swig + +SWIG_VERSION := $(shell expr `$(SWIG) -version | grep -i Version | \ + sed "s/^.* //g" | sed -e "s/\.\([0-9][0-9]\)/\1/g" -e "s/\.\([0-9]\)/0\1/g" \ + -e "s/^[0-9]\{3,4\}$$/&00/"` \>= 30010) + +ifeq ($(SWIG_VERSION), 0) +checkversion: + $(info You need at least Swig 3.0.10 to run) + $(info Your current version is $(shell $(SWIG) -version | grep -i Version)) + @exit -1 +endif + + +SNOWBOYDETECTSWIGITF = snowboy-detect-swig.i +SNOWBOYDETECTSWIGOBJ = snowboy-detect-swig.o +SNOWBOYDETECTSWIGCC = snowboy-detect-swig.cc +SNOWBOYDETECTSWIGLIBFILE = _snowboydetect.so + +TOPDIR := ../../ +CXXFLAGS := -I$(TOPDIR) -O3 -fPIC -D_GLIBCXX_USE_CXX11_ABI=0 +LDFLAGS := + +ifeq ($(shell uname), Darwin) + CXX := clang++ + PYINC := $(shell python3-config --includes) + # If you use Anaconda, the command `python3-config` will not return full path. + # In this case, please manually specify the full path like the following: + # PYLIBS := -L/Users/YOURNAME/anaconda3/lib/python3.6/config-3.6m-darwin -lpython3.6m -ldl -framework CoreFoundation + PYLIBS := $(shell python3-config --ldflags) + SWIGFLAGS := -bundle -flat_namespace -undefined suppress + LDLIBS := -lm -ldl -framework Accelerate + SNOWBOYDETECTLIBFILE = $(TOPDIR)/lib/osx/libsnowboy-detect.a +else + CXX := g++ + PYINC := $(shell python3-config --cflags) + PYLIBS := $(shell python3-config --ldflags) + SWIGFLAGS := -shared + CXXFLAGS += -std=c++0x + # Make sure you have Atlas installed. You can statically link Atlas if you + # would like to be able to move the library to a machine without Atlas. 
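The Python and Python3 Makefiles choose their link line by probing the machine: `ldconfig -p` is searched for lapack_atlas to pick between `-llapack_atlas` and plain `-llapack`, and the prebuilt libsnowboy-detect.a is selected per platform (osx, ubuntu64, rpi, or fedora25-armv7). Note that, as written, `$(ldconfig -p | grep lapack_atlas)` is expanded by GNU Make as an ordinarily empty variable rather than run as a shell command, so the plain `-llapack` branch is what normally takes effect; `$(shell ...)` would be needed to actually run the probe. A rough Python rendition of the intended checks, for illustration only:

```python
# Rough Python rendition of the probing these Makefiles intend to do.
# Purely illustrative; the real build stays in Make.
import platform
import subprocess

def has_lapack_atlas():
    """True if ldconfig lists an ATLAS-provided LAPACK (Linux only)."""
    try:
        out = subprocess.run(["ldconfig", "-p"],
                             capture_output=True, text=True).stdout
    except FileNotFoundError:
        return False
    return "lapack_atlas" in out

def snowboy_static_lib(top="../.."):
    """Pick the prebuilt libsnowboy-detect.a the way the Makefiles do."""
    system, machine, release = platform.system(), platform.machine(), platform.release()
    if system == "Darwin":
        return f"{top}/lib/osx/libsnowboy-detect.a"
    if "arm" in machine:
        if "fc" in release:          # mirrors the Makefile's coarse findstring check
            return f"{top}/lib/fedora25-armv7/libsnowboy-detect.a"
        return f"{top}/lib/rpi/libsnowboy-detect.a"
    return f"{top}/lib/ubuntu64/libsnowboy-detect.a"

print(snowboy_static_lib(), has_lapack_atlas())
```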
+ ifneq ("$(ldconfig -p | grep lapack_atlas)","") + LDLIBS := -lm -ldl -lf77blas -lcblas -llapack_atlas -latlas + else + LDLIBS := -lm -ldl -lf77blas -lcblas -llapack -latlas + endif + SNOWBOYDETECTLIBFILE = $(TOPDIR)/lib/ubuntu64/libsnowboy-detect.a + ifneq (,$(findstring arm,$(shell uname -m))) + SNOWBOYDETECTLIBFILE = $(TOPDIR)/lib/rpi/libsnowboy-detect.a + ifeq ($(findstring fc,$(shell uname -r)), fc) + SNOWBOYDETECTLIBFILE = $(TOPDIR)/lib/fedora25-armv7/libsnowboy-detect.a + LDLIBS := -L/usr/lib/atlas -lm -ldl -lsatlas + endif + endif +endif + +all: $(SNOWBOYSWIGLIBFILE) $(SNOWBOYDETECTSWIGLIBFILE) + +%.a: + $(MAKE) -C ${@D} ${@F} + +$(SNOWBOYDETECTSWIGCC): $(SNOWBOYDETECTSWIGITF) + $(SWIG) -I$(TOPDIR) -c++ -python -o $(SNOWBOYDETECTSWIGCC) $(SNOWBOYDETECTSWIGITF) + +$(SNOWBOYDETECTSWIGOBJ): $(SNOWBOYDETECTSWIGCC) + $(CXX) $(PYINC) $(CXXFLAGS) -c $(SNOWBOYDETECTSWIGCC) + +$(SNOWBOYDETECTSWIGLIBFILE): $(SNOWBOYDETECTSWIGOBJ) $(SNOWBOYDETECTLIBFILE) + $(CXX) $(CXXFLAGS) $(LDFLAGS) $(SWIGFLAGS) $(SNOWBOYDETECTSWIGOBJ) \ + $(SNOWBOYDETECTLIBFILE) $(PYLIBS) $(LDLIBS) -o $(SNOWBOYDETECTSWIGLIBFILE) + +clean: + -rm -f *.o *.a *.so snowboydetect.py *.pyc $(SNOWBOYDETECTSWIGCC) diff --git a/src/Guide_stick_system/voice_assistant/snowboy/swig/Python3/snowboy-detect-swig.i b/src/Guide_stick_system/voice_assistant/snowboy/swig/Python3/snowboy-detect-swig.i new file mode 100644 index 0000000..d383fed --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/swig/Python3/snowboy-detect-swig.i @@ -0,0 +1,24 @@ +// swig/Python/snowboy-detect-swig.i + +// Copyright 2016 KITT.AI (author: Guoguo Chen) + +%module snowboydetect + +// Suppress SWIG warnings. +#pragma SWIG nowarn=SWIGWARN_PARSE_NESTED_CLASS +%include "std_string.i" + +%{ +#include "include/snowboy-detect.h" +%} + +%include "include/snowboy-detect.h" + +// below is Python 3 support, however, +// adding it will generate wrong .so file +// for Fedora 25 on ARMv7. So be sure to +// comment them when you compile for +// Fedora 25 on ARMv7. 
+%begin %{ +#define SWIG_PYTHON_STRICT_BYTE_CHAR +%} diff --git a/src/Guide_stick_system/voice_assistant/snowboy/tsconfig.json b/src/Guide_stick_system/voice_assistant/snowboy/tsconfig.json new file mode 100644 index 0000000..2914c2f --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboy/tsconfig.json @@ -0,0 +1,34 @@ +{ + "compilerOptions": { + "target": "es6", + "module": "commonjs", + "moduleResolution": "node", + "isolatedModules": false, + "jsx": "react", + "experimentalDecorators": false, + "emitDecoratorMetadata": false, + "declaration": true, + "noImplicitAny": true, + "noImplicitUseStrict": false, + "noFallthroughCasesInSwitch": true, + "noImplicitReturns": true, + "removeComments": true, + "noLib": false, + "preserveConstEnums": true, + "suppressImplicitAnyIndexErrors": true + }, + "files": [ + "lib/node/index.ts", + "lib/node/node-pre-gyp.d.ts", + "lib/node/SnowboyDetectNative.d.ts", + "node_modules/@types/node/index.d.ts" + ], + "exclude": [ + "node_modules" + ], + "compileOnSave": true, + "buildOnSave": false, + "atom": { + "rewriteTsconfig": false + } +} diff --git a/src/Guide_stick_system/voice_assistant/snowboydecoder.py b/src/Guide_stick_system/voice_assistant/snowboydecoder.py new file mode 100644 index 0000000..86e001c --- /dev/null +++ b/src/Guide_stick_system/voice_assistant/snowboydecoder.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python + +import collections +import pyaudio +import snowboydetect +import time +import wave +import os +import logging + +logging.basicConfig() +logger = logging.getLogger("snowboy") +logger.setLevel(logging.INFO) +TOP_DIR = os.path.dirname(os.path.abspath(__file__)) + +RESOURCE_FILE = os.path.join(TOP_DIR, "resources/common.res") +DETECT_DING = os.path.join(TOP_DIR, "resources/ding.wav") +DETECT_DONG = os.path.join(TOP_DIR, "resources/dong.wav") + + +class RingBuffer(object): + """Ring buffer to hold audio from PortAudio""" + + def __init__(self, size=4096): + self._buf = collections.deque(maxlen=size) + + def extend(self, data): + """Adds data to the end of buffer""" + self._buf.extend(data) + + def get(self): + """Retrieves data from the beginning of buffer and clears it""" + tmp = bytes(bytearray(self._buf)) + self._buf.clear() + return tmp + + +def play_audio_file(fname=DETECT_DING): + """Simple callback function to play a wave file. By default it plays + a Ding sound. + + :param str fname: wave file name + :return: None + """ + # 打开wave文件 + ding_wav = wave.open(fname, 'rb') + # 读取文件内容 + ding_data = ding_wav.readframes(ding_wav.getnframes()) + # 初始化pyaudio + audio = pyaudio.PyAudio() + # 打开音频流 + stream_out = audio.open( + format=audio.get_format_from_width(ding_wav.getsampwidth()), + channels=ding_wav.getnchannels(), + rate=ding_wav.getframerate(), input=False, output=True) + # 开始音频流 + stream_out.start_stream() + # 写入音频数据 + stream_out.write(ding_data) + # 等待0.2秒 + time.sleep(0.2) + # 停止音频流 + stream_out.stop_stream() + stream_out.close() + # 关闭pyaudio + audio.terminate() + + +class HotwordDetector(object): + """ + Snowboy decoder to detect whether a keyword specified by `decoder_model` + exists in a microphone input stream. + + :param decoder_model: decoder model file path, a string or a list of strings + :param resource: resource file path. + :param sensitivity: decoder sensitivity, a float of a list of floats. + The bigger the value, the more senstive the + decoder. If an empty list is provided, then the + default sensitivity in the model will be used. + :param audio_gain: multiply input volume by this factor. 
+ """ + + def __init__(self, decoder_model, + resource=RESOURCE_FILE, + sensitivity=[], + audio_gain=1): + + def audio_callback(in_data, frame_count, time_info, status): + self.ring_buffer.extend(in_data) + play_data = chr(0) * len(in_data) + return play_data, pyaudio.paContinue + + tm = type(decoder_model) + ts = type(sensitivity) + if tm is not list: + decoder_model = [decoder_model] + if ts is not list: + sensitivity = [sensitivity] + model_str = ",".join(decoder_model) + + self.detector = snowboydetect.SnowboyDetect( + resource_filename=resource.encode(), model_str=model_str.encode()) + self.detector.SetAudioGain(audio_gain) + self.num_hotwords = self.detector.NumHotwords() + + if len(decoder_model) > 1 and len(sensitivity) == 1: + sensitivity = sensitivity*self.num_hotwords + if len(sensitivity) != 0: + assert self.num_hotwords == len(sensitivity), \ + "number of hotwords in decoder_model (%d) and sensitivity " \ + "(%d) does not match" % (self.num_hotwords, len(sensitivity)) + sensitivity_str = ",".join([str(t) for t in sensitivity]) + if len(sensitivity) != 0: + self.detector.SetSensitivity(sensitivity_str.encode()) + + self.ring_buffer = RingBuffer( + self.detector.NumChannels() * self.detector.SampleRate() * 5) + self.audio = pyaudio.PyAudio() + self.stream_in = self.audio.open( + input=True, output=False, + format=self.audio.get_format_from_width( + self.detector.BitsPerSample() / 8), + channels=self.detector.NumChannels(), + rate=self.detector.SampleRate(), + frames_per_buffer=2048, + stream_callback=audio_callback) + + def start(self, detected_callback=play_audio_file, + interrupt_check=lambda: False, + sleep_time=0.03): + """ + Start the voice detector. For every `sleep_time` second it checks the + audio buffer for triggering keywords. If detected, then call + corresponding function in `detected_callback`, which can be a single + function (single model) or a list of callback functions (multiple + models). Every loop it also calls `interrupt_check` -- if it returns + True, then breaks from the loop and return. + + :param detected_callback: a function or list of functions. The number of + items must match the number of models in + `decoder_model`. + :param interrupt_check: a function that returns True if the main loop + needs to stop. + :param float sleep_time: how much time in second every loop waits. + :return: None + """ + if interrupt_check(): + logger.debug("detect voice return") + return + + tc = type(detected_callback) + if tc is not list: + detected_callback = [detected_callback] + if len(detected_callback) == 1 and self.num_hotwords > 1: + detected_callback *= self.num_hotwords + + assert self.num_hotwords == len(detected_callback), \ + "Error: hotwords in your models (%d) do not match the number of " \ + "callbacks (%d)" % (self.num_hotwords, len(detected_callback)) + + logger.debug("detecting...") + + while True: + if interrupt_check(): + logger.debug("detect voice break") + break + data = self.ring_buffer.get() + if len(data) == 0: + time.sleep(sleep_time) + continue + + ans = self.detector.RunDetection(data) + if ans == -1: + logger.warning( + "Error initializing streams or reading audio data") + elif ans > 0: + message = "Keyword " + str(ans) + " detected at time: " + message += time.strftime("%Y-%m-%d %H:%M:%S", + time.localtime(time.time())) + logger.info(message) + callback = detected_callback[ans-1] + if callback is not None: + callback() + + logger.debug("finished.") + + def terminate(self): + """ + Terminate audio stream. 
Once terminated, start() can no longer be called to detect hotwords.
+        :return: None
+        """
+        self.stream_in.stop_stream()
+        self.stream_in.close()
+        self.audio.terminate()
diff --git a/src/Guide_stick_system/voice_assistant/snowboydetect.py b/src/Guide_stick_system/voice_assistant/snowboydetect.py
new file mode 100644
index 0000000..bfa5f6a
--- /dev/null
+++ b/src/Guide_stick_system/voice_assistant/snowboydetect.py
@@ -0,0 +1,180 @@
+# Check the Python version via sys.version_info
+from sys import version_info
+# On Python >= 2.6, load the compiled _snowboydetect extension through a helper
+if version_info >= (2, 6, 0):
+    def swig_import_helper():
+        # dirname() locates the directory that contains this wrapper
+        from os.path import dirname
+        # imp supplies find_module/load_module for the C extension
+        import imp
+        # File handle returned by find_module
+        fp = None
+        # Look for _snowboydetect next to this file first
+        try:
+            fp, pathname, description = imp.find_module(
+                '_snowboydetect', [dirname(__file__)])
+        # Fall back to a regular import if it is not found there
+        except ImportError:
+            import _snowboydetect
+            return _snowboydetect
+        # If a file handle was returned, load the module from it
+        if fp is not None:
+            try:
+                # Load the compiled module from the located file
+                _mod = imp.load_module(
+                    '_snowboydetect', fp, pathname, description)
+            # Always close the file handle
+            finally:
+                fp.close()
+            # Hand back the loaded module
+            return _mod
+    # Import the extension through the helper
+    _snowboydetect = swig_import_helper()
+    # The helper is no longer needed
+    del swig_import_helper
+# On older Pythons, import the extension directly
+else:
+    import _snowboydetect
+# version_info is no longer needed
+del version_info
+# Use the built-in property if this Python provides it
+try:
+    # property is a builtin on Python >= 2.2
+    _swig_property = property
+except NameError:
+    pass  # Python < 2.2 doesn't have 'property'.
+
+
+def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
+    # "thisown" is delegated to the underlying SWIG object
+    if (name == "thisown"):
+        return self.this.own(value)
+    # "this" stores the SwigPyObject wrapper itself
+    if (name == "this"):
+        if type(value).__name__ == 'SwigPyObject':
+            self.__dict__[name] = value
+            return
+    # Look up a generated setter for this attribute
+    method = class_type.__swig_setmethods__.get(name, None)
+    # Use the generated setter when one exists
+    if method:
+        return method(self, value)
+    # For non-static access, fall back to plain attribute assignment
+    if (not static):
+        if _newclass:
+            object.__setattr__(self, name, value)
+        else:
+            self.__dict__[name] = value
+    # Otherwise adding new attributes is not allowed
+    else:
+        raise AttributeError("You cannot add attributes to %s" % self)
+
+
+# Non-static wrapper around _swig_setattr_nondynamic
+def _swig_setattr(self, class_type, name, value):
+    # Delegate with static=0
+    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
+
+
+# Attribute lookup helper used by the generated proxy classes
+def _swig_getattr_nondynamic(self, class_type, name, static=1):
+    # "thisown" is delegated to the underlying SWIG object
+    if (name == "thisown"):
+        return self.this.own()
+    # Look up a generated getter for this attribute
+    method = class_type.__swig_getmethods__.get(name, None)
+    # Use the generated getter when one exists
+    if method:
+        return method(self)
+    # For non-static access, fall back to the default lookup
+    if (not static):
+        # (raises AttributeError if the attribute does not exist)
+        return object.__getattr__(self, name)
+    # Otherwise report the attribute as missing
+    else:
+        raise AttributeError(name)
+
+
+# Non-static wrapper around _swig_getattr_nondynamic
+def _swig_getattr(self, class_type, name):
+    # Delegate with static=0
+    return _swig_getattr_nondynamic(self, class_type, name, 0)
+
+
+# repr() helper shared by the generated proxy classes
+def _swig_repr(self):
+    # Describe the underlying SWIG object when it is available
+    try:
+        strthis = "proxy of " + self.this.__repr__()
+    # Fall back to an empty description
+    except:
+        strthis = ""
+    # Format as "<module.ClassName; proxy of ...>"
+    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
+
+
+# Detect whether new-style classes are available
+try:
+    _object = object
+    # object exists, so new-style classes can be used
+    _newclass = 1
+# On very old Pythons, fall back to a classic base class
+except AttributeError:
+    class _object:
+        pass
+    _newclass = 0
+
+
+class SnowboyDetect(_object):
+    __swig_setmethods__ = {}
+    def __setattr__(self, name, value): return _swig_setattr(
+        self, SnowboyDetect, name, value)
+    __swig_getmethods__ = {}
+    def __getattr__(self, name): return _swig_getattr(
+        self, SnowboyDetect, name)
+    __repr__ = _swig_repr
+
+    def __init__(self, resource_filename, model_str):
+        this = _snowboydetect.new_SnowboyDetect(resource_filename, model_str)
+        try:
+            self.this.append(this)
+        except:
+            self.this = this
+
+    def Reset(self):
+        return _snowboydetect.SnowboyDetect_Reset(self)
+
+    def RunDetection(self, *args):
+        return _snowboydetect.SnowboyDetect_RunDetection(self, *args)
+
+    def SetSensitivity(self, sensitivity_str):
+        return _snowboydetect.SnowboyDetect_SetSensitivity(self, sensitivity_str)
+
+    def GetSensitivity(self):
+        return _snowboydetect.SnowboyDetect_GetSensitivity(self)
+
+    def SetAudioGain(self, audio_gain):
+        return _snowboydetect.SnowboyDetect_SetAudioGain(self, audio_gain)
+
+    def UpdateModel(self):
+        return _snowboydetect.SnowboyDetect_UpdateModel(self)
+
+    def NumHotwords(self):
+        return _snowboydetect.SnowboyDetect_NumHotwords(self)
+
+    def SampleRate(self):
+        return _snowboydetect.SnowboyDetect_SampleRate(self)
+
+    def NumChannels(self):
+        return _snowboydetect.SnowboyDetect_NumChannels(self)
+
+    def BitsPerSample(self):
+        return _snowboydetect.SnowboyDetect_BitsPerSample(self)
+    __swig_destroy__ = _snowboydetect.delete_SnowboyDetect
+    def __del__(self): return None
+
+
+SnowboyDetect_swigregister = _snowboydetect.SnowboyDetect_swigregister
+SnowboyDetect_swigregister(SnowboyDetect)
+
+# This file is compatible with both classic and new-style classes.
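For context, this is how the HotwordDetector defined above is normally driven: construct it with a personal model such as the xm.pmdl added in this change set, hand start() a detection callback plus an interrupt check, and call terminate() on the way out. A minimal sketch, assuming the default RESOURCE_FILE baked into snowboydecoder.py points at a valid common.res and that xm.pmdl sits in the working directory:

import signal
import snowboydecoder

interrupted = False

def signal_handler(sig, frame):
    # Flag the main loop to stop on Ctrl+C.
    global interrupted
    interrupted = True

def interrupt_callback():
    return interrupted

def on_hotword():
    print("hotword detected")

signal.signal(signal.SIGINT, signal_handler)

# xm.pmdl is the personal hotword model shipped in this change set.
detector = snowboydecoder.HotwordDetector("xm.pmdl", sensitivity=0.5)
detector.start(detected_callback=on_hotword,
               interrupt_check=interrupt_callback,
               sleep_time=0.03)
detector.terminate()

Keeping interrupt_check cheap matters: start() calls it once per sleep_time loop, so anything slow there delays keyword handling.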
diff --git a/src/Guide_stick_system/voice_assistant/text/test_snowboydecoder.py b/src/Guide_stick_system/voice_assistant/text/test_snowboydecoder.py
new file mode 100644
index 0000000..445a3f9
--- /dev/null
+++ b/src/Guide_stick_system/voice_assistant/text/test_snowboydecoder.py
@@ -0,0 +1,56 @@
+import unittest
+from unittest.mock import Mock, patch, ANY
+from snowboydecoder import HotwordDetector
+
+# Assumes RESOURCE_FILE matches the resource path defined in snowboydecoder.py
+RESOURCE_FILE = 'path/to/resource.res'
+
+class TestHotwordDetector(unittest.TestCase):
+
+    @patch('snowboydecoder.snowboydetect.SnowboyDetect')
+    @patch('snowboydecoder.pyaudio.PyAudio')
+    def test_init(self, mock_pyaudio, mock_snowboydetect):
+        # Mock SnowboyDetect
+        mock_detector = mock_snowboydetect.return_value
+        mock_detector.NumChannels.return_value = 1
+        mock_detector.SampleRate.return_value = 16000
+        mock_detector.BitsPerSample.return_value = 16
+        mock_detector.NumHotwords.return_value = 1
+
+        # Mock PyAudio
+        mock_audio = mock_pyaudio.return_value
+
+        # Mock audio stream
+        mock_stream = Mock()
+        mock_audio.open.return_value = mock_stream
+
+        # Initialize HotwordDetector
+        decoder_model = 'model_id'
+        sensitivity = 0.5
+        audio_gain = 1.0
+        detector = HotwordDetector(decoder_model, RESOURCE_FILE, sensitivity, audio_gain)
+
+        # Verify interactions
+        mock_snowboydetect.assert_called_once_with(
+            resource_filename=RESOURCE_FILE.encode(),
+            model_str='model_id'.encode()
+        )
+        mock_detector.SetAudioGain.assert_called_once_with(audio_gain)
+        mock_detector.SetSensitivity.assert_called_once_with('0.5'.encode())
+
+        # Verify PyAudio and Stream configurations
+        mock_pyaudio.assert_called_once()
+        mock_audio.open.assert_called_once_with(
+            input=True, output=False,
+            format=mock_audio.get_format_from_width.return_value,
+            channels=1,
+            rate=16000,
+            frames_per_buffer=2048,
+            stream_callback=ANY
+        )
+
+        # Verify the ring buffer starts out empty
+        self.assertEqual(len(detector.ring_buffer), 0)
+
+if __name__ == '__main__':
+    unittest.main()
\ No newline at end of file
diff --git a/src/Guide_stick_system/voice_assistant/text/test_unit.py b/src/Guide_stick_system/voice_assistant/text/test_unit.py
new file mode 100644
index 0000000..70becc2
--- /dev/null
+++ b/src/Guide_stick_system/voice_assistant/text/test_unit.py
@@ -0,0 +1,52 @@
+import unittest
+from unittest.mock import MagicMock
+from unittest.mock import patch
+import wave
+import snowboydecoder
+from pyaudio import PyAudio, paInt16
+# NOTE: detected, my_record, speech2text, interrupt_callback, channels,
+# framerate and num_samples are assumed to come from the voice assistant's
+# main script; import them here once that module name is settled.
+
+class TestMyCode(unittest.TestCase):
+
+    def setUp(self):
+        self.detector = MagicMock(spec=snowboydecoder.HotwordDetector)
+        self.pa = MagicMock(spec=PyAudio)
+        self.wf = MagicMock(spec=wave.open)
+        self.p = MagicMock()
+        self.stream = MagicMock()
+        self.data = MagicMock()
+
+    @patch('snowboydecoder.HotwordDetector')
+    @patch('pyaudio.PyAudio')
+    @patch('wave.open')
+    def test_detected(self, mock_wave_open, mock_pyaudio, mock_hotword_detector):
+        mock_hotword_detector.return_value = self.detector
+        self.detector.start = MagicMock()
+        self.detector.terminate = MagicMock()
+
+        with patch('builtins.print') as mock_print:
+            detected()
+            mock_print.assert_called_with('唤醒成功')
+        self.detector.start.assert_called_with(detected_callback=detected, interrupt_check=interrupt_callback, sleep_time=0.03)
+        self.detector.terminate.assert_called()
+
+    @patch('pyaudio.PyAudio')
+    @patch('wave.open')
+    def test_my_record(self, mock_wave_open, mock_pyaudio):
+        mock_pyaudio.return_value = self.pa
+        self.pa.open = MagicMock(return_value=self.stream)
+        self.stream.read = MagicMock(return_value=self.data)
+
+        my_record()
+
+        self.pa.open.assert_called_with(format=paInt16, channels=channels, rate=framerate, input=True, frames_per_buffer=num_samples)
+        self.stream.read.assert_called_with(num_samples)
+
+    @patch('requests.post')
+    def test_speech2text(self, mock_requests_post):
+        mock_response = MagicMock()
+        mock_response.json = MagicMock(return_value={'result': ['你好']})
+        mock_requests_post.return_value = mock_response
+
+        speech = MagicMock()
+        result = speech2text(speech, 'token')
+        self.assertEqual(result, '你好')
+
+if __name__ == '__main__':
+    unittest.main()
\ No newline at end of file
diff --git a/src/Guide_stick_system/voice_assistant/xm.pmdl b/src/Guide_stick_system/voice_assistant/xm.pmdl
new file mode 100644
index 0000000..e753476
Binary files /dev/null and b/src/Guide_stick_system/voice_assistant/xm.pmdl differ
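The test_speech2text case above only pins down the contract of speech2text: it must push the recorded audio through requests.post and return result[0] from the JSON reply. A minimal sketch consistent with that contract (the endpoint, query parameters, and Content-Type header are assumptions modelled on Baidu's short-speech REST API, not something this diff specifies):

import requests

def speech2text(speech_data, token, rate=16000):
    # Assumed endpoint and headers; only the requests.post call and the
    # ['result'][0] parsing below are actually exercised by test_speech2text.
    url = ('http://vop.baidu.com/server_api'
           '?dev_pid=1537&cuid=guide-stick&token=' + token)
    headers = {'Content-Type': 'audio/wav; rate=%d' % rate}
    response = requests.post(url, data=speech_data, headers=headers)
    result = response.json()
    # test_unit.py mocks the reply as {'result': ['你好']}.
    return result.get('result', [''])[0]

Because test_unit.py patches requests.post, the HTTP layer is never touched; the test stays hermetic while still checking that the 'result' list is unpacked into a plain string.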