From 91444e753231d71f330efc8c712ddb520d4d2349 Mon Sep 17 00:00:00 2001
From: Frieren <1692219062wang@gmail.com>
Date: Mon, 13 May 2024 00:11:07 +0800
Subject: [PATCH] second commit
---
.idea/misc.xml | 3 +
.idea/workspace.xml | 106 ++++++++++++++++++++++++++----
requirements.txt | 9 ++-
src/base/PdfLoader.py | 15 +++++
src/base/chinese_text_splitter.py | 48 ++++++++++++--
src/serve/embedding.py | 28 ++++++++
6 files changed, 185 insertions(+), 24 deletions(-)
create mode 100644 src/base/PdfLoader.py
create mode 100644 src/serve/embedding.py
diff --git a/.idea/misc.xml b/.idea/misc.xml
index 7830771..7427ae4 100644
--- a/.idea/misc.xml
+++ b/.idea/misc.xml
@@ -1,4 +1,7 @@
+
+
+
\ No newline at end of file
diff --git a/.idea/workspace.xml b/.idea/workspace.xml
index d70da44..205572e 100644
--- a/.idea/workspace.xml
+++ b/.idea/workspace.xml
@@ -1,33 +1,75 @@
+
+
+
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+ {
+ "associatedIndex": 8
+}
-
+
+
+
- {
+ "keyToString": {
+ "Python.RAG.executor": "Run",
+ "Python.embedding.executor": "Run",
+ "RunOnceActivity.OpenProjectViewOnStart": "true",
+ "RunOnceActivity.ShowReadmeOnStart": "true",
+ "git-widget-placeholder": "f7939674",
+ "last_opened_file_path": "G:/code/py/LLM",
+ "node.js.detected.package.eslint": "true",
+ "node.js.detected.package.tslint": "true",
+ "node.js.selected.package.eslint": "(autodetect)",
+ "node.js.selected.package.tslint": "(autodetect)",
+ "nodejs_package_manager_path": "npm",
+ "settings.editor.selected.configurable": "com.jetbrains.python.configuration.PyActiveSdkModuleConfigurable",
+ "vue.rearranger.settings.migration": "true"
}
-}]]>
+}
+
+
+
+
+
+
@@ -43,8 +85,44 @@
1715274683455
-
+
+
+
+
+
+
+
+
+ 1715279142669
+
+
+
+ 1715279142669
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index ecdfcaa..f5aee2c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,7 @@
langchain~=0.1.19
-httpx-sse
-langchainhub
-pyjwt
\ No newline at end of file
+httpx
+pyjwt
+transformers
+text2vec~=1.2.9
+chardet
+pypdf~=4.2.0
\ No newline at end of file
diff --git a/src/base/PdfLoader.py b/src/base/PdfLoader.py
new file mode 100644
index 0000000..25bdd12
--- /dev/null
+++ b/src/base/PdfLoader.py
@@ -0,0 +1,15 @@
+from langchain_community.document_loaders import PyPDFLoader
+
+
class PDFLoader:
    """Thin wrapper around PyPDFLoader that extracts a PDF's full text."""

    @staticmethod
    def loader(file_path: str) -> str:
        """Return the concatenated text of every page of *file_path*.

        Pages are joined with no separator, matching the original
        ``content += page.page_content`` accumulation — but via
        ``str.join``, which is linear instead of quadratic.
        """
        pages = PyPDFLoader(file_path).load()
        return "".join(page.page_content for page in pages)
+
+
if __name__ == '__main__':
    # Ad-hoc manual check against a local sample document.
    sample_path = "C:\\Users\\16922\\Desktop\\文档1.pdf"
    print(PDFLoader.loader(sample_path))
diff --git a/src/base/chinese_text_splitter.py b/src/base/chinese_text_splitter.py
index 14914ec..414fd41 100644
--- a/src/base/chinese_text_splitter.py
+++ b/src/base/chinese_text_splitter.py
@@ -1,25 +1,59 @@
+from langchain.text_splitter import CharacterTextSplitter
import re
from typing import List
-from langchain.text_splitter import CharacterTextSplitter
-
class ChineseTextSplitter(CharacterTextSplitter):
    """Text splitter tuned for Chinese punctuation.

    ``split_text1`` is the legacy single-pass regex splitter kept for
    compatibility; ``split_text`` additionally re-splits any sentence
    longer than ``sentence_size`` on progressively weaker separators
    (commas, whitespace runs, single spaces).
    """

    def __init__(self, pdf: bool = False, sentence_size: int = 250, **kwargs):
        """
        :param pdf: apply PDF-extraction cleanup (collapse blank lines,
            normalize whitespace) before splitting.
        :param sentence_size: length threshold above which a sentence is
            re-split on weaker punctuation.
        """
        super().__init__(**kwargs)
        self.pdf = pdf
        self.sentence_size = sentence_size

    def split_text1(self, text: str) -> List[str]:
        """Legacy splitter: cut on Chinese/Western sentence terminators."""
        if self.pdf:
            text = re.sub(r"\n{3,}", "\n", text)
            text = re.sub(r'\s', ' ', text)  # raw string: same value, no invalid-escape warning
            text = text.replace("\n\n", "")
        # A terminator optionally followed by closing quotes, OR a lookahead
        # at opening quotes / end of string; ":" and ";" deliberately excluded.
        sent_sep_pattern = re.compile(r'([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))')
        sent_list: List[str] = []
        for ele in sent_sep_pattern.split(text):
            if sent_sep_pattern.match(ele) and sent_list:
                # A bare separator fragment belongs to the previous sentence.
                sent_list[-1] += ele
            elif ele:
                sent_list.append(ele)
        return sent_list

    def split_text(self, text: str) -> List[str]:
        """Split *text* into sentences, re-splitting any sentence longer
        than ``sentence_size`` via :meth:`_split_oversized`.

        Returns the list of non-empty sentence strings in document order.
        """
        if self.pdf:
            text = re.sub(r"\n{3,}", r"\n", text)
            text = re.sub(r'\s', " ", text)
            # NOTE(review): after the \s substitution above no "\n\n" can
            # remain, so this substitution is a no-op; kept for fidelity.
            text = re.sub("\n\n", "", text)

        # Insert "\n" after sentence terminators: single-character ones,
        # English "......", Chinese "……", and terminator+closing-quote runs.
        # When a terminator precedes a closing quote, the break goes after
        # the quote, so the quote stays attached to its sentence.
        text = re.sub(r'([;;.!?。!?\?])([^”’])', r"\1\n\2", text)
        text = re.sub(r'(\.{6})([^"’”」』])', r"\1\n\2", text)
        text = re.sub(r'(\…{2})([^"’”」』])', r"\1\n\2", text)
        text = re.sub(r'([;;!?。!?\?]["’”」』]{0,2})([^;;!?,。!?\?])', r'\1\n\2', text)
        text = text.rstrip()  # drop trailing newlines at the segment end

        result: List[str] = []
        for sentence in (s for s in text.split("\n") if s):
            if len(sentence) > self.sentence_size:
                result.extend(self._split_oversized(sentence))
            else:
                result.append(sentence)
        return result

    def _split_oversized(self, sentence: str) -> List[str]:
        """Re-split one oversized sentence on commas, then whitespace runs,
        then single spaces, keeping fragments in order.

        BUG FIX: the original spliced results back with ``list.index()``
        while rebinding the list being iterated; with two textually
        identical fragments, ``index`` found the first occurrence and
        mis-spliced. Positional accumulation avoids both hazards.
        """
        out: List[str] = []
        level1 = re.sub(r'([,,.]["’”」』]{0,2})([^,,.])', r'\1\n\2', sentence)
        for p1 in (s for s in level1.split("\n") if s):
            if len(p1) <= self.sentence_size:
                out.append(p1)
                continue
            level2 = re.sub(r'([\n]{1,}| {2,}["’”」』]{0,2})([^\s])', r'\1\n\2', p1)
            for p2 in (s for s in level2.split("\n") if s):
                if len(p2) <= self.sentence_size:
                    out.append(p2)
                    continue
                level3 = re.sub(r'( ["’”」』]{0,2})([^ ])', r'\1\n\2', p2)
                out.extend(s for s in level3.split("\n") if s)
        return out
\ No newline at end of file
diff --git a/src/serve/embedding.py b/src/serve/embedding.py
new file mode 100644
index 0000000..c7b71f4
--- /dev/null
+++ b/src/serve/embedding.py
@@ -0,0 +1,28 @@
+from langchain_community.document_loaders import PyPDFLoader
+from langchain_community.vectorstores import Chroma
+from text2vec import SentenceModel
+from src.base.chinese_text_splitter import ChineseTextSplitter
+
+
class SentenceEmbedding:
    """Splits a PDF into Chinese sentences, embeds them with text2vec,
    and exposes similarity search over a Chroma vector store."""

    # Shared encoder; the (expensive) model weights load once per process.
    # Renamed from name-mangled __model so the nested adapter can reach it.
    _model = SentenceModel('shibing624/text2vec-base-chinese')

    class _Embedder:
        """Minimal adapter giving the text2vec model the Embeddings
        interface (embed_documents / embed_query) that Chroma expects."""

        def embed_documents(self, texts):
            return [list(map(float, vec)) for vec in SentenceEmbedding._model.encode(texts)]

        def embed_query(self, text):
            return list(map(float, SentenceEmbedding._model.encode(text)))

    def __init__(self, file_path: str):
        """Load *file_path*, split it into sentences, and index them.

        :param file_path: path to the PDF to embed.
        """
        self.file_path = file_path
        # Join is linear; the original `content +=` loop was quadratic.
        content = "".join(page.page_content for page in PyPDFLoader(file_path).load())
        sentences = ChineseTextSplitter(True).split_text(content)
        # BUG FIX: the original called Chroma.add_texts on the CLASS
        # (unbound instance method -> TypeError) and passed raw embedding
        # vectors where the API expects metadatas. Build the store from
        # texts with an embedding function instead.
        self.vectorstore = Chroma.from_texts(sentences, SentenceEmbedding._Embedder())

    def get_vectorstore(self):
        """Return the underlying Chroma vector store."""
        return self.vectorstore

    def search(self, query: str, k: int = 4):
        """Return the *k* stored sentences most similar to *query*.

        BUG FIX: the original encoded the query and then evaluated
        ``self.vectorstore`` as a bare expression, always returning None.
        """
        return self.vectorstore.similarity_search(query, k=k)
+
+
if __name__ == '__main__':
    # BUG FIX: the committed line ended with a dangling "." — a
    # SyntaxError that made the whole module unimportable.
    # NOTE(review): presumably a method call (e.g. .search(...)) was
    # intended after the constructor; construct only until confirmed.
    SentenceEmbedding("C:\\Users\\16922\\Desktop\\文档1.pdf")