hahahafofo committed
Commit 5323587
1 Parent(s): dbeda76
Files changed (4)
  1. chatpdf.py +190 -0
  2. requirements.txt +11 -0
  3. sample.pdf +0 -0
  4. webui.py +290 -0
chatpdf.py ADDED
@@ -0,0 +1,190 @@
+ # -*- coding: utf-8 -*-
+ """
+ @author:XuMing([email protected])
+ @description: ChatPDF - retrieval-augmented question answering over local documents
+ """
+ from similarities import Similarity
+ from textgen import ChatGlmModel, LlamaModel
+ from transformers import pipeline
+ from loguru import logger
+
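+ # PROMPT_TEMPLATE (Chinese) instructs the model to answer the user's question
+ # concisely and professionally from the given context, to reply
+ # "根据已知信息无法回答该问题" or "没有提供足够的相关信息" when the context is
+ # insufficient rather than fabricate content, and to answer in Chinese.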
+ PROMPT_TEMPLATE = """\
+ 基于以下已知信息,简洁和专业的来回答用户的问题。
+ 如果无法从中得到答案,请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息",不允许在答案中添加编造成分,答案请使用中文。
+
+ 已知内容:
+ {context_str}
+
+ 问题:
+ {query_str}
+ """
+
+
+ class ChatPDF:
+     def __init__(
+             self,
+             sim_model_name_or_path: str = "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2",
+             gen_model_type: str = "chatglm",
+             gen_model_name_or_path: str = "THUDM/chatglm-6b-int4",
+             lora_model_name_or_path: str = None,
+     ):
+         self.sim_model = Similarity(model_name_or_path=sim_model_name_or_path)
+         self.model_type = gen_model_type
+         if gen_model_type == "chatglm":
+             self.gen_model = ChatGlmModel(gen_model_type, gen_model_name_or_path, lora_name=lora_model_name_or_path)
+         elif gen_model_type == "llama":
+             self.gen_model = LlamaModel(gen_model_type, gen_model_name_or_path, lora_name=lora_model_name_or_path)
+         elif gen_model_type == "t5":
+             self.gen_model = pipeline('text2text-generation', model=gen_model_name_or_path, device=0)
+         else:
+             raise ValueError('gen_model_type must be chatglm, llama or t5.')
+         self.history = None
+         self.pdf_path = None
+
+     def load_pdf_file(self, pdf_path: str):
+         """Load a document file (PDF, DOCX, Markdown, or plain text) into the corpus."""
+         if pdf_path.endswith('.pdf'):
+             corpus = self.extract_text_from_pdf(pdf_path)
+         elif pdf_path.endswith('.docx'):
+             corpus = self.extract_text_from_docx(pdf_path)
+         elif pdf_path.endswith('.md'):
+             corpus = self.extract_text_from_markdown(pdf_path)
+         else:
+             corpus = self.extract_text_from_txt(pdf_path)
+         self.sim_model.add_corpus(corpus)
+         self.pdf_path = pdf_path
+
+     @staticmethod
+     def extract_text_from_pdf(file_path: str):
+         """Extract text content from a PDF file."""
+         import PyPDF2
+         contents = []
+         with open(file_path, 'rb') as f:
+             pdf_reader = PyPDF2.PdfReader(f)
+             for page in pdf_reader.pages:
+                 page_text = page.extract_text().strip()
+                 raw_text = [text.strip() for text in page_text.splitlines() if text.strip()]
+                 # merge wrapped lines into passages, splitting whenever a line ends with sentence-final punctuation
+                 new_text = ''
+                 for text in raw_text:
+                     new_text += text
+                     if text[-1] in ['.', '!', '?', '。', '!', '?', '…', ';', ';', ':', ':', '”', '’', ')', '】', '》', '」',
+                                     '』', '〕', '〉', '〗', '〞', '〟', '»', '"', "'", ')', ']', '}']:
+                         contents.append(new_text)
+                         new_text = ''
+                 if new_text:
+                     contents.append(new_text)
+         return contents
+
+     @staticmethod
+     def extract_text_from_txt(file_path: str):
+         """Extract text content from a TXT file."""
+         with open(file_path, 'r', encoding='utf-8') as f:
+             contents = [text.strip() for text in f.readlines() if text.strip()]
+         return contents
+
+     @staticmethod
+     def extract_text_from_docx(file_path: str):
+         """Extract text content from a DOCX file."""
+         import docx
+         document = docx.Document(file_path)
+         contents = [paragraph.text.strip() for paragraph in document.paragraphs if paragraph.text.strip()]
+         return contents
+
+     @staticmethod
+     def extract_text_from_markdown(file_path: str):
+         """Extract text content from a Markdown file."""
+         import markdown
+         from bs4 import BeautifulSoup
+         with open(file_path, 'r', encoding='utf-8') as f:
+             markdown_text = f.read()
+         html = markdown.markdown(markdown_text)
+         soup = BeautifulSoup(html, 'html.parser')
+         contents = [text.strip() for text in soup.get_text().splitlines() if text.strip()]
+         return contents
+
+     @staticmethod
+     def _add_source_numbers(lst):
+         """Add source numbers to a list of strings."""
+         return [f'[{idx + 1}]\t "{item}"' for idx, item in enumerate(lst)]
+
+     def _generate_answer(self, query_str, context_str, history=None, max_length=1024):
+         """Generate an answer from the query and the retrieved context."""
+         if self.model_type == "t5":
+             response = self.gen_model(query_str, max_length=max_length, do_sample=True)[0]['generated_text']
+             return response, history
+         prompt = PROMPT_TEMPLATE.format(context_str=context_str, query_str=query_str)
+         response, out_history = self.gen_model.chat(prompt, history, max_length=max_length)
+         return response, out_history
+
+     def chat(self, query_str, history=None, max_length=1024):
+         """Chat with the generation model directly, without document retrieval."""
+         if self.model_type == "t5":
+             response = self.gen_model(query_str, max_length=max_length, do_sample=True)[0]['generated_text']
+             logger.debug(response)
+             return response, history
+         response, out_history = self.gen_model.chat(query_str, history, max_length=max_length)
+         return response, out_history
+
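+     # query() retrieves the topn most similar passages from the indexed corpus,
+     # numbers them as sources, truncates the joined context to max_input_size,
+     # and fills PROMPT_TEMPLATE before calling the generation model.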
+     def query(
+             self,
+             query,
+             topn: int = 5,
+             max_length: int = 1024,
+             max_input_size: int = 1024,
+             use_history: bool = False
+     ):
+         """Answer a query from the indexed corpus."""
+         sim_contents = self.sim_model.most_similar(query, topn=topn)
+         reference_results = []
+         for query_id, id_score_dict in sim_contents.items():
+             for corpus_id, s in id_score_dict.items():
+                 reference_results.append(self.sim_model.corpus[corpus_id])
+         if not reference_results:
+             return '没有提供足够的相关信息', None, reference_results
+         reference_results = self._add_source_numbers(reference_results)
+         context_str = '\n'.join(reference_results)[:(max_input_size - len(PROMPT_TEMPLATE))]
+         if use_history:
+             response, out_history = self._generate_answer(query, context_str, self.history, max_length=max_length)
+             self.history = out_history
+         else:
+             response, out_history = self._generate_answer(query, context_str, max_length=max_length)
+         return response, out_history, reference_results
+
+     def save_index(self, index_path=None):
+         """Save the similarity index to a JSON file."""
+         if index_path is None:
+             index_path = '.'.join(self.pdf_path.split('.')[:-1]) + '_index.json'
+         self.sim_model.save_index(index_path)
+
+     def load_index(self, index_path=None):
+         """Load a previously saved similarity index."""
+         if index_path is None:
+             index_path = '.'.join(self.pdf_path.split('.')[:-1]) + '_index.json'
+         self.sim_model.load_index(index_path)
+
+
+ if __name__ == "__main__":
+     import sys
+
+     if len(sys.argv) > 1:
+         gen_model_name_or_path = sys.argv[1]
+     else:
+         print('Usage: python chatpdf.py <gen_model_name_or_path>')
+         gen_model_name_or_path = "THUDM/chatglm-6b-int4"
+     m = ChatPDF(gen_model_name_or_path=gen_model_name_or_path)
+     m.load_pdf_file(pdf_path='sample.pdf')
+     response = m.query('自然语言中的非平行迁移是指什么?')
+     print(response[0])
+     response = m.query('本文作者是谁?')
+     print(response[0])
requirements.txt ADDED
@@ -0,0 +1,11 @@
+ gradio
+ similarities>=1.0.5
+ sentencepiece
+ textgen
+ markdown
+ beautifulsoup4
+ PyPDF2
+ python-docx
+ pandas
+ protobuf
+ cpm-kernels
+ loguru
sample.pdf ADDED
Binary file (375 kB).
 
webui.py ADDED
@@ -0,0 +1,290 @@
+ # -*- coding: utf-8 -*-
+ """
+ @author:XuMing([email protected])
+ @description: Gradio web UI for ChatPDF
+ modified from https://github.com/imClumsyPanda/langchain-ChatGLM/blob/master/webui.py
+ """
+ import hashlib
+ import os
+ import shutil
+
+ import gradio as gr
+ from loguru import logger
+
+ from chatpdf import ChatPDF
+
+ pwd_path = os.path.abspath(os.path.dirname(__file__))
+
+ CONTENT_DIR = os.path.join(pwd_path, "content")
+ logger.info(f"CONTENT_DIR: {CONTENT_DIR}")
+ VECTOR_SEARCH_TOP_K = 3
+ MAX_INPUT_LEN = 2048
+
+ # supported embedding models
+ embedding_model_dict = {
+     "text2vec-large": "GanymedeNil/text2vec-large-chinese",
+     "text2vec-base": "shibing624/text2vec-base-chinese",
+     "sentence-transformers": "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2",
+     "ernie-tiny": "nghuyong/ernie-3.0-nano-zh",
+     "ernie-base": "nghuyong/ernie-3.0-base-zh",
+ }
+
+ # supported LLM models
+ llm_model_dict = {
+     # "chatglm-6b": "E:\\sdwebui\\image2text_prompt_generator\\models\\chatglm-6b",
+     "chatglm-6b-int4": "THUDM/chatglm-6b-int4",
+     "chatglm-6b": "THUDM/chatglm-6b",
+     "chatglm-6b-int4-qe": "THUDM/chatglm-6b-int4-qe",
+     "llama-7b": "decapoda-research/llama-7b-hf",
+     "llama-13b": "decapoda-research/llama-13b-hf",
+     "t5-lamini-flan-783M": "MBZUAI/LaMini-Flan-T5-783M",
+ }
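+ # NOTE: reinit_model derives gen_model_type from the key prefix
+ # (e.g. "chatglm-6b-int4" -> "chatglm", "t5-lamini-flan-783M" -> "t5"),
+ # so every key must start with a supported model type.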
+
+ llm_model_dict_list = list(llm_model_dict.keys())
+ embedding_model_dict_list = list(embedding_model_dict.keys())
+
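+ # the loaded ChatPDF instance is kept in a module-level global shared by all Gradio callbacks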
+ model = None
+
+
+ def get_file_list():
+     if not os.path.exists(CONTENT_DIR):
+         return []
+     return [f for f in os.listdir(CONTENT_DIR) if
+             f.endswith(".txt") or f.endswith(".pdf") or f.endswith(".docx") or f.endswith(".md")]
+
+
+ def upload_file(file, file_list):
+     if not os.path.exists(CONTENT_DIR):
+         os.mkdir(CONTENT_DIR)
+     filename = os.path.basename(file.name)
+     shutil.move(file.name, os.path.join(CONTENT_DIR, filename))
+     # insert the newly uploaded file at the top of file_list
+     file_list.insert(0, filename)
+     return gr.Dropdown.update(choices=file_list, value=filename), file_list
+
+
+ def parse_text(text):
+     """copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/"""
+     lines = text.split("\n")
+     lines = [line for line in lines if line != ""]
+     count = 0
+     for i, line in enumerate(lines):
+         if "```" in line:
+             count += 1
+             items = line.split('`')
+             if count % 2 == 1:
+                 lines[i] = f'<pre><code class="language-{items[-1]}">'
+             else:
+                 lines[i] = '<br></code></pre>'
+         else:
+             if i > 0:
+                 if count % 2 == 1:
+                     line = line.replace("`", "\\`")
+                     line = line.replace("<", "&lt;")
+                     line = line.replace(">", "&gt;")
+                     line = line.replace(" ", "&nbsp;")
+                     line = line.replace("*", "&ast;")
+                     line = line.replace("_", "&lowbar;")
+                     line = line.replace("-", "&#45;")
+                     line = line.replace(".", "&#46;")
+                     line = line.replace("!", "&#33;")
+                     line = line.replace("(", "&#40;")
+                     line = line.replace(")", "&#41;")
+                     line = line.replace("$", "&#36;")
+                 lines[i] = "<br>" + line
+     text = "".join(lines)
+     return text
+
+
+ def get_answer(
+         query,
+         index_path,
+         history,
+         topn: int = VECTOR_SEARCH_TOP_K,
+         max_input_size: int = 1024,
+         chat_mode: str = "pdf"
+ ):
+     global model
+     if model is None:
+         return history + [[None, "模型还未加载"]], query
+     if index_path and chat_mode == "pdf":
+         if not model.sim_model.corpus_embeddings:
+             model.load_index(index_path)
+         response, empty_history, reference_results = model.query(query=query, topn=topn, max_input_size=max_input_size)
+         logger.debug(f"query: {query}, response with content: {response}")
+         for r in reference_results:
+             response += f"\n{r.strip()}"
+         response = parse_text(response)
+         history = history + [[query, response]]
+     else:
+         # no file loaded; return the raw output of the generation model
+         response, empty_history = model.chat(query, history)
+         response = parse_text(response)
+         history = history + [[query, response]]
+         logger.debug(f"query: {query}, response: {response}")
+     return history, ""
+
+
+ def update_status(history, status):
+     history = history + [[None, status]]
+     logger.info(status)
+     return history
+
+
+ def reinit_model(llm_model, embedding_model, history):
+     try:
+         global model
+         if model is not None:
+             del model
+         model = ChatPDF(
+             sim_model_name_or_path=embedding_model_dict.get(
+                 embedding_model,
+                 "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2"
+             ),
+             gen_model_type=llm_model.split('-')[0],
+             gen_model_name_or_path=llm_model_dict.get(llm_model, "THUDM/chatglm-6b-int4"),
+             lora_model_name_or_path=None,
+         )
+         model_status = """模型已成功重新加载,请选择文件后点击"加载文件"按钮"""
+     except Exception as e:
+         model = None
+         logger.error(e)
+         model_status = """模型未成功重新加载,请重新选择后点击"加载模型"按钮"""
+     return history + [[None, model_status]]
+
+
+ def get_file_hash(fpath):
+     """MD5 hash of a file's contents."""
+     with open(fpath, 'rb') as f:
+         return hashlib.md5(f.read()).hexdigest()
+
+
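+ # The index file name embeds the embedding model name and the file's MD5 hash,
+ # so an index built earlier for the same file content is reused instead of rebuilt.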
+ def get_vector_store(filepath, history, embedding_model):
+     logger.info(f"{filepath}, {history}")
+     index_path = None
+     file_status = ''
+     if model is not None:
+         local_file_path = os.path.join(CONTENT_DIR, filepath)
+         local_file_hash = get_file_hash(local_file_path)
+         index_file_name = f"{filepath}.{embedding_model}.{local_file_hash}.index.json"
+         local_index_path = os.path.join(CONTENT_DIR, index_file_name)
+         if os.path.exists(local_index_path):
+             model.load_index(local_index_path)
+             index_path = local_index_path
+             file_status = "文件已成功加载,请开始提问"
+         elif os.path.exists(local_file_path):
+             model.load_pdf_file(local_file_path)
+             model.save_index(local_index_path)
+             index_path = local_index_path
+             if index_path:
+                 file_status = "文件索引并成功加载,请开始提问"
+             else:
+                 file_status = "文件未成功加载,请重新上传文件"
+     else:
+         file_status = "模型未完成加载,请先在加载模型后再导入文件"
+     return index_path, history + [[None, file_status]]
+
+
+ def reset_chat(chatbot, state):
+     return None, None
+
+
+ block_css = """.importantButton {
+     background: linear-gradient(45deg, #7e0570,#5d1c99, #6e00ff) !important;
+     border: none !important;
+ }
+ .importantButton:hover {
+     background: linear-gradient(45deg, #ff00e0,#8500ff, #6e00ff) !important;
+     border: none !important;
+ }"""
+
+ webui_title = """
+ # 🎉ChatPDF WebUI🎉
+ Link: [https://github.com/zhongpei/ChatPDF](https://github.com/zhongpei/ChatPDF), testing MBZUAI/LaMini-Flan-T5-783M
+ """
+
+ init_message = """欢迎使用 ChatPDF Web UI,可以直接提问或上传文件后提问 """
+
+ with gr.Blocks(css=block_css) as demo:
+     index_path, file_status, model_status = gr.State(""), gr.State(""), gr.State("")
+     file_list = gr.State(get_file_list())
+     gr.Markdown(webui_title)
+     with gr.Row():
+         with gr.Column(scale=2):
+             chatbot = gr.Chatbot([[None, init_message], [None, None]],
+                                  elem_id="chat-box",
+                                  show_label=False).style(height=700)
+             query = gr.Textbox(show_label=False,
+                                placeholder="请输入提问内容,按回车进行提交",
+                                ).style(container=False)
+             clear_btn = gr.Button('🔄Clear!', elem_id='clear').style(full_width=True)
+         with gr.Column(scale=1):
+             llm_model = gr.Radio(llm_model_dict_list,
+                                  label="LLM 模型",
+                                  value=llm_model_dict_list[0],
+                                  interactive=True)
+             embedding_model = gr.Radio(embedding_model_dict_list,
+                                        label="Embedding 模型",
+                                        value=embedding_model_dict_list[0],
+                                        interactive=True)
+
+             load_model_button = gr.Button("重新加载模型" if model is not None else "加载模型")
+
+             with gr.Row():
+                 chat_mode = gr.Radio(choices=["chat", "pdf"], value="pdf", label="聊天模式")
+
+             with gr.Row():
+                 topn = gr.Slider(1, 100, 20, step=1, label="最大搜索数量")
+                 max_input_size = gr.Slider(512, 4096, MAX_INPUT_LEN, step=10, label="摘要最大长度")
+             with gr.Tab("select"):
+                 with gr.Row():
+                     selectFile = gr.Dropdown(
+                         file_list.value,
+                         label="content file",
+                         interactive=True,
+                         value=file_list.value[0] if len(file_list.value) > 0 else None
+                     )
+                     # get_file_list_btn = gr.Button('🔄').style(width=10)
+             with gr.Tab("upload"):
+                 file = gr.File(
+                     label="content file",
+                     file_types=['.txt', '.md', '.docx', '.pdf']
+                 )
+             load_file_button = gr.Button("加载文件")
+
+     load_model_button.click(
+         reinit_model,
+         show_progress=True,
+         inputs=[llm_model, embedding_model, chatbot],
+         outputs=chatbot
+     )
+     # save the uploaded file into the content directory and refresh the dropdown
+     file.upload(
+         upload_file,
+         inputs=[file, file_list],
+         outputs=[selectFile, file_list]
+     )
+     load_file_button.click(
+         get_vector_store,
+         show_progress=True,
+         inputs=[selectFile, chatbot, embedding_model],
+         outputs=[index_path, chatbot],
+     )
+     query.submit(
+         get_answer,
+         [query, index_path, chatbot, topn, max_input_size, chat_mode],
+         [chatbot, query],
+     )
+     clear_btn.click(reset_chat, [chatbot, query], [chatbot, query])
+
+ demo.queue(concurrency_count=3).launch(
+     server_name='0.0.0.0', share=False, inbrowser=False
+ )