commit 96a6f43e53 (parent 3f33566f32): update

6 binary files changed (contents not shown).
(file name not shown in this diff view)
@@ -53,15 +53,10 @@ class ChatGLMService(LLM):
             model_name_or_path,
             trust_remote_code=True
         )
-        self.model = (
-            AutoModel.from_pretrained(
-                model_name_or_path,
-                trust_remote_code=True)
-            .half()
-            .cuda()
-        )
+        self.model = AutoModel.from_pretrained(model_name_or_path, trust_remote_code=True).half().cuda()
+        self.model = self.model.eval()
 
 # if __name__ == '__main__':
 #     config=LangChainCFG()
 #     chatLLM = ChatGLMService()
 #     chatLLM.load_model(model_name_or_path=config.llm_model_name)
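The rewrite collapses the parenthesized builder into one chained call and adds an explicit eval(). The pattern in isolation (a minimal sketch; 'THUDM/chatglm-6b' is only an illustrative checkpoint, and a CUDA device is assumed):

    from transformers import AutoModel, AutoTokenizer

    model_name_or_path = 'THUDM/chatglm-6b'  # illustrative; any ChatGLM checkpoint
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
    # fp16 weights on the GPU, then inference mode (disables dropout)
    model = AutoModel.from_pretrained(model_name_or_path, trust_remote_code=True).half().cuda()
    model = model.eval()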
main.py (35 changes)
@@ -1,15 +1,3 @@
-#!/usr/bin/env python
-# -*- coding:utf-8 _*-
-"""
-@author:quincy qiang
-@license: Apache Licence
-@file: main.py
-@time: 2023/04/17
-@contact: yanqiangmiffy@gamil.com
-@software: PyCharm
-@description: coding..
-"""
-
 import os
 import shutil
 
@@ -17,11 +5,13 @@ import gradio as gr
 
 from clc.langchain_application import LangChainApplication
 
+os.environ["CUDA_VISIBLE_DEVICES"] = '0'
 
 # Change this to your own configuration!!!
 class LangChainCFG:
-    llm_model_name = '../../pretrained_models/chatglm-6b'  # local model file or Hugging Face remote repo
-    embedding_model_name = '../../pretrained_models/text2vec-large-chinese'  # retrieval model file or Hugging Face remote repo
+    llm_model_name = 'THUDM/chatglm-6b-int4-qe'  # local model file or Hugging Face remote repo
+    embedding_model_name = 'GanymedeNil/text2vec-large-chinese'  # retrieval model file or Hugging Face remote repo
     vector_store_path = './cache'
     docs_path = './docs'
 
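Both config values now point at Hugging Face Hub repos rather than local checkpoint directories, so the first run downloads them. A sketch of what each resolves to at load time (using HuggingFaceEmbeddings as the embedding wrapper is an assumption about this repo's internals):

    from transformers import AutoModel
    from langchain.embeddings import HuggingFaceEmbeddings  # needs sentence-transformers installed

    # int4-quantized ChatGLM: a much smaller GPU footprint than full chatglm-6b
    llm = AutoModel.from_pretrained('THUDM/chatglm-6b-int4-qe', trust_remote_code=True).half().cuda()
    # text2vec embeddings used to build and query the vector store under ./cache
    embeddings = HuggingFaceEmbeddings(model_name='GanymedeNil/text2vec-large-chinese')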
@@ -58,22 +48,23 @@ def predict(input,
             large_language_model,
             embedding_model,
             history=None):
-    print(large_language_model, embedding_model)
+    # print(large_language_model, embedding_model)
+    print(input)
     if history == None:
         history = []
     resp = application.get_knowledge_based_answer(
         query=input,
-        history_len=5,
+        history_len=1,
         temperature=0.1,
         top_p=0.9,
         chat_history=history
     )
-    print(resp)
     history.append((input, resp['result']))
 
     search_text = ''
     for idx, source in enumerate(resp['source_documents'][:2]):
-        search_text += f'【搜索结果{idx}:】{source.page_content}\n\n'
+        sep = f'----------【搜索结果{idx}:】---------------\n'
+        search_text += f'{sep}\n{source.page_content}\n\n'
+    print(search_text)
     return '', history, history, search_text
 
 
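The reworked loop prefixes each retrieved passage with a visible separator line instead of running the results together. A runnable sketch with dummy stand-ins for resp['source_documents'] (the real objects are langchain Documents) shows the resulting format:

    class Doc:  # dummy stand-in for langchain's Document
        def __init__(self, page_content):
            self.page_content = page_content

    source_documents = [Doc('第一段相关内容'), Doc('第二段相关内容')]
    search_text = ''
    for idx, source in enumerate(source_documents[:2]):
        sep = f'----------【搜索结果{idx}:】---------------\n'
        search_text += f'{sep}\n{source.page_content}\n\n'
    print(search_text)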
@@ -83,6 +74,8 @@ with block as demo:
     <center><font size=3>
     </center></font>
     """)
+    state = gr.State()
 
     with gr.Row():
         with gr.Column(scale=1):
             embedding_model = gr.Dropdown([
@@ -112,7 +105,6 @@ with block as demo:
                 inputs=file,
                 outputs=selectFile)
         with gr.Column(scale=4):
-            state = gr.State()
             with gr.Row():
                 with gr.Column(scale=4):
                     chatbot = gr.Chatbot(label='Chinese-LangChain').style(height=400)
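Together with the previous hunk, this moves the session state from a nested column up to the top of the Blocks context, so it is created once before any component references it. The pattern, as an illustrative fragment rather than the full layout:

    import gradio as gr

    with gr.Blocks() as demo:
        state = gr.State()      # per-session value, defaults to None
        chatbot = gr.Chatbot()  # later components read/write state via event handlers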
@@ -122,6 +114,7 @@ with block as demo:
                     send = gr.Button("🚀 发送")
                 with gr.Column(scale=2):
                     search = gr.Textbox(label='搜索结果')
 
         # send button: submit the query
         send.click(predict,
                    inputs=[
@@ -144,4 +137,4 @@ with block as demo:
                    ],
                    outputs=[message, chatbot, state, search])
 
-demo.queue().launch(server_name='0.0.0.0', server_port=8008, share=False)
+demo.queue(concurrency_count=2).launch(server_name='0.0.0.0', server_port=8888, share=False, show_error=True, enable_queue=True)
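What the new launch line configures, spread out for readability (gradio 3.x-era API assumed; enable_queue=True appears redundant once queue() is called, but is harmless):

    demo.queue(concurrency_count=2)  # process up to 2 requests in parallel
    demo.launch(
        server_name='0.0.0.0',  # listen on all interfaces
        server_port=8888,
        share=False,
        show_error=True,        # surface handler tracebacks in the UI
    )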
(new file, name not shown in this diff view)
@@ -0,0 +1,10 @@
+from duckduckgo_search import ddg
+from duckduckgo_search.utils import SESSION
+
+
+SESSION.proxies = {
+    "http": f"socks5h://localhost:7890",
+    "https": f"socks5h://localhost:7890"
+}
+r = ddg("马保国")
+print(r)
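The socks5h:// scheme routes DNS resolution through the proxy as well; since duckduckgo_search drives a requests Session under the hood, this needs the PySocks extra installed (an assumption about the environment):

    # pip install duckduckgo_search "requests[socks]"  # PySocks enables socks5h:// proxies
    # port 7890 matches common local proxy defaults (e.g. Clash); adjust as needed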
(new file, name not shown in this diff view)
@@ -0,0 +1,15 @@
+from duckpy import Client
+
+client = Client()
+
+results = client.search("Python Wikipedia")
+
+# Prints first result title
+print(results[0].title)
+
+# Prints first result URL
+print(results[0].url)
+
+# Prints first result description
+print(results[0].description)
+# https://github.com/AmanoTeam/duckpy
(new file, name not shown in this diff view)
@@ -0,0 +1,19 @@
+import time
+
+import gradio as gra
+
+
+def user_greeting(name):
+    time.sleep(10)
+    return "Hi! " + name + " Welcome to your first Gradio application!😎"
+
+
+# define gradio interface and other parameters
+app = gra.Interface(
+    fn=user_greeting,
+    inputs="text",
+    outputs="text",
+)
+app.launch(
+    server_name='0.0.0.0', server_port=8888, share=False, show_error=True, enable_queue=True
+)
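Since the handler blocks for 10 seconds, this toy app is a natural test for the queued launch settings used in main.py. The function itself can be sanity-checked without starting the server:

    import time

    start = time.time()
    print(user_greeting('Ada'))  # blocks ~10 s, simulating a slow model call
    print(f'took {time.time() - start:.1f}s')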