Commit · a56a0d0
Parent(s): 4445ab4
ChatbotWebRead

pages/ChatbotWebRead.py  +97 -0
pages/ChatbotWebRead.py
ADDED
@@ -0,0 +1,97 @@
import logging

import faiss
import streamlit as st
import tiktoken

from llama_index import ListIndex, Prompt, ServiceContext, SimpleWebPageReader
from llama_index.callbacks import CallbackManager, LlamaDebugHandler
from llama_index.chat_engine import CondenseQuestionChatEngine
from llama_index.constants import DEFAULT_CHUNK_OVERLAP
from llama_index.langchain_helpers.text_splitter import TokenTextSplitter
from llama_index.node_parser import SimpleNodeParser
from llama_index.response_synthesizers import get_response_synthesizer

# Condenses the chat history plus the new user message into a standalone
# search query. The instructions (in Japanese) say: answer in Japanese,
# return a greeting if the new message is a greeting, answer questions from
# the search results, and honestly say so when the answer is unknown.
custom_prompt = Prompt("""\
以下はこれまでの会話履歴と、ドキュメントを検索して回答する必要がある、ユーザーからの会話文です。
会話と新しい会話文に基づいて、検索クエリを作成します。回答は日本語で行います。
新しい会話文が挨拶の場合、挨拶を返してください。
新しい会話文が質問の場合、検索した結果の回答を返してください。
答えがわからない場合は正直にわからないと回答してください。
会話履歴:
{chat_history}
新しい会話文:
{question}
Search query:
""")

chat_history = []

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)  # was getLogger("__name__"), a literal-string bug
logger.debug("log for investigation")

st.title("💬 ChatbotWebRead")
if "webmessages" not in st.session_state:
    # Initial assistant greeting: "Is there anything I can help you with?"
    st.session_state["webmessages"] = [{"role": "assistant", "content": "お困りごとはございますか?"}]

for msg in st.session_state.webmessages:
    st.chat_message(msg["role"]).write(msg["content"])

if prompt := st.chat_input():
    st.session_state.webmessages.append({"role": "user", "content": prompt})
    st.chat_message("user").write(prompt)
    response = st.session_state.web_chat_engine.chat(prompt)
    msg = str(response)
    st.session_state.webmessages.append({"role": "assistant", "content": msg})
    st.chat_message("assistant").write(msg)

# "リセット" = "Reset": clear the chat engine's memory and the message history.
if st.button("リセット", use_container_width=True):
    st.session_state.web_chat_engine.reset()
    st.session_state.webmessages = [{"role": "assistant", "content": "お困りごとはございますか?"}]
    logger.info("reset")


def initialize_webindex():
    logger.info("initialize_webindex start")
    # Split on Japanese sentence boundaries ("。"), counting tokens with the
    # gpt-3.5-turbo encoding.
    text_splitter = TokenTextSplitter(
        separator="。",
        chunk_size=1500,
        chunk_overlap=DEFAULT_CHUNK_OVERLAP,
        tokenizer=tiktoken.encoding_for_model("gpt-3.5-turbo").encode,
    )
    node_parser = SimpleNodeParser(text_splitter=text_splitter)
    # Debug handler that records LlamaIndex events.
    llama_debug_handler = LlamaDebugHandler()
    callback_manager = CallbackManager([llama_debug_handler])
    # Build the ServiceContext once with both settings; the original created
    # it twice, which silently discarded the node_parser.
    service_context = ServiceContext.from_defaults(
        node_parser=node_parser,
        callback_manager=callback_manager,
    )

    # Currently unused: a FAISS index sized for OpenAI ada-002 embeddings
    # (d=1536). The ListIndex below never touches it; see the sketches
    # after the diff for a vector-store variant.
    d = 1536
    k = 2
    faiss_index = faiss.IndexFlatL2(d)

    webDocuments = SimpleWebPageReader(html_to_text=True).load_data(
        ["https://www.stylez.co.jp/"]
    )
    logger.info(webDocuments)
    webIndex = ListIndex.from_documents(webDocuments, service_context=service_context)
    response_synthesizer = get_response_synthesizer(response_mode="compact")
    webQuery_engine = webIndex.as_query_engine(response_synthesizer=response_synthesizer)
    st.session_state.web_chat_engine = CondenseQuestionChatEngine.from_defaults(
        query_engine=webQuery_engine,
        condense_question_prompt=custom_prompt,
        chat_history=chat_history,
        verbose=True,
    )


if __name__ == "__main__":
    # Build the global index once per session.
    logger.info("main start")
    if "web_chat_engine" not in st.session_state:
        logger.info("initializing index...")
        initialize_webindex()
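
The original import list also pulled in TrafilaturaWebReader, which the commit never uses. If the plain HTML-to-text conversion of SimpleWebPageReader picks up too much navigation boilerplate, a minimal sketch of swapping the reader (assuming the trafilatura package is installed in the Space):

# Hypothetical alternative to SimpleWebPageReader: extract only the main
# article text with Trafilatura (requires the `trafilatura` package).
from llama_index import TrafilaturaWebReader

webDocuments = TrafilaturaWebReader().load_data(["https://www.stylez.co.jp/"])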
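
The unused faiss_index (dimension 1536 matches OpenAI ada-002 embeddings) together with the VectorStoreIndex and StorageContext imports in the original suggest a vector-store variant was planned. A sketch of how it could be wired up in this generation of llama_index; this is an assumption, not part of the commit:

# Hypothetical vector-store variant: embed the pages and retrieve the k
# nearest chunks from FAISS instead of synthesizing over the whole ListIndex.
from llama_index import StorageContext, VectorStoreIndex
from llama_index.vector_stores import FaissVectorStore

vector_store = FaissVectorStore(faiss_index=faiss_index)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
webIndex = VectorStoreIndex.from_documents(
    webDocuments,
    storage_context=storage_context,
    service_context=service_context,
)
webQuery_engine = webIndex.as_query_engine(similarity_top_k=k)  # k = 2 above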
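
Likewise, the commit wires up LlamaDebugHandler but never reads from it. A short sketch of pulling the recorded LLM calls out of the handler; method names follow the legacy llama_index callback API and should be checked against the version pinned in the Space:

# Inspect what was actually sent to and returned from the LLM during a turn.
event_pairs = llama_debug_handler.get_llm_inputs_outputs()
for start_event, end_event in event_pairs:
    logger.info(start_event.payload)
    logger.info(end_event.payload)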