naotakigawa commited on
Commit
478c7ef
·
1 Parent(s): af73c80

Upload Chatbot.py

Browse files
Files changed (1) hide show
  1. pages/Chatbot.py +7 -34
pages/Chatbot.py CHANGED
@@ -1,27 +1,8 @@
1
 
2
  import streamlit as st
3
- import os
4
- import pickle
5
- import faiss
6
  import logging
7
-
8
- from multiprocessing import Lock
9
- from multiprocessing.managers import BaseManager
10
- from llama_index.callbacks import CallbackManager, LlamaDebugHandler
11
- from llama_index import VectorStoreIndex, Document,Prompt, SimpleDirectoryReader, ServiceContext, StorageContext, load_index_from_storage
12
- from llama_index.chat_engine import CondenseQuestionChatEngine;
13
- from llama_index.node_parser import SimpleNodeParser
14
- from llama_index.langchain_helpers.text_splitter import TokenTextSplitter
15
- from llama_index.constants import DEFAULT_CHUNK_OVERLAP
16
- from llama_index.response_synthesizers import get_response_synthesizer
17
- from llama_index.vector_stores.faiss import FaissVectorStore
18
- from llama_index.graph_stores import SimpleGraphStore
19
- from llama_index.storage.docstore import SimpleDocumentStore
20
- from llama_index.storage.index_store import SimpleIndexStore
21
- import tiktoken
22
- from logging import getLogger, StreamHandler, Formatter
23
-
24
  import common
 
25
 
26
  index_name = "./data/storage"
27
  pkl_name = "./data/stored_documents.pkl"
@@ -39,13 +20,6 @@ custom_prompt = Prompt("""\
39
  Search query:
40
  """)
41
 
42
- # # list of (human_message, ai_message) tuples
43
- custom_chat_history = [
44
- (
45
- 'γ“γ‚“γ«γ‘γ―γ€γ‚’γ‚·γ‚Ήγ‚Ώγƒ³γƒˆγ€‚γ“γ‚Œγ‹γ‚‰θ³ͺε•γ«η­”γˆγ¦δΈ‹γ•γ„γ€‚',
46
- 'γ“γ‚“γ«γ‘γ―γ€‚δΊ†θ§£γ—γΎγ—γŸγ€‚'
47
- )
48
- ]
49
  chat_history = []
50
 
51
  logging.basicConfig(level=logging.INFO)
@@ -55,6 +29,12 @@ logger.debug("θͺΏζŸ»η”¨γƒ­γ‚°")
55
  common.check_login()
56
 
57
  st.title("πŸ’¬ Chatbot")
 
 
 
 
 
 
58
  if "messages" not in st.session_state:
59
  st.session_state["messages"] = [{"role": "assistant", "content": "γŠε›°γ‚Šγ”γ¨γ―γ”γ–γ„γΎγ™γ‹οΌŸ"}]
60
 
@@ -65,13 +45,6 @@ if prompt := st.chat_input():
65
  st.session_state.messages.append({"role": "user", "content": prompt})
66
  st.chat_message("user").write(prompt)
67
  response = st.session_state.chat_engine.chat(prompt)
68
- # response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=st.session_state.messages)
69
  msg = str(response)
70
  st.session_state.messages.append({"role": "assistant", "content": msg})
71
  st.chat_message("assistant").write(msg)
72
-
73
- if st.button("γƒͺγ‚»γƒƒγƒˆ",use_container_width=True):
74
- st.session_state.chat_engine.reset()
75
- st.session_state.messages = [{"role": "assistant", "content": "γŠε›°γ‚Šγ”γ¨γ―γ”γ–γ„γΎγ™γ‹οΌŸ"}]
76
- logger.info("reset")
77
-
 
1
 
2
  import streamlit as st
 
 
 
3
  import logging
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  import common
5
+ from llama_index import Prompt
6
 
7
  index_name = "./data/storage"
8
  pkl_name = "./data/stored_documents.pkl"
 
20
  Search query:
21
  """)
22
 
 
 
 
 
 
 
 
23
  chat_history = []
24
 
25
  logging.basicConfig(level=logging.INFO)
 
29
  common.check_login()
30
 
31
  st.title("πŸ’¬ Chatbot")
32
+ if st.button("γƒͺγ‚»γƒƒγƒˆ",use_container_width=True):
33
+ st.session_state.chat_engine.reset()
34
+ st.session_state.messages = [{"role": "assistant", "content": "γŠε›°γ‚Šγ”γ¨γ―γ”γ–γ„γΎγ™γ‹οΌŸ"}]
35
+ st.experimental_rerun()
36
+ logger.info("reset")
37
+
38
  if "messages" not in st.session_state:
39
  st.session_state["messages"] = [{"role": "assistant", "content": "γŠε›°γ‚Šγ”γ¨γ―γ”γ–γ„γΎγ™γ‹οΌŸ"}]
40
 
 
45
  st.session_state.messages.append({"role": "user", "content": prompt})
46
  st.chat_message("user").write(prompt)
47
  response = st.session_state.chat_engine.chat(prompt)
 
48
  msg = str(response)
49
  st.session_state.messages.append({"role": "assistant", "content": msg})
50
  st.chat_message("assistant").write(msg)