uyen13 committed on
Commit 5e06bfb
1 Parent(s): 3fc0a78

Update app.py

Files changed (1)
  1. app.py +3 -13
app.py CHANGED
@@ -9,13 +9,11 @@ from langchain.chains import ConversationalRetrievalChain
 from ctransformers import AutoModelForCausalLM
 from langchain_g4f import G4FLLM
 from g4f import Provider, models
-import unicodedata
+# import spacy
 import requests
 # Define the path for generated embeddings
 DB_FAISS_PATH = 'vectorstore/db_faiss'
-def is_japanese_character(character):
-    return 'CJK UNIFIED' in unicodedata.name(character, '')
-
+
 # Load the model of choice
 def load_llm():
     # url = "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/blob/main/llama-2-7b-chat.ggmlv3.q4_K_M.bin" # 2.87G
@@ -54,7 +52,7 @@ st.markdown(hide_streamlit_style, unsafe_allow_html=True)
 # Set the title for the Streamlit app
 st.title("Zendo美女チャットボックス")
 
-csv_url = "https://huggingface.co/spaces/uyen13/chatgirl2/raw/main/testchatdata.csv"
+csv_url = "https://huggingface.co/spaces/uyen13/chatzendo/raw/main/testchatdata.csv"
 # csv_url="https://docs.google.com/uc?export=download&id=1fQ2v2n9zQcoi6JoOU3lCBDHRt3a1PmaE"
 
 # Define the path where you want to save the downloaded file
@@ -92,14 +90,6 @@ chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=db.as_retriever
 def conversational_chat(query):
     query = "提供されたデータに基づいて,"+query
     result = chain({"question": query, "chat_history": st.session_state['history']})
-    # i = 0
-    # while i < len(result["answer"]):
-    #     character = input_string[i]
-    #     if is_japanese_character(character):
-    #         break
-    #     else:
-    #         result = chain({"question": query, "chat_history": st.session_state['history']})
-    #         i += 1
     st.session_state['history'].append((query, result["answer"]))
     return result["answer"]
 
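The only functional changes here are the new data source (the chatgirl2 space URL is swapped for chatzendo) and the removal of the unused Japanese-character retry logic. The download step itself sits outside the changed hunks; the sketch below is a minimal guess at how app.py might fetch the CSV with `requests`, where the local file name `testchatdata.csv` is an assumption rather than something shown in this diff.

```python
import requests

# New data source introduced by this commit (uyen13/chatzendo space).
csv_url = "https://huggingface.co/spaces/uyen13/chatzendo/raw/main/testchatdata.csv"

# Hypothetical local path; the actual variable name in app.py is not visible in this diff.
csv_path = "testchatdata.csv"

# Download the CSV and write it to disk before it is embedded into the FAISS store.
response = requests.get(csv_url)
response.raise_for_status()
with open(csv_path, "wb") as f:
    f.write(response.content)
```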
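After this commit, `conversational_chat` is a single pass through the retrieval chain: the commented-out loop that would have re-queried until the answer began with a CJK character is gone, along with its `is_japanese_character` helper and the `unicodedata` import. For reference, this is how the function reads once the change is applied, assuming `chain` and `st.session_state['history']` are set up earlier in app.py (outside these hunks):

```python
def conversational_chat(query):
    # Prepend a Japanese instruction meaning "Based on the provided data,".
    query = "提供されたデータに基づいて," + query
    # Single call to the ConversationalRetrievalChain; the removed loop used to
    # re-run this call until the answer started with a Japanese character.
    result = chain({"question": query, "chat_history": st.session_state['history']})
    st.session_state['history'].append((query, result["answer"]))
    return result["answer"]
```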