thoristhor committed
Commit d018b4e · 1 Parent(s): 565caba

Update app.py

Files changed (1): app.py +46 -30
app.py CHANGED
@@ -1,39 +1,55 @@
  import os
  import gradio as gr
- import pinecone
- from llama_index import GPTIndexMemory, GPTPineconeIndex
- from langchain.agents import Tool
- from langchain.chains.conversation.memory import ConversationBufferMemory
- from langchain import OpenAI
- from langchain.agents import initialize_agent

- OPENAI_API_KEY=os.environ["OPENAI_API_KEY"]
- PINECONE_API_KEY=os.environ["PINECONE_API_KEY"]
-
- pinecone.init(api_key=PINECONE_API_KEY, environment="us-east1-gcp")

- pindex=pinecone.Index("sejarah")
- indexed_pinecone=GPTPineconeIndex([], pinecone_index=pindex)

- tools = [
-     Tool(
-         name = "GPT Index",
-         func=lambda q: str(indexed_pinecone.query(q)),
-         description="useful for when you want to answer questions about the author. The input to this tool should be a complete english sentence.",
-         return_direct=True
-     )
  ]
- memory = GPTIndexMemory(index=indexed_pinecone, memory_key="chat_history", query_kwargs={"response_mode": "compact"})
- llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo")
- agent_chain = initialize_agent(tools, llm, agent="conversational-react-description", memory=memory, verbose=True)
-
- def predict(input, history=[]):
-     response = agent_chain.run(input)
-     history = history + [(input, response)]
-     response = history
-     # response = [response]
-     # return response, response
-     return response, response

  with gr.Blocks() as demo:
      chatbot = gr.Chatbot()
 
  import os
  import gradio as gr
+ from langchain.vectorstores import Chroma
+ from langchain.chains import ConversationalRetrievalChain
+ from langchain.embeddings.openai import OpenAIEmbeddings
+ # conversational retrieval chain dependencies
+ from langchain.text_splitter import CharacterTextSplitter
+ from langchain.llms import OpenAI
+ from langchain.chat_models import ChatOpenAI
+ from langchain.prompts.chat import (
+     ChatPromptTemplate,
+     SystemMessagePromptTemplate,
+     AIMessagePromptTemplate,
+     HumanMessagePromptTemplate,
+ )
+ from langchain.schema import (
+     AIMessage,
+     HumanMessage,
+     SystemMessage
+ )

+ OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
+ # the class in langchain.embeddings.openai is OpenAIEmbeddings (plural)
+ embedding = OpenAIEmbeddings()
+ vectorstore = Chroma(persist_directory='/vectorstore', embedding_function=embedding)

+ aisyah_template = """
+ Answer each question truthfully using Malaysia's Form 1 History data provided. Your answers should be concise and straight to the point.
+ For questions that are open-ended and require subjective judgment or opinion, you may not find a definitive answer in the textbook.
+ However, you should still address the question's directive based on the data's context. Ideally, your answer should provide 3 points that support your response.
+ For concepts that are less ethical, you are encouraged to offer constructive, positive suggestions instead.
+ Please keep in mind that the scope of the data provided is limited to the content covered in Malaysia's Form 1 History textbook.
+ ---------------
+ {context}"""
+ ## If you don't know the answer, just say that you don't know, don't try to make up an answer.
+ # alternative default prompt (currently unused; aisyah_template is what the chain receives)
+ system_template = """Use the following pieces of context to answer the user's question.
+ ----------------
+ {context}"""
+ ## If you don't know the answer, just say that you don't know, don't try to make up an answer.
+ messages = [
+     SystemMessagePromptTemplate.from_template(aisyah_template),
+     HumanMessagePromptTemplate.from_template("{question}")
  ]
+ prompt = ChatPromptTemplate.from_messages(messages)
+ # `retriever` was referenced but never defined; derive it from the Chroma store
+ retriever = vectorstore.as_retriever()
+ qa = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0), retriever, return_source_documents=True, qa_prompt=prompt)
+
+ def predict(input, chat_history=[]):
+     # the committed code mixed `chat_historyhistory` and `history`; use one name consistently
+     response = qa({"question": input, "chat_history": chat_history})
+     chat_history = chat_history + [(input, response["answer"])]
+     return chat_history, chat_history

  with gr.Blocks() as demo:
      chatbot = gr.Chatbot()
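
The diff is truncated inside the gr.Blocks() context, so the commit's actual event wiring for predict is not shown. As a minimal sketch of how these pieces typically connect in Gradio, assuming component names (state, txt) that are not part of the commit:

# Hypothetical wiring, not from the commit; `state` and `txt` are assumed names.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    state = gr.State([])  # carries chat_history between turns
    txt = gr.Textbox(show_label=False, placeholder="Ask about Form 1 History")
    # predict returns (chat_history, chat_history): one copy renders in the
    # Chatbot, the other is written back to State for the next turn.
    txt.submit(predict, [txt, state], [chatbot, state])

demo.launch()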
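
Separately, the new code reopens a Chroma store persisted at /vectorstore but never builds it, and CharacterTextSplitter is imported without being used, which suggests the embedding step ran offline. A sketch of that one-off ingestion, assuming a hypothetical source file history.txt and chunking parameters that are not in the commit:

# Hypothetical one-off ingestion script; "history.txt" and the chunk sizes
# are assumptions, not part of the committed app.
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma

docs = TextLoader("history.txt").load()
chunks = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0).split_documents(docs)

# build and persist the store that app.py later reopens
store = Chroma.from_documents(chunks, OpenAIEmbeddings(), persist_directory="/vectorstore")
store.persist()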