leofan committed on
Commit e8037fe · verified · 1 Parent(s): 4962d48

Update app/server.py


Restore the original server.py.

Files changed (1)
  1. app/server.py +5 -8
app/server.py CHANGED
@@ -8,7 +8,6 @@ from langchain.prompts import ChatPromptTemplate
  from langchain_core.runnables import RunnablePassthrough, RunnableParallel
  from langchain.schema import StrOutputParser
  from langchain.embeddings.huggingface import HuggingFaceEmbeddings
- from langchain.memory import ConversationBufferMemory
 
  app = FastAPI()
 
@@ -18,8 +17,7 @@ hf_llm = HuggingFaceHub(
  repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
  huggingfacehub_api_token=os.environ["HF_TOKEN"],
  task="text-generation",
- model_kwargs={"temperature":0.01, "max_new_tokens" : 250},
- memory = ConversationBufferMemory(memory_key="chat_history")
+ model_kwargs={"temperature":0.01, "max_new_tokens" : 250}
  )
 
  embedding_model_id = 'WhereIsAI/UAE-Large-V1'
@@ -30,11 +28,10 @@ retriever = faiss_index.as_retriever()
  # retriever = faiss_index.as_retriever(search_kwargs={"k": 2})
 
  prompt_template = """\
-
- You are a lab assistant who is trying to answer the student's question. The following is the conversation history you are having with the student:
- {chat_history}
-
- Use the provided context below to answer the user's question. If you don't know the answer, say you don't know and ask the student to seek help from the instructor. The answer you provided should be limited to 3 sentences if possible.
+ Use the provided context to answer the user's question. If you don't know the answer, say you don't know.
+ The text output that you are providing should only contain the answer, but not the question text or any other prompt text.
+ The answer you provided should be limited to 2 sentences if possible.
+ Also, if the answer contains any of the word phrases [pipette tip, vortex mixer, lysis-buffer, covid sample], replace the space in the word phrase with a hyphen (e.g. pipette-tip).
 
  Context:
  {context}
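
For reference, the objects touched by this diff (hf_llm, retriever, prompt_template, and the RunnableParallel/RunnablePassthrough/StrOutputParser imports) are typically wired into a retrieval chain along the lines below. This is a minimal sketch under assumptions, not the actual server.py: it assumes those names are in scope from earlier in the file, that the full template also contains a {question} placeholder (only {context} is visible in the hunks above), and that the chain is built with plain LCEL piping.

# Minimal sketch (assumed wiring, not shown in this commit): composing the
# pieces from server.py into a retrieval chain with LCEL.
from langchain.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain.schema import StrOutputParser

# prompt_template, retriever, and hf_llm are assumed to be the names defined
# earlier in server.py, as shown in the diff context above.
prompt = ChatPromptTemplate.from_template(prompt_template)

rag_chain = (
    # Fill {context} from the retriever and pass the raw question through unchanged.
    RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
    | prompt
    | hf_llm
    | StrOutputParser()  # return the model output as a plain string
)

# Example call; per the updated prompt, the answer should stay within two
# sentences and hyphenate phrases such as "pipette tip" -> "pipette-tip".
answer = rag_chain.invoke("How do I attach a pipette tip?")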