Sbnos committed
Commit 88e3219 · verified · 1 Parent(s): 12def49

mainfile cgpt 8

Files changed (1)
  1. app.py +15 -9
app.py CHANGED
@@ -25,14 +25,16 @@ llm = Together(
     model="mistralai/Mixtral-8x22B-Instruct-v0.1",
     temperature=0.2,
     top_k=12,
-    together_api_key=os.environ['pilotikval']
+    together_api_key=os.environ['pilotikval'],
+    max_tokens=200
 )
 
 llmc = Together(
     model="mistralai/Mixtral-8x22B-Instruct-v0.1",
     temperature=0.2,
     top_k=3,
-    together_api_key=os.environ['pilotikval']
+    together_api_key=os.environ['pilotikval'],
+    max_tokens=200
 )
 
 # Memory setup
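
For reference, a minimal self-contained sketch of how these two Together clients read after this change. The import lines are assumptions (the top of app.py is not shown in this diff); the parameter values are the ones in the hunk above, and the key is read from the pilotikval environment variable as in the committed code.

import os

# Assumption: the wrapper could equally come from the langchain-together package.
from langchain_community.llms import Together

# Answering model: wider sampling (top_k=12), generation now capped at 200 tokens.
llm = Together(
    model="mistralai/Mixtral-8x22B-Instruct-v0.1",
    temperature=0.2,
    top_k=12,
    together_api_key=os.environ['pilotikval'],
    max_tokens=200,
)

# Question-condensing model: tighter sampling (top_k=3), same 200-token cap.
llmc = Together(
    model="mistralai/Mixtral-8x22B-Instruct-v0.1",
    temperature=0.2,
    top_k=3,
    together_api_key=os.environ['pilotikval'],
    max_tokens=200,
)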
@@ -67,15 +69,19 @@ def store_chat_history(role: str, content: str):
 
 # Define the chain using LCEL
 def create_conversational_qa_chain(retriever, condense_llm, answer_llm):
-    condense_question_chain = RunnableLambda(
-        lambda x: {"chat_history": chistory, "question": x['question']}
-    ) | CONDENSE_QUESTION_PROMPT | RunnableLambda(lambda x: {"standalone_question": x['standalone_question']})
+    condense_question_chain = (
+        RunnableLambda(lambda x: {"chat_history": chistory, "question": x['question']})
+        | CONDENSE_QUESTION_PROMPT
+        | RunnableLambda(lambda x: {"standalone_question": x})
+    )
 
-    retrieval_chain = RunnableLambda(
-        lambda x: {"standalone_question": x['standalone_question']}
-    ) | retriever | _combine_documents
+    retrieval_chain = (
+        RunnableLambda(lambda x: x['standalone_question'])
+        | retriever
+        | RunnableLambda(lambda x: {"context": _combine_documents(x)})
+    )
 
-    answer_chain = ANSWER_PROMPT | RunnableLambda(lambda x: {"context": x, "question": x['standalone_question']}) | answer_llm
+    answer_chain = ANSWER_PROMPT | answer_llm
 
     return RunnableParallel(
         condense_question=condense_question_chain,
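
The second hunk is pure LCEL composition: RunnableLambda wraps a plain function, the | operator pipes steps left to right, and RunnableParallel fans one input out to several branches and returns a dict keyed by branch name. A tiny self-contained sketch of those mechanics, with invented names and values that are not part of app.py:

from langchain_core.runnables import RunnableLambda, RunnableParallel

# Wrap plain functions as runnables; | chains them left to right.
clean = RunnableLambda(lambda x: {"question": x["question"].strip()})
shout = RunnableLambda(lambda x: x["question"].upper())
count = RunnableLambda(lambda x: len(x["question"]))

# RunnableParallel feeds the same input to every branch and collects the results
# in a dict, which is how create_conversational_qa_chain exposes its sub-chains.
demo = clean | RunnableParallel(shout=shout, length=count)

print(demo.invoke({"question": "  what is lcel?  "}))
# {'shout': 'WHAT IS LCEL?', 'length': 13}

# Hypothetical call into the committed function (argument order from the diff;
# passing llmc as the condensing model and llm as the answering model is a guess):
# qa_chain = create_conversational_qa_chain(retriever, llmc, llm)
# qa_chain.invoke({"question": "..."})

The diff window ends right after the first keyword argument of the returned RunnableParallel, so the remaining branches of that call are not visible in this commit.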