Aiswarya Sankar committed
Commit 96b5c18 · 1 Parent(s): d87953b

Update the app: set max_tokens_limit=16000 on all four ConversationalRetrievalChain.from_llm calls

Files changed (1): app.py (+4 -4)
app.py CHANGED
@@ -229,7 +229,7 @@ def answer_questions(question: str, github: str, **kwargs) -> Response:
                 [StreamingGradioCallbackHandler(q)]
             ),
         )
-        qa = ConversationalRetrievalChain.from_llm(model,retriever=retriever)
+        qa = ConversationalRetrievalChain.from_llm(model,retriever=retriever, max_tokens_limit=16000)
         chat_history = []
 
     except Exception as e:
@@ -345,7 +345,7 @@ def generateDocumentationPerFolder(dir, github):
         verbose=True,
         streaming=True,  # Pass `streaming=True` to make sure the client receives the data.
     )
-    qa = ConversationalRetrievalChain.from_llm(model,retriever=retriever)
+    qa = ConversationalRetrievalChain.from_llm(model,retriever=retriever, max_tokens_limit=16000)
     chat_history = []
     return qa({"question": prompt, "chat_history": chat_history})["answer"]
 
@@ -399,7 +399,7 @@ def solveGithubIssue(ticket, history) -> Response:
                 [StreamingGradioCallbackHandler(q)]
             ),
         )
-        qa = ConversationalRetrievalChain.from_llm(model,retriever=retriever,max_tokens_limit=8000)
+        qa = ConversationalRetrievalChain.from_llm(model,retriever=retriever, max_tokens_limit=16000)
 
     except Exception as e:
         return [[str(e), None]]
@@ -445,7 +445,7 @@ def bot(history, **kwargs):
                 [StreamingGradioCallbackHandler(q)]
             ),
         )
-        qa = ConversationalRetrievalChain.from_llm(model,retriever=retriever)
+        qa = ConversationalRetrievalChain.from_llm(model,retriever=retriever, max_tokens_limit=16000)
         chat_history = []
 
     except Exception as e:
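
For context, the pattern these four call sites share looks roughly like the minimal sketch below, written against the legacy LangChain chains API that this code's qa({"question": ..., "chat_history": ...}) call style implies. The embeddings setup, the FAISS index, and the gpt-3.5-turbo-16k model name are illustrative assumptions, not taken from app.py; only ConversationalRetrievalChain.from_llm and the max_tokens_limit parameter come from the diff itself.

    # Minimal sketch of the call sites above (legacy LangChain chains API).
    # The embeddings, index, and model name are assumptions for illustration;
    # only ConversationalRetrievalChain.from_llm and max_tokens_limit are
    # taken from the diff.
    from langchain.chains import ConversationalRetrievalChain
    from langchain.chat_models import ChatOpenAI
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import FAISS

    # Stand-in retriever: the real app builds one over the GitHub repo.
    index = FAISS.from_texts(["def answer_questions(): ..."], OpenAIEmbeddings())
    retriever = index.as_retriever()

    # A 16k-context model is assumed here, since a document budget of
    # 16000 tokens would overflow a 4k-context model on its own.
    model = ChatOpenAI(model_name="gpt-3.5-turbo-16k", streaming=True)

    # max_tokens_limit caps the tokens contributed by the retrieved
    # documents: the chain drops documents from the end of the retrieved
    # list until their combined token count fits under the budget.
    qa = ConversationalRetrievalChain.from_llm(
        model, retriever=retriever, max_tokens_limit=16000
    )

    chat_history = []
    result = qa({"question": "What does answer_questions do?", "chat_history": chat_history})
    print(result["answer"])

The net effect of the commit, then, is to let every chain stuff roughly twice as much retrieved context into each prompt (16000 tokens instead of 8000 in solveGithubIssue, and instead of no explicit cap at the other three sites), which only pays off if the underlying model's context window is large enough to accept it.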