Shaleen123 committed
Commit 4f913b7 · 1 Parent(s): e24629e

Update app.py

Files changed (1)
  1. app.py +9 -8
app.py CHANGED
@@ -18,6 +18,15 @@ from langchain.document_loaders import PyPDFLoader, TextLoader, JSONLoader, CSVL
 import tempfile  # Library for creating temporary files.
 import os
 
+with st.spinner("Loading the model"):
+    model_name = "Shaleen123/mistrallite_medical_qa"
+
+    config = PeftConfig.from_pretrained(model_name)
+    model = AutoModelForCausalLM.from_pretrained(model_name)
+    model = PeftModel.from_pretrained(model, model_name)
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+
 
 # Function that extracts text from PDF documents.
 def get_pdf_text(pdf_docs):
@@ -65,14 +74,6 @@ def get_vectorstore(text_chunks):
     return vectorstore  # Returns the created vector store.
 
 def get_conversation_chain(vectorstore):
-    # Replace 'microsoft/DialoGPT-large' with the desired model name
-    model_name = "Shaleen123/mistrallite_medical_qa"
-
-    config = PeftConfig.from_pretrained(model_name)
-    model = AutoModelForCausalLM.from_pretrained(model_name)
-    model = PeftModel.from_pretrained(model, model_name)
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
-
 
     # Creates memory to store the conversation history.
     memory = ConversationBufferMemory(
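
In effect, the commit moves the PEFT model loading out of get_conversation_chain() to module scope, wrapped in st.spinner so the adapter is loaded once when the Streamlit app starts rather than every time a conversation chain is built. Below is a minimal sketch of that loading pattern, assuming the usual streamlit/transformers/peft imports (not shown in this hunk); unlike the committed lines, it resolves the base checkpoint from the adapter's PEFT config instead of passing the adapter repo name to AutoModelForCausalLM directly:

import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftConfig, PeftModel

model_name = "Shaleen123/mistrallite_medical_qa"  # PEFT adapter repository

with st.spinner("Loading the model"):
    # Read the adapter's PEFT config to find the base checkpoint it was trained on.
    config = PeftConfig.from_pretrained(model_name)
    # Load the base causal LM, then attach the adapter weights on top of it.
    model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
    model = PeftModel.from_pretrained(model, model_name)
    tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

Note that code at module scope re-executes on every Streamlit rerun; wrapping the loading in a function decorated with st.cache_resource is a common way to keep the weights in memory across reruns.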