extoncs committed · verified
Commit 9089e99 · 1 Parent(s): 92af19b

Create app.py

Files changed (1): app.py (+51, -0)
app.py ADDED
@@ -0,0 +1,51 @@
+ import streamlit as st
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
+
+ # Set Streamlit page config
+ st.set_page_config(page_title="ChatDoctor", page_icon="🩺")
+
+ # Title
+ st.title("🩺 ChatDoctor - Medical Assistant")
+
+ # Load model and tokenizer once and cache them across reruns
+ @st.cache_resource
+ def load_model():
+     model = AutoModelForCausalLM.from_pretrained("abhiyanta/chatDoctor").to("cpu")
+     tokenizer = AutoTokenizer.from_pretrained("abhiyanta/chatDoctor")
+     return model, tokenizer
+
+ model, tokenizer = load_model()
+
+ # Alpaca-style prompt template
+ alpaca_prompt = "### Instruction:\n{0}\n\n### Input:\n{1}\n\n### Output:\n{2}"
+
+ # Text input for the user
+ user_input = st.text_input("Ask your medical question:")
+
+ # Button to trigger response
+ if st.button("Ask ChatDoctor"):
+     if user_input:
+         # Format the prompt (instruction only; input and output left empty)
+         formatted_prompt = alpaca_prompt.format(user_input, "", "")
+
+         # Tokenize and keep tensors on CPU
+         inputs = tokenizer([formatted_prompt], return_tensors="pt").to("cpu")
+
+         # TextStreamer echoes tokens to the server console as they are generated
+         text_streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
+
+         with st.spinner("Generating response..."):
+             generated_ids = model.generate(**inputs, streamer=text_streamer, max_new_tokens=1000)
+
+         # Decode only the newly generated tokens and show them on the page
+         # (the streamer writes to stdout, not to the Streamlit UI)
+         response = tokenizer.decode(generated_ids[0, inputs["input_ids"].shape[1]:], skip_special_tokens=True)
+         st.write("**ChatDoctor:**")
+         st.write(response)
+     else:
+         st.warning("Please enter a question to ask ChatDoctor.")
+
+ # Footer
+ st.markdown("---")
+ st.caption("Powered by Hugging Face 🤗 and Streamlit ⚡")
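
To try this commit locally, install the packages the script imports and launch the app with the Streamlit CLI. The commit pins no package versions, so the install line below is an assumption:

pip install streamlit torch transformers
streamlit run app.py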