|
import streamlit as st

from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer
from transformers import LlamaTokenizer, LlamaForCausalLM

from biomistral import BioMistral
|
|
|
# Load BioMistral-7B exactly once per Streamlit session.
#
# Fixes two defects in the original top-level code:
#   1. The 7B checkpoint was loaded TWICE (LlamaTokenizer/LlamaForCausalLM,
#      then AutoTokenizer/AutoModel rebinding the same names) — doubling
#      startup time and peak memory for no benefit.
#   2. The load that `generate_response` actually used was `AutoModel`,
#      which returns the bare transformer WITHOUT a language-model head,
#      so `.generate()` cannot produce text. `AutoModelForCausalLM` is the
#      generate-capable class.
# `st.cache_resource` keeps the model alive across Streamlit reruns instead
# of reloading it on every widget interaction.
@st.cache_resource(show_spinner="Loading BioMistral-7B ...")
def _load_biomistral():
    """Return a ``(tokenizer, model)`` pair for the BioMistral-7B causal LM."""
    tok = AutoTokenizer.from_pretrained("BioMistral/BioMistral-7B")
    lm = AutoModelForCausalLM.from_pretrained("BioMistral/BioMistral-7B")
    return tok, lm


tokenizer, model = _load_biomistral()

# Preserved for backward compatibility: other code may rely on this name.
# NOTE(review): `BioMistral` previously wrapped a LlamaForCausalLM; the Auto
# class resolves to the same Llama architecture for this checkpoint — confirm
# the wrapper accepts it.
biomistral = BioMistral(model=model, tokenizer=tokenizer)
|
|
|
def generate_response(input_text):
    """Generate a sampled completion for *input_text* using the global model.

    Parameters
    ----------
    input_text : str
        The prompt to feed to the language model.

    Returns
    -------
    str
        Only the newly generated text: the echoed prompt tokens are sliced
        off before decoding, and special tokens are stripped.
    """
    inputs = tokenizer(input_text, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        # `max_new_tokens` budgets the answer alone. The original
        # `max_length=200` counted the prompt too, so a long symptom/history
        # prompt left little or no room for the actual response.
        max_new_tokens=200,
        do_sample=True,
        top_p=0.95,
        top_k=50,
        num_return_sequences=1,
    )
    # generate() returns prompt + continuation; drop the prompt so the UI
    # shows only the model's answer instead of echoing the user's input.
    prompt_len = inputs["input_ids"].shape[1]
    response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
    return response
|
|
|
def main():
    """Render the Doctor AI page: collect patient details, then show a
    model-generated diagnosis when the button is pressed."""
    st.title("Doctor AI App")

    # Three free-text inputs from the user.
    fields = {
        "symptoms": st.text_area("Enter your symptoms", height=150),
        "medical_history": st.text_area("Enter your medical history", height=150),
        "allergies": st.text_area("Enter your allergies (if any)", height=150),
    }

    # Nothing to do until the user asks for a diagnosis.
    if not st.button("Get Diagnosis and Recommendations"):
        return

    prompt = (
        "Based on the following information:\n\n"
        f"Symptoms: {fields['symptoms']}\n"
        f"Medical History: {fields['medical_history']}\n"
        f"Allergies: {fields['allergies']}\n\n"
        "Please provide a diagnosis and recommend medications or treatments."
    )
    st.write(generate_response(prompt))
|
|
|
# Script entry point: launch the Streamlit page when executed directly
# (e.g. `streamlit run <file>`), but not when imported as a module.
if __name__ == "__main__":
    main()