import streamlit as st
from langchain.llms import HuggingFaceHub
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

# Function to return the response
def generate_answer(query):
    # Flan-T5-XXL hosted on the Hugging Face Hub; the wrapper reads the API
    # token from the HUGGINGFACEHUB_API_TOKEN environment variable
    llm = HuggingFaceHub(
        repo_id="google/flan-t5-xxl",
        model_kwargs={"temperature": 0.7, "max_length": 64, "max_new_tokens": 512}
    )
    template = """Patient's Question: {query}
Doctor's Answer: Thank you for sharing. To better understand your situation, could you please provide more details about your headaches? For example, describe the frequency, intensity, any triggers you've identified, and how you currently manage them.
"""
    prompt = PromptTemplate(template=template, input_variables=["query"])
    llm_chain = LLMChain(prompt=prompt, llm=llm)
    result = llm_chain.run(query)
    return result

# App UI starts here
st.set_page_config(page_title="Doctor Assistant Demo", page_icon=":robot:")
st.header("Doctor Assistant Demo")
# Gets User Input
def get_text():
    input_text = st.text_input("You: ", key="input")
    return input_text

user_input = get_text()
submit = st.button("Generate")

# If the button is clicked, run the chain and display the response
if submit:
    response = generate_answer(user_input)
    st.subheader("Doctor's Response:")
    st.write(response)
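
# Note (illustrative sketch, not part of the original Space): this file is
# assumed to be saved as app.py and launched with `streamlit run app.py`,
# with a valid Hugging Face API token exported as HUGGINGFACEHUB_API_TOKEN.
# The helper below is a hypothetical addition showing how generate_answer
# could be exercised outside the Streamlit UI (e.g. from a Python shell);
# the sample question is made up for demonstration.
def _smoke_test():
    # Call the chain once with a sample patient question and print the reply.
    sample_query = "I have had a dull headache every morning for two weeks."
    print(generate_answer(sample_query))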