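# Streamlit demo: a "Doctor Assistant" chat UI that forwards the user's question
# to google/flan-t5-xxl through LangChain's HuggingFaceHub wrapper.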
import streamlit as st
from langchain.llms import HuggingFaceHub
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
# Build an LLMChain around the Flan-T5 model and return its answer to the query
def generate_answer(query):
    # Requires HUGGINGFACEHUB_API_TOKEN to be set in the environment
    llm = HuggingFaceHub(
        repo_id="google/flan-t5-xxl",
        # Generation settings passed through to the Inference API
        model_kwargs={"temperature": 0.7, "max_length": 64, "max_new_tokens": 512},
    )
    template = """Question: {question}
Answer: Let's give medical advice in a kind way."""
    # The input variable must match the placeholder name used in the template
    prompt = PromptTemplate(template=template, input_variables=["question"])
    llm_chain = LLMChain(prompt=prompt, llm=llm)
    result = llm_chain.run(query)
    return result
# App UI starts here
st.set_page_config(page_title="Doctor Assistant Demo", page_icon=":robot:")
st.header("Doctor Assistant Demo")
# Get the user's question from a text box
def get_text():
    input_text = st.text_input("You: ", key="input")
    return input_text

user_input = get_text()
submit = st.button("Generate")

# Generate a response only when the button is clicked and there is input
if submit and user_input:
    st.subheader("Doctor's Response:")
    st.write(generate_answer(user_input))
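# To try the app locally (assuming this file is saved as app.py):
#   export HUGGINGFACEHUB_API_TOKEN=<your Hugging Face token>
#   streamlit run app.py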