|
import streamlit as st
|
|
from langchain.llms import HuggingFaceHub
|
|
from langchain import PromptTemplate, LLMChain
|
|
|
|
|
|
def load_answer(question):
    """Send *question* to google/flan-t5-large via the HuggingFace Hub and return the reply.

    Parameters
    ----------
    question : str
        The user's question; it is forwarded verbatim as the whole prompt.

    Returns
    -------
    str
        The raw text generated by the model.

    NOTE(review): HuggingFaceHub reads the HUGGINGFACEHUB_API_TOKEN
    environment variable implicitly — confirm it is set in deployment.
    """
    # BUG FIX: the HF Inference API rejects temperature=0
    # ("temperature must be strictly positive"). A tiny positive value
    # keeps sampling effectively deterministic without tripping the check.
    llm = HuggingFaceHub(
        repo_id="google/flan-t5-large",
        model_kwargs={"temperature": 1e-2},
    )

    # Pass-through prompt: the user's question *is* the full prompt.
    template = PromptTemplate(input_variables=["question"], template="{question}")
    llm_chain = LLMChain(prompt=template, llm=llm)

    return llm_chain.run(question)
|
|
|
|
|
|
# Configure the browser tab (title + robot favicon). Streamlit requires
# set_page_config to be the first Streamlit command executed in the script.
st.set_page_config(page_title="LangChain Demo", page_icon=":robot:")

# Page heading shown at the top of the app.
st.header("LangChain Demo")
|
|
|
|
|
|
def get_text():
    """Render the question input box and return the text currently typed in it."""
    return st.text_input("You: ", key="input")
|
|
|
|
# Collect the user's question and render the submit button.
user_input = get_text()
submit = st.button('Generate')

# Query the model only after an explicit click with a non-empty question.
if submit and user_input:
    answer_text = load_answer(user_input)
    st.subheader("Answer:")
    st.write(answer_text)