import streamlit as st
from langchain import HuggingFaceHub, PromptTemplate, LLMChain

st.title("Mistral QA")

# Hugging Face API token, read from the Streamlit secrets store (key: hf_token).
huggingfacehub_api_token = st.secrets["hf_token"]

# Mistral-7B, queried through the Hugging Face Hub inference endpoint.
repo_id = "mistralai/Mistral-7B-v0.1"
llm = HuggingFaceHub(huggingfacehub_api_token=huggingfacehub_api_token,
                     repo_id=repo_id,
                     model_kwargs={"temperature": 0.2, "max_new_tokens": 200})
template = """Give answer for the question. | |
question: {question} | |
At the end of the answer, just say, 'Thanks for asking' | |
""" | |
prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = LLMChain(prompt=prompt, llm=llm, verbose=True)
def chat(query):
    # Run the question through the prompt/LLM chain and return the model's answer.
    result = llm_chain.predict(question=query)
    return result
def main():
    query = st.text_input("What do you want to ask about", placeholder="Input your question here")
    if query:
        output = chat(query)
        st.write(output, unsafe_allow_html=True)

if __name__ == '__main__':
    main()
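
# Usage sketch (assumption: this file is saved as app.py in a Streamlit / Hugging Face Space).
# The token above is read from .streamlit/secrets.toml, e.g.:
#
#   hf_token = "hf_xxxxxxxxxxxxxxxx"
#
# Run locally with:
#
#   streamlit run app.py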