# NOTE(review): the following header lines were Hugging Face Spaces page
# scrape residue ("Spaces / Sleeping / File size: 1,349 Bytes / 878fb0a /
# line-number gutter 1..54"), not Python source; converted to comments so
# the file parses.
from langchain_community.llms import HuggingFaceEndpoint
import os
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
import streamlit as st
from dotenv import load_dotenv
load_dotenv()
# def qabot(question):
# llm_hugginface = HuggingFaceEndpoint(repo_id='google/flan-t5-large',token=os.getenv("HUGGINGFACEHUB_API_TOKEN"),temperature=0.5,max_length=128)
# result= llm_hugginface("Can you write me the capital of {question}")
# return result
# ans =qabot("india")
# print(ans)
# question = "Who won the FIFA World Cup in the year 1994? "
def qabot(question):
    """Answer a free-form question with a chain-of-thought prompt.

    Sends *question* to the hosted Mistral-7B-Instruct endpoint on the
    Hugging Face Inference API and returns the model's text completion.

    Args:
        question: The user's question as a plain string.

    Returns:
        The model's generated answer (str).
    """
    template = """Question: {question}
Answer: Let's think step by step."""
    prompt = PromptTemplate.from_template(template)
    repo_id = "mistralai/Mistral-7B-Instruct-v0.2"
    llm = HuggingFaceEndpoint(
        repo_id=repo_id,
        # Fix: HuggingFaceEndpoint takes `max_new_tokens`, not `max_length`
        # (an unknown kwarg is shunted into model_kwargs and rejected by the
        # endpoint), and the API-token field is `huggingfacehub_api_token`,
        # not `token`.
        max_new_tokens=128,
        temperature=0.5,
        huggingfacehub_api_token=os.getenv("HUGGINGFACEHUB_API_TOKEN"),
    )
    # LLMChain and Chain.run are deprecated; the runnable pipeline
    # `prompt | llm` with .invoke() is the supported equivalent and
    # likewise returns the completion string for an LLM.
    chain = prompt | llm
    return chain.invoke({"question": question})
# print(qabot("Who won the FIFA World Cup in the year 1994? "))
# --- Streamlit UI -----------------------------------------------------------
st.header("Langchain Application")

# Renamed from `input` (shadowed the builtin) to a descriptive name.
user_question = st.text_input("Input: ", key="input")
submit = st.button("Ask the question")

## If ask button is clicked
if submit:
    # Fix: the original called qabot() unconditionally on every Streamlit
    # rerun — including the first page load with an empty question — paying
    # for an LLM call whose result was discarded unless the button was
    # pressed. Only query the model after an explicit click.
    response = qabot(user_question)
    st.subheader("The Response is")
    st.write(response)