File size: 1,339 Bytes
2fc40d4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
import os
from functools import lru_cache

import streamlit as st

from langchain import PromptTemplate, HuggingFaceHub, LLMChain

# Page header: app title plus the Hugging Face model this app queries.
st.title("Generating Response with HuggingFace Models")
st.markdown("## Model: `facebook/blenderbot-1B-distill`")

@lru_cache(maxsize=1)
def _build_chain() -> LLMChain:
    """Build the prompt + HuggingFaceHub chain exactly once.

    The prompt template and the HuggingFaceHub client are invariant across
    calls, so constructing them per request (as the original code did) only
    adds latency; ``lru_cache(maxsize=1)`` memoizes the finished chain.

    Returns:
        LLMChain: A chain wiring the step-by-step prompt to the
        ``facebook/blenderbot-1B-distill`` hosted model.
    """
    template = """Question: {question}

    Answer: Let's think step by step."""

    prompt = PromptTemplate(template=template, input_variables=["question"])

    # NOTE(review): the HF Inference API usually requires a strictly
    # positive temperature; temperature=0 may be rejected at call time —
    # confirm against the deployed endpoint.
    llm = HuggingFaceHub(
        repo_id="facebook/blenderbot-1B-distill",
        model_kwargs={"temperature": 0, "max_length": 64},
    )
    return LLMChain(prompt=prompt, llm=llm)


def get_response(question: str) -> dict:
    """
    Generate a response to a given question using the Blenderbot Large Language Model.

    Args:
        question (str): The question to be answered.

    Returns:
        dict: The chain output as returned by ``LLMChain.invoke`` — the
        generated answer is under the ``"text"`` key.
    """
    return _build_chain().invoke(question)

# --- UI: question input and response display -------------------------------
question = st.text_area("Enter your question here...")

# Only fire the model call when the button is pressed AND the box is non-empty.
if st.button("Get Response") and question:
    with st.spinner("Generating Response..."):
        answer = get_response(question)
    if answer is not None:
        st.success('Great! Response generated successfully')
        # Show only the generated text; the original also dumped the raw
        # result dict (st.write(answer)), duplicating the answer on screen.
        st.write(answer["text"])
    else:
        # Previously this branch silently showed nothing; surface a message.
        st.error("Sorry, no response was generated. Please try again.")