import os

from dotenv import load_dotenv
from langchain.llms import HuggingFaceHub
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
import gradio

# Load the Hugging Face API token from .env and expose it under the name
# LangChain's HuggingFaceHub wrapper reads (the HF_API key is assumed to
# hold the token).
load_dotenv()
os.environ["HUGGINGFACEHUB_API_TOKEN"] = os.getenv("HF_API")

# Conversational model hosted on the Hugging Face Hub
hub_llm = HuggingFaceHub(repo_id='facebook/blenderbot-400M-distill')

# PromptTemplate needs both the input variables and a template string;
# a minimal pass-through template is assumed here.
prompt = PromptTemplate(
    input_variables=["question"],
    template="{question}"
)

hub_chain = LLMChain(prompt=prompt, llm=hub_llm, verbose=True)

def responsenew(question):
    # LLMChain.run returns just the generated text, which is what the
    # Gradio text output expects
    return hub_chain.run(question)

gradio_interface = gradio.Interface(
    fn=responsenew,
    inputs="text",
    outputs="text"
)

gradio_interface.launch()