# Hugging Face Space: Falcon-7B-Instruct chatbot (Gradio UI + LangChain chain).
import gradio as gr
from langchain import HuggingFaceHub, PromptTemplate, LLMChain
from langchain.memory import ConversationBufferMemory

# Hosted Hugging Face Hub model used for all generations.
repo_id = "tiiuae/falcon-7b-instruct"

# Chat prompt: the chain substitutes the accumulated history and the new
# user turn before sending the text to the model.
template = """You are a chatbot having a conversation with a human.
{chat_history}
Human: {human_input}
Chatbot:"""

prompt = PromptTemplate(template=template, input_variables=["chat_history", "human_input"])
def generate_response(question, huggingfacehub_api_token, temperature=0.6, max_new_tokens=500):
    """Generate a chatbot reply for *question* via Falcon-7B-Instruct on the HF Hub.

    Args:
        question: The user's message for this turn.
        huggingfacehub_api_token: Hugging Face Hub API token for the hosted model.
        temperature: Sampling temperature forwarded in ``model_kwargs``.
        max_new_tokens: Generation length cap forwarded in ``model_kwargs``.

    Returns:
        The model's text reply (``LLMChain.predict`` output).

    Note:
        A fresh ``ConversationBufferMemory`` is created on every call, so no
        history actually persists across invocations — TODO: hoist the memory
        to module/session scope if cross-turn memory is desired.
    """
    memory = ConversationBufferMemory(memory_key="chat_history")
    llm = HuggingFaceHub(
        huggingfacehub_api_token=huggingfacehub_api_token,
        repo_id=repo_id,
        model_kwargs={"temperature": temperature, "max_new_tokens": max_new_tokens},
    )
    llm_chain = LLMChain(prompt=prompt, llm=llm, memory=memory)
    # Let the attached memory supply `chat_history`; passing it explicitly as
    # well collides with memory_key="chat_history" and LangChain rejects the
    # duplicate input key.
    return llm_chain.predict(human_input=question)
# --- Gradio UI wiring (legacy gr.inputs / gr.outputs API) ---
inputs = [
    gr.inputs.Textbox(label="Question"),
    gr.inputs.Textbox(label="HuggingFace API Token", type="password", default=None),
    gr.inputs.Slider(minimum=0.1, maximum=2.0, default=0.6, label="Temperature"),
    gr.inputs.Slider(minimum=100, maximum=1000, default=500, label="Max New Tokens"),
]
outputs = gr.outputs.Textbox(label="Response")

title = "Chatbot Interface"
description = "Provide a question and get helpful answers from the AI assistant."
# Each example row must provide one value per input component (4 here);
# pad the non-question fields with the same defaults the sliders use.
examples = [
    ["write a poem on Iron Man", None, 0.6, 500],
    ["What are the benefits of using Python?", None, 0.6, 500],
]

iface = gr.Interface(
    fn=generate_response,
    inputs=inputs,
    outputs=outputs,
    title=title,
    description=description,
    allow_flagging="never",
    examples=examples,
)
# share=True exposes a public tunnel URL; debug=True streams errors to stdout.
iface.launch(share=True, debug=True, show_api=True)