FirstPoC / app.py
Sreekan's picture
Update app.py
a35a697 verified
raw
history blame
6.11 kB
'''
import gradio as gr
from huggingface_hub import InferenceClient
"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
def respond(
message,
history: list[tuple[str, str]],
system_message,
max_tokens,
temperature,
top_p,
):
messages = [{"role": "system", "content": system_message}]
for val in history:
if val[0]:
messages.append({"role": "user", "content": val[0]})
if val[1]:
messages.append({"role": "assistant", "content": val[1]})
messages.append({"role": "user", "content": message})
response = ""
for message in client.chat_completion(
messages,
max_tokens=max_tokens,
stream=True,
temperature=temperature,
top_p=top_p,
):
token = message.choices[0].delta.content
response += token
yield response
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
respond,
additional_inputs=[
gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
gr.Slider(
minimum=0.1,
maximum=1.0,
value=0.95,
step=0.05,
label="Top-p (nucleus sampling)",
),
],
)
if __name__ == "__main__":
demo.launch()
import gradio as gr
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_huggingface import HuggingFaceEndpoint
from langgraph.graph import StateGraph
# Define the LLM models
llm1 = HuggingFaceEndpoint(model='t5-small')
llm2 = HuggingFaceEndpoint(model='t5-large')
# Define the agent functions
def agent1(query):
return f"Agent 1: {query}"
def agent2(query):
return f"Agent 2: {query}"
# Define the states
s1 = StateGraph("s1")
s2 = StateGraph("s2")
# Define the LLM chains
chain1 = LLMChain(llm=llm1, prompt=PromptTemplate(input_variables=["query"], template="You are in state s1. {{query}}"))
chain2 = LLMChain(llm=llm2, prompt=PromptTemplate(input_variables=["query"], template="You are in state s2. {{query}}"))
# Define the transition functions
def transition_s1(query):
output = chain1.invoke(input=query)
return agent1(output), s2
def transition_s2(query):
output = chain2.invoke(input=query)
return agent2(output), s1
# Define the respond function
def respond(input, history, current_state):
if current_state == s1:
response, next_state = transition_s1(input)
elif current_state == s2:
response, next_state = transition_s2(input)
history.append((input, response))
return history, next_state
# Create the Gradio interface
current_state = s1 # Define current_state here
with gr.Blocks() as demo:
gr.Markdown("# Chatbot Interface")
chatbot_interface = gr.Chatbot()
user_input = gr.Textbox(label="Your Message", placeholder="Type something...")
submit_btn = gr.Button("Send")
# Define the behavior of the submit button
def submit_click(input, history):
global current_state # Use global instead of nonlocal
history, current_state = respond(input, history, current_state)
return history
submit_btn.click(
fn=submit_click,
inputs=[user_input, chatbot_interface],
outputs=chatbot_interface
)
# Launch the Gradio application
demo.launch()
'''
import gradio as gr
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_huggingface import HuggingFaceEndpoint
from langgraph.graph import StateGraph
# Define the LLM models
llm1 = HuggingFaceEndpoint(model='t5-small')
llm2 = HuggingFaceEndpoint(model='t5-large')
# Define the agent functions
def agent1(response):
return f"Agent 1: {response}"
def agent2(response):
return f"Agent 2: {response}"
# Define the prompts and LLM chains
chain1 = LLMChain(llm=llm1, prompt=PromptTemplate(
input_variables=["query"],
template="You are in state s1. {{query}}"
))
chain2 = LLMChain(llm=llm2, prompt=PromptTemplate(
input_variables=["query"],
template="You are in state s2. {{query}}"
))
# Create a state graph for managing the chatbot's states
graph = StateGraph()
# Create states and add them to the graph
state1 = graph.add_state("s1") # State for the first agent
state2 = graph.add_state("s2") # State for the second agent
# Define transitions
graph.add_edge(state1, state2, "next") # Transition from s1 to s2
graph.add_edge(state2, state1, "back") # Transition from s2 to s1
# Initialize the current state
current_state = state1
def handle_input(query):
global current_state
output = ''
# Process user input based on current state
if current_state == state1:
output = chain1.invoke(input=query) # Invoke chain1 with user input
response = agent1(output) # Process output through Agent 1
current_state = state2 # Transition to state s2
elif current_state == state2:
output = chain2.invoke(input=query) # Invoke chain2 with user input
response = agent2(output) # Process output through Agent 2
current_state = state1 # Transition back to state s1
return response
# Create the Gradio interface
with gr.Blocks() as demo:
gr.Markdown("# Chatbot Interface")
chatbot_interface = gr.Chatbot()
user_input = gr.Textbox(label="Your Message", placeholder="Type something here...")
submit_btn = gr.Button("Send")
# Define the behavior of the submit button
submit_btn.click(
fn=lambda input_text: handle_input(input_text), # Handle user input
inputs=[user_input],
outputs=chatbot_interface
)
# Launch the Gradio application
demo.launch()