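"""Gradio demo app: a small LangGraph workflow driven by Hugging Face pipelines.

The graph runs three nodes in sequence (sentiment analysis with DistilBERT,
text generation with GPT-2, and a summary step) over a shared AgentState dict,
and the result is displayed through a Gradio interface.
"""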
import gradio as gr
from typing import TypedDict

from langgraph.graph import Graph
from transformers import pipeline
class AgentState(TypedDict):
    """Shared state passed between graph nodes."""

    messages: list[str]
    current_step: int
    final_answer: str
def analyze_sentiment(state: AgentState) -> AgentState:
    """Run sentiment analysis on the latest message and record the result."""
    sentiment_analyzer = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
    message = state["messages"][-1]
    result = sentiment_analyzer(message)[0]
    state["messages"].append(f"Sentiment analysis: {result['label']} ({result['score']:.2f})")
    state["current_step"] += 1
    return state
def generate_response(state: AgentState) -> AgentState:
    """Generate a short continuation of the recent conversation with GPT-2."""
    generator = pipeline("text-generation", model="gpt2")
    context = " ".join(state["messages"][-2:])
    # max_new_tokens bounds only the generated continuation, unlike max_length,
    # which also counts the (growing) prompt and can be exceeded after a few turns.
    generated_text = generator(context, max_new_tokens=50, num_return_sequences=1)[0]["generated_text"]
    state["messages"].append(f"Generated response: {generated_text}")
    state["current_step"] += 1
    return state
def create_summary(state: AgentState) -> AgentState:
    """Once enough steps have run, combine all messages into a final summary."""
    if state["current_step"] >= 4:
        summary = "Analysis complete. Final summary: "
        summary += " | ".join(state["messages"])
        state["final_answer"] = summary
    return state
def build_graph():
    """Wire the nodes into a linear workflow: sentiment -> generate -> summarize."""
    workflow = Graph()
    workflow.add_node("sentiment", analyze_sentiment)
    workflow.add_node("generate", generate_response)
    workflow.add_node("summarize", create_summary)
    workflow.add_edge("sentiment", "generate")
    workflow.add_edge("generate", "summarize")
    workflow.set_entry_point("sentiment")
    # Finishing at "summarize" lets each invocation terminate; the caller
    # re-runs the compiled graph until a final answer is produced.
    workflow.set_finish_point("summarize")
    return workflow.compile()
# Initialize the graph globally
GRAPH = build_graph()
def process_input(message: str, history: list) -> str:
    """Run the workflow on a user message and return the formatted transcript."""
    # Initialize state (the `history` argument is the Gradio session state;
    # it is kept for the interface signature but not used by the workflow itself)
    state = AgentState(
        messages=[message],
        current_step=0,
        final_answer="",
    )
    # Run the graph for a few passes, stopping once a final answer is produced
    for _ in range(3):
        state = GRAPH.invoke(state)
        if state["final_answer"]:
            break
    # Format the conversation history
    conversation = "\n".join(state["messages"])
    # Add final answer if available
    if state["final_answer"]:
        conversation += f"\n\nFinal Summary:\n{state['final_answer']}"
    return conversation
# Create Gradio interface
iface = gr.Interface(
    fn=process_input,
    inputs=[
        gr.Textbox(label="Enter your message"),
        gr.State([]),  # For maintaining conversation history
    ],
    outputs=gr.Textbox(label="Analysis Results"),
    title="LangGraph Demo with Hugging Face",
    description="Enter a message to analyze sentiment and generate responses using LangGraph and Hugging Face models.",
)
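
# For quick local testing without the UI, the workflow function can be called
# directly, e.g. (illustrative input):
#   print(process_input("I really enjoyed this movie!", []))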
if __name__ == "__main__":
    iface.launch()