|
import gradio as gr |
|
from langchain_openai import ChatOpenAI |
|
from langchain_openai import TextProcessingAgent |
|
from dspy.agents import Agent |
|
from dspy.utils import spawn_processes |
|
from transformers import pipeline |
|
|
|
|
|
# Hugging Face model identifier for the text-generation pipeline.
# NOTE(review): "Dolphin-Phi" is not a full Hub repo id (normally
# "owner/model", e.g. "cognitivecomputations/dolphin-2_6-phi-2") — confirm
# this resolves on the Hub before deploying.
model_name = "Dolphin-Phi"

# Build the local text-generation pipeline once at import time.
# Fix: the original passed the bare expression `Dolphin-Phi` (a NameError —
# two undefined names and a subtraction); `pipeline` expects the model id
# string held in `model_name`.
llm = pipeline("text-generation", model=model_name)
|
|
|
|
|
from dspy.agents import Agent |
|
from dspy.utils import SentenceSplitter, SentimentAnalyzer, NamedEntityRecognizer |
|
|
|
def dspy_generate_agent_prompts(prompt):
    """Build three agent-specific prompts from a user prompt via DSPy helpers.

    Splits the prompt into sentences, runs per-sentence sentiment analysis and
    named-entity recognition, then assembles one prompt per downstream agent:
    sentiment analysis, topic extraction, and recommendations.

    Args:
        prompt (str): The user-provided prompt (e.g., customer reviews).

    Returns:
        list: ``[sentiment_prompt, topic_prompt, recommendation_prompt]``,
        each a ready-to-send prompt string.
    """
    sentences = SentenceSplitter().process(prompt)

    # One sentiment label per sentence (e.g. "POSITIVE"/"NEGATIVE"/"NEUTRAL",
    # judging by the counting below). Comprehension replaces the append loop.
    sentiment_analyzer = SentimentAnalyzer()
    sentiment_labels = [sentiment_analyzer.analyze(s) for s in sentences]

    # Group entity texts by label, keeping only review-relevant labels.
    # Set hoisted out of the loop for O(1) membership tests.
    # NOTE(review): spaCy's en_core_web_sm emits "GPE"/"LOC" rather than
    # "LOCATION" and has no "FOOD" label — confirm the NER model's label set.
    relevant_labels = {"FOOD", "ORG", "LOCATION"}
    ner = NamedEntityRecognizer(model_name="en_core_web_sm")
    extracted_entities = {}
    for sentence in sentences:
        for entity in ner.process(sentence):
            if entity.label_ in relevant_labels:
                extracted_entities.setdefault(entity.label_, []).append(entity.text)

    agent_prompts = []

    # Agent 1: sentiment analysis over the individual sentences.
    # (Spurious f-prefix removed — the literal had no placeholders.)
    sentiment_prompt = (
        "Analyze the sentiment of the following sentences:\n" + "\n".join(sentences)
    )
    agent_prompts.append(sentiment_prompt)

    # Agent 2: topic extraction over the full, unsplit prompt.
    topic_prompt = (
        "Extract the main topics discussed in the following text, "
        f"focusing on food, service, and ambiance:\n{prompt}"
    )
    agent_prompts.append(topic_prompt)

    # Agent 3: recommendations grounded in aggregated sentiment counts and
    # the extracted topic mentions.
    positive_count = sum(label == "POSITIVE" for label in sentiment_labels)
    negative_count = sum(label == "NEGATIVE" for label in sentiment_labels)
    neutral_count = sum(label == "NEUTRAL" for label in sentiment_labels)
    topic_mentions = "\n".join(
        f"{k}: {','.join(v)}" for k, v in extracted_entities.items()
    )
    recommendation_prompt = f"""Based on the sentiment analysis (positive: {positive_count}, negative: {negative_count}, neutral: {neutral_count}) and extracted topics ({topic_mentions}), suggest recommendations for the restaurant to improve."""
    agent_prompts.append(recommendation_prompt)

    return agent_prompts
|
|
|
|
|
def generate_outputs(user_prompt):
    """Pipeline entry point for the Gradio UI: prompt in, (report,
    recommendations, visualization) out.

    NOTE(review): `langchain_function`, `generate_synthetic_data_distributed`
    and `produce_outputs` are not defined anywhere in this file — unless they
    are provided elsewhere, each call below raises NameError at runtime.
    Confirm where they are meant to come from.
    """

    # NOTE(review): undefined in this file — presumably meant to preprocess
    # the prompt via Langchain; verify.
    processed_prompt = langchain_function(user_prompt)

    # NOTE(review): undefined in this file; name suggests distributed
    # synthetic-data generation — verify.
    synthetic_data = generate_synthetic_data_distributed(processed_prompt)

    # Original prompt and synthetic data joined with a newline.
    combined_data = f"{user_prompt}\n{synthetic_data}"

    # Derives from the *processed* prompt, not the raw user_prompt.
    agent_prompts = dspy_generate_agent_prompts(processed_prompt)

    # NOTE(review): output_1/output_2 are generated via two LLM calls but
    # never used below — either dead (expensive) code or a missing hand-off
    # into produce_outputs; confirm intent.
    output_1 = llm(agent_prompts[0], max_length=100)[0]["generated_text"]
    output_2 = llm(agent_prompts[1], max_length=100)[0]["generated_text"]

    # NOTE(review): undefined in this file — expected to return the three
    # values the Gradio interface displays; verify.
    report, recommendations, visualization = produce_outputs(combined_data)

    return report, recommendations, visualization
|
|
|
|
|
# Launch the Gradio UI only when this file is executed as a script, so that
# importing the module for its functions does not start a blocking web server.
if __name__ == "__main__":
    gr.Interface(
        fn=generate_outputs,
        inputs=gr.Textbox(label="Enter a prompt"),
        outputs=["textbox", "textbox", "image"],
        title="Multi-Agent Prompt Processor",
        description="Processes a prompt using Langchain, DSPy, and a chosen Hugging Face LLM to generate diverse outputs.",
    ).launch()
|
|