Create app.py
app.py
ADDED
@@ -0,0 +1,90 @@
from transformers import pipeline
import gradio as gr

# Step 1: Load model pipeline
generator = pipeline(
    "text-generation",
    model="HuggingFaceH4/zephyr-7b-alpha",
    max_new_tokens=512,
    temperature=0.7,
    do_sample=True,
    device_map="auto"
)
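# The generation settings above become the defaults for every generator() call;
# device_map="auto" needs the accelerate package installed to place the 7B model.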

# Step 2: Agent functions
def chat_with_agent(role_description, prompt):
    system = f"[System: {role_description}]\n"
    input_text = system + prompt
    response = generator(input_text, return_full_text=False)[0]['generated_text']
    return response.replace(input_text, "").strip()
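# Note: return_full_text=False already omits the prompt from generated_text,
# so the replace() above is only a defensive cleanup.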

def get_topic():
    return chat_with_agent("You are a task manager deciding AI research paper topics.",
                           "Suggest a trending topic in AI for academic writing.")

def get_research(topic):
    return chat_with_agent("You are a research assistant gathering key points.",
                           f"Give me 5 major points about: {topic}")

def generate_draft(research_points):
    return chat_with_agent("You are a technical writer.",
                           f"Write a short 300-word academic draft based on:\n{research_points}")

def review_draft(draft):
    return chat_with_agent("You are a senior editor. Check for grammar, clarity, and flow.",
                           f"Please review and improve the following draft:\n\n{draft}")

def format_final(reviewed):
    return chat_with_agent("You are a publisher formatting for IEEE conference style.",
                           f"Format this content:\n{reviewed}")
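# Each helper above wraps chat_with_agent with a different role prompt; chained
# together they form the topic -> research -> draft -> review -> format pipeline.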

# Step 3: Gradio function for full pipeline
def multi_agent_ui(topic_choice=None):
    topic = topic_choice if topic_choice else get_topic()
    research = get_research(topic)
    draft = generate_draft(research)
    reviewed = review_draft(draft)
    final_output = format_final(reviewed)
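    # Gradio expects one return value per output component (a dict would need
    # the component objects themselves as keys), so return a plain tuple in the
    # same order as the outputs list wired up below.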
    return topic, research, draft, reviewed, final_output

# Step 4: Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("## 🧠 AI Research Paper Assistant (Multi-Agent System)")

    with gr.Row():
        topic_input = gr.Textbox(label="Enter Topic (Optional)", placeholder="Leave blank to auto-generate")
        generate_btn = gr.Button("Generate Research Paper")

    with gr.Accordion("📌 Topic", open=False):
        topic_output = gr.Textbox(label="Topic", lines=2)

    with gr.Accordion("📚 Research Points", open=False):
        research_output = gr.Textbox(label="Research Points", lines=6)

    with gr.Accordion("📝 Draft", open=False):
        draft_output = gr.Textbox(label="Draft", lines=10)

    with gr.Accordion("✅ Reviewed", open=False):
        reviewed_output = gr.Textbox(label="Reviewed Draft", lines=10)

    with gr.Accordion("📄 Final Paper", open=True):
        final_output = gr.Textbox(label="Formatted Final Output", lines=12)
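    # Intermediate stages start collapsed; only the final formatted paper is open by default.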
    generate_btn.click(
        fn=multi_agent_ui,
        inputs=[topic_input],
        outputs=[
            topic_output, research_output,
            draft_output, reviewed_output,
            final_output
        ]
    )
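    # The five output components receive multi_agent_ui's five return values in this order.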

demo.launch()