wuhp committed (verified)
Commit 5138a85 · 1 Parent(s): 3e97d45

Update app.py

Files changed (1)
  1. app.py +34 -14
app.py CHANGED
@@ -52,7 +52,7 @@ def retrieve_from_memory(query, top_k=2):
 
 # --- Swarm Agent Function with Shared Memory (RAG) - DECORATED with @spaces.GPU ---
 @spaces.GPU # <---- GPU DECORATOR ADDED HERE!
-def swarm_agent_sequential_rag(user_prompt):
+def swarm_agent_sequential_rag(user_prompt, temperature=0.7, top_p=0.9, max_new_tokens=300): # Added settings as arguments
     global shared_memory
     shared_memory = [] # Clear memory for each new request
 
@@ -64,7 +64,13 @@ def swarm_agent_sequential_rag(user_prompt):
     context_1_5b = "\n".join([f"- {mem}" for mem in retrieved_memory_1_5b]) if retrieved_memory_1_5b else "No relevant context found in memory."
     prompt_1_5b = f"Context from Shared Memory:\n{context_1_5b}\n\nYou are a quick idea generator. Generate an initial response to the following user request, considering the context above:\n\nUser Request: {user_prompt}\n\nInitial Response:"
     input_ids_1_5b = tokenizers["1.5B"].encode(prompt_1_5b, return_tensors="pt").to(models["1.5B"].device)
-    output_1_5b = models["1.5B"].generate(input_ids_1_5b, max_new_tokens=200, temperature=0.7, do_sample=True) # Reverted to original max_new_tokens (can adjust)
+    output_1_5b = models["1.5B"].generate(
+        input_ids_1_5b,
+        max_new_tokens=max_new_tokens, # Use user-defined max_new_tokens
+        temperature=temperature, # Use user-defined temperature
+        top_p=top_p, # Use user-defined top_p
+        do_sample=True
+    )
     response_1_5b = tokenizers["1.5B"].decode(output_1_5b[0], skip_special_tokens=True)
     print(f"1.5B Response:\n{response_1_5b}")
     store_in_memory(f"1.5B Model Initial Response: {response_1_5b[:200]}...")
@@ -75,27 +81,41 @@ def swarm_agent_sequential_rag(user_prompt):
     context_7b = "\n".join([f"- {mem}" for mem in retrieved_memory_7b]) if retrieved_memory_7b else "No relevant context found in memory."
     prompt_7b = f"Context from Shared Memory:\n{context_7b}\n\nYou are a detailed elaborator. Take the following initial response and elaborate on it, adding more detail and reasoning, considering the context above. \n\nInitial Response:\n{response_1_5b}\n\nElaborated Response:"
     input_ids_7b = tokenizers["7B"].encode(prompt_7b, return_tensors="pt").to(models["7B"].device)
-    output_7b = models["7B"].generate(input_ids_7b, max_new_tokens=300, temperature=0.7, do_sample=True) # Reverted to original max_new_tokens
+    output_7b = models["7B"].generate(
+        input_ids_7b,
+        max_new_tokens=max_new_tokens + 100, # Slightly more tokens for 7B
+        temperature=temperature, # Use user-defined temperature
+        top_p=top_p, # Use user-defined top_p
+        do_sample=True
+    )
     response_7b = tokenizers["7B"].decode(output_7b[0], skip_special_tokens=True)
     print(f"7B Response:\n{response_7b}")
     store_in_memory(f"7B Model Elaborated Response: {response_7b[:200]}...")
 
-    # No 14B Model Stage anymore
-
     return response_7b # Now returns the 7B model's response as final
 
 
-# --- Gradio Interface --- (Modified to reflect 2-model output)
-def gradio_interface(user_prompt):
-    final_response = swarm_agent_sequential_rag(user_prompt) # Get the final response (from 7B now)
-    return final_response
+# --- Gradio ChatInterface ---
+def gradio_interface(message, history, temperature, top_p, max_tokens): # Accept settings from interface
+    # history is automatically managed by ChatInterface
+    response = swarm_agent_sequential_rag(
+        message,
+        temperature=temperature,
+        top_p=top_p,
+        max_new_tokens=int(max_tokens) # Ensure max_tokens is an integer
+    )
+    return response
 
-iface = gr.Interface(
+iface = gr.ChatInterface( # Using ChatInterface now
     fn=gradio_interface,
-    inputs=gr.Textbox(lines=5, placeholder="Enter your task here..."),
-    outputs=gr.Textbox(lines=10, placeholder="Agent Swarm Output will appear here..."),
-    title="DeepSeek Agent Swarm (ZeroGPU Demo - 2 Models)", # Updated title
-    description="Agent swarm using DeepSeek-R1-Distill models (1.5B, 7B) with shared memory. **GPU accelerated using ZeroGPU!** (Requires Pro Space)", # Updated description
+    # Define additional inputs for settings
+    additional_inputs=[
+        gr.Slider(minimum=0.1, maximum=1.0, step=0.1, value=0.7, label="Temperature"),
+        gr.Slider(minimum=0.01, maximum=1.0, step=0.05, value=0.9, label="Top P"),
+        gr.Number(value=300, label="Max Tokens", precision=0), # Use Number for integer tokens
+    ],
+    title="DeepSeek Agent Swarm Chat (ZeroGPU Demo - 2 Models)", # Updated title
+    description="Chat with a DeepSeek agent swarm (1.5B, 7B) with shared memory and adjustable settings. **GPU accelerated using ZeroGPU!** (Requires Pro Space)", # Updated description
 )
 
 if __name__ == "__main__":
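Note on the generation change: both generate() calls now share one sampling recipe, threaded in from the UI. A minimal standalone sketch of that recipe (sample_reply is a hypothetical helper name; it assumes a Hugging Face transformers model/tokenizer pair, like the entries in the Space's models/tokenizers dicts):

from transformers import AutoModelForCausalLM, AutoTokenizer

def sample_reply(model, tokenizer, prompt, temperature=0.7, top_p=0.9, max_new_tokens=300):
    # Tokenize the prompt and move it onto the model's device
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.device)
    # temperature/top_p only take effect with do_sample=True;
    # greedy decoding would ignore them
    output = model.generate(
        input_ids,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        top_p=top_p,
        do_sample=True,
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)

# Example usage (the checkpoint name is an assumption; the Space's description
# only says "DeepSeek-R1-Distill models (1.5B, 7B)"):
# tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B")
# model = AutoModelForCausalLM.from_pretrained("deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B")
# print(sample_reply(model, tokenizer, "Summarize RAG in one sentence."))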
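Note on the interface change: gr.ChatInterface calls fn(message, history, *additional_inputs), which is why gradio_interface gained temperature, top_p, and max_tokens parameters after the history argument. A self-contained sketch of just that wiring, with a stub echo function standing in for the swarm (not the Space's actual code):

import gradio as gr

def echo(message, history, temperature, top_p, max_tokens):
    # ChatInterface appends each additional_inputs value, in order, after (message, history)
    return f"(T={temperature}, top_p={top_p}, max={int(max_tokens)}) {message}"

demo = gr.ChatInterface(
    fn=echo,
    additional_inputs=[
        gr.Slider(minimum=0.1, maximum=1.0, step=0.1, value=0.7, label="Temperature"),
        gr.Slider(minimum=0.01, maximum=1.0, step=0.05, value=0.9, label="Top P"),
        gr.Number(value=300, label="Max Tokens", precision=0),
    ],
)

if __name__ == "__main__":
    demo.launch()

The int(max_tokens) cast mirrors the commit's own comment ("Ensure max_tokens is an integer"): gr.Number(precision=0) rounds the widget's value, but casting before arithmetic like max_new_tokens + 100 keeps the handler safe either way.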