Update app.py
app.py CHANGED
@@ -115,18 +115,56 @@ def run_gpt(
         logging.info(LOG_RESPONSE.format(resp)) # Log the response
     return resp
 
-def generate(
+def generate(
+    prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0, model="mistralai/Mixtral-8x7B-Instruct-v0.1"
+):
+    seed = random.randint(1,1111111111111111)
+    logging.info(f"Seed: {seed}") # Log the seed
+
+    # Set the agent prompt based on agent_name
+    if agent_name == "WEB_DEV":
+        agent = "You are a helpful AI assistant. You are a web developer."
+    elif agent_name == "AI_SYSTEM_PROMPT":
+        agent = "You are a helpful AI assistant. You are an AI system."
+    elif agent_name == "PYTHON_CODE_DEV":
+        agent = "You are a helpful AI assistant. You are a Python code developer."
+    else:
+        agent = "You are a helpful AI assistant."
+
+    system_prompt = f"{agent} {sys_prompt}".strip()
+
+    temperature = max(float(temperature), 1e-2)
+    top_p = float(top_p)
+
+    # Add the system prompt to the beginning of the prompt
+    formatted_prompt = f"{system_prompt} {prompt}"
+
     # Use 'prompt' here instead of 'message'
-    formatted_prompt = format_prompt(
+    formatted_prompt = format_prompt(formatted_prompt, history, max_history_turns=5) # Truncated history
     logging.info(f"Formatted Prompt: {formatted_prompt}")
-
+
+    # Use the specified model if it's different from the default
+    client = InferenceClient(model) if model != "mistralai/Mixtral-8x7B-Instruct-v0.1" else InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
+
+    stream = client.text_generation(
+        formatted_prompt,
+        temperature=temperature,
+        max_new_tokens=max_new_tokens,
+        top_p=top_p,
+        repetition_penalty=repetition_penalty,
+        stream=True,
+        details=True,
+        return_full_text=False
+    )
     resp = ""
     for response in stream:
         resp += response.token.text
+        yield resp # This allows for streaming the response
 
     if VERBOSE:
-        logging.info(
+        logging.info(f"RESPONSE: {resp}") # Log the response directly
+
+    return resp # Return the full response at the end
 
 def compress_history(purpose, task, history, directory):
     resp = run_gpt(
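
The new code calls a format_prompt(message, history, max_history_turns=5) helper that is defined elsewhere in app.py and does not appear in this hunk. A minimal sketch of what such a truncating formatter typically looks like for Mixtral-Instruct follows; the [INST] chat markup and the (user, bot) tuple shape of history are assumptions, not the Space's actual implementation:

# Hypothetical sketch; the real format_prompt in this Space is not shown in the hunk above.
def format_prompt(message, history, max_history_turns=5):
    prompt = "<s>"
    # Keep only the most recent turns so the prompt stays within the context window
    for user_msg, bot_msg in history[-max_history_turns:]:
        prompt += f"[INST] {user_msg} [/INST] {bot_msg}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt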
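
Because generate now contains yield, it is a generator: each iteration produces the accumulated text so far, and the trailing return resp only sets StopIteration.value rather than returning to an ordinary caller, so consumers should keep the last yielded chunk. A minimal consumption sketch (the prompt string and empty history are placeholders):

# Stream the response, overwriting the line as the text grows.
final = ""
for partial in generate("Write a function that reverses a string.", history=[]):
    final = partial  # each yield is the full accumulated response so far
    print(partial, end="\r")
print()  # `final` now holds the complete response

Note also that the conditional InferenceClient expression constructs the same client in both branches; InferenceClient(model) alone would behave identically.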