Update app.py
app.py
CHANGED
@@ -118,7 +118,7 @@ def run_gpt(
 def generate(
     prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0, model="mistralai/Mixtral-8x7B-Instruct-v0.1"
 ):
-    seed = random.randint(1,1111111111111111)
+    seed = random.randint(1, 1111111111111111)
     logging.info(f"Seed: {seed}")  # Log the seed
 
     # Set the agent prompt based on agent_name
@@ -132,7 +132,7 @@ def generate(
         agent = "You are a helpful AI assistant."
 
     system_prompt = f"{agent} {sys_prompt}".strip()
-
+
     temperature = max(float(temperature), 1e-2)
     top_p = float(top_p)
 
@@ -142,18 +142,17 @@ def generate(
     # Use 'prompt' here instead of 'message'
     formatted_prompt = format_prompt(formatted_prompt, history, max_history_turns=5)  # Truncated history
     logging.info(f"Formatted Prompt: {formatted_prompt}")
-
-    # Use the specified model if it's different from the default
+
     client = InferenceClient(model) if model != "mistralai/Mixtral-8x7B-Instruct-v0.1" else InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
-
+
     stream = client.text_generation(
-        formatted_prompt,
-        temperature=temperature,
-        max_new_tokens=max_new_tokens,
-        top_p=top_p,
-        repetition_penalty=repetition_penalty,
-        stream=True,
-        details=True,
+        formatted_prompt,
+        temperature=temperature,
+        max_new_tokens=max_new_tokens,
+        top_p=top_p,
+        repetition_penalty=repetition_penalty,
+        stream=True,
+        details=True,
         return_full_text=False
     )
     resp = ""
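Note on the first hunk: the only change is cosmetic (a space after the comma in random.randint). In the hunks shown, seed is generated and logged but never handed to the generation call, so the log line by itself does not make a run reproducible. InferenceClient.text_generation does expose a seed parameter; a minimal sketch of wiring the logged seed through (the prompt string is a placeholder, not from this Space):

```python
import logging
import random

from huggingface_hub import InferenceClient

seed = random.randint(1, 1111111111111111)
logging.info(f"Seed: {seed}")  # Log the seed

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

# Forwarding the logged seed makes the sampled output repeatable
# for identical inputs and parameters.
output = client.text_generation(
    "[INST] Say hello. [/INST]",  # placeholder prompt
    max_new_tokens=32,
    seed=seed,
)
print(output)
```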
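format_prompt is called with max_history_turns=5, but its body sits outside this diff, and the call feeds formatted_prompt back into itself, so the initial value presumably comes from lines 139-141, which the hunk does not show. For context, a plausible sketch of such a formatter using Mixtral-Instruct's [INST] ... [/INST] chat template and keeping only the last five exchanges; this is an assumption about the helper, not the Space's actual code:

```python
def format_prompt(message, history, max_history_turns=5):
    """Assumed helper: render chat history in Mixtral-Instruct format."""
    prompt = "<s>"
    # Keep only the most recent turns so the prompt stays within the
    # model's context window.
    for user_turn, bot_turn in history[-max_history_turns:]:
        prompt += f"[INST] {user_turn} [/INST] {bot_turn}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt


history = [("Hi", "Hello! How can I help?")]
print(format_prompt("What is 2+2?", history, max_history_turns=5))
```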
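Two small simplifications the diff leaves on the table: the 1e-2 floor on temperature presumably guards against a zero value, which the text-generation backend rejects when sampling, and the client-selection ternary is redundant, since both branches construct a client for the same checkpoint string when model equals the default:

```python
from huggingface_hub import InferenceClient

model = "mistralai/Mixtral-8x7B-Instruct-v0.1"

# Equivalent to the ternary in the diff: when `model` equals the default,
# InferenceClient(model) already yields the default client.
client = InferenceClient(model)
```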
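The last hunk stops at resp = "", so the consumption loop is not shown. With stream=True and details=True, text_generation returns an iterator of token-level chunks whose text lives in response.token.text; a self-contained sketch of the typical loop (prompt and token budget are placeholders):

```python
from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
stream = client.text_generation(
    "[INST] Say hello. [/INST]",  # placeholder prompt
    max_new_tokens=64,
    stream=True,
    details=True,
    return_full_text=False,
)

resp = ""
for response in stream:
    # Each chunk carries token-level detail; append the newly generated text.
    resp += response.token.text
print(resp)
```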