Update app.py
app.py (CHANGED)
@@ -359,22 +359,22 @@ You are allowed to make up film and branding names, and do them like 80's, 90's
     char_limit = compression_chars[compression_level]
     base_prompt += f" Compress the output to be concise while retaining key visual details. MAX OUTPUT SIZE no more than {char_limit} characters."

-    messages = f"
-    messages += f"\
+    messages = f"system\nYou are a helpful assistant. Try your best to give best response possible to user."
+    messages += f"\nuser\n{base_prompt}\nDescription: {input_text}\nassistant\n"

     stream = client.text_generation(messages, max_new_tokens=4000, do_sample=True, stream=True, details=True, return_full_text=False)
     output = ""
     for response in stream:
-        if not response.token.text == "
+        if not response.token.text == "":
             output += response.token.text

     # Remove specific tokens based on the model
     if model == "Llama 3":
-        output = output.rstrip("
+        output = output.rstrip("")
     elif model == "Mistral":
         output = output.rstrip("</s>")
     elif model == "Mistral-Nemo":
-        output = output.rstrip("
+        output = output.rstrip("</s>")

     # Clean up the output
     if ": " in output:

@@ -396,7 +396,7 @@ def create_interface():

     with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:

-        gr.HTML(
+        gr.HTML("<h1>My AI Art Prompt Generator</h1>")

         with gr.Row():
             with gr.Column(scale=2):

@@ -532,4 +532,4 @@ def create_interface():

 if __name__ == "__main__":
     demo = create_interface()
-    demo.launch()
+    demo.launch()
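For context, the first hunk rewrites the prompt construction and the token-stream handling around InferenceClient.text_generation. The sketch below shows that streaming pattern in isolation; it is a minimal sketch, not the Space's exact code. The model ID, the generate() wrapper, and the single trailing "</s>" strip are illustrative assumptions, and the plain-text system/user/assistant layout simply mirrors the added lines above.

# Minimal sketch of the streaming pattern the first hunk modifies.
# Assumptions (not in the diff): the model ID and the single "</s>"
# stop marker are illustrative; generate() is a hypothetical wrapper.
from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")  # assumed model ID

def generate(base_prompt: str, input_text: str) -> str:
    # Plain-text system/user/assistant layout, as in the added lines above.
    messages = "system\nYou are a helpful assistant. Try your best to give best response possible to user."
    messages += f"\nuser\n{base_prompt}\nDescription: {input_text}\nassistant\n"

    # With stream=True and details=True, text_generation yields per-token
    # objects whose .token.text holds each generated piece of text.
    stream = client.text_generation(
        messages,
        max_new_tokens=4000,
        do_sample=True,
        stream=True,
        details=True,
        return_full_text=False,
    )

    output = ""
    for response in stream:
        output += response.token.text

    # Strip a trailing end-of-sequence marker, mirroring the per-model rstrip calls.
    return output.rstrip("</s>").strip()

Calling generate(base_prompt, input_text) returns the accumulated streamed text with the trailing marker removed, which is the same shape of result the diffed code passes on to its output-cleanup step.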