ginipick committed
Commit edec858
1 Parent(s): b54bd5d

Update app.py

Files changed (1)
  app.py: +7 -7
app.py CHANGED
@@ -359,22 +359,22 @@ You are allowed to make up film and branding names, and do them like 80's, 90's
     char_limit = compression_chars[compression_level]
     base_prompt += f" Compress the output to be concise while retaining key visual details. MAX OUTPUT SIZE no more than {char_limit} characters."
 
-    messages = f"<|im_start|>system\nYou are a helpful assistant. Try your best to give best response possible to user.<|im_end|>"
-    messages += f"\n<|im_start|>user\n{base_prompt}\nDescription: {input_text}<|im_end|>\n<|im_start|>assistant\n"
+    messages = f"system\nYou are a helpful assistant. Try your best to give best response possible to user."
+    messages += f"\nuser\n{base_prompt}\nDescription: {input_text}\nassistant\n"
 
     stream = client.text_generation(messages, max_new_tokens=4000, do_sample=True, stream=True, details=True, return_full_text=False)
     output = ""
     for response in stream:
-        if not response.token.text == "<|im_end|>":
+        if not response.token.text == "":
             output += response.token.text
 
     # Remove specific tokens based on the model
     if model == "Llama 3":
-        output = output.rstrip("<|eot_id|>")
+        output = output.rstrip("")
     elif model == "Mistral":
         output = output.rstrip("</s>")
     elif model == "Mistral-Nemo":
-        output = output.rstrip("<|im_end|></s>")
+        output = output.rstrip("</s>")
 
     # Clean up the output
     if ": " in output:
@@ -396,7 +396,7 @@ def create_interface():
 
     with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
 
-        gr.HTML(title)
+        gr.HTML("<h1>My AI Art Prompt Generator</h1>")
 
         with gr.Row():
             with gr.Column(scale=2):
@@ -532,4 +532,4 @@ def create_interface():
532
 
533
  if __name__ == "__main__":
534
  demo = create_interface()
535
- demo.launch()
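The remaining two hunks only touch the UI shell. A stripped-down sketch of that shape follows, assuming create_interface() returns the Blocks object (as the __main__ block implies) and omitting the rows, columns, and event wiring that app.py defines elsewhere.

    # Sketch of the UI pieces touched by the last two hunks; layout and wiring omitted.
    import gradio as gr

    def create_interface():
        with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
            gr.HTML("<h1>My AI Art Prompt Generator</h1>")  # inline title from this commit
            # ... the Row/Column layout from app.py would go here ...
        return demo

    if __name__ == "__main__":
        demo = create_interface()
        demo.launch()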
 