randeom committed on
Commit
707e3ee
·
verified ·
1 Parent(s): 46a4fa5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -2
app.py CHANGED
@@ -1,5 +1,6 @@
1
  import streamlit as st
2
  from huggingface_hub import InferenceClient
 
3
 
4
  # Load custom CSS
5
  with open('style.css') as f:
@@ -17,7 +18,12 @@ def format_prompt_for_image(name, hair_color, personality, outfit_style):
17
  prompt = f"Generate an image prompt for a waifu character named {name} with {hair_color} hair, a {personality} personality, and wearing a {outfit_style}."
18
  return prompt
19
 
20
- def generate_text(prompt, temperature=0.9, max_new_tokens=2512, top_p=0.95, repetition_penalty=1.0):
 
 
 
 
 
21
  temperature = max(temperature, 1e-2)
22
  generate_kwargs = dict(
23
  temperature=temperature,
@@ -32,7 +38,7 @@ def generate_text(prompt, temperature=0.9, max_new_tokens=2512, top_p=0.95, repe
32
  output = ""
33
  for response in stream:
34
  output += response.token.text
35
- return output
36
  except Exception as e:
37
  st.error(f"Error generating text: {e}")
38
  return ""
 
1
  import streamlit as st
2
  from huggingface_hub import InferenceClient
3
+ import re
4
 
5
  # Load custom CSS
6
  with open('style.css') as f:
 
18
  prompt = f"Generate an image prompt for a waifu character named {name} with {hair_color} hair, a {personality} personality, and wearing a {outfit_style}."
19
  return prompt
20
 
21
def clean_generated_text(text):
    """Return *text* with trailing end-of-sequence tags and whitespace removed.

    The model's token stream can end with one or more ``</s>`` tags,
    optionally surrounded by whitespace.  The previous pattern
    (``r'</s>$'``) missed the tag whenever whitespace followed it, so the
    tag survived into the "cleaned" output.  Anchoring on optional
    whitespace and allowing repeated tags fixes that.

    Parameters
    ----------
    text : str
        Raw generated text from the inference stream.

    Returns
    -------
    str
        Cleaned text with no trailing ``</s>`` tags and no surrounding
        whitespace.
    """
    # (?:\s*</s>)+ — one or more trailing tags, each possibly preceded by
    # whitespace; \s*$ — tolerate whitespace after the final tag too.
    clean_text = re.sub(r'(?:\s*</s>)+\s*$', '', text).strip()
    return clean_text
25
+
26
+ def generate_text(prompt, temperature=0.9, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0):
27
  temperature = max(temperature, 1e-2)
28
  generate_kwargs = dict(
29
  temperature=temperature,
 
38
  output = ""
39
  for response in stream:
40
  output += response.token.text
41
+ return clean_generated_text(output)
42
  except Exception as e:
43
  st.error(f"Error generating text: {e}")
44
  return ""