Update app.py
app.py CHANGED
@@ -16,12 +16,9 @@ if 'is_authenticated' not in st.session_state:
 
 class ModelGenerator:
     @staticmethod
-    def generate_midjourney(prompt):
-        if not st.session_state.get('hf_token'):
-            return ("Midjourney", "Error: No authentication token found")
-
+    def generate_midjourney(prompt, token):
         try:
-            client = Client("mukaist/Midjourney", hf_token=
+            client = Client("mukaist/Midjourney", hf_token=token)
             result = client.predict(
                 prompt=prompt,
                 negative_prompt="(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime:1.4), text, close up, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck",
@@ -52,12 +49,9 @@ class ModelGenerator:
             return ("Midjourney", f"Error: {str(e)}")
 
     @staticmethod
-    def generate_stable_cascade(prompt):
-        if not st.session_state.get('hf_token'):
-            return ("Stable Cascade", "Error: No authentication token found")
-
+    def generate_stable_cascade(prompt, token):
         try:
-            client = Client("multimodalart/stable-cascade", hf_token=
+            client = Client("multimodalart/stable-cascade", hf_token=token)
             result = client.predict(
                 prompt=prompt,
                 negative_prompt=prompt,
@@ -76,12 +70,9 @@ class ModelGenerator:
             return ("Stable Cascade", f"Error: {str(e)}")
 
     @staticmethod
-    def generate_stable_diffusion_3(prompt):
-        if not st.session_state.get('hf_token'):
-            return ("SD 3 Medium", "Error: No authentication token found")
-
+    def generate_stable_diffusion_3(prompt, token):
         try:
-            client = Client("stabilityai/stable-diffusion-3-medium", hf_token=
+            client = Client("stabilityai/stable-diffusion-3-medium", hf_token=token)
             result = client.predict(
                 prompt=prompt,
                 negative_prompt=prompt,
@@ -98,12 +89,9 @@ class ModelGenerator:
             return ("SD 3 Medium", f"Error: {str(e)}")
 
     @staticmethod
-    def generate_stable_diffusion_35(prompt):
-        if not st.session_state.get('hf_token'):
-            return ("SD 3.5 Large", "Error: No authentication token found")
-
+    def generate_stable_diffusion_35(prompt, token):
         try:
-            client = Client("stabilityai/stable-diffusion-3.5-large", hf_token=
+            client = Client("stabilityai/stable-diffusion-3.5-large", hf_token=token)
             result = client.predict(
                 prompt=prompt,
                 negative_prompt=prompt,
@@ -120,13 +108,10 @@ class ModelGenerator:
             return ("SD 3.5 Large", f"Error: {str(e)}")
 
     @staticmethod
-    def generate_playground_v2_5(prompt):
-        if not st.session_state.get('hf_token'):
-            return ("Playground v2.5", "Error: No authentication token found")
-
+    def generate_playground_v2_5(prompt, token):
         try:
             client = Client("https://playgroundai-playground-v2-5.hf.space/--replicas/ji5gy/",
-                            hf_token=
+                            hf_token=token)
             result = client.predict(
                 prompt,
                 prompt, # negative prompt
@@ -145,15 +130,19 @@ class ModelGenerator:
             return ("Playground v2.5", f"Error: {str(e)}")
 
 def generate_images(prompt, selected_models):
+    token = st.session_state.get('hf_token')
+    if not token:
+        return [("Error", "No authentication token found")]
+
     results = []
     with concurrent.futures.ThreadPoolExecutor() as executor:
         futures = []
         model_map = {
-            "Midjourney": ModelGenerator.generate_midjourney,
-            "Stable Cascade": ModelGenerator.generate_stable_cascade,
-            "SD 3 Medium": ModelGenerator.generate_stable_diffusion_3,
-            "SD 3.5 Large": ModelGenerator.generate_stable_diffusion_35,
-            "Playground v2.5": ModelGenerator.generate_playground_v2_5
+            "Midjourney": lambda p: ModelGenerator.generate_midjourney(p, token),
+            "Stable Cascade": lambda p: ModelGenerator.generate_stable_cascade(p, token),
+            "SD 3 Medium": lambda p: ModelGenerator.generate_stable_diffusion_3(p, token),
+            "SD 3.5 Large": lambda p: ModelGenerator.generate_stable_diffusion_35(p, token),
+            "Playground v2.5": lambda p: ModelGenerator.generate_playground_v2_5(p, token)
        }
 
         for model in selected_models:
@@ -204,7 +193,6 @@ def main():
             # Verify token is valid
             api = HfApi(token=token)
             api.whoami()
-            # Use dictionary style access to ensure proper setting
             st.session_state['hf_token'] = token
             st.session_state['is_authenticated'] = True
             st.success("Successfully logged in!")
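For context, a minimal sketch of how the reworked generate_images might be driven from the Streamlit UI once the login flow has stored st.session_state['hf_token']. The widget labels and layout below are illustrative assumptions, not part of this commit; only the model names and the generate_images signature come from the diff above.

import streamlit as st

# Hypothetical driver code, assuming generate_images and the login flow
# from app.py are defined in this module.
prompt = st.text_input("Prompt")
selected_models = st.multiselect(
    "Models",
    ["Midjourney", "Stable Cascade", "SD 3 Medium", "SD 3.5 Large", "Playground v2.5"],
)

if st.button("Generate") and prompt:
    # generate_images now reads the token once from session state and returns
    # [("Error", "No authentication token found")] if the user is not logged in.
    for model_name, output in generate_images(prompt, selected_models):
        st.subheader(model_name)
        st.write(output)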