Spaces: Running on Zero
Commit • 5777088
Parent(s): c443141
Update model/model_manager.py (#6)
- Update model/model_manager.py (5c6a38a87ac0282c7bee84f4a76ac55afe06e266)
Co-authored-by: Yuansheng Ni <[email protected]>
- model/model_manager.py +7 -7
model/model_manager.py
CHANGED
@@ -21,7 +21,7 @@ class ModelManager:
         self.model_ig_list = IMAGE_GENERATION_MODELS
         self.model_ie_list = IMAGE_EDITION_MODELS
         self.loaded_models = {}
-    @spaces.GPU
+    # @spaces.GPU
     def load_model_pipe(self, model_name):
         model_source, model_name, model_type = model_name.split("_")
         if not model_name in self.loaded_models:
@@ -56,7 +56,7 @@ class ModelManager:
         img = Image.open(io.BytesIO(base64.decodebytes(bytes(image_base64, "utf-8"))))
 
         return img
-    @spaces.GPU
+    @spaces.GPU(duration=60)
     def generate_image_ig(self, prompt, model_name):
         if 'playground' in model_name.lower():
             result = self.generate_image_playground(model_name=model_name, prompt=prompt)
@@ -64,7 +64,7 @@ class ModelManager:
         pipe = self.load_model_pipe(model_name)
         result = pipe(prompt=prompt)
         return result
-
+
     def generate_image_ig_parallel_anony(self, prompt, model_A, model_B):
         if model_A == "" and model_B == "":
             model_names = random.sample([model for model in self.model_ig_list], 2)
@@ -78,7 +78,7 @@ class ModelManager:
             result = future.result()
             results.append(result)
         return results[0], results[1], model_names[0], model_names[1]
-
+
     def generate_image_ig_parallel(self, prompt, model_A, model_B):
         results = []
         model_names = [model_A, model_B]
@@ -88,12 +88,12 @@ class ModelManager:
             result = future.result()
             results.append(result)
         return results[0], results[1]
-    @spaces.GPU
+    @spaces.GPU(duration=150)
     def generate_image_ie(self, textbox_source, textbox_target, textbox_instruct, source_image, model_name):
         pipe = self.load_model_pipe(model_name)
         result = pipe(src_image = source_image, src_prompt = textbox_source, target_prompt = textbox_target, instruct_prompt = textbox_instruct)
         return result
-
+
     def generate_image_ie_parallel(self, textbox_source, textbox_target, textbox_instruct, source_image, model_A, model_B):
         results = []
         model_names = [model_A, model_B]
@@ -103,7 +103,7 @@ class ModelManager:
             result = future.result()
             results.append(result)
         return results[0], results[1]
-
+
     def generate_image_ie_parallel_anony(self, textbox_source, textbox_target, textbox_instruct, source_image, model_A, model_B):
         if model_A == "" and model_B == "":
             model_names = random.sample([model for model in self.model_ie_list], 2)
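
The substance of this commit is the ZeroGPU decorators: @spaces.GPU is commented out on load_model_pipe, while generate_image_ig now requests duration=60 and generate_image_ie requests duration=150. For reference, the sketch below shows how the spaces.GPU decorator from the Hugging Face spaces package is typically applied on a ZeroGPU Space; the model id and function name are illustrative choices, not code from this repository.

import spaces
import torch
from diffusers import DiffusionPipeline

# Build the pipeline once at startup; on ZeroGPU the GPU is only attached
# while a @spaces.GPU-decorated function is actually running.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # illustrative model choice
    torch_dtype=torch.float16,
).to("cuda")

@spaces.GPU(duration=60)  # duration is the per-call GPU time budget, in seconds
def generate(prompt: str):
    # Runs with the GPU attached; the allocation is released when the call returns.
    return pipe(prompt=prompt).images[0]

Read against that pattern, the change drops the separate GPU allocation for load_model_pipe, which is called from inside the already-decorated generation methods, and gives the remaining decorated calls explicit budgets, presumably sized to their expected runtimes: 60 seconds for text-to-image generation and 150 seconds for image editing.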
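The parallel helpers that appear as context in the diff (generate_image_ig_parallel, generate_image_ie_parallel, and their _anony variants) all collect future.result() for two models and return the pair of results. The executor setup sits outside the visible hunks, so the following is only a plausible sketch of that pattern built on concurrent.futures; generate_parallel and generate_fn are hypothetical names introduced here for illustration.

import concurrent.futures

def generate_parallel(generate_fn, prompt, model_A, model_B):
    # Submit both models up front so the two generations overlap, then wait
    # for each result in submission order, mirroring the future.result() and
    # results.append(result) context lines above.
    model_names = [model_A, model_B]
    with concurrent.futures.ThreadPoolExecutor() as executor:
        futures = [executor.submit(generate_fn, prompt, name) for name in model_names]
        results = [future.result() for future in futures]
    return results[0], results[1]

Collecting in submission order keeps results[0] and results[1] aligned with model_A and model_B, which matters for the anonymous variants that return model_names alongside the generated images.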