Update app.py
app.py CHANGED
@@ -16,15 +16,35 @@ API_URL_DEV = "https://api-inference.huggingface.co/models/black-forest-labs/FLU
 API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
 timeout = 100
 
-def enhance_prompt(prompt, style="
+def enhance_prompt(prompt, model="mistralai/Mistral-7B-Instruct-v0.1", style="photo-realistic"):
+
     client = Client("K00B404/Mistral-Nemo-custom")
+
+    system_prompt=f"""
+    You are a image generation prompt enhancer specialized in the {style} style.
+    You must respond only with the enhanced version of the users input prompt
+    Remember, image generation models can be stimulated by refering to camera 'effect' in the prompt like :4k ,award winning, super details, 35mm lens, hd
+    """
+    user_message=f"###input image generation prompt### {prompt}"
+
     result = client.predict(
-
-
-
+        system_prompt=system_prompt,
+        user_message=user_message,
+        max_tokens=256,
+        model_id=model,# "mistralai/Mistral-Nemo-Instruct-2407",
+        api_name="/predict"
     )
     return result
 
+    # The output value that appears in the "Response" Textbox component.
+    """result = client.predict(
+        system_prompt=system_prompt,#"You are a image generation prompt enhancer and must respond only with the enhanced version of the users input prompt",
+        user_message=user_message,
+        max_tokens=500,
+        api_name="/predict"
+    )
+    return result
+    """
 def mistral_nemo_call(prompt, API_TOKEN, model="mistralai/Mistral-Nemo-Instruct-2407", style="photo-realistic"):
 
     client = InferenceClient(api_key=API_TOKEN)
@@ -74,7 +94,7 @@ def query(prompt, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Ka
 
     original_prompt = prompt
     if enhance_prompt_option:
-        prompt = enhance_prompt(prompt)
+        prompt = enhance_prompt(prompt, style="cartoon")
         print(f'\033[1mGeneration {key} enhanced prompt:\033[0m {prompt}')
     if use_mistral_nemo:
         prompt = mistral_nemo_call(prompt,API_TOKEN=API_TOKEN,style="cartoon")
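For reference, here is a minimal smoke test of the Space call that the new enhance_prompt wraps. This is a hypothetical sketch rather than part of the commit: it assumes the K00B404/Mistral-Nemo-custom Space is running and that its /predict endpoint accepts the keyword arguments shown in the diff (system_prompt, user_message, max_tokens, model_id); the test prompt and style wording are made up.

# Hypothetical smoke test for the Space endpoint used by enhance_prompt (not part of app.py).
from gradio_client import Client

client = Client("K00B404/Mistral-Nemo-custom")
enhanced = client.predict(
    system_prompt=(
        "You are an image generation prompt enhancer specialized in the cartoon style. "
        "Respond only with the enhanced version of the user's input prompt."
    ),
    user_message="###input image generation prompt### a cat sleeping on a windowsill",
    max_tokens=256,
    model_id="mistralai/Mistral-7B-Instruct-v0.1",  # default from the new enhance_prompt signature
    api_name="/predict",
)
print(enhanced)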
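The body of mistral_nemo_call is not shown in this diff beyond the InferenceClient(api_key=API_TOKEN) line. The sketch below is only an assumption of what a typical huggingface_hub chat-completion call with that signature might look like; the message layout, max_tokens value, and system prompt wording are guesses, not the app's actual code.

# Assumed shape of mistral_nemo_call, inferred from its signature and the visible
# InferenceClient construction; the chat_completion call itself is a guess.
from huggingface_hub import InferenceClient

def mistral_nemo_call(prompt, API_TOKEN, model="mistralai/Mistral-Nemo-Instruct-2407", style="photo-realistic"):
    client = InferenceClient(api_key=API_TOKEN)
    response = client.chat_completion(
        model=model,
        messages=[
            {"role": "system",
             "content": f"You are an image generation prompt enhancer specialized in the {style} style. "
                        "Respond only with the enhanced prompt."},
            {"role": "user", "content": prompt},
        ],
        max_tokens=256,
    )
    return response.choices[0].message.content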