Update app.py
app.py
CHANGED
@@ -11,7 +11,7 @@ API_URL = "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl"
 API_TOKEN = os.getenv("HF_READ_TOKEN")  # it is free
 headers = {"Authorization": f"Bearer {API_TOKEN}"}
 
-models_list = ["AbsoluteReality v1.8.1", "DALL-E 3 XL", "Playground v2", "Openjourney v4", "Lyriel 1.6", "Animagine XL 2.0", "Counterfeit v2.5", "Realistic Vision 5.1", "Incursios v1.6", "Anime Detailer XL LoRA", "epiCRealism", "PixelArt XL", "
+models_list = ["AbsoluteReality v1.8.1", "DALL-E 3 XL", "Playground v2", "Openjourney v4", "Lyriel 1.6", "Animagine XL 2.0", "Counterfeit v2.5", "Realistic Vision 5.1", "Incursios v1.6", "Anime Detailer XL LoRA", "epiCRealism", "PixelArt XL", "NewReality XL"]
 
 def query(prompt, model, is_negative=False, steps=20, cfg_scale=7, seed=None):
     language = detect(prompt)
@@ -44,8 +44,8 @@ def query(prompt, model, is_negative=False, steps=20, cfg_scale=7, seed=None):
         API_URL = "https://api-inference.huggingface.co/models/emilianJR/epiCRealism"
     if model == 'PixelArt XL':
         API_URL = "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl"
-    if model == '
-        API_URL = "https://api-inference.huggingface.co/models/
+    if model == 'NewReality XL':
+        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/newrealityxl-global-nsfw"
 
     payload = {
         "inputs": prompt,
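For context, the branch added in this commit follows the same pattern the rest of query() uses: pick an Inference API endpoint from the selected model name, then POST the prompt payload to it. Below is a minimal sketch of that flow, assuming the app sends the payload with requests and decodes the returned bytes with Pillow; the helper name generate and the timeout value are illustrative, and the real app.py also passes extra payload fields (negative prompt, steps, cfg_scale, seed) and its own error handling.

import io
import os

import requests
from PIL import Image

API_TOKEN = os.getenv("HF_READ_TOKEN")  # free read token, as in app.py
headers = {"Authorization": f"Bearer {API_TOKEN}"}

def generate(prompt, model="NewReality XL"):
    # Map the UI model name to its Inference API endpoint, mirroring the diff above.
    API_URL = "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl"  # default
    if model == "PixelArt XL":
        API_URL = "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl"
    if model == "NewReality XL":
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/newrealityxl-global-nsfw"

    payload = {"inputs": prompt}
    # The Serverless Inference API returns raw image bytes for text-to-image models.
    response = requests.post(API_URL, headers=headers, json=payload, timeout=120)
    response.raise_for_status()
    return Image.open(io.BytesIO(response.content))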