Commit 8e943a0
Parent: dfdce18

Verify requests

src/model_demo.py CHANGED (+12 -10)
@@ -17,20 +17,21 @@ current_model: str
 last_current_model_sync: datetime = datetime.fromtimestamp(0, TIMEZONE)
 
 def get_current_model() -> str | None:
+    global current_model
+    global last_current_model_sync
+    now = datetime.now(TIMEZONE)
+    if now - last_current_model_sync < timedelta(minutes=1):
+        return current_model
+    last_current_model_sync = now
+
     try:
-
-        global last_current_model_sync
-        now = datetime.now(TIMEZONE)
-        if now - last_current_model_sync < timedelta(minutes=5):
-            return current_model
-        last_current_model_sync = now
-
-        response = requests.get(f"{SERVER_API}/model", verify=False)
+        response = requests.get(f"{SERVER_API}/model")
         response.raise_for_status()
         model = response.json()
         current_model = f"{model['uid']} - {model['url']}"
         return current_model
     except:
+        print("Unable to connect to API")
         return None
 
 
@@ -49,12 +50,13 @@ def submit(prompt: str, seed: int | str | None, baseline: bool) -> tuple:
         seed = random.randint(0, 2 ** 32 - 1)
 
     print(f"Making request with prompt: {prompt}, seed: {seed}, baseline: {baseline}")
-    response = requests.post(f"{SERVER_API}/generate", params={"prompt": prompt, "baseline": baseline, "seed": seed}, verify=False)
+    response = requests.post(f"{SERVER_API}/generate", params={"prompt": prompt, "baseline": baseline, "seed": seed})
     response.raise_for_status()
     result = response.json()
     generation_time = float(result["generation_time"])
+    nsfw = result["nsfw"]
     image = image_from_base64(result["image"])
-    print(f"Received image with generation time: {generation_time:.3f}s")
+    print(f"Received image with generation time: {generation_time:.3f}s, NSFW: {nsfw}")
 
     return None, gr.Image(
         image,