Commit 6a0af53
Parent(s): 8e97e43

cache

Files changed:
- cache/__init__.py +0 -0
- cache/local_cache.py +37 -0
- routers/intference/stable_diffusion.py +2 -2
cache/__init__.py
ADDED
(empty file)
cache/local_cache.py
ADDED
@@ -0,0 +1,37 @@
+from datetime import datetime, timedelta
+from functools import wraps
+
+CACHE_SIZE = 50  # intended cap on cached entries (not enforced in this module yet)
+
+_cache = {}
+_cache_time = {}
+
+
+def ttl_cache(key_name, ttl_secs=20):
+    def decorator(func):
+        @wraps(func)
+        async def wrapper(*args, **kwargs):
+            # Assuming the prompt is the key for caching, change as necessary
+            key = kwargs.get(key_name)
+            ttl = timedelta(seconds=ttl_secs)
+            # Check cache
+            if key in _cache:
+                if datetime.now() - _cache_time[key] > ttl:
+                    # Cache has expired
+                    del _cache[key]
+                    del _cache_time[key]
+                else:
+                    return _cache[key]
+
+            # Call the actual function if not in cache or expired
+            response = await func(*args, **kwargs)
+
+            # Cache the result
+            _cache[key] = response
+            _cache_time[key] = datetime.now()
+
+            return response
+
+        return wrapper
+
+    return decorator
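Because the decorator above is written as a factory, it can be exercised on its own before being wired into a router. Below is a minimal, self-contained sketch assuming it is run from the repository root; the fetch coroutine and the one-second TTL are illustrative, not part of the commit.

import asyncio

from cache.local_cache import ttl_cache


@ttl_cache(key_name="prompt", ttl_secs=1)
async def fetch(*, prompt: str) -> str:
    # Stand-in for an expensive call such as running a diffusion pipeline.
    await asyncio.sleep(0.1)
    return f"result for {prompt!r}"


async def main():
    first = await fetch(prompt="a cat")    # miss: runs fetch and stores the result
    second = await fetch(prompt="a cat")   # hit: served from _cache within the TTL
    assert first is second
    await asyncio.sleep(1.1)               # let the one-second TTL lapse
    third = await fetch(prompt="a cat")    # expired: entry is evicted and recomputed
    print(first, third)


asyncio.run(main())

Note that the wrapper looks the key up only in kwargs, so callers must pass the keyed argument by name, as the keyword-only parameter in this sketch forces.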
routers/intference/stable_diffusion.py
CHANGED
@@ -6,6 +6,7 @@ from diffusers import DiffusionPipeline
 from fastapi import APIRouter
 from fastapi.responses import StreamingResponse
 
+from cache.local_cache import ttl_cache
 from config import settings
 
 router = APIRouter()
@@ -31,6 +32,7 @@ refiner.enable_attention_slicing()
 
 
 @router.get("/generate")
+@ttl_cache(key_name='prompt', ttl_secs=20)
 async def generate(prompt: str):
     """
     generate image
@@ -42,8 +44,6 @@ async def generate(prompt: str):
 
     prompt = f"single image. single model. {prompt}. zoomed in. full-body. real person. realistic. 4k. best quality."
     print(prompt)
-    # "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly. bad anatomy, disfigured, poorly drawn face, mutation, mutated, extra limb, ugly, disgusting, poorly drawn hands, missing limb, floating limbs, disconnected limbs, malformed hands, blurry, mutated hands and fingers, watermark, watermarked, oversaturated, censored, distorted hands, amputation, missing hands, obese, doubled face, double hands, two women, anime style, cartoon, toon. multiple people."
-    # prompt = "Designs should play with different textures and layering but stick to a monochrome palette. Think leather jackets over mesh tops, or satin draped over matte cotton. in a studio. zoomed-in."
 
     # run both experts
     image = base(
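For context on how the decorator interacts with FastAPI: the wrapper reads the key from kwargs, and FastAPI passes resolved query parameters such as prompt as keyword arguments, so the lookup works for the endpoint above. The cached object is also returned as-is on every hit, so it must be safe to send more than once. A minimal sketch of the pattern with a toy endpoint standing in for the Stable Diffusion pipeline; the placeholder payload and app wiring are assumptions, not taken from the repository.

from fastapi import APIRouter, FastAPI
from fastapi.responses import Response

from cache.local_cache import ttl_cache

router = APIRouter()


@router.get("/generate")
@ttl_cache(key_name="prompt", ttl_secs=20)
async def generate(prompt: str):
    # Toy payload standing in for the rendered image. Returning bytes in a
    # plain Response keeps the cached value re-sendable; a one-shot
    # StreamingResponse would be exhausted after its first use.
    png_bytes = f"fake image for {prompt}".encode()
    return Response(content=png_bytes, media_type="image/png")


app = FastAPI()
app.include_router(router)

Because the wrapper is built with functools.wraps, inspect.signature still resolves to the original (prompt: str) signature, so FastAPI continues to register prompt as a query parameter even with the cache decorator applied between the route decorator and the function.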