from flask import Flask, request, jsonify, send_file
import gradio as gr
from random import randint
from all_models import models
from externalmod import gr_Interface_load
import asyncio
import os
from threading import RLock
from PIL import Image
myapp = Flask(__name__)
lock = RLock()
HF_TOKEN = os.environ.get("HF_TOKEN")
# Load models, falling back to a stub interface if a model fails to load
def load_fn(models):
    global models_load
    models_load = {}
    for model in models:
        if model not in models_load:
            try:
                m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
            except Exception as error:
                print(error)
                m = gr.Interface(lambda: None, ['text'], ['image'])
            models_load[model] = m

load_fn(models)
num_models = 6
MAX_SEED = 3999999999
default_models = models[:num_models]
inference_timeout = 600
# Gradio inference function: run the model call in a worker thread with a timeout
async def infer(model_str, prompt, seed=1, timeout=inference_timeout):
    kwargs = {"seed": seed}
    task = asyncio.create_task(
        asyncio.to_thread(models_load[model_str].fn, prompt=prompt, **kwargs, token=HF_TOKEN)
    )
    await asyncio.sleep(0)
    try:
        result = await asyncio.wait_for(task, timeout=timeout)
    except asyncio.TimeoutError:
        print(f"Task timed out: {model_str}")
        if not task.done():
            task.cancel()
        result = None
    except Exception as e:
        print(f"Inference failed for {model_str}: {e}")
        if not task.done():
            task.cancel()
        result = None
    if task.done() and result is not None:
        with lock:
            png_path = "generated_image.png"
            result.save(png_path)  # Save the generated image to disk
        return png_path
    return None
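# Quick local check (sketch, not part of the API): `infer` can be exercised on its
# own before wiring it to Flask. This assumes `models` is non-empty and its first
# entry loaded successfully; the printed path is None on failure or timeout.
#
#     path = asyncio.run(infer(models[0], "a watercolor painting of a fox", seed=42))
#     print(path)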
# API endpoint: generate an image for the requested model and prompt
@myapp.route('/generate-image', methods=['POST'])
def generate_image():
    data = request.get_json(silent=True)
    if not data or 'model_str' not in data or 'prompt' not in data:
        return jsonify({"error": "JSON body with 'model_str' and 'prompt' is required."}), 400
    model_str = data['model_str']
    prompt = data['prompt']
    seed = data.get('seed', 1)
    # Run the async Gradio inference from the synchronous Flask handler
    result_path = asyncio.run(infer(model_str, prompt, seed))
    if result_path:
        # Send back the generated image file
        return send_file(result_path, mimetype='image/png')
    else:
        return jsonify({"error": "Failed to generate image."}), 500
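# Example request (sketch): any HTTP client can call this endpoint once the app is
# running on port 7860. The model name below is illustrative; use an entry from
# `all_models.models`.
#
#     curl -X POST http://localhost:7860/generate-image \
#          -H "Content-Type: application/json" \
#          -d '{"model_str": "<model-name>", "prompt": "a watercolor fox", "seed": 42}' \
#          -o output.png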
# Run the Flask app directly when this file is executed
if __name__ == "__main__":
    myapp.run(host='0.0.0.0', port=7860)