Geek7 committed on
Commit
4f6d425
·
verified ·
1 Parent(s): c130f6c

Update myapp.py

Browse files
Files changed (1) hide show
  1. myapp.py +11 -7
myapp.py CHANGED
@@ -1,7 +1,6 @@
1
  from flask import Flask, request, jsonify, send_file
2
  from flask_cors import CORS
3
  import os
4
- from random import randint
5
  from all_models import models
6
  from externalmod import gr_Interface_load
7
  import asyncio
@@ -27,7 +26,7 @@ def load_fn(models):
27
  m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
28
  models_load[model] = m
29
  except Exception as error:
30
- print(error)
31
  models_load[model] = None # Handle model loading failures
32
 
33
  load_fn(models)
@@ -36,10 +35,10 @@ inference_timeout = 600
36
 
37
  async def infer(model_str, prompt, seed=1, timeout=inference_timeout):
38
  kwargs = {"seed": seed}
39
- task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn, prompt=prompt, **kwargs, token=HF_TOKEN))
40
- await asyncio.sleep(0)
41
-
42
  try:
 
 
 
43
  result = await asyncio.wait_for(task, timeout=timeout)
44
  if task.done() and result is not None:
45
  with lock:
@@ -47,7 +46,7 @@ async def infer(model_str, prompt, seed=1, timeout=inference_timeout):
47
  result.save(png_path)
48
  return png_path # Return the path of the saved image
49
  except Exception as e:
50
- print(e)
51
  return None
52
 
53
  @myapp.route('/generate', methods=['POST'])
@@ -57,12 +56,17 @@ def generate():
57
  prompt = data.get('prompt')
58
  seed = data.get('seed', 1)
59
 
 
 
60
  if model_str not in models_load or models_load[model_str] is None:
 
61
  return jsonify({"error": "Model not found or not loaded"}), 404
62
 
63
  image_path = asyncio.run(infer(model_str, prompt, seed, inference_timeout))
64
  if image_path is not None:
65
- return send_file(image_path, mimetype='image/png') # Send the generated image file
 
 
66
  return jsonify({"error": "Image generation failed"}), 500
67
 
68
  if __name__ == '__main__':
 
1
  from flask import Flask, request, jsonify, send_file
2
  from flask_cors import CORS
3
  import os
 
4
  from all_models import models
5
  from externalmod import gr_Interface_load
6
  import asyncio
 
26
  m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
27
  models_load[model] = m
28
  except Exception as error:
29
+ print(f"Error loading model {model}: {error}")
30
  models_load[model] = None # Handle model loading failures
31
 
32
  load_fn(models)
 
35
 
36
  async def infer(model_str, prompt, seed=1, timeout=inference_timeout):
37
  kwargs = {"seed": seed}
 
 
 
38
  try:
39
+ task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn, prompt=prompt, **kwargs, token=HF_TOKEN))
40
+ await asyncio.sleep(0)
41
+
42
  result = await asyncio.wait_for(task, timeout=timeout)
43
  if task.done() and result is not None:
44
  with lock:
 
46
  result.save(png_path)
47
  return png_path # Return the path of the saved image
48
  except Exception as e:
49
+ print(f"Inference error for model {model_str}: {e}") # Log the error message
50
  return None
51
 
52
  @myapp.route('/generate', methods=['POST'])
 
56
  prompt = data.get('prompt')
57
  seed = data.get('seed', 1)
58
 
59
+ print(f"Received request for model: '{model_str}', prompt: '{prompt}', seed: {seed}")
60
+
61
  if model_str not in models_load or models_load[model_str] is None:
62
+ print(f"Model not found in models_load: {model_str}. Available models: {models_load.keys()}")
63
  return jsonify({"error": "Model not found or not loaded"}), 404
64
 
65
  image_path = asyncio.run(infer(model_str, prompt, seed, inference_timeout))
66
  if image_path is not None:
67
+ return send_file(image_path, mimetype='image/png')
68
+
69
+ print("Image generation failed for:", model_str) # Log failure reason
70
  return jsonify({"error": "Image generation failed"}), 500
71
 
72
  if __name__ == '__main__':