doublelotus committed
Commit 781570f · 1 Parent(s): 80b3e90
Files changed (3)
  1. main-copyy.py +64 -0
  2. main.py +29 -16
  3. realify2.py +70 -0
main-copyy.py ADDED
@@ -0,0 +1,64 @@
+ import os
+ from flask import Flask, request, jsonify, send_file
+ from flask_cors import CORS
+ from diffusers import AutoPipelineForImage2Image
+ from diffusers.utils import load_image, make_image_grid
+ from PIL import Image
+ import torch
+ import io
+
+ # Set environment variable to avoid fragmentation
+ os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'
+
+ # Clear any unused GPU memory
+ torch.cuda.empty_cache()
+
+ app = Flask(__name__)
+ CORS(app)
+
+ # Load the image-to-image pipeline from Hugging Face
+ pipe = AutoPipelineForImage2Image.from_pretrained("RunDiffusion/Juggernaut-X-v10", torch_dtype=torch.float16).to("cuda")
+ pipe.enable_xformers_memory_efficient_attention()
+ pipe.enable_vae_tiling()  # Improve performance on large images
+ pipe.enable_vae_slicing()  # Improve performance on large batches
+
+ @app.route('/')
+ def hello():
+     return {"Goes Wrong": "Keeping it real"}
+
+ @app.route('/generate', methods=['POST'])
+ def generate():
+     if 'image' not in request.files:
+         return jsonify({"error": "No image file provided"}), 400
+
+     image_file = request.files['image']
+     prompt = request.form.get('prompt', 'fleece hoodie, front zip, abstract pattern, GAP logo, high quality, photo')
+     negative_prompt = request.form.get('negative_prompt', 'low quality, bad quality, sketches, hanger')
+     guidance_scale = float(request.form.get('guidance_scale', 7))
+     num_images = int(request.form.get('num_images', 2))
+
+     sketch = Image.open(image_file)
+
+     with torch.inference_mode():
+         images = pipe(
+             prompt=prompt,
+             negative_prompt=negative_prompt,
+             image=sketch,
+             num_inference_steps=35,
+             guidance_scale=guidance_scale,
+             strength=0.5,
+             generator=torch.manual_seed(69),
+             num_images_per_prompt=num_images,
+         ).images
+
+     grid = make_image_grid(images, rows=1, cols=num_images)
+
+     # Save the generated grid to a BytesIO object
+     img_byte_arr = io.BytesIO()
+     grid.save(img_byte_arr, format='PNG')
+     img_byte_arr.seek(0)
+
+     return send_file(img_byte_arr, mimetype='image/png')
+
+ if __name__ == '__main__':
+     app.run(debug=True)
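
For context, a request against the /generate route added above would look roughly like the client sketch below. It assumes the requests library, a local sketch.png, and the Flask dev server on http://localhost:5000; all of these names are illustrative and not part of the commit.

import requests

# Minimal client sketch for the multipart /generate endpoint
# (hypothetical host and file name; adjust to your deployment).
with open('sketch.png', 'rb') as f:
    resp = requests.post(
        'http://localhost:5000/generate',
        files={'image': f},
        data={'prompt': 'fleece hoodie, front zip, high quality, photo',
              'guidance_scale': 7,
              'num_images': 2},
    )

# The route returns a single PNG grid of all generated images.
with open('grid.png', 'wb') as out:
    out.write(resp.content)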
main.py CHANGED
@@ -1,10 +1,11 @@
+ from diffusers import AutoPipelineForImage2Image
+ import torch
  import os
+ import numpy as np
+ from PIL import Image
+ from diffusers.utils import load_image, make_image_grid
  from flask import Flask, request, jsonify, send_file
  from flask_cors import CORS
- from diffusers import AutoPipelineForImage2Image
- from diffusers.utils import load_image, make_image_grid
- from PIL import Image
- import torch
  import io

  # Set environment variable to avoid fragmentation
@@ -16,29 +17,40 @@ torch.cuda.empty_cache()
  app = Flask(__name__)
  CORS(app)

+ print('loading models...')
  # Load the image-to-image pipeline from Hugging Face
  pipe = AutoPipelineForImage2Image.from_pretrained("RunDiffusion/Juggernaut-X-v10", torch_dtype=torch.float16).to("cuda")
  pipe.enable_xformers_memory_efficient_attention()
  pipe.enable_vae_tiling()  # Improve performance on large images
  pipe.enable_vae_slicing()  # Improve performance on large batches
+ print('loaded models...')

  @app.route('/')
  def hello():
      return {"Goes Wrong": "Keeping it real"}

- @app.route('/generate', methods=['POST'])
- def generate():
-     if 'image' not in request.files:
-         return jsonify({"error": "No image file provided"}), 400
+ @app.route('/run_inference', methods=['POST'])
+ def run_inference():
+     data = request.get_json()
+
+     if 'url' not in data:
+         return jsonify({"error": "No imageurl provided"}), 400

-     image_file = request.files['image']
-     prompt = request.form.get('prompt', 'fleece hoodie, front zip, abstract pattern, GAP logo, high quality, photo')
-     negative_prompt = request.form.get('negative_prompt', 'low quality, bad quality, sketches, hanger')
-     guidance_scale = float(request.form.get('guidance_scale', 7))
-     num_images = int(request.form.get('num_images', 2))
+     # base64_image = data['base64_image']
+     prompt = data.get('prompt', 'fleece hoodie, front zip, abstract pattern, GAP logo, high quality, photo')
+     negative_prompt = data.get('negative_prompt', 'low quality, bad quality, sketches, hanger')
+     guidance_scale = float(data.get('guidance_scale', 7))
+     num_images = int(data.get('num_images', 2))
+
+     url = data.get('url', 'https://storage.googleapis.com/sketch-bucket/dresstest2.PNG')
+     sketch = load_image(url)
+     print(f'Loaded image URL: {url}')
+
+     # testing
+     # prompt = "long waist dress, puffed sleeves, fringes on sleeve and hem, high quality, photo"
+     # negative_prompt = "low quality, bad quality, sketches, hanger"
+     # guidance_scale = 7

-     sketch = Image.open(image_file)
-
      with torch.inference_mode():
          images = pipe(
              prompt=prompt,
@@ -52,7 +64,8 @@ def generate():
      ).images

      grid = make_image_grid(images, rows=1, cols=num_images)
-
+     # images[0].save('output.png')
+
      # Save the generated grid to a BytesIO object
      img_byte_arr = io.BytesIO()
      grid.save(img_byte_arr, format='PNG')
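
With this change main.py fetches the sketch from a URL supplied in a JSON body instead of a multipart upload. A rough client sketch for the new /run_inference route follows; the host and output file name are assumptions, while the bucket URL is the default used in the server code above.

import requests

# Minimal client sketch for the JSON-based /run_inference endpoint
# (hypothetical host; the URL below is the server's default).
payload = {
    'url': 'https://storage.googleapis.com/sketch-bucket/dresstest2.PNG',
    'prompt': 'fleece hoodie, front zip, abstract pattern, high quality, photo',
    'guidance_scale': 7,
    'num_images': 2,
}
resp = requests.post('http://localhost:5000/run_inference', json=payload)

# The response body is the PNG grid built by make_image_grid.
with open('grid.png', 'wb') as out:
    out.write(resp.content)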
realify2.py ADDED
@@ -0,0 +1,70 @@
+ import os
+ from flask import Flask, request, jsonify, send_file
+ from flask_cors import CORS
+ from diffusers import AutoPipelineForImage2Image
+ from diffusers.utils import make_image_grid
+ from PIL import Image
+ import torch
+ import io
+ import base64
+
+ # Set environment variable to avoid fragmentation
+ os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'
+
+ # Clear any unused GPU memory
+ torch.cuda.empty_cache()
+
+ app = Flask(__name__)
+ CORS(app)
+
+ # Load the image-to-image pipeline from Hugging Face
+ pipe = AutoPipelineForImage2Image.from_pretrained("RunDiffusion/Juggernaut-X-v10", torch_dtype=torch.float16).to("cuda")
+ pipe.enable_xformers_memory_efficient_attention()
+ pipe.enable_vae_tiling()  # Improve performance on large images
+ pipe.enable_vae_slicing()  # Improve performance on large batches
+ print('loaded models...')
+
+ @app.route('/')
+ def hello():
+     return {"Goes Wrong": "Keeping it real"}
+
+ @app.route('/run_inference', methods=['POST'])
+ def run_inference():
+     data = request.get_json()
+
+     if 'base64_image' not in data:
+         return jsonify({"error": "No base64 image data provided"}), 400
+
+     base64_image = data['base64_image']
+     prompt = data.get('prompt', 'fleece hoodie, front zip, abstract pattern, GAP logo, high quality, photo')
+     negative_prompt = data.get('negative_prompt', 'low quality, bad quality, sketches, hanger')
+     guidance_scale = float(data.get('guidance_scale', 7))
+     num_images = int(data.get('num_images', 2))
+
+     # Decode the base64 image
+     image_data = base64.b64decode(base64_image)
+     sketch = Image.open(io.BytesIO(image_data))
+
+     with torch.inference_mode():
+         images = pipe(
+             prompt=prompt,
+             negative_prompt=negative_prompt,
+             image=sketch,
+             num_inference_steps=35,
+             guidance_scale=guidance_scale,
+             strength=0.5,
+             generator=torch.manual_seed(69),
+             num_images_per_prompt=num_images,
+         ).images
+
+     grid = make_image_grid(images, rows=1, cols=num_images)
+
+     # Save the generated grid to a BytesIO object
+     img_byte_arr = io.BytesIO()
+     grid.save(img_byte_arr, format='PNG')
+     img_byte_arr.seek(0)
+
+     return send_file(img_byte_arr, mimetype='image/png')
+
+ if __name__ == '__main__':
+     app.run(debug=True)
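
realify2.py is a variant of the same endpoint that expects the sketch as base64 in the JSON body rather than a URL. A rough client sketch, with the input file name and host as assumptions:

import base64
import requests

# Minimal client sketch for the base64 variant in realify2.py
# (hypothetical host and input file).
with open('sketch.png', 'rb') as f:
    encoded = base64.b64encode(f.read()).decode('utf-8')

resp = requests.post(
    'http://localhost:5000/run_inference',
    json={'base64_image': encoded, 'num_images': 2},
)

# As in the other scripts, the response is a PNG grid of the results.
with open('grid.png', 'wb') as out:
    out.write(resp.content)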