Geek7 committed
Commit d5cda1c · verified · 1 Parent(s): 088744c

Update app.py

Files changed (1)
  1. app.py +46 -23
app.py CHANGED
@@ -26,14 +26,14 @@ def is_prompt_explicit(prompt):
     return False
 
 # Function to generate an image from a text prompt
-def generate_image(prompt, negative_prompt=None, height=512, width=512, model="stabilityai/stable-diffusion-2-1", num_inference_steps=50, guidance_scale=7.5, seed=None):
+def generate_image(prompt, negative_prompt=None, height=512, width=512, model="stabilityai/sd-3.5", num_inference_steps=50, guidance_scale=7.5, seed=None):
     try:
         # Generate the image using Hugging Face's inference API with additional parameters
         image = client.text_to_image(
-            prompt=prompt,
-            negative_prompt=negative_prompt,
-            height=height,
-            width=width,
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            height=height,
+            width=width,
             model=model,
             num_inference_steps=num_inference_steps,  # Control the number of inference steps
             guidance_scale=guidance_scale,  # Control the guidance scale
@@ -44,7 +44,23 @@ def generate_image(prompt, negative_prompt=None, height=512, width=512, model="s
         print(f"Error generating image: {str(e)}")
         return None
 
-# Flask route for the API endpoint to generate an image
+# Function to refine an image using the refiner model
+def refine_image(image, prompt, negative_prompt=None, model="stabilityai/stable-diffusion-xl-refiner-1.0", num_inference_steps=50, guidance_scale=7.5):
+    try:
+        # Use Hugging Face's image-to-image API to refine the image
+        refined_image = client.image_to_image(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            image=image,
+            model=model,
+            num_inference_steps=num_inference_steps,
+            guidance_scale=guidance_scale
+        )
+        return refined_image
+    except Exception as e:
+        print(f"Error refining image: {str(e)}")
+        return None
+
 @app.route('/generate_image', methods=['POST'])
 def generate_api():
     data = request.get_json()
@@ -56,7 +72,8 @@ def generate_api():
         width = data.get('width', 720)  # Default width
         num_inference_steps = data.get('num_inference_steps', 50)  # Default number of inference steps
         guidance_scale = data.get('guidance_scale', 7.5)  # Default guidance scale
-        model_name = data.get('model', 'stabilityai/stable-diffusion-2-1')  # Default model
+        model_name = data.get('model', 'stabilityai/sd-3.5')  # Base model
+        refiner_model_name = 'stabilityai/sd-xl-refiner-1.0'  # Refiner model
         seed = data.get('seed', None)  # Seed for reproducibility, default is None
 
         if not prompt:
@@ -73,24 +90,30 @@ def generate_api():
                 download_name='thinkgood.png'
            )
 
-        # Call the generate_image function with the provided parameters
-        image = generate_image(prompt, negative_prompt, height, width, model_name, num_inference_steps, guidance_scale, seed)
+        # Step 1: Generate the base image
+        base_image = generate_image(prompt, negative_prompt, height, width, model_name, num_inference_steps, guidance_scale, seed)
+
+        if not base_image:
+            return jsonify({"error": "Failed to generate base image"}), 500
 
-        if image:
-            # Save the image to a BytesIO object
-            img_byte_arr = BytesIO()
-            image.save(img_byte_arr, format='PNG')  # Convert the image to PNG
-            img_byte_arr.seek(0)  # Move to the start of the byte stream
+        # Step 2: Refine the image with the refiner model
+        refined_image = refine_image(base_image, prompt, negative_prompt, refiner_model_name, num_inference_steps, guidance_scale)
+
+        if not refined_image:
+            return jsonify({"error": "Failed to refine image"}), 500
 
-            # Send the generated image as a response
-            return send_file(
-                img_byte_arr,
-                mimetype='image/png',
-                as_attachment=False,  # Send the file as an attachment
-                download_name='generated_image.png'  # The file name for download
-            )
-        else:
-            return jsonify({"error": "Failed to generate image"}), 500
+        # Save the refined image to a BytesIO object
+        img_byte_arr = BytesIO()
+        refined_image.save(img_byte_arr, format='PNG')  # Convert the image to PNG
+        img_byte_arr.seek(0)  # Move to the start of the byte stream
+
+        # Send the refined image as a response
+        return send_file(
+            img_byte_arr,
+            mimetype='image/png',
+            as_attachment=False,  # Send the file inline
+            download_name='refined_image.png'  # File name for download
+        )
     except Exception as e:
         print(f"Error in generate_api: {str(e)}")  # Log the error
         return jsonify({"error": str(e)}), 500
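The hunks above call client.text_to_image and client.image_to_image, but the creation of client (and the Flask/IO imports the route relies on) sits outside the changed lines. A minimal sketch of the assumed preamble of app.py, using huggingface_hub's InferenceClient with a token read from an HF_TOKEN environment variable; the variable name and the client construction are assumptions, not part of this commit:

# Assumed preamble of app.py (not shown in this diff); HF_TOKEN is illustrative only.
import os
from io import BytesIO
from flask import Flask, request, jsonify, send_file
from huggingface_hub import InferenceClient

app = Flask(__name__)
client = InferenceClient(token=os.environ.get("HF_TOKEN"))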
 
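The two-stage flow introduced here (base generation, then refinement) can be exercised outside Flask as a quick sanity check. The sketch below reuses the client assumed above, copies the model IDs from the diff verbatim without verifying that they resolve on the Hub, and passes the base image to image_to_image as PNG bytes, the most conservative input type for that call:

from io import BytesIO

prompt = "a lighthouse at dawn, volumetric light"

# Stage 1: base image, mirroring generate_image() in the diff.
base = client.text_to_image(
    prompt=prompt,
    model="stabilityai/sd-3.5",  # ID copied from the diff, not verified here
    num_inference_steps=50,
    guidance_scale=7.5,
)

# Stage 2: refinement, mirroring refine_image() in the diff.
buf = BytesIO()
base.save(buf, format="PNG")  # text_to_image returns a PIL image
refined = client.image_to_image(
    image=buf.getvalue(),
    prompt=prompt,
    model="stabilityai/sd-xl-refiner-1.0",  # ID copied from the diff, not verified here
    num_inference_steps=50,
    guidance_scale=7.5,
)
refined.save("refined.png")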
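With this change, a successful POST to /generate_image returns the refined PNG directly. An illustrative call with the requests library; the URL is a placeholder for wherever the app is served, and the JSON keys mirror the data.get(...) fields read in generate_api (negative_prompt and height are assumed to be read the same way as the fields visible in the hunks):

import requests

resp = requests.post(
    "http://localhost:7860/generate_image",  # placeholder host/port
    json={
        "prompt": "a watercolor fox in a snowy forest",
        "negative_prompt": "blurry, low quality",
        "height": 720,
        "width": 720,
        "num_inference_steps": 50,
        "guidance_scale": 7.5,
        "seed": 42,
    },
    timeout=600,
)
resp.raise_for_status()
with open("refined_image.png", "wb") as f:
    f.write(resp.content)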