Geek7 committed
Commit a322656 · verified
1 Parent(s): 6d2d42c

Update app.py

Files changed (1):
  app.py  +78 -23
app.py CHANGED

@@ -4,6 +4,7 @@ import os
 from huggingface_hub import InferenceClient
 from io import BytesIO
 from PIL import Image
+import base64

 # Initialize the Flask app
 app = Flask(__name__)
@@ -15,11 +16,10 @@ client = InferenceClient(token=HF_TOKEN)

 @app.route('/')
 def home():
-    return "Welcome to the Image Background Remover!"
-
+    return "Welcome to the Image Generation Service!"

 # Function to generate an image from a text prompt
-def generate_image(prompt, negative_prompt=None, height=512, width=512, model="stabilityai/stable-diffusion-2-1", num_inference_steps=50, guidance_scale=7.5, seed=None):
+def generate_text_to_image(prompt, negative_prompt=None, height=512, width=512, model="stabilityai/stable-diffusion-2-1", num_inference_steps=50, guidance_scale=7.5, seed=None):
     try:
         # Generate the image using Hugging Face's inference API with additional parameters
         image = client.text_to_image(
@@ -37,46 +37,101 @@ def generate_image(prompt, negative_prompt=None, height=512, width=512, model="s
         print(f"Error generating image: {str(e)}")
         return None

+# Function to modify an existing image using image-to-image processing
+def generate_image_to_image(input_image, prompt, height, width, num_inference_steps, guidance_scale, seed=None):
+    try:
+        # Hard-coded model for image-to-image processing
+        hardcoded_model = "stabilityai/sdxl-1.0"
+
+        # Convert the base64-encoded input image to a PIL image
+        decoded_image = Image.open(BytesIO(base64.b64decode(input_image)))
+
+        # Modify the image using image-to-image transformation
+        modified_image = client.image_to_image(
+            image=decoded_image,
+            prompt=prompt,
+            model=hardcoded_model,
+            height=height,
+            width=width,
+            num_inference_steps=num_inference_steps,
+            guidance_scale=guidance_scale,
+            seed=seed
+        )
+        return modified_image
+    except Exception as e:
+        print(f"Error generating image-to-image: {str(e)}")
+        return None
+
 # Flask route for the API endpoint to generate an image
 @app.route('/generate_image', methods=['POST'])
 def generate_api():
     data = request.get_json()

-    # Extract required fields from the request
+    # Extract common fields
     prompt = data.get('prompt', '')
     negative_prompt = data.get('negative_prompt', None)
-    height = data.get('height', 1024)  # Default height
-    width = data.get('width', 720)  # Default width
-    num_inference_steps = data.get('num_inference_steps', 50)  # Default number of inference steps
-    guidance_scale = data.get('guidance_scale', 7.5)  # Default guidance scale
-    model_name = data.get('model', 'stabilityai/stable-diffusion-2-1')  # Default model
-    seed = data.get('seed', None)  # Seed for reproducibility, default is None
+    num_inference_steps = data.get('num_inference_steps', 50)
+    guidance_scale = data.get('guidance_scale', 7.5)
+    model_name = data.get('model', 'stabilityai/stable-diffusion-2-1')  # Model specified by the user for text-to-image
+    seed = data.get('seed', None)

     if not prompt:
         return jsonify({"error": "Prompt is required"}), 400

-    try:
-        # Call the generate_image function with the provided parameters
-        image = generate_image(prompt, negative_prompt, height, width, model_name, num_inference_steps, guidance_scale, seed)
+    # Check if the request contains an image for image-to-image processing
+    input_image = data.get('image', None)  # Expecting a base64-encoded image string
+    if input_image:
+        # Extract parameters specific to image-to-image
+        height = data.get('height', 1024)
+        width = data.get('width', 720)
+
+        # Call the image-to-image function
+        final_image = generate_image_to_image(
+            input_image, prompt, height, width, num_inference_steps, guidance_scale, seed
+        )

-        if image:
-            # Save the image to a BytesIO object
+        if final_image:
+            # Save the modified image to a BytesIO object
             img_byte_arr = BytesIO()
-            image.save(img_byte_arr, format='PNG')  # Convert the image to PNG
-            img_byte_arr.seek(0)  # Move to the start of the byte stream
+            final_image.save(img_byte_arr, format='PNG')
+            img_byte_arr.seek(0)
+
+            # Send the modified image as a response
+            return send_file(
+                img_byte_arr,
+                mimetype='image/png',
+                as_attachment=False,
+                download_name='final_image.png'
+            )
+        else:
+            return jsonify({"error": "Failed to generate image-to-image."}), 500
+
+    # If no image is provided, proceed with text-to-image generation
+    else:
+        # Default height and width for text-to-image
+        height = data.get('height', 512)
+        width = data.get('width', 512)
+
+        # Call the text-to-image function
+        generated_image = generate_text_to_image(
+            prompt, negative_prompt, height, width, model_name, num_inference_steps, guidance_scale, seed
+        )
+
+        if generated_image:
+            # Save the generated image to a BytesIO object
+            img_byte_arr = BytesIO()
+            generated_image.save(img_byte_arr, format='PNG')
+            img_byte_arr.seek(0)

             # Send the generated image as a response
             return send_file(
                 img_byte_arr,
                 mimetype='image/png',
-                as_attachment=False,  # Send the file as an attachment
-                download_name='generated_image.png'  # The file name for download
+                as_attachment=False,
+                download_name='generated_image.png'
             )
         else:
-            return jsonify({"error": "Failed to generate image"}), 500
-    except Exception as e:
-        print(f"Error in generate_api: {str(e)}")  # Log the error
-        return jsonify({"error": str(e)}), 500
+            return jsonify({"error": "Failed to generate text-to-image."}), 500

 # Add this block to make sure your app runs when called
 if __name__ == "__main__":
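
For reference, a minimal client-side sketch of how the updated /generate_image endpoint might be exercised after this commit. The host, port, and file names below are assumptions and are not part of the diff; what the diff does show is that omitting the "image" field takes the text-to-image branch, while sending a base64-encoded "image" string takes the new image-to-image branch, and either branch streams back a PNG.

# Minimal client sketch for the updated /generate_image endpoint.
# Assumptions (not in this diff): the app is reachable at http://localhost:5000
# and "input.png" is any local image to use for the image-to-image branch.
import base64
import requests

BASE_URL = "http://localhost:5000"  # assumed host and port

# Text-to-image: no "image" field, so the server takes the text-to-image branch.
resp = requests.post(
    f"{BASE_URL}/generate_image",
    json={
        "prompt": "a watercolor painting of a lighthouse at dusk",
        "negative_prompt": "blurry, low quality",
        "num_inference_steps": 30,
        "guidance_scale": 7.5,
        "seed": 42,
    },
)
resp.raise_for_status()
with open("generated_image.png", "wb") as f:
    f.write(resp.content)  # the endpoint returns the PNG bytes directly

# Image-to-image: a base64-encoded "image" field triggers the new branch.
with open("input.png", "rb") as f:
    encoded = base64.b64encode(f.read()).decode("utf-8")

resp = requests.post(
    f"{BASE_URL}/generate_image",
    json={
        "prompt": "turn the scene into a snowy winter landscape",
        "image": encoded,
        "height": 1024,
        "width": 720,
    },
)
resp.raise_for_status()
with open("final_image.png", "wb") as f:
    f.write(resp.content)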