Geek7 committed
Commit 26c97ce
1 parent: 86c8bd9

Update myapp.py

Files changed (1): myapp.py (+16 -15)
myapp.py CHANGED
@@ -1,9 +1,9 @@
-from flask import Flask, request, jsonify, send_file
+from flask import Flask, request, jsonify, send_file, render_template
 from flask_cors import CORS
 import os
 from huggingface_hub import InferenceClient
 from io import BytesIO
-from PIL import Image # Importing Pillow for image processing
+from PIL import Image
 
 # Initialize the Flask app
 myapp = Flask(__name__)
@@ -13,12 +13,8 @@ CORS(myapp) # Enable CORS for all routes
 HF_TOKEN = os.environ.get("HF_TOKEN") # Ensure to set your Hugging Face token in the environment
 client = InferenceClient(token=HF_TOKEN)
 
-@myapp.route('/')
-def home():
-    return "Welcome to the Image Background Remover!"
-
-# Function to generate an image from a prompt using the specified model
-def generate_image(prompt, seed=1, model="prompthero/openjourney-v4"): # Default model if none provided
+# Function to generate an image from a text prompt
+def generate_image(prompt, seed=1, model="prompthero/openjourney-v4"):
     try:
         # Generate the image using Hugging Face's inference API
         result_image = client.text_to_image(prompt=prompt, seed=seed, model=model)
@@ -27,7 +23,7 @@ def generate_image(prompt, seed=1, model="prompthero/openjourney-v4"): # Defaul
         print(f"Error generating image: {str(e)}")
         return None
 
-# Flask route for the API endpoint
+# Flask route for the API endpoint to generate an image based on a text prompt
 @myapp.route('/generate_image', methods=['POST'])
 def generate_api():
     data = request.get_json()
@@ -35,24 +31,24 @@ def generate_api():
     # Extract required fields from the request
     prompt = data.get('prompt', '')
     seed = data.get('seed', 1)
-    model_name = data.get('model', 'prompthero/openjourney-v4') # Use the provided model name or a default one
+    model_name = data.get('model', 'prompthero/openjourney-v4') # Default model
 
     if not prompt:
         return jsonify({"error": "Prompt is required"}), 400
 
     try:
-        # Call the generate_image function with the dynamically provided model name
+        # Call the generate_image function with the custom model name
         image = generate_image(prompt, seed, model_name)
 
         if image:
             # Save the image to a BytesIO object
-            image_bytes = BytesIO()
-            image.save(image_bytes, format='PNG')
-            image_bytes.seek(0) # Go to the start of the byte stream
+            img_byte_arr = BytesIO()
+            image.save(img_byte_arr, format='PNG') # Convert the image to PNG
+            img_byte_arr.seek(0) # Move to the start of the byte stream
 
             # Send the generated image as an attachment
             return send_file(
-                image_bytes,
+                img_byte_arr,
                 mimetype='image/png',
                 as_attachment=True, # Send the file as an attachment
                 download_name='generated_image.png' # The file name for download
@@ -63,6 +59,11 @@ def generate_api():
         print(f"Error in generate_api: {str(e)}") # Log the error
         return jsonify({"error": str(e)}), 500
 
+# Flask route for the UI
+@myapp.route('/')
+def index():
+    return render_template('index.html') # Serve the HTML page
+
 # Add this block to make sure your app runs when called
 if __name__ == "__main__":
     myapp.run(host='0.0.0.0', port=7860) # Run directly if needed for testing
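
After this commit the app exposes two routes: POST /generate_image, which returns the generated PNG as an attachment, and GET /, which serves templates/index.html via render_template (the template itself is not part of this diff). Below is a minimal client sketch for the POST endpoint, assuming the app is running locally on port 7860 (as in the __main__ block) and the requests library is installed; the prompt, seed, and output filename are illustrative only.

import requests

# Hypothetical client for the /generate_image endpoint in this commit.
# Assumes myapp.py is running locally on port 7860; prompt, seed, and
# output filename below are illustrative, not part of the commit.
API_URL = "http://localhost:7860/generate_image"

payload = {
    "prompt": "a watercolor lighthouse at dawn",
    "seed": 42,
    "model": "prompthero/openjourney-v4",  # same default the endpoint falls back to
}

response = requests.post(API_URL, json=payload, timeout=120)

if response.ok:
    # The route streams the PNG back via send_file, so the body is raw image bytes.
    with open("generated_image.png", "wb") as f:
        f.write(response.content)
    print("Saved generated_image.png")
else:
    # Error responses are JSON, e.g. {"error": "Prompt is required"}.
    print("Request failed:", response.status_code, response.json().get("error"))

Note that the server still needs HF_TOKEN set in its environment for the InferenceClient call to succeed.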