Geek7 committed
Commit d7f2d01 · verified · 1 Parent(s): 5766b0d

Update app.py

Files changed (1):
  1. app.py +10 -42
app.py CHANGED
@@ -1,63 +1,31 @@
-from flask import Flask, request
-from flask_cors import CORS
 import os
 from huggingface_hub import InferenceClient
-from io import BytesIO
 from PIL import Image
-import gradio as gr  # Import Gradio
-
-# Initialize the Flask app
-app = Flask(__name__)
-CORS(app)  # Enable CORS for all routes
+import gradio as gr  # Import Gradio for the UI
 
 # Initialize the InferenceClient with your Hugging Face token
 HF_TOKEN = os.environ.get("HF_TOKEN")  # Ensure to set your Hugging Face token in the environment
-client = InferenceClient(token=HF_TOKEN)
+client = InferenceClient(token=HF_TOKEN)  # Initialize the client
 
 # Function to generate an image from a text prompt
-def generate_image(prompt, seed=1, model="prompthero/openjourney-v4"):
+def generate_image(prompt, negative_prompt=None, model=None):
     try:
         # Generate the image using Hugging Face's inference API
-        result_image = client.text_to_image(prompt=prompt, seed=seed, model=model)
-
-        # Convert the result to a PIL Image
-        if isinstance(result_image, bytes):
-            # If the result is in bytes format
-            image = Image.open(BytesIO(result_image))
-        else:
-            # If the result is in another format, handle accordingly
-            raise ValueError("Received image in an unexpected format")
-
-        return image
+        image = client.text_to_image(prompt=prompt, negative_prompt=negative_prompt, model=model)
+        return image  # Return the generated image
     except Exception as e:
         print(f"Error generating image: {str(e)}")
         return None
 
-# Gradio interface function
-def gradio_interface(prompt, seed, model_name):
-    image = generate_image(prompt, seed, model_name)
-
-    if image:
-        img_byte_arr = BytesIO()
-        image.save(img_byte_arr, format='PNG')  # Convert the image to PNG
-        img_byte_arr.seek(0)  # Move to the start of the byte stream
-        return img_byte_arr  # Return the image as bytes
-    else:
-        return "Failed to generate image"
-
 # Set up the Gradio interface
 gr.Interface(
-    fn=gradio_interface,
+    fn=generate_image,
     inputs=[
         gr.Textbox(label="Prompt", placeholder="Enter a text prompt", lines=2),
-        gr.Number(label="Seed", value=1, precision=0),
-        gr.Textbox(label="Model Name", value="prompthero/openjourney-v4", placeholder="Enter model name"),
+        gr.Textbox(label="Negative Prompt (Optional)", placeholder="Enter negative prompt", lines=2),
+        gr.Textbox(label="Model Name", placeholder="Enter model name", value="stabilityai/stable-diffusion-2-1"),
     ],
     outputs="image",
     title="Image Generation with Hugging Face",
-    description="Enter a prompt, seed, and model name to generate an image."
-).launch(share=True)  # Launch the Gradio interface
-
-# Add this block to make sure your app runs when called
-if __name__ == "__main__":
-    app.run(host='0.0.0.0', port=7860)  # Run directly if needed for testing
+    description="Enter a prompt, optional negative prompt, and model name to generate an image.",
+).launch()  # Launch the Gradio interface
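
The simplification works because `InferenceClient.text_to_image` already returns a `PIL.Image.Image`, so the old bytes/`BytesIO` decoding path and its `ValueError` fallback were dead code. A minimal standalone sketch of the new call path, assuming `HF_TOKEN` is set and the model is reachable via the Inference API (the prompt strings and output filename are illustrative):

```python
import os

from huggingface_hub import InferenceClient

# text_to_image returns a PIL.Image.Image directly, so no BytesIO
# decoding is needed; the result is usable as-is by Gradio's "image" output.
client = InferenceClient(token=os.environ.get("HF_TOKEN"))

image = client.text_to_image(
    prompt="a watercolor fox in a snowy forest",  # illustrative prompt
    negative_prompt="blurry, low quality",        # illustrative negative prompt
    model="stabilityai/stable-diffusion-2-1",
)
image.save("output.png")  # save the generated PIL image to disk
```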
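Note that removing the Flask `app.run(...)` block also drops the explicit host/port binding, and `share=True` was removed from `launch()` (share links are unnecessary when the app is already hosted). If an explicit binding is still needed, for example when running in a container that expects the server on port 7860, Gradio's `launch()` accepts equivalent parameters; a sketch with a hypothetical echo interface standing in for the one in app.py:

```python
import gradio as gr

# Hypothetical stand-in interface; the point is the launch() call,
# which replaces Flask's app.run(host='0.0.0.0', port=7860).
demo = gr.Interface(fn=lambda text: text, inputs="text", outputs="text")
demo.launch(server_name="0.0.0.0", server_port=7860)
```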