"""Gradio demo: generate an image from a text prompt via the Hugging Face
Inference API (FLUX.1-dev model).

Requires the HUGGINGFACEHUB_API_TOKEN environment variable to be set to a
valid Hugging Face API token.
"""

import io
import os

import requests
import gradio as gr
from PIL import Image

API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"

# Fail fast with a clear message instead of a bare KeyError at import time.
API_KEY = os.environ.get("HUGGINGFACEHUB_API_TOKEN")
if not API_KEY:
    raise RuntimeError(
        "HUGGINGFACEHUB_API_TOKEN environment variable is not set; "
        "it is required to call the Hugging Face Inference API."
    )


def query_api(payload):
    """POST *payload* to the inference endpoint and return the raw response body.

    Args:
        payload: JSON-serializable dict, e.g. ``{"inputs": "<prompt>"}``.

    Returns:
        bytes: the raw response content (the generated image bytes).

    Raises:
        requests.HTTPError: if the API returns a non-2xx status.
        requests.Timeout: if the request exceeds the timeout.
    """
    headers = {"Authorization": f"Bearer {API_KEY}"}
    # Image generation can be slow; bound the wait so the UI cannot hang forever.
    response = requests.post(API_URL, headers=headers, json=payload, timeout=120)
    response.raise_for_status()
    return response.content


def generate_image(description, image_format, language):
    """Generate an image from *description* using the inference API.

    Args:
        description: text prompt describing the desired image.
        image_format: selected output format ("png", "jpg", "webp") —
            currently passed through unchanged to keep the dropdown state.
        language: selected UI language — also passed through unchanged.

    Returns:
        tuple: (PIL.Image.Image, image_format, language) — the extra values
        are echoed back to the dropdowns so their state is preserved.
    """
    image_bytes = query_api({"inputs": description})
    image = Image.open(io.BytesIO(image_bytes))
    return image, image_format, language


with gr.Blocks() as demo:
    # Prompt input for the image description.
    description_textarea = gr.Textbox(
        label="Describe the Image",
        placeholder="Enter a description of the image you want to generate",
    )
    # Output format selector.
    format_dropdown = gr.Dropdown(
        choices=["png", "jpg", "webp"], value="png", label="Output Format"
    )
    # Language selector.
    language_dropdown = gr.Dropdown(
        choices=["English", "Spanish", "French"], value="English", label="Language"
    )
    # type="pil" matches the PIL.Image returned by generate_image.
    image_output = gr.Image(type="pil", label="Generated Image")
    generate_button = gr.Button("Generate Image")

    # Wire the button: inputs are passed positionally to generate_image.
    generate_button.click(
        fn=generate_image,
        inputs=[description_textarea, format_dropdown, language_dropdown],
        outputs=[image_output, format_dropdown, language_dropdown],
    )

if __name__ == "__main__":
    demo.launch(show_error=True)