import os
import requests
import gradio as gr
from PIL import Image
from io import BytesIO

# Function to generate an image via the Hugging Face Inference API
def generate_image(prompt):
    API_URL = "https://api-inference.huggingface.co/models/prompthero/openjourney"
    API_TOKEN = os.getenv("HF_READ_TOKEN")  # Ensure the token is set in your environment
    headers = {"Authorization": f"Bearer {API_TOKEN}"}
    payload = {"inputs": prompt}

    # Call the Hugging Face Inference API to generate the image
    response = requests.post(API_URL, headers=headers, json=payload)

    # If the request failed, surface the error in the Gradio UI instead of
    # returning a string to an image output (which would not render)
    if response.status_code != 200:
        raise gr.Error(f"Error {response.status_code}: {response.text}")

    # Convert the response bytes into a PIL image
    image = Image.open(BytesIO(response.content))
    return image  # Return the image to Gradio
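
# --- Optional sketch (not part of the original Space) -----------------------
# When the hosted model is cold, the Inference API can return HTTP 503 while it
# loads. The API accepts an "options" object with a "wait_for_model" flag that
# holds the request until the model is ready. The helper below is a hedged
# variant of generate_image() illustrating that flag; its name is hypothetical
# and it is not wired into the Gradio UI.
def generate_image_wait(prompt):
    API_URL = "https://api-inference.huggingface.co/models/prompthero/openjourney"
    headers = {"Authorization": f"Bearer {os.getenv('HF_READ_TOKEN')}"}
    payload = {
        "inputs": prompt,
        "options": {"wait_for_model": True},  # wait instead of failing on a cold model
    }
    response = requests.post(API_URL, headers=headers, json=payload)
    if response.status_code != 200:
        raise gr.Error(f"Error {response.status_code}: {response.text}")
    return Image.open(BytesIO(response.content))
# -----------------------------------------------------------------------------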

# Define the chatbot function to return the generated image
def chatbot(prompt):
    image = generate_image(prompt)
    return image

# Create the Gradio interface with the same UI/UX
interface = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="image",
    title="prompthero/openjourney",
    description="Enter a text prompt and get an AI-generated image."
)

# Launch the interface
interface.launch()
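
# Usage note (assumption, not part of the original Space): to run this app
# locally, export a Hugging Face read token before launching, for example
#   export HF_READ_TOKEN=<your token>
#   python app.py
# and open the local URL that Gradio prints. On Spaces, add HF_READ_TOKEN as a
# secret in the Space settings so it is available via os.getenv().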