# NOTE(review): the three lines here ("Spaces:" / "Running" / "Running") were
# Hugging Face Spaces web-UI status text accidentally pasted into the source;
# replaced with this comment so the file is valid Python.
import os
import requests
import gradio as gr
from PIL import Image
from io import BytesIO
def generate_image(prompt):
    """Generate an image from *prompt* via the Hugging Face Inference API.

    Sends the prompt to the hosted prompthero/openjourney model and decodes
    the binary response with PIL.

    Args:
        prompt: Text description of the desired image.

    Returns:
        A ``PIL.Image.Image`` on success, or a human-readable error string
        on any failure (matching the existing caller contract, which passes
        the result straight to a Gradio "image" output).
    """
    API_URL = "https://api-inference.huggingface.co/models/prompthero/openjourney"
    API_TOKEN = os.getenv("HF_READ_TOKEN")  # Make sure the token is in your environment
    # Fail fast with a clear message instead of sending "Bearer None".
    if not API_TOKEN:
        return "Error: HF_READ_TOKEN environment variable is not set."
    headers = {"Authorization": f"Bearer {API_TOKEN}"}
    payload = {
        "inputs": prompt,
    }
    # Call the Hugging Face API to generate the image.
    # A timeout prevents the Gradio worker from hanging forever if the
    # inference endpoint stalls; connection errors are reported, not raised.
    try:
        response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
    except requests.RequestException as e:
        return f"Error contacting API: {e}"
    # Check if the response was successful (e.g. 503 while the model loads).
    if response.status_code != 200:
        return f"Error: {response.status_code}, {response.text}"
    # Ensure the response contains an image by loading it into PIL.
    try:
        image = Image.open(BytesIO(response.content))
    except Exception as e:
        return f"Error processing image: {str(e)}"
    return image  # Return the PIL image object
def chatbot(prompt):
    """Gradio handler: delegate the text prompt to the image generator.

    Returns whatever ``generate_image`` produces — a PIL image on success
    or an error string on failure.
    """
    return generate_image(prompt)
# Create the Gradio interface: a single text box in, a single image out.
interface = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="image",
    title="prompthero/openjourney",
    description="Enter a text prompt and get an AI-generated image.",
)

# Launch only when run as a script, so importing this module (e.g. for
# tests or reuse) does not start a server as a side effect.
if __name__ == "__main__":
    interface.launch()