Update app.py
app.py CHANGED
@@ -4,37 +4,34 @@ import gradio as gr
 from PIL import Image
 from io import BytesIO
 
-# Function to generate image from Hugging Face API
+# Function to generate image from the Hugging Face API
 def generate_image(prompt):
     API_URL = "https://api-inference.huggingface.co/models/prompthero/openjourney"
-    API_TOKEN = os.getenv("HF_READ_TOKEN")  #
+    API_TOKEN = os.getenv("HF_READ_TOKEN")  # Ensure the token is set in your environment
     headers = {"Authorization": f"Bearer {API_TOKEN}"}
 
     payload = {
-        "inputs": prompt
+        "inputs": prompt
     }
 
     # Call the Hugging Face API to generate the image
     response = requests.post(API_URL, headers=headers, json=payload)
 
-    # Check if the
+    # Check if the request was successful
     if response.status_code != 200:
         return f"Error: {response.status_code}, {response.text}"
 
-    #
-    try:
-        image = Image.open(BytesIO(response.content))
-    except Exception as e:
-        return f"Error processing image: {str(e)}"
+    # Convert the response content into a PIL image
+    image = Image.open(BytesIO(response.content))
 
-    return image  # Return the
+    return image  # Return the image to Gradio
 
 # Define the chatbot function to return the generated image
 def chatbot(prompt):
     image = generate_image(prompt)
     return image
 
-# Create the Gradio interface
+# Create the Gradio interface with the same UI/UX
 interface = gr.Interface(
     fn=chatbot,
     inputs="text",
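
The hunk starts at line 4 and is cut off mid-call, so the file's opening imports and the remaining gr.Interface(...) arguments are not visible here. Below is a minimal sketch of how the full app.py could look after this commit, assuming top-level imports of os, requests, and gradio, an "image" output component, and a plain launch(); those assumed parts are marked in comments and are for illustration only, not taken from the commit.

# Minimal sketch of app.py after this commit (assumed, not verbatim from the repo)
import os
from io import BytesIO

import gradio as gr
import requests
from PIL import Image


# Function to generate an image from the Hugging Face Inference API (as in the diff)
def generate_image(prompt):
    API_URL = "https://api-inference.huggingface.co/models/prompthero/openjourney"
    API_TOKEN = os.getenv("HF_READ_TOKEN")  # Ensure the token is set in your environment
    headers = {"Authorization": f"Bearer {API_TOKEN}"}

    payload = {"inputs": prompt}

    # Call the Hugging Face API to generate the image
    response = requests.post(API_URL, headers=headers, json=payload)

    # Check if the request was successful
    if response.status_code != 200:
        return f"Error: {response.status_code}, {response.text}"

    # Convert the response content (raw image bytes) into a PIL image
    return Image.open(BytesIO(response.content))


# Wrapper that Gradio calls with the user's prompt (as in the diff)
def chatbot(prompt):
    return generate_image(prompt)


# Assumed completion of the gr.Interface(...) call that the hunk truncates
interface = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="image",  # assumption: an Image component to display the returned PIL image
)

if __name__ == "__main__":
    interface.launch()  # assumption: the Space launches the interface directly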