wifix199 committed on
Commit 73aa616
1 Parent(s): f337aed

Update app.py

Files changed (1)
  1. app.py +8 -11
app.py CHANGED
@@ -4,37 +4,34 @@ import gradio as gr
 from PIL import Image
 from io import BytesIO
 
-# Function to generate image from Hugging Face API
+# Function to generate image from the Hugging Face API
 def generate_image(prompt):
     API_URL = "https://api-inference.huggingface.co/models/prompthero/openjourney"
-    API_TOKEN = os.getenv("HF_READ_TOKEN") # Make sure the token is in your environment
+    API_TOKEN = os.getenv("HF_READ_TOKEN") # Ensure the token is set in your environment
     headers = {"Authorization": f"Bearer {API_TOKEN}"}
 
     payload = {
-        "inputs": prompt,
+        "inputs": prompt
     }
 
     # Call the Hugging Face API to generate the image
     response = requests.post(API_URL, headers=headers, json=payload)
 
-    # Check if the response was successful
+    # Check if the request was successful
     if response.status_code != 200:
         return f"Error: {response.status_code}, {response.text}"
 
-    # Ensure the response contains an image by loading it into PIL
-    try:
-        image = Image.open(BytesIO(response.content))
-    except Exception as e:
-        return f"Error processing image: {str(e)}"
+    # Convert the response content into a PIL image
+    image = Image.open(BytesIO(response.content))
 
-    return image # Return the PIL image object
+    return image # Return the image to Gradio
 
 # Define the chatbot function to return the generated image
 def chatbot(prompt):
     image = generate_image(prompt)
     return image
 
-# Create the Gradio interface
+# Create the Gradio interface with the same UI/UX
 interface = gr.Interface(
     fn=chatbot,
     inputs="text",