SpyC0der77 committed on
Commit da28892 · verified · 1 Parent(s): 4abdc6c

Update app.py

Files changed (1)
  1. app.py +23 -38
app.py CHANGED
@@ -1,44 +1,29 @@
  import gradio as gr
- import torch
- import os
- import requests
- import io
- from PIL import Image
- API_URL = "https://api-inference.huggingface.co/models/EvanZhouDev/open-genmoji"
- headers = {"Authorization": "Bearer " + os.getenv("HUGGINGFACE_API_TOKEN")}
- def infer(prompt):
-     def query(payload):
-         response = requests.post(API_URL, headers=headers, json=payload)
-         return response.content
-     image_bytes = query({
-         "inputs": prompt,
-     })
-
- with gr.Blocks() as demo:
-     with gr.Column(elem_id="col-container"):
-         gr.Markdown(" # Text-to-Image Gradio Template")
-
-         with gr.Row():
-             prompt = gr.Text(
-                 label="Prompt",
-                 show_label=False,
-                 max_lines=1,
-                 placeholder="Enter your prompt",
-                 container=False,
-             )
-             run_button = gr.Button("Run", scale=0, variant="primary")
-         result = gr.Image(label="Result", show_label=False)
-
-     # Run inference when run_button is clicked
-     run_button.click(
-         infer,
-         inputs=[
-             prompt
-         ],
-         outputs=[result],
-     )
-
  if __name__ == "__main__":
-     demo.launch()
 
  import gradio as gr
+ import os
+ from huggingface_hub import InferenceClient
+
+ # Inference client for the hosted open-genmoji model; the API token is read from the environment
+ client = InferenceClient("EvanZhouDev/open-genmoji", token=os.getenv("HUGGINGFACE_API_TOKEN"))
+
+ # Generate an image for a text prompt; text_to_image returns a PIL.Image object
+ def process(prompt):
+     image = client.text_to_image(prompt)
+     return image
+
+ # Create the Gradio Blocks app
+ with gr.Blocks() as demo:
+     # Textbox for the input prompt
+     prompt_input = gr.Textbox(label="Enter a prompt")
+     # Image component for the generated output
+     image_output = gr.Image(label="Generated Image")
+     # Button that triggers image generation
+     generate_button = gr.Button("Generate Image")
+
+     # Run process when the button is clicked
+     generate_button.click(fn=process, inputs=prompt_input, outputs=image_output)
+
+ # Launch the interface
  if __name__ == "__main__":
+     demo.launch()
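
For reference, the text_to_image call introduced above can be smoke-tested outside Gradio. This is a minimal sketch, not part of the commit; it assumes HUGGINGFACE_API_TOKEN is set in the environment and uses a made-up example prompt:

import os
from huggingface_hub import InferenceClient

# Same client configuration as the updated app.py
client = InferenceClient("EvanZhouDev/open-genmoji", token=os.getenv("HUGGINGFACE_API_TOKEN"))

# text_to_image returns a PIL.Image.Image; save it to disk to inspect the result
image = client.text_to_image("a smiling cactus emoji")  # illustrative prompt only
image.save("genmoji.png")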