import gradio as gr
from huggingface_hub import InferenceClient
import os
from datetime import datetime
import json


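# Inference API clients: Open Genmoji for image generation, Qwen2.5-72B for prompt refinement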
client = InferenceClient("EvanZhouDev/open-genmoji", token=os.getenv("HUGGINGFACE_API_TOKEN"))
llm = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
# Ensure the output directories exist (creates "outputs" and "outputs/images" if missing)
os.makedirs("outputs/images", exist_ok=True)

# Define the process function that takes a text prompt and returns an image
def process(prompt):
    print(prompt)
    messages = [
        {"role": "system", "content": "You are helping create a prompt for an Emoji generation image model. An emoji must be easily interpreted when small, so details must be exaggerated to be clear. Your goal is to use descriptions to achieve this.\n\nYou will receive a user description, and you must rephrase it to consist of short phrases separated by periods, adding detail to everything the user provides.\n\nAlso describe the color of all parts or components of the emoji. Unless otherwise specified by the user, do not describe people. Do not describe the background of the image. Your output should be in the format:\n\n```\nemoji of {description}. {addon phrases}. 3D lighting. no cast shadows.\n```\n\nThe description should be one sentence giving your interpretation of the emoji.\nThen, you may choose to add addon phrases. You must use the following in the given scenarios:\n\n- \"cute.\": If generating anything that's not an object, and also not a human\n- \"enlarged head in cartoon style.\": ONLY animals\n- \"head is turned towards viewer.\": ONLY humans or animals\n- \"detailed texture.\": ONLY objects\n\nFurther addon phrases may be added to ensure the clarity of the emoji."},
        {"role": "user", "content": prompt},
    ]

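    # Ask the LLM to rewrite the raw user prompt into the emoji-style format above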
    completion = llm.chat_completion(messages, max_tokens=100)

    # Extract the refined prompt and strip any code fences/newlines the model adds
    response = completion.choices[0].message.content.replace("```", "").replace("\n", "")
    print(response)
    # Use a filesystem-safe timestamp (no "." or ":") as the output filename
    timestamp = str(datetime.now()).replace(".", "").replace(":", "")
    print(timestamp)
    image = client.text_to_image(response)
    image_path = "outputs/images/" + timestamp + ".png"
    image.save(image_path)
    # Record the original prompt, the refined prompt, and the saved image path
    with open("outputs/" + timestamp + ".json", "w") as f:
        json.dump({"prompt": prompt, "refined_prompt": response, "image": image_path}, f)
    return image

# Create a Gradio Blocks app
with gr.Blocks() as demo:
    # Create a Textbox for the input prompt
    prompt_input = gr.Textbox(label="Enter a prompt")
    # Create an Image component for the output image
    image_output = gr.Image(label="Generated Image")
    # Create a Button to trigger the image generation
    generate_button = gr.Button("Generate Image")
    
    # Define the event listener for the button click
    generate_button.click(fn=process, inputs=prompt_input, outputs=image_output)
    
    # Define the event listener for the Enter key press
    prompt_input.submit(fn=process, inputs=prompt_input, outputs=image_output)

# Launch the interface
if __name__ == "__main__":
    demo.launch(show_error=True)