import requests
import io
from PIL import Image
import gradio as gr
import os

# Both Inference API calls use the same Hugging Face token, read from the
# HF_API_TOKEN environment variable.
ZEPHYR_API_TOKEN = os.getenv("HF_API_TOKEN")
SD_API_TOKEN = os.getenv("HF_API_TOKEN")

if not ZEPHYR_API_TOKEN or not SD_API_TOKEN:
    raise ValueError("API token not found. Please set the HF_API_TOKEN environment variable.")

ZEPHYR_API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
SD_API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
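# Note: these are the serverless Inference API endpoints. A cold model can
# respond with HTTP 503 while it loads, so the first request may need a retry.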

def query_zephyr(linkedin_text):
    # Ask Zephyr to turn the LinkedIn post into a Stable Diffusion prompt.
    # zephyr-7b-beta is served as a text-generation model, so "inputs" must be
    # a single prompt string rather than the conversational
    # past_user_inputs/generated_responses schema; the string below follows the
    # chat template from the model card.
    instruction = "Prepare a prompt for Stable Diffusion for the following LinkedIn post:"
    prompt = (
        f"<|system|>\n{instruction}</s>\n"
        f"<|user|>\n{linkedin_text}</s>\n"
        f"<|assistant|>\n"
    )

    headers = {
        "Authorization": f"Bearer {ZEPHYR_API_TOKEN}",
        "Content-Type": "application/json",
    }

    payload = {
        "inputs": prompt,
        "parameters": {
            # Return only the newly generated text, not the echoed prompt.
            "return_full_text": False,
            "max_new_tokens": 250,
        },
    }

    response = requests.post(ZEPHYR_API_URL, headers=headers, json=payload)
    if response.status_code == 200:
        return response.json()
    else:
        raise Exception(f"Failed to query Zephyr model, status code: {response.status_code}")



def generate_image_from_prompt(prompt, negative_prompt, guidance_scale, width, height, num_inference_steps):
    headers = {"Authorization": f"Bearer {SD_API_TOKEN}"}
    payload = {
        "inputs": prompt,
        "parameters": {
            "guidance_scale": guidance_scale,
            "width": width,
            "height": height,
            "num_inference_steps": num_inference_steps,
        },
    }
    if negative_prompt:  # Add negative prompt if provided
        payload["parameters"]["negative_prompt"] = negative_prompt

    response = requests.post(SD_API_URL, headers=headers, json=payload)
    # The endpoint returns raw image bytes on success; surface API errors
    # (e.g. 503 while the model loads) instead of failing inside PIL.
    if response.status_code != 200:
        raise Exception(f"Failed to query Stable Diffusion model, status code: {response.status_code}")
    image = Image.open(io.BytesIO(response.content))
    return image

def generate_image_from_linkedin_text(linkedin_text, negative_prompt, guidance_scale, width, height, num_inference_steps):
    # Generate a prompt from the LinkedIn text using Zephyr
    zephyr_response = query_zephyr(linkedin_text)
    if zephyr_response and isinstance(zephyr_response, list):
        generated_prompt = zephyr_response[0].get("generated_text", "").strip()
    else:
        raise ValueError("Unexpected response format from Zephyr model.")

    # Use the generated prompt to create an image with Stable Diffusion
    if generated_prompt:
        image = generate_image_from_prompt(generated_prompt, negative_prompt, guidance_scale, width, height, num_inference_steps)
        return image, generated_prompt
    else:
        raise ValueError("Failed to generate a prompt from the LinkedIn text.")

iface = gr.Interface(
    fn=generate_image_from_linkedin_text,
    inputs=[
        gr.Textbox(label="LinkedIn Message", placeholder="Enter LinkedIn message here..."),
        gr.Textbox(label="Negative Prompt", placeholder="Enter a negative prompt here (optional)..."),
        gr.Slider(label="Guidance Scale", minimum=1, maximum=20, step=0.1, value=7.5),
        gr.Slider(label="Width", minimum=768, maximum=1024, step=1, value=1024),
        gr.Slider(label="Height", minimum=768, maximum=1024, step=1, value=768),
        gr.Slider(label="Number of Inference Steps", minimum=20, maximum=50, step=1, value=30)
    ],
    outputs=[
        gr.Image(type="pil"),
        gr.Textbox(label="Generated Prompt")
    ],
    title="Generate Images from LinkedIn Messages",
    description="Enter a LinkedIn message to generate a creative prompt with Zephyr, which is then used to generate an image with Stable Diffusion. Image parameters can be adjusted."
)

iface.launch()
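
# Example local run (sketch; assumes this script is saved as app.py):
#   export HF_API_TOKEN=<your Hugging Face token>
#   python app.py
# Gradio serves the UI at http://127.0.0.1:7860 by default.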