import requests
import io
from PIL import Image
import gradio as gr
import os
# Assuming you have your API tokens set in environment variables
ZEPHYR_API_TOKEN = os.getenv("HF_API_TOKEN")
SD_API_TOKEN = os.getenv("HF_API_TOKEN")
if not ZEPHYR_API_TOKEN or not SD_API_TOKEN:
    raise ValueError("API token not found. Please set the HF_API_TOKEN environment variable.")
ZEPHYR_API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
SD_API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"

def query_zephyr(prompt):
    headers = {"Authorization": f"Bearer {ZEPHYR_API_TOKEN}"}
    response = requests.post(ZEPHYR_API_URL, headers=headers, json={"inputs": prompt})
    return response.json()
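
# Note (assumption): query_zephyr returns the raw JSON from the Inference API. If you need
# to steer the prompt generation, the text-generation task also accepts a "parameters"
# object in the payload, e.g. {"inputs": prompt, "parameters": {"max_new_tokens": 100}}.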

def generate_image_from_prompt(prompt, guidance_scale=7.5, width=1024, height=768, num_inference_steps=30):
    headers = {"Authorization": f"Bearer {SD_API_TOKEN}"}
    payload = {
        "inputs": prompt,
        "parameters": {
            "guidance_scale": guidance_scale,
            "width": width,
            "height": height,
            "num_inference_steps": num_inference_steps,
        },
    }
    response = requests.post(SD_API_URL, headers=headers, json=payload)
    # On success the Inference API returns the raw image bytes
    image_bytes = response.content
    image = Image.open(io.BytesIO(image_bytes))
    return image
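
# Note (assumption): SDXL was trained around 1024x1024 outputs; non-square sizes like the
# 1024x768 default here generally work, but width and height should stay multiples of 8.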

def generate_image_from_linkedin_text(linkedin_text):
    # Step 1: Generate a prompt from the LinkedIn text using Zephyr
    zephyr_output = query_zephyr(linkedin_text)
    # The text-generation endpoint returns a list of completions, e.g. [{"generated_text": "..."}]
    generated_prompt = ""
    if isinstance(zephyr_output, list) and zephyr_output:
        generated_prompt = zephyr_output[0].get("generated_text", "")
    # Step 2: Use the generated prompt to create an image with Stable Diffusion
    if generated_prompt:
        return generate_image_from_prompt(generated_prompt)
    else:
        raise ValueError("Failed to generate a prompt from the LinkedIn text.")

iface = gr.Interface(
    fn=generate_image_from_linkedin_text,
    inputs=[gr.Textbox(label="LinkedIn Message", placeholder="Enter LinkedIn message here...")],
    outputs=gr.Image(type="pil"),
    title="Generate Images from LinkedIn Messages",
    description="Enter a LinkedIn message to generate a creative prompt with Zephyr, which is then used to generate an image with Stable Diffusion.",
)

iface.launch()
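# launch() serves the app locally (http://127.0.0.1:7860 by default); pass share=True,
# i.e. iface.launch(share=True), to get a temporary public Gradio link.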