import requests
import io
from PIL import Image
import gradio as gr
import os

# Assuming you have your API token set in an environment variable.
# Both models are served through the Hugging Face Inference API, so a
# single HF_API_TOKEN is read for both.
ZEPHYR_API_TOKEN = os.getenv("HF_API_TOKEN")
SD_API_TOKEN = os.getenv("HF_API_TOKEN")

if not ZEPHYR_API_TOKEN or not SD_API_TOKEN:
    raise ValueError("API token not found. Please set the HF_API_TOKEN environment variable.")

ZEPHYR_API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
SD_API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"


def query_zephyr(linkedin_text):
    """Ask Zephyr to turn a LinkedIn post into a Stable Diffusion prompt."""
    headers = {
        "Authorization": f"Bearer {ZEPHYR_API_TOKEN}",
        "Content-Type": "application/json",
    }
    chat = [
        {"role": "system", "content": "Prepare a prompt for Stable Diffusion for the following LinkedIn post:"},
        {"role": "user", "content": linkedin_text},
        # You can add more turns here if necessary
    ]
    # The serverless text-generation endpoint expects a single string, not a
    # list of role/content dicts, so the turns are flattened into Zephyr's
    # documented chat format (<|system|>/<|user|>/<|assistant|> with </s>).
    prompt = "".join(f"<|{turn['role']}|>\n{turn['content']}</s>\n" for turn in chat) + "<|assistant|>\n"
    payload = {
        "inputs": prompt,
        "parameters": {"return_full_text": False},  # return only the completion, not the echoed prompt
    }
    response = requests.post(ZEPHYR_API_URL, headers=headers, json=payload)
    if response.status_code == 200:
        return response.json()
    else:
        print(f"Failed to query Zephyr model, status code: {response.status_code}")
        print(response.text)  # Provides insight into what went wrong
        return None


# Example LinkedIn post text: a quick smoke test of the Zephyr call.
# Note that this runs once at startup and consumes one API request.
linkedin_text = "Example LinkedIn post content here. How should this be visualized?"
zephyr_response = query_zephyr(linkedin_text)
if zephyr_response:
    print(zephyr_response)
else:
    print("Error querying the Zephyr model.")


def generate_image_from_prompt(prompt, negative_prompt, guidance_scale, width, height, num_inference_steps):
    headers = {"Authorization": f"Bearer {SD_API_TOKEN}"}
    payload = {
        "inputs": prompt,
        "parameters": {
            "guidance_scale": guidance_scale,
            "width": width,
            "height": height,
            "num_inference_steps": num_inference_steps,
        },
    }
    if negative_prompt:  # Add negative prompt if provided
        payload["parameters"]["negative_prompt"] = negative_prompt
    response = requests.post(SD_API_URL, headers=headers, json=payload)
    # Surface HTTP errors instead of trying to decode an error body as an image.
    response.raise_for_status()
    image = Image.open(io.BytesIO(response.content))
    return image


def generate_image_from_linkedin_text(linkedin_text, negative_prompt, guidance_scale, width, height, num_inference_steps):
    # Generate a prompt from the LinkedIn text using Zephyr
    zephyr_response = query_zephyr(linkedin_text)
    if zephyr_response and isinstance(zephyr_response, list):
        generated_prompt = zephyr_response[0].get("generated_text", "")
    else:
        raise ValueError("Unexpected response format from Zephyr model.")

    # Use the generated prompt to create an image with Stable Diffusion
    if generated_prompt:
        image = generate_image_from_prompt(generated_prompt, negative_prompt, guidance_scale, width, height, num_inference_steps)
        return image, generated_prompt
    else:
        raise ValueError("Failed to generate a prompt from the LinkedIn text.")


iface = gr.Interface(
    fn=generate_image_from_linkedin_text,
    inputs=[
        gr.Textbox(label="LinkedIn Message", placeholder="Enter LinkedIn message here..."),
        gr.Textbox(label="Negative Prompt", placeholder="Enter a negative prompt here (optional)..."),
        gr.Slider(label="Guidance Scale", minimum=1, maximum=20, step=0.1, value=7.5),
        gr.Slider(label="Width", minimum=768, maximum=1024, step=1, value=1024),
        gr.Slider(label="Height", minimum=768, maximum=1024, step=1, value=768),
        gr.Slider(label="Number of Inference Steps", minimum=20, maximum=50, step=1, value=30),
    ],
    outputs=[
        gr.Image(type="pil"),
        gr.Textbox(label="Generated Prompt"),  # Textbox rather than Label: the prompt is free text, not a class score
    ],
    title="Generate Images from LinkedIn Messages",
    description="Enter a LinkedIn message to generate a creative prompt with Zephyr, which is then used to generate an image with Stable Diffusion. Image parameters can be adjusted.",
)

iface.launch()
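
# Usage sketch (assumes the token is exported in your shell before running;
# the file name app.py and the token value are placeholders):
#   export HF_API_TOKEN=hf_xxx
#   python app.py
# Gradio prints a local URL (http://127.0.0.1:7860 by default) once the app is up.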