umarabbas890 committed on
Commit c6ed84e · verified · 1 Parent(s): be50509

Update app.py

Files changed (1)
  1. app.py +40 -8
app.py CHANGED
@@ -1,8 +1,40 @@
- gradio==3.28.0
- transformers==4.34.0
- torch==2.1.0
- stable-diffusion-v1-4==1.0
- groq==0.2.0 # Adjust this version according to the Groq API version you're using
- requests==2.31.0
- Pillow==10.0.0 # For image handling
- python-dotenv==1.0.0 # For managing environment variables
+ import os
+ from gradio import Interface
+ from PIL import Image
+ from groq import Groq
+ from diffusers import StableDiffusionPipeline
+ from dotenv import load_dotenv
+
+ # Load environment variables from .env file
+ load_dotenv()
+
+ # Groq API setup
+ client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
+
+ # Stable Diffusion setup using Hugging Face's diffusers library
+ model = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
+
+ # Function to generate an image and interact with Groq's API
+ def generate_and_interact(prompt):
+     # Step 1: Generate image using Stable Diffusion
+     image = model(prompt).images[0]
+
+     # Step 2: Save the generated image (optional, for further use)
+     image_path = "generated_image.png"
+     image.save(image_path)
+
+     # Step 3: Interact with Groq API (example: send metadata or a description of the image)
+     chat_completion = client.chat.completions.create(
+         messages=[{"role": "user", "content": f"Analyze the generated image from this prompt: {prompt}"}],
+         model="llama3-8b-8192",
+         stream=False,
+     )
+
+     # Return the Groq response and the generated image
+     return chat_completion.choices[0].message.content, image
+
+ # Gradio interface setup
+ iface = Interface(fn=generate_and_interact, inputs="text", outputs=["text", "image"])
+
+ # Launch the app
+ iface.launch()
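
The pipeline load in the updated app.py keeps the default full-precision weights on the CPU. On a CUDA-equipped host the weights are typically loaded in half precision and the pipeline moved to the GPU; the sketch below shows that common diffusers pattern and is an assumption about the deployment, not part of this commit.

import torch
from diffusers import StableDiffusionPipeline

# Assumption: a CUDA GPU is available; float16 weights roughly halve memory use.
model = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    torch_dtype=torch.float16,
)
model = model.to("cuda")

# The rest of app.py works unchanged: model(prompt).images[0] still returns a PIL image.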