Kyan14 committed
Commit 9798f2a · 1 Parent(s): 18c4648

Update app.py

Files changed (1)
app.py +92 -0
app.py CHANGED
@@ -0,0 +1,92 @@
+ import requests
+ from PIL import Image
+ from io import BytesIO
+ import gradio as gr
+ from transformers import CLIPProcessor, CLIPModel
+
+ # Replace with your own Hugging Face API token; never commit a real token
+ STABLE_DIFFUSION_API_KEY = "hf_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+
+ # Load the CLIP model and processor
+ model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
+ processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
+
+ def get_mood_from_image(image: Image.Image):
+     moods = ["happy", "sad", "angry", "neutral"]
+     prompt = "The mood of the person in this image is: "
+
+     # Create one text prompt per candidate mood
+     text_inputs = [f"{prompt}{mood}" for mood in moods]
+
+     # Prepare the inputs for the model
+     inputs = processor(text=text_inputs, images=image, return_tensors="pt", padding=True)
+
+     # Run the model; CLIP reports image-text similarity as logits_per_image
+     logits_per_image = model(**inputs).logits_per_image
+     probs = logits_per_image.softmax(dim=-1).tolist()
+
+     # Map each mood to its probability
+     mood_scores = {}
+     for mood, score in zip(moods, probs[0]):
+         mood_scores[mood] = score
+
+     # Keep only moods scoring above 60%; the probabilities sum to 1,
+     # so at most one mood can clear this threshold
+     filtered_moods = {k: v for k, v in mood_scores.items() if v > 0.6}
+
+     if not filtered_moods:
+         return None
+
+     # Select the mood with the highest score
+     selected_mood = max(filtered_moods, key=filtered_moods.get)
+     return selected_mood
+
+ def generate_art(mood):
+     # Generate artwork with Stable Diffusion via the Hugging Face Inference API
+     prompt = f"{mood} generative art"
+
+     headers = {
+         "Authorization": f"Bearer {STABLE_DIFFUSION_API_KEY}"
+     }
+
+     json_data = {
+         "inputs": prompt
+     }
+
+     response = requests.post(
+         "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5",
+         headers=headers,
+         json=json_data,
+     )
+     # A 503 here usually means the model is still cold-starting on the API side
+     response.raise_for_status()
+
+     # The Inference API returns the generated image as raw bytes, not JSON,
+     # so decode the response body directly into a PIL Image
+     image = Image.open(BytesIO(response.content))
+
+     return image
+
+ def mood_art_generator(image: Image.Image):
+     mood = get_mood_from_image(image)
+     if mood:
+         art = generate_art(mood)
+         return art
+     else:
+         return None
+
+ # type="pil" hands the function a PIL Image rather than the default numpy array
+ image_input = gr.Image(type="pil")
+ outputs = gr.Image(label="Generated Artwork")
+
+ interface = gr.Interface(
+     fn=mood_art_generator,
+     inputs=image_input,
+     outputs=outputs,
+     title="Mood-based Art Generator",
+     description="Upload an image of yourself and let the AI generate artwork based on your mood.",
+ )
+
+ if __name__ == "__main__":
+     interface.launch()