Kyan14 committed on
Commit fa204a4 · 1 Parent(s): de28ff4

Create app.py

Files changed (1)
  1. app.py +104 -0
app.py ADDED
@@ -0,0 +1,104 @@
+import requests
+from PIL import Image
+from io import BytesIO
+import base64
+import gradio as gr
+from transformers import CLIPProcessor, CLIPModel
+import numpy as np
+
+# Replace with your own API key (token redacted here; never commit a real token)
+STABLE_DIFFUSION_API_KEY = "hf_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+
+# Load the CLIP model and processor
+model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
+processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
+
+def get_mood_from_image(image: Image.Image):
+    moods = ["fear", "anger", "joy", "sadness", "disgust", "surprise"]
+
+    # Create unique prompts for each mood
+    prompts = [
+        "The emotion conveyed by this image is fear. The person looks scared and tense.",
+        "The emotion conveyed by this image is anger. The person looks furious and irritated.",
+        "The emotion conveyed by this image is joy. The person looks happy and cheerful.",
+        "The emotion conveyed by this image is sadness. The person looks unhappy and gloomy.",
+        "The emotion conveyed by this image is disgust. The person looks repulsed and sickened.",
+        "The emotion conveyed by this image is surprise. The person looks astonished and amazed.",
+    ]
+
+    # Prepare the inputs for the model
+    inputs = processor(text=prompts, images=image, return_tensors="pt", padding=True)
+
+    # Run the model
+    logits = model(**inputs).logits_per_image  # image-text similarity scores
+    probs = logits.softmax(dim=-1).tolist()
+
+    # Calculate the scores for each mood
+    mood_scores = {}
+    for mood, score in zip(moods, probs[0]):
+        mood_scores[mood] = score
+    print("Mood Scores:", mood_scores)
+    # Select the mood with the highest score
+    selected_mood = max(mood_scores, key=mood_scores.get)
+
+    return selected_mood
+
+def generate_art(mood):
+    # Implement art generation logic using the Stable Diffusion API
+    prompt = f"{mood} generative art with vibrant colors and intricate patterns"
+    # Post the prompt to the hosted Stable Diffusion inference endpoint
+
+    headers = {
+        "Authorization": f"Bearer {STABLE_DIFFUSION_API_KEY}",
+        "Accept": "image/jpeg",  # Set the Accept header to receive an image directly
+    }
+
+    json_data = {
+        "inputs": prompt
+    }
+
+    response = requests.post('https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5', headers=headers, json=json_data)
+
+    # Check if the response status is not 200 (OK)
+    if response.status_code != 200:
+        print(f"Error: API response status code {response.status_code}")
+        print("Response content:")
+        print(response.content)
+        return None
+
+    # Load the image directly from the response content
+    image = Image.open(BytesIO(response.content))
+
+    return image
+
+mood_emojis = {
+    "fear": "😨",
+    "anger": "😠",
+    "joy": "😄",
+    "sadness": "😢",
+    "disgust": "🤢",
+    "surprise": "😮",
+}
+
+def mood_art_generator(image):
+    mood = get_mood_from_image(image)
+    print("Mood:", mood)
+    if mood:
+        art = generate_art(mood)
+        emoji = mood_emojis[mood]
+        output_text = f"You seem to be {mood} {emoji}. Here's an artwork representing it!"
+        return art, output_text
+    else:
+        return None, "Failed to generate artwork."
+
+iface = gr.Interface(
+    fn=mood_art_generator,
+    inputs=gr.inputs.Image(shape=(224, 224), image_mode="RGB", source="upload", type="pil"),
+    outputs=[gr.outputs.Image(type="pil"), gr.outputs.Textbox()],
+    title="Mood-based Art Generator",
+    description="Upload an image of yourself and let the AI generate artwork based on your mood.",
+    allow_flagging="never",
+    analytics_enabled=False
+)
+
+iface.launch(share=True)
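A practical note on generate_art: the hosted Inference API commonly answers 503 while runwayml/stable-diffusion-v1-5 is cold-starting, which the code above reports as a failure. A minimal retry sketch under that assumption follows; the wait_for_model option and the back-off schedule are illustrative, not part of this commit.

import time
import requests
from io import BytesIO
from PIL import Image

API_URL = "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5"

def generate_art_with_retry(prompt, token, retries=3):
    headers = {"Authorization": f"Bearer {token}", "Accept": "image/jpeg"}
    # wait_for_model asks the API to hold the request until the model is loaded
    payload = {"inputs": prompt, "options": {"wait_for_model": True}}
    for attempt in range(retries):
        response = requests.post(API_URL, headers=headers, json=payload)
        if response.status_code == 200:
            return Image.open(BytesIO(response.content))
        if response.status_code == 503:
            time.sleep(10 * (attempt + 1))  # back off while the model loads
            continue
        print(f"Error: API response status code {response.status_code}")
        return None
    return None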
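And for checking the CLIP classifier on its own, without launching the Gradio app, a self-contained zero-shot probe along the same lines should work; the image path is a placeholder and the prompts are shortened for brevity.

from PIL import Image
from transformers import CLIPProcessor, CLIPModel

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

moods = ["fear", "anger", "joy", "sadness", "disgust", "surprise"]
prompts = [f"The emotion conveyed by this image is {m}." for m in moods]

image = Image.open("selfie.jpg").convert("RGB")  # placeholder path
inputs = processor(text=prompts, images=image, return_tensors="pt", padding=True)
probs = model(**inputs).logits_per_image.softmax(dim=-1)[0]

# Print all six mood probabilities, highest first
for mood, p in sorted(zip(moods, probs.tolist()), key=lambda x: -x[1]):
    print(f"{mood}: {p:.3f}")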