Kyan14 commited on
Commit
a1a6b25
·
1 Parent(s): f56f57c

Trying to improve it

Browse files
Files changed (1) hide show
  1. app.py +31 -17
app.py CHANGED
@@ -15,14 +15,20 @@ model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
15
  processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
16
 
17
  def get_mood_from_image(image: Image.Image):
18
- moods = ["happy", "sad", "angry", "neutral"]
19
- prompt = "The mood of the person in this image is: "
20
 
21
- # Create text prompts for each mood
22
- text_inputs = [f"{prompt}{mood}" for mood in moods]
 
 
 
 
 
 
 
23
 
24
  # Prepare the inputs for the model
25
- inputs = processor(text=text_inputs, images=image, return_tensors="pt", padding=True)
26
 
27
  # Run the model
28
  logits = model(**inputs).logits_per_image
@@ -40,10 +46,9 @@ def get_mood_from_image(image: Image.Image):
40
 
41
  def generate_art(mood):
42
  # Implement art generation logic using the Stable Diffusion API
43
- prompt = f"{mood} generative art with vibrant colors and intricate patterns"
44
- ...
45
-
46
- request_headers = {
47
  "Authorization": f"Bearer {STABLE_DIFFUSION_API_KEY}",
48
  "Accept": "image/jpeg", # Set the Accept header to receive an image directly
49
  }
@@ -53,11 +58,9 @@ def generate_art(mood):
53
  }
54
 
55
  while True:
56
- # Use the correct variable name (request_headers) in the post request
57
- response = requests.post('https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5', headers=request_headers, json=json_data)
58
 
59
- # Check if the response status is not 200 (OK)
60
- if response.status_code == 503: # Model is loading
61
  print("Model is loading, waiting for 30 seconds before retrying...")
62
  time.sleep(30)
63
  continue
@@ -70,24 +73,34 @@ def generate_art(mood):
70
 
71
  break
72
 
73
- # Load the image directly from the response content
74
  image = Image.open(BytesIO(response.content))
75
 
76
  return image
77
 
 
 
 
 
 
 
 
 
 
78
  def mood_art_generator(image):
79
  mood = get_mood_from_image(image)
80
  print("Mood:", mood)
81
  if mood:
82
  art = generate_art(mood)
83
- return art
 
 
84
  else:
85
- return None
86
 
87
  iface = gr.Interface(
88
  fn=mood_art_generator,
89
  inputs=gr.inputs.Image(shape=(224, 224), image_mode="RGB", source="upload"),
90
- outputs=gr.outputs.Image(type="pil"),
91
  title="Mood-based Art Generator",
92
  description="Upload an image of yourself and let the AI generate artwork based on your mood.",
93
  allow_flagging=False,
@@ -96,3 +109,4 @@ iface = gr.Interface(
96
  )
97
 
98
  iface.launch()
 
 
15
  processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
16
 
17
  def get_mood_from_image(image: Image.Image):
18
+ moods = ["fear", "anger", "joy", "sadness", "disgust", "surprise"]
 
19
 
20
+ # Create unique prompts for each mood
21
+ prompts = [
22
+ "The emotion conveyed by this image is fear. The person looks scared and tense.",
23
+ "The emotion conveyed by this image is anger. The person looks furious and irritated.",
24
+ "The emotion conveyed by this image is joy. The person looks happy and cheerful.",
25
+ "The emotion conveyed by this image is sadness. The person looks unhappy and gloomy.",
26
+ "The emotion conveyed by this image is disgust. The person looks repulsed and sickened.",
27
+ "The emotion conveyed by this image is surprise. The person looks astonished and amazed.",
28
+ ]
29
 
30
  # Prepare the inputs for the model
31
+ inputs = processor(text=prompts, images=image, return_tensors="pt", padding=True)
32
 
33
  # Run the model
34
  logits = model(**inputs).logits_per_image
 
46
 
47
  def generate_art(mood):
48
  # Implement art generation logic using the Stable Diffusion API
49
+ prompt = f"{mood} generative art with vibrant colors and intricate patterns ({str(np.random.randint(1, 10000))})"
50
+
51
+ headers = {
 
52
  "Authorization": f"Bearer {STABLE_DIFFUSION_API_KEY}",
53
  "Accept": "image/jpeg", # Set the Accept header to receive an image directly
54
  }
 
58
  }
59
 
60
  while True:
61
+ response = requests.post('https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5', headers=headers, json=json_data)
 
62
 
63
+ if response.status_code == 503:
 
64
  print("Model is loading, waiting for 30 seconds before retrying...")
65
  time.sleep(30)
66
  continue
 
73
 
74
  break
75
 
 
76
  image = Image.open(BytesIO(response.content))
77
 
78
  return image
79
 
80
+ mood_emojis = {
81
+ "fear": "😨",
82
+ "anger": "😠",
83
+ "joy": "😄",
84
+ "sadness": "😢",
85
+ "disgust": "🤢",
86
+ "surprise": "😮",
87
+ }
88
+
89
  def mood_art_generator(image):
90
  mood = get_mood_from_image(image)
91
  print("Mood:", mood)
92
  if mood:
93
  art = generate_art(mood)
94
+ emoji = mood_emojis[mood]
95
+ output_text = f"You seem to be {mood} {emoji}. Here's an artwork representing it!"
96
+ return art, output_text
97
  else:
98
+ return None, "Failed to generate artwork."
99
 
100
  iface = gr.Interface(
101
  fn=mood_art_generator,
102
  inputs=gr.inputs.Image(shape=(224, 224), image_mode="RGB", source="upload"),
103
+ outputs=[gr.outputs.Image(type="pil"), gr.outputs.Textbox()],
104
  title="Mood-based Art Generator",
105
  description="Upload an image of yourself and let the AI generate artwork based on your mood.",
106
  allow_flagging=False,
 
109
  )
110
 
111
  iface.launch()
112
+