from flask import Flask, render_template, request, redirect, url_for
import requests
import base64

app = Flask(__name__)

# Replace with your own API keys (keep real keys out of source control)
CLIP_API_KEY = "your_clip_api_key"
STABLE_DIFFUSION_API_KEY = "your_stable_diffusion_api_key"
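# A safer alternative (an assumption, not part of the original code): load the
# keys from environment variables so they never land in the repository, e.g.:
#   import os
#   CLIP_API_KEY = os.environ.get("CLIP_API_KEY", "")
#   STABLE_DIFFUSION_API_KEY = os.environ.get("STABLE_DIFFUSION_API_KEY", "")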

@app.route('/')
def index():
    return render_template('index.html')

@app.route('/generate', methods=['POST'])
def generate():
    image = request.files['image']
    mood = get_mood_from_image(image)
    if mood:
        art, narrative = generate_art_and_narrative(mood)
        return render_template('result.html', art=art, narrative=narrative)
    else:
        return redirect(url_for('index'))

def get_mood_from_image(image):
    # Classify the mood of the person in the image with CLIP, via the
    # Hugging Face zero-shot image-classification endpoint.
    moods = ["happy", "sad", "angry", "neutral"]
    prompt = "The mood of the person in this image is: "
    headers = {
        "Authorization": f"Bearer {CLIP_API_KEY}"
    }
    # Convert the uploaded image to base64
    image_base64 = base64.b64encode(image.read()).decode('utf-8')
    # NOTE: the payload and response shapes below follow the hosted
    # zero-shot image-classification task format and may need adjusting.
    json_data = {
        "inputs": image_base64,
        "parameters": {"candidate_labels": [prompt + mood for mood in moods]},
    }
    response = requests.post(
        'https://api-inference.huggingface.co/models/openai/clip-vit-base-patch32',
        headers=headers,
        json=json_data,
    ).json()
    # The endpoint returns a list of {"label": ..., "score": ...} entries,
    # sorted by score, so map each label back to its bare mood name.
    mood_scores = {item['label'][len(prompt):]: float(item['score']) for item in response}
    # Keep only moods scoring above 60%. The scores are a softmax over the
    # candidate labels, so at most one mood can clear this threshold.
    filtered_moods = {k: v for k, v in mood_scores.items() if v > 0.6}
    if not filtered_moods:
        return None
    return filtered_moods

def generate_art_and_narrative(mood):
    # Generate art and a short narrative for the detected mood using the
    # Stable Diffusion API.
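    # A minimal sketch, assuming the Hugging Face hosted inference API; the
    # model ID, prompt wording, and narrative text are illustrative
    # assumptions, not part of the original code.
    headers = {"Authorization": f"Bearer {STABLE_DIFFUSION_API_KEY}"}
    prompt = f"An abstract painting expressing a {' and '.join(mood)} mood"
    response = requests.post(
        'https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5',
        headers=headers,
        json={"inputs": prompt},
    )
    if response.status_code != 200:
        return None, None
    # The text-to-image endpoint returns raw image bytes; base64-encode them
    # so the template can embed the image in a data URI.
    art = base64.b64encode(response.content).decode('utf-8')
    narrative = f"A generated piece reflecting a {' and '.join(mood)} mood."
    return art, narrative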

if __name__ == '__main__':
    app.run(debug=True)