from flask import Flask, request, jsonify, send_file
from flask_cors import CORS
import os
import subprocess
from huggingface_hub import InferenceClient
from io import BytesIO
from PIL import Image
# Initialize the Flask app
app = Flask(__name__)
CORS(app) # Enable CORS for all routes
# Initialize the InferenceClient with your Hugging Face token
HF_TOKEN = os.environ.get("HF_TOKEN")  # The Hugging Face token must be set in the environment
client = InferenceClient(token=HF_TOKEN)
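# Example of providing the token before launching the app (illustrative; the value is a placeholder):
#   export HF_TOKEN=hf_xxxxxxxxxxxxxxxx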
# Hardcoded negative prompt
NEGATIVE_PROMPT_FINGERS = """2D,missing fingers, extra fingers, elongated fingers, fused fingers,
mutated fingers, poorly drawn fingers, disfigured fingers,
too many fingers, deformed hands, extra hands, malformed hands,
blurry hands, disproportionate fingers"""
@app.route('/')
def home():
    return "Welcome to the Image Generator API!"
# Simple content moderation function
def is_prompt_explicit(prompt):
    # Streamlined keyword list to avoid unnecessary restrictions
    explicit_keywords = [
        "sexual", "porn", "hentai", "fetish", "nude", "provocative", "obscene", "vulgar", "intimate", "kinky", "hardcore",
        "threesome", "orgy", "masturbation", "genital", "suicide",
        "self-harm", "depression", "kill myself", "worthless"
    ]
    for keyword in explicit_keywords:
        if keyword.lower() in prompt.lower():
            return True
    return False
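# Illustrative usage (simple case-insensitive substring matching):
#   is_prompt_explicit("a nude portrait")   -> True  (matches the "nude" keyword)
#   is_prompt_explicit("a mountain lake")   -> False (no keyword matches)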
# Function to generate an image from a text prompt
def generate_image(prompt, negative_prompt=None, height=512, width=512, model="stabilityai/stable-diffusion-2-1", num_inference_steps=50, guidance_scale=7.5, seed=None):
    try:
        # Generate the image using Hugging Face's Inference API with additional parameters
        image = client.text_to_image(
            prompt=prompt,
            # Use the caller-supplied negative prompt if given; otherwise fall back to the hardcoded one
            negative_prompt=negative_prompt or NEGATIVE_PROMPT_FINGERS,
            height=height,
            width=width,
            model=model,
            num_inference_steps=num_inference_steps,  # Control the number of inference steps
            guidance_scale=guidance_scale,  # Control the guidance scale
            seed=seed  # Control the seed for reproducibility
        )
        return image  # Return the generated PIL image
    except Exception as e:
        print(f"Error generating image: {str(e)}")
        return None
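# Illustrative usage (assumes HF_TOKEN is valid and the model is reachable via the Inference API):
#   img = generate_image("a red bicycle", seed=1234)  # returns a PIL.Image.Image on success, or None on failure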
# Flask route for the API endpoint to generate an image
@app.route('/generate_image', methods=['POST'])
def generate_api():
    data = request.get_json(silent=True) or {}  # Tolerate missing or invalid JSON bodies
    # Extract required fields from the request
    prompt = data.get('prompt', '')
    negative_prompt = data.get('negative_prompt', None)
    height = data.get('height', 1024)  # Default height
    width = data.get('width', 720)  # Default width
    num_inference_steps = data.get('num_inference_steps', 50)  # Default number of inference steps
    guidance_scale = data.get('guidance_scale', 7.5)  # Default guidance scale
    model_name = data.get('model', 'stabilityai/stable-diffusion-2-1')  # Default model
    seed = data.get('seed', None)  # Seed for reproducibility, default is None
    if not prompt:
        return jsonify({"error": "Prompt is required"}), 400
    try:
        # Check for explicit content
        if is_prompt_explicit(prompt):
            # Return the pre-defined placeholder image instead of generating one
            return send_file(
                "nsfw.jpg",
                mimetype='image/jpeg',
                as_attachment=False,
                download_name='thinkgood.png'
            )
        # Call the generate_image function with the provided parameters
        image = generate_image(prompt, negative_prompt, height, width, model_name, num_inference_steps, guidance_scale, seed)
        if image:
            # Save the image to a BytesIO object
            img_byte_arr = BytesIO()
            image.save(img_byte_arr, format='PNG')  # Convert the image to PNG
            img_byte_arr.seek(0)  # Move to the start of the byte stream
            # Send the generated image as the response
            return send_file(
                img_byte_arr,
                mimetype='image/png',
                as_attachment=False,  # Serve the image inline rather than as a download
                download_name='generated_image.png'  # File name used if the client saves it
            )
        else:
            return jsonify({"error": "Failed to generate image"}), 500
    except Exception as e:
        print(f"Error in generate_api: {str(e)}")  # Log the error
        return jsonify({"error": str(e)}), 500
# Add this block to make sure your app runs when called
if __name__ == "__main__":
    subprocess.Popen(["python", "wk.py"])  # Start the wk.py helper script in the background
    app.run(host='0.0.0.0', port=7860)  # Run directly if needed for testing
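# Example request (illustrative sketch; assumes the app is running locally on port 7860
# and that HF_TOKEN is set so the Inference API call can succeed):
#   curl -X POST http://localhost:7860/generate_image \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "a watercolor lighthouse at sunset", "width": 768, "height": 768, "seed": 42}' \
#        --output generated_image.png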