Devsora committed
Commit bf9983e · 1 Parent(s): 0505dd8
Files changed (2)
  1. app.py +129 -0
  2. requirements.txt +8 -0
app.py CHANGED
@@ -5,3 +5,132 @@ def greet(name):
 
 demo = gr.Interface(fn=greet, inputs="text", outputs="text")
 demo.launch()
+ from fastapi import FastAPI, HTTPException
+ from pydantic import BaseModel
+ import spaces  # Necessary for the @spaces.GPU decorator
+ from diffusers import DiffusionPipeline, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler
+ import torch
+ import os
+ from datetime import datetime
+ from PIL import Image
+ import boto3
+ from botocore.exceptions import NoCredentialsError
+ from dotenv import load_dotenv
+
+ # Load environment variables from the .env file
+ load_dotenv()
+
+ # AWS S3 configuration
+ AWS_ACCESS_KEY = os.getenv('AWS_ACCESS_KEY')
+ AWS_SECRET_KEY = os.getenv('AWS_SECRET_KEY')
+ AWS_BUCKET_NAME = os.getenv('AWS_BUCKET_NAME')
+ AWS_REGION = os.getenv('AWS_REGION')
+ HF_TOKEN = os.getenv('HF_TOKEN')  # Hugging Face token used when downloading the models
+
+ # Initialize the S3 client
+ s3_client = boto3.client(
+     's3',
+     aws_access_key_id=AWS_ACCESS_KEY,
+     aws_secret_access_key=AWS_SECRET_KEY,
+     region_name=AWS_REGION
+ )
+
+ # Configuration for the character pipeline
+ character_pipe = DiffusionPipeline.from_pretrained(
+     "cagliostrolab/animagine-xl-3.1",
+     torch_dtype=torch.float16,
+     use_safetensors=True,
+     token=HF_TOKEN  # `token` supersedes the deprecated `use_auth_token` argument
+ )
+ character_pipe.scheduler = EulerDiscreteScheduler.from_config(character_pipe.scheduler.config)
+
+ # Configuration for the item pipeline
+ item_pipe = DiffusionPipeline.from_pretrained(
+     "openart-custom/DynaVisionXL",
+     torch_dtype=torch.float16,
+     use_safetensors=True,
+     token=HF_TOKEN  # `token` supersedes the deprecated `use_auth_token` argument
+ )
+ item_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(item_pipe.scheduler.config)
+
+ # Function for image generation with ZeroGPU
+ @spaces.GPU(duration=60)  # Allocate the GPU only while this function runs
+ def generate_image(model_type, prompt, negative_prompt, width, height, guidance_scale, num_inference_steps):
+     if model_type == "character":
+         pipe = character_pipe
+         default_prompt = "1girl, souji okita, fate series, solo, upper body, bedroom, night, seducing, (sexy clothes)"
+         default_negative_prompt = "lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]"
+     elif model_type == "item":
+         pipe = item_pipe
+         default_prompt = "great sword, runes on blade, acid on blade, weapon, (((item)))"
+         default_negative_prompt = "1girl, girl, man, boy, 1man, men, girls"
+     else:
+         return None  # Invalid model_type ("character" or "item"); the API route maps this to a 400
+
+     # Use the custom prompts if provided
+     final_prompt = prompt if prompt else default_prompt
+     final_negative_prompt = negative_prompt if negative_prompt else default_negative_prompt
+
+     # Move the pipeline to the GPU
+     pipe.to("cuda")
+
+     # Image generation
+     image = pipe(
+         prompt=final_prompt,
+         negative_prompt=final_negative_prompt,
+         width=int(width),
+         height=int(height),
+         guidance_scale=float(guidance_scale),
+         num_inference_steps=int(num_inference_steps)
+     ).images[0]
+
+     # Save the image to a temporary file
+     temp_file = "/tmp/generated_image.png"
+     image.save(temp_file)
+
+     # Upload to S3
+     file_name = datetime.now().strftime("%Y%m%d_%H%M%S") + ".png"
+     try:
+         s3_client.upload_file(temp_file, AWS_BUCKET_NAME, file_name)
+         s3_url = f"https://{AWS_BUCKET_NAME}.s3.{AWS_REGION}.amazonaws.com/{file_name}"
+         return s3_url
+     except NoCredentialsError:
+         return "Credentials not available"
+
+ # Initialize FastAPI
+ app = FastAPI()
+
+ # Define the request model
+ class PredictRequest(BaseModel):
+     model_type: str
+     prompt: str = ""
+     negative_prompt: str = ""
+     width: int
+     height: int
+     guidance_scale: float
+     num_inference_steps: int
+
+ # Add FastAPI routes
+ @app.get("/")
+ def read_root():
+     return {"message": "Hello World"}
+
+ @app.post("/api/predict")
+ async def predict(request: PredictRequest):
+     result = generate_image(
+         model_type=request.model_type,
+         prompt=request.prompt,
+         negative_prompt=request.negative_prompt,
+         width=request.width,
+         height=request.height,
+         guidance_scale=request.guidance_scale,
+         num_inference_steps=request.num_inference_steps
+     )
+     if result is None:
+         raise HTTPException(status_code=400, detail="Invalid model_type. Choose 'character' or 'item'.")
+     return {"result": result}
+
+ # Run the FastAPI app with Uvicorn
+ if __name__ == "__main__":
+     import uvicorn
+     uvicorn.run(app, host="0.0.0.0", port=7860)
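
For reference, the new /api/predict route can be exercised with a plain HTTP client once app.py is serving on port 7860. The sketch below is illustrative and not part of the commit: it assumes a local run at http://localhost:7860 (swap in the Space URL for a deployed instance) and that the requests package is available, which is not pinned in requirements.txt. The payload fields mirror the PredictRequest model; the concrete values are placeholders.

# Illustrative client for the /api/predict route (not part of this commit).
# Assumes the app is reachable at http://localhost:7860; adjust BASE_URL as needed.
import requests

BASE_URL = "http://localhost:7860"  # placeholder; use your Space URL when deployed

payload = {
    "model_type": "item",       # "character" or "item"
    "prompt": "",               # empty string falls back to the built-in default prompt
    "negative_prompt": "",
    "width": 1024,
    "height": 1024,
    "guidance_scale": 7.0,
    "num_inference_steps": 28,
}

response = requests.post(f"{BASE_URL}/api/predict", json=payload, timeout=300)
response.raise_for_status()
print(response.json())  # e.g. {"result": "https://<bucket>.s3.<region>.amazonaws.com/<timestamp>.png"}

A successful call returns the S3 URL produced by generate_image; an unknown model_type surfaces as HTTP 400.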
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ fastapi==0.100.0
+ uvicorn==0.23.1
+ spaces==0.31.1
+ diffusers==0.31.0
+ torch==2.4.0
+ boto3==1.35.85
+ python-dotenv==1.0.1
+ Pillow==11.0.0
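
app.py loads its credentials with python-dotenv, so the Space (or a local checkout) needs AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_BUCKET_NAME, AWS_REGION, and HF_TOKEN in its environment or in a .env file. The snippet below is an illustrative preflight sketch, not part of the commit: it only verifies that those variables are set before the server is started.

# Preflight check (illustrative, not part of this commit): confirm that every
# environment variable read by app.py is present before launching the server.
import os
from dotenv import load_dotenv

REQUIRED_VARS = ["AWS_ACCESS_KEY", "AWS_SECRET_KEY", "AWS_BUCKET_NAME", "AWS_REGION", "HF_TOKEN"]

load_dotenv()  # pulls variables from a local .env file, if one exists
missing = [name for name in REQUIRED_VARS if not os.getenv(name)]
if missing:
    raise SystemExit(f"Missing environment variables: {', '.join(missing)}")
print("Environment looks complete.")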