LEIDIA committed on
Commit 9309e46 · verified · 1 Parent(s): 601b6f3

Update app.py

Files changed (1)
  1. app.py +44 -123
app.py CHANGED
@@ -5,157 +5,78 @@ import torch

Old side of the hunk (removed lines prefixed with "-"):

 from diffusers import DiffusionPipeline
 from datasets import load_dataset

-# Device and model configuration
-device = "cuda" if torch.cuda.is_available() else "cpu"
-model_repo_id = "stabilityai/sdxl-turbo"  # Replace with the desired model

-torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

-pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
 pipe = pipe.to(device)

-# General parameter definitions
-MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 752
-
 # Loading the dataset from Hugging Face
 dataset = load_dataset("LEIDIA/Data_Womleimg")

-# Adding descriptions to the dataset
-descriptions = [
     "A woman wearing a full blue leather catsuit",
-    "A woman in a black leather pants",
-    "A legs woman in tigh high blue leather boots",
-    "A woman in long red leather jacket, red leather shorts and a tigh high red leather boots",
     "A legs woman in cream color leather pants",
-    "A woman in purple leather leggings with tigh high black leather boots",
-    # Add more descriptions as needed
 ]

-def infer(prompt, num_inference_steps):
-    """Generate an image from the prompt."""
-    image = pipe(
-        prompt=prompt,
-        num_inference_steps=num_inference_steps,
-        height=MAX_IMAGE_SIZE,
-        width=MAX_IMAGE_SIZE,
-    ).images[0]
-    return image
-
-# Full inference function with more parameters
-
-def advanced_infer(
-    prompt,
-    negative_prompt,
-    seed,
-    randomize_seed,
-    width,
-    height,
-    guidance_scale,
-    num_inference_steps,
-):
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-
-    generator = torch.Generator().manual_seed(seed)

     image = pipe(
         prompt=prompt,
-        negative_prompt=negative_prompt,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
-        width=width,
-        height=height,
-        generator=generator,
     ).images[0]
-
-    return image, seed

 # Gradio interface
 with gr.Blocks() as demo:
-    with gr.Column(elem_id="col-container"):
-        gr.Markdown("## Text-to-Image Optimized for CPU")
-
     with gr.Row():
-        prompt = gr.Textbox(
-            label="Prompt",
-            show_label=False,
-            max_lines=1,
-            placeholder="Enter your prompt",
-            container=False,
-        )

-        # Slider to set the number of inference steps
-        num_inference_steps = gr.Slider(
-            label="Number of inference steps",
-            minimum=1,
-            maximum=7,
-            step=1,
-            value=15,
         )

-    # Button to generate the image
     generate_button = gr.Button("Generate")
     result = gr.Image(label="Generated Image")

-    # Generate the image on button click
     generate_button.click(
-        infer,
-        inputs=[prompt, num_inference_steps],
         outputs=result,
     )

-    # Advanced settings
-    with gr.Accordion("Advanced Settings", open=False):
-        negative_prompt = gr.Textbox(
-            label="Negative prompt",
-            max_lines=1,
-            placeholder="Enter a negative prompt",
-        )
-
-        seed = gr.Slider(
-            label="Seed",
-            minimum=0,
-            maximum=MAX_SEED,
-            step=1,
-            value=0,
-        )
-
-        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-        with gr.Row():
-            width = gr.Slider(
-                label="Width",
-                minimum=64,
-                maximum=MAX_IMAGE_SIZE,
-                step=8,
-                value=512,
-            )
-
-            height = gr.Slider(
-                label="Height",
-                minimum=64,
-                maximum=MAX_IMAGE_SIZE,
-                step=8,
-                value=512,
-            )
-
-        guidance_scale = gr.Slider(
-            label="Guidance scale",
-            minimum=0.0,
-            maximum=20.0,
-            step=0.5,
-            value=1.0,
-        )
-
-    # Example prompts
-    gr.Examples(
-        examples=[
-            "A woman wearing leather pants",
-            "A woman in a red leather jacket",
-        ],
-        inputs=[prompt],
-    )
-
 if __name__ == "__main__":
     demo.launch()
 
New side of the hunk (added lines prefixed with "+"):

 from diffusers import DiffusionPipeline
 from datasets import load_dataset

+# Device configuration for CPU-only use
+device = "cpu"
+model_repo_id = "stabilityai/sdxl-turbo"  # Keeping the specified model

+# Load the pipeline configured for CPU
+pipe = DiffusionPipeline.from_pretrained(model_repo_id)
 pipe = pipe.to(device)

 # Loading the dataset from Hugging Face
 dataset = load_dataset("LEIDIA/Data_Womleimg")

+
+# Parameters for loading the custom dataset
+dataset_descriptions = [
     "A woman wearing a full blue leather catsuit",
+    "A woman in black leather pants",
+    "A legs woman in tight high blue leather boots",
+    "A woman in a long red leather jacket, red leather shorts, and tight high red leather boots",
     "A legs woman in cream color leather pants",
+    "A woman in purple leather leggings with tight high black leather boots",
+    "A woman in black leather top and a long black leather skirt",
+    "A blonde woman with long curly hair wearing a yellow mini tight leather skirt",
+    "A thin Asian woman wearing a thigh-long black leather dress",
+    "Simple high brown leather boots",
+    "A beautiful brunette woman wearing leather clothes",
+    "A beautiful brunette woman in a sleeveless black dress seated at a bar holding a glass of champagne, with a cozy and elegant atmosphere in the background.",
+    "A curly blonde woman wearing a bold red leather jacket paired with black leather tight pants and red high-heeled leather boots, creating a modern and edgy vibe.",
+    "An ebony woman standing outdoors against a backdrop of rolling hills and a cloudy sky, wearing a striking outfit of a red leather shirt, black leather mini corset, red plaid skirt, and knee-high red lace-up leather boots.",
+    "A blonde curly woman wearing a fitted, shiny blue leather outfit including a jacket and pants with metallic buttons, paired with knee-high boots, in a neutral-colored room.",
+    "A girl in a black leather outfit with a heart-shaped cutout top, high-waisted leggings, and a purple cape, giving a superhero vibe.",
+    "A girl in a sleek black leather cropped top with a zip closure and high-waisted bottom, paired with long black gloves and pink hair styled in a ponytail, creating a bold and fashion-forward look.",
+    "A girl wearing a form-fitting black leather top, with long pink hair cascading down, creating a striking contrast in a neutral background."
 ]

+# Default parameters for fast generation
+DEFAULT_PROMPT = "A beautiful brunette woman wearing a leather outfit"
+DEFAULT_INFERENCE_STEPS = 6
+IMAGE_WIDTH = 512
+IMAGE_HEIGHT = 512
+GUIDANCE_SCALE = 1.5

+# Simple function to generate an image
+def infer_simple(prompt):
     image = pipe(
         prompt=prompt,
+        num_inference_steps=DEFAULT_INFERENCE_STEPS,
+        guidance_scale=GUIDANCE_SCALE,
+        height=IMAGE_HEIGHT,
+        width=IMAGE_WIDTH,
     ).images[0]
+    return image

 # Gradio interface
 with gr.Blocks() as demo:
     with gr.Row():
+        gr.Markdown("## Text-to-Image Wom Test - Quick CPU Version")

+    prompt = gr.Textbox(
+        label="Prompt",
+        value=DEFAULT_PROMPT,
+        placeholder="Describe the image you want to generate",
     )

     generate_button = gr.Button("Generate")
     result = gr.Image(label="Generated Image")

     generate_button.click(
+        fn=infer_simple,
+        inputs=prompt,
         outputs=result,
     )

 if __name__ == "__main__":
     demo.launch()
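
For a quick local check of the committed change, the sketch below calls infer_simple() once outside Gradio. It is only a rough illustration, assuming the new file above is saved as app.py in the same environment (CPU-only, with the stabilityai/sdxl-turbo weights and the LEIDIA/Data_Womleimg dataset reachable); the smoke_test.py name and the output path are made up for the example, and a single generation on CPU can take several minutes.

# smoke_test.py - hypothetical helper, not part of the commit
# Assumes it sits next to the app.py added above. Importing app builds the
# Gradio Blocks and loads the pipeline/dataset, but does not launch the UI,
# since app.py only calls demo.launch() under its own __main__ guard.
from app import infer_simple, DEFAULT_PROMPT

if __name__ == "__main__":
    image = infer_simple(DEFAULT_PROMPT)  # the pipeline returns a PIL.Image
    image.save("smoke_test.png")
    print("Saved smoke_test.png")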