vcollos committed (verified)
Commit 74ae722 · Parent(s): 51becd2

Update app.py

Files changed (1):
  1. app.py +9 -11
app.py CHANGED
@@ -251,6 +251,7 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 # URL of config.json in the Space
 config_url = "https://huggingface.co/spaces/vcollos/family/resolve/main/config.json"
 local_config_path = "./config.json"
+weights_path = "./ae.safetensors"
 
 # Download config.json if it does not yet exist locally
 if not os.path.exists(local_config_path):
@@ -259,13 +260,9 @@ if not os.path.exists(local_config_path):
     with open(local_config_path, "wb") as f:
         f.write(response.content)
 
-# Path to the weights
-base_model = "SG161222/Verus_Vision_1.0b"
-weights_path = "./ae.safetensors"
-
 # Download the model weights if needed
 if not os.path.exists(weights_path):
-    weights_url = f"https://huggingface.co/{base_model}/resolve/main/ae.safetensors"
+    weights_url = f"https://huggingface.co/SG161222/Verus_Vision_1.0b/resolve/main/ae.safetensors"
     response = requests.get(weights_url)
     response.raise_for_status()
     with open(weights_path, "wb") as f:
@@ -273,21 +270,22 @@ if not os.path.exists(weights_path):
 
 # Load the autoencoder with the local config.json
 good_vae = AutoencoderKL.from_pretrained(
-    pretrained_model_name_or_path=None,  # do not fetch from the repository
-    config=local_config_path,            # local configuration
-    filename=weights_path,               # local weights
+    pretrained_model_name_or_path=local_config_path,  # local path to config.json
+    filename=weights_path,                            # local weights
     torch_dtype=dtype
 ).to(device)
 
-# Load the main pipeline with the local config.json
+# Configure the main pipeline with the local config.json
 pipe = DiffusionPipeline.from_pretrained(
-    pretrained_model_name_or_path=None,  # do not fetch from the repository
-    config=local_config_path,            # local configuration
+    pretrained_model_name_or_path=local_config_path,  # local path to config.json
     filename="VerusVision_1.0b_Transformer_fp16.safetensors",
     torch_dtype=dtype
 ).to(device)
 
 print("Modelo carregado com sucesso!")
+
+
+
 # If Image-to-Image is needed
 pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
     base_model,
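A note on the download step in this commit: instead of fetching config.json and ae.safetensors with raw requests calls guarded by os.path.exists checks, the same files can usually be pulled through huggingface_hub, which handles caching and resumable downloads. This is a minimal sketch, not the committed code; the repo ids and filenames are taken from the diff above, and hf_hub_download is assumed to be available in the Space's environment.

# Sketch: fetch the same files via huggingface_hub instead of raw requests.
# Repo ids and filenames come from the diff above; hub caching makes the
# os.path.exists() guards unnecessary.
from huggingface_hub import hf_hub_download

local_config_path = hf_hub_download(
    repo_id="vcollos/family",              # the Space hosting config.json
    repo_type="space",
    filename="config.json",
)
weights_path = hf_hub_download(
    repo_id="SG161222/Verus_Vision_1.0b",  # base model repo named in the diff
    filename="ae.safetensors",
)

hf_hub_download returns the cached local path, so the rest of app.py can keep using local_config_path and weights_path unchanged.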
 
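On the VAE change itself: as far as I can tell, AutoencoderKL.from_pretrained expects a repo id or a local model directory and does not accept a filename argument, so pointing pretrained_model_name_or_path at a config.json file is unlikely to load the local ae.safetensors. For a standalone .safetensors checkpoint, from_single_file is the usual route in diffusers. A hedged sketch, assuming the downloaded ae.safetensors is in a format from_single_file recognizes; dtype and device are placeholders for the values app.py defines earlier.

# Sketch: load the standalone VAE checkpoint directly from the local file.
# Assumption: ae.safetensors is a single-file VAE checkpoint that
# AutoencoderKL.from_single_file can parse.
import torch
from diffusers import AutoencoderKL

dtype = torch.float16  # assumption; app.py defines its own dtype earlier
device = "cuda" if torch.cuda.is_available() else "cpu"

good_vae = AutoencoderKL.from_single_file(
    "./ae.safetensors",  # weights_path from the diff
    torch_dtype=dtype,
).to(device)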
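One more observation on the tail of the diff: the commit deletes the base_model assignment, but AutoPipelineForImage2Image.from_pretrained(base_model, ...) still references it a few lines later, so that call would raise a NameError unless base_model is defined elsewhere in app.py. If the goal is simply an image-to-image variant of the pipeline that was just loaded, diffusers can usually build one from the components already in memory rather than downloading the base model again. A sketch under that assumption:

# Sketch: derive an image-to-image pipeline from the text-to-image pipeline
# already in memory, instead of re-downloading base_model.
# Assumes `pipe` was loaded successfully earlier in app.py and that its
# pipeline class has an image-to-image counterpart registered in diffusers.
import torch
from diffusers import AutoPipelineForImage2Image

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe_i2i = AutoPipelineForImage2Image.from_pipe(pipe).to(device)

Because from_pipe reuses the existing components, it avoids a second download and a duplicate copy of the weights on the GPU.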