uruguayai commited on
Commit
5a1023e
verified
1 Parent(s): f4e36bf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -24
app.py CHANGED
@@ -1,33 +1,36 @@
1
  import gradio as gr
2
- from safetensors.torch import load_file
3
- import os
4
- import requests
5
 
6
- # URLs y rutas de archivos
7
- MODEL_URL = "https://huggingface.co/datasets/uruguayai/fooocus/resolve/main/juggernautXL_v8Rundiffusion.safetensors"
8
- MODEL_PATH = "/home/user/app/juggernautXL_v8Rundiffusion.safetensors"
9
-
10
def download_file(url, path):
    """Download *url* to *path* unless the file already exists.

    The checkpoint is multi-GB, so the response is streamed to disk in
    chunks instead of being buffered fully in memory.

    Args:
        url: source URL of the file.
        path: local filesystem destination.

    Raises:
        Exception: if the server answers with a non-200 status code.
    """
    if os.path.exists(path):
        return
    # stream=True avoids holding the whole (multi-GB) payload in RAM;
    # the timeout prevents hanging forever on a dead connection.
    response = requests.get(url, stream=True, timeout=60)
    if response.status_code != 200:
        raise Exception(f"Failed to download {url}, status code: {response.status_code}")
    with open(path, 'wb') as f:
        # 1 MiB chunks keep memory flat regardless of file size.
        for chunk in response.iter_content(chunk_size=1 << 20):
            f.write(chunk)
    print(f"Downloaded {path}")
19
 
20
def load_model():
    """Fetch the checkpoint if necessary and return its tensor dict."""
    # Make sure the weights file is present locally before loading it.
    download_file(MODEL_URL, MODEL_PATH)
    # safetensors load_file returns a dict of tensors.
    return load_file(MODEL_PATH)
 
 
 
24
 
25
def predict(input_text):
    """Simple placeholder prediction: echo the prompt in a status string."""
    return "Model loaded and ready: {}".format(input_text)
 
 
 
 
28
 
29
# Minimal text-in / text-out Gradio UI wired to the predict() stub.
iface = gr.Interface(inputs="text", outputs="text", fn=predict)
 
 
 
 
 
 
 
30
 
31
if __name__ == "__main__":
    # Make sure the model is loaded before serving requests.
    load_model()
    iface.launch(server_name="0.0.0.0", server_port=7860)
 
1
  import gradio as gr
2
+ from diffusers import StableDiffusionPipeline
3
+ import torch
 
4
 
5
# Model configuration — swap MODEL_NAME for your own HF-hosted checkpoint if needed.
MODEL_NAME = "CompVis/stable-diffusion-v1-4"

# Prefer the GPU when PyTorch can see one; otherwise fall back to the CPU.
device = "cpu"
if torch.cuda.is_available():
    device = "cuda"
 
 
 
 
 
 
 
 
 
 
8
 
9
def load_model():
    """Load the Stable Diffusion pipeline onto the selected device.

    Returns:
        A StableDiffusionPipeline moved to ``device`` and ready for inference.
    """
    # fp16 halves GPU memory use, but half precision is not properly
    # supported for CPU inference, so use fp32 when no CUDA device exists.
    dtype = torch.float16 if device == "cuda" else torch.float32
    pipe = StableDiffusionPipeline.from_pretrained(MODEL_NAME, torch_dtype=dtype)
    return pipe.to(device)

# Load the model once at application start-up so requests are served fast.
model = load_model()
16
 
17
def generate_image(prompt):
    """Run the loaded pipeline on *prompt* and return the first image.

    An empty or missing prompt yields None so Gradio renders no output.
    """
    if not prompt:
        return None
    # Inference only — disable autograd bookkeeping to save memory.
    with torch.no_grad():
        result = model(prompt)
    return result.images[0]
24
 
25
# Gradio front-end: one text prompt in, one generated image out.
iface = gr.Interface(
    fn=generate_image,
    title="AI Image Generator",
    description="Generate images from text prompts using Stable Diffusion.",
    inputs=gr.Textbox(label="Enter your prompt"),
    outputs=gr.Image(label="Generated Image"),
)
33
 
34
if __name__ == "__main__":
    # Serve the UI on all interfaces at the standard HF Spaces port.
    iface.launch(server_name="0.0.0.0", server_port=7860)