Grandediw committed
Commit a534797 · verified · 1 Parent(s): 1f2775d

Update Interface

Files changed (1)
  1. app.py +22 -5
app.py CHANGED
@@ -4,16 +4,33 @@ import random
 from diffusers import DiffusionPipeline
 import torch
 
-device = "cuda" if torch.cuda.is_available() else "cpu"
-model_repo_id = "Grandediw/lora_model"  # Use the fine-tuned model
+from diffusers import DiffusionPipeline
+import torch
+from huggingface_hub import login
+
+import yaml
+
+with open("config.yaml", "r") as f:
+    config = yaml.safe_load(f)
 
-# Adjust torch data type based on device
+token = config.get("huggingface_token")
+
+# Login to Hugging Face Hub
+login(token)
+
+# Model details
+device = "cuda" if torch.cuda.is_available() else "cpu"
 torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
 
-# Load the model pipeline
-pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
+# Load model
+pipe = DiffusionPipeline.from_pretrained(
+    "Grandediw/lora_model",
+    torch_dtype=torch_dtype,
+    use_auth_token=True  # Enables private model access
+)
 pipe = pipe.to(device)
 
+
 # Constants
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
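
For reference, a minimal sketch of how the updated app.py wires these pieces together end to end, assuming config.yaml sits next to the script and holds the huggingface_token key read by the commit. The final generation call, prompt string, and output filename are illustrative additions for testing, not part of this commit.

import random

import numpy as np
import torch
import yaml
from diffusers import DiffusionPipeline
from huggingface_hub import login

# config.yaml is expected to hold the Hub token under the key used in app.py, e.g.:
#   huggingface_token: hf_xxxxxxxxxxxxxxxx   (placeholder value)
with open("config.yaml", "r") as f:
    config = yaml.safe_load(f)
login(config.get("huggingface_token"))

device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

# Same pipeline setup as the commit; use_auth_token mirrors the committed code.
pipe = DiffusionPipeline.from_pretrained(
    "Grandediw/lora_model",
    torch_dtype=torch_dtype,
    use_auth_token=True,
).to(device)

# Illustrative single generation with a seed bounded by the app's MAX_SEED constant.
MAX_SEED = np.iinfo(np.int32).max
seed = random.randint(0, MAX_SEED)
generator = torch.Generator(device=device).manual_seed(seed)
image = pipe("a sample prompt", generator=generator).images[0]  # hypothetical prompt
image.save("sample.png")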