John6666 committed on
Commit
29bc9a9
·
verified ·
1 Parent(s): 8414203

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -129,12 +129,12 @@ def load_pipeline(repo_id: str, cn_on: bool, model_type: str, task: str, dtype_s
129
  #transformer, text_encoder_2 = load_quantized_control(control_repo, dtype, hf_token)
130
  pipe = pipeline.from_pretrained(models_dev[0], transformer=transformer, text_encoder_2=text_encoder_2, torch_dtype=dtype, token=hf_token)
131
  pipe_i2i = pipeline_i2i.from_pipe(pipe, transformer=transformer, text_encoder_2=text_encoder_2, torch_dtype=dtype)
132
- elif ".safetensors" in repo_id: # from single file # or ".gguf" in repo_id
133
  file_url = repo_id.replace("/resolve/main/", "/blob/main/").replace("?download=true", "")
134
- #if ".gguf" in file_url: transformer = transformer_model.from_single_file(file_url, subfolder="transformer",
135
- # quantization_config=GGUFQuantizationConfig(compute_dtype=dtype), torch_dtype=dtype, config=single_file_base_model)
136
- #else:
137
- transformer = transformer_model.from_single_file(file_url, subfolder="transformer", torch_dtype=dtype, config=single_file_base_model)
138
  pipe = pipeline.from_pretrained(single_file_base_model, transformer=transformer, torch_dtype=dtype, token=hf_token, **kwargs)
139
  pipe_i2i = pipeline_i2i.from_pretrained(single_file_base_model, vae=pipe.vae, transformer=pipe.transformer,
140
  text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer, text_encoder_2=pipe.text_encoder_2, tokenizer_2=pipe.tokenizer_2,
 
129
  #transformer, text_encoder_2 = load_quantized_control(control_repo, dtype, hf_token)
130
  pipe = pipeline.from_pretrained(models_dev[0], transformer=transformer, text_encoder_2=text_encoder_2, torch_dtype=dtype, token=hf_token)
131
  pipe_i2i = pipeline_i2i.from_pipe(pipe, transformer=transformer, text_encoder_2=text_encoder_2, torch_dtype=dtype)
132
+ elif ".safetensors" in repo_id or ".gguf" in repo_id: # from single file
133
  file_url = repo_id.replace("/resolve/main/", "/blob/main/").replace("?download=true", "")
134
+ if ".gguf" in file_url: transformer = transformer_model.from_single_file(file_url, subfolder="transformer",
135
+ quantization_config=GGUFQuantizationConfig(compute_dtype=dtype), torch_dtype=dtype, config=single_file_base_model)
136
+ else: transformer = transformer_model.from_single_file(file_url, subfolder="transformer", torch_dtype=dtype, config=single_file_base_model)
137
+ if not transformer: raise Exception(f"URL not found. {file_url}")
138
  pipe = pipeline.from_pretrained(single_file_base_model, transformer=transformer, torch_dtype=dtype, token=hf_token, **kwargs)
139
  pipe_i2i = pipeline_i2i.from_pretrained(single_file_base_model, vae=pipe.vae, transformer=pipe.transformer,
140
  text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer, text_encoder_2=pipe.text_encoder_2, tokenizer_2=pipe.tokenizer_2,