foivospar committed
Commit 12903b1 · 1 Parent(s): 5ef2207
Files changed (2)
  1. app.py +12 -12
  2. requirements.txt +0 -1
app.py CHANGED
@@ -17,7 +17,7 @@ from insightface.app import FaceAnalysis
 from PIL import Image
 import numpy as np
 import random
-import os
+#import os
 
 import gradio as gr
 
@@ -34,7 +34,7 @@ else:
 # download models
 from huggingface_hub import hf_hub_download
 #from modelscope import snapshot_download
-from modelscope.hub.file_download import model_file_download
+#from modelscope.hub.file_download import model_file_download
 
 hf_hub_download(repo_id="FoivosPar/Arc2Face", filename="arc2face/config.json", local_dir="./models")
 hf_hub_download(repo_id="FoivosPar/Arc2Face", filename="arc2face/diffusion_pytorch_model.safetensors", local_dir="./models")
@@ -43,15 +43,15 @@ hf_hub_download(repo_id="FoivosPar/Arc2Face", filename="encoder/pytorch_model.bi
 hf_hub_download(repo_id="FoivosPar/Arc2Face", filename="arcface.onnx", local_dir="./models/antelopev2")
 
 #base_model = snapshot_download('AI-ModelScope/stable-diffusion-v1-5', cache_dir='./models')
-model_dir = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='model_index.json', cache_dir='./models')
-base_model = os.path.dirname(model_dir)
-_ = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='scheduler/scheduler_config.json', cache_dir='./models')
-_ = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='tokenizer/merges.txt', cache_dir='./models')
-_ = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='tokenizer/special_tokens_map.json', cache_dir='./models')
-_ = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='tokenizer/tokenizer_config.json', cache_dir='./models')
-_ = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='tokenizer/vocab.json', cache_dir='./models')
-_ = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='vae/config.json', cache_dir='./models')
-_ = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='vae/diffusion_pytorch_model.safetensors', cache_dir='./models')
+#model_dir = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='model_index.json', cache_dir='./models')
+#base_model = os.path.dirname(model_dir)
+#_ = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='scheduler/scheduler_config.json', cache_dir='./models')
+#_ = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='tokenizer/merges.txt', cache_dir='./models')
+#_ = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='tokenizer/special_tokens_map.json', cache_dir='./models')
+#_ = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='tokenizer/tokenizer_config.json', cache_dir='./models')
+#_ = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='tokenizer/vocab.json', cache_dir='./models')
+#_ = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='vae/config.json', cache_dir='./models')
+#_ = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='vae/diffusion_pytorch_model.safetensors', cache_dir='./models')
 
 # Load face detection and recognition package
 app = FaceAnalysis(name='antelopev2', root='./', providers=['CPUExecutionProvider'])
@@ -59,6 +59,7 @@ app.prepare(ctx_id=0, det_size=(640, 640))
 
 # Load pipeline
 #base_model = 'runwayml/stable-diffusion-v1-5'
+base_model = 'ashllay/stable-diffusion-v1-5-archive'
 encoder = CLIPTextModelWrapper.from_pretrained(
     'models', subfolder="encoder", torch_dtype=dtype
 )
@@ -71,7 +72,6 @@ pipeline = StableDiffusionPipeline.from_pretrained(
     unet=unet,
     torch_dtype=dtype,
     safety_checker=None,
-    feature_extractor=None
 )
 pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
 pipeline = pipeline.to(device)
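
Taken together, the app.py changes replace the per-file ModelScope download of Stable Diffusion v1.5 with a base model resolved directly from the Hugging Face Hub mirror ashllay/stable-diffusion-v1-5-archive, which is why requirements.txt drops modelscope (see the second diff below). A minimal sketch of the resulting loading path follows; the dtype/device setup, the UNet2DConditionModel line, the arc2face import path, and the text_encoder keyword are assumptions filled in for illustration, since the diff elides those parts of the call.

import torch
from diffusers import StableDiffusionPipeline, UNet2DConditionModel, DPMSolverMultistepScheduler
from huggingface_hub import hf_hub_download
from arc2face import CLIPTextModelWrapper  # import path assumed; the diff only shows the class name

# dtype/device setup assumed (not shown in this diff)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
dtype = torch.float16 if device == 'cuda' else torch.float32

# Arc2Face weights are still fetched from the Hugging Face Hub, as in the diff
hf_hub_download(repo_id="FoivosPar/Arc2Face", filename="arc2face/config.json", local_dir="./models")
hf_hub_download(repo_id="FoivosPar/Arc2Face", filename="arc2face/diffusion_pytorch_model.safetensors", local_dir="./models")

# The SD 1.5 base model now comes straight from a Hub mirror, with no ModelScope step
base_model = 'ashllay/stable-diffusion-v1-5-archive'

encoder = CLIPTextModelWrapper.from_pretrained('models', subfolder="encoder", torch_dtype=dtype)
unet = UNet2DConditionModel.from_pretrained('models', subfolder="arc2face", torch_dtype=dtype)  # assumed; not shown in the diff

pipeline = StableDiffusionPipeline.from_pretrained(
    base_model,
    text_encoder=encoder,   # assumed keyword; the diff elides this part of the call
    unet=unet,
    torch_dtype=dtype,
    safety_checker=None,    # feature_extractor=None override dropped in this commit
)
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
pipeline = pipeline.to(device)

With the full Hub repo available, from_pretrained can resolve the scheduler, tokenizer, VAE, and feature extractor on its own, which appears to be why the individual model_file_download calls and the explicit feature_extractor=None override are removed.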
requirements.txt CHANGED
@@ -5,7 +5,6 @@ diffusers==0.23.0
 transformers==4.34.1
 peft
 accelerate
-modelscope
 insightface
 onnxruntime-gpu
 gradio