blanchon committed on
Commit
5888f20
·
1 Parent(s): 81d4850

update deps

Browse files
Files changed (2) hide show
  1. app.py +3 -1
  2. requirements.txt +1 -1
app.py CHANGED
@@ -1,3 +1,5 @@
 
 
1
  import gradio as gr
2
  import PIL
3
  import spaces
@@ -12,7 +14,7 @@ from transformers import LlamaForCausalLM, PreTrainedTokenizerFast
12
  MODEL_PREFIX: str = "HiDream-ai"
13
  LLAMA_MODEL_NAME: str = "meta-llama/Meta-Llama-3.1-8B-Instruct"
14
  MODEL_PATH = "HiDream-ai/HiDream-I1-Dev"
15
- MODEL_CONFIGS: dict[str, dict] = {
16
  "guidance_scale": 0.0,
17
  "num_inference_steps": 28,
18
  "shift": 6.0,
 
1
+ from typing import Any
2
+
3
  import gradio as gr
4
  import PIL
5
  import spaces
 
14
  MODEL_PREFIX: str = "HiDream-ai"
15
  LLAMA_MODEL_NAME: str = "meta-llama/Meta-Llama-3.1-8B-Instruct"
16
  MODEL_PATH = "HiDream-ai/HiDream-I1-Dev"
17
+ MODEL_CONFIGS: dict[str, Any] = {
18
  "guidance_scale": 0.0,
19
  "num_inference_steps": 28,
20
  "shift": 6.0,
requirements.txt CHANGED
@@ -4,7 +4,7 @@ diffusers
4
  transformers
5
  accelerate
6
  xformers
7
- # https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.4cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
8
  einops
9
  gradio
10
  spaces
 
4
  transformers
5
  accelerate
6
  xformers
7
+ flash-attn
8
  einops
9
  gradio
10
  spaces