Update app.py
app.py CHANGED
@@ -8,53 +8,6 @@ from transformers import AutoModelForCausalLM
 from transformers import TextIteratorStreamer
 from threading import Thread
 
-import importlib.metadata
-from packaging import version
-from transformers.utils.import_utils import (
-    is_torch_available,
-    _is_package_available,
-    is_torch_mlu_available
-)
-
-def diagnose_flash_attn_2_availability():
-    if not is_torch_available():
-        return "PyTorch is not available."
-
-    if not _is_package_available("flash_attn"):
-        return "flash_attn package is not installed."
-
-    import torch
-
-    if not (torch.cuda.is_available() or is_torch_mlu_available()):
-        return "Neither CUDA nor MLU is available."
-
-    flash_attn_version = importlib.metadata.version("flash_attn")
-
-    if torch.version.cuda:
-        required_version = "2.1.0"
-        if version.parse(flash_attn_version) < version.parse(required_version):
-            return f"CUDA is available, but flash_attn version {flash_attn_version} is installed. Version >= {required_version} is required."
-    elif torch.version.hip:
-        required_version = "2.0.4"
-        if version.parse(flash_attn_version) < version.parse(required_version):
-            return f"HIP is available, but flash_attn version {flash_attn_version} is installed. Version >= {required_version} is required."
-    elif is_torch_mlu_available():
-        required_version = "2.3.3"
-        if version.parse(flash_attn_version) < version.parse(required_version):
-            return f"MLU is available, but flash_attn version {flash_attn_version} is installed. Version >= {required_version} is required."
-    else:
-        return "Unknown PyTorch backend."
-
-    return "All requirements for Flash Attention 2 are met."
-
-# Run the diagnostic function
-result = diagnose_flash_attn_2_availability()
-if result != "All requirements for Flash Attention 2 are met.":
-    print(f"Flash Attention 2 is not available: {result}")
-    print("Using `flash_attention_2` requires having the correct version of `flash_attn` installed.")
-else:
-    print("Flash Attention 2 can be used.")
-
 model_name = 'AIDC-AI/Ovis2-16B'
 
 # load model