[build-system]
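# setuptools 61.0 is the first release that reads the PEP 621 [project] table
# used below, hence the lower bound on the build requirement.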
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llava_med"
version = "1.5.0"
description = "Towards a GPT-4-like large language and visual assistant."
readme = "README.md"
requires-python = ">=3.8"
classifiers = [
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: Apache Software License",
]
dependencies = [
    "transformers==4.36.2",
    "tokenizers>=0.15.0",
    "sentencepiece==0.1.99",
    "shortuuid",
    "accelerate==0.21.0",
    "peft==0.4.0",
    "bitsandbytes==0.41.0",
    "pydantic<2,>=1",
    "markdown2[all]",
    "protobuf",
    "numpy",
    "scikit-learn==1.2.2",
    "gradio==3.35.2",
    "gradio_client==0.2.9",
    "requests",
    "httpx==0.24.0",
    "uvicorn",
    "fastapi",
    "einops==0.6.1",
    "einops-exts==0.0.4",
    "timm==0.9.12",
    "tiktoken",
    "openai==1.12.0",
    "backoff",
]

[project.optional-dependencies]
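# Extras are not installed by default; with pip they can be selected as, e.g.,
# `pip install -e ".[train]"` or `pip install -e ".[eval]"` from the repo root.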
train = ["deepspeed==0.9.5", "ninja", "wandb"]
eval = [
    "azure-ai-ml",
    "datasets",
    "fire",
    "opencv-python",
    "openpyxl==3.1.2",
    "pillow==9.4.0",
    "python-Levenshtein",
    "rich",
    "streamlit==1.29.0",
    "typer[all]",
    "word2number",
]

[project.urls]
"Homepage" = "https://github.com/microsoft/LLaVA-Med"
"Bug Tracker" = "https://github.com/microsoft/LLaVA-Med/issues"

[tool.setuptools.packages.find]
exclude = [
    "assets*",
    "benchmark*",
    "docs",
    "dist*",
    "playground*",
    "scripts*",
    "tests*",
]

[tool.wheel]
exclude = [
    "assets*",
    "benchmark*",
    "docs",
    "dist*",
    "playground*",
    "scripts*",
    "tests*",
]

[tool.black]
line-length = 120
skip-string-normalization = true

[tool.pyright]
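# Paths excluded from pyright's static type checking: caches, data, model
# checkpoints, experiment outputs, and docs.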
exclude = [
    "**/__pycache__",
    "playground",
    "_results",
    "_data",
    "models",
    "checkpoints",
    "wandb",
    "docs",
]