Merge branch 'main' of https://huggingface.co/spaces/li-qing/FIRE
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- assets/fjyn0081_24.png +0 -0
- assets/test_10422.jpg +0 -0
- assets/validation_Public_Health_12.jpg +0 -0
- checkpoints/{README.md → llava-next-llama-3-8b-student-lora-merged-110224/README.md} +0 -0
- checkpoints/{adapter_config.json → llava-next-llama-3-8b-student-lora-merged-110224/adapter_config.json} +0 -0
- checkpoints/{adapter_model.safetensors → llava-next-llama-3-8b-student-lora-merged-110224/adapter_model.safetensors} +1 -1
- checkpoints/{config.json → llava-next-llama-3-8b-student-lora-merged-110224/config.json} +0 -0
- checkpoints/{non_lora_trainables.bin → llava-next-llama-3-8b-student-lora-merged-110224/non_lora_trainables.bin} +1 -1
- checkpoints/{trainer_state.json → llava-next-llama-3-8b-student-lora-merged-110224/trainer_state.json} +0 -0
- checkpoints/llava-next-llama-3-8b-student-lora-merged-115124/README.md +202 -0
- checkpoints/llava-next-llama-3-8b-student-lora-merged-115124/adapter_config.json +30 -0
- checkpoints/llava-next-llama-3-8b-student-lora-merged-115124/adapter_model.safetensors +3 -0
- checkpoints/llava-next-llama-3-8b-student-lora-merged-115124/config.json +68 -0
- checkpoints/llava-next-llama-3-8b-student-lora-merged-115124/non_lora_trainables.bin +3 -0
- checkpoints/llava-next-llama-3-8b-student-lora-merged-115124/trainer_state.json +0 -0
- checkpoints/llava-next-llama-3-8b-teacher-lora-merged-115124/README.md +202 -0
- checkpoints/llava-next-llama-3-8b-teacher-lora-merged-115124/adapter_config.json +29 -0
- checkpoints/llava-next-llama-3-8b-teacher-lora-merged-115124/adapter_model.safetensors +3 -0
- checkpoints/llava-next-llama-3-8b-teacher-lora-merged-115124/config.json +68 -0
- checkpoints/llava-next-llama-3-8b-teacher-lora-merged-115124/non_lora_trainables.bin +3 -0
- checkpoints/llava-next-llama-3-8b-teacher-lora-merged-115124/trainer_state.json +0 -0
- requirements.txt +2 -1
- src/__pycache__/__init__.cpython-310.pyc +0 -0
- src/__pycache__/constants.cpython-310.pyc +0 -0
- src/__pycache__/conversation.cpython-310.pyc +0 -0
- src/__pycache__/utils.cpython-310.pyc +0 -0
- src/conversation.py +2 -2
- src/model/__pycache__/__init__.cpython-310.pyc +0 -0
- src/model/__pycache__/compression.cpython-310.pyc +0 -0
- src/model/__pycache__/llama_condense_monkey_patch.cpython-310.pyc +0 -0
- src/model/__pycache__/model_adapter.cpython-310.pyc +0 -0
- src/model/__pycache__/model_chatglm.cpython-310.pyc +0 -0
- src/model/__pycache__/model_cllm.cpython-310.pyc +0 -0
- src/model/__pycache__/model_codet5p.cpython-310.pyc +0 -0
- src/model/__pycache__/model_exllama.cpython-310.pyc +0 -0
- src/model/__pycache__/model_falcon.cpython-310.pyc +0 -0
- src/model/__pycache__/model_registry.cpython-310.pyc +0 -0
- src/model/__pycache__/model_xfastertransformer.cpython-310.pyc +0 -0
- src/model/__pycache__/model_yuan2.cpython-310.pyc +0 -0
- src/model/__pycache__/monkey_patch_non_inplace.cpython-310.pyc +0 -0
- src/model/model_llava.py +18 -14
- src/model/model_registry.py +6 -2
- src/modules/__pycache__/__init__.cpython-310.pyc +0 -0
- src/modules/__pycache__/awq.cpython-310.pyc +0 -0
- src/modules/__pycache__/exllama.cpython-310.pyc +0 -0
- src/modules/__pycache__/gptq.cpython-310.pyc +0 -0
- src/modules/__pycache__/xfastertransformer.cpython-310.pyc +0 -0
- src/serve/__pycache__/__init__.cpython-310.pyc +0 -0
- src/serve/__pycache__/api_provider.cpython-310.pyc +0 -0
- src/serve/__pycache__/gradio_block_arena_named.cpython-310.pyc +0 -0
assets/fjyn0081_24.png
ADDED
assets/test_10422.jpg
ADDED
assets/validation_Public_Health_12.jpg
ADDED
checkpoints/{README.md → llava-next-llama-3-8b-student-lora-merged-110224/README.md}
RENAMED
File without changes

checkpoints/{adapter_config.json → llava-next-llama-3-8b-student-lora-merged-110224/adapter_config.json}
RENAMED
File without changes
checkpoints/{adapter_model.safetensors → llava-next-llama-3-8b-student-lora-merged-110224/adapter_model.safetensors}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:b80ac0467df0c61c94d95b60189f68c54f4dfb97c40fbbaa3f82905736fb2fd4
 size 67143744
checkpoints/{config.json → llava-next-llama-3-8b-student-lora-merged-110224/config.json}
RENAMED
File without changes
checkpoints/{non_lora_trainables.bin → llava-next-llama-3-8b-student-lora-merged-110224/non_lora_trainables.bin}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:feefff1c747c9a282d8c702b927f2256d4ac64e590efdc5e7c44650290f26a50
 size 41961648
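These two diffs touch only Git LFS pointer files: the weights themselves live in LFS storage, and the `oid sha256:` line is the digest of the actual payload. A minimal sketch for checking a downloaded file against its pointer (the local path is an assumption about where the clone lives):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so multi-GB checkpoints never sit in memory at once."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            digest.update(block)
    return digest.hexdigest()

# Digest copied from the pointer diff above; the path assumes a local checkout.
expected = "feefff1c747c9a282d8c702b927f2256d4ac64e590efdc5e7c44650290f26a50"
path = "checkpoints/llava-next-llama-3-8b-student-lora-merged-110224/non_lora_trainables.bin"
assert sha256_of(path) == expected, "LFS payload does not match its pointer"
```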
checkpoints/{trainer_state.json → llava-next-llama-3-8b-student-lora-merged-110224/trainer_state.json}
RENAMED
The diff for this file is too large to render. See the raw diff.
checkpoints/llava-next-llama-3-8b-student-lora-merged-115124/README.md
ADDED
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: Lin-Chen/open-llava-next-llama3-8b
+---
+
+# Model Card for Model ID
+
+<!-- Provide a quick summary of what the model is/does. -->
+
+
+
+## Model Details
+
+### Model Description
+
+<!-- Provide a longer summary of what this model is. -->
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+<!-- Provide the basic links for the model. -->
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+### Direct Use
+
+<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+<!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+[More Information Needed]
+
+### Recommendations
+
+<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+[More Information Needed]
+
+### Training Procedure
+
+<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+#### Speeds, Sizes, Times [optional]
+
+<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+[More Information Needed]
+
+## Evaluation
+
+<!-- This section describes the evaluation protocols and provides the results. -->
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+<!-- This should link to a Dataset Card if possible. -->
+
+[More Information Needed]
+
+#### Factors
+
+<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+[More Information Needed]
+
+#### Metrics
+
+<!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+<!-- Relevant interpretability work for the model goes here -->
+
+[More Information Needed]
+
+## Environmental Impact
+
+<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.11.1
checkpoints/llava-next-llama-3-8b-student-lora-merged-115124/adapter_config.json
ADDED
@@ -0,0 +1,30 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "Lin-Chen/open-llava-next-llama3-8b",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 256,
+  "lora_dropout": 0.05,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 64,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "k_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
+}
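This config declares a rank-64 LoRA (alpha 256, dropout 0.05) over the q/k/v projections of `Lin-Chen/open-llava-next-llama3-8b`. A hedged sketch of how such an adapter directory is typically attached with PEFT; the Space itself loads it through llava's `load_pretrained_model`, which understands the custom `llava_llama` architecture, so this generic path is illustrative only:

```python
from peft import PeftModel

def attach_lora(base_model, adapter_dir: str):
    """Attach adapter_config.json + adapter_model.safetensors to an
    already-constructed base model, then fold the update into its weights."""
    model = PeftModel.from_pretrained(base_model, adapter_dir)
    # merge_and_unload bakes the LoRA delta (B @ A * alpha/r) into the frozen
    # weights and returns a plain model with no PEFT wrappers.
    return model.merge_and_unload()

# model = attach_lora(base_model, "checkpoints/llava-next-llama-3-8b-student-lora-merged-115124")
```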
checkpoints/llava-next-llama-3-8b-student-lora-merged-115124/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f42b42ed4f8f216c6e1a20fede923656c1eb6f22752fa732f1c624b1c7e14d44
+size 94424168
checkpoints/llava-next-llama-3-8b-student-lora-merged-115124/config.json
ADDED
@@ -0,0 +1,68 @@
+{
+  "_name_or_path": "Lin-Chen/open-llava-next-llama3-8b",
+  "architectures": [
+    "LlavaLlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 128000,
+  "eos_token_id": 128001,
+  "freeze_mm_mlp_adapter": false,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "image_aspect_ratio": "anyres",
+  "image_grid_pinpoints": [
+    [
+      336,
+      672
+    ],
+    [
+      672,
+      336
+    ],
+    [
+      672,
+      672
+    ],
+    [
+      1008,
+      336
+    ],
+    [
+      336,
+      1008
+    ]
+  ],
+  "initializer_range": 0.02,
+  "intermediate_size": 14336,
+  "max_position_embeddings": 8192,
+  "mm_hidden_size": 1024,
+  "mm_patch_merge_type": "spatial_unpad",
+  "mm_projector_lr": null,
+  "mm_projector_type": "mlp2x_gelu",
+  "mm_use_im_patch_token": false,
+  "mm_use_im_start_end": false,
+  "mm_vision_select_feature": "patch",
+  "mm_vision_select_layer": -2,
+  "mm_vision_tower": "openai/clip-vit-large-patch14-336",
+  "mm_vision_tower_lr": 2e-06,
+  "model_type": "llava_llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 8,
+  "pad_token_id": 128256,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": null,
+  "rope_theta": 500000.0,
+  "tie_word_embeddings": false,
+  "tokenizer_model_max_length": 3192,
+  "tokenizer_padding_side": "right",
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.37.2",
+  "tune_mm_mlp_adapter": false,
+  "unfreeze_mm_vision_tower": true,
+  "use_cache": true,
+  "use_mm_proj": true,
+  "vocab_size": 128257
+}
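`"image_aspect_ratio": "anyres"` together with these `image_grid_pinpoints` means each input image is matched against five candidate canvases (multiples of the 336-px CLIP grid) before being cut into tiles. A self-contained sketch of the usual selection rule, maximize effective resolution and break ties on least padding, written independently of llava's own `select_best_resolution`:

```python
PINPOINTS = [(336, 672), (672, 336), (672, 672), (1008, 336), (336, 1008)]

def best_resolution(width: int, height: int, candidates=PINPOINTS):
    """Pick the canvas that keeps the most image pixels and wastes the least padding."""
    best, best_effective, least_waste = None, -1, float("inf")
    for cw, ch in candidates:
        scale = min(cw / width, ch / height)           # fit the whole image inside the canvas
        downscaled = int(width * scale) * int(height * scale)
        effective = min(downscaled, width * height)    # pixels that carry image content
        wasted = cw * ch - effective                   # padding the vision tower still encodes
        if effective > best_effective or (effective == best_effective and wasted < least_waste):
            best, best_effective, least_waste = (cw, ch), effective, wasted
    return best

print(best_resolution(800, 600))  # (672, 672): a 4:3 image keeps most pixels on the square canvas
```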
checkpoints/llava-next-llama-3-8b-student-lora-merged-115124/non_lora_trainables.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd5b0389306ba247fb26f3661c4cc7a0445ef5def1554e94c9462ac366c7debe
+size 41961648
checkpoints/llava-next-llama-3-8b-student-lora-merged-115124/trainer_state.json
ADDED
The diff for this file is too large to render. See the raw diff.
checkpoints/llava-next-llama-3-8b-teacher-lora-merged-115124/README.md
ADDED
@@ -0,0 +1,202 @@
(Byte-for-byte the same auto-generated PEFT model-card template as the student README above: library_name peft, base_model Lin-Chen/open-llava-next-llama3-8b, every field "[More Information Needed]", PEFT 0.11.1.)
checkpoints/llava-next-llama-3-8b-teacher-lora-merged-115124/adapter_config.json
ADDED
@@ -0,0 +1,29 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "Lin-Chen/open-llava-next-llama3-8b",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 256,
+  "lora_dropout": 0.05,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 64,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "k_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
+}
checkpoints/llava-next-llama-3-8b-teacher-lora-merged-115124/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3190200c1b6b0e91e3d29bb523ebd26152b9fb8156a031b830c48c1a3c07f0be
+size 67143744
checkpoints/llava-next-llama-3-8b-teacher-lora-merged-115124/config.json
ADDED
@@ -0,0 +1,68 @@
(Identical to the student config.json above except "tokenizer_model_max_length": 4096 rather than 3192.)
checkpoints/llava-next-llama-3-8b-teacher-lora-merged-115124/non_lora_trainables.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3676f1bea77732bc7833861067fd95232efb962269f935a2c6179e19489cceb8
+size 41961648
checkpoints/llava-next-llama-3-8b-teacher-lora-merged-115124/trainer_state.json
ADDED
The diff for this file is too large to render. See the raw diff.
requirements.txt
CHANGED
@@ -5,4 +5,5 @@ numpy<2
 peft
 sentencepiece
 protobuf
-loguru
+loguru
+gradio==4.29.0
src/__pycache__/__init__.cpython-310.pyc
DELETED
Binary file (123 Bytes)

src/__pycache__/constants.cpython-310.pyc
DELETED
Binary file (2.56 kB)

src/__pycache__/conversation.cpython-310.pyc
DELETED
Binary file (38.8 kB)

src/__pycache__/utils.cpython-310.pyc
DELETED
Binary file (13.6 kB)
src/conversation.py
CHANGED
@@ -165,10 +165,10 @@ class Conversation:
         ret += ""
         for i, (role, message) in enumerate(self.messages):
             if message:
-                logger.info("msg={}", message)
+                logger.info("msg = {}", message)
                 if type(message) is tuple:
                     message, images = message
-                    message = "<image>" * len(images) + message
+                    message = "<image>" * len(images) + "\n" + message
                 ret += f"<|start_header_id|>{role}<|end_header_id|>\n\n"
                 ret += f"{message.strip()}<|eot_id|>"
             else:
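The behavioral change here: multi-image messages now get a newline between the repeated `<image>` placeholders and the user text, so the image tokens sit on their own line of the Llama-3 chat string. A toy reproduction of the new formatting (the function name is illustrative, not the real `Conversation` method):

```python
def render_turn(role: str, message) -> str:
    # Mirrors the updated tuple-message branch in the diff above.
    if isinstance(message, tuple):
        text, images = message
        message = "<image>" * len(images) + "\n" + text  # newline added by this commit
    return f"<|start_header_id|>{role}<|end_header_id|>\n\n{message.strip()}<|eot_id|>"

print(render_turn("user", ("What changed between these frames?", ["img0", "img1"])))
# <|start_header_id|>user<|end_header_id|>
#
# <image><image>
# What changed between these frames?<|eot_id|>
```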
src/model/__pycache__/__init__.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/__init__.cpython-310.pyc and b/src/model/__pycache__/__init__.cpython-310.pyc differ

src/model/__pycache__/compression.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/compression.cpython-310.pyc and b/src/model/__pycache__/compression.cpython-310.pyc differ

src/model/__pycache__/llama_condense_monkey_patch.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/llama_condense_monkey_patch.cpython-310.pyc and b/src/model/__pycache__/llama_condense_monkey_patch.cpython-310.pyc differ

src/model/__pycache__/model_adapter.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/model_adapter.cpython-310.pyc and b/src/model/__pycache__/model_adapter.cpython-310.pyc differ

src/model/__pycache__/model_chatglm.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/model_chatglm.cpython-310.pyc and b/src/model/__pycache__/model_chatglm.cpython-310.pyc differ

src/model/__pycache__/model_cllm.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/model_cllm.cpython-310.pyc and b/src/model/__pycache__/model_cllm.cpython-310.pyc differ

src/model/__pycache__/model_codet5p.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/model_codet5p.cpython-310.pyc and b/src/model/__pycache__/model_codet5p.cpython-310.pyc differ

src/model/__pycache__/model_exllama.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/model_exllama.cpython-310.pyc and b/src/model/__pycache__/model_exllama.cpython-310.pyc differ

src/model/__pycache__/model_falcon.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/model_falcon.cpython-310.pyc and b/src/model/__pycache__/model_falcon.cpython-310.pyc differ

src/model/__pycache__/model_registry.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/model_registry.cpython-310.pyc and b/src/model/__pycache__/model_registry.cpython-310.pyc differ

src/model/__pycache__/model_xfastertransformer.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/model_xfastertransformer.cpython-310.pyc and b/src/model/__pycache__/model_xfastertransformer.cpython-310.pyc differ

src/model/__pycache__/model_yuan2.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/model_yuan2.cpython-310.pyc and b/src/model/__pycache__/model_yuan2.cpython-310.pyc differ

src/model/__pycache__/monkey_patch_non_inplace.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/monkey_patch_non_inplace.cpython-310.pyc and b/src/model/__pycache__/monkey_patch_non_inplace.cpython-310.pyc differ
src/model/model_llava.py
CHANGED
@@ -2,7 +2,6 @@ from llava.model.builder import load_pretrained_model
 from llava.mm_utils import get_model_name_from_path, process_images, tokenizer_image_token
 from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
 from llava.conversation import conv_templates
-from loguru import logger
 
 from PIL import Image
 import requests
@@ -14,10 +13,14 @@ from io import BytesIO
 import base64
 #model_path = "/scratch/TecManDep/A_Models/llava-v1.6-vicuna-7b"
 #conv_template = "vicuna_v1" # Make sure you use correct chat template for different models
+from src.utils import (
+    build_logger,
+)
 
+logger = build_logger("model_llava", "model_llava.log")
 def load_llava_model(lora_checkpoint=None):
     model_path = "Lin-Chen/open-llava-next-llama3-8b"
-    conv_template = "
+    conv_template = "llama_v3_student"
     model_name = get_model_name_from_path(model_path)
     device = "cuda"
     device_map = "auto"
@@ -30,12 +33,13 @@ def load_llava_model(lora_checkpoint=None):
 
     model.eval()
     model.tie_weights()
-    logger.info("model device {
+    logger.info(f"model device {model.device}")
     return tokenizer, model, image_processor, conv_template
 
 tokenizer_llava, model_llava, image_processor_llava, conv_template_llava = load_llava_model(None)
-tokenizer_llava_fire, model_llava_fire, image_processor_llava_fire, conv_template_llava = load_llava_model("checkpoints/")
+tokenizer_llava_fire, model_llava_fire, image_processor_llava_fire, conv_template_llava = load_llava_model("checkpoints/llava-next-llama-3-8b-student-lora-merged-115124")
 model_llava_fire.to("cuda")
+
 @spaces.GPU
 def inference():
     image = Image.open("assets/example.jpg").convert("RGB")
@@ -67,7 +71,7 @@ def inference():
     return text_outputs
 
 
-@spaces.GPU
+@spaces.GPU(duration=25)
 def inference_by_prompt_and_images(prompt, images):
     device = "cuda"
     if len(images) > 0 and type(images[0]) is str:
@@ -79,7 +83,7 @@ def inference_by_prompt_and_images(prompt, images):
     image_tensor = image_tensor.to(dtype=torch.float16, device=device)
     input_ids = tokenizer_image_token(prompt, tokenizer_llava, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).to(device)
     image_sizes = [image.size for image in images]
-    logger.info("Shape: {};{}; Devices: {};{
+    logger.info(f"Shape: {input_ids.shape};{image_tensor.shape}; Devices: {input_ids.device};{image_tensor.device}")
     with torch.inference_mode():
         cont = model_llava.generate(
             input_ids,
@@ -91,10 +95,10 @@ def inference_by_prompt_and_images(prompt, images):
             use_cache=True
         )
     text_outputs = tokenizer_llava.batch_decode(cont, skip_special_tokens=True)
-
+
     return text_outputs
 
-@spaces.GPU
+@spaces.GPU(duration=25)
 def inference_by_prompt_and_images_fire(prompt, images):
     device = "cuda"
     if len(images) > 0 and type(images[0]) is str:
@@ -102,23 +106,23 @@ def inference_by_prompt_and_images_fire(prompt, images):
         for image in images:
             image_data.append(Image.open(BytesIO(base64.b64decode(image))))
         images = image_data
-    image_tensor = process_images(images,
+    image_tensor = process_images(images, image_processor_llava_fire, model_llava_fire.config)
     image_tensor = image_tensor.to(dtype=torch.float16, device=device)
-    input_ids = tokenizer_image_token(prompt,
+    input_ids = tokenizer_image_token(prompt, tokenizer_llava_fire, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).to(device)
     image_sizes = [image.size for image in images]
-    logger.info("Shape: {};{}; Devices: {};{
+    logger.info(f"Shape: {input_ids.shape};{image_tensor.shape}; Devices: {input_ids.device};{image_tensor.device}")
     with torch.inference_mode():
         cont = model_llava_fire.generate(
             input_ids,
-            images=image_tensor,
+            images=[image_tensor.squeeze(dim=0)],
            image_sizes=image_sizes,
             do_sample=False,
             temperature=0,
             max_new_tokens=256,
             use_cache=True
         )
-    text_outputs =
-    logger.info("response={}"
+    text_outputs = tokenizer_llava_fire.batch_decode(cont, skip_special_tokens=True)
+    logger.info(f"response={text_outputs}")
     return text_outputs
 
 if __name__ == "__main__":
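Net effect of this diff: the FIRE checkpoint gets its own tokenizer and image-processor globals, ZeroGPU calls are capped at 25 seconds, and the image tensor is passed as a one-element list of squeezed tensors. A hedged usage sketch of the updated entry point; the prompt template is an assumption based on the `llama_v3_student` conversation style (the real string comes from `Conversation.get_prompt`), and note that importing the module loads both models:

```python
import base64
from src.model.model_llava import inference_by_prompt_and_images_fire

# assets/test_10422.jpg is one of the images added in this commit.
with open("assets/test_10422.jpg", "rb") as f:
    payload = base64.b64encode(f.read()).decode()

# Assumed Llama-3-style prompt with the <image>\n placeholder convention above.
prompt = (
    "<|start_header_id|>user<|end_header_id|>\n\n"
    "<image>\nDescribe the image.<|eot_id|>"
    "<|start_header_id|>assistant<|end_header_id|>\n\n"
)
print(inference_by_prompt_and_images_fire(prompt, [payload]))
```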
src/model/model_registry.py
CHANGED
@@ -22,10 +22,14 @@ def get_model_info(name: str) -> ModelInfo:
     if name in ['llava-fire', 'llava-original']:
         description = {
             "llava-fire": "LLaVA fine-tuned from FIRE dataset",
-            "llava-original": "LLaVA-NeXT with LLaMA-3-8B as language decoder"
+            "llava-original": "LLaVA-NeXT with LLaMA-3-8B as the language decoder"
+        }
+        model_names = {
+            "llava-fire": "FIRE-LLaVA",
+            "llava-original": "LLaVA-NeXT-LLaMA-3-8B"
         }
         return ModelInfo(
-            name, "", description[name]
+            model_names[name], "", description[name]
         )
     if name in model_info:
         return model_info[name]
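The registry change decouples display names from routing keys: the arena UI can now show "FIRE-LLaVA" and "LLaVA-NeXT-LLaMA-3-8B" while the serving code keeps dispatching on 'llava-fire' and 'llava-original'. A quick check; the field name is an assumption carried over from the FastChat-style `ModelInfo` namedtuple this registry derives from:

```python
from src.model.model_registry import get_model_info

info = get_model_info("llava-fire")
# The first ModelInfo field now carries the display name instead of the raw key.
print(info)  # e.g. ModelInfo(simple_name='FIRE-LLaVA', link='', description='LLaVA fine-tuned from FIRE dataset')
```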
src/modules/__pycache__/__init__.cpython-310.pyc
CHANGED
Binary files a/src/modules/__pycache__/__init__.cpython-310.pyc and b/src/modules/__pycache__/__init__.cpython-310.pyc differ

src/modules/__pycache__/awq.cpython-310.pyc
CHANGED
Binary files a/src/modules/__pycache__/awq.cpython-310.pyc and b/src/modules/__pycache__/awq.cpython-310.pyc differ

src/modules/__pycache__/exllama.cpython-310.pyc
CHANGED
Binary files a/src/modules/__pycache__/exllama.cpython-310.pyc and b/src/modules/__pycache__/exllama.cpython-310.pyc differ

src/modules/__pycache__/gptq.cpython-310.pyc
CHANGED
Binary files a/src/modules/__pycache__/gptq.cpython-310.pyc and b/src/modules/__pycache__/gptq.cpython-310.pyc differ

src/modules/__pycache__/xfastertransformer.cpython-310.pyc
CHANGED
Binary files a/src/modules/__pycache__/xfastertransformer.cpython-310.pyc and b/src/modules/__pycache__/xfastertransformer.cpython-310.pyc differ

src/serve/__pycache__/__init__.cpython-310.pyc
DELETED
Binary file (129 Bytes)

src/serve/__pycache__/api_provider.cpython-310.pyc
DELETED
Binary file (17.9 kB)

src/serve/__pycache__/gradio_block_arena_named.cpython-310.pyc
DELETED
Binary file (11.4 kB)