Update README.md

README.md CHANGED

@@ -99,11 +99,73 @@ The Hugging Face Hub itself cannot automatically run uploaded models, but via `Spaces`

Through these approaches, you can make the model repository both runnable online and easy for users to deploy offline.
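
The offline side of that workflow can be sketched with `huggingface_hub`: assuming the repository is public, `snapshot_download` pulls all of its files to a local folder once, after which the script in the Uses section below needs no network access. The repo id here is a placeholder, not this repository's actual name.

```python
# Hedged sketch of offline deployment; "user/model-repo" is a placeholder id.
from huggingface_hub import snapshot_download

# Download every file in the repo (weights, config, code) to a local folder
local_dir = snapshot_download(repo_id="user/model-repo")
print(f"Files available offline under: {local_dir}")
```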

## Uses

```python
import os

import torch

from model import AutoModel, Config


def load_model(model_path, config_path):
    """Load the model weights and configuration."""
    # Load the configuration
    if not os.path.exists(config_path):
        raise FileNotFoundError(f"Config file not found: {config_path}")
    print(f"Loading config file: {config_path}")
    # NOTE: Config() is built from its defaults; config_path is only checked
    # for existence here. Parse the JSON into Config if the class supports it.
    config = Config()

    # Initialize the model
    model = AutoModel(config)

    # Load the weights
    if not os.path.exists(model_path):
        raise FileNotFoundError(f"Model file not found: {model_path}")
    print(f"Loading model weights: {model_path}")
    state_dict = torch.load(model_path, map_location=torch.device("cpu"))
    model.load_state_dict(state_dict)
    model.eval()
    print("Model loaded and set to evaluation mode.")

    return model, config


def run_inference(model, config):
    """Run inference with the model."""
    # Dummy example inputs
    image = torch.randn(1, 3, 224, 224)  # image input
    text = torch.randn(1, config.max_position_embeddings, config.hidden_size)  # text input
    audio = torch.randn(1, config.audio_sample_rate)  # audio input

    # Forward pass (no gradients needed at inference time)
    with torch.no_grad():
        outputs = model(image, text, audio)
    vqa_output, caption_output, retrieval_output, asr_output, realtime_asr_output = outputs

    # Print the results
    print("\nInference results:")
    print(f"VQA output shape: {vqa_output.shape}")
    print(f"Caption output shape: {caption_output.shape}")
    print(f"Retrieval output shape: {retrieval_output.shape}")
    print(f"ASR output shape: {asr_output.shape}")
    print(f"Realtime ASR output shape: {realtime_asr_output.shape}")


if __name__ == "__main__":
    # File paths
    model_path = "AutoModel.pth"
    config_path = "config.json"

    try:
        # Load the model
        model, config = load_model(model_path, config_path)

        # Run inference
        run_inference(model, config)
    except Exception as e:
        print(f"Run failed: {e}")
```
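
For reference, a checkpoint that `load_model` above can read is just a standard PyTorch state dict. A minimal sketch, assuming `AutoModel` and `Config` from this repo's own `model` module:

```python
import torch

from model import AutoModel, Config

# Hedged sketch: produce a checkpoint in the format load_model expects
model = AutoModel(Config())
torch.save(model.state_dict(), "AutoModel.pth")  # matches model_path above
```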

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]