tanshaohui
committed
Commit · 54a6ed7
1 Parent(s): 067022f
feat: deploy demo
Files changed:
- app.py +59 -0
- requirements.txt +6 -0
app.py
ADDED
@@ -0,0 +1,59 @@
```python
import gradio as gr
import spaces
import torch
import os
import subprocess

# os.system("pip install git+https://github.com/huggingface/transformers")

from PIL import Image
import requests
from transformers import AutoModelForCausalLM
from transformers import AutoProcessor

model_id = "microsoft/Phi-3-vision-128k-instruct"

model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True, torch_dtype="auto", _attn_implementation='eager').cuda()  # use _attn_implementation='eager' to disable flash attention

processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)

@spaces.GPU
def infer(u, t):
    if len(u) < 1:  # fall back to a sample image when the URL box is empty
        u = "https://lf3-static.bytednsdoc.com/obj/eden-cn/pbovhozuha/output.png"
    if len(t) < 1:  # fall back to a default prompt when the text box is empty
        t = "Convert the text in the image to markdown"
    messages = [
        {"role": "user", "content": "<|image_1|>\n" + t},
    ]
    url = u
    image = Image.open(requests.get(url, stream=True).raw)

    prompt = processor.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

    inputs = processor(prompt, [image], return_tensors="pt").to(model.device)

    generation_args = {
        "max_new_tokens": 512,
        "temperature": 0.7,
        "do_sample": True,
    }

    generate_ids = model.generate(**inputs, eos_token_id=processor.tokenizer.eos_token_id, **generation_args)

    # remove input tokens
    generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
    response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]

    return response


demo = gr.Interface(
    fn=infer,
    inputs=[
        gr.Text(label="url"),
        gr.Text(label="text"),
    ],
    outputs=gr.Text(),
)
demo.launch()
```
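Once the Space is running, the `@spaces.GPU` decorator requests a GPU for each call to `infer`, and `gr.Interface` exposes the function over Gradio's standard client protocol. Below is a minimal sketch of calling the demo programmatically with `gradio_client`; the local URL and the default `/predict` endpoint name are assumptions, not part of this commit.

```python
# Minimal sketch: driving the demo with gradio_client.
# Assumptions (not in this commit): the app is running locally on
# Gradio's default port, and the endpoint is the default "/predict".
from gradio_client import Client

client = Client("http://127.0.0.1:7860/")
result = client.predict(
    "https://lf3-static.bytednsdoc.com/obj/eden-cn/pbovhozuha/output.png",  # url input
    "Convert the text in the image to markdown",                            # text input
    api_name="/predict",
)
print(result)
```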
requirements.txt
ADDED
@@ -0,0 +1,6 @@
```
numpy==1.24.4
Pillow==10.3.0
Requests==2.31.0
torch==2.3.0
torchvision==0.18.0
transformers==4.40.2
```
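Note that `app.py` also imports `gradio` and `spaces`, which this file does not pin; on Hugging Face Spaces the Gradio SDK runtime provides both. A hedged sketch of the extra lines an environment outside Spaces would likely need (unpinned on purpose, since no versions for them appear in the commit):

```
# hypothetical additions for running outside Hugging Face Spaces;
# the Spaces runtime already provides both packages
gradio
spaces
```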