gizemsarsinlar committed
Commit 1a6af13
1 Parent(s): 2900530

Upload 3 files

Files changed (3)
  1. README.md +13 -13
  2. app.py +69 -0
  3. requirements.txt +7 -0
README.md CHANGED
@@ -1,13 +1,13 @@
- ---
- title: Phi 3.5 Vision Model
- emoji: 🐨
- colorFrom: purple
- colorTo: purple
- sdk: gradio
- sdk_version: 5.5.0
- app_file: app.py
- pinned: false
- license: other
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: Phi 3.5 Vision
+ emoji: 🔥
+ colorFrom: gray
+ colorTo: indigo
+ sdk: gradio
+ sdk_version: 4.42.0
+ app_file: app.py
+ pinned: true
+ license: mit
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,69 @@
+ import gradio as gr
+ import spaces
+ from transformers import AutoModelForCausalLM, AutoProcessor
+ import torch
+ from PIL import Image
+ import subprocess
+ subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
+
+ models = {
+     "microsoft/Phi-3.5-vision-instruct": AutoModelForCausalLM.from_pretrained("microsoft/Phi-3.5-vision-instruct", trust_remote_code=True, torch_dtype="auto", _attn_implementation="flash_attention_2").cuda().eval()
+ }
+
+ processors = {
+     "microsoft/Phi-3.5-vision-instruct": AutoProcessor.from_pretrained("microsoft/Phi-3.5-vision-instruct", trust_remote_code=True)
+ }
+
+ DESCRIPTION = "[Phi-3.5-vision Demo](https://huggingface.co/microsoft/Phi-3.5-vision-instruct)"
+
+ kwargs = {}
+ kwargs['torch_dtype'] = torch.bfloat16
+
+ user_prompt = '<|user|>\n'
+ assistant_prompt = '<|assistant|>\n'
+ prompt_suffix = "<|end|>\n"
+
+ @spaces.GPU
+ def run_example(image, text_input=None, model_id="microsoft/Phi-3.5-vision-instruct"):
+     model = models[model_id]
+     processor = processors[model_id]
+
+     prompt = f"{user_prompt}<|image_1|>\n{text_input}{prompt_suffix}{assistant_prompt}"
+     image = Image.fromarray(image).convert("RGB")
+
+     inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
+     generate_ids = model.generate(**inputs,
+                                   max_new_tokens=1000,
+                                   eos_token_id=processor.tokenizer.eos_token_id,
+                                   )
+     generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
+     response = processor.batch_decode(generate_ids,
+                                       skip_special_tokens=True,
+                                       clean_up_tokenization_spaces=False)[0]
+     return response
+
+ css = """
+   #output {
+     height: 500px;
+     overflow: auto;
+     border: 1px solid #ccc;
+   }
+ """
+
+ with gr.Blocks(css=css) as demo:
+     gr.Markdown(DESCRIPTION)
+     with gr.Tab(label="Phi-3.5 Input"):
+         with gr.Row():
+             with gr.Column():
+                 input_img = gr.Image(label="Input Picture")
+                 model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value="microsoft/Phi-3.5-vision-instruct")
+                 text_input = gr.Textbox(label="Question")
+                 submit_btn = gr.Button(value="Submit")
+             with gr.Column():
+                 output_text = gr.Textbox(label="Output Text")
+
+         submit_btn.click(run_example, [input_img, text_input, model_selector], [output_text])
+
+ demo.queue(api_open=False)
+ demo.launch(debug=True, show_api=False)
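
The committed app assumes a ZeroGPU Space: `@spaces.GPU` schedules each call on a GPU worker, and flash-attn is installed at startup. For a quick local smoke test of the same inference path, a minimal sketch might look like the following; the `eager` attention fallback, the device selection, and the local file `example.jpg` are assumptions, not part of the commit:

```python
# Minimal local sketch of app.py's inference path (assumptions: no ZeroGPU,
# no flash-attn; "eager" attention and explicit device selection instead).
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor

model_id = "microsoft/Phi-3.5-vision-instruct"
device = "cuda" if torch.cuda.is_available() else "cpu"

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    trust_remote_code=True,
    torch_dtype="auto",
    _attn_implementation="eager",  # fallback when flash-attn is unavailable
).to(device).eval()
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)

# Same chat template as app.py: one image placeholder, then the question.
prompt = "<|user|>\n<|image_1|>\nWhat is shown in this image?<|end|>\n<|assistant|>\n"
image = Image.open("example.jpg").convert("RGB")  # hypothetical local file

inputs = processor(prompt, image, return_tensors="pt").to(device)
generate_ids = model.generate(
    **inputs,
    max_new_tokens=1000,
    eos_token_id=processor.tokenizer.eos_token_id,
)
# Drop the prompt tokens so only the model's answer is decoded.
generate_ids = generate_ids[:, inputs["input_ids"].shape[1]:]
print(processor.batch_decode(generate_ids, skip_special_tokens=True,
                             clean_up_tokenization_spaces=False)[0])
```

The prompt string and decoding steps mirror `run_example` exactly; only the attention implementation and device handling differ.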
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ numpy==1.24.4
+ Pillow==10.3.0
+ Requests==2.31.0
+ torch
+ torchvision
+ transformers==4.43.0
+ accelerate==0.30.0
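
Since `torch` and `torchvision` are left unpinned while `transformers` and `accelerate` are pinned, version drift is a plausible failure mode at Space startup. A minimal, standard-library-only sketch for printing what actually resolved inside the environment:

```python
# Sanity check: print the installed version of each declared dependency.
import importlib.metadata as metadata

for pkg in ("numpy", "Pillow", "requests", "torch", "torchvision",
            "transformers", "accelerate"):
    try:
        print(f"{pkg}=={metadata.version(pkg)}")
    except metadata.PackageNotFoundError:
        print(f"{pkg}: not installed")
```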