VanguardAI committed on
Commit 784dddf · verified · 1 Parent(s): 26e0039

Delete app.py

Files changed (1)
  app.py  +0  -112
app.py DELETED
@@ -1,112 +0,0 @@
- import subprocess
- # Installing flash_attn
- subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
-
- import gradio as gr
- from PIL import Image
- from transformers import AutoModelForCausalLM
- from transformers import AutoProcessor
- from transformers import TextIteratorStreamer
- import time
- from threading import Thread
- import torch
- import spaces
-
- model_id = "microsoft/Phi-3-vision-128k-instruct"
- model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True, torch_dtype="auto")
- processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
-
-
- PLACEHOLDER = """
- <div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
- <img src="https://cdn-thumbnails.huggingface.co/social-thumbnails/models/microsoft/Phi-3-vision-128k-instruct.png" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; ">
- <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">Microsoft's Phi3 Vision</h1>
- <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Phi-3-Vision is a 4.2B parameter multimodal model that brings together language and vision capabilities.</p>
- </div>
- """
-
- @spaces.GPU
- def bot_streaming(message, history):
-     print(f'message is - {message}')
-     print(f'history is - {history}')
-     if message["files"]:
-         # message["files"][-1] is a dict or just a string path
-         if type(message["files"][-1]) == dict:
-             image = message["files"][-1]["path"]
-         else:
-             image = message["files"][-1]
-     else:
-         # if there's no image uploaded for this turn, look for images in the past turns
-         # kept inside tuples, take the last one
-         for hist in history:
-             if type(hist[0]) == tuple:
-                 image = hist[0][0]
-     try:
-         if image is None:
-             # Handle the case where image is None
-             raise gr.Error("You need to upload an image for Phi3-Vision to work. Close the error and try again with an image.")
-     except NameError:
-         # Handle the case where 'image' is not defined at all
-         raise gr.Error("You need to upload an image for Phi3-Vision to work. Close the error and try again with an image.")
-
-     conversation = []
-     flag = False
-     for user, assistant in history:
-         if assistant is None:
-             # pass
-             flag = True
-             conversation.extend([{"role": "user", "content": ""}])
-             continue
-         if flag == True:
-             conversation[0]['content'] = f"<|image_1|>\n{user}"
-             conversation.extend([{"role": "assistant", "content": assistant}])
-             flag = False
-             continue
-         conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
-
-     if len(history) == 0:
-         conversation.append({"role": "user", "content": f"<|image_1|>\n{message['text']}"})
-     else:
-         conversation.append({"role": "user", "content": message['text']})
-     print(f"prompt is -\n{conversation}")
-     prompt = processor.tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
-     image = Image.open(image)
-     inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
-
-     streamer = TextIteratorStreamer(processor, **{"skip_special_tokens": True, "skip_prompt": True, "clean_up_tokenization_spaces": False})
-     generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024, do_sample=False, temperature=0.0, eos_token_id=processor.tokenizer.eos_token_id)
-
-     thread = Thread(target=model.generate, kwargs=generation_kwargs)
-     thread.start()
-
-     buffer = ""
-     for new_text in streamer:
-         buffer += new_text
-         yield buffer
-
-     print(conversation)
-
-
- chatbot = gr.Chatbot(scale=1, placeholder=PLACEHOLDER)
- chat_input = gr.MultimodalTextbox(interactive=True, file_types=["image"], placeholder="Enter message or upload file...", show_label=False)
- with gr.Blocks(fill_height=True) as demo:
-     gr.ChatInterface(
-         fn=bot_streaming,
-         title="Phi3 Vision 128K Instruct",
-         examples=[{"text": "Describe the image in detail.", "files": ["./robo.jpg"]},
-                   {"text": "What does the chart display?", "files": ["./dataviz.png"]},
-                   {"text": "What is 3?", "files": ["./setofmark1.jpg"]},
-                   {"text": "Count the number of apples.", "files": ["./setofmark6.png"]},
-                   {"text": "I want to find a seat close to windows, where can I sit?", "files": ["./office1.jpg"]},
-                   ],
-         description="Try the [Phi3-Vision model](https://huggingface.co/microsoft/Phi-3-vision-128k-instruct) from Microsoft. Upload an image and start chatting about it, or simply try one of the examples below. If you don't upload an image, you will receive an error. This is not the official demo.",
-         stop_btn="Stop Generation",
-         multimodal=True,
-         textbox=chat_input,
-         chatbot=chatbot,
-         cache_examples=False,
-         examples_per_page=3
-     )
-
- demo.queue()
- demo.launch(debug=True, quiet=True)