sagar007 committed on
Commit 10dacc2 · verified · 1 Parent(s): 03cdb75

Update app.py

Files changed (1): app.py (+69, -182)
app.py CHANGED
@@ -1,197 +1,84 @@
  import gradio as gr
- import spaces
  from PIL import Image
- import requests
- from transformers import AutoModelForCausalLM, AutoProcessor
  import torch
- import subprocess
- from io import BytesIO
- import os
-
- # Install flash-attn
- subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
-
- # Load the model and processor
- model_id = "microsoft/Phi-3.5-vision-instruct"
- model = AutoModelForCausalLM.from_pretrained(
-     model_id,
-     trust_remote_code=True,
-     torch_dtype=torch.float16,
-     use_flash_attention_2=False,  # Explicitly disable Flash Attention 2
- )
- processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True, num_crops=16)
-
- @spaces.GPU(duration=120)  # Adjust the duration as needed
- def solve_math_problem(image):
-     # Move model to GPU for this function call
-     model.to('cuda')
-
-     # Prepare the input
-     messages = [
-         {"role": "user", "content": "<|image_1|>\nSolve this math problem step by step. Explain your reasoning clearly."},
-     ]
-     prompt = processor.tokenizer.apply_chat_template(
-         messages, tokenize=False, add_generation_prompt=True
      )
-
-     # Process the input
-     inputs = processor(prompt, image, return_tensors="pt").to("cuda")
-
-     # Generate the response
-     generation_args = {
-         "max_new_tokens": 1000,
-         "temperature": 0.2,
-         "do_sample": True,
-     }
-     generate_ids = model.generate(**inputs, eos_token_id=processor.tokenizer.eos_token_id, **generation_args)
-
-     # Decode the response
-     generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
-     response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
-
-     # Move model back to CPU to free up GPU memory
-     model.to('cpu')
-     return response
-
- def load_image_from_file(file_path):
-     if os.path.exists(file_path):
-         return Image.open(file_path)
      else:
-         raise FileNotFoundError(f"Image file not found: {file_path}")
-
- # Custom CSS
- custom_css = """
- <style>
- body {
-     font-family: 'Arial', sans-serif;
-     background-color: #f0f3f7;
-     margin: 0;
-     padding: 0;
- }
- .container {
-     max-width: 1200px;
-     margin: 0 auto;
-     padding: 20px;
- }
- .header {
-     background-color: #2c3e50;
-     color: white;
-     padding: 20px 0;
-     text-align: center;
- }
- .header h1 {
-     margin: 0;
-     font-size: 2.5em;
- }
- .main-content {
-     display: flex;
-     justify-content: space-between;
-     margin-top: 30px;
- }
- .input-section, .output-section {
-     width: 48%;
-     background-color: white;
-     border-radius: 8px;
-     padding: 20px;
-     box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
- }
- .gr-button {
-     background-color: #27ae60;
-     color: white;
-     border: none;
-     padding: 10px 20px;
-     border-radius: 5px;
-     cursor: pointer;
-     transition: background-color 0.3s;
- }
- .gr-button:hover {
-     background-color: #2ecc71;
- }
- .examples-section {
-     margin-top: 30px;
-     background-color: white;
-     border-radius: 8px;
-     padding: 20px;
-     box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
- }
- .examples-section h3 {
-     margin-top: 0;
-     color: #2c3e50;
- }
- .footer {
-     text-align: center;
-     margin-top: 30px;
-     color: #7f8c8d;
- }
- </style>
- """
-
- # Custom HTML
- custom_html = """
- <div class="container">
-     <div class="header">
-         <h1>AI Math Equation Solver</h1>
-         <p>Upload an image of a math problem, and our AI will solve it step by step!</p>
-     </div>
-     <div class="main-content">
-         <div class="input-section">
-             <h2>Upload Your Math Problem</h2>
-             {input_image}
-             {submit_btn}
-         </div>
-         <div class="output-section">
-             <h2>Solution</h2>
-             {output_text}
-         </div>
-     </div>
-     <div class="examples-section">
-         <h3>Try These Examples</h3>
-         {examples}
-     </div>
-     <div class="footer">
-         <p>Powered by Gradio and AI - Created for educational purposes</p>
-     </div>
- </div>
- """
-
- # Create the Gradio interface
- with gr.Blocks(css=custom_css) as iface:
-     gr.HTML("""
-         <div class="header">
-             <h1>AI Math Equation Solver</h1>
-             <p>Upload an image of a math problem, and our AI will solve it step by step!</p>
-         </div>
-     """)
-
-     with gr.Row(equal_height=True):
-         with gr.Column():
-             gr.HTML("<h2>Upload Your Math Problem</h2>")
-             input_image = gr.Image(type="pil", label="Upload Math Problem Image")
-             submit_btn = gr.Button("Solve Problem", elem_classes=["gr-button"])
-
-         with gr.Column():
-             gr.HTML("<h2>Solution</h2>")
-             output_text = gr.Textbox(label="Step-by-step Solution", lines=10)
-
-     gr.HTML("<h3>Try These Examples</h3>")
-     examples = gr.Examples(
-         examples=[
-             os.path.join(os.path.dirname(__file__), "eqn1.png"),
-             os.path.join(os.path.dirname(__file__), "eqn2.png")
-         ],
-         inputs=input_image,
-         outputs=output_text,
-         fn=solve_math_problem,
-         cache_examples=True,
-     )
-
-     gr.HTML("""
-         <div class="footer">
-             <p>Powered by Gradio and AI - Created for educational purposes</p>
-         </div>
-     """)
-
-     submit_btn.click(fn=solve_math_problem, inputs=input_image, outputs=output_text)
-
- # Launch the app
- iface.launch()
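One behavioral difference between the two versions: the removed solve_math_problem() sliced the prompt tokens off generate_ids before decoding, while the new analyze_image() below decodes the full output sequence, so its replies can echo the prompt text. A minimal sketch of the prompt-stripping pattern, reusing the names from the code above (the helper name is illustrative, not part of either version):

def decode_new_tokens(processor, inputs, generate_ids):
    # Illustrative helper: keep only the tokens generated after the prompt.
    prompt_len = inputs["input_ids"].shape[1]
    return processor.batch_decode(
        generate_ids[:, prompt_len:],
        skip_special_tokens=True,
        clean_up_tokenization_spaces=False,
    )[0]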
 
  import gradio as gr
+ from transformers import AutoModelForCausalLM, AutoProcessor, pipeline
  from PIL import Image
  import torch
+ import warnings
+
+ # Suppress warnings
+ warnings.filterwarnings("ignore")
+
+ # Load Phi-3.5-vision model
+ phi_model_id = "microsoft/Phi-3.5-vision-instruct"
+ try:
+     phi_model = AutoModelForCausalLM.from_pretrained(
+         phi_model_id,
+         device_map="auto",
+         trust_remote_code=True,
+         torch_dtype=torch.float16,  # Use float16 to reduce memory usage
+         _attn_implementation="eager"  # Use the eager attention implementation (no flash-attn required)
      )
+ except ImportError:
+     print("FlashAttention not available, falling back to eager implementation.")
+     phi_model = AutoModelForCausalLM.from_pretrained(
+         phi_model_id,
+         device_map="auto",
+         trust_remote_code=True,
+         torch_dtype=torch.float16,
+         _attn_implementation="eager"
+     )
+
+ phi_processor = AutoProcessor.from_pretrained(phi_model_id, trust_remote_code=True)
+
+ # Load Llama 3.1 model
+ llama_model_id = "meta-llama/Llama-3.1-8B"
+ try:
+     llama_pipeline = pipeline("text-generation", model=llama_model_id, device_map="auto", torch_dtype=torch.float16)
+ except Exception as e:
+     print(f"Error loading Llama 3.1 model: {e}")
+     print("Falling back to a smaller, open-source model.")
+     llama_model_id = "gpt2"  # Fallback to a smaller, open-source model
+     llama_pipeline = pipeline("text-generation", model=llama_model_id, device_map="auto")
+
+ def analyze_image(image, query):
+     prompt = f"<|user|>\n<|image_1|>\n{query}<|end|>\n<|assistant|>\n"
+     inputs = phi_processor(prompt, images=image, return_tensors="pt").to(phi_model.device)
+
+     with torch.no_grad():
+         output = phi_model.generate(**inputs, max_new_tokens=100)
+     return phi_processor.decode(output[0], skip_special_tokens=True)
+
+ def generate_text(query, history):
+     context = "\n".join([f"{h[0]}\n{h[1]}" for h in history])
+     prompt = f"{context}\nHuman: {query}\nAI:"
+
+     response = llama_pipeline(prompt, max_new_tokens=100, do_sample=True, temperature=0.7)[0]['generated_text']
+     return response.split("AI:")[-1].strip()
+
+ def chatbot(image, query, history):
+     if image is not None:
+         response = analyze_image(Image.fromarray(image), query)
      else:
+         response = generate_text(query, history)
+
+     history.append((query, response))
+     return "", history, history
+
+ with gr.Blocks() as demo:
+     gr.Markdown("# Multi-Modal AI Assistant")
+
+     with gr.Row():
+         image_input = gr.Image(type="numpy", label="Upload an image (optional)")
+         chat_history = gr.Chatbot(label="Chat History")
+
+     query_input = gr.Textbox(label="Ask a question or enter a prompt")
+     submit_button = gr.Button("Submit")
+
+     state = gr.State([])
+
+     submit_button.click(
+         chatbot,
+         inputs=[image_input, query_input, state],
+         outputs=[query_input, chat_history, state]
+     )
+
+ demo.launch()
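The click handler returns three values: an empty string that clears query_input, plus the updated history for both the visible gr.Chatbot and the gr.State that carries the conversation between calls. A quick sanity check of the callback outside the UI (a sketch, assuming the models above loaded successfully):

history = []
_, history, _ = chatbot(None, "What is 2 + 2?", history)  # image=None takes the text-only path
print(history[-1])  # the (query, response) pair that gr.Chatbot renders

Note that meta-llama/Llama-3.1-8B is a gated repository on the Hugging Face Hub; unless the Space is authenticated with a token that has been granted access, the pipeline() call raises and the code falls back to gpt2.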