karthi311 committed (verified)
Commit 2f3ede2 · Parent: 14627c2

Update app.py

Files changed (1): app.py (+227, -43)
app.py CHANGED
@@ -61,14 +61,13 @@ def transcribe_audio(audio_path):
         # Read the audio file and prepare inputs for Whisper
         inputs = ffmpeg_read(audio_path, whisper_pipeline.feature_extractor.sampling_rate)
         inputs = {"array": inputs, "sampling_rate": whisper_pipeline.feature_extractor.sampling_rate}
-
+
         # Perform transcription using Whisper
         result = whisper_pipeline(inputs, batch_size=BATCH_SIZE, return_timestamps=False)
         return result["text"]
     except Exception as e:
         return f"Error during transcription: {e}"
 
-
 # Classify the sentence to the correct SOAP section
 def classify_sentence(sentence):
     similarities = {section: util.pytorch_cos_sim(embedder.encode(sentence), soap_embeddings[section]) for section in soap_prompts.keys()}
@@ -134,52 +133,17 @@ def process_file(file, user_prompt):
     soap_note = soap_analysis(transcription)
     print("SOAP Notes: ", soap_note)
 
-    # # Generate template and JSON using LLaMA
-    # template_output = llama_query(user_prompt, soap_note)
-    # print("Template: ", template_output)
-
-    # json_output = llama_convert_to_json(template_output)
-
     # Clean up temporary files
     if file.name.endswith(".mp4"):
         os.remove(temp_mp3_path)
 
-    return soap_note#, template_output, json_output
+    return soap_note
 
 # Process text function for text input to SOAP
 def process_text(text, user_prompt):
     soap_note = soap_analysis(text)
     print(soap_note)
-
-    # template_output = llama_query(user_prompt, soap_note)
-    # print(template_output)
-    # json_output = llama_convert_to_json(template_output)
-
-    return soap_note#, template_output, json_output
-
-# # Llama query function
-# def llama_query(user_prompt, soap_note, model="llama3.2"):
-#     combined_prompt = f"User Instructions:\n{user_prompt}\n\nContext:\n{soap_note}"
-#     try:
-#         process = Popen(['ollama', 'run', model], stdin=PIPE, stdout=PIPE, stderr=PIPE, text=True, encoding='utf-8')
-#         stdout, stderr = process.communicate(input=combined_prompt)
-#         if process.returncode != 0:
-#             return f"Error: {stderr.strip()}"
-#         return stdout.strip()
-#     except Exception as e:
-#         return f"Unexpected error: {str(e)}"
-
-# # Convert the response to JSON format
-# def llama_convert_to_json(template_output, model="llama3.2"):
-#     json_prompt = f"Convert the following template into a structured JSON format:\n\n{template_output}"
-#     try:
-#         process = Popen(['ollama', 'run', model], stdin=PIPE, stdout=PIPE, stderr=PIPE, text=True, encoding='utf-8')
-#         stdout, stderr = process.communicate(input=json_prompt)
-#         if process.returncode != 0:
-#             return f"Error: {stderr.strip()}"
-#         return stdout.strip()  # Assuming the model outputs a valid JSON string
-#     except Exception as e:
-#         return f"Unexpected error: {str(e)}"
+    return soap_note
 
 # Gradio interface
 def launch_gradio():
@@ -192,8 +156,6 @@ def launch_gradio():
                 inputs=[gr.File(label="Upload Audio/Video File"), gr.Textbox(label="Enter Prompt for Template", placeholder="Enter a detailed prompt...", lines=6)],
                 outputs=[
                     gr.Textbox(label="SOAP Note"),
-                    # gr.Textbox(label="Generated Template from LLaMA"),
-                    # gr.Textbox(label="JSON Output")
                 ],
             )
 
@@ -203,8 +165,6 @@ def launch_gradio():
                 inputs=[gr.Textbox(label="Enter Text", placeholder="Enter medical notes...", lines=6), gr.Textbox(label="Enter Prompt for Template", placeholder="Enter a detailed prompt...", lines=6)],
                 outputs=[
                     gr.Textbox(label="SOAP Note"),
-                    # gr.Textbox(label="Generated Template from LLaMA"),
-                    # gr.Textbox(label="JSON Output")
                 ],
            )
 
@@ -213,3 +173,227 @@ def launch_gradio():
 # Run the Gradio app
 if __name__ == "__main__":
     launch_gradio()
+
+
+
+
+
+
+
+
+
+# import os
+# import tempfile
+# from subprocess import Popen, PIPE
+# import torch
+# import gradio as gr
+# from pydub import AudioSegment
+# from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
+# from transformers.pipelines.audio_utils import ffmpeg_read
+# from sentence_transformers import SentenceTransformer, util
+# import spacy
+# import spacy.cli
+# spacy.cli.download("en_core_web_sm")
+
+# # Constants
+# MODEL_NAME = "openai/whisper-large-v3-turbo"
+# BATCH_SIZE = 8
+# FILE_LIMIT_MB = 1000
+# device = 0 if torch.cuda.is_available() else "cpu"
+
+# # Whisper pipeline
+# whisper_pipeline = pipeline(
+#     task="automatic-speech-recognition",
+#     model=MODEL_NAME,
+#     chunk_length_s=30,
+#     device=device,
+# )
+
+# # NLP model and other helpers
+# nlp = spacy.load("en_core_web_sm")
+# embedder = SentenceTransformer("all-MiniLM-L6-v2")
+
+# # Summarization model
+# summarizer_model_name = "Mahalingam/DistilBart-Med-Summary"
+# tokenizer = AutoTokenizer.from_pretrained(summarizer_model_name)
+# summarizer_model = AutoModelForSeq2SeqLM.from_pretrained(summarizer_model_name)
+# summarizer = pipeline("summarization", model=summarizer_model, tokenizer=tokenizer)
+
+# # SOAP prompts and embeddings
+# soap_prompts = {
+#     "subjective": "Personal reports, symptoms described by patients, or personal health concerns. Details reflecting individual symptoms or health descriptions.",
+#     "objective": "Observable facts, clinical findings, professional observations, specific medical specialties, and diagnoses.",
+#     "assessment": "Clinical assessments, expertise-based opinions on conditions, and significance of medical interventions. Focused on medical evaluations or patient condition summaries.",
+#     "plan": "Future steps, recommendations for treatment, follow-up instructions, and healthcare management plans."
+# }
+# soap_embeddings = {section: embedder.encode(prompt, convert_to_tensor=True) for section, prompt in soap_prompts.items()}
+
+# # Convert MP4 to MP3
+# def convert_mp4_to_mp3(mp4_path, mp3_path):
+#     try:
+#         audio = AudioSegment.from_file(mp4_path, format="mp4")
+#         audio.export(mp3_path, format="mp3")
+#     except Exception as e:
+#         raise RuntimeError(f"Error converting MP4 to MP3: {e}")
+
+# # Transcribe audio
+# def transcribe_audio(audio_path):
+#     try:
+#         if not os.path.exists(audio_path):
+#             raise FileNotFoundError(f"Audio file not found: {audio_path}")
+
+#         # Read the audio file and prepare inputs for Whisper
+#         inputs = ffmpeg_read(audio_path, whisper_pipeline.feature_extractor.sampling_rate)
+#         inputs = {"array": inputs, "sampling_rate": whisper_pipeline.feature_extractor.sampling_rate}
+
+#         # Perform transcription using Whisper
+#         result = whisper_pipeline(inputs, batch_size=BATCH_SIZE, return_timestamps=False)
+#         return result["text"]
+#     except Exception as e:
+#         return f"Error during transcription: {e}"
+
+
+# # Classify the sentence to the correct SOAP section
+# def classify_sentence(sentence):
+#     similarities = {section: util.pytorch_cos_sim(embedder.encode(sentence), soap_embeddings[section]) for section in soap_prompts.keys()}
+#     return max(similarities, key=similarities.get)
+
+# # Summarize the section if it's too long
+# def summarize_section(section_text):
+#     if len(section_text.split()) < 50:
+#         return section_text
+#     target_length = int(len(section_text.split()) * 0.50)
+#     inputs = tokenizer.encode(section_text, return_tensors="pt", truncation=True, max_length=1024)
+#     summary_ids = summarizer_model.generate(
+#         inputs,
+#         max_length=target_length,
+#         min_length=int(target_length * 0.45),
+#         length_penalty=1.0,
+#         num_beams=4
+#     )
+#     return tokenizer.decode(summary_ids[0], skip_special_tokens=True)
+
+# # Analyze the SOAP content and divide into sections
+# def soap_analysis(text):
+#     doc = nlp(text)
+#     soap_note = {section: "" for section in soap_prompts.keys()}
+
+#     for sentence in doc.sents:
+#         section = classify_sentence(sentence.text)
+#         soap_note[section] += sentence.text + " "
+
+#     # Summarize each section of the SOAP note
+#     for section in soap_note:
+#         soap_note[section] = summarize_section(soap_note[section].strip())
+
+#     return format_soap_output(soap_note)
+
+# # Format the SOAP note output
+# def format_soap_output(soap_note):
+#     return (
+#         f"Subjective:\n{soap_note['subjective']}\n\n"
+#         f"Objective:\n{soap_note['objective']}\n\n"
+#         f"Assessment:\n{soap_note['assessment']}\n\n"
+#         f"Plan:\n{soap_note['plan']}\n"
+#     )
+
+# # Process file function for audio/video to SOAP
+# def process_file(file, user_prompt):
+#     # Determine file type and convert if necessary
+#     if file.name.endswith(".mp4"):
+#         temp_mp3_path = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False).name
+#         try:
+#             convert_mp4_to_mp3(file.name, temp_mp3_path)
+#             audio_path = temp_mp3_path
+#         except Exception as e:
+#             return f"Error during MP4 to MP3 conversion: {e}", "", ""
+#     else:
+#         audio_path = file.name
+
+#     # Transcribe audio
+#     transcription = transcribe_audio(audio_path)
+#     print("Transcribed Text: ", transcription)
+
+#     # Perform SOAP analysis
+#     soap_note = soap_analysis(transcription)
+#     print("SOAP Notes: ", soap_note)
+
+#     # # Generate template and JSON using LLaMA
+#     # template_output = llama_query(user_prompt, soap_note)
+#     # print("Template: ", template_output)
+
+#     # json_output = llama_convert_to_json(template_output)
+
+#     # Clean up temporary files
+#     if file.name.endswith(".mp4"):
+#         os.remove(temp_mp3_path)
+
+#     return soap_note#, template_output, json_output
+
+# # Process text function for text input to SOAP
+# def process_text(text, user_prompt):
+#     soap_note = soap_analysis(text)
+#     print(soap_note)
+
+#     # template_output = llama_query(user_prompt, soap_note)
+#     # print(template_output)
+#     # json_output = llama_convert_to_json(template_output)
+
+#     return soap_note#, template_output, json_output
+
+# # # Llama query function
+# # def llama_query(user_prompt, soap_note, model="llama3.2"):
+# #     combined_prompt = f"User Instructions:\n{user_prompt}\n\nContext:\n{soap_note}"
+# #     try:
+# #         process = Popen(['ollama', 'run', model], stdin=PIPE, stdout=PIPE, stderr=PIPE, text=True, encoding='utf-8')
+# #         stdout, stderr = process.communicate(input=combined_prompt)
+# #         if process.returncode != 0:
+# #             return f"Error: {stderr.strip()}"
+# #         return stdout.strip()
+# #     except Exception as e:
+# #         return f"Unexpected error: {str(e)}"
+
+# # # Convert the response to JSON format
+# # def llama_convert_to_json(template_output, model="llama3.2"):
+# #     json_prompt = f"Convert the following template into a structured JSON format:\n\n{template_output}"
+# #     try:
+# #         process = Popen(['ollama', 'run', model], stdin=PIPE, stdout=PIPE, stderr=PIPE, text=True, encoding='utf-8')
+# #         stdout, stderr = process.communicate(input=json_prompt)
+# #         if process.returncode != 0:
+# #             return f"Error: {stderr.strip()}"
+# #         return stdout.strip()  # Assuming the model outputs a valid JSON string
+# #     except Exception as e:
+# #         return f"Unexpected error: {str(e)}"
+
+# # Gradio interface
+# def launch_gradio():
+#     with gr.Blocks(theme=gr.themes.Default()) as demo:
+#         gr.Markdown("# Enhanced Video to SOAP Note Generator")
+
+#         with gr.Tab("Audio/Video File to SOAP"):
+#             gr.Interface(
+#                 fn=process_file,
+#                 inputs=[gr.File(label="Upload Audio/Video File"), gr.Textbox(label="Enter Prompt for Template", placeholder="Enter a detailed prompt...", lines=6)],
+#                 outputs=[
+#                     gr.Textbox(label="SOAP Note"),
+#                     # gr.Textbox(label="Generated Template from LLaMA"),
+#                     # gr.Textbox(label="JSON Output")
+#                 ],
+#             )
+
+#         with gr.Tab("Text Input to SOAP"):
+#             gr.Interface(
+#                 fn=process_text,
+#                 inputs=[gr.Textbox(label="Enter Text", placeholder="Enter medical notes...", lines=6), gr.Textbox(label="Enter Prompt for Template", placeholder="Enter a detailed prompt...", lines=6)],
+#                 outputs=[
+#                     gr.Textbox(label="SOAP Note"),
+#                     # gr.Textbox(label="Generated Template from LLaMA"),
+#                     # gr.Textbox(label="JSON Output")
+#                 ],
+#             )
+
+#     demo.launch(share=True, debug=True)
+
+# # Run the Gradio app
+# if __name__ == "__main__":
+#     launch_gradio()
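For quick verification of the sentence-routing logic this commit keeps active (the classify_sentence path in the diff above), here is a minimal, self-contained sketch. It assumes only the sentence-transformers package and the same "all-MiniLM-L6-v2" model the app loads; the shortened section descriptions and the sample sentence are illustrative, not taken from the commit.

# Sketch: route a sentence to its most similar SOAP section by cosine similarity.
from sentence_transformers import SentenceTransformer, util

embedder = SentenceTransformer("all-MiniLM-L6-v2")

# Shortened stand-ins for the app's soap_prompts (illustrative wording, not the originals).
soap_prompts = {
    "subjective": "Symptoms and health concerns described by the patient.",
    "objective": "Observable facts, clinical findings, and diagnoses.",
    "assessment": "Clinical evaluations and opinions on the patient's condition.",
    "plan": "Treatment recommendations and follow-up instructions.",
}
# Embed each section description once, up front, as the app does at startup.
soap_embeddings = {s: embedder.encode(p, convert_to_tensor=True) for s, p in soap_prompts.items()}

def classify_sentence(sentence):
    # Pick the section whose description embedding is most similar to the sentence.
    emb = embedder.encode(sentence, convert_to_tensor=True)
    sims = {s: float(util.pytorch_cos_sim(emb, e)) for s, e in soap_embeddings.items()}
    return max(sims, key=sims.get)

# Invented sample input; a sentence like this should land in "subjective".
print(classify_sentence("Patient reports a throbbing headache for the past three days."))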