Alesmikes committed
Commit 1b10626 · verified · 1 Parent(s): 3d3d753

Update app.py

Files changed (1)
  1. app.py +13 -64
app.py CHANGED
@@ -2,86 +2,35 @@ import streamlit as st
  from audiorecorder import audiorecorder
  import openai
  import os
- openai.api_key = os.environ['OPENAI_API_KEY']
-
-
- def get_completion(messages, model="gpt-3.5-turbo"):
-     response = openai.ChatCompletion.create(
-         model=model,
-         messages=messages,
-         temperature=0)
-     return response.choices[0].message["content"]

+ # Nastavení OpenAI klíče
+ openai.api_key = os.environ['OPENAI_API_KEY']

  def transcribe(audio_path):
      audio_file = open(audio_path, "rb")
-     transcript = openai.Audio.translate_raw("whisper-1", audio_file, filename = '1.mp3')
+     transcript = openai.Audio.translate_raw("whisper-1", audio_file, filename='1.mp3')
      return transcript["text"]

-
- def get_ddx(vignette):
-     messages_ddx = [
-         {'role': 'system', 'content': 'Jste nástroj AI asistenta lékaře. Napište sadu příkazů pro pacienta k odlišení mezi stavy. Napište pouze příkazy a zdůvodnění. Nepište žádné další informace. Nepište žádný úvod.'},
-         {'role': 'user', 'content': vignette}]
-     ddx = get_completion(messages_ddx)
-     return ddx
-
-
- def get_orders(vignette, ddx):
-     messages_orders = [
-         {'role': 'system', 'content': 'Jste nástroj AI asistenta lékaře. Napište sadu příkazů pro pacienta k odlišení mezi stavy. Napište pouze příkazy a zdůvodnění. Nepište žádné další informace. Nepište žádný úvod.'},
-         {'role': 'user', 'content': f'Informace o pacientovi: {vignette}. Rozdílné diagnózy: {ddx}'}]
-     orders = get_completion(messages_orders)
-     return orders
-
-
+ # Inicializace session state
  if 'vignette' not in st.session_state:
      st.session_state['vignette'] = ''

- if 'ddx' not in st.session_state:
-     st.session_state['ddx'] = ''
-
- if 'orders' not in st.session_state:
-     st.session_state['orders'] = ''
-
  if 'length' not in st.session_state:
      st.session_state['length'] = 0

-
+ # Hlavní titulek aplikace
  st.title("AI loop for healthcare providers")
- st.markdown(
-     "Record your patient presentation and get the differential diagnoses and orders.")
+ st.markdown("Record your patient presentation and get the transcription.")
  st.divider()

+ # Nahrávání audia
  audio = audiorecorder("Record", "Stop")

-
- if (len(audio) != st.session_state['length']):
+ # Zpracování nahrávky
+ if len(audio) != st.session_state['length']:
      st.session_state['length'] = len(audio)
-     # wav_file = open("audio.mp3", "wb")
-     # wav_file.write(audio.tobytes())
-     transcript = openai.Audio.translate_raw("whisper-1", audio.tobytes(), filename = '1.mp3')
-     transcript["text"]
-     st.session_state['vignette'] += transcript["text"]
-
-
- st.session_state['vignette'] = st.text_area(
-     "Vignette", value=st.session_state['vignette'])
-
-
- if st.button("Get DDX and Orders"):
-     vignette = st.session_state['vignette']
-     ddx = get_ddx(vignette)
-     st.session_state['ddx'] = ddx
-     st.session_state['orders'] = get_orders(vignette, ddx)
-
-
- col1, col2 = st.columns(2)
-
- with col1:
-     st.markdown(
-         f"**DDX**\n\n{st.session_state['ddx']}", unsafe_allow_html=True)
+     transcript = transcribe(audio.tobytes())
+     st.session_state['vignette'] += transcript

- with col2:
-     st.markdown(
-         f"**ORDERS**\n\n{st.session_state['orders']}", unsafe_allow_html=True)
+ # Zobrazení transkripce
+ st.session_state['vignette'] = st.text_area("Vignette", value=st.session_state['vignette'])
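
In short, the commit drops the GPT-3.5 DDX/orders flow (get_completion, get_ddx, get_orders, the "Get DDX and Orders" button and the two result columns) and keeps only Whisper transcription, adding Czech section comments whose English meaning is: set the OpenAI key, initialize session state, main app title, audio recording, process the recording, display the transcription. One detail worth flagging: the new handler calls transcribe(audio.tobytes()), but transcribe() treats its argument as a file path and opens it with open(audio_path, "rb"), so passing raw bytes will most likely fail. Below is a minimal sketch of one way to bridge the two, assuming the legacy openai-python 0.x SDK (which provides openai.Audio.translate_raw, as used in app.py) and a recorder object exposing tobytes(); the transcribe_recording() helper is hypothetical and not part of the commit.

# Sketch only, not part of the commit. Assumes the legacy openai-python 0.x SDK
# (openai.Audio.translate_raw, as used in app.py) and that audio.tobytes()
# returns the recording as encoded audio bytes.
import os
import tempfile

import openai

openai.api_key = os.environ['OPENAI_API_KEY']

def transcribe(audio_path):
    # Same helper as in the new app.py: Whisper translate endpoint, returns English text.
    audio_file = open(audio_path, "rb")
    transcript = openai.Audio.translate_raw("whisper-1", audio_file, filename='1.mp3')
    return transcript["text"]

def transcribe_recording(audio_bytes):
    # Hypothetical bridge: write the raw recorder bytes to a temporary file so that
    # transcribe() receives a real path instead of a bytes object.
    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as tmp:
        tmp.write(audio_bytes)
        tmp_path = tmp.name
    try:
        return transcribe(tmp_path)
    finally:
        os.remove(tmp_path)

# In the Streamlit handler, the call would then read:
#     transcript = transcribe_recording(audio.tobytes())

The pre-commit handler took the other route and passed the bytes straight to the raw endpoint (openai.Audio.translate_raw("whisper-1", audio.tobytes(), filename='1.mp3')), which avoids the temporary file. Note also that both versions call the translate endpoint, so non-English dictation comes back translated into English rather than transcribed verbatim.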