bharat-raghunathan committed on
Commit
19737a0
·
verified ·
1 Parent(s): 62a03c7

Revert to public link for demo

Browse files
Files changed (1) hide show
  1. app.py +174 -175
app.py CHANGED
@@ -1,175 +1,174 @@
1
- import gradio as gr
2
- import os
3
- import requests
4
- from huggingface_hub import InferenceClient
5
- import google.generativeai as genai
6
- import openai
7
-
8
- def api_check_msg(api_key, selected_model):
9
- res = validate_api_key(api_key, selected_model)
10
- return res["message"]
11
-
12
- def validate_api_key(api_key, selected_model):
13
- # Check if the API key is valid for GPT-3.5-Turbo
14
- if "GPT" in selected_model:
15
- url = "https://api.openai.com/v1/models"
16
- headers = {
17
- "Authorization": f"Bearer {api_key}"
18
- }
19
- try:
20
- response = requests.get(url, headers=headers)
21
- if response.status_code == 200:
22
- return {"is_valid": True, "message": '<p style="color: green;">API Key is valid!</p>'}
23
- else:
24
- return {"is_valid": False, "message": f'<p style="color: red;">Invalid OpenAI API Key. Status code: {response.status_code}</p>'}
25
- except requests.exceptions.RequestException as e:
26
- return {"is_valid": False, "message": f'<p style="color: red;">Invalid OpenAI API Key. Error: {e}</p>'}
27
- elif "Llama" in selected_model:
28
- url = "https://huggingface.co/api/whoami-v2"
29
- headers = {
30
- "Authorization": f"Bearer {api_key}"
31
- }
32
- try:
33
- response = requests.get(url, headers=headers)
34
- if response.status_code == 200:
35
- return {"is_valid": True, "message": '<p style="color: green;">API Key is valid!</p>'}
36
- else:
37
- return {"is_valid": False, "message": f'<p style="color: red;">Invalid Hugging Face API Key. Status code: {response.status_code}</p>'}
38
- except requests.exceptions.RequestException as e:
39
- return {"is_valid": False, "message": f'<p style="color: red;">Invalid Hugging Face API Key. Error: {e}</p>'}
40
- elif "Gemini" in selected_model:
41
- try:
42
- genai.configure(api_key=api_key)
43
- model = genai.GenerativeModel("gemini-1.5-flash")
44
- response = model.generate_content("Help me diagnose the patient.")
45
- return {"is_valid": True, "message": '<p style="color: green;">API Key is valid!</p>'}
46
- except Exception as e:
47
- return {"is_valid": False, "message": f'<p style="color: red;">Invalid Google API Key. Error: {e}</p>'}
48
-
49
- def generate_text_chatgpt(key, prompt, temperature, top_p):
50
-
51
- openai.api_key = key
52
-
53
- response = openai.chat.completions.create(
54
- model="gpt-4-0613",
55
- messages=[{"role": "system", "content": "You are a talented diagnostician who is diagnosing a patient."},
56
- {"role": "user", "content": prompt}],
57
- temperature=temperature,
58
- max_tokens=50,
59
- top_p=top_p,
60
- frequency_penalty=0
61
- )
62
-
63
- return response.choices[0].message.content
64
-
65
-
66
- def generate_text_gemini(key, prompt, temperature, top_p):
67
- genai.configure(api_key=key)
68
-
69
- generation_config = genai.GenerationConfig(
70
- max_output_tokens=len(prompt)+50,
71
- temperature=temperature,
72
- top_p=top_p,
73
- )
74
- model = genai.GenerativeModel("gemini-1.5-flash", generation_config=generation_config)
75
- response = model.generate_content(prompt)
76
- return response.text
77
-
78
-
79
- def generate_text_llama(key, prompt, temperature, top_p):
80
- model_name = "meta-llama/Meta-Llama-3-8B-Instruct"
81
- client = InferenceClient(api_key=key)
82
-
83
- messages = [{"role": "system", "content": "You are a talented diagnostician who is diagnosing a patient."},
84
- {"role": "user","content": prompt}]
85
-
86
- completion = client.chat.completions.create(
87
- model=model_name,
88
- messages=messages,
89
- max_tokens=len(prompt)+50,
90
- temperature=temperature,
91
- top_p=top_p
92
- )
93
-
94
- response = completion.choices[0].message.content
95
- if len(response) > len(prompt):
96
- return response[len(prompt):]
97
- return response
98
-
99
-
100
- def diagnose(key, model, top_k, temperature, symptom_prompt):
101
-
102
- model_map = {
103
- "GPT-3.5-Turbo": "GPT",
104
- "Llama-3": "Llama",
105
- "Gemini-1.5": "Gemini"
106
- }
107
- if symptom_prompt:
108
- if "GPT" in model:
109
- message = generate_text_chatgpt(key, symptom_prompt, temperature, top_k)
110
- elif "Llama" in model:
111
- message = generate_text_llama(key, symptom_prompt, temperature, top_k)
112
- elif "Gemini" in model:
113
- message = generate_text_gemini(key, symptom_prompt, temperature, top_k)
114
- else:
115
- message = "Incorrect model, please try again."
116
- else:
117
- message = "Please add the symptoms data"
118
-
119
- return message
120
-
121
- def update_model_components(selected_model):
122
- model_map = {
123
- "GPT-3.5-Turbo": "GPT",
124
- "Llama-3": "Llama",
125
- "Gemini-1.5": "Gemini"
126
- }
127
-
128
- link_map = {
129
- "GPT-3.5-Turbo": "https://platform.openai.com/account/api-keys",
130
- "Llama-3": "https://hf.co/settings/tokens",
131
- "Gemini-1.5": "https://aistudio.google.com/apikey"
132
- }
133
- textbox_label = f"Please input the API key for your {model_map[selected_model]} model"
134
- button_value = f"Don't have an API key? Get one for the {model_map[selected_model]} model here."
135
- button_link = link_map[selected_model]
136
- return gr.update(label=textbox_label), gr.update(value=button_value, link=button_link)
137
-
138
- def toggle_button(symptoms_text, api_key, model):
139
- if symptoms_text.strip() and validate_api_key(api_key, model):
140
- return gr.update(interactive=True)
141
- return gr.update(interactive=False)
142
-
143
-
144
- with gr.Blocks() as ui:
145
-
146
- with gr.Row(equal_height=500):
147
- with gr.Column(scale=1, min_width=300):
148
- model = gr.Radio(label="LLM Selection", value="GPT-3.5-Turbo",
149
- choices=["GPT-3.5-Turbo", "Llama-3", "Gemini-1.5"])
150
- is_valid = False
151
- key = gr.Textbox(label="Please input the API key for your GPT model", type="password")
152
- status_message = gr.HTML(label="Validation Status")
153
- key.input(fn=api_check_msg, inputs=[key, model], outputs=status_message)
154
- button = gr.Button(value="Don't have an API key? Get one for the GPT model here.", link="https://platform.openai.com/account/api-keys")
155
- model.change(update_model_components, inputs=model, outputs=[key, button])
156
- # gr.Button(value="OpenAi Key", link="https://platform.openai.com/account/api-keys")
157
- # gr.Button(value="Meta Llama Key", link="https://platform.openai.com/account/api-keys")
158
- # gr.Button(value="Gemini Key", link="https://platform.openai.com/account/api-keys")
159
- gr.ClearButton(key, variant="primary")
160
-
161
- with gr.Column(scale=2, min_width=600):
162
- gr.Markdown("## Hello, Welcome to the GUI by Team #9.")
163
- temperature = gr.Slider(0.0, 1.0, value=0.7, step = 0.05, label="Temperature", info="Set the Temperature")
164
- top_p = gr.Slider(0.0, 1.0, value=0.9, step = 0.05, label="top-p value", info="Set the sampling nucleus parameter")
165
- symptoms = gr.Textbox(label="Add the symptom data in the input to receive diagnosis")
166
- llm_btn = gr.Button(value="Diagnose Disease", variant="primary", elem_id="diagnose", interactive=False)
167
- symptoms.input(toggle_button, inputs=[symptoms, key, model], outputs=llm_btn)
168
- key.input(toggle_button, inputs=[symptoms, key, model], outputs=llm_btn)
169
- model.change(toggle_button, inputs=[symptoms, key, model], outputs=llm_btn)
170
- output = gr.Textbox(label="LLM Output Status", interactive=False, placeholder="Output will appear here...")
171
- llm_btn.click(fn=diagnose, inputs=[key, model, top_p, temperature, symptoms], outputs=output, api_name="auditor")
172
-
173
-
174
- # ui.launch(share=True)
175
- ui.launch()
 
1
+ import gradio as gr
2
+ import os
3
+ import requests
4
+ from huggingface_hub import InferenceClient
5
+ import google.generativeai as genai
6
+ import openai
7
+
8
def api_check_msg(api_key, selected_model):
    """Return only the HTML status snippet from an API-key validation attempt."""
    return validate_api_key(api_key, selected_model)["message"]
11
+
12
def validate_api_key(api_key, selected_model):
    """Check *api_key* against the provider implied by *selected_model*.

    Args:
        api_key: the key/token the user typed in.
        selected_model: UI model label; matched by substring ("GPT", "Llama",
            "Gemini").

    Returns:
        dict with:
            is_valid (bool): whether the provider accepted the key.
            message (str): an HTML snippet for the gr.HTML status field.
    """
    valid_msg = '<p style="color: green;">API Key is valid!</p>'
    if "GPT" in selected_model:
        # OpenAI: listing models succeeds only with a valid key.
        url = "https://api.openai.com/v1/models"
        headers = {
            "Authorization": f"Bearer {api_key}"
        }
        try:
            # timeout added so a stalled request cannot hang the UI callback
            response = requests.get(url, headers=headers, timeout=10)
            if response.status_code == 200:
                return {"is_valid": True, "message": valid_msg}
            else:
                return {"is_valid": False, "message": f'<p style="color: red;">Invalid OpenAI API Key. Status code: {response.status_code}</p>'}
        except requests.exceptions.RequestException as e:
            return {"is_valid": False, "message": f'<p style="color: red;">Invalid OpenAI API Key. Error: {e}</p>'}
    elif "Llama" in selected_model:
        # Hugging Face: whoami endpoint authenticates the token.
        url = "https://huggingface.co/api/whoami-v2"
        headers = {
            "Authorization": f"Bearer {api_key}"
        }
        try:
            response = requests.get(url, headers=headers, timeout=10)
            if response.status_code == 200:
                return {"is_valid": True, "message": valid_msg}
            else:
                return {"is_valid": False, "message": f'<p style="color: red;">Invalid Hugging Face API Key. Status code: {response.status_code}</p>'}
        except requests.exceptions.RequestException as e:
            return {"is_valid": False, "message": f'<p style="color: red;">Invalid Hugging Face API Key. Error: {e}</p>'}
    elif "Gemini" in selected_model:
        # Gemini SDK exposes no lightweight auth check, so issue a tiny
        # generation request; any failure is treated as an invalid key.
        try:
            genai.configure(api_key=api_key)
            model = genai.GenerativeModel("gemini-1.5-flash")
            response = model.generate_content("Help me diagnose the patient.")
            return {"is_valid": True, "message": valid_msg}
        except Exception as e:
            return {"is_valid": False, "message": f'<p style="color: red;">Invalid Google API Key. Error: {e}</p>'}
    # Bug fix: previously an unknown model fell through and returned None,
    # which made api_check_msg() crash on res["message"].
    return {"is_valid": False, "message": '<p style="color: red;">Unknown model selected.</p>'}
48
+
49
def generate_text_chatgpt(key, prompt, temperature, top_p):
    """Request a short diagnosis for *prompt* from the OpenAI chat API.

    NOTE(review): the model id is "gpt-4-0613" although the UI labels this
    option "GPT-3.5-Turbo" — confirm which model is actually intended.
    """
    openai.api_key = key

    chat_messages = [
        {"role": "system", "content": "You are a talented diagnostician who is diagnosing a patient."},
        {"role": "user", "content": prompt},
    ]
    completion = openai.chat.completions.create(
        model="gpt-4-0613",
        messages=chat_messages,
        temperature=temperature,
        max_tokens=50,
        top_p=top_p,
        frequency_penalty=0,
    )
    return completion.choices[0].message.content
64
+
65
+
66
def generate_text_gemini(key, prompt, temperature, top_p):
    """Request a diagnosis for *prompt* from Gemini 1.5 Flash."""
    genai.configure(api_key=key)

    # Output budget scales with the prompt length, mirroring the Llama path.
    config = genai.GenerationConfig(
        max_output_tokens=len(prompt) + 50,
        temperature=temperature,
        top_p=top_p,
    )
    gemini = genai.GenerativeModel("gemini-1.5-flash", generation_config=config)
    return gemini.generate_content(prompt).text
77
+
78
+
79
def generate_text_llama(key, prompt, temperature, top_p):
    """Request a diagnosis for *prompt* via the Hugging Face Inference API."""
    client = InferenceClient(api_key=key)

    completion = client.chat.completions.create(
        model="meta-llama/Meta-Llama-3-8B-Instruct",
        messages=[
            {"role": "system", "content": "You are a talented diagnostician who is diagnosing a patient."},
            {"role": "user", "content": prompt},
        ],
        max_tokens=len(prompt) + 50,
        temperature=temperature,
        top_p=top_p,
    )

    answer = completion.choices[0].message.content
    # If the model echoed the prompt, return only the continuation.
    return answer[len(prompt):] if len(answer) > len(prompt) else answer
98
+
99
+
100
def diagnose(key, model, top_k, temperature, symptom_prompt):
    """Dispatch *symptom_prompt* to the text generator for the selected model.

    Args:
        key: provider API key for the chosen backend.
        model: UI model label (e.g. "GPT-3.5-Turbo", "Llama-3", "Gemini-1.5");
            matched by substring.
        top_k: nucleus-sampling value. NOTE: despite the name, the UI wires
            the top-p slider into this argument and it is forwarded as top_p.
        temperature: sampling temperature.
        symptom_prompt: free-text symptom description; may be empty.

    Returns:
        The model's diagnosis text, or a user-facing error/help message.
    """
    # Guard clauses replace the original nested if/else; the unused
    # model_map local has been removed.
    if not symptom_prompt:
        return "Please add the symptoms data"
    if "GPT" in model:
        return generate_text_chatgpt(key, symptom_prompt, temperature, top_k)
    if "Llama" in model:
        return generate_text_llama(key, symptom_prompt, temperature, top_k)
    if "Gemini" in model:
        return generate_text_gemini(key, symptom_prompt, temperature, top_k)
    return "Incorrect model, please try again."
120
+
121
def update_model_components(selected_model):
    """Return gr.update()s refreshing the key textbox label and key-link button
    for the newly selected model."""
    # One table of (short name, key-signup link) replaces the two parallel
    # dicts in the original.
    model_info = {
        "GPT-3.5-Turbo": ("GPT", "https://platform.openai.com/account/api-keys"),
        "Llama-3": ("Llama", "https://hf.co/settings/tokens"),
        "Gemini-1.5": ("Gemini", "https://aistudio.google.com/apikey"),
    }
    short_name, signup_link = model_info[selected_model]
    label_text = f"Please input the API key for your {short_name} model"
    button_text = f"Don't have an API key? Get one for the {short_name} model here."
    return (
        gr.update(label=label_text),
        gr.update(value=button_text, link=signup_link),
    )
137
+
138
def toggle_button(symptoms_text, api_key, model):
    """Enable the Diagnose button only when symptoms are present AND the key
    actually validates.

    Bug fix: validate_api_key() returns a dict, which is always truthy, so the
    original condition enabled the button even for invalid keys; check the
    "is_valid" flag instead (guarding against a None fall-through as well).
    """
    # Short-circuit as the original did: skip the network validation call
    # entirely when there is no symptom text.
    if not symptoms_text.strip():
        return gr.update(interactive=False)
    result = validate_api_key(api_key, model)
    if result and result.get("is_valid"):
        return gr.update(interactive=True)
    return gr.update(interactive=False)
142
+
143
+
144
# Gradio UI: model selection + API-key validation on the left, sampling
# controls and diagnosis input/output on the right.
# Cleanup: removed the unused `is_valid = False` local and three
# commented-out dead gr.Button lines from the original.
with gr.Blocks() as ui:

    with gr.Row(equal_height=500):
        # Left column: model selection and API-key entry/validation.
        with gr.Column(scale=1, min_width=300):
            model = gr.Radio(label="LLM Selection", value="GPT-3.5-Turbo",
                             choices=["GPT-3.5-Turbo", "Llama-3", "Gemini-1.5"])
            key = gr.Textbox(label="Please input the API key for your GPT model", type="password")
            status_message = gr.HTML(label="Validation Status")
            # Validate the key live as the user types.
            key.input(fn=api_check_msg, inputs=[key, model], outputs=status_message)
            button = gr.Button(value="Don't have an API key? Get one for the GPT model here.", link="https://platform.openai.com/account/api-keys")
            # Swap the key label and key-link button whenever the model changes.
            model.change(update_model_components, inputs=model, outputs=[key, button])
            gr.ClearButton(key, variant="primary")

        # Right column: sampling controls, symptom input, diagnosis output.
        with gr.Column(scale=2, min_width=600):
            gr.Markdown("## Hello, Welcome to the GUI by Team #9.")
            temperature = gr.Slider(0.0, 1.0, value=0.7, step=0.05, label="Temperature", info="Set the Temperature")
            top_p = gr.Slider(0.0, 1.0, value=0.9, step=0.05, label="top-p value", info="Set the sampling nucleus parameter")
            symptoms = gr.Textbox(label="Add the symptom data in the input to receive diagnosis")
            llm_btn = gr.Button(value="Diagnose Disease", variant="primary", elem_id="diagnose", interactive=False)
            # Re-check button interactivity whenever any relevant input changes.
            symptoms.input(toggle_button, inputs=[symptoms, key, model], outputs=llm_btn)
            key.input(toggle_button, inputs=[symptoms, key, model], outputs=llm_btn)
            model.change(toggle_button, inputs=[symptoms, key, model], outputs=llm_btn)
            output = gr.Textbox(label="LLM Output Status", interactive=False, placeholder="Output will appear here...")
            # NOTE: top_p is wired into diagnose()'s `top_k` parameter.
            llm_btn.click(fn=diagnose, inputs=[key, model, top_p, temperature, symptoms], outputs=output, api_name="auditor")


# share=True publishes a temporary public link for the demo (this commit's change).
ui.launch(share=True)