import gradio as gr
from openai import OpenAI
import google.generativeai as genai
import os
import io
import base64
# Read the API key (OpenAI or Google) from an environment variable
api_key = os.environ.get("API_KEY")
#base_url = os.environ.get("OPENAI_API_BASE")
# Define the model to be used
MODEL = os.environ.get("MODEL")
MODEL_NAME = MODEL.split("/")[-1] if "/" in MODEL else MODEL
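# Environment setup sketch (illustrative values; the exact key and model names
# are assumptions, and the filename app.py is assumed):
#   export API_KEY="sk-..."        # an OpenAI key, or a Google AI Studio key for Gemini
#   export MODEL="gpt-4o-mini"     # or e.g. "models/gemini-1.5-flash"
#   python app.py
# MODEL may carry a provider/path prefix (e.g. "models/..."); MODEL_NAME keeps
# only the last segment for display.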
def read(filename):
    # Return the full contents of a text file as a string.
    with open(filename) as f:
        data = f.read()
    return data
SYS_PROMPT = read('system_prompt.txt')
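# UI description shown below (in Chinese): "Clinical Assistant Alpha — an AI tool that
# helps you analyze symptoms and test reports. Choose the department you want to consult
# and enter a symptom description or exam information in the text box; you can also
# upload a picture of a test report. Generated information may be inaccurate, has no
# practical reference value, and is no substitute for a professional doctor."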
DESCRIPTION = '''
<div>
<h1 style="text-align: center;">诊疗助手Alpha</h1>
<p>一个帮助您分析症状和检验报告的AI工具。</p>
<p>🔎 选择您需要咨询的科室,在输入框中输入症状描述或者体检信息等;您也可以在图片框中上传检测报告图。</p>
<p>🦕 请注意生成信息可能不准确,且不具备任何实际参考价值,如有需要请联系专业医生。</p>
</div>
'''
css = """
h1 {
text-align: center;
display: block;
}
footer {
display:none !important
}
"""
LICENSE = '采用 ' + MODEL_NAME + ' 模型'
def endpoints(api_key):
    # Crude routing heuristic: keys starting with "sk-" are treated as OpenAI,
    # anything else as a Google API key; returns None when no key is set.
    if api_key is not None:
        if api_key.startswith('sk-'):
            return 'OPENAI'
        else:
            return 'GOOGLE'
    return None
def process_text(text_input, unit):
    endpoint = endpoints(api_key)
    if text_input and endpoint == 'OPENAI':
        client = OpenAI(api_key=api_key)
        completion = client.chat.completions.create(
            model=MODEL,
            messages=[
                {"role": "system", "content": f"You are an experienced {unit} doctor AI assistant. " + SYS_PROMPT},
                {"role": "user", "content": f"Hello! Could you solve {text_input}?"}
            ]
        )
        return completion.choices[0].message.content
    elif text_input and endpoint == 'GOOGLE':
        genai.configure(api_key=api_key)
        model = genai.GenerativeModel(model_name=MODEL)
        prompt = f"You are an experienced {unit} doctor AI assistant. " + SYS_PROMPT + f" Could you solve {text_input}?"
        response = model.generate_content(prompt)
        return response.text
    return ""
def encode_image_to_base64(image_input):
    # JPEG cannot store an alpha channel, so convert non-RGB images
    # (e.g. RGBA PNG uploads) before saving.
    if image_input.mode != "RGB":
        image_input = image_input.convert("RGB")
    buffered = io.BytesIO()
    image_input.save(buffered, format="JPEG")
    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
    return img_str
def process_image(image_input, unit):
    endpoint = endpoints(api_key)
    if image_input is not None and endpoint == 'OPENAI':
        client = OpenAI(api_key=api_key)
        base64_image = encode_image_to_base64(image_input)
        response = client.chat.completions.create(
            model=MODEL,
            messages=[
                {"role": "system", "content": f"You are an experienced {unit} doctor AI assistant. " + SYS_PROMPT},
                {"role": "user", "content": [
                    {"type": "text", "text": "Help me understand what is in this picture and analyze it."},
                    {"type": "image_url",
                     "image_url": {
                         "url": f"data:image/jpeg;base64,{base64_image}",
                         "detail": "low"}
                     }
                ]}
            ],
            temperature=0.0,
            max_tokens=1024,
        )
        return response.choices[0].message.content
    elif image_input is not None and endpoint == 'GOOGLE':
        genai.configure(api_key=api_key)
        model = genai.GenerativeModel(model_name=MODEL)
        prompt = f"You are an experienced {unit} doctor AI assistant. " + SYS_PROMPT + " Help me understand what is in this picture and analyze it."
        response = model.generate_content([prompt, image_input], request_options={"timeout": 60})
        return response.text
    return ""
def main(text_input="", image_input=None, unit=""):
    # Route the request: text-only input goes to the text handler, any uploaded
    # image goes to the image handler (the image takes priority if both are given).
    if text_input and image_input is None:
        return process_text(text_input, unit)
    elif image_input is not None:
        return process_image(image_input, unit)
    return ""
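# Example (illustrative) direct call, bypassing the UI:
#   print(main(text_input="Persistent cough for three days with a low fever", unit="内科"))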
with gr.Blocks(theme='shivi/calm_seafoam', css=css, title="诊疗助手demo") as iface:
    with gr.Accordion(""):
        gr.Markdown(DESCRIPTION)
    unit = gr.Dropdown(label="🩺科室", value='中医科', elem_id="units",  # department selector
                       choices=["中医科", "内科", "外科", "妇产科", "儿科",
                                "五官科", "男科", "皮肤性病科", "传染科", "精神心理科",
                                "整形美容科", "营养科", "生殖中心", "麻醉医学科", "医学影像科",
                                "骨科", "肿瘤科", "急诊科", "检验科"])
    with gr.Row():
        output_box = gr.Markdown(label="分析")  # Markdown area that displays the analysis
    with gr.Row():
        image_input = gr.Image(type="pil", label="上传图片")  # Image upload component
        text_input = gr.Textbox(label="输入")  # Text input box
    with gr.Row():
        submit_btn = gr.Button("🚀 确认")  # Submit button
        clear_btn = gr.ClearButton([output_box, image_input, text_input], value="🗑️ 清空")  # Clear button
    # Set up the event listeners
    submit_btn.click(main, inputs=[text_input, image_input, unit], outputs=output_box)
    gr.Markdown(LICENSE)

#gr.close_all()
iface.queue().launch(show_api=False)  # Launch the Gradio interface
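# Note (deployment assumption, not needed on Hugging Face Spaces): when self-hosting
# you may want to bind to all interfaces, e.g.
#   iface.queue().launch(show_api=False, server_name="0.0.0.0")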