orrinin committed
Commit ed04595 · verified · 1 Parent(s): 18addf1

Update app.py

Files changed (1)
  1. app.py +31 -8
app.py CHANGED
@@ -6,12 +6,12 @@ import io
 import base64
 
 # Set API key and organization ID from environment variables
-api_key = os.environ.get("OPENAI_API_KEY")
+api_key = os.environ.get("API_KEY")
 #base_url = os.environ.get("OPENAI_API_BASE")
-client = OpenAI(api_key=api_key)
 
 # Define the model to be used
 MODEL = os.environ.get("MODEL")
+MODEL_NAME = MODEL.split("/")[-1] if "/" in MODEL else MODEL
 
 def read(filename):
     with open(filename) as f:
@@ -42,18 +42,33 @@ footer {
 """
 
 
-LICENSE = '采用 ' + MODEL + ' 模型'
-
+LICENSE = '采用 ' + MODEL_NAME + ' 模型'
+
+def endpoints(api_key):
+    if api_key is not None:
+        if api_key.startswith('sk-'):
+            return 'OPENAI'
+        else:
+            return 'GOOGLE'
+
 def process_text(text_input, unit):
-    if text_input:
+    endpoint = endpoints(api_key)
+    if text_input and endpoint == 'OPENAI':
+        client = OpenAI(api_key=api_key)
         completion = client.chat.completions.create(
             model=MODEL,
             messages=[
-                {"role": "system", "content": f" You are a experienced {unit} doctor." + SYS_PROMPT},
+                {"role": "system", "content": f" You are an experienced {unit} doctor AI assistant." + SYS_PROMPT},
                 {"role": "user", "content": f"Hello! Could you solve {text_input}?"}
             ]
         )
         return completion.choices[0].message.content
+    elif text_input and endpoint == 'GOOGLE':
+        genai.configure(api_key=api_key)
+        model = genai.GenerativeModel(model_name=MODEL)
+        prompt = f" You are an experienced {unit} doctor AI assistant." + SYS_PROMPT + f"Could you solve {text_input}?"
+        response = model.generate_content(prompt)
+        return response.text
     return ""
 
 def encode_image_to_base64(image_input):
@@ -63,14 +78,16 @@ def encode_image_to_base64(image_input):
     return img_str
 
 def process_image(image_input, unit):
-    if image_input is not None:
+    endpoint = endpoints(api_key)
+    if image_input is not None and endpoint == 'OPENAI':
         #with open(image_input.name, "rb") as f:
         #    base64_image = base64.b64encode(f.read()).decode("utf-8")
+        client = OpenAI(api_key=api_key)
         base64_image = encode_image_to_base64(image_input)
         response = client.chat.completions.create(
             model=MODEL,
             messages=[
-                {"role": "system", "content": f" You are a experienced {unit} doctor." + SYS_PROMPT},
+                {"role": "system", "content": f" You are an experienced {unit} doctor AI assistant." + SYS_PROMPT},
                 {"role": "user", "content": [
                     {"type": "text", "text": "Help me understand what is in this picture and analysis."},
                     {"type": "image_url",
@@ -84,6 +101,12 @@ def process_image(image_input, unit):
             max_tokens=1024,
         )
         return response.choices[0].message.content
+    elif image_input is not None and endpoint == 'GOOGLE':
+        genai.configure(api_key=api_key)
+        model = genai.GenerativeModel(model_name=MODEL)
+        prompt = f" You are an experienced {unit} doctor AI assistant." + SYS_PROMPT + "Help me understand what is in this picture and analysis."
+        response = model.generate_content([prompt, image_input], request_options={"timeout": 60})
+        return response.text
 
 
 def main(text_input="", image_input=None, unit=""):
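For context, a minimal self-contained sketch of the key-prefix routing that the updated process_text / process_image paths rely on. It assumes app.py imports OpenAI from openai and google.generativeai as genai near the top of the file (outside the hunks shown above) and that SYS_PROMPT is defined elsewhere in app.py; the ask() helper and its default unit value are illustrative, not part of the commit.

# Illustrative sketch only -- mirrors the routing added in this commit, not app.py itself.
import os

from openai import OpenAI                # assumed imported at the top of app.py
import google.generativeai as genai      # assumed imported at the top of app.py

api_key = os.environ.get("API_KEY")
MODEL = os.environ.get("MODEL")

def endpoints(api_key):
    # Route by key prefix: OpenAI secret keys start with "sk-";
    # anything else is treated as a Google AI Studio key.
    if api_key is not None:
        if api_key.startswith('sk-'):
            return 'OPENAI'
        return 'GOOGLE'
    return None

def ask(prompt, unit="general"):         # hypothetical helper, not in app.py
    system = f"You are an experienced {unit} doctor AI assistant."
    endpoint = endpoints(api_key)
    if endpoint == 'OPENAI':
        client = OpenAI(api_key=api_key)
        completion = client.chat.completions.create(
            model=MODEL,
            messages=[
                {"role": "system", "content": system},
                {"role": "user", "content": prompt},
            ],
        )
        return completion.choices[0].message.content
    if endpoint == 'GOOGLE':
        genai.configure(api_key=api_key)
        model = genai.GenerativeModel(model_name=MODEL)
        response = model.generate_content(system + " " + prompt)
        return response.text
    return ""

Note that the API key and MODEL must belong to the same provider: whichever branch the key selects, MODEL is passed straight to that provider's client.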