ProfessorLeVesseur committed
Commit a688783 · verified · 1 Parent(s): b376142

Update app.py

Files changed (1): app.py (+80, -21)

app.py CHANGED
@@ -38,11 +38,80 @@ if show_details:
     )
 
 # Button to trigger the analysis
-analyze_button = st.button("Analyse the Image", type="secondary")
+analyze_button = st.button("Analyze the Image", type="secondary")
+
+# # Check if an image has been uploaded, if the API key is available, and if the button has been pressed
+# if uploaded_file is not None and analyze_button:
+
+#     with st.spinner("Analyzing the image ..."):
+#         # Encode the image
+#         base64_image = encode_image(uploaded_file)
+
+#         # Optimized prompt for additional clarity and detail
+#         prompt_text = (
+#             "You are a highly knowledgeable accessibility expert. "
+#             "Your task is to examine the following image in detail. "
+#             "Provide a comprehensive, factual, and accurate explanation of what the image depicts. "
+#             "Highlight key elements and their significance, and present your analysis in clear, well-structured paragraph format. "
+#             "Create a detailed image caption in explaining in 150 words or less."
+#         )
+
+#         if show_details and additional_details:
+#             prompt_text += (
+#                 f"\n\nAdditional Context Provided by the User:\n{additional_details}"
+#             )
+
+#         # Create the payload for the completion request
+#         messages = [
+#             {
+#                 "role": "user",
+#                 "content": [
+#                     {"type": "text", "text": prompt_text},
+#                     {
+#                         "type": "image_url",
+#                         "image_url": f"data:image/jpeg;base64,{base64_image}",
+#                     },
+#                 ],
+#             }
+#         ]
+
+#         # Make the request to the OpenAI API
+#         try:
+#             # Without Stream
+
+#             # response = openai.chat.completions.create(
+#             #     model="gpt-4-vision-preview", messages=messages, max_tokens=500, stream=False
+#             # )
+
+#             # Stream the response
+#             full_response = ""
+#             message_placeholder = st.empty()
+#             for completion in openai.chat.completions.create(
+#                 model="gpt-4-vision-preview", messages=messages,
+#                 max_tokens=150, stream=True
+#             ):
+#                 # Check if there is content to display
+#                 if completion.choices[0].delta.content is not None:
+#                     full_response += completion.choices[0].delta.content
+#                     message_placeholder.markdown(full_response + "▌")
+#             # Final update to placeholder after the stream ends
+#             message_placeholder.markdown(full_response)
+
+#             # Display the response in the app
+#             # st.write(response.choices[0].message.content)
+#         except Exception as e:
+#             st.error(f"An error occurred: {e}")
+# else:
+#     # Warnings for user action required
+#     if not uploaded_file and analyze_button:
+#         st.warning("Please upload an image.")
+
+
+
+
 
 # Check if an image has been uploaded, if the API key is available, and if the button has been pressed
 if uploaded_file is not None and analyze_button:
-
     with st.spinner("Analyzing the image ..."):
         # Encode the image
         base64_image = encode_image(uploaded_file)
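Note: this hunk (both the live code and the commented-out copy) calls an encode_image helper that is defined earlier in app.py and not shown in the diff. For orientation, a minimal sketch of what such a helper typically looks like for a Streamlit upload; the body below is an assumption, not the file's actual implementation:

import base64


def encode_image(uploaded_file):
    # Streamlit's UploadedFile behaves like BytesIO, so getvalue() returns the raw
    # image bytes; base64-encode them for a "data:image/jpeg;base64,..." URL.
    return base64.b64encode(uploaded_file.getvalue()).decode("utf-8")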
@@ -53,13 +122,11 @@ if uploaded_file is not None and analyze_button:
             "Your task is to examine the following image in detail. "
             "Provide a comprehensive, factual, and accurate explanation of what the image depicts. "
             "Highlight key elements and their significance, and present your analysis in clear, well-structured paragraph format. "
-            "Create a detailed image caption in explaining in 150 words or less."
+            "Create a detailed image caption explaining in 150 words or less."
         )
 
         if show_details and additional_details:
-            prompt_text += (
-                f"\n\nAdditional Context Provided by the User:\n{additional_details}"
-            )
+            prompt_text += f"\n\nAdditional Context Provided by the User:\n{additional_details}"
 
         # Create the payload for the completion request
         messages = [
@@ -77,31 +144,23 @@ if uploaded_file is not None and analyze_button:
 
         # Make the request to the OpenAI API
         try:
-            # Without Stream
-
-            # response = openai.chat.completions.create(
-            #     model="gpt-4-vision-preview", messages=messages, max_tokens=500, stream=False
-            # )
-
-            # Stream the response
             full_response = ""
-            message_placeholder = st.empty()
-            for completion in openai.chat.completions.create(
+            for completion in openai.ChatCompletion.create(
                 model="gpt-4-vision-preview", messages=messages,
                 max_tokens=150, stream=True
            ):
                 # Check if there is content to display
                 if completion.choices[0].delta.content is not None:
                     full_response += completion.choices[0].delta.content
-                    message_placeholder.markdown(full_response + "▌")
-            # Final update to placeholder after the stream ends
-            message_placeholder.markdown(full_response)
-
-            # Display the response in the app
-            # st.write(response.choices[0].message.content)
+
+            # Display the response in a text area
+            st.text_area('Response:', value=full_response, height=400, key="response_text_area")
+
+            st.success('Powered by MTSS GPT. AI can make mistakes. Consider checking important information.')
         except Exception as e:
             st.error(f"An error occurred: {e}")
 else:
     # Warnings for user action required
     if not uploaded_file and analyze_button:
         st.warning("Please upload an image.")
+
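Note: both versions of the payload in this file pass image_url as a bare data-URL string, while the Chat Completions API documents a nested object form for image parts. An equivalent payload would look roughly like the sketch below; prompt_text and base64_image are stand-ins for the values app.py builds above, not values from the commit:

# Stand-ins for the values app.py builds above (illustrative only)
prompt_text = "Describe the image in 150 words or less."
base64_image = "<base64-encoded JPEG bytes>"

# Vision-style message: one text part plus one image part, with the data URL
# wrapped in an object under "url" as the API documentation shows.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": prompt_text},
            {
                "type": "image_url",
                "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
            },
        ],
    }
]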
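Note: the new hunk drives the stream through openai.ChatCompletion.create, the interface of the pre-1.0 openai package, whereas the removed lines used the 1.x-style openai.chat.completions.create call. For reference, a minimal sketch of the same stream-then-display pattern written against the 1.x client; the client setup and the placeholder messages list are assumptions, while the model string, token limit, and st.text_area call are carried over from the diff:

import streamlit as st
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# Placeholder payload so the sketch stands alone; app.py would pass the
# vision messages list built earlier (prompt text plus base64 image).
messages = [{"role": "user", "content": "Describe the uploaded image."}]

full_response = ""
# Accumulate the streamed content deltas, mirroring the loop in the diff
for chunk in client.chat.completions.create(
    model="gpt-4-vision-preview",  # model string carried over from the diff
    messages=messages,
    max_tokens=150,
    stream=True,
):
    delta = chunk.choices[0].delta.content
    if delta is not None:
        full_response += delta

# Show the full answer once the stream ends, as the new code does
st.text_area("Response:", value=full_response, height=400, key="response_text_area")

Accumulating the deltas and rendering once at the end matches the new code; streaming into a placeholder as the text arrives, as the removed message_placeholder.markdown lines did, is the other common choice.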