ProfessorLeVesseur committed on
Commit a2d596c · verified · 1 Parent(s): 1c29c42

Update app.py

Files changed (1)
  1. app.py +73 -112
app.py CHANGED
@@ -16,119 +16,80 @@ openai.api_key = st.secrets["openai_api_key"]
 # File uploader for images
 uploaded_file = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])
 
-# if uploaded_file:
-#     with st.expander("Image", expanded=True):
-#         st.image(uploaded_file, caption=uploaded_file.name, use_column_width=True)
-
-#     # Toggle for additional details input
-#     show_details = st.checkbox("Add details about the image")
-
-#     if show_details:
-#         additional_details = st.text_area("Add any additional details or context about the image here:")
-
-#     # Button to trigger the analysis
-#     analyze_button = st.button("Analyse the MTSS Image")
-
-#     if analyze_button:
-#         with st.spinner("Analyzing the image..."):
-#             base64_image = encode_image(uploaded_file)
-#             prompt_text = (
-#                 "You are a highly knowledgeable accessibility expert. "
-#                 "Your task is to examine the following image in detail. "
-#                 "Provide a comprehensive, factual, and accurate explanation of what the image depicts. "
-#                 "Highlight key elements and their significance, and present your analysis in clear, well-structured format. "
-#                 "Create a detailed image caption in explaining in 150 words or less."
-#             )
-
-#             if show_details and additional_details:
-#                 prompt_text += f"\n\nAdditional Context Provided by the User:\n{additional_details}"
-
-#             # Define the messages payload
-#             messages = [{
-#                 "role": "user",
-#                 "content": [{"type": "text", "text": prompt_text}, {"type": "image_url", "image_url": f"data:image/jpeg;base64,{base64_image}"}]
-#             }]
-
-#             try:
-#                 # Make the request to OpenAI and handle streaming if required
-#                 response = openai.chat.completions.create(model="gpt-4-vision-preview", messages=messages, max_tokens=150)
-#                 st.write(response.choices[0].message.content)
-#             except Exception as e:
-#                 st.error(f"An error occurred: {e}")
-# else:
-#     st.warning("Please upload an image.")
-
-
-
-
-
-
-
-
-# Check if an image has been uploaded, if the API key is available, and if the button has been pressed
-if uploaded_file is not None and api_key and analyze_button:
-
-    with st.spinner("Analysing the image ..."):
-        # Encode the image
-        base64_image = encode_image(uploaded_file)
-
-        # Optimized prompt for additional clarity and detail
-        prompt_text = (
-            "You are a highly knowledgeable accessibility expert. "
             "Your task is to examine the following image in detail. "
             "Provide a comprehensive, factual, and accurate explanation of what the image depicts. "
             "Highlight key elements and their significance, and present your analysis in clear, well-structured format. "
-            "Create a detailed image caption in explaining in 150 words or less."
-        )
-
-        if show_details and additional_details:
-            prompt_text += (
-                f"\n\nAdditional Context Provided by the User:\n{additional_details}"
            )
-
-        # Create the payload for the completion request
-        messages = [
-            {
-                "role": "user",
-                "content": [
-                    {"type": "text", "text": prompt_text},
-                    {
-                        "type": "image_url",
-                        "image_url": f"data:image/jpeg;base64,{base64_image}",
-                    },
-                ],
-            }
-        ]
-
-        # Make the request to the OpenAI API
-        try:
-            # Without Stream
-
-            # response = client.chat.completions.create(
-            #     model="gpt-4-vision-preview", messages=messages, max_tokens=500, stream=False
-            # )
-
-            # Stream the response
-            full_response = ""
-            message_placeholder = st.empty()
-            for completion in openai.chat.completions.create(
-                model="gpt-4-vision-preview", messages=messages,
-                max_tokens=150, stream=True
-            ):
-                # Check if there is content to display
-                if completion.choices[0].delta.content is not None:
-                    full_response += completion.choices[0].delta.content
-                    message_placeholder.markdown(full_response + "▌")
-            # Final update to placeholder after the stream ends
-            message_placeholder.markdown(full_response)
-
-            # Display the response in the app
-            # st.write(response.choices[0].message.content)
-        except Exception as e:
-            st.error(f"An error occurred: {e}")
-else:
-    # Warnings for user action required
-    if not uploaded_file and analyze_button:
-        st.warning("Please upload an image.")
-    if not api_key:
-        st.warning("Please enter your OpenAI API key.")
 
 
 
 
 # File uploader for images
 uploaded_file = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])
 
+if uploaded_file:
+    with st.expander("Image", expanded=True):
+        st.image(uploaded_file, caption=uploaded_file.name, use_column_width=True)
+
+    # Toggle for additional details input
+    show_details = st.checkbox("Add details about the image")
+
+    if show_details:
+        # Text input for additional details about the image, shown only if the toggle is True
+        additional_details = st.text_area(
+            "Add any additional details or context about the image here:",
+            disabled=not show_details
+        )
+
+    # Button to trigger the analysis
+    analyze_button = st.button("Analyse the MTSS Image")
+
+    if analyze_button:
+        with st.spinner("Analyzing the image..."):
+            base64_image = encode_image(uploaded_file)
+            prompt_text = (
+                "You are a highly knowledgeable accessibility expert. "
                 "Your task is to examine the following image in detail. "
                 "Provide a comprehensive, factual, and accurate explanation of what the image depicts. "
                 "Highlight key elements and their significance, and present your analysis in clear, well-structured format. "
+                "Create a detailed image caption explaining in 150 words or less. "
             )
+
+            if show_details and additional_details:
+                prompt_text += f"\n\nAdditional Context Provided by the User:\n{additional_details}"
+
+            # Create the payload for the completion request
+            messages = [
+                {
+                    "role": "user",
+                    "content": [
+                        {"type": "text", "text": prompt_text},
+                        {
+                            "type": "image_url",
+                            "image_url": f"data:image/jpeg;base64,{base64_image}",
+                        },
+                    ],
+                }
+            ]
+
+            # Make the request to the OpenAI API
+            try:
+                # Without Stream
+
+                # response = openai.chat.completions.create(
+                #     model="gpt-4-vision-preview", messages=messages, max_tokens=500, stream=False
+                # )
+
+                # Stream the response
+                full_response = ""
+                message_placeholder = st.empty()
+                for completion in openai.chat.completions.create(
+                    model="gpt-4-vision-preview", messages=messages,
+                    max_tokens=150, stream=True
+                ):
+                    # Check if there is content to display
+                    if completion.choices[0].delta.content is not None:
+                        full_response += completion.choices[0].delta.content
+                        message_placeholder.markdown(full_response + "▌")
+                # Final update to placeholder after the stream ends
+                message_placeholder.markdown(full_response)
+
+                # Display the response in the app
+                # st.write(response.choices[0].message.content)
+            except Exception as e:
+                st.error(f"An error occurred: {e}")
+    else:
+        # Warnings for user action required
+        if not uploaded_file and analyze_button:
+            st.warning("Please upload an image.")
+        if not api_key:
+            st.warning("Please enter your OpenAI API key.")
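
For reference, the hunk relies on a few definitions that sit above line 16 of app.py and are not part of this commit: the streamlit/openai/base64 imports, the API key read from st.secrets (visible in the hunk header), and the encode_image() helper that both the old and the new code call. A minimal sketch of what those lines presumably look like is below; apart from openai.api_key = st.secrets["openai_api_key"] and the encode_image name, everything here is an assumption rather than code taken from this commit.

import base64

import openai
import streamlit as st

# Shown as context in the hunk header: the key comes from Streamlit secrets.
openai.api_key = st.secrets["openai_api_key"]

def encode_image(uploaded_file):
    # Assumed helper: st.file_uploader returns a BytesIO-like object, so its raw
    # bytes can be base64-encoded for the data:image/jpeg;base64,... URL built in the diff.
    return base64.b64encode(uploaded_file.getvalue()).decode("utf-8")

In the diff, base64_image = encode_image(uploaded_file) is what gets interpolated into the image_url entry of the messages payload.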