qqubb committed
Commit 1c8fe13
1 Parent(s): 0c7583a

switch to streamlit UI, add download for updated cards, other minor fixes

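The new flow in app.py below reads each uploaded compliance card, renders its entries as checkboxes, and offers the edited card back as a YAML download. A minimal illustrative sketch of that pattern (not the committed app.py; it assumes a card is a mapping of sections to items that carry 'verbose' and 'value' keys):

import streamlit as st
import yaml

# Illustrative sketch only: edit card values via checkboxes, then download the updated YAML.
uploaded = st.file_uploader("Upload YAML card", type="yaml")
if uploaded:
    card = yaml.safe_load(uploaded.read().decode("utf-8"))
    for section, items in card.items():
        st.header(section.replace("_", " ").title())
        for key, details in items.items():
            details["value"] = st.checkbox(details["verbose"], value=details["value"])
    st.download_button(
        label="Download updated card as YAML",
        data=yaml.dump(card),
        file_name="updated_card.yaml",
        mime="text/yaml",
    )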
.gitignore CHANGED
@@ -1,2 +1,3 @@
  __pycache__
  compliancecards.code-workspace
+ temp.ipynb
.ipynb_checkpoints/app-checkpoint.ipynb ADDED
@@ -0,0 +1,166 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import streamlit as st\n",
+ "\n",
+ "from streamlit_jupyter import StreamlitPatcher, tqdm\n",
+ "\n",
+ "StreamlitPatcher().jupyter() # register streamlit with jupyter-compatible wrappers"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/markdown": [
+ "# AI"
+ ],
+ "text/plain": [
+ "<IPython.core.display.Markdown object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "2024-08-13 14:11:32.399 \n",
+ " \u001b[33m\u001b[1mWarning:\u001b[0m to view this Streamlit app on a browser, run it with the following\n",
+ " command:\n",
+ "\n",
+ " streamlit run /mnt/wsl/PHYSICALDRIVE1p1/@home/non/.venv/lib/python3.10/site-packages/ipykernel_launcher.py [ARGUMENTS]\n"
+ ]
+ }
+ ],
+ "source": [
+ "import yaml\n",
+ "from pathlib import Path\n",
+ "from compliance_analysis import run_compliance_analysis_on_project, run_compliance_analysis_on_data, run_compliance_analysis_on_model\n",
+ "\n",
+ "# def process_files(files):\n",
+ "# results = []\n",
+ "# for file in files:\n",
+ "# with open(file.name, 'r') as f:\n",
+ "# content = f.read()\n",
+ "# if Path(file.name).name == \"project_cc.yaml\":\n",
+ "# project_cc_yaml = yaml.safe_load(content)\n",
+ "# msg = run_compliance_analysis_on_project(project_cc_yaml)\n",
+ "# results.append(msg) \n",
+ "# # if Path(file.name).name == \"data_cc.yaml\":\n",
+ "# # data_cc_yaml = yaml.safe_load(content)\n",
+ "# # msg = run_compliance_analysis_on_data(data_cc_yaml)\n",
+ "# # results.append(msg) \n",
+ "# # if Path(file.name).name == \"model_cc.yaml\":\n",
+ "# # model_cc_yaml = yaml.safe_load(content)\n",
+ "# # msg = run_compliance_analysis_on_model(model_cc_yaml)\n",
+ "# # results.append(msg)\n",
+ " \n",
+ "# return results\n",
+ "\n",
+ "import yaml\n",
+ "from pathlib import Path\n",
+ "import pandas as pd\n",
+ "\n",
+ "\n",
+ "def process_files(files):\n",
+ " results = []\n",
+ " for file in files:\n",
+ " content = file.read().decode(\"utf-8\")\n",
+ " if Path(file.name).name == \"project_cc.yaml\":\n",
+ " project_cc_yaml = yaml.safe_load(content)\n",
+ " if project_cc_yaml:\n",
+ " msg = run_compliance_analysis_on_project(project_cc_yaml)\n",
+ " results.append(msg) \n",
+ " return results\n",
+ "\n",
+ "def extract_properties(files):\n",
+ " properties = []\n",
+ " for file in files:\n",
+ " content = file.read().decode(\"utf-8\")\n",
+ " project_cc_yaml = yaml.safe_load(content)\n",
+ " if project_cc_yaml:\n",
+ " properties.extend([key for key in project_cc_yaml])\n",
+ " return properties\n",
+ "\n",
+ "def sentence_builder(keys):\n",
+ " return f\"Selected options: {', '.join(keys)}\"\n",
+ "\n",
+ "# Streamlit app\n",
+ "st.title(\"AI\")\n",
+ "\n",
+ "uploaded_files = st.file_uploader(\"Upload YAML Files\", type=\"yaml\", accept_multiple_files=True)\n",
+ "\n",
+ "if uploaded_files:\n",
+ " # Process the files and display the output\n",
+ " if st.button(\"Process Files\"):\n",
+ " results = process_files(uploaded_files)\n",
+ " for result in results:\n",
+ " st.text(result)\n",
+ " \n",
+ " # Extract properties\n",
+ " properties = extract_properties(uploaded_files)\n",
+ " \n",
+ " # Create a DataFrame with properties and a checkbox column\n",
+ " df = pd.DataFrame({\n",
+ " \"Property\": properties,\n",
+ " \"Select\": [False] * len(properties) # Default to unchecked\n",
+ " })\n",
+ "\n",
+ " # Display DataFrame with checkboxes using st.column_config.CheckboxColumn\n",
+ " edited_df = st.data_editor(\n",
+ " df,\n",
+ " column_config={\n",
+ " \"Select\": st.column_config.CheckboxColumn(\"Select\"),\n",
+ " },\n",
+ " key=\"data_editor\"\n",
+ " )\n",
+ "\n",
+ " # Get selected properties\n",
+ " selected_properties = edited_df[edited_df[\"Select\"]][\"Property\"].tolist()\n",
+ " \n",
+ " # Build the sentence based on selected properties\n",
+ " if selected_properties:\n",
+ " sentence = sentence_builder(selected_properties)\n",
+ " st.text(sentence)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.12"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+ }
__pycache__/compliance_analysis.cpython-310.pyc CHANGED
Binary files a/__pycache__/compliance_analysis.cpython-310.pyc and b/__pycache__/compliance_analysis.cpython-310.pyc differ
 
__pycache__/utils.cpython-310.pyc CHANGED
Binary files a/__pycache__/utils.cpython-310.pyc and b/__pycache__/utils.cpython-310.pyc differ
 
app.py CHANGED
@@ -3,32 +3,211 @@ import yaml
  from pathlib import Path
  from compliance_analysis import run_compliance_analysis_on_project, run_compliance_analysis_on_data, run_compliance_analysis_on_model

- def process_files(files):
- results = []
+ # def process_files(files):
+ # results = []
+ # for file in files:
+ # with open(file.name, 'r') as f:
+ # content = f.read()
+ # if Path(file.name).name == "project_cc.yaml":
+ # project_cc_yaml = yaml.safe_load(content)
+ # msg = run_compliance_analysis_on_project(project_cc_yaml)
+ # results.append(msg)
+ # # if Path(file.name).name == "data_cc.yaml":
+ # # data_cc_yaml = yaml.safe_load(content)
+ # # msg = run_compliance_analysis_on_data(data_cc_yaml)
+ # # results.append(msg)
+ # # if Path(file.name).name == "model_cc.yaml":
+ # # model_cc_yaml = yaml.safe_load(content)
+ # # msg = run_compliance_analysis_on_model(model_cc_yaml)
+ # # results.append(msg)
+
+ # return results
+
+ # def extract_properties(files):
+ # for file in files:
+ # with open(file.name, 'r') as f:
+ # content = f.read()
+ # project_cc_yaml = yaml.safe_load(content)
+ # project_cc = [key for key in project_cc_yaml]
+ # return project_cc
+
+ # def sentence_builder(countries):
+ # return f"{countries}"
+
+ # # # Gradio interface
+ # with gr.Blocks() as demo:
+ # file_input = gr.File(label="Upload Files", file_count="multiple")
+ # output = gr.Textbox(label="Output", lines=10)
+
+ # submit_button = gr.Button("Process Files")
+ # submit_button.click(process_files, inputs=file_input, outputs=output)
+
+ # # Create the CheckboxGroup (initially empty)
+ # checkbox_group = gr.CheckboxGroup(choices=[], label="", interactive=True)
+
+ # # Create the output Textbox
+ # output = gr.Textbox()
+
+ # # Function to update the CheckboxGroup when files are uploaded
+ # def update_checkboxes(files):
+ # choices = extract_properties(files)
+ # return gr.CheckboxGroup(choices=choices, label="", interactive=True)
+
+ # # Create a Button to trigger the sentence builder
+ # submit_button = gr.Button("Submit")
+
+ # # Set up the interaction for file input and updating checkboxes
+ # file_input.change(update_checkboxes, inputs=file_input, outputs=checkbox_group)
+
+ # gr.CheckboxGroup.change(update_checkboxes)
+
+ # output = gr.Textbox()
+ # submit_button = gr.Button("Submit")
+ # submit_button.click(sentence_builder, inputs=checkbox_group, outputs=output)
+
+
+ # if __name__ == "__main__":
+ # demo.launch()
+
+ import streamlit as st
+ import yaml
+ from pathlib import Path
+ import pandas as pd
+
+
+ def load_data(files):
+ cards = []
  for file in files:
- with open(file.name, 'r') as f:
- content = f.read()
+ content = file.read().decode("utf-8")
  if Path(file.name).name == "project_cc.yaml":
  project_cc_yaml = yaml.safe_load(content)
- msg = run_compliance_analysis_on_project(project_cc_yaml)
- results.append(msg)
- # if Path(file.name).name == "data_cc.yaml":
- # data_cc_yaml = yaml.safe_load(content)
- # msg = run_compliance_analysis_on_data(data_cc_yaml)
- # results.append(msg)
- # if Path(file.name).name == "model_cc.yaml":
- # model_cc_yaml = yaml.safe_load(content)
- # msg = run_compliance_analysis_on_model(model_cc_yaml)
- # results.append(msg)
-
+ data = project_cc_yaml
+ card_type = "project"
+ cards.append((card_type, data))
+ if Path(file.name).name == "data_cc.yaml":
+ data_cc_yaml = yaml.safe_load(content)
+ data = data_cc_yaml
+ card_type = "data"
+ cards.append((card_type, data))
+ if Path(file.name).name == "model_cc.yaml":
+ model_cc_yaml = yaml.safe_load(content)
+ data = model_cc_yaml
+ card_type = "model"
+ cards.append((card_type, data))
+ return cards
+
+ # def process_files(files):
+ # results = []
+ # for file in files:
+ # content = file.read().decode("utf-8")
+ # if Path(file.name).name == "project_cc.yaml":
+ # project_cc_yaml = yaml.safe_load(content)
+ # if project_cc_yaml:
+ # msg = run_compliance_analysis_on_project(project_cc_yaml)
+ # results.append(msg)
+ # return results
+
+ def process_files(data):
+ results = []
+ msg = run_compliance_analysis_on_project(yaml.safe_load(data))
+ results.append(msg)
  return results

- # Gradio interface
- with gr.Blocks() as demo:
- file_input = gr.File(label="Upload Files", file_count="multiple")
- output = gr.Textbox(label="Output", lines=10)
+ def extract_properties(data):
+
+ flattened_data = []
+
+ for category, items in data.items():
+ for item, attributes in items.items():
+ flattened_data.append({
+ "Category": category,
+ "Item": item,
+ "Verbose": attributes["verbose"],
+ "Value": attributes["value"]
+ })
+ df = pd.DataFrame(flattened_data)
+
+ return df
+
+ def sentence_builder(keys):
+ return f"Selected options: {', '.join(keys)}"
+
+ # Streamlit app
+ # st.set_page_config(page_title="AI", layout="wide")
+ # st.markdown(
+ # """
+ # <style>
+ # [data-testid="stSidebar"][aria-expanded="true"] > div:first-child{
+ # width: 600px;
+ # }
+ # [data-testid="stSidebar"][aria-expanded="false"] > div:first-child{
+ # width: 600px;
+ # margin-left: -400px;
+ # }
+
+ # """,
+ # unsafe_allow_html=True,
+ # )
+
+ st.title("AI")
+
+ uploaded_files = st.file_uploader("Upload YAML Files", type="yaml", accept_multiple_files=True)
+ # project_files = st.file_uploader("Upload Project Files", type="yaml", accept_multiple_files=True)
+
+ if uploaded_files:
+
+ cards = load_data(uploaded_files)
+
+ for card in cards:
+
+ data = card[1]

- submit_button = gr.Button("Process Files")
- submit_button.click(process_files, inputs=file_input, outputs=output)
+ if data != None:
+ # df = extract_properties(data)
+ # df["Value"] = df["Value"].astype(bool)
+
+ # edited_df = st.data_editor(
+ # df,
+ # column_config={
+ # "Value": st.column_config.CheckboxColumn("Value"),
+ # },
+ # key="data_editor"
+ # )
+
+ st.title("Compliance Checkboxes")
+ st.title(f"{card[0]}")
+
+ for section, items in data.items():
+ st.header(section.replace('_', ' ').title()) # section header
+ for key, details in items.items():
+ details['value'] = st.checkbox(details['verbose'], value=details['value'])

- demo.launch()
+ # st.write("Updated Data:", data)
+
+ yaml_data = yaml.dump(data)
+
+ st.download_button(
+ label=f"Download Updated Data as YAML{card[0]}",
+ data=yaml_data,
+ file_name="updated_data.yaml",
+ mime="text/yaml"
+ )
+
+ # json_data = json.dumps(data, indent=2)
+ # st.download_button(
+ # label="Download Updated Data as JSON",
+ # data=json_data,
+ # file_name="updated_data.json",
+ # mime="application/json"
+ # )
+
+ # selected_properties = edited_df[edited_df["Value"]]["Item"].tolist()
+
+ # if selected_properties:
+ # sentence = sentence_builder(selected_properties)
+ # st.text(sentence)
+
+ if st.button(f"Process {card[0]}"):
+ results = process_files(yaml_data)
+ for result in results:
+ st.text(result)
compliance_analysis.py CHANGED
@@ -1,5 +1,5 @@
  import yaml
- from utils import set_type, set_operator_role_and_location, set_eu_market_status, check_within_scope
+ from utils import set_type, set_operator_role_and_location, set_eu_market_status, check_within_scope, check_prohibited

  # Create some variables we will use throughout our analysis

@@ -33,17 +33,16 @@ def run_compliance_analysis_on_project(project_cc_yaml):
  set_eu_market_status(project_variables, project_cc_yaml)

  # Check if the project is within scope of the Act. If it's not, the analysis is over.
- if check_within_scope(project_cc_yaml):
+ if check_within_scope(project_variables, project_cc_yaml):
  msg = ("Project is within the scope of Act. Let's continue...")
  else:
  msg = ("Project is not within the scope of what is regulated by the Act.")
-
- # # Check for prohibited practices. If any exist, the analysis is over.
- # if check_prohibited(project_cc_yaml) == True:
- # print("Project contains prohibited practices and is therefore non-compliant.")
- # msg = ("Project is non-compliant due to a prohibited practice.")
- # else:
- # print("Project does not contain prohibited practies. Let's continue...")
+ return msg
+
+ # Check for prohibited practices. If any exist, the analysis is over.
+ if check_prohibited(project_variables, project_cc_yaml) == True:
+ msg = ("Project is non-compliant due to a prohibited practice.")
+ return msg

  # If project is high-risk AI system, check that is has met all the requirements for such systems:

data_cc.yaml CHANGED
@@ -105,23 +105,23 @@ quality_management_system: # Art. 17(1)(f)
  verbose: 'Systems and procedures for data management, including data acquisition, data collection, data analysis, data labelling, data storage, data filtration, data mining, data aggregation, data retention and any other operation regarding the data that is performed before and for the purposes of the placing on the market or putting into service of high-risk AI systems'
  value: !!bool false

- gpai_requirements: # Art. 53(1); Annex XI(2)(c)
- gpai_requirements:
- data_type:
- verbose: 'Documentation for the dataset is available that contains the type of data'
- value: !!bool false
- data_provenance:
- verbose: 'Documentation for the dataset is available that contains the provenance of data'
- value: !!bool false
- data_curation:
- verbose: 'Documentation for the dataset is available that contains the curation methodologies (e.g. cleaning, filtering, etc.)'
- value: !!bool false
- data_number:
- verbose: 'Documentation for the dataset is available that contains the number of data points'
- value: !!bool false
- data_scope:
- verbose: 'Documentation for the dataset is available that contains the number of data scope and main characteristics'
- value: !!bool false
- data_origin:
- verbose: 'Documentation for the dataset is available that contains information on how the data was obtained and selected as well as all other measures to detect the unsuitability of data sources and methods to detect identifiable biases'
- value: !!bool false
+ # gpai_requirements: # Art. 53(1); Annex XI(2)(c)
+ # gpai_requirements:
+ # data_type:
+ # verbose: 'Documentation for the dataset is available that contains the type of data'
+ # value: !!bool false
+ # data_provenance:
+ # verbose: 'Documentation for the dataset is available that contains the provenance of data'
+ # value: !!bool false
+ # data_curation:
+ # verbose: 'Documentation for the dataset is available that contains the curation methodologies (e.g. cleaning, filtering, etc.)'
+ # value: !!bool false
+ # data_number:
+ # verbose: 'Documentation for the dataset is available that contains the number of data points'
+ # value: !!bool false
+ # data_scope:
+ # verbose: 'Documentation for the dataset is available that contains the number of data scope and main characteristics'
+ # value: !!bool false
+ # data_origin:
+ # verbose: 'Documentation for the dataset is available that contains information on how the data was obtained and selected as well as all other measures to detect the unsuitability of data sources and methods to detect identifiable biases'
+ # value: !!bool false
model_cc.yaml CHANGED
@@ -140,7 +140,7 @@ obligations_for_providers_of_gpai_models:
  verbose: 'The architecture and number of parameters'
  value: !!bool false
  input_output_modality: # Art. 53; Annex XI(1)(1)(e)
- verbos: 'Modality (e.g. text, image) and format of inputs and outputs'
+ verbose: 'Modality (e.g. text, image) and format of inputs and outputs'
  value: !!bool false
  license: # Art. 53; Annex XI(1)(1)(f)
  verbose: 'The license'
project_cc.yaml CHANGED
@@ -1,5 +1,20 @@
+ <<<<<<< HEAD

  # Information related to high-level characteristics of AI project, including the role of the operator, market status, and type of AI
+ =======
+ smb:
+ smb: # Art. 11(1)
+ verbose: 'AI project is operated by a small or medium-sized enterprise'
+ value: true
+
+ eu_market_status:
+ placed_on_market: # Art. 3(9)
+ verbose: 'AI project is being made available on the Union market for the first time'
+ value: false
+ put_into_service: #Art. 3(11)
+ verbose: 'AI project is supplied for first use directly to the deployer or for own use in the Union for its intended purpose;'
+ value: false
+ >>>>>>> 2a132e5 (switch to streamlit UI, add download for updated cards, other minor fixes)

  operator_role:
  provider: # Art. 2
@@ -93,6 +108,7 @@ excepted:
  verbose: 'AI project involves AI models that are released under a free and open-source licence that allows for the access, usage, modification, and distribution of the model, and whose parameters, including the weights, the information on the model architecture, and the information on model usage, are made publicly available. This exception shall not apply to general purpose AI models with systemic risks'
  value: !!bool false

+ <<<<<<< HEAD
  # Information related to practices prohibited by the Act

  prohibited_practice:
@@ -130,6 +146,43 @@ prohibited_practice:
  real_time_exception_investigation:
  verbose: 'The AI project involves use of ‘real-time’ remote biometric identification systems in publicly accessible spaces for the purposes of law enforcement stricly for the localisation or identification of a person suspected of having committed a criminal offence, for the purpose of conducting a criminal investigation or prosecution or executing a criminal penalty for offences referred to in Annex II and punishable in the Member State concerned by a custodial sentence or a detention order for a maximum period of at least four years.'
  value: !!bool false
+ =======
+ # prohibited_practice:
+ # ai_system:
+ # manipulative: # Art. 5(1)(a)
+ # verbose: 'The AI project deploys subliminal or purposefully manipulative or deceptive techniques, with the objective or effect of materially distorting the behavior of people by appreciably impairing their ability to make an informed decision, thereby causing them to take a decision that they would not have otherwise taken in a manner that causes or is reasonably likely to cause significant harm'
+ # value: !!bool false
+ # exploit_vulnerable: # Art. 5(1)(b)
+ # verbose: 'The AI project exploits the vulnerabilities of natural people due to their age, disability or a specific social or economic situation, with the objective or effect of materially distorting their behaviour in a manner that causes or is reasonably likely to cause significant harm'
+ # value: !!bool false
+ # social_score: # Art. 5(1)(c)
+ # verbose: 'The AI project is for the evaluation or classification of natural people over a certain period of time based on their social behaviour or known, inferred or predicted personal or personality characteristics, with the social score leading to at least one of the following: (i) detrimental or unfavourable treatment of certain natural people in social contexts that are unrelated to the contexts in which the data was originally generated or collected; (ii) detrimental or unfavourable treatment of certain natural people that is unjustified or disproportionate to their social behaviour or its gravity'
+ # value: !!bool false
+ # crime_prediction: # Art. 5(1)(d)
+ # verbose: 'This AI project makes risk assessments of natural persons in order to assess or predict the risk of them committing a criminal offence, based solely on the profiling of the natural person or on assessing their personality traits and characteristics (and does not support the human assessment of the involvement of a person in a criminal activity, which is already based on objective and verifiable facts directly linked to a criminal activity)'
+ # value: !!bool false
+ # untarged_face: # Art. 5(1)(e)
+ # verbose: 'This AI project creates or expand facial recognition databases through the untargeted scraping of facial images from the internet or CCTV footage'
+ # value: !!bool false
+ # emotion_prediction: # Art. 5(1)(f)
+ # verbose: 'The AI project infers emotions of a natural person in the areas of workplace and education institutions and is not intended to be put in place or into the market for medical or safety reasons'
+ # value: !!bool false
+ # biometric:
+ # categorization: # Art. 5(1)(g)
+ # verbose: 'The AI project involves the use of biometric categorisation systems that categorise individually natural persons based on their biometric data to deduce or infer their race, political opinions, trade union membership, religious or philosophical beliefs, sex life or sexual orientation; this prohibition does not cover any labelling or filtering of lawfully acquired biometric datasets, such as images, based on biometric data or categorizing of biometric data in the area of law enforcement'
+ # value: !!bool false
+ # real_time: # Art. 5(1)(h)
+ # verbose: 'The AI project involves use of ‘real-time’ remote biometric identification systems in publicly accessible spaces for the purposes of law enforcement'
+ # value: !!bool false
+ # real_time_exception_victim: # Art. 5(1)(h)
+ # verbose: 'The AI project involves use of ‘real-time’ remote biometric identification systems in publicly accessible spaces for the purposes of law enforcement stricly for the targeted search for specific victims of abduction, trafficking in human beings or sexual exploitation of human beings, or the search for missing persons'
+ # value: !!bool false
+ # real_time_exception_threat:
+ # verbose: 'The AI project involves use of ‘real-time’ remote biometric identification systems in publicly accessible spaces for the purposes of law enforcement stricly for the prevention of a specific, substantial and imminent threat to the life or physical safety of natural persons or a genuine and present or genuine and foreseeable threat of a terrorist attack'
+ # real_time_exception_investigation:
+ # verbose: 'The AI project involves use of ‘real-time’ remote biometric identification systems in publicly accessible spaces for the purposes of law enforcement stricly for the localisation or identification of a person suspected of having committed a criminal offence, for the purpose of conducting a criminal investigation or prosecution or executing a criminal penalty for offences referred to in Annex II and punishable in the Member State concerned by a custodial sentence or a detention order for a maximum period of at least four years.'
+ # value: !!bool false
+ >>>>>>> 2a132e5 (switch to streamlit UI, add download for updated cards, other minor fixes)

  # Requirements for those projects which involve high-risk AI systems

utils.py CHANGED
@@ -55,7 +55,10 @@ def set_eu_market_status(project_variables, project_cc_yaml):
  return project_variables


- def check_within_scope(project_cc_yaml):
+ def check_within_scope(project_variables, project_cc_yaml):
+
+ ai_system = project_variables['ai_project_type']['ai_system']
+
  if not check_excepted(project_cc_yaml):
  if provider and ((ai_system and (placed_on_market or put_into_service)) or (gpai_model and placed_on_market)): # Article 2.1(a)
  return True
@@ -76,7 +79,7 @@ def check_excepted(project_cc_yaml):
  else:
  return False

- def check_prohibited (project_variables, project_cc_yaml):
+ def check_prohibited(project_variables, project_cc_yaml):

  ai_system = project_variables['ai_project_type']['ai_system']

@@ -93,18 +96,4 @@ def check_prohibited (project_variables, project_cc_yaml):
  return True
  else:
  print("You are not engaged in any prohibited practices.")
- return False
-
- # def check_all_true(file_path):
- # data = yaml.safe_load(file_path)
-
- # # Iterate through top-level keys
- # for top_key, top_value in data.items():
- # if isinstance(top_value, dict):
- # # Iterate through second-level keys
- # for second_key, second_value in top_value.items():
- # if not second_value:
- # print("You are non-compliant with the Act")
- # break
- # else:
- # print("No problems here")
+ return False