Merge pull request #3 from billmarinocam/dev
- __pycache__/compliance_analysis.cpython-310.pyc +0 -0
- __pycache__/utils.cpython-310.pyc +0 -0
- app.py +34 -0
- compliance_analysis.py +130 -278
- data_cc.yaml +2 -2
- model_cc.yaml +2 -2
- project_cc.yaml +58 -25
- utils.py +107 -0
__pycache__/compliance_analysis.cpython-310.pyc
ADDED
Binary file (4.24 kB)

__pycache__/utils.cpython-310.pyc
ADDED
Binary file (2.27 kB)
app.py
ADDED
@@ -0,0 +1,34 @@
+import gradio as gr
+import yaml
+from pathlib import Path
+from compliance_analysis import run_compliance_analysis_on_project, run_compliance_analysis_on_data, run_compliance_analysis_on_model
+
+def process_files(files):
+    results = []
+    for file in files:
+        with open(file.name, 'r') as f:
+            content = f.read()
+        if Path(file.name).name == "project_cc.yaml":
+            project_cc_yaml = yaml.safe_load(content)
+            msg = run_compliance_analysis_on_project(project_cc_yaml)
+            results.append(msg)
+        # if Path(file.name).name == "data_cc.yaml":
+        #     data_cc_yaml = yaml.safe_load(content)
+        #     msg = run_compliance_analysis_on_data(data_cc_yaml)
+        #     results.append(msg)
+        # if Path(file.name).name == "model_cc.yaml":
+        #     model_cc_yaml = yaml.safe_load(content)
+        #     msg = run_compliance_analysis_on_model(model_cc_yaml)
+        #     results.append(msg)
+
+    return results
+
+# Gradio interface
+with gr.Blocks() as demo:
+    file_input = gr.File(label="Upload Files", file_count="multiple")
+    output = gr.Textbox(label="Output", lines=10)
+
+    submit_button = gr.Button("Process Files")
+    submit_button.click(process_files, inputs=file_input, outputs=output)
+
+demo.launch()
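Editor's note: `process_files` returns a Python list while the single `gr.Textbox` output will likely render that list's repr rather than one message per line. A minimal sketch of a handler that joins the messages instead (same component names as above; the joining behaviour is an editorial suggestion, not part of the commit):

    def process_files(files):
        results = []
        for file in files:
            with open(file.name, "r") as f:
                content = f.read()
            if Path(file.name).name == "project_cc.yaml":
                # run the project-level analysis and keep its message
                results.append(run_compliance_analysis_on_project(yaml.safe_load(content)))
        # join all messages so the Textbox shows them line by line
        return "\n".join(str(r) for r in results)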
compliance_analysis.py
CHANGED
@@ -1,330 +1,182 @@
-import os
 import yaml
-from

 # Create some variables we will use throughout our analysis

-
-            found_files.append(os.path.join(root, filename))
-
-#Define a function that checks for a Project CC. Without this, there simply cannot be an analysis.
-def check_for_project_cc(folder_path):
-    found_files = []
-
-    # Walk through the directory
-    for root, dirs, files in os.walk(folder_path):
-        for filename in files:
-            if filename.lower() == 'project_cc.yaml':
-                found_files.append(os.path.join(root, filename))
-
-    # Check the results
-    if len(found_files) == 0:
-        print(f"We did not find a Project CC in your folder. We cannot run a compliance analysis without a Project CC.")
-        sys.exit()
-    elif len(found_files) == 1:
-        print(f"We found exactly one Project CC in your folder. Great job!:")
-        print(f" - {found_files[0]}")
-        run_compliance_analysis(folder_path)
-    else:
-        print(f"Multiple Project CCs found:")
-        for file_path in found_files:
-            print(f" - {file_path}")
-        print("We found multiple Project CCs in your folder. There should only be one Project CC per project.")
-
-def run_compliance_analysis(folder_path):
-
-    # Load the Project CC YAML file from the supplied folder. This will be our starting point.
-    with open(folder_path + 'project_cc.yaml', 'r') as file:
-        project_cc_yaml = yaml.safe_load(file)

    # Determine project type (AI system vs. GPAI model) as well as operator type. We will use these for different things.
-    set_type(project_cc_yaml)
-    set_operator_role_and_location(
-    set_eu_market_status(project_cc_yaml)

    # Check if the project is within scope of the Act. If it's not, the analysis is over.
    if check_within_scope(project_cc_yaml):
-
    else:
-

-    # Check for prohibited practices. If any exist, the analysis is over.
-    if check_prohibited(project_cc_yaml) == True:
-
-    else:
-

    # If project is high-risk AI system, check that is has met all the requirements for such systems:

-    if high_risk_ai_system:

        # Do this by examining the Project CC

        for key, value in project_cc_yaml['risk_management_system']:
            if not value:
-
        for key, value in project_cc_yaml['technical_documentation']:
            if not value:
-
        for key, value in project_cc_yaml['record_keeping']:
            if not value:
-
        for key, value in project_cc_yaml['transparency_and_provision_of_information_to_deployers']:
            if not value:
-
        for key, value in project_cc_yaml['human_oversight']:
            if not value:
-
        for key, value in project_cc_yaml['accuracy_robustness_cybersecurity']:
            if not value:
-
        for key, value in project_cc_yaml['quality_management_system']:
            if not value:
-
-
-        # Do this by examining any and all Data CCs too
-
-        for filename in os.listdir(folder_path):
-            # Check if the search word is in the filename
-            if "data_cc.md" in filename.lower():
-
-                # If it is, load the yaml
-
-                with open(folder_path + filename, 'r') as file:
-                    data_cc_yaml = yaml.safe_load(file)
-
-                for key, value in data_cc_yaml['data_and_data_governance']:
-                    if not value:
-                        sys.exit(f"Because of the dataset represented by {filename}, this high-risk AI system fails the data and data governance requirements under Article 10.")
-                for key, value in data_cc_yaml['technical_documentation']:
-                    if not value:
-                        sys.exit(f"Because of the dataset represented by {filename}, this high-risk AI system fails the technical documentation requirements under Article 11.")
-                for key, value in data_cc_yaml['transparency_and_provision_of_information_to_deployers']:
-                    if not value:
-                        sys.exit(f"Because of the dataset represented by {filename}, this high-risk AI system fails the transparency requirements under Article 13.")
-                for key, value in data_cc_yaml['quality_management_system']:
-                    if not value:
-                        sys.exit(f"Because of the dataset represented by {filename}, this high-risk AI system fails the quality management requirements under Article 17.")
-
-        # Do this by examining any and all Model CCs too
-
-        for filename in os.listdir(folder_path):
-            # Check if the search word is in the filename
-            if "model_cc.md" in filename.lower():
-
-                # If it is, load the yaml
-
-                with open(folder_path + filename, 'r') as file:
-                    model_cc_yaml = yaml.safe_load(file)
-
-                for key, value in model_cc_yaml['risk_management_system']:
-                    if not value:
-                        sys.exit(f"Because of the model represented by {filename}, this high-risk AI system fails the risk management requirements under Article 9.")
-                for key, value in data_cc_yaml['technical_documentation']:
-                    if not value:
-                        sys.exit(f"Because of the model represented by {filename}, this high-risk AI system fails the technical documentation requirements under Article 11.")
-                for key, value in data_cc_yaml['transparency_and_provision_of_information_to_deployers']:
-                    if not value:
-                        sys.exit(f"Because of the model represented by {filename}, this high-risk AI system fails the transparency requirements under Article 13.")
-                for key, value in data_cc_yaml['accuracy_robustness_cybersecurity']:
-                    if not value:
-                        sys.exit(f"Because of the model represented by {filename}, this high-risk AI system fails the quality management requirements under Article 15.")
-                for key, value in data_cc_yaml['quality_management_system']:
-                    if not value:
-                        sys.exit(f"Because of the model represented by {filename}, this high-risk AI system fails the quality management requirements under Article 17.")
-
-    # If the project is a GPAI model, check that is has met all the requirements for such systems:
-
-    if gpai_model:

-

-    # Do this by examining

-

-        if not value:
-            sys.exit(f"Because of the dataset represented by {filename}, this GPAI fails the transparency requirements under Article 53.")

-    #

-

-    # If the project is a GPAI model with systematic risk, check that is has additionally met all the requirements for such systems:

-    if gpai_model_systematic_risk:

-    # Do this by examining the Project CC

-
-    # Do this by examining any and all Model CCs too
-
-    for filename in os.listdir(folder_path):
-        # Check if the search word is in the filename
-        if "model_cc.md" in filename.lower():
-
-            # If it is, load the yaml
-
-            with open(folder_path + filename, 'r') as file:
-                model_cc_yaml = yaml.safe_load(file)
-
-            for key, value in model_cc_yaml['obligations_for_providers_of_gpai_models_with_systemic_risk']:
-                if not value:
-                    sys.exit(f"Because of the model represented by {filename}, this GPAI model with systematic risk fails the transparency requirements under Article 55.")
-
-def set_type(project_cc_yaml):
-    if project_cc_yaml['ai_system']['ai_system']['value']:
-        ai_system = True
-    if project_cc_yaml['gpai_model']['ai_system']['value']:
-        gpai_model = True
-    if ai_system and gpai_model:
-        sys.exit("Your project cannot be both an AI system and a GPAI model. Please revise your Project CC accordingly.")
-    if ai_system == True:
-        for key, value in project_cc_yaml['high_risk_ai_system']:
-            if value and sum(map(bool, [project_cc_yaml['high_risk_ai_system']['filter_exception_rights'],project_cc_yaml['high_risk_ai_system']['filter_exception_narrow'],project_cc_yaml['high_risk_ai_system']['filter_exception_human'],project_cc_yaml['high_risk_ai_system']['filter_exception_deviation'], project_cc_yaml['high_risk_ai_system']['filter_exception_prep']])) < 1:
-                high_risk_ai_system == True
-    if gpai_model == True:
-        if project_cc_yaml['gpai_model_systematic_risk']['evaluation'] or project_cc_yaml['gpai_model_systematic_risk']['flops']:
-            gpai_model_systematic_risk == True
-
-def set_operator_role_and_location(project_cc):
-    if project_cc_yaml['operator_role']['eu_located']['value']:
-        eu_located = True
-    if project_cc_yaml['operator_role']['provider']['value']:
-        provider = True
-    if project_cc_yaml['operator_role']['deployer']['value']:
-        deployer = True
-    if project_cc_yaml['operator_role']['importer']['value']:
-        importer = True
-    if project_cc_yaml['operator_role']['distributor']['value']:
-        distributor = True
-    if project_cc_yaml['operator_role']['product_manufacturer']['value']:
-        product_manufacturer = True
-    if ai_system and gpai_model:
-        sys.exit("Your project cannot be both an AI system and a GPAI model. Please revise your Project CC accordingly.")
-    if sum(map(bool, [provider,deployer,importer,distributor, product_manufacturer])) != 1:
-        sys.exit("Please specify exactly one operator role.")
-
-def set_eu_market_status(project_cc):
-    if project_cc_yaml['eu_market']['placed_on_market']['value']:
-        placed_on_market = True
-    if project_cc_yaml['eu_market']['put_into_service']['value']:
-        put_into_service = True
-    if project_cc_yaml['operator_role']['output_used']['value']:
-        output_used == True
-
-def check_within_scope(project_cc):
-    if not check_excepted(project_cc):
-        if provider and ((ai_system and (placed_on_market or put_into_service)) or (gpai_model and placed_on_market)): # Article 2.1(a)
-            return True
-        if deployer and eu_located: # Article 2.1(b)
-            return True
-        if (provider or deployer) and (ai_system and eu_located and output_used): # Article 2.1(c)
-            return True
-        if (importer or distributor) and ai_system: # Article 2.1(d)
-            return True
-        if product_manufacturer and ai_system and (placed_on_market or put_into_service): # Article 2.1(e)
-            return True
-    else
-        return False
-
-def check_excepted(project_cc):
-    if project_cc_yaml['excepted']['scientific'] or project_cc_yaml['excepted']['pre_market'] or (ai_system and project_cc_yaml['excepted']['open_source_ai_system']) or (gpai_model and project_cc_yaml['excepted']['open_source_gpai_system']):
-        return True
-    else:
-        return False
-
-def check_prohibited (project_cc):
-    if ai_system:
-        for key in project_cc_yaml['prohibited_practice']['ai_system']:
-            if key[value]:
-                print("You are engaged in a prohibited practice and thus the project is non-compliant.")
-                return True
-        if project_cc_yaml['prohibited_practice']['biometric']['categorization']:
-            print("You are engaged in a prohibited practice and thus the project is non-compliant.")
-            return True
-        if project_cc_yaml['prohibited_practice']['biometric']['real_time'] and sum(map(bool, [project_cc['prohibited_practice']['biometric']['real_time_exception_victim'],project_cc['prohibited_practice']['biometric']['real_time_exception_threat'], project_cc['prohibited_practice']['biometric']['real_time_exception_investigation']])) == 0:
-            print("You are engaged in a prohibited practice and thus the project is non-compliant.")
-            return True
-    else:
-        print("You are not engaged in any prohibited practices.")
-        return False


-def check_all_true(file_path):
-    # Load the YAML file
-    with open(project_cc, 'r') as file:
-        data = yaml.safe_load(file)
-
-    # Iterate through top-level keys
-    for top_key, top_value in data.items():
-        if isinstance(top_value, dict):
-            # Iterate through second-level keys
-            for second_key, second_value in top_value.items():
-                if not second_value:
-                    print("You are non-compliant with the Act")
-                    break
-    else:
-        print("No problems here")
-
-def main():
-    # Prompt the user to enter a filename
-    file_path = input("Please enter a file path to the folder containing all your AI project's Compliance Cards: ")
-
-    # Call the function with the entered filename
-    check_for_project_cc(file_path)
-
-if __name__ == "__main__":
-    main()

 import yaml
+from utils import set_type, set_operator_role_and_location, set_eu_market_status, check_within_scope

 # Create some variables we will use throughout our analysis

+project_variables = {
+    "ai_project_type": {
+        "ai_system": False,
+        "gpai_model": False,
+        "high_risk_ai_system": False,
+        "gpai_model_systematic_risk": False
+    },
+    "operator_role": {
+        "provider": False,
+        "deployer": False,
+        "importer": False,
+        "distributor": False,
+        "product_manufacturer": False,
+        "eu_located": False
+    },
+    "eu_market_status": {
+        "placed_on_market": False,
+        "put_into_service": False,
+        "output_used": False
+    }
+}
+
+def run_compliance_analysis_on_project(project_cc_yaml):

    # Determine project type (AI system vs. GPAI model) as well as operator type. We will use these for different things.
+    project_type = set_type(project_variables, project_cc_yaml)
+    set_operator_role_and_location(project_variables, project_cc_yaml)
+    set_eu_market_status(project_variables, project_cc_yaml)

    # Check if the project is within scope of the Act. If it's not, the analysis is over.
    if check_within_scope(project_cc_yaml):
+        msg = ("Project is within the scope of Act. Let's continue...")
    else:
+        msg = ("Project is not within the scope of what is regulated by the Act.")

+    # # Check for prohibited practices. If any exist, the analysis is over.
+    # if check_prohibited(project_cc_yaml) == True:
+    #     print("Project contains prohibited practices and is therefore non-compliant.")
+    #     msg = ("Project is non-compliant due to a prohibited practice.")
+    # else:
+    #     print("Project does not contain prohibited practies. Let's continue...")

    # If project is high-risk AI system, check that is has met all the requirements for such systems:

+    if project_type == "high_risk_ai_system":

        # Do this by examining the Project CC

        for key, value in project_cc_yaml['risk_management_system']:
            if not value:
+                msg = ("Because of project-level characteristics, this high-risk AI system fails the risk management requirements under Article 9.")
        for key, value in project_cc_yaml['technical_documentation']:
            if not value:
+                msg = ("Because of project-level characteristics, this high-risk AI system fails the risk management requirements under Article 11.")
        for key, value in project_cc_yaml['record_keeping']:
            if not value:
+                msg = ("Because of project-level characteristics, this high-risk AI system fails the risk management requirements under Article 12.")
        for key, value in project_cc_yaml['transparency_and_provision_of_information_to_deployers']:
            if not value:
+                msg = ("Because of project-level characteristics, this high-risk AI system fails the transparency requirements under Article 13.")
        for key, value in project_cc_yaml['human_oversight']:
            if not value:
+                msg = ("Because of project-level characteristics, this high-risk AI system fails the human oversight requirements under Article 14.")
        for key, value in project_cc_yaml['accuracy_robustness_cybersecurity']:
            if not value:
+                msg = ("Because of project-level characteristics, this high-risk AI system fails the accuracy, robustness, and cybersecurity requirements under Article 15.")
        for key, value in project_cc_yaml['quality_management_system']:
            if not value:
+                msg = ("Because of project-level characteristics, this high-risk AI system fails the accuracy, robustness, and cybersecurity requirements under Article 17.")

+    return msg

+def run_compliance_analysis_on_data(data_cc_yaml):
+
+    for key, value in data_cc_yaml['data_and_data_governance']:
+        if not value:
+            msg = (f"Because of the dataset represented by , this high-risk AI system fails the data and data governance requirements under Article 10.")
+    for key, value in data_cc_yaml['technical_documentation']:
+        if not value:
+            msg = (f"Because of the dataset represented by , this high-risk AI system fails the technical documentation requirements under Article 11.")
+    for key, value in data_cc_yaml['transparency_and_provision_of_information_to_deployers']:
+        if not value:
+            msg = (f"Because of the dataset represented by , this high-risk AI system fails the transparency requirements under Article 13.")
+    for key, value in data_cc_yaml['quality_management_system']:
+        if not value:
+            msg = (f"Because of the dataset represented by , this high-risk AI system fails the quality management requirements under Article 17.")
+
+    return msg
+
+def run_compliance_analysis_on_model(model_cc_yaml):
+
+    for key, value in model_cc_yaml['risk_management_system']:
+        if not value:
+            msg = (f"Because of the model represented by , this high-risk AI system fails the risk management requirements under Article 9.")
+    for key, value in data_cc_yaml['technical_documentation']:
+        if not value:
+            msg = (f"Because of the model represented by , this high-risk AI system fails the technical documentation requirements under Article 11.")
+    for key, value in data_cc_yaml['transparency_and_provision_of_information_to_deployers']:
+        if not value:
+            msg = (f"Because of the model represented by , this high-risk AI system fails the transparency requirements under Article 13.")
+    for key, value in data_cc_yaml['accuracy_robustness_cybersecurity']:
+        if not value:
+            msg = (f"Because of the model represented by , this high-risk AI system fails the quality management requirements under Article 15.")
+    for key, value in data_cc_yaml['quality_management_system']:
+        if not value:
+            msg = (f"Because of the model represented by , this high-risk AI system fails the quality management requirements under Article 17.")
+
+    return msg
+
+
+# # If the project is a GPAI model, check that is has met all the requirements for such systems:
+
+# if gpai_model:

+#     # Do this by examining the Project CC

+#     for key, value in project_cc_yaml['gpai_model_provider_obligations']:
+#         if not value:
+#             msg = ("GPAI model fails the transparency requirements under Article 53.")

+#     # Do this by examining any and all Data CCs too

+#     for filename in os.listdir(folder_path):
+#         # Check if the search word is in the filename
+#         if "data_cc.md" in filename.lower():

+#             # If it is, load the yaml

+#             with open(folder_path + filename, 'r') as file:
+#                 data_cc_yaml = yaml.safe_load(file)
+
+#             for key, value in data_cc_yaml['gpai_requirements']['gpai_requirements']:
+#                 if not value:
+#                     msg = (f"Because of the dataset represented by {filename}, this GPAI fails the transparency requirements under Article 53.")
+
+#     # Do this by examining any and all Model CCs too

+#     for filename in os.listdir(folder_path):
+#         # Check if the search word is in the filename
+#         if "model_cc.md" in filename.lower():

+#             # If it is, load the yaml

+#             with open(folder_path + filename, 'r') as file:
+#                 model_cc_yaml = yaml.safe_load(file)

+#             for key, value in model_cc_yaml['obligations_for_providers_of_gpai_models']:
+#                 if not value:
+#                     msg = (f"Because of the model represented by {filename}, this GPAI fails the transparency requirements under Article 53.")

+# # If the project is a GPAI model with systematic risk, check that is has additionally met all the requirements for such systems:

+# if gpai_model_systematic_risk:

+#     # Do this by examining the Project CC

+#     for key, value in project_cc_yaml['gpai_obligations_for_systemic_risk_models']:
+#         if not value:
+#             msg = ("GPAI model with systematic risk fails the transparency requirements under Article 55.")

+#     # Do this by examining any and all Model CCs too

+#     for filename in os.listdir(folder_path):
+#         # Check if the search word is in the filename
+#         if "model_cc.md" in filename.lower():
+
+#             # If it is, load the yaml
+
+#             with open(folder_path + filename, 'r') as file:
+#                 model_cc_yaml = yaml.safe_load(file)
+
+#             for key, value in model_cc_yaml['obligations_for_providers_of_gpai_models_with_systemic_risk']:
+#                 if not value:
+#                     msg = (f"Because of the model represented by {filename}, this GPAI model with systematic risk fails the transparency requirements under Article 55.")

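Editor's note on the new run_compliance_analysis_on_* functions: iterating a dict returned by yaml.safe_load (e.g. `for key, value in project_cc_yaml['risk_management_system']:`) walks its keys only, so the two-name unpacking raises a ValueError for these keys, and each entry is itself a dict with 'verbose' and 'value', which is always truthy. A minimal sketch of one section check written around both points; `check_section` and its message accumulation are hypothetical editorial helpers, not part of the commit:

    def check_section(cc_yaml, section, article):
        """Return a failure message for each unmet item in one Compliance Card section (sketch)."""
        failures = []
        for key, entry in cc_yaml[section].items():  # .items() yields (key, entry) pairs
            value = entry.get('value') if isinstance(entry, dict) else entry
            if not value:
                failures.append(f"'{key}' is unmet, so the requirements under Article {article} fail.")
        return failures

    # e.g. check_section(project_cc_yaml, 'risk_management_system', 9)

Collecting failures also avoids the committed pattern of overwriting `msg` on every miss and returning an unbound `msg` when nothing fails.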
data_cc.yaml
CHANGED
@@ -68,13 +68,13 @@ data_and_data_governance:
   personal_data_deletion: # Art. 10(5)(e)
     verbose: 'Where special categories of personal data have been used to ensure the detection and correction of possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations, the special categories of personal data were deleted once the bias was corrected or the personal data reached the end of its retention period (whichever came first)'
     value: !!bool false
-
+  personal_data_necessary_105f: # Art. 10(5)(f)
     verbose: 'Where special categories of personal data have been used to ensure the detection and correction of possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations, the records of processing activities pursuant to Regulations (EU) 2016/679 and (EU) 2018/1725 and Directive (EU) 2016/680 include the reasons why the processing of special categories of personal data was strictly necessary to detect and correct biases, and why that objective could not be achieved by processing other data'
     value: !!bool false

 technical_documentation:
   general_description: # Art. 11; Annex IV(2)(d)
-    verbose: 'Dataset carries technical documention, such as a dataseet, including a general description of the dataset.
+    verbose: 'Dataset carries technical documention, such as a dataseet, including a general description of the dataset.'
     value: !!bool false
   provenance: # Art. 11; Annex IV(2)(d)
     verbose: 'Dataset carries technical documention, such as a dataseet, including information about its provenance'
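For reference, the explicit `!!bool` tag used throughout these cards parses to a real Python bool under `yaml.safe_load`, which is what the `if not value:` checks rely on. A standalone illustration (snippet assumed for illustration, not taken from the repo):

    import yaml

    snippet = """
    general_description:
      verbose: 'Dataset carries technical documentation, including a general description of the dataset.'
      value: !!bool false
    """
    doc = yaml.safe_load(snippet)
    print(doc['general_description']['value'])  # False (a bool, not the string 'false')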
model_cc.yaml
CHANGED
@@ -13,7 +13,7 @@ risk_management_system:
     value: !!bool false
   testing_compliance: # Art. 9(6)
     verbose: 'Testing to ensure model complies with Act'
-
+    value: !!bool false
   testing_benchmark: # Art. 9(8)
     verbose: 'Testing against prior defined metrics appropriate to intended purpose'
     value: !!bool false

@@ -87,7 +87,7 @@ accuracy_robustness_cybersecurity:
     verbose: 'Model is designed and developed to achieve appropriate level of accuracy'
     value: !!bool false
   robustiness: # Art. 15(1)
-    verbose 'Model is designed and developed to achieve appropriate level of robustness'
+    verbose: 'Model is designed and developed to achieve appropriate level of robustness'
     value: !!bool false
   cybersecurity: # Art. 15(1)
     verbose: 'Model is designed and developed to achieve appropriate level of cybersecurity'
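Both model_cc.yaml fixes repair entries the checker could not read: the `testing_compliance` item previously had no `value` key, and the bare `verbose '...'` line (missing colon) is not valid key/value syntax, so the whole file should fail to parse before this change. A quick way to see the second point (standalone sketch, not from the repo):

    import yaml

    broken = """
    robustiness:
      verbose 'Model is designed and developed to achieve appropriate level of robustness'
      value: !!bool false
    """
    try:
        yaml.safe_load(broken)
    except yaml.YAMLError as err:
        print(f"model_cc.yaml would not load before the fix: {err}")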
project_cc.yaml
CHANGED
@@ -1,28 +1,16 @@
-smb:
-  smb: # Art. 11(1)
-    verbose: 'AI project is operated by a small or medium-sized enterprise'
-    value: !!bool false

-
-  placed_on_market: # Art. 3(9)
-    verbose: 'AI project is being made available on the Union market for the first time'
-    value: !!bool false
-  put_into_service: #Art. 3(11)
-    verbose: 'AI project is supplied for first use directly to the deployer or for own use in the Union for its intended purpose;'
+# Information related to high-level characteristics of AI project, including its market status, operator, and type of AI

 operator_role:
   provider: # Art. 2
-    verbose: 'The
+    verbose: 'The operator of this AI project is a natural or legal person, public authority, agency or other body that develops an AI system or a general-purpose AI model or that has an AI system or a general-purpose AI model developed and places it on the market or puts the AI system into service under its own name or trademark, whether for payment or free of charge'
     value: !!bool false
-  on_market: # Art 2
-    verbose: 'AI project is placed on the market or put into service in the Union'
-    value: !!bool false
   deployer: # Art. 2
     verbose: 'AI project operator is a natural or legal person, public authority, agency or other body using an AI system under its authority except where the AI system is used in the course of a personal non-professional activity'
     value: !!bool false
   eu_located: # Art. 2
     verbose: 'AI project operator has its place of establishment or location within the Union'
-    value: !!bool
+    value: !!bool True
   output_used: # Art. 2
     verbose: 'The output produced by the AI system is used in the Union'
     value: !!bool false

@@ -36,6 +24,14 @@ operator_role:
     verbose: 'AI project operator is a product manufacturer'
     value: !!bool false # Art. 2

+eu_market_status:
+  placed_on_market: # Art. 3(9)
+    verbose: 'AI project is being made available on the EU market (i.e., supplied for distribution or use in the course of a commercial activity, whether in return for payment or free of charge) for the first time'
+    value: !!bool false
+  put_into_service: #Art. 3(11)
+    verbose: 'AI project is being used for its intended purpose for the first time in the EU, either by the operator or by a deployer to whom it is directly supplied'
+    value: !!bool false
+
 ai_system:
   ai_system: # Art. 3(1)
     verbose: 'AI project is a machine-based system that is designed to operate with varying levels of autonomy and that may exhibit adaptiveness after deployment, and that, for explicit or implicit objectives, infers, from the input it receives, how to generate outputs such as predictions, content, recommendations, or decisions that can influence physical or virtual environments'

@@ -46,6 +42,19 @@ gpai_model:
     verbose: 'AI project is an AI model, including where such an AI model is trained with a large amount of data using self-supervision at scale, that displays significant generality and is capable of competently performing a wide range of distinct tasks regardless of the way the model is placed on the market and that can be integrated into a variety of downstream systems or applications, except AI models that are used for research, development or prototyping activities before they are placed on the market'
     value: !!bool false

+gpai_model_systematic_risk:
+  evaluation: # Art. 51 (1)(a)
+    verbose: 'The AI project has high impact capabilities based on an evaluation using appropriate technical tools and methodologies, including indicators and benchmarks'
+    value: !!bool false
+  committee: # Art. 51 (1)(b)
+    verbose: 'The AI project has capabilities or an impact equivalent to high impact capabilities based on a decision of the Commission, ex officio or following a qualified alert from the scientific panel'
+    value: !!bool false
+  flops: # Art. 51(2)
+    verbose: 'The cumulative amount of computation used for the training of the AI project, as measured in floating point operations (FLOPs), has been greater than 10^25'
+    value: !!bool false
+
+# Information related to the Act's exceptions for scientific research, open-source AI, and more
+
 excepted:
   scientific: # Art. 2(6)
     verbose: 'AI project is or was specifically developed and put into service for the sole purpose of scientific research and development'

@@ -60,6 +69,8 @@ excepted:
     verbose: 'AI project involves AI models that are released under a free and open-source licence that allows for the access, usage, modification, and distribution of the model, and whose parameters, including the weights, the information on the model architecture, and the information on model usage, are made publicly available. This exception shall not apply to general purpose AI models with systemic risks'
     value: !!bool false

+# Information related to practices prohibited by the Act
+
 prohibited_practice:
   ai_system:
     manipulative: # Art. 5(1)(a)

@@ -96,6 +107,8 @@ prohibited_practice:
     verbose: 'The AI project involves use of ‘real-time’ remote biometric identification systems in publicly accessible spaces for the purposes of law enforcement stricly for the localisation or identification of a person suspected of having committed a criminal offence, for the purpose of conducting a criminal investigation or prosecution or executing a criminal penalty for offences referred to in Annex II and punishable in the Member State concerned by a custodial sentence or a detention order for a maximum period of at least four years.'
     value: !!bool false

+# Requirements for those projects which involve high-risk AI systems
+
 high_risk_ai_system:
   safety_component: # Art. 6(1)(a)
     verbose: 'AI project is intended to be used as a safety component of a product'

@@ -163,7 +176,7 @@ risk_management_system:
     verbose: 'Risk management system for AI system includes the identification and analysis of any known or reasonably foreseeable risks that the AI system might pose to health, safety or fundamental rights when used in accordance with its intended purpose'
     value: !!bool false
   risk_estimation_foreseeable: # Art. 9(2)(b)
-    verbose: 'Risk management system for AI system includes the estimation and evaluation of the risks that may emerge when the high-risk AI system is used in accordance with its intended purpose, and under conditions of reasonably foreseeable misuse
+    verbose: 'Risk management system for AI system includes the estimation and evaluation of the risks that may emerge when the high-risk AI system is used in accordance with its intended purpose, and under conditions of reasonably foreseeable misuse'
     value: !!bool false
   risk_post_market: # Art. 9(2)(c)
     verbose: 'Risk management system for AI system includes the evaluation of other risks possibly arising, based on the analysis of data gathered from the post-market monitoring system'

@@ -190,7 +203,7 @@ technical_documentation:
     value: !!bool false
   interaction: # Art. 11(1); Annex IV(1)(b)
     verbose: 'The Technical Documentation includes a general description of the AI system that covers how the AI system interacts with, or can be used to interact with, hardware or software, including with other AI systems, that are not part of the AI system itself, where applicable'
-
+    value: !!bool false
   versions: # Art. 11(1); Annex IV(1)(c)
     verbose: 'Technical Documentation includes a general description of the AI system that covers the versions of relevant software or firmware, and any requirements related to version updates'
     value: !!bool false

@@ -361,6 +374,28 @@ quality_management_system:
     verbose: 'System includes examination, test, and validation procedures before, during, and after development'
     value: !!bool false

+fundamental_rights_assessment:
+  process: # Art. 27(1)(a)
+    verbose: 'AI project has been subject to a fundamental rights impact assessment that includes a description of the deployer’s processes in which the high-risk AI system will be used in line with its intended purpose'
+    value: !!bool false
+  time_period: # Art. 27(1)(b)
+    value: 'AI project has been subject to a fundamental rights impact assessment that includes a description of the period of time within which, and the frequency with which, each high-risk AI system is intended to be used'
+    value: !!bool false
+  persons_affected: # Art. 27(1)(c)
+    verbose: 'AI project has been subject to a fundamental rights impact assessment that describes the categories of natural persons and groups likely to be affected by its use in the specific context'
+    value: !!bool false
+  likely_harms: # Art. 27(1)(d)
+    verbose: 'AI project has been subject to a fundamental rights impact assessment that describes the specific risks of harm likely to have an impact on the categories of natural persons and groups likely to be affected by its use in the specific context'
+    value: !!bool false
+  human_oversight: # Art. 27(1)(e)
+    verbose: 'AI project has been subject to a fundamental rights impact assessment that includes a description of the implementation of human oversight measures, according to the instructions for use'
+    value: !!bool false
+  risk_mitigation: # Art. 27(1)(f)
+    verbose: 'AI project has been subject to a fundamental rights impact assessment that describes the measures to be taken in the case of the materialisation of risks of harm likely to have an impact on the categories of natural persons and groups likely to be affected by its use in the specific context, including the arrangements for internal governance and complaint mechanisms'
+    value: !!bool false
+
+# Information related to the Act's requirements for all AI systems
+
 transparency_obligations:
   synthetic_content: # Art. 50(2)
     verbose: 'Providers of AI systems generating synthetic content ensure outputs are marked and detectable as artificially generated'

@@ -369,13 +404,7 @@ transparency_obligations:
     verbose: 'Technical solutions for marking are effective, interoperable, robust, and reliable'
     value: !!bool false

-
-  evaluation: # Art. 51
-    verbose: 'Model impact capabilities were evaluated using appropriate technical tools and methodologies'
-    value: !!bool false
-  flops: # Art. 51(2)
-    verbose: 'Cumulative compute for training measured in floating point operations (FLOPs)'
-    value: !!bool false
+# Information related to the Act's requirements for GPAI models

 gpai_model_provider_obligations:
   intended_uses: # Art. 53(1)(a); Annex XI(1)(1)(a-c)

@@ -403,7 +432,9 @@ gpai_model_provider_obligations:
     verbose: 'To downstream providers, describe model elements, development process, and integration requirements'
     value: !!bool false

-
+# Information related to the Act's requirements for GPAI models with systematic risk
+
+obligations_for_gpai_models_with_systemic_risk:
   evaluation: # Art. 55(1)(a)
     verbose: 'Perform model evaluation using standardized protocols and conduct adversarial testing'
     value: !!bool false

@@ -414,3 +445,5 @@ gpai_obligations_for_systemic_risk_models:
     verbose: 'Ensure adequate cybersecurity protection for the model and infrastructure'
     value: !!bool false

+
+
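One small wrinkle in the new fundamental_rights_assessment block: the time_period entry uses the key `value:` twice (the descriptive sentence was presumably meant to sit under `verbose:`). PyYAML does not reject duplicate keys; with safe_load the later `value: !!bool false` wins and the description is silently dropped. A quick standalone check (snippet assumed for illustration, not from the repo):

    import yaml

    entry = """
    time_period: # Art. 27(1)(b)
      value: 'description text that was probably meant to be under verbose:'
      value: !!bool false
    """
    print(yaml.safe_load(entry))  # {'time_period': {'value': False}} - the descriptive text is discarded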
utils.py
ADDED
@@ -0,0 +1,107 @@
+import yaml
+
+def set_type(project_variables, project_cc_yaml):
+
+    project_type = None
+
+    ai_system = project_variables['ai_project_type']['ai_system']
+    gpai_model = project_variables['ai_project_type']['gpai_model']
+
+    if project_cc_yaml['ai_system']['ai_system']['value']:
+        ai_system = True
+    if project_cc_yaml['gpai_model']['gpai_model']['value']:
+        gpai_model = True
+    if ai_system and gpai_model:
+        msg = ("Your project cannot be both an AI system and a GPAI model. Please revise your Project CC accordingly.")
+    if ai_system == True:
+        for key, value in project_cc_yaml['high_risk_ai_system']:
+            if value and sum(map(bool, [project_cc_yaml['high_risk_ai_system']['filter_exception_rights'],project_cc_yaml['high_risk_ai_system']['filter_exception_narrow'],project_cc_yaml['high_risk_ai_system']['filter_exception_human'],project_cc_yaml['high_risk_ai_system']['filter_exception_deviation'], project_cc_yaml['high_risk_ai_system']['filter_exception_prep']])) < 1:
+                project_type = "high_risk_ai_system"
+
+    if gpai_model == True:
+        if project_cc_yaml['gpai_model_systematic_risk']['evaluation'] or project_cc_yaml['gpai_model_systematic_risk']['flops']:
+            project_type = "gpai_model_systematic_risk"
+
+    return project_type
+
+def set_operator_role_and_location(project_variables, project_cc_yaml):
+    operators = 0
+
+    ai_system = project_variables['ai_project_type']['ai_system']
+    gpai_model = project_variables['ai_project_type']['gpai_model']
+
+    for var in project_variables['operator_role']:
+        if project_cc_yaml['operator_role'][f'{var}']['value']:
+            project_variables['operator_role'][f'{var}'] = True
+            operators += 1
+
+    if ai_system and gpai_model:
+        msg = ("Your project cannot be both an AI system and a GPAI model. Please revise your Project CC accordingly.")
+    if operators != 1:
+        msg = ("Please specify exactly one operator role.")
+
+    return project_variables
+
+def set_eu_market_status(project_variables, project_cc_yaml):
+
+    if project_cc_yaml['eu_market_status']['placed_on_market']['value']:
+        project_variables['eu_market_status']["placed_on_market"] = True
+    if project_cc_yaml['eu_market_status']['put_into_service']['value']:
+        project_variables['eu_market_status']["put_into_service"] = True
+
+    if project_cc_yaml['operator_role']['output_used']['value']:
+        project_variables['operator_role']["output_used"] = True
+
+    return project_variables
+
+
+def check_within_scope(project_cc_yaml):
+    if not check_excepted(project_cc_yaml):
+        if provider and ((ai_system and (placed_on_market or put_into_service)) or (gpai_model and placed_on_market)): # Article 2.1(a)
+            return True
+        if deployer and eu_located: # Article 2.1(b)
+            return True
+        if (provider or deployer) and (ai_system and eu_located and output_used): # Article 2.1(c)
+            return True
+        if (importer or distributor) and ai_system: # Article 2.1(d)
+            return True
+        if product_manufacturer and ai_system and (placed_on_market or put_into_service): # Article 2.1(e)
+            return True
+    else:
+        return False
+
+def check_excepted(project_cc_yaml):
+    if project_cc_yaml['excepted']['scientific'] or project_cc_yaml['excepted']['pre_market'] or (ai_system and project_cc_yaml['excepted']['open_source_ai_system']) or (gpai_model and project_cc_yaml['excepted']['open_source_gpai_system']):
+        return True
+    else:
+        return False
+
+# def check_prohibited (project_cc_yaml):
+#     if ai_system:
+#         for key in project_cc_yaml['prohibited_practice']['ai_system']:
+#             if key[value]:
+#                 print("You are engaged in a prohibited practice and thus the project is non-compliant.")
+#                 return True
+#         if project_cc_yaml['prohibited_practice']['biometric']['categorization']:
+#             print("You are engaged in a prohibited practice and thus the project is non-compliant.")
+#             return True
+#         if project_cc_yaml['prohibited_practice']['biometric']['real_time'] and sum(map(bool, [project_cc_yaml['prohibited_practice']['biometric']['real_time_exception_victim'],project_cc['prohibited_practice']['biometric']['real_time_exception_threat'], project_cc_yaml['prohibited_practice']['biometric']['real_time_exception_investigation']])) == 0:
+#             print("You are engaged in a prohibited practice and thus the project is non-compliant.")
+#             return True
+#     else:
+#         print("You are not engaged in any prohibited practices.")
+#         return False
+
+# def check_all_true(file_path):
+#     data = yaml.safe_load(file_path)
+
+#     # Iterate through top-level keys
+#     for top_key, top_value in data.items():
+#         if isinstance(top_value, dict):
+#             # Iterate through second-level keys
+#             for second_key, second_value in top_value.items():
+#                 if not second_value:
+#                     print("You are non-compliant with the Act")
+#                     break
+#             else:
+#                 print("No problems here")