ext | sha | content
---|---|---|
py | 1a4dff56c3c6fb253a9824cb8811c474c74a38dc | # terrascript/fortios/r.py
# Automatically generated by tools/makecode.py ()
import warnings
warnings.warn(
"using the 'legacy layout' is deprecated", DeprecationWarning, stacklevel=2
)
import terrascript
class fortios_alertemail_setting(terrascript.Resource):
pass
class fortios_antivirus_heuristic(terrascript.Resource):
pass
class fortios_antivirus_profile(terrascript.Resource):
pass
class fortios_antivirus_quarantine(terrascript.Resource):
pass
class fortios_antivirus_settings(terrascript.Resource):
pass
class fortios_application_custom(terrascript.Resource):
pass
class fortios_application_group(terrascript.Resource):
pass
class fortios_application_list(terrascript.Resource):
pass
class fortios_application_name(terrascript.Resource):
pass
class fortios_application_rulesettings(terrascript.Resource):
pass
class fortios_authentication_rule(terrascript.Resource):
pass
class fortios_authentication_scheme(terrascript.Resource):
pass
class fortios_authentication_setting(terrascript.Resource):
pass
class fortios_certificate_ca(terrascript.Resource):
pass
class fortios_certificate_crl(terrascript.Resource):
pass
class fortios_certificate_local(terrascript.Resource):
pass
class fortios_certificate_remote(terrascript.Resource):
pass
class fortios_cifs_domaincontroller(terrascript.Resource):
pass
class fortios_cifs_profile(terrascript.Resource):
pass
class fortios_credentialstore_domaincontroller(terrascript.Resource):
pass
class fortios_dlp_filepattern(terrascript.Resource):
pass
class fortios_dlp_fpdocsource(terrascript.Resource):
pass
class fortios_dlp_fpsensitivity(terrascript.Resource):
pass
class fortios_dlp_sensitivity(terrascript.Resource):
pass
class fortios_dlp_sensor(terrascript.Resource):
pass
class fortios_dlp_settings(terrascript.Resource):
pass
class fortios_dnsfilter_domainfilter(terrascript.Resource):
pass
class fortios_dnsfilter_profile(terrascript.Resource):
pass
class fortios_dpdk_cpus(terrascript.Resource):
pass
class fortios_dpdk_global(terrascript.Resource):
pass
class fortios_emailfilter_blockallowlist(terrascript.Resource):
pass
class fortios_emailfilter_bwl(terrascript.Resource):
pass
class fortios_emailfilter_bword(terrascript.Resource):
pass
class fortios_emailfilter_dnsbl(terrascript.Resource):
pass
class fortios_emailfilter_fortishield(terrascript.Resource):
pass
class fortios_emailfilter_iptrust(terrascript.Resource):
pass
class fortios_emailfilter_mheader(terrascript.Resource):
pass
class fortios_emailfilter_options(terrascript.Resource):
pass
class fortios_emailfilter_profile(terrascript.Resource):
pass
class fortios_endpointcontrol_client(terrascript.Resource):
pass
class fortios_endpointcontrol_fctems(terrascript.Resource):
pass
class fortios_endpointcontrol_forticlientems(terrascript.Resource):
pass
class fortios_endpointcontrol_forticlientregistrationsync(terrascript.Resource):
pass
class fortios_endpointcontrol_profile(terrascript.Resource):
pass
class fortios_endpointcontrol_registeredforticlient(terrascript.Resource):
pass
class fortios_endpointcontrol_settings(terrascript.Resource):
pass
class fortios_extendercontroller_dataplan(terrascript.Resource):
pass
class fortios_extendercontroller_extender(terrascript.Resource):
pass
class fortios_extendercontroller_extender1(terrascript.Resource):
pass
class fortios_filefilter_profile(terrascript.Resource):
pass
class fortios_firewall_DoSpolicy(terrascript.Resource):
pass
class fortios_firewall_DoSpolicy6(terrascript.Resource):
pass
class fortios_firewall_address(terrascript.Resource):
pass
class fortios_firewall_address6(terrascript.Resource):
pass
class fortios_firewall_address6template(terrascript.Resource):
pass
class fortios_firewall_addrgrp(terrascript.Resource):
pass
class fortios_firewall_addrgrp6(terrascript.Resource):
pass
class fortios_firewall_authportal(terrascript.Resource):
pass
class fortios_firewall_centralsnatmap(terrascript.Resource):
pass
class fortios_firewall_centralsnatmap_move(terrascript.Resource):
pass
class fortios_firewall_centralsnatmap_sort(terrascript.Resource):
pass
class fortios_firewall_city(terrascript.Resource):
pass
class fortios_firewall_country(terrascript.Resource):
pass
class fortios_firewall_decryptedtrafficmirror(terrascript.Resource):
pass
class fortios_firewall_dnstranslation(terrascript.Resource):
pass
class fortios_firewall_identitybasedroute(terrascript.Resource):
pass
class fortios_firewall_interfacepolicy(terrascript.Resource):
pass
class fortios_firewall_interfacepolicy6(terrascript.Resource):
pass
class fortios_firewall_internetservice(terrascript.Resource):
pass
class fortios_firewall_internetserviceaddition(terrascript.Resource):
pass
class fortios_firewall_internetserviceappend(terrascript.Resource):
pass
class fortios_firewall_internetservicebotnet(terrascript.Resource):
pass
class fortios_firewall_internetservicecustom(terrascript.Resource):
pass
class fortios_firewall_internetservicecustomgroup(terrascript.Resource):
pass
class fortios_firewall_internetservicedefinition(terrascript.Resource):
pass
class fortios_firewall_internetserviceextension(terrascript.Resource):
pass
class fortios_firewall_internetservicegroup(terrascript.Resource):
pass
class fortios_firewall_internetserviceipblreason(terrascript.Resource):
pass
class fortios_firewall_internetserviceipblvendor(terrascript.Resource):
pass
class fortios_firewall_internetservicelist(terrascript.Resource):
pass
class fortios_firewall_internetservicename(terrascript.Resource):
pass
class fortios_firewall_internetserviceowner(terrascript.Resource):
pass
class fortios_firewall_internetservicereputation(terrascript.Resource):
pass
class fortios_firewall_ippool(terrascript.Resource):
pass
class fortios_firewall_ippool6(terrascript.Resource):
pass
class fortios_firewall_iptranslation(terrascript.Resource):
pass
class fortios_firewall_ipv6ehfilter(terrascript.Resource):
pass
class fortios_firewall_ldbmonitor(terrascript.Resource):
pass
class fortios_firewall_localinpolicy(terrascript.Resource):
pass
class fortios_firewall_localinpolicy6(terrascript.Resource):
pass
class fortios_firewall_multicastaddress(terrascript.Resource):
pass
class fortios_firewall_multicastaddress6(terrascript.Resource):
pass
class fortios_firewall_multicastpolicy(terrascript.Resource):
pass
class fortios_firewall_multicastpolicy6(terrascript.Resource):
pass
class fortios_firewall_object_address(terrascript.Resource):
pass
class fortios_firewall_object_addressgroup(terrascript.Resource):
pass
class fortios_firewall_object_ippool(terrascript.Resource):
pass
class fortios_firewall_object_service(terrascript.Resource):
pass
class fortios_firewall_object_servicecategory(terrascript.Resource):
pass
class fortios_firewall_object_servicegroup(terrascript.Resource):
pass
class fortios_firewall_object_vip(terrascript.Resource):
pass
class fortios_firewall_object_vipgroup(terrascript.Resource):
pass
class fortios_firewall_policy(terrascript.Resource):
pass
class fortios_firewall_policy46(terrascript.Resource):
pass
class fortios_firewall_policy6(terrascript.Resource):
pass
class fortios_firewall_policy64(terrascript.Resource):
pass
class fortios_firewall_profilegroup(terrascript.Resource):
pass
class fortios_firewall_profileprotocoloptions(terrascript.Resource):
pass
class fortios_firewall_proxyaddress(terrascript.Resource):
pass
class fortios_firewall_proxyaddrgrp(terrascript.Resource):
pass
class fortios_firewall_proxypolicy(terrascript.Resource):
pass
class fortios_firewall_proxypolicy_move(terrascript.Resource):
pass
class fortios_firewall_proxypolicy_sort(terrascript.Resource):
pass
class fortios_firewall_region(terrascript.Resource):
pass
class fortios_firewall_security_policy(terrascript.Resource):
pass
class fortios_firewall_security_policyseq(terrascript.Resource):
pass
class fortios_firewall_security_policysort(terrascript.Resource):
pass
class fortios_firewall_securitypolicy(terrascript.Resource):
pass
class fortios_firewall_shapingpolicy(terrascript.Resource):
pass
class fortios_firewall_shapingprofile(terrascript.Resource):
pass
class fortios_firewall_sniffer(terrascript.Resource):
pass
class fortios_firewall_sslserver(terrascript.Resource):
pass
class fortios_firewall_sslsshprofile(terrascript.Resource):
pass
class fortios_firewall_trafficclass(terrascript.Resource):
pass
class fortios_firewall_ttlpolicy(terrascript.Resource):
pass
class fortios_firewall_vendormac(terrascript.Resource):
pass
class fortios_firewall_vip(terrascript.Resource):
pass
class fortios_firewall_vip46(terrascript.Resource):
pass
class fortios_firewall_vip6(terrascript.Resource):
pass
class fortios_firewall_vip64(terrascript.Resource):
pass
class fortios_firewall_vipgrp(terrascript.Resource):
pass
class fortios_firewall_vipgrp46(terrascript.Resource):
pass
class fortios_firewall_vipgrp6(terrascript.Resource):
pass
class fortios_firewall_vipgrp64(terrascript.Resource):
pass
class fortios_firewallconsolidated_policy(terrascript.Resource):
pass
class fortios_firewallipmacbinding_setting(terrascript.Resource):
pass
class fortios_firewallipmacbinding_table(terrascript.Resource):
pass
class fortios_firewallschedule_group(terrascript.Resource):
pass
class fortios_firewallschedule_onetime(terrascript.Resource):
pass
class fortios_firewallschedule_recurring(terrascript.Resource):
pass
class fortios_firewallservice_category(terrascript.Resource):
pass
class fortios_firewallservice_custom(terrascript.Resource):
pass
class fortios_firewallservice_group(terrascript.Resource):
pass
class fortios_firewallshaper_peripshaper(terrascript.Resource):
pass
class fortios_firewallshaper_trafficshaper(terrascript.Resource):
pass
class fortios_firewallssh_hostkey(terrascript.Resource):
pass
class fortios_firewallssh_localca(terrascript.Resource):
pass
class fortios_firewallssh_localkey(terrascript.Resource):
pass
class fortios_firewallssh_setting(terrascript.Resource):
pass
class fortios_firewallssl_setting(terrascript.Resource):
pass
class fortios_firewallwildcardfqdn_custom(terrascript.Resource):
pass
class fortios_firewallwildcardfqdn_group(terrascript.Resource):
pass
class fortios_fmg_devicemanager_device(terrascript.Resource):
pass
class fortios_fmg_devicemanager_install_device(terrascript.Resource):
pass
class fortios_fmg_devicemanager_install_policypackage(terrascript.Resource):
pass
class fortios_fmg_devicemanager_script(terrascript.Resource):
pass
class fortios_fmg_devicemanager_script_execute(terrascript.Resource):
pass
class fortios_fmg_firewall_object_address(terrascript.Resource):
pass
class fortios_fmg_firewall_object_ippool(terrascript.Resource):
pass
class fortios_fmg_firewall_object_service(terrascript.Resource):
pass
class fortios_fmg_firewall_object_vip(terrascript.Resource):
pass
class fortios_fmg_firewall_security_policy(terrascript.Resource):
pass
class fortios_fmg_firewall_security_policypackage(terrascript.Resource):
pass
class fortios_fmg_jsonrpc_request(terrascript.Resource):
pass
class fortios_fmg_object_adom_revision(terrascript.Resource):
pass
class fortios_fmg_system_admin(terrascript.Resource):
pass
class fortios_fmg_system_admin_profiles(terrascript.Resource):
pass
class fortios_fmg_system_admin_user(terrascript.Resource):
pass
class fortios_fmg_system_adom(terrascript.Resource):
pass
class fortios_fmg_system_dns(terrascript.Resource):
pass
class fortios_fmg_system_global(terrascript.Resource):
pass
class fortios_fmg_system_license_forticare(terrascript.Resource):
pass
class fortios_fmg_system_license_vm(terrascript.Resource):
pass
class fortios_fmg_system_network_interface(terrascript.Resource):
pass
class fortios_fmg_system_network_route(terrascript.Resource):
pass
class fortios_fmg_system_ntp(terrascript.Resource):
pass
class fortios_fmg_system_syslogserver(terrascript.Resource):
pass
class fortios_ftpproxy_explicit(terrascript.Resource):
pass
class fortios_icap_profile(terrascript.Resource):
pass
class fortios_icap_server(terrascript.Resource):
pass
class fortios_ips_custom(terrascript.Resource):
pass
class fortios_ips_decoder(terrascript.Resource):
pass
class fortios_ips_global(terrascript.Resource):
pass
class fortios_ips_rule(terrascript.Resource):
pass
class fortios_ips_rulesettings(terrascript.Resource):
pass
class fortios_ips_sensor(terrascript.Resource):
pass
class fortios_ips_settings(terrascript.Resource):
pass
class fortios_ips_viewmap(terrascript.Resource):
pass
class fortios_json_generic_api(terrascript.Resource):
pass
class fortios_log_customfield(terrascript.Resource):
pass
class fortios_log_eventfilter(terrascript.Resource):
pass
class fortios_log_fortianalyzer_setting(terrascript.Resource):
pass
class fortios_log_guidisplay(terrascript.Resource):
pass
class fortios_log_setting(terrascript.Resource):
pass
class fortios_log_syslog_setting(terrascript.Resource):
pass
class fortios_log_threatweight(terrascript.Resource):
pass
class fortios_logdisk_filter(terrascript.Resource):
pass
class fortios_logdisk_setting(terrascript.Resource):
pass
class fortios_logfortianalyzer2_filter(terrascript.Resource):
pass
class fortios_logfortianalyzer2_overridefilter(terrascript.Resource):
pass
class fortios_logfortianalyzer2_overridesetting(terrascript.Resource):
pass
class fortios_logfortianalyzer2_setting(terrascript.Resource):
pass
class fortios_logfortianalyzer3_filter(terrascript.Resource):
pass
class fortios_logfortianalyzer3_overridefilter(terrascript.Resource):
pass
class fortios_logfortianalyzer3_overridesetting(terrascript.Resource):
pass
class fortios_logfortianalyzer3_setting(terrascript.Resource):
pass
class fortios_logfortianalyzer_filter(terrascript.Resource):
pass
class fortios_logfortianalyzer_overridefilter(terrascript.Resource):
pass
class fortios_logfortianalyzer_overridesetting(terrascript.Resource):
pass
class fortios_logfortianalyzer_setting(terrascript.Resource):
pass
class fortios_logfortianalyzercloud_filter(terrascript.Resource):
pass
class fortios_logfortianalyzercloud_overridefilter(terrascript.Resource):
pass
class fortios_logfortianalyzercloud_overridesetting(terrascript.Resource):
pass
class fortios_logfortianalyzercloud_setting(terrascript.Resource):
pass
class fortios_logfortiguard_filter(terrascript.Resource):
pass
class fortios_logfortiguard_overridefilter(terrascript.Resource):
pass
class fortios_logfortiguard_overridesetting(terrascript.Resource):
pass
class fortios_logfortiguard_setting(terrascript.Resource):
pass
class fortios_logmemory_filter(terrascript.Resource):
pass
class fortios_logmemory_globalsetting(terrascript.Resource):
pass
class fortios_logmemory_setting(terrascript.Resource):
pass
class fortios_lognulldevice_filter(terrascript.Resource):
pass
class fortios_lognulldevice_setting(terrascript.Resource):
pass
class fortios_logsyslogd2_filter(terrascript.Resource):
pass
class fortios_logsyslogd2_overridefilter(terrascript.Resource):
pass
class fortios_logsyslogd2_overridesetting(terrascript.Resource):
pass
class fortios_logsyslogd2_setting(terrascript.Resource):
pass
class fortios_logsyslogd3_filter(terrascript.Resource):
pass
class fortios_logsyslogd3_overridefilter(terrascript.Resource):
pass
class fortios_logsyslogd3_overridesetting(terrascript.Resource):
pass
class fortios_logsyslogd3_setting(terrascript.Resource):
pass
class fortios_logsyslogd4_filter(terrascript.Resource):
pass
class fortios_logsyslogd4_overridefilter(terrascript.Resource):
pass
class fortios_logsyslogd4_overridesetting(terrascript.Resource):
pass
class fortios_logsyslogd4_setting(terrascript.Resource):
pass
class fortios_logsyslogd_filter(terrascript.Resource):
pass
class fortios_logsyslogd_overridefilter(terrascript.Resource):
pass
class fortios_logsyslogd_overridesetting(terrascript.Resource):
pass
class fortios_logsyslogd_setting(terrascript.Resource):
pass
class fortios_logwebtrends_filter(terrascript.Resource):
pass
class fortios_logwebtrends_setting(terrascript.Resource):
pass
class fortios_networking_interface_port(terrascript.Resource):
pass
class fortios_networking_route_static(terrascript.Resource):
pass
class fortios_nsxt_servicechain(terrascript.Resource):
pass
class fortios_nsxt_setting(terrascript.Resource):
pass
class fortios_report_chart(terrascript.Resource):
pass
class fortios_report_dataset(terrascript.Resource):
pass
class fortios_report_layout(terrascript.Resource):
pass
class fortios_report_setting(terrascript.Resource):
pass
class fortios_report_style(terrascript.Resource):
pass
class fortios_report_theme(terrascript.Resource):
pass
class fortios_router_accesslist(terrascript.Resource):
pass
class fortios_router_accesslist6(terrascript.Resource):
pass
class fortios_router_aspathlist(terrascript.Resource):
pass
class fortios_router_authpath(terrascript.Resource):
pass
class fortios_router_bfd(terrascript.Resource):
pass
class fortios_router_bfd6(terrascript.Resource):
pass
class fortios_router_bgp(terrascript.Resource):
pass
class fortios_router_communitylist(terrascript.Resource):
pass
class fortios_router_isis(terrascript.Resource):
pass
class fortios_router_keychain(terrascript.Resource):
pass
class fortios_router_multicast(terrascript.Resource):
pass
class fortios_router_multicast6(terrascript.Resource):
pass
class fortios_router_multicastflow(terrascript.Resource):
pass
class fortios_router_ospf(terrascript.Resource):
pass
class fortios_router_ospf6(terrascript.Resource):
pass
class fortios_router_policy(terrascript.Resource):
pass
class fortios_router_policy6(terrascript.Resource):
pass
class fortios_router_prefixlist(terrascript.Resource):
pass
class fortios_router_prefixlist6(terrascript.Resource):
pass
class fortios_router_rip(terrascript.Resource):
pass
class fortios_router_ripng(terrascript.Resource):
pass
class fortios_router_routemap(terrascript.Resource):
pass
class fortios_router_setting(terrascript.Resource):
pass
class fortios_router_static(terrascript.Resource):
pass
class fortios_router_static6(terrascript.Resource):
pass
class fortios_routerbgp_neighbor(terrascript.Resource):
pass
class fortios_routerbgp_network(terrascript.Resource):
pass
class fortios_routerbgp_network6(terrascript.Resource):
pass
class fortios_routerospf6_ospf6interface(terrascript.Resource):
pass
class fortios_routerospf_neighbor(terrascript.Resource):
pass
class fortios_routerospf_network(terrascript.Resource):
pass
class fortios_routerospf_ospfinterface(terrascript.Resource):
pass
class fortios_spamfilter_bwl(terrascript.Resource):
pass
class fortios_spamfilter_bword(terrascript.Resource):
pass
class fortios_spamfilter_dnsbl(terrascript.Resource):
pass
class fortios_spamfilter_fortishield(terrascript.Resource):
pass
class fortios_spamfilter_iptrust(terrascript.Resource):
pass
class fortios_spamfilter_mheader(terrascript.Resource):
pass
class fortios_spamfilter_options(terrascript.Resource):
pass
class fortios_spamfilter_profile(terrascript.Resource):
pass
class fortios_sshfilter_profile(terrascript.Resource):
pass
class fortios_switchcontroller_8021Xsettings(terrascript.Resource):
pass
class fortios_switchcontroller_customcommand(terrascript.Resource):
pass
class fortios_switchcontroller_flowtracking(terrascript.Resource):
pass
class fortios_switchcontroller_global(terrascript.Resource):
pass
class fortios_switchcontroller_igmpsnooping(terrascript.Resource):
pass
class fortios_switchcontroller_lldpprofile(terrascript.Resource):
pass
class fortios_switchcontroller_lldpsettings(terrascript.Resource):
pass
class fortios_switchcontroller_location(terrascript.Resource):
pass
class fortios_switchcontroller_macsyncsettings(terrascript.Resource):
pass
class fortios_switchcontroller_managedswitch(terrascript.Resource):
pass
class fortios_switchcontroller_nacdevice(terrascript.Resource):
pass
class fortios_switchcontroller_nacsettings(terrascript.Resource):
pass
class fortios_switchcontroller_networkmonitorsettings(terrascript.Resource):
pass
class fortios_switchcontroller_portpolicy(terrascript.Resource):
pass
class fortios_switchcontroller_quarantine(terrascript.Resource):
pass
class fortios_switchcontroller_remotelog(terrascript.Resource):
pass
class fortios_switchcontroller_sflow(terrascript.Resource):
pass
class fortios_switchcontroller_snmpcommunity(terrascript.Resource):
pass
class fortios_switchcontroller_snmpsysinfo(terrascript.Resource):
pass
class fortios_switchcontroller_snmptrapthreshold(terrascript.Resource):
pass
class fortios_switchcontroller_snmpuser(terrascript.Resource):
pass
class fortios_switchcontroller_stormcontrol(terrascript.Resource):
pass
class fortios_switchcontroller_stormcontrolpolicy(terrascript.Resource):
pass
class fortios_switchcontroller_stpinstance(terrascript.Resource):
pass
class fortios_switchcontroller_stpsettings(terrascript.Resource):
pass
class fortios_switchcontroller_switchgroup(terrascript.Resource):
pass
class fortios_switchcontroller_switchinterfacetag(terrascript.Resource):
pass
class fortios_switchcontroller_switchlog(terrascript.Resource):
pass
class fortios_switchcontroller_switchprofile(terrascript.Resource):
pass
class fortios_switchcontroller_system(terrascript.Resource):
pass
class fortios_switchcontroller_trafficpolicy(terrascript.Resource):
pass
class fortios_switchcontroller_trafficsniffer(terrascript.Resource):
pass
class fortios_switchcontroller_virtualportpool(terrascript.Resource):
pass
class fortios_switchcontroller_vlan(terrascript.Resource):
pass
class fortios_switchcontroller_vlanpolicy(terrascript.Resource):
pass
class fortios_switchcontrollerautoconfig_custom(terrascript.Resource):
pass
class fortios_switchcontrollerautoconfig_default(terrascript.Resource):
pass
class fortios_switchcontrollerautoconfig_policy(terrascript.Resource):
pass
class fortios_switchcontrollerinitialconfig_template(terrascript.Resource):
pass
class fortios_switchcontrollerinitialconfig_vlans(terrascript.Resource):
pass
class fortios_switchcontrollerptp_policy(terrascript.Resource):
pass
class fortios_switchcontrollerptp_settings(terrascript.Resource):
pass
class fortios_switchcontrollerqos_dot1pmap(terrascript.Resource):
pass
class fortios_switchcontrollerqos_ipdscpmap(terrascript.Resource):
pass
class fortios_switchcontrollerqos_qospolicy(terrascript.Resource):
pass
class fortios_switchcontrollerqos_queuepolicy(terrascript.Resource):
pass
class fortios_switchcontrollersecuritypolicy_8021X(terrascript.Resource):
pass
class fortios_switchcontrollersecuritypolicy_captiveportal(terrascript.Resource):
pass
class fortios_switchcontrollersecuritypolicy_localaccess(terrascript.Resource):
pass
class fortios_system_accprofile(terrascript.Resource):
pass
class fortios_system_admin(terrascript.Resource):
pass
class fortios_system_admin_administrator(terrascript.Resource):
pass
class fortios_system_admin_profiles(terrascript.Resource):
pass
class fortios_system_affinityinterrupt(terrascript.Resource):
pass
class fortios_system_affinitypacketredistribution(terrascript.Resource):
pass
class fortios_system_alarm(terrascript.Resource):
pass
class fortios_system_alias(terrascript.Resource):
pass
class fortios_system_apiuser(terrascript.Resource):
pass
class fortios_system_apiuser_setting(terrascript.Resource):
pass
class fortios_system_arptable(terrascript.Resource):
pass
class fortios_system_autoinstall(terrascript.Resource):
pass
class fortios_system_automationaction(terrascript.Resource):
pass
class fortios_system_automationdestination(terrascript.Resource):
pass
class fortios_system_automationstitch(terrascript.Resource):
pass
class fortios_system_automationtrigger(terrascript.Resource):
pass
class fortios_system_autoscript(terrascript.Resource):
pass
class fortios_system_centralmanagement(terrascript.Resource):
pass
class fortios_system_clustersync(terrascript.Resource):
pass
class fortios_system_console(terrascript.Resource):
pass
class fortios_system_csf(terrascript.Resource):
pass
class fortios_system_customlanguage(terrascript.Resource):
pass
class fortios_system_ddns(terrascript.Resource):
pass
class fortios_system_dedicatedmgmt(terrascript.Resource):
pass
class fortios_system_dns(terrascript.Resource):
pass
class fortios_system_dnsdatabase(terrascript.Resource):
pass
class fortios_system_dnsserver(terrascript.Resource):
pass
class fortios_system_dscpbasedpriority(terrascript.Resource):
pass
class fortios_system_emailserver(terrascript.Resource):
pass
class fortios_system_externalresource(terrascript.Resource):
pass
class fortios_system_federatedupgrade(terrascript.Resource):
pass
class fortios_system_fipscc(terrascript.Resource):
pass
class fortios_system_fm(terrascript.Resource):
pass
class fortios_system_fortiguard(terrascript.Resource):
pass
class fortios_system_fortimanager(terrascript.Resource):
pass
class fortios_system_fortisandbox(terrascript.Resource):
pass
class fortios_system_fssopolling(terrascript.Resource):
pass
class fortios_system_ftmpush(terrascript.Resource):
pass
class fortios_system_geneve(terrascript.Resource):
pass
class fortios_system_geoipcountry(terrascript.Resource):
pass
class fortios_system_geoipoverride(terrascript.Resource):
pass
class fortios_system_global(terrascript.Resource):
pass
class fortios_system_gretunnel(terrascript.Resource):
pass
class fortios_system_ha(terrascript.Resource):
pass
class fortios_system_hamonitor(terrascript.Resource):
pass
class fortios_system_interface(terrascript.Resource):
pass
class fortios_system_ipiptunnel(terrascript.Resource):
pass
class fortios_system_ips(terrascript.Resource):
pass
class fortios_system_ipsecaggregate(terrascript.Resource):
pass
class fortios_system_ipsurlfilterdns(terrascript.Resource):
pass
class fortios_system_ipsurlfilterdns6(terrascript.Resource):
pass
class fortios_system_ipv6neighborcache(terrascript.Resource):
pass
class fortios_system_ipv6tunnel(terrascript.Resource):
pass
class fortios_system_license_forticare(terrascript.Resource):
pass
class fortios_system_license_vdom(terrascript.Resource):
pass
class fortios_system_license_vm(terrascript.Resource):
pass
class fortios_system_linkmonitor(terrascript.Resource):
pass
class fortios_system_macaddresstable(terrascript.Resource):
pass
class fortios_system_managementtunnel(terrascript.Resource):
pass
class fortios_system_mobiletunnel(terrascript.Resource):
pass
class fortios_system_nat64(terrascript.Resource):
pass
class fortios_system_ndproxy(terrascript.Resource):
pass
class fortios_system_netflow(terrascript.Resource):
pass
class fortios_system_networkvisibility(terrascript.Resource):
pass
class fortios_system_ntp(terrascript.Resource):
pass
class fortios_system_objecttagging(terrascript.Resource):
pass
class fortios_system_passwordpolicy(terrascript.Resource):
pass
class fortios_system_passwordpolicyguestadmin(terrascript.Resource):
pass
class fortios_system_pppoeinterface(terrascript.Resource):
pass
class fortios_system_proberesponse(terrascript.Resource):
pass
class fortios_system_proxyarp(terrascript.Resource):
pass
class fortios_system_ptp(terrascript.Resource):
pass
class fortios_system_replacemsggroup(terrascript.Resource):
pass
class fortios_system_replacemsgimage(terrascript.Resource):
pass
class fortios_system_resourcelimits(terrascript.Resource):
pass
class fortios_system_saml(terrascript.Resource):
pass
class fortios_system_sdnconnector(terrascript.Resource):
pass
class fortios_system_sdwan(terrascript.Resource):
pass
class fortios_system_sessionhelper(terrascript.Resource):
pass
class fortios_system_sessionttl(terrascript.Resource):
pass
class fortios_system_setting_dns(terrascript.Resource):
pass
class fortios_system_setting_global(terrascript.Resource):
pass
class fortios_system_setting_ntp(terrascript.Resource):
pass
class fortios_system_settings(terrascript.Resource):
pass
class fortios_system_sflow(terrascript.Resource):
pass
class fortios_system_sittunnel(terrascript.Resource):
pass
class fortios_system_smsserver(terrascript.Resource):
pass
class fortios_system_speedtestschedule(terrascript.Resource):
pass
class fortios_system_speedtestserver(terrascript.Resource):
pass
class fortios_system_ssoadmin(terrascript.Resource):
pass
class fortios_system_standalonecluster(terrascript.Resource):
pass
class fortios_system_storage(terrascript.Resource):
pass
class fortios_system_switchinterface(terrascript.Resource):
pass
class fortios_system_tosbasedpriority(terrascript.Resource):
pass
class fortios_system_vdom(terrascript.Resource):
pass
class fortios_system_vdom_setting(terrascript.Resource):
pass
class fortios_system_vdomdns(terrascript.Resource):
pass
class fortios_system_vdomexception(terrascript.Resource):
pass
class fortios_system_vdomlink(terrascript.Resource):
pass
class fortios_system_vdomnetflow(terrascript.Resource):
pass
class fortios_system_vdomproperty(terrascript.Resource):
pass
class fortios_system_vdomradiusserver(terrascript.Resource):
pass
class fortios_system_vdomsflow(terrascript.Resource):
pass
class fortios_system_virtualswitch(terrascript.Resource):
pass
class fortios_system_virtualwanlink(terrascript.Resource):
pass
class fortios_system_virtualwirepair(terrascript.Resource):
pass
class fortios_system_vnetunnel(terrascript.Resource):
pass
class fortios_system_vxlan(terrascript.Resource):
pass
class fortios_system_wccp(terrascript.Resource):
pass
class fortios_system_zone(terrascript.Resource):
pass
class fortios_systemautoupdate_pushupdate(terrascript.Resource):
pass
class fortios_systemautoupdate_schedule(terrascript.Resource):
pass
class fortios_systemautoupdate_tunneling(terrascript.Resource):
pass
class fortios_systemdhcp6_server(terrascript.Resource):
pass
class fortios_systemdhcp_server(terrascript.Resource):
pass
class fortios_systemlldp_networkpolicy(terrascript.Resource):
pass
class fortios_systemreplacemsg_admin(terrascript.Resource):
pass
class fortios_systemreplacemsg_alertmail(terrascript.Resource):
pass
class fortios_systemreplacemsg_auth(terrascript.Resource):
pass
class fortios_systemreplacemsg_automation(terrascript.Resource):
pass
class fortios_systemreplacemsg_devicedetectionportal(terrascript.Resource):
pass
class fortios_systemreplacemsg_ec(terrascript.Resource):
pass
class fortios_systemreplacemsg_fortiguardwf(terrascript.Resource):
pass
class fortios_systemreplacemsg_ftp(terrascript.Resource):
pass
class fortios_systemreplacemsg_http(terrascript.Resource):
pass
class fortios_systemreplacemsg_icap(terrascript.Resource):
pass
class fortios_systemreplacemsg_mail(terrascript.Resource):
pass
class fortios_systemreplacemsg_nacquar(terrascript.Resource):
pass
class fortios_systemreplacemsg_nntp(terrascript.Resource):
pass
class fortios_systemreplacemsg_spam(terrascript.Resource):
pass
class fortios_systemreplacemsg_sslvpn(terrascript.Resource):
pass
class fortios_systemreplacemsg_trafficquota(terrascript.Resource):
pass
class fortios_systemreplacemsg_utm(terrascript.Resource):
pass
class fortios_systemreplacemsg_webproxy(terrascript.Resource):
pass
class fortios_systemsnmp_community(terrascript.Resource):
pass
class fortios_systemsnmp_sysinfo(terrascript.Resource):
pass
class fortios_systemsnmp_user(terrascript.Resource):
pass
class fortios_user_adgrp(terrascript.Resource):
pass
class fortios_user_device(terrascript.Resource):
pass
class fortios_user_deviceaccesslist(terrascript.Resource):
pass
class fortios_user_devicecategory(terrascript.Resource):
pass
class fortios_user_devicegroup(terrascript.Resource):
pass
class fortios_user_domaincontroller(terrascript.Resource):
pass
class fortios_user_exchange(terrascript.Resource):
pass
class fortios_user_fortitoken(terrascript.Resource):
pass
class fortios_user_fsso(terrascript.Resource):
pass
class fortios_user_fssopolling(terrascript.Resource):
pass
class fortios_user_group(terrascript.Resource):
pass
class fortios_user_krbkeytab(terrascript.Resource):
pass
class fortios_user_ldap(terrascript.Resource):
pass
class fortios_user_local(terrascript.Resource):
pass
class fortios_user_nacpolicy(terrascript.Resource):
pass
class fortios_user_passwordpolicy(terrascript.Resource):
pass
class fortios_user_peer(terrascript.Resource):
pass
class fortios_user_peergrp(terrascript.Resource):
pass
class fortios_user_pop3(terrascript.Resource):
pass
class fortios_user_quarantine(terrascript.Resource):
pass
class fortios_user_radius(terrascript.Resource):
pass
class fortios_user_saml(terrascript.Resource):
pass
class fortios_user_securityexemptlist(terrascript.Resource):
pass
class fortios_user_setting(terrascript.Resource):
pass
class fortios_user_tacacs(terrascript.Resource):
pass
class fortios_voip_profile(terrascript.Resource):
pass
class fortios_vpn_ipsec_phase1interface(terrascript.Resource):
pass
class fortios_vpn_ipsec_phase2interface(terrascript.Resource):
pass
class fortios_vpn_l2tp(terrascript.Resource):
pass
class fortios_vpn_ocvpn(terrascript.Resource):
pass
class fortios_vpn_pptp(terrascript.Resource):
pass
class fortios_vpncertificate_ca(terrascript.Resource):
pass
class fortios_vpncertificate_crl(terrascript.Resource):
pass
class fortios_vpncertificate_local(terrascript.Resource):
pass
class fortios_vpncertificate_ocspserver(terrascript.Resource):
pass
class fortios_vpncertificate_remote(terrascript.Resource):
pass
class fortios_vpncertificate_setting(terrascript.Resource):
pass
class fortios_vpnipsec_concentrator(terrascript.Resource):
pass
class fortios_vpnipsec_forticlient(terrascript.Resource):
pass
class fortios_vpnipsec_manualkey(terrascript.Resource):
pass
class fortios_vpnipsec_manualkeyinterface(terrascript.Resource):
pass
class fortios_vpnipsec_phase1(terrascript.Resource):
pass
class fortios_vpnipsec_phase1interface(terrascript.Resource):
pass
class fortios_vpnipsec_phase2(terrascript.Resource):
pass
class fortios_vpnipsec_phase2interface(terrascript.Resource):
pass
class fortios_vpnssl_settings(terrascript.Resource):
pass
class fortios_vpnsslweb_hostchecksoftware(terrascript.Resource):
pass
class fortios_vpnsslweb_portal(terrascript.Resource):
pass
class fortios_vpnsslweb_realm(terrascript.Resource):
pass
class fortios_vpnsslweb_userbookmark(terrascript.Resource):
pass
class fortios_vpnsslweb_usergroupbookmark(terrascript.Resource):
pass
class fortios_waf_mainclass(terrascript.Resource):
pass
class fortios_waf_profile(terrascript.Resource):
pass
class fortios_waf_signature(terrascript.Resource):
pass
class fortios_waf_subclass(terrascript.Resource):
pass
class fortios_wanopt_authgroup(terrascript.Resource):
pass
class fortios_wanopt_cacheservice(terrascript.Resource):
pass
class fortios_wanopt_contentdeliverynetworkrule(terrascript.Resource):
pass
class fortios_wanopt_peer(terrascript.Resource):
pass
class fortios_wanopt_profile(terrascript.Resource):
pass
class fortios_wanopt_remotestorage(terrascript.Resource):
pass
class fortios_wanopt_settings(terrascript.Resource):
pass
class fortios_wanopt_webcache(terrascript.Resource):
pass
class fortios_webfilter_content(terrascript.Resource):
pass
class fortios_webfilter_contentheader(terrascript.Resource):
pass
class fortios_webfilter_fortiguard(terrascript.Resource):
pass
class fortios_webfilter_ftgdlocalcat(terrascript.Resource):
pass
class fortios_webfilter_ftgdlocalrating(terrascript.Resource):
pass
class fortios_webfilter_ipsurlfiltercachesetting(terrascript.Resource):
pass
class fortios_webfilter_ipsurlfiltersetting(terrascript.Resource):
pass
class fortios_webfilter_ipsurlfiltersetting6(terrascript.Resource):
pass
class fortios_webfilter_override(terrascript.Resource):
pass
class fortios_webfilter_profile(terrascript.Resource):
pass
class fortios_webfilter_searchengine(terrascript.Resource):
pass
class fortios_webfilter_urlfilter(terrascript.Resource):
pass
class fortios_webproxy_debugurl(terrascript.Resource):
pass
class fortios_webproxy_explicit(terrascript.Resource):
pass
class fortios_webproxy_forwardserver(terrascript.Resource):
pass
class fortios_webproxy_forwardservergroup(terrascript.Resource):
pass
class fortios_webproxy_global(terrascript.Resource):
pass
class fortios_webproxy_profile(terrascript.Resource):
pass
class fortios_webproxy_urlmatch(terrascript.Resource):
pass
class fortios_webproxy_wisp(terrascript.Resource):
pass
class fortios_wirelesscontroller_accesscontrollist(terrascript.Resource):
pass
class fortios_wirelesscontroller_address(terrascript.Resource):
pass
class fortios_wirelesscontroller_addrgrp(terrascript.Resource):
pass
class fortios_wirelesscontroller_apcfgprofile(terrascript.Resource):
pass
class fortios_wirelesscontroller_apstatus(terrascript.Resource):
pass
class fortios_wirelesscontroller_arrpprofile(terrascript.Resource):
pass
class fortios_wirelesscontroller_bleprofile(terrascript.Resource):
pass
class fortios_wirelesscontroller_bonjourprofile(terrascript.Resource):
pass
class fortios_wirelesscontroller_global(terrascript.Resource):
pass
class fortios_wirelesscontroller_intercontroller(terrascript.Resource):
pass
class fortios_wirelesscontroller_log(terrascript.Resource):
pass
class fortios_wirelesscontroller_mpskprofile(terrascript.Resource):
pass
class fortios_wirelesscontroller_qosprofile(terrascript.Resource):
pass
class fortios_wirelesscontroller_region(terrascript.Resource):
pass
class fortios_wirelesscontroller_setting(terrascript.Resource):
pass
class fortios_wirelesscontroller_snmp(terrascript.Resource):
pass
class fortios_wirelesscontroller_timers(terrascript.Resource):
pass
class fortios_wirelesscontroller_utmprofile(terrascript.Resource):
pass
class fortios_wirelesscontroller_vap(terrascript.Resource):
pass
class fortios_wirelesscontroller_vapgroup(terrascript.Resource):
pass
class fortios_wirelesscontroller_wagprofile(terrascript.Resource):
pass
class fortios_wirelesscontroller_widsprofile(terrascript.Resource):
pass
class fortios_wirelesscontroller_wtp(terrascript.Resource):
pass
class fortios_wirelesscontroller_wtpgroup(terrascript.Resource):
pass
class fortios_wirelesscontroller_wtpprofile(terrascript.Resource):
pass
class fortios_wirelesscontrollerhotspot20_anqp3gppcellular(terrascript.Resource):
pass
class fortios_wirelesscontrollerhotspot20_anqpipaddresstype(terrascript.Resource):
pass
class fortios_wirelesscontrollerhotspot20_anqpnairealm(terrascript.Resource):
pass
class fortios_wirelesscontrollerhotspot20_anqpnetworkauthtype(terrascript.Resource):
pass
class fortios_wirelesscontrollerhotspot20_anqproamingconsortium(terrascript.Resource):
pass
class fortios_wirelesscontrollerhotspot20_anqpvenuename(terrascript.Resource):
pass
class fortios_wirelesscontrollerhotspot20_h2qpconncapability(terrascript.Resource):
pass
class fortios_wirelesscontrollerhotspot20_h2qpoperatorname(terrascript.Resource):
pass
class fortios_wirelesscontrollerhotspot20_h2qposuprovider(terrascript.Resource):
pass
class fortios_wirelesscontrollerhotspot20_h2qpwanmetric(terrascript.Resource):
pass
class fortios_wirelesscontrollerhotspot20_hsprofile(terrascript.Resource):
pass
class fortios_wirelesscontrollerhotspot20_icon(terrascript.Resource):
pass
class fortios_wirelesscontrollerhotspot20_qosmap(terrascript.Resource):
pass
|
py | 1a4dff7506831772849750d419f5ee2f2d6259ef | from decimal import Decimal as D
from oscar.core.loading import get_class
from oscar.test import factories
Default = get_class('partner.strategy', 'Default')
def add_product(basket, price=None, quantity=1, product=None):
"""
Helper to add a product to the basket.
"""
has_strategy = False
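# basket.strategy can raise RuntimeError when no strategy has been assigned yet, and hasattr() does not swallow that, so guard explicitly.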
try:
has_strategy = hasattr(basket, 'strategy')
except RuntimeError:
pass
if not has_strategy:
basket.strategy = Default()
if price is None:
price = D('1')
if product and product.has_stockrecords:
record = product.stockrecords.all()[0]
else:
record = factories.create_stockrecord(
product=product, price_excl_tax=price,
num_in_stock=quantity + 1)
basket.add_product(record.product, quantity)
def add_products(basket, args):
"""
Helper to add a series of products to the passed basket
"""
for price, quantity in args:
add_product(basket, price, quantity)
|
py | 1a4dff887a5dc473df25728b5372dc4c424b806d | import pytest
import spacy
from spacy.language import Language
from timexy.languages.en import en
label = "timexy_label"
lang = "en"
@pytest.fixture()
def nlp() -> Language:
nlp = spacy.blank(lang)
nlp.add_pipe("timexy", config={"label": label})
return nlp
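# Collect every (text, date_start, date_end) test case defined by the English timexy rules.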
test_data = [t for rule in en.rules for t in rule.tests]
@pytest.mark.parametrize("text,date_start,date_end", test_data)
def test_rule(nlp: Language, text: str, date_start: int, date_end: int) -> None:
doc = nlp(text)
assert [
e
for e in doc.ents
if e.start_char == date_start and e.end_char == date_end and e.label_ == label
]
|
py | 1a4e00a55ea4acd5b5558482c30f46055ab30654 | #AQUÍ irán las pruebas que se realicen sobre GitHub.
#Importamos las librerías necesarias.
from github import Github
from github.GithubException import UnknownObjectException
import aux_functions as aux
import dataF_functions as d
import ci_tools as ci
import github_search as ghs
# import openpyxl --> this has to be installed in the venv for generarEXCEL to work.
# Generate a github_token to query the GitHub API through the library.
user = "jorcontrerasp"
token = aux.readFile("tokens/github_token.txt")
g = Github(user, token)
ciTool = ci.HerramientasCI.CI2
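# Candidate repositories for the test (copy one of these names into repoName below):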
# zhihu/Matisse
# EOSIO/eos
# AMAI-GmbH/AI-Expert-Roadmap
# jwasham/coding-interview-university
# gztchan/awesome-design
# agalwood/motrix
# kr328/clashforandroid
# salomonelli/best-resume-ever
# facebook/create-react-app
# goldbergyoni/nodebestpractices
# freecad/freecad
# vuejs/core
# artf/grapesjs
# cocoapods/cocoapods
# google/gvisor
# adguardteam/adguardhome
# playframework/playframework
# hackiftekhar/IQKeyboardManager -> matrix label?
# neovim/neovim -> in the jobs, should the 'env' label be collected as steps?
repoName = "neovim/neovim"
doTest = True
doSearchInAllCiTools = True
try:
repo = g.get_repo(repoName)
except UnknownObjectException as e:
print("El repositorio " + repoName + " no existe en GitHub: " + str(e))
doTest = False
if doTest:
filteredRepos = [repo]
df = d.makeDataFrame(filteredRepos, True)
df2 = d.makeCounterDataFrame()
df3 = d.makeEmptyLanguageDataFrame()
df6 = d.makeEmptyStageStatisticsDataFrame()
if doSearchInAllCiTools:
foundList = []
foundList = ghs.searchReposGitHubApi(filteredRepos, df, df2, df3, df6)
d.makeEXCEL(df2, "_counting")
else:
found,df,df3,df6 = ghs.searchLiteralPathFromRoot(repo, ciTool, df, df2, df3, df6)
#found,df,df3,df6 = ghs.searchLiteralPathFromRoot_REC(repo, ciTool, [], df, df2, df3, df6, [])
df,df2,df4,df5 = d.doAuxWithResultsDF(df, df2, df3, True)
d.makeEXCEL(df, "github/_github_results")
d.makeEXCEL(df2, "github/_counting")
d.makeEXCEL(df3, "github/_github_languages")
d.makeEXCEL(df4, "github/_github_language_statistics")
d.makeEXCEL(df5, "github/_github_ci_statistics")
d.makeEXCEL(df6, "github/_gitlab_stage_statistics")
print("Fin de la prueba.") |
py | 1a4e00d6527a069d185421b2c2b3e973e19a513e | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import errno
import io
import select
import socket
def safe_select(*args, **kwargs):
# N.B. This while loop is purely to facilitate SA_RESTART-like behavior for select(), which is
# (apparently) not covered by signal.siginterrupt(signal.SIGINT, False) when a timeout is passed.
# This helps avoid an unhandled select.error(4, 'Interrupted system call') on SIGINT.
# See https://bugs.python.org/issue12224 for more info.
while 1:
try:
return select.select(*args, **kwargs)
except select.error as e:
if e[0] != errno.EINTR:
raise
class RecvBufferedSocket(object):
"""A socket wrapper that simplifies recv() buffering."""
def __init__(self, socket, chunk_size=io.DEFAULT_BUFFER_SIZE, select_timeout=None):
"""
:param socket socket: The socket.socket object to wrap.
:param int chunk_size: The smallest max read size for calls to recv() in bytes.
:param float select_timeout: The select timeout for a socket read in seconds. An integer value
effectively makes self.recv non-blocking (default: None, blocking).
"""
self._socket = socket
self._chunk_size = chunk_size
self._select_timeout = select_timeout
self._buffer = b''
def recv(self, bufsize):
"""Buffers up to _chunk_size bytes when the internal buffer has less than `bufsize` bytes."""
assert bufsize > 0, 'a positive bufsize is required'
if len(self._buffer) < bufsize:
readable, _, _ = safe_select([self._socket], [], [], self._select_timeout)
if readable:
recvd = self._socket.recv(max(self._chunk_size, bufsize))
self._buffer = self._buffer + recvd
return_buf, self._buffer = self._buffer[:bufsize], self._buffer[bufsize:]
return return_buf
def __getattr__(self, attr):
return getattr(self._socket, attr)
|
py | 1a4e0128f8b18c46555d6ac974d8d7438c11d92e | from django.test import TestCase
from .models import Item
class TestModels(TestCase):
def test_new_item_defaults_to_done_false(self):
item = Item.objects.create(name="Test DoneFalse Item")
self.assertFalse(item.done)
def test_item_string_method_returns_name(self):
item_name = "Test Item StringMethod"
item = Item.objects.create(name=item_name)
self.assertEqual(str(item), item_name) |
py | 1a4e01a7b7b5f4099496064e6434eab7251d7faf | from .records_not_found_error import RecordsNotFoundException
from .provider_selection_error import ProviderSelectionException
__all__ = ['RecordsNotFoundException', 'ProviderSelectionException'] |
py | 1a4e01ad6bc063d6fec2e86c1a9c918aa1efcec3 | #!/usr/bin/env python3
from typing import List
from reagent import types as rlt
from reagent.core.dataclasses import dataclass, field
from reagent.models.base import ModelBase
from reagent.models.dqn import FullyConnectedDQN
from reagent.net_builder.discrete_dqn_net_builder import DiscreteDQNNetBuilder
from reagent.parameters import NormalizationData, param_hash
@dataclass
class FullyConnected(DiscreteDQNNetBuilder):
__hash__ = param_hash
sizes: List[int] = field(default_factory=lambda: [256, 128])
activations: List[str] = field(default_factory=lambda: ["relu", "relu"])
dropout_ratio: float = 0.0
use_batch_norm: bool = False
def __post_init_post_parse__(self):
super().__init__()
assert len(self.sizes) == len(self.activations), (
f"Must have the same numbers of sizes and activations; got: "
f"{self.sizes}, {self.activations}"
)
def build_q_network(
self,
state_feature_config: rlt.ModelFeatureConfig,
state_normalization_data: NormalizationData,
output_dim: int,
) -> ModelBase:
state_dim = self._get_input_dim(state_normalization_data)
return FullyConnectedDQN(
state_dim=state_dim,
action_dim=output_dim,
sizes=self.sizes,
activations=self.activations,
dropout_ratio=self.dropout_ratio,
use_batch_norm=self.use_batch_norm,
)
|
py | 1a4e02447a4012994db47d42a8fe065d0bf4a900 | import pandas as pd
import qcportal as ptl
from simtk import unit
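# Define a "particle" unit so that per-particle Hartree energies can be converted to kcal/mol.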
PARTICLE = unit.mole.create_unit(
6.02214076e23 ** -1,
"particle",
"particle",
)
HARTREE_PER_PARTICLE = unit.hartree / PARTICLE
HARTREE_TO_KCALMOL = HARTREE_PER_PARTICLE.conversion_factor_to(
unit.kilocalorie_per_mole
)
def main():
# Define the qcfractal server instance to download data from the datasets:
# 1. OpenFF Theory Benchmarking Set v1.1 - which contain the torsiondrives at different levels of theory
# link (https://github.com/openforcefield/qca-dataset-submission/tree/master/submissions/2020-12-18-OpenFF-Theory
# -Benchmarking-Set-v1.0)
client = ptl.FractalClient()
ds = client.get_collection(
"TorsionDriveDataset", "OpenFF Theory Benchmarking Set v1.0"
)
specifications = ds.list_specifications().index.to_list()
print(specifications)
# Create a dataframe to store the torsiondrives data
df = pd.DataFrame(columns=specifications)
for i, entry_index in enumerate(ds.df.index):
for spec_name in specifications:
data_entry = ds.get_entry(entry_index)
td_record_id = data_entry.object_map[spec_name]
td_dict = {}
td_dict["td_record_id"] = td_record_id
td_dict["attributes"] = data_entry.attributes
td_dict["mapped_smiles"] = data_entry.attributes[
"canonical_isomeric_explicit_hydrogen_mapped_smiles"
]
df.loc[entry_index + str(i), spec_name] = [td_dict]
td_record = client.query_procedures(td_record_id)[0]
print(f"{i:5d} : {entry_index:50s} status {td_record.status}")
if td_record.status == "COMPLETE":
angles = []
energies = []
dipoles = []
quadrupoles = []
for key, value in td_record.get_final_energies().items():
angles.append(key[0])
energies.append(value)
dipoles.append(
td_record.get_final_results()[key].extras["qcvars"][
"SCF DIPOLE"
]
)
quadrupoles.append(
td_record.get_final_results()[key].extras["qcvars"][
"SCF QUADRUPOLE"
]
)
angles, energies, dipoles, quadrupoles = zip(
*sorted(zip(angles, energies, dipoles, quadrupoles))
)
energy_min = min(energies)
relative_energies = [(x - energy_min) for x in energies]
dihedrals = td_record.keywords.dict()["dihedrals"][0]
df.loc[entry_index + str(i), spec_name][0].update(
{
"initial_molecules": client.query_molecules(
td_record.initial_molecule
),
"final_molecules": td_record.get_final_molecules(),
"final_energies": td_record.get_final_energies(),
"angles": angles,
"relative_energies": relative_energies,
"dipoles": dipoles,
"quadrupoles": quadrupoles,
"dihedrals": dihedrals,
"keywords": td_record.keywords.dict(),
}
)
# saving it to a pickle file
df.to_pickle("./torsiondrive_data.pkl")
if __name__ == "__main__":
main()
|
py | 1a4e02df20eb358c99e2f3820df727ece77ca6bc | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @File: cms.py
"""
envlib.cms
~~~~~~~~~~
Preset library of Cms configuration classes
"""
import json as json_tool
from copy import deepcopy
from envlib.env.envlogging import logger
from envlib.env.globals import current_app as app
from envlib.env.globals import g
from envlib.env.helpers import GetKeysMixin
from envlib.env_resources.preset_data import CMS_STORAGE_DIRECTORY, CMS_STORAGE_TYPE, cms_system_config_data, \
cms_archive_config_data
from envlib.envsetup.storage import Storage
from envlib.util import get_last_ip_str
from resources.data import STORAGE_CONFIG
__all__ = ['Cms', ]
class Cms(GetKeysMixin):
"""Cms配置类"""
def __init__(self):
pass
@classmethod
def query_cms_platform_config_by_rest(cls, check=False):
"""查询cms,系统配置,平台配置
查询结果绑定到 当前运行的Env实例关联的上下文环境信息AppCtxGlobals实例的代理 ``g`` 下::
key='cms_platform_config', value=查询接口返回值,cms,系统配置,平台配置
Args:
check (bool): 接口返回状态码校验,默认不校验
Returns:
rest接口返回值,cms,系统配置,平台配置
"""
res = app.send_by_rest('/api/demo@get')
app.bind_to_g(key='cms_platform_config', value=json_tool.loads(res.get('value')), lock=False)
return json_tool.loads(res.get('value'))
@classmethod
def config_cms_platform_by_rest(cls, json=cms_system_config_data, check=False):
"""cms,系统配置,平台配置
Args:
json (any): json数据结构
check (bool): 接口返回状态码校验,默认不校验
Returns:
rest接口返回值
"""
_config_cms_platform_json = {
"key": "viid",
"value": json_tool.dumps(json)
}
res = app.send_by_rest('/api/demo@post', json=_config_cms_platform_json, check=check)
cls.query_cms_platform_config_by_rest()
return res
@classmethod
def query_cms_archive_config_by_rest(cls, check=False):
"""查询cms,系统配置,一人一档配置
查询结果绑定到 当前运行的Env实例关联的上下文环境信息AppCtxGlobals实例的代理 ``g`` 下::
key='cms_archive_config', value=查询接口返回值,cms,系统配置,一人一档配置
Args:
check (bool): 接口返回状态码校验,默认不校验
Returns:
rest接口返回值,cms,系统配置,一人一档配置
"""
res = app.send_by_rest('/api/demo@get', check=check)
app.bind_to_g(key='cms_archive_config', value=res, lock=False)
return res
@classmethod
def config_cms_archive_by_rest(cls, json=cms_archive_config_data, check=False):
"""cms,系统配置,一人一档
Args:
json (any): json数据结构
check (bool): 接口返回状态码校验,默认不校验
Returns:
rest接口返回值
"""
res = app.send_by_rest('/api/demo@put', json=json, check=check)
cls.query_cms_archive_config_by_rest()
return res
@classmethod
def query_cms_cloud_storage_list_by_rest(cls, check=False):
"""cms-查询存储集群列表
查询结果绑定到 当前运行的Env实例关联的上下文环境信息AppCtxGlobals实例的代理 ``g`` 下::
key='cms_cloud_storage_list', value=查询接口返回值,cms-查询存储集群列表
Args:
check (bool): 接口返回状态码校验,默认不校验
Returns:
rest接口返回值,cms-查询存储集群列表
"""
res = app.send_by_rest('/api/demo@get', check=check)
app.bind_to_g(key='cms_cloud_storage_list', value=res.get('data'), lock=False)
return res.get('data')
@classmethod
def add_cms_cloud_storage_by_rest_via_json(cls, json, check=False):
"""cms系统配置-云存储配置-添加存储集群
Args:
json (any): json数据结构
check (bool): 接口返回状态码校验,默认不校验
Returns:
rest接口返回值
"""
res = app.send_by_rest('/api/demo@post', json=json, check=check)
return res
@classmethod
def query_cms_cloud_storage_capacity_by_rest(cls, ip=STORAGE_CONFIG.get('cm_ip'), check=False):
"""
Args:
ip (str): ip
check (bool): validate the status code returned by the interface; not validated by default
Returns:
REST interface return value
"""
res = app.send_by_rest('/api/demo@get', params=f'ip={ip}&port=9001&protocal=0',
check=check)
return res.get('capacity')
@classmethod
def config_cms_cloud_storage_directory_by_rest_via_json(cls, json, check=False):
"""cms-存储集群存储目录配置
Args:
json (any): json数据结构
check (bool): 接口返回状态码校验,默认不校验
Returns:
rest接口返回值
"""
res = app.send_by_rest('/api/demo@post', json=json, check=check)
return res
@classmethod
def query_cms_cloud_storage_directory_by_rest_via_params(cls, params, check=False):
"""cms-查询存储集群存储目录配置
Args:
params (any): params数据结构
check (bool): 接口返回状态码校验,默认不校验
Returns:
rest接口返回值
"""
res = app.send_by_rest('/api/demo@get', params=params, check=check)
return res
@classmethod
def config_cms_cloud_storage_from_env_ini(cls):
"""cms,系统配置-云存储配置,根据env_ini中预设的存储集群,添加存储集群
Returns:
rest接口返回值
"""
_storage_list = cls.query_cms_cloud_storage_list_by_rest().get('data')
_exist_storage = [_storage for _storage in _storage_list if
_storage.get('storage_name') == STORAGE_CONFIG.get('cms_storage_name')]
if _exist_storage:
logger.warning(f"CMS已添加存储{STORAGE_CONFIG.get('cms_storage_name')},请勿重复添加!!")
else:
_storage_capacity = cls.query_cms_cloud_storage_capacity_by_rest(
ip=STORAGE_CONFIG.get('cm_ip'))
_set_storage_capacity = _storage_capacity if _storage_capacity else 30000
_cms_storage_add_json = {
"read_write_permission": 1,
"storage_id": get_last_ip_str(STORAGE_CONFIG.get('cm_ip')),
"storage_name": STORAGE_CONFIG.get('cms_storage_name'),
"storage_ip": STORAGE_CONFIG.get('cm_ip'),
"storage_port": 9001,
"storage_protocal": 0,
"storage_capacity": _set_storage_capacity,
"storage_desc": None
}
cls.add_cms_cloud_storage_by_rest_via_json(json=_cms_storage_add_json)
res = cls.query_cms_cloud_storage_list_by_rest()
return res
@classmethod
def get_storage_id_via_cm_ip(cls, cm_ip=STORAGE_CONFIG.get('cm_ip')):
"""通过cm ip获取存储id
Args:
cm_ip (str): cm ip
Returns:
rest接口返回值, 存储id
"""
cls.query_cms_cloud_storage_list_by_rest()
_storage_id = g.getk('cms_cloud_storage_list').extracting('storage_id', filter={'storage_ip': cm_ip})
if _storage_id is None:
cls.config_cms_cloud_storage_from_env_ini()
_storage_id = g.getk('cms_cloud_storage_list').extracting('storage_id', filter={'storage_ip': cm_ip})
return _storage_id
@classmethod
def config_cms_cloud_storage_directory_from_env_ini(cls):
"""cms,系统配置-云存储配置,根据env_ini中预设的存储集群,进行目录配置
Returns:
rest接口返回值
"""
_storage_id = cls.get_storage_id_via_cm_ip(cm_ip=STORAGE_CONFIG.get('cm_ip'))
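# For each preset bucket, look up its quota and register the directory configuration if it is not set yet.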
for _bucket_name in CMS_STORAGE_DIRECTORY:
_quota = Storage.query_y3000_bucket_storage_quota_via_bucket_name(bucket_name=_bucket_name)
_quota = 200 if _quota == 0 else _quota
_bucket_id = CMS_STORAGE_TYPE.get(_bucket_name)
_query_storage_set = cls.query_cms_cloud_storage_directory_by_rest_via_params(
params=f'data_type={_bucket_id}&storage_id={_storage_id}')
_json = deepcopy(CMS_STORAGE_DIRECTORY.get(_bucket_name))
_json.update(storage_id=_storage_id)
_json.get('storage_info')[0].update(capacity=_quota)
if not _query_storage_set:  # not configured yet, so call the interface to configure it
cls.config_cms_cloud_storage_directory_by_rest_via_json(json=_json)
if __name__ == '__main__':
pass
|
py | 1a4e03c1546dd7365208cc543d69ecc1d80364ec | __author__ = 'Ulric Qin'
|
py | 1a4e04f37e654b012d0d39a5ad808db143129616 | _o_file = open("folder/test.txt", "r")
def read_file():
global _o_file
print(_o_file.read())
_o_file.close()
def read_file_loop():
global _o_file
for x in _o_file:
print(x)
_o_file.close()
|
py | 1a4e06594519bc549a9782f9d4bf752e9c1e7c87 | import unittest
from streamlink.plugins.crunchyroll import Crunchyroll
class TestPluginCrunchyroll(unittest.TestCase):
def test_can_handle_url(self):
# should match
self.assertTrue(Crunchyroll.can_handle_url("http://www.crunchyroll.com/idol-incidents/episode-1-why-become-a-dietwoman-728233"))
self.assertTrue(Crunchyroll.can_handle_url("http://www.crunchyroll.com/ru/idol-incidents/episode-1-why-become-a-dietwoman-728233"))
self.assertTrue(Crunchyroll.can_handle_url("http://www.crunchyroll.com/idol-incidents/media-728233"))
self.assertTrue(Crunchyroll.can_handle_url("http://www.crunchyroll.com/fr/idol-incidents/media-728233"))
self.assertTrue(Crunchyroll.can_handle_url("http://www.crunchyroll.com/media-728233"))
self.assertTrue(Crunchyroll.can_handle_url("http://www.crunchyroll.com/de/media-728233"))
self.assertTrue(Crunchyroll.can_handle_url("http://www.crunchyroll.fr/media-728233"))
self.assertTrue(Crunchyroll.can_handle_url("http://www.crunchyroll.fr/es/media-728233"))
# shouldn't match
self.assertFalse(Crunchyroll.can_handle_url("http://www.crunchyroll.com/gintama"))
self.assertFalse(Crunchyroll.can_handle_url("http://www.crunchyroll.es/gintama"))
self.assertFalse(Crunchyroll.can_handle_url("http://www.youtube.com/"))
|
py | 1a4e06a5f68a7bec563d1d0f6351aece0503fcef | # coding: utf-8
"""
Looker API 3.0 Reference
### Authorization The Looker API uses Looker **API3** credentials for authorization and access control. Looker admins can create API3 credentials on Looker's **Admin/Users** page. Pass API3 credentials to the **/login** endpoint to obtain a temporary access_token. Include that access_token in the Authorization header of Looker API requests. For details, see [Looker API Authorization](https://looker.com/docs/r/api/authorization) ### Client SDKs The Looker API is a RESTful system that should be usable by any programming language capable of making HTTPS requests. Client SDKs for a variety of programming languages can be generated from the Looker API's Swagger JSON metadata to streamline use of the Looker API in your applications. A client SDK for Ruby is available as an example. For more information, see [Looker API Client SDKs](https://looker.com/docs/r/api/client_sdks) ### Try It Out! The 'api-docs' page served by the Looker instance includes 'Try It Out!' buttons for each API method. After logging in with API3 credentials, you can use the \"Try It Out!\" buttons to call the API directly from the documentation page to interactively explore API features and responses. ### Versioning Future releases of Looker will expand this API release-by-release to securely expose more and more of the core power of Looker to API client applications. API endpoints marked as \"beta\" may receive breaking changes without warning. Stable (non-beta) API endpoints should not receive breaking changes in future releases. For more information, see [Looker API Versioning](https://looker.com/docs/r/api/versioning) # noqa: E501
OpenAPI spec version: 3.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import looker_client_30
from looker_client_30.looker_sdk.project_file import ProjectFile # noqa: E501
from looker_client_30.rest import ApiException
class TestProjectFile(unittest.TestCase):
"""ProjectFile unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testProjectFile(self):
"""Test ProjectFile"""
# FIXME: construct object with mandatory attributes with example values
# model = looker_client_30.models.project_file.ProjectFile() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a4e076c0aab45db01867ed84fff661cbfa869b6 | from django.contrib import admin
from django.db.models import Count
from flicks.base.admin import BaseModelAdmin, NumVotesFilter
from flicks.videos import models
from flicks.videos.tasks import process_video
class SelectRelatedFieldListFilter(admin.filters.RelatedFieldListFilter):
template = 'admin/filters_select.html'
class Video2013Admin(BaseModelAdmin):
list_display = ['title', 'user_full_name', 'user_email', 'num_votes', 'created',
'vimeo_id', 'filename', 'processed', 'approved']
list_filter = ['processed', 'approved', NumVotesFilter]
search_fields = ['title', 'description', 'user__userprofile__full_name',
'user__email']
readonly_fields = ['filename', 'processed', 'user_notified', 'created']
fieldsets = (
(None, {
'fields': ('title', 'user', 'created', 'vimeo_id', 'filename',
'description', 'thumbnail')
}),
('Moderation', {
'fields': ('processed', 'approved')
})
)
actions = ['process_videos', 'download_thumbnails']
change_form_template = 'admin/video2013_change_form.html'
def queryset(self, request):
"""Add num_votes field to queryset."""
qs = super(Video2013Admin, self).queryset(request)
qs = qs.annotate(num_votes=Count('voters'))
return qs
def num_votes(self, video):
# Use method on admin so we can sort by this field.
return video.vote_count
num_votes.admin_order_field = 'num_votes'
def user_full_name(self, instance):
return instance.user.profile.full_name if instance.user.profile else ''
def user_email(self, instance):
return instance.user.email
def process_videos(self, request, queryset):
"""Synchronously run the video processing task on the selected videos."""
for video in queryset:
process_video(video.id)
msg = '{0} videos processed successfully.'
self.message_user(request, msg.format(len(queryset)))
process_videos.short_description = 'Manually run video processing'
def download_thumbnails(self, request, queryset):
"""Attempt to download thumbnails for the selected videos."""
errors = []
for video in queryset:
try:
video.download_thumbnail()
except Exception as e:
msg = 'Error downloading thumbnail for "{0}": {1}'
errors.append(msg.format(video, e))
# Notify user of results.
count = len(queryset) - len(errors)
if count > 0:
msg = '{0} videos updated successfully.'
self.message_user(request, msg.format(count))
for error in errors:
self.message_user_error(request, error)
download_thumbnails.short_description = 'Download thumbnails from Vimeo'
class VoteAdmin(BaseModelAdmin):
list_display = ['user_nickname', 'user_email', 'video', 'created']
list_filter = ['created', ('video', SelectRelatedFieldListFilter)]
search_fields = ['user__userprofile__full_name', 'user__userprofile__nickname', 'user__email',
'video__title']
def user_nickname(self, vote):
return vote.user.userprofile.nickname
def user_email(self, vote):
return vote.user.email
class Video2012Admin(BaseModelAdmin):
"""Configuration for the video admin pages."""
list_display = ['title', 'user_email', 'state', 'judge_mark', 'category',
'region', 'shortlink', 'created']
list_filter = ['state', 'judge_mark', 'category', 'region']
search_fields = ['title', 'description', 'user__email']
class AwardAdmin(BaseModelAdmin):
"""Configuration for the award admin pages."""
list_display = ['region', 'award_type', 'category', 'video', 'preview']
admin.site.register(models.Video2013, Video2013Admin)
admin.site.register(models.Vote, VoteAdmin)
admin.site.register(models.Video2012, Video2012Admin)
admin.site.register(models.Award, AwardAdmin)
|
py | 1a4e0780035091dc3937db73c6c1b2932ebb0bbb |
from django.shortcuts import get_object_or_404
from users.serializers import CandidateSerializer
from users.models import Candidate
from rest_framework import viewsets
class CandidateViewSet(viewsets.ModelViewSet):
"""
A viewset for viewing and editing Candidate instances.
"""
serializer_class = CandidateSerializer
queryset = Candidate.objects.all()
|
py | 1a4e080869467fd03f6bd752ab0643fe82b98c4f | import pytest
import requests
def test_swagger():
model_endpoint = 'http://localhost:5000/swagger.json'
r = requests.get(url=model_endpoint)
assert r.status_code == 200
assert r.headers['Content-Type'] == 'application/json'
json = r.json()
assert 'swagger' in json
assert json.get('info') and json.get('info').get('title') == 'Model Asset Exchange Server'
def test_metadata():
model_endpoint = 'http://localhost:5000/model/metadata'
r = requests.get(url=model_endpoint)
assert r.status_code == 200
metadata = r.json()
assert metadata['id'] == 'ssrnet'
assert metadata['name'] == 'SSR-Net Facial Age Estimator Model'
assert metadata['description'] == 'SSR-Net Facial Recognition and Age Prediction model; trained using Keras on ' \
'the IMDB-WIKI dataset'
assert metadata['license'] == 'MIT'
def test_predict():
model_endpoint = 'http://localhost:5000/model/predict'
file_path = 'assets/tom_cruise.jpg'
with open(file_path, 'rb') as file:
file_form = {'image': (file_path, file, 'image/jpeg')}
r = requests.post(url=model_endpoint, files=file_form)
assert r.status_code == 200
json = r.json()
assert json['status'] == "ok"
assert 55 > json['predictions'][0]['age_estimation'] > 45
assert 310 > json['predictions'][0]['face_box'][0] > 290
assert 180 > json['predictions'][0]['face_box'][1] > 160
assert 390 > json['predictions'][0]['face_box'][2] > 370
assert 525 > json['predictions'][0]['face_box'][3] > 500
if __name__ == '__main__':
pytest.main([__file__])
|
py | 1a4e085cdaa3dc5aee10ad12b88df4024ecedfd2 | # -*- coding: utf-8 -*-
import numpy
from matplotlib import pyplot
def lif(v, ge, gi, i):
dv = (v * -0.01) + ge - gi + i
spk = v > 1
dv[spk] = -v[spk]
return dv, spk
def lif_net(num_neurons, duration):
offset = -numpy.linspace(0, 4 * numpy.pi, num_neurons)
offset[:num_neurons // 2] = -3 * numpy.pi
v = numpy.zeros((duration, num_neurons))
ge = numpy.zeros(num_neurons)
gi = numpy.zeros(num_neurons)
i = 0.019 * numpy.random.rand(duration, num_neurons)
spikes = numpy.zeros((duration, num_neurons))
v[0,:] = numpy.random.rand(num_neurons)
for t in numpy.arange(1, duration):
ge[num_neurons // 2:] = 0.15 * spikes[t-1, :num_neurons // 2]
gi = numpy.ones(num_neurons) * 0.001 * (numpy.sin(offset + t / 100) + 1)
dv, spikes[t,:] = lif(v[t-1,:], ge, gi, i[t,:])
v[t,:] = v[t-1,:] + dv
return spikes
spikes = lif_net(2000, 3000)
indices = numpy.where(spikes)
pyplot.figure()
ax = pyplot.subplot(121)
pyplot.scatter(indices[0][indices[1] < 1000], indices[1][indices[1] < 1000], marker='.', alpha=0.5)
indices = numpy.where(spikes)
pyplot.scatter(indices[0][indices[1] >= 1000], indices[1][indices[1] >= 1000], marker='.', alpha=0.5)
pyplot.xlabel('Time (ms)')
pyplot.yticks([])
pyplot.subplot(164)
pyplot.hist(indices[1], bins=50, orientation='horizontal')
pyplot.yticks([])
pyplot.xticks([])
pyplot.tight_layout()
|
py | 1a4e0920e63d475ac30f882123b5750dc9642a24 | # -*- coding: utf-8 -*-
# file: BERT_SPC.py
# author: songyouwei <[email protected]>
# Copyright (C) 2019. All Rights Reserved.
import torch
import torch.nn as nn
class BERT_SPC(nn.Module):
def __init__(self, bert, opt):
super(BERT_SPC, self).__init__()
self.bert = bert
self.dropout = nn.Dropout(0.4)
self.dense = nn.Linear(opt.bert_dim, opt.polarities_dim)
def forward(self, inputs):
text_bert_indices, bert_segments_ids = inputs[0], inputs[1]
_, pooled_output = self.bert(text_bert_indices, token_type_ids=bert_segments_ids)
pooled_output = self.dropout(pooled_output)
logits = self.dense(pooled_output)
return logits
|
py | 1a4e0a28be4cbb46162cb3f5eee7a13d325bfb40 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Advanced Movie Selection for Dreambox-Enigma2
#
# The plugin is developed on the basis from a lot of single plugins (thx for the code @ all)
# Coded by JackDaniel @ cmikula (c)2011
# Support: www.i-have-a-dreambox.com
#
# This plugin is licensed under the Creative Commons
# Attribution-NonCommercial-ShareAlike 3.0 Unported
# License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc-sa/3.0/ or send a letter to Creative
# Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
#
# Alternatively, this plugin may be distributed and executed on hardware which
# is licensed by Dream Multimedia GmbH.
#
# This plugin is NOT free software. It is open source, you are allowed to
# modify it (if you keep the license), but it may not be commercially
# distributed other than under the conditions noted above.
#
from __future__ import print_function
from __init__ import _
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Components.Pixmap import Pixmap
from Components.Button import Button
from Components.config import config, getConfigListEntry
from Components.ActionMap import ActionMap
from Components.ConfigList import ConfigListScreen
from Components.MultiContent import MultiContentEntryText
from Components.GUIComponent import GUIComponent
from Components.Sources.StaticText import StaticText
from enigma import eListboxPythonMultiContent, eListbox, gFont, RT_HALIGN_LEFT, RT_HALIGN_RIGHT
from Source.Remote.MessageServer import serverInstance, getIpAddress
from Source.Remote.Client import getClients
from time import localtime, strftime
from Source.Globals import SkinTools
staticIP = None
class ClientSetupList(GUIComponent):
def __init__(self, ip_address):
GUIComponent.__init__(self)
self.l = eListboxPythonMultiContent()
self.l.setFont(0, gFont("Regular", 22))
self.l.setFont(1, gFont("Regular", 18))
self.l.setItemHeight(100)
self.l.setBuildFunc(self.buildMovieListEntry)
self.onSelectionChanged = []
self.staticIP = ip_address
def connectSelChanged(self, fnc):
if not fnc in self.onSelectionChanged:
self.onSelectionChanged.append(fnc)
def disconnectSelChanged(self, fnc):
if fnc in self.onSelectionChanged:
self.onSelectionChanged.remove(fnc)
def selectionChanged(self):
for x in self.onSelectionChanged:
x()
def buildMovieListEntry(self, client):
res = [None]
width = self.l.getItemSize().width()
width_up_r = 250
width_up_l = width - width_up_r
width_dn_r = width / 2
width_dn_l = width - width_dn_r
pos_up_r = width - width_up_r
pos_dn_r = width - width_dn_r
if client.isRecording():
stby_text = _("Status:") + ' ' + _("Recording")
elif client.inStandby():
stby_text = _("Status:") + ' ' + _("Standby")
else:
stby_text = _("Status:") + ' ' + _("Switched on")
last_trash_clean_status = ""
lastEvent = client.lastTrashEvent()
if lastEvent == -1:
last_trash_clean_status = (_("The %s is a client box") % client.getDeviceName())
elif lastEvent > 0:
t = localtime(lastEvent)
last_trash_clean_status = _("Last remote wastebasket empty at %s") % (strftime(("%02d.%02d.%04d" % (t[2], t[1], t[0])) + ' ' + _("at") + ' ' + ("%02d:%02d" % (t[3], t[4])) + ' ' + _("Clock")))
next_trash_clean_status = ""
nextEvent = client.nextTrashEvent()
if nextEvent == -1:
trash_clean_status = (_("The %s is a client box") % client.getDeviceName())
elif nextEvent > 0:
t = localtime(nextEvent)
next_trash_clean_status = _("Next remote wastebasket empty at %s") % (strftime(("%02d.%02d.%04d" % (t[2], t[1], t[0])) + ' ' + _("at") + ' ' + ("%02d:%02d" % (t[3], t[4])) + ' ' + _("Clock")))
hostname = _("Hostname:") + ' ' + client.getDeviceName()
ip_addr = client.getAddress()
addr = _("IP:") + ' ' + ip_addr
if ip_addr == self.staticIP:
addr = addr + ' ' + _("<Local device>")
port = _("Port:") + ' ' + str(client.getPort())
res.append(MultiContentEntryText(pos=(5, 2), size=(width_up_l, 30), font=0, flags=RT_HALIGN_LEFT, text=hostname))
res.append(MultiContentEntryText(pos=(pos_up_r, 3), size=(width_up_r, 22), font=1, flags=RT_HALIGN_RIGHT, text=stby_text))
res.append(MultiContentEntryText(pos=(5, 26), size=(width_dn_l, 30), font=1, flags=RT_HALIGN_LEFT, text=addr))
res.append(MultiContentEntryText(pos=(pos_dn_r, 28), size=(width_dn_r, 22), font=1, flags=RT_HALIGN_RIGHT, text=port))
res.append(MultiContentEntryText(pos=(5, 50), size=(width, 30), font=1, flags=RT_HALIGN_LEFT, text=last_trash_clean_status))
res.append(MultiContentEntryText(pos=(5, 75), size=(width, 30), font=1, flags=RT_HALIGN_LEFT, text=next_trash_clean_status))
return res
def moveToIndex(self, index):
self.instance.moveSelectionTo(index)
def getCurrentIndex(self):
return self.instance.getCurrentIndex()
def getCurrent(self):
l = self.l.getCurrentSelection()
return l and l[0]
GUI_WIDGET = eListbox
def postWidgetCreate(self, instance):
instance.setContent(self.l)
instance.selectionChanged.get().append(self.selectionChanged)
def preWidgetRemove(self, instance):
instance.setContent(None)
instance.selectionChanged.get().remove(self.selectionChanged)
def reload(self):
self.list = []
for client in getClients():
self.list.append((client,))
print(client.getAddress())
self.l.setList(self.list)
def remove(self, x):
for l in self.list[:]:
if l[0] == x:
self.list.remove(l)
self.l.setList(self.list)
def __len__(self):
return len(self.list)
def moveTo(self, client):
count = 0
for x in self.list:
if x[0] == client:
self.instance.moveSelectionTo(count)
return True
count += 1
return False
class ClientSetup(ConfigListScreen, Screen):
def __init__(self, session):
Screen.__init__(self, session)
self.skinName = SkinTools.appendResolution("AdvancedMovieSelection_ClientSetup_")
self.staticIP = getIpAddress('eth0')
self.session = session
self["key_red"] = Button(_("Close"))
self["key_green"] = StaticText("")
self["key_yellow"] = StaticText("")
self["actions"] = ActionMap(["WizardActions", "MenuActions", "ShortcutActions", "EPGSelectActions"],
{
"ok": self.keySave,
"back": self.keyCancel,
"red": self.keyCancel,
"green": self.keySave,
"yellow": self.keyYellow,
"up": self.keyUp,
"down": self.keyDown,
"nextBouquet": self.keyBouquetUp,
"prevBouquet": self.keyBouquetDown,
}, -1)
self["status"] = StaticText("")
self["help"] = StaticText("")
self["green_button"] = Pixmap()
self["yellow_button"] = Pixmap()
self["green_button"].hide()
self["yellow_button"].hide()
self["clienttxt"] = StaticText("")
self["list"] = ClientSetupList(self.staticIP)
self.list = self["list"]
self.list.reload()
self.configList = []
ConfigListScreen.__init__(self, self.configList, session=self.session)
if not self.showHelp in self["config"].onSelectionChanged:
self["config"].onSelectionChanged.append(self.showHelp)
self.onShown.append(self.setWindowTitle)
def setWindowTitle(self):
self.setTitle(_("Advanced Movie Selection - Clientbox setup"))
if self.staticIP:
self.createSetup()
self["key_green"].setText(_("Save"))
self["key_yellow"].setText(_("Manual search"))
self["green_button"].show()
self["yellow_button"].show()
self["status"].setText(_("Local IP: %s") % self.staticIP)
if config.AdvancedMovieSelection.server_enabled.value:
self["clienttxt"].setText(_("Available Server/Clients"))
else:
self["clienttxt"].setText(_("Remoteserver disabled!"))
else:
self["status"].setText(_("ATTENTION: DHCP in lan configuration is activ, no clientbox services available!"))
def createSetup(self):
self.configList = []
self.configList.append(getConfigListEntry(_("Port address:"), config.AdvancedMovieSelection.server_port, _("Set the port address for client and server. Port address from connected clients will be automatically updated.")))
self.configList.append(getConfigListEntry(_("Start search IP:"), config.AdvancedMovieSelection.start_search_ip, _("Only last three digits from the IP must be set.")))
self.configList.append(getConfigListEntry(_("Stop search IP:"), config.AdvancedMovieSelection.stop_search_ip, _("Only last three digits from the IP must be set.")))
self["config"].setList(self.configList)
def showHelp(self):
current = self["config"].getCurrent()
if len(current) > 2 and current[2] is not None:
self["help"].setText(current[2])
else:
self["help"].setText(_("No Helptext available!"))
def cancelConfirm(self, result):
if not result:
return
for x in self["config"].list:
x[1].cancel()
self.close()
def keyCancel(self):
if self["config"].isChanged():
self.session.openWithCallback(self.cancelConfirm, MessageBox, _("Really close without saving settings?"))
else:
self.close()
def keySave(self):
if config.AdvancedMovieSelection.server_port.isChanged():
self.setPort()
if self.staticIP:
ConfigListScreen.keySave(self)
def keyYellow(self):
if self.staticIP:
if config.AdvancedMovieSelection.server_port.isChanged():
self.setPort()
self["status"].setText(_("Searching for clients, please wait ...")) #TODO: status wird nicht angezeigt ;(
serverInstance.setSearchRange(config.AdvancedMovieSelection.start_search_ip.value, config.AdvancedMovieSelection.stop_search_ip.value)
serverInstance.findClients()
self.finishedState()
def finishedState(self):
self["status"].setText(_("Manual search finished"))
self.list.reload()
def setPort(self):
config.AdvancedMovieSelection.server_port.save()
port = config.AdvancedMovieSelection.server_port.value
for client in getClients():
if client.getAddress() != self.staticIP:
client.setPort(port)
else:
# this only set the port of local client !don't reconnect it!
client.port = port
serverInstance.reconnect(port=port)
def keyUp(self):
self["config"].instance.moveSelection(self["config"].instance.moveUp)
def keyDown(self):
self["config"].instance.moveSelection(self["config"].instance.moveDown)
def keyBouquetUp(self):
self["list"].instance.moveSelection(self["list"].instance.pageUp)
def keyBouquetDown(self):
self["list"].instance.moveSelection(self["list"].instance.pageDown)
|
py | 1a4e0aacde095cf4afdea0ad372e420de8d181ef | import glob, os, shutil
if not os.path.exists('./converted'):
os.makedirs('./converted')
os.chdir('./labels')
for file in glob.glob("*.txt"):
f = open(file, "r")
line = f.read()
lineVals = line.split()
if (len(lineVals) > 19):
newLine = lineVals[0] + ' ' + lineVals[1] + ' ' + lineVals[2] + ' ' + lineVals[19] + ' ' + lineVals[20]
else:
newLine = ' '
with open('../converted/' + file, 'w') as file:
file.write(newLine)
os.chdir('../')
# delete all files in labels
shutil.rmtree('./labels')
# move converted to labels
os.rename('./converted', './labels')
# fix train and test
cwd = os.getcwd()
with open('./train_new.txt', 'w') as file:
f = open('./train.txt', "r")
for x in f:
file.write(x.replace('sspdFormat', cwd).replace('g ', 'g'))
f.close()
with open('./test_new.txt', 'w') as file:
f = open('./test.txt', "r")
for x in f:
file.write(x.replace('sspdFormat', cwd).replace('g ', 'g'))
f.close()
os.remove('./train.txt')
os.remove('./test.txt')
os.rename('./train_new.txt', './train.txt')
os.rename('./test_new.txt', './test.txt')
|
py | 1a4e0b0922aff9bc671516f3004245a368699a5c | import networkx as nx
import pyparsing as pp
from .parser import obo_parser
__all__ = ['obo_parser', 'parse_result_to_networkx']
def parse_result_to_networkx(result: pp.ParseResults) -> nx.MultiDiGraph:
common_attrs = {}
try:
common_attrs['name'] = result['headers']['ontology'][1]
common_attrs['version'] = result['headers']['data-version'][1]
except (KeyError, IndexError):
pass
graph = nx.MultiDiGraph(**common_attrs)
for stanza in result[1]:
name, tvps = stanza
if name == 'Term':
try:
if tvps['is_obsolete'][1]:
continue
except KeyError:
pass
_id = tvps['id'][1]
for key, *vals in tvps:
if key == 'is_a':
graph.add_edge(vals[0], _id, key='is_a')
elif key == 'relationship':
graph.add_edge(vals[1], _id, key=vals[0])
return graph
|
py | 1a4e0b895f665004da1431f5f5f50a227b9d3316 | from .models import Image
from django import forms
class NewImageForm(forms.ModelForm):
class Meta:
model = Image
fields = ['name','profile','image']
|
py | 1a4e0cafdcb892d8f610903892a2b5a945129f40 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Simon Dodsley ([email protected])
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefa_dns
version_added: '2.8'
short_description: Configure FlashArray DNS settings
description:
- Set or erase configuration for the DNS settings.
- Nameservers provided will overwrite any existing nameservers.
author:
- Pure Storage Ansible Team (@sdodsley) <[email protected]>
options:
state:
description:
- Set or delete DNS configuration
default: present
type: str
choices: [ absent, present ]
domain:
description:
- Domain suffix to be appended when performing DNS lookups.
type: str
nameservers:
description:
- List of up to 3 unique DNS server IP addresses. These can be
IPv4 or IPv6 - no validation of the addresses is performed.
type: list
extends_documentation_fragment:
- purestorage.fa
'''
EXAMPLES = r'''
- name: Delete existing DNS settings
purefa_dns:
state: absent
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: Set DNS settings
purefa_dns:
domain: purestorage.com
nameservers:
- 8.8.8.8
- 8.8.4.4
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_system, purefa_argument_spec
def remove(duplicate):
final_list = []
for num in duplicate:
if num not in final_list:
final_list.append(num)
return final_list
def delete_dns(module, array):
"""Delete DNS settings"""
changed = False
current_dns = array.get_dns()
if current_dns['domain'] == '' and current_dns['nameservers'] == ['']:
module.exit_json(changed=changed)
else:
try:
array.set_dns(domain='', nameservers=[])
changed = True
except Exception:
module.fail_json(msg='Delete DNS settings failed')
module.exit_json(changed=changed)
def create_dns(module, array):
"""Set DNS settings"""
changed = False
current_dns = array.get_dns()
if current_dns['domain'] != module.params['domain'] or sorted(module.params['nameservers']) != sorted(current_dns['nameservers']):
try:
array.set_dns(domain=module.params['domain'],
nameservers=module.params['nameservers'][0:3])
changed = True
except Exception:
module.fail_json(msg='Set DNS settings failed: Check configuration')
module.exit_json(changed=changed)
def main():
argument_spec = purefa_argument_spec()
argument_spec.update(dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
domain=dict(type='str'),
nameservers=dict(type='list'),
))
required_if = [('state', 'present', ['domain', 'nameservers'])]
module = AnsibleModule(argument_spec,
required_if=required_if,
supports_check_mode=False)
state = module.params['state']
array = get_system(module)
if state == 'absent':
delete_dns(module, array)
elif state == 'present':
module.params['nameservers'] = remove(module.params['nameservers'])
create_dns(module, array)
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
|
py | 1a4e0e4af1315cc56c19affed4e55335703aa5a6 | #!/usr/bin/env python3
# Copyright (c) 2019-2020 The Bitcoin Core and Devcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test Taproot softfork (BIPs 340-342)
from test_framework.blocktools import (
COINBASE_MATURITY,
create_coinbase,
create_block,
add_witness_commitment,
MAX_BLOCK_SIGOPS_WEIGHT,
NORMAL_GBT_REQUEST_PARAMS,
WITNESS_SCALE_FACTOR,
)
from test_framework.messages import (
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
)
from test_framework.script import (
ANNEX_TAG,
CScript,
CScriptNum,
CScriptOp,
LEAF_VERSION_TAPSCRIPT,
LegacySignatureHash,
LOCKTIME_THRESHOLD,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_2,
OP_3,
OP_4,
OP_5,
OP_6,
OP_7,
OP_8,
OP_9,
OP_10,
OP_11,
OP_12,
OP_16,
OP_2DROP,
OP_2DUP,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_CHECKSIG,
OP_CHECKSIGADD,
OP_CHECKSIGVERIFY,
OP_CODESEPARATOR,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_IF,
OP_NOP,
OP_NOT,
OP_NOTIF,
OP_PUSHDATA1,
OP_RETURN,
OP_SWAP,
OP_VERIFY,
SIGHASH_DEFAULT,
SIGHASH_ALL,
SIGHASH_NONE,
SIGHASH_SINGLE,
SIGHASH_ANYONECANPAY,
SegwitV0SignatureHash,
TaprootSignatureHash,
is_op_success,
taproot_construct,
)
from test_framework.script_util import (
key_to_p2wpkh_script,
keyhash_to_p2pkh_script,
script_to_p2sh_script,
script_to_p2wsh_script,
)
from test_framework.test_framework import DevcoinTestFramework
from test_framework.util import assert_raises_rpc_error, assert_equal
from test_framework.key import generate_privkey, compute_xonly_pubkey, sign_schnorr, tweak_add_privkey, ECKey
from test_framework.address import (
hash160,
)
from collections import OrderedDict, namedtuple
from io import BytesIO
import json
import hashlib
import os
import random
# === Framework for building spending transactions. ===
#
# The computation is represented as a "context" dict, whose entries store potentially-unevaluated expressions that
# refer to lower-level ones. By overwriting these expression, many aspects - both high and low level - of the signing
# process can be overridden.
#
# Specifically, a context object is a dict that maps names to compositions of:
# - values
# - lists of values
# - callables which, when fed the context object as argument, produce any of these
#
# The DEFAULT_CONTEXT object specifies a standard signing process, with many overridable knobs.
#
# The get(ctx, name) function can evaluate a name, and cache its result in the context.
# getter(name) can be used to construct a callable that evaluates name. For example:
#
# ctx1 = {**DEFAULT_CONTEXT, inputs=[getter("sign"), b'\x01']}
#
# creates a context where the script inputs are a signature plus the bytes 0x01.
#
# override(expr, name1=expr1, name2=expr2, ...) can be used to cause an expression to be evaluated in a selectively
# modified context. For example:
#
# ctx2 = {**DEFAULT_CONTEXT, sighash=override(default_sighash, hashtype=SIGHASH_DEFAULT)}
#
# creates a context ctx2 where the sighash is modified to use hashtype=SIGHASH_DEFAULT. This differs from
#
# ctx3 = {**DEFAULT_CONTEXT, hashtype=SIGHASH_DEFAULT}
#
# in that ctx3 will globally use hashtype=SIGHASH_DEFAULT (including in the hashtype byte appended to the signature)
# while ctx2 only uses the modified hashtype inside the sighash calculation.
def deep_eval(ctx, expr):
"""Recursively replace any callables c in expr (including inside lists) with c(ctx)."""
while callable(expr):
expr = expr(ctx)
if isinstance(expr, list):
expr = [deep_eval(ctx, x) for x in expr]
return expr
# Data type to represent fully-evaluated expressions in a context dict (so we can avoid reevaluating them).
Final = namedtuple("Final", "value")
def get(ctx, name):
"""Evaluate name in context ctx."""
assert name in ctx, "Missing '%s' in context" % name
expr = ctx[name]
if not isinstance(expr, Final):
# Evaluate and cache the result.
expr = Final(deep_eval(ctx, expr))
ctx[name] = expr
return expr.value
def getter(name):
"""Return a callable that evaluates name in its passed context."""
return lambda ctx: get(ctx, name)
def override(expr, **kwargs):
"""Return a callable that evaluates expr in a modified context."""
return lambda ctx: deep_eval({**ctx, **kwargs}, expr)
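# Illustrative sketch (not part of the original file): a tiny demonstration of how get(),
# getter() and override() interact. The names "demo_ctx" and "double" are invented for this
# example only; it is kept as comments so it does not affect the test itself.
# demo_ctx = {"x": 3, "double": lambda ctx: 2 * get(ctx, "x")}
# deep_eval(dict(demo_ctx), override(getter("double"), x=10))  # -> 20, "x" is overridden only for this evaluation
# get(demo_ctx, "double")  # -> 6, and the result is cached in demo_ctx as Final(6)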
# === Implementations for the various default expressions in DEFAULT_CONTEXT ===
def default_hashtype(ctx):
"""Default expression for "hashtype": SIGHASH_DEFAULT for taproot, SIGHASH_ALL otherwise."""
mode = get(ctx, "mode")
if mode == "taproot":
return SIGHASH_DEFAULT
else:
return SIGHASH_ALL
def default_tapleaf(ctx):
"""Default expression for "tapleaf": looking up leaf in tap[2]."""
return get(ctx, "tap").leaves[get(ctx, "leaf")]
def default_script_taproot(ctx):
"""Default expression for "script_taproot": tapleaf.script."""
return get(ctx, "tapleaf").script
def default_leafversion(ctx):
"""Default expression for "leafversion": tapleaf.version"""
return get(ctx, "tapleaf").version
def default_negflag(ctx):
"""Default expression for "negflag": tap.negflag."""
return get(ctx, "tap").negflag
def default_pubkey_internal(ctx):
"""Default expression for "pubkey_internal": tap.internal_pubkey."""
return get(ctx, "tap").internal_pubkey
def default_merklebranch(ctx):
"""Default expression for "merklebranch": tapleaf.merklebranch."""
return get(ctx, "tapleaf").merklebranch
def default_controlblock(ctx):
"""Default expression for "controlblock": combine leafversion, negflag, pubkey_internal, merklebranch."""
return bytes([get(ctx, "leafversion") + get(ctx, "negflag")]) + get(ctx, "pubkey_internal") + get(ctx, "merklebranch")
def default_sighash(ctx):
"""Default expression for "sighash": depending on mode, compute BIP341, BIP143, or legacy sighash."""
tx = get(ctx, "tx")
idx = get(ctx, "idx")
hashtype = get(ctx, "hashtype_actual")
mode = get(ctx, "mode")
if mode == "taproot":
# BIP341 signature hash
utxos = get(ctx, "utxos")
annex = get(ctx, "annex")
if get(ctx, "leaf") is not None:
codeseppos = get(ctx, "codeseppos")
leaf_ver = get(ctx, "leafversion")
script = get(ctx, "script_taproot")
return TaprootSignatureHash(tx, utxos, hashtype, idx, scriptpath=True, script=script, leaf_ver=leaf_ver, codeseparator_pos=codeseppos, annex=annex)
else:
return TaprootSignatureHash(tx, utxos, hashtype, idx, scriptpath=False, annex=annex)
elif mode == "witv0":
# BIP143 signature hash
scriptcode = get(ctx, "scriptcode")
utxos = get(ctx, "utxos")
return SegwitV0SignatureHash(scriptcode, tx, idx, hashtype, utxos[idx].nValue)
else:
# Pre-segwit signature hash
scriptcode = get(ctx, "scriptcode")
return LegacySignatureHash(scriptcode, tx, idx, hashtype)[0]
def default_tweak(ctx):
"""Default expression for "tweak": None if a leaf is specified, tap[0] otherwise."""
if get(ctx, "leaf") is None:
return get(ctx, "tap").tweak
return None
def default_key_tweaked(ctx):
"""Default expression for "key_tweaked": key if tweak is None, tweaked with it otherwise."""
key = get(ctx, "key")
tweak = get(ctx, "tweak")
if tweak is None:
return key
else:
return tweak_add_privkey(key, tweak)
def default_signature(ctx):
"""Default expression for "signature": BIP340 signature or ECDSA signature depending on mode."""
sighash = get(ctx, "sighash")
if get(ctx, "mode") == "taproot":
key = get(ctx, "key_tweaked")
flip_r = get(ctx, "flag_flip_r")
flip_p = get(ctx, "flag_flip_p")
return sign_schnorr(key, sighash, flip_r=flip_r, flip_p=flip_p)
else:
key = get(ctx, "key")
return key.sign_ecdsa(sighash)
def default_hashtype_actual(ctx):
"""Default expression for "hashtype_actual": hashtype, unless mismatching SIGHASH_SINGLE in taproot."""
hashtype = get(ctx, "hashtype")
mode = get(ctx, "mode")
if mode != "taproot":
return hashtype
idx = get(ctx, "idx")
tx = get(ctx, "tx")
if hashtype & 3 == SIGHASH_SINGLE and idx >= len(tx.vout):
return (hashtype & ~3) | SIGHASH_NONE
return hashtype
def default_bytes_hashtype(ctx):
"""Default expression for "bytes_hashtype": bytes([hashtype_actual]) if not 0, b"" otherwise."""
return bytes([x for x in [get(ctx, "hashtype_actual")] if x != 0])
def default_sign(ctx):
"""Default expression for "sign": concatenation of signature and bytes_hashtype."""
return get(ctx, "signature") + get(ctx, "bytes_hashtype")
def default_inputs_keypath(ctx):
"""Default expression for "inputs_keypath": a signature."""
return [get(ctx, "sign")]
def default_witness_taproot(ctx):
"""Default expression for "witness_taproot", consisting of inputs, script, control block, and annex as needed."""
annex = get(ctx, "annex")
suffix_annex = []
if annex is not None:
suffix_annex = [annex]
if get(ctx, "leaf") is None:
return get(ctx, "inputs_keypath") + suffix_annex
else:
return get(ctx, "inputs") + [bytes(get(ctx, "script_taproot")), get(ctx, "controlblock")] + suffix_annex
def default_witness_witv0(ctx):
"""Default expression for "witness_witv0", consisting of inputs and witness script, as needed."""
script = get(ctx, "script_witv0")
inputs = get(ctx, "inputs")
if script is None:
return inputs
else:
return inputs + [script]
def default_witness(ctx):
"""Default expression for "witness", delegating to "witness_taproot" or "witness_witv0" as needed."""
mode = get(ctx, "mode")
if mode == "taproot":
return get(ctx, "witness_taproot")
elif mode == "witv0":
return get(ctx, "witness_witv0")
else:
return []
def default_scriptsig(ctx):
"""Default expression for "scriptsig", consisting of inputs and redeemscript, as needed."""
scriptsig = []
mode = get(ctx, "mode")
if mode == "legacy":
scriptsig = get(ctx, "inputs")
redeemscript = get(ctx, "script_p2sh")
if redeemscript is not None:
scriptsig += [bytes(redeemscript)]
return scriptsig
# The default context object.
DEFAULT_CONTEXT = {
# == The main expressions to evaluate. Only override these for unusual or invalid spends. ==
# The overall witness stack, as a list of bytes objects.
"witness": default_witness,
# The overall scriptsig, as a list of CScript objects (to be concatenated) and bytes objects (to be pushed)
"scriptsig": default_scriptsig,
# == Expressions you'll generally only override for intentionally invalid spends. ==
# The witness stack for spending a taproot output.
"witness_taproot": default_witness_taproot,
# The witness stack for spending a P2WPKH/P2WSH output.
"witness_witv0": default_witness_witv0,
# The script inputs for a taproot key path spend.
"inputs_keypath": default_inputs_keypath,
# The actual hashtype to use (usually equal to hashtype, but in taproot SIGHASH_SINGLE is not always allowed).
"hashtype_actual": default_hashtype_actual,
# The bytes object for a full signature (including hashtype byte, if needed).
"bytes_hashtype": default_bytes_hashtype,
# A full script signature (bytes including hashtype, if needed)
"sign": default_sign,
# An ECDSA or Schnorr signature (excluding hashtype byte).
"signature": default_signature,
# The 32-byte tweaked key (equal to key for script path spends, or key+tweak for key path spends).
"key_tweaked": default_key_tweaked,
# The tweak to use (None for script path spends, the actual tweak for key path spends).
"tweak": default_tweak,
# The sighash value (32 bytes)
"sighash": default_sighash,
# The information about the chosen script path spend (TaprootLeafInfo object).
"tapleaf": default_tapleaf,
# The script to push, and include in the sighash, for a taproot script path spend.
"script_taproot": default_script_taproot,
# The internal pubkey for a taproot script path spend (32 bytes).
"pubkey_internal": default_pubkey_internal,
# The negation flag of the internal pubkey for a taproot script path spend.
"negflag": default_negflag,
# The leaf version to include in the sighash (this does not affect the one in the control block).
"leafversion": default_leafversion,
# The Merkle path to include in the control block for a script path spend.
"merklebranch": default_merklebranch,
# The control block to push for a taproot script path spend.
"controlblock": default_controlblock,
# Whether to produce signatures with invalid P sign (Schnorr signatures only).
"flag_flip_p": False,
# Whether to produce signatures with invalid R sign (Schnorr signatures only).
"flag_flip_r": False,
# == Parameters that can be changed without invalidating, but do have a default: ==
# The hashtype (as an integer).
"hashtype": default_hashtype,
# The annex (only when mode=="taproot").
"annex": None,
# The codeseparator position (only when mode=="taproot").
"codeseppos": -1,
# The redeemscript to add to the scriptSig (if P2SH; None implies not P2SH).
"script_p2sh": None,
# The script to add to the witness in (if P2WSH; None implies P2WPKH)
"script_witv0": None,
# The leaf to use in taproot spends (if script path spend; None implies key path spend).
"leaf": None,
# The input arguments to provide to the executed script
"inputs": [],
# == Parameters to be set before evaluation: ==
# - mode: what spending style to use ("taproot", "witv0", or "legacy").
# - key: the (untweaked) private key to sign with (ECKey object for ECDSA, 32 bytes for Schnorr).
# - tap: the TaprootInfo object (see taproot_construct; needed in mode=="taproot").
# - tx: the transaction to sign.
# - utxos: the UTXOs being spent (needed in mode=="witv0" and mode=="taproot").
# - idx: the input position being signed.
# - scriptcode: the scriptcode to include in legacy and witv0 sighashes.
}
def flatten(lst):
ret = []
for elem in lst:
if isinstance(elem, list):
ret += flatten(elem)
else:
ret.append(elem)
return ret
def spend(tx, idx, utxos, **kwargs):
"""Sign transaction input idx of tx, provided utxos is the list of outputs being spent.
Additional arguments may be provided that override any aspect of the signing process.
See DEFAULT_CONTEXT above for what can be overridden, and what must be provided.
"""
ctx = {**DEFAULT_CONTEXT, "tx":tx, "idx":idx, "utxos":utxos, **kwargs}
def to_script(elem):
"""If fed a CScript, return it; if fed bytes, return a CScript that pushes it."""
if isinstance(elem, CScript):
return elem
else:
return CScript([elem])
scriptsig_list = flatten(get(ctx, "scriptsig"))
scriptsig = CScript(b"".join(bytes(to_script(elem)) for elem in scriptsig_list))
witness_stack = flatten(get(ctx, "witness"))
return (scriptsig, witness_stack)
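# Illustrative sketch (not part of the original file): a hypothetical direct call to spend()
# for a key path spend of input 0. "tx", "utxos", "tap" and "sec" are placeholder names here;
# in the real tests spend() is invoked indirectly through the Spender machinery below.
# scriptsig, witness_stack = spend(tx, 0, utxos, mode="taproot", key=sec, tap=tap, hashtype=SIGHASH_ALL)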
# === Spender objects ===
#
# Each spender is a tuple of:
# - A scriptPubKey which is to be spent from (CScript)
# - A comment describing the test (string)
# - Whether the spending (on itself) is expected to be standard (bool)
# - A tx-signing lambda returning (scriptsig, witness_stack), taking as inputs:
# - A transaction to sign (CTransaction)
# - An input position (int)
# - The spent UTXOs by this transaction (list of CTxOut)
# - Whether to produce a valid spend (bool)
# - A string with an expected error message for failure case if known
# - The (pre-taproot) sigops weight consumed by a successful spend
# - Whether this spend cannot fail
# - Whether this test demands being placed in a txin with no corresponding txout (for testing SIGHASH_SINGLE behavior)
Spender = namedtuple("Spender", "script,comment,is_standard,sat_function,err_msg,sigops_weight,no_fail,need_vin_vout_mismatch")
def make_spender(comment, *, tap=None, witv0=False, script=None, pkh=None, p2sh=False, spk_mutate_pre_p2sh=None, failure=None, standard=True, err_msg=None, sigops_weight=0, need_vin_vout_mismatch=False, **kwargs):
"""Helper for constructing Spender objects using the context signing framework.
* tap: a TaprootInfo object (see taproot_construct), for Taproot spends (cannot be combined with pkh, witv0, or script)
* witv0: boolean indicating the use of witness v0 spending (needs one of script or pkh)
* script: the actual script executed (for bare/P2WSH/P2SH spending)
* pkh: the public key for P2PKH or P2WPKH spending
* p2sh: whether the output is P2SH wrapper (this is supported even for Taproot, where it makes the output unencumbered)
* spk_mutate_pre_p2sh: a callable to be applied to the script (before potentially P2SH-wrapping it)
* failure: a dict of entries to override in the context when intentionally failing to spend (if None, no_fail will be set)
* standard: whether the (valid version of) spending is expected to be standard
* err_msg: a string with an expected error message for failure (or None, if not cared about)
* sigops_weight: the pre-taproot sigops weight consumed by a successful spend
* need_vin_vout_mismatch: whether this test requires being tested in a transaction input that has no corresponding
transaction output.
"""
conf = dict()
# Compute scriptPubKey and set useful defaults based on the inputs.
if witv0:
assert tap is None
conf["mode"] = "witv0"
if pkh is not None:
# P2WPKH
assert script is None
pubkeyhash = hash160(pkh)
spk = key_to_p2wpkh_script(pkh)
conf["scriptcode"] = keyhash_to_p2pkh_script(pubkeyhash)
conf["script_witv0"] = None
conf["inputs"] = [getter("sign"), pkh]
elif script is not None:
# P2WSH
spk = script_to_p2wsh_script(script)
conf["scriptcode"] = script
conf["script_witv0"] = script
else:
assert False
elif tap is None:
conf["mode"] = "legacy"
if pkh is not None:
# P2PKH
assert script is None
pubkeyhash = hash160(pkh)
spk = keyhash_to_p2pkh_script(pubkeyhash)
conf["scriptcode"] = spk
conf["inputs"] = [getter("sign"), pkh]
elif script is not None:
# bare
spk = script
conf["scriptcode"] = script
else:
assert False
else:
assert script is None
conf["mode"] = "taproot"
conf["tap"] = tap
spk = tap.scriptPubKey
if spk_mutate_pre_p2sh is not None:
spk = spk_mutate_pre_p2sh(spk)
if p2sh:
# P2SH wrapper can be combined with anything else
conf["script_p2sh"] = spk
spk = script_to_p2sh_script(spk)
conf = {**conf, **kwargs}
def sat_fn(tx, idx, utxos, valid):
if valid:
return spend(tx, idx, utxos, **conf)
else:
assert failure is not None
return spend(tx, idx, utxos, **{**conf, **failure})
return Spender(script=spk, comment=comment, is_standard=standard, sat_function=sat_fn, err_msg=err_msg, sigops_weight=sigops_weight, no_fail=failure is None, need_vin_vout_mismatch=need_vin_vout_mismatch)
def add_spender(spenders, *args, **kwargs):
"""Make a spender using make_spender, and add it to spenders."""
spenders.append(make_spender(*args, **kwargs))
# === Helpers for the test ===
def random_checksig_style(pubkey):
"""Creates a random CHECKSIG* tapscript that would succeed with only the valid signature on witness stack."""
opcode = random.choice([OP_CHECKSIG, OP_CHECKSIGVERIFY, OP_CHECKSIGADD])
if opcode == OP_CHECKSIGVERIFY:
ret = CScript([pubkey, opcode, OP_1])
elif opcode == OP_CHECKSIGADD:
num = random.choice([0, 0x7fffffff, -0x7fffffff])
ret = CScript([num, pubkey, opcode, num + 1, OP_EQUAL])
else:
ret = CScript([pubkey, opcode])
return bytes(ret)
def random_bytes(n):
"""Return a random bytes object of length n."""
return bytes(random.getrandbits(8) for i in range(n))
def bitflipper(expr):
"""Return a callable that evaluates expr and returns it with a random bitflip."""
def fn(ctx):
sub = deep_eval(ctx, expr)
assert isinstance(sub, bytes)
return (int.from_bytes(sub, 'little') ^ (1 << random.randrange(len(sub) * 8))).to_bytes(len(sub), 'little')
return fn
def zero_appender(expr):
"""Return a callable that evaluates expr and returns it with a zero added."""
return lambda ctx: deep_eval(ctx, expr) + b"\x00"
def byte_popper(expr):
"""Return a callable that evaluates expr and returns it with its last byte removed."""
return lambda ctx: deep_eval(ctx, expr)[:-1]
# Expected error strings
ERR_SIG_SIZE = {"err_msg": "Invalid Schnorr signature size"}
ERR_SIG_HASHTYPE = {"err_msg": "Invalid Schnorr signature hash type"}
ERR_SIG_SCHNORR = {"err_msg": "Invalid Schnorr signature"}
ERR_OP_RETURN = {"err_msg": "OP_RETURN was encountered"}
ERR_CONTROLBLOCK_SIZE = {"err_msg": "Invalid Taproot control block size"}
ERR_WITNESS_PROGRAM_MISMATCH = {"err_msg": "Witness program hash mismatch"}
ERR_PUSH_LIMIT = {"err_msg": "Push value size limit exceeded"}
ERR_DISABLED_OPCODE = {"err_msg": "Attempted to use a disabled opcode"}
ERR_TAPSCRIPT_CHECKMULTISIG = {"err_msg": "OP_CHECKMULTISIG(VERIFY) is not available in tapscript"}
ERR_MINIMALIF = {"err_msg": "OP_IF/NOTIF argument must be minimal in tapscript"}
ERR_UNKNOWN_PUBKEY = {"err_msg": "Public key is neither compressed or uncompressed"}
ERR_STACK_SIZE = {"err_msg": "Stack size limit exceeded"}
ERR_CLEANSTACK = {"err_msg": "Stack size must be exactly one after execution"}
ERR_STACK_EMPTY = {"err_msg": "Operation not valid with the current stack size"}
ERR_SIGOPS_RATIO = {"err_msg": "Too much signature validation relative to witness weight"}
ERR_UNDECODABLE = {"err_msg": "Opcode missing or not understood"}
ERR_NO_SUCCESS = {"err_msg": "Script evaluated without error but finished with a false/empty top stack element"}
ERR_EMPTY_WITNESS = {"err_msg": "Witness program was passed an empty witness"}
ERR_CHECKSIGVERIFY = {"err_msg": "Script failed an OP_CHECKSIGVERIFY operation"}
VALID_SIGHASHES_ECDSA = [
SIGHASH_ALL,
SIGHASH_NONE,
SIGHASH_SINGLE,
SIGHASH_ANYONECANPAY + SIGHASH_ALL,
SIGHASH_ANYONECANPAY + SIGHASH_NONE,
SIGHASH_ANYONECANPAY + SIGHASH_SINGLE
]
VALID_SIGHASHES_TAPROOT = [SIGHASH_DEFAULT] + VALID_SIGHASHES_ECDSA
VALID_SIGHASHES_TAPROOT_SINGLE = [
SIGHASH_SINGLE,
SIGHASH_ANYONECANPAY + SIGHASH_SINGLE
]
VALID_SIGHASHES_TAPROOT_NO_SINGLE = [h for h in VALID_SIGHASHES_TAPROOT if h not in VALID_SIGHASHES_TAPROOT_SINGLE]
SIGHASH_BITFLIP = {"failure": {"sighash": bitflipper(default_sighash)}}
SIG_POP_BYTE = {"failure": {"sign": byte_popper(default_sign)}}
SINGLE_SIG = {"inputs": [getter("sign")]}
SIG_ADD_ZERO = {"failure": {"sign": zero_appender(default_sign)}}
DUST_LIMIT = 600
MIN_FEE = 50000
# === Actual test cases ===
def spenders_taproot_active():
"""Return a list of Spenders for testing post-Taproot activation behavior."""
secs = [generate_privkey() for _ in range(8)]
pubs = [compute_xonly_pubkey(sec)[0] for sec in secs]
spenders = []
# == Tests for BIP340 signature validation. ==
# These are primarily tested through the test vectors implemented in libsecp256k1, and in src/tests/key_tests.cpp.
# Some things are tested programmatically as well here.
tap = taproot_construct(pubs[0])
# Test with key with bit flipped.
add_spender(spenders, "sig/key", tap=tap, key=secs[0], failure={"key_tweaked": bitflipper(default_key_tweaked)}, **ERR_SIG_SCHNORR)
# Test with sighash with bit flipped.
add_spender(spenders, "sig/sighash", tap=tap, key=secs[0], failure={"sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
# Test with invalid R sign.
add_spender(spenders, "sig/flip_r", tap=tap, key=secs[0], failure={"flag_flip_r": True}, **ERR_SIG_SCHNORR)
# Test with invalid P sign.
add_spender(spenders, "sig/flip_p", tap=tap, key=secs[0], failure={"flag_flip_p": True}, **ERR_SIG_SCHNORR)
# Test with signature with bit flipped.
add_spender(spenders, "sig/bitflip", tap=tap, key=secs[0], failure={"signature": bitflipper(default_signature)}, **ERR_SIG_SCHNORR)
# == Tests for signature hashing ==
# Run all tests once with no annex, and once with a valid random annex.
for annex in [None, lambda _: bytes([ANNEX_TAG]) + random_bytes(random.randrange(0, 250))]:
# Non-empty annex is non-standard
no_annex = annex is None
# Sighash mutation tests (test all sighash combinations)
for hashtype in VALID_SIGHASHES_TAPROOT:
common = {"annex": annex, "hashtype": hashtype, "standard": no_annex}
# Pure pubkey
tap = taproot_construct(pubs[0])
add_spender(spenders, "sighash/purepk", tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Pubkey/P2PK script combination
scripts = [("s0", CScript(random_checksig_style(pubs[1])))]
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "sighash/keypath_hashtype_%x" % hashtype, tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/scriptpath_hashtype_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Test SIGHASH_SINGLE behavior in combination with mismatching outputs
if hashtype in VALID_SIGHASHES_TAPROOT_SINGLE:
add_spender(spenders, "sighash/keypath_hashtype_mis_%x" % hashtype, tap=tap, key=secs[0], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True)
add_spender(spenders, "sighash/scriptpath_hashtype_mis_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), **SINGLE_SIG, failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True)
# Test OP_CODESEPARATOR impact on sighashing.
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
common = {"annex": annex, "hashtype": hashtype, "standard": no_annex}
scripts = [
("pk_codesep", CScript(random_checksig_style(pubs[1]) + bytes([OP_CODESEPARATOR]))), # codesep after checksig
("codesep_pk", CScript(bytes([OP_CODESEPARATOR]) + random_checksig_style(pubs[1]))), # codesep before checksig
("branched_codesep", CScript([random_bytes(random.randrange(511)), OP_DROP, OP_IF, OP_CODESEPARATOR, pubs[0], OP_ELSE, OP_CODESEPARATOR, pubs[1], OP_ENDIF, OP_CHECKSIG])), # branch dependent codesep
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "sighash/pk_codesep", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/codesep_pk", tap=tap, leaf="codesep_pk", key=secs[1], codeseppos=0, **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/branched_codesep/left", tap=tap, leaf="branched_codesep", key=secs[0], codeseppos=3, **common, inputs=[getter("sign"), b'\x01'], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/branched_codesep/right", tap=tap, leaf="branched_codesep", key=secs[1], codeseppos=6, **common, inputs=[getter("sign"), b''], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Reusing the scripts above, test that various features affect the sighash.
add_spender(spenders, "sighash/annex", tap=tap, leaf="pk_codesep", key=secs[1], hashtype=hashtype, standard=False, **SINGLE_SIG, annex=bytes([ANNEX_TAG]), failure={"sighash": override(default_sighash, annex=None)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/script", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, script_taproot=tap.leaves["codesep_pk"].script)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/leafver", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leafversion=random.choice([x & 0xFE for x in range(0x100) if x & 0xFE != 0xC0]))}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leaf=None)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/keypath", tap=tap, key=secs[0], **common, failure={"sighash": override(default_sighash, leaf="pk_codesep")}, **ERR_SIG_SCHNORR)
# Test that invalid hashtypes don't work, both in key path and script path spends
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
for invalid_hashtype in [x for x in range(0x100) if x not in VALID_SIGHASHES_TAPROOT]:
add_spender(spenders, "sighash/keypath_unk_hashtype_%x" % invalid_hashtype, tap=tap, key=secs[0], hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/scriptpath_unk_hashtype_%x" % invalid_hashtype, tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE)
# Test that hashtype 0 cannot have a hashtype byte, and 1 must have one.
add_spender(spenders, "sighash/hashtype0_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/hashtype0_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/hashtype1_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
# Test that hashtype 0 and hashtype 1 cannot be transmuted into each other.
add_spender(spenders, "sighash/hashtype0to1_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype0to1_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1to0_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1to0_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
# Test aspects of signatures with unusual lengths
for hashtype in [SIGHASH_DEFAULT, random.choice(VALID_SIGHASHES_TAPROOT)]:
scripts = [
("csv", CScript([pubs[2], OP_CHECKSIGVERIFY, OP_1])),
("cs_pos", CScript([pubs[2], OP_CHECKSIG])),
("csa_pos", CScript([OP_0, pubs[2], OP_CHECKSIGADD, OP_1, OP_EQUAL])),
("cs_neg", CScript([pubs[2], OP_CHECKSIG, OP_NOT])),
("csa_neg", CScript([OP_2, pubs[2], OP_CHECKSIGADD, OP_2, OP_EQUAL]))
]
random.shuffle(scripts)
tap = taproot_construct(pubs[3], scripts)
# Empty signatures
add_spender(spenders, "siglen/empty_keypath", tap=tap, key=secs[3], hashtype=hashtype, failure={"sign": b""}, **ERR_SIG_SIZE)
add_spender(spenders, "siglen/empty_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_CHECKSIGVERIFY)
add_spender(spenders, "siglen/empty_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS)
add_spender(spenders, "siglen/empty_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS)
add_spender(spenders, "siglen/empty_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random_bytes(random.randrange(1, 63))}, **ERR_SIG_SIZE)
add_spender(spenders, "siglen/empty_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random_bytes(random.randrange(66, 100))}, **ERR_SIG_SIZE)
# Appending a zero byte to signatures invalidates them
add_spender(spenders, "siglen/padzero_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
# Removing the last byte from signatures invalidates them
add_spender(spenders, "siglen/popbyte_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
# Verify that an invalid signature is not allowed, not even when the CHECKSIG* is expected to fail.
add_spender(spenders, "siglen/invalid_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "siglen/invalid_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
# == Test that BIP341 spending only applies to witness version 1, program length 32, no P2SH ==
for p2sh in [False, True]:
for witver in range(1, 17):
for witlen in [20, 31, 32, 33]:
def mutate(spk):
prog = spk[2:]
assert len(prog) == 32
if witlen < 32:
prog = prog[0:witlen]
elif witlen > 32:
prog += bytes([0 for _ in range(witlen - 32)])
return CScript([CScriptOp.encode_op_n(witver), prog])
scripts = [("s0", CScript([pubs[0], OP_CHECKSIG])), ("dummy", CScript([OP_RETURN]))]
tap = taproot_construct(pubs[1], scripts)
if not p2sh and witver == 1 and witlen == 32:
add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, failure={"leaf": "dummy"}, **ERR_OP_RETURN)
else:
add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], standard=False)
add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, standard=False)
# == Test various aspects of BIP341 spending paths ==
# A set of functions that compute the hashing partner in a Merkle tree, designed to exercise
# edge cases. This relies on the taproot_construct feature that a lambda can be passed in
# instead of a subtree, to compute the partner to be hashed with.
PARTNER_MERKLE_FN = [
# Combine with itself
lambda h: h,
# Combine with hash 0
lambda h: bytes([0 for _ in range(32)]),
# Combine with hash 2^256-1
lambda h: bytes([0xff for _ in range(32)]),
# Combine with itself-1 (BE)
lambda h: (int.from_bytes(h, 'big') - 1).to_bytes(32, 'big'),
# Combine with itself+1 (BE)
lambda h: (int.from_bytes(h, 'big') + 1).to_bytes(32, 'big'),
# Combine with itself-1 (LE)
lambda h: (int.from_bytes(h, 'little') - 1).to_bytes(32, 'big'),
# Combine with itself+1 (LE)
lambda h: (int.from_bytes(h, 'little') + 1).to_bytes(32, 'little'),
# Combine with random bitflipped version of self.
lambda h: (int.from_bytes(h, 'little') ^ (1 << random.randrange(256))).to_bytes(32, 'little')
]
# Start with a tree that has depth 1 for "128deep" and depth 2 for "129deep".
scripts = [("128deep", CScript([pubs[0], OP_CHECKSIG])), [("129deep", CScript([pubs[0], OP_CHECKSIG])), random.choice(PARTNER_MERKLE_FN)]]
# Add 127 nodes on top of that tree, so that "128deep" and "129deep" end up at their designated depths.
for _ in range(127):
scripts = [scripts, random.choice(PARTNER_MERKLE_FN)]
tap = taproot_construct(pubs[0], scripts)
# Test that spends with a depth of 128 work, but 129 doesn't (even with a tree with weird Merkle branches in it).
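# (A control block is 33 + 32*m bytes and BIP341 allows at most m=128 Merkle path elements, so the 129-deep leaf needs an oversized control block.)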
add_spender(spenders, "spendpath/merklelimit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"leaf": "129deep"}, **ERR_CONTROLBLOCK_SIZE)
# Test that flipping the negation bit invalidates spends.
add_spender(spenders, "spendpath/negflag", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"negflag": lambda ctx: 1 - default_negflag(ctx)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that bitflips in the Merkle branch invalidate it.
add_spender(spenders, "spendpath/bitflipmerkle", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"merklebranch": bitflipper(default_merklebranch)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that bitflips in the internal pubkey invalidate it.
add_spender(spenders, "spendpath/bitflippubkey", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"pubkey_internal": bitflipper(default_pubkey_internal)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that empty witnesses are invalid.
add_spender(spenders, "spendpath/emptywit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"witness": []}, **ERR_EMPTY_WITNESS)
# Test that adding garbage to the control block invalidates it.
add_spender(spenders, "spendpath/padlongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random_bytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block invalidates it.
add_spender(spenders, "spendpath/trunclongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE)
scripts = [("s", CScript([pubs[0], OP_CHECKSIG]))]
tap = taproot_construct(pubs[1], scripts)
# Test that adding garbage to the control block invalidates it.
add_spender(spenders, "spendpath/padshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random_bytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block invalidates it.
add_spender(spenders, "spendpath/truncshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block to 1 byte ("-1 Merkle length") invalidates it
add_spender(spenders, "spendpath/trunc1shortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:1]}, **ERR_CONTROLBLOCK_SIZE)
# == Test BIP342 edge cases ==
csa_low_val = random.randrange(0, 17) # Within range for OP_n
csa_low_result = csa_low_val + 1
csa_high_val = random.randrange(17, 100) if random.getrandbits(1) else random.randrange(-100, -1) # Outside OP_n range
csa_high_result = csa_high_val + 1
OVERSIZE_NUMBER = 2**31
assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER))), 6)
assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER-1))), 5)
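# CScriptNum.encode prepends a 1-byte push length: 2**31 needs 5 value bytes (the top bit forces an extra sign byte) for 6 total, while 2**31-1 fits in 4 value bytes for 5 total. Numeric script operands are limited to 4 bytes, which is what t27/t29 below exercise.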
big_choices = []
big_scriptops = []
for i in range(1000):
r = random.randrange(len(pubs))
big_choices.append(r)
big_scriptops += [pubs[r], OP_CHECKSIGVERIFY]
def big_spend_inputs(ctx):
"""Helper function to construct the script input for t33/t34 below."""
# Instead of signing 999 times, precompute signatures for every (key, hashtype) combination
sigs = {}
for ht in VALID_SIGHASHES_TAPROOT:
for k in range(len(pubs)):
sigs[(k, ht)] = override(default_sign, hashtype=ht, key=secs[k])(ctx)
num = get(ctx, "num")
return [sigs[(big_choices[i], random.choice(VALID_SIGHASHES_TAPROOT))] for i in range(num - 1, -1, -1)]
# Various BIP342 features
scripts = [
# 0) drop stack element and OP_CHECKSIG
("t0", CScript([OP_DROP, pubs[1], OP_CHECKSIG])),
# 1) normal OP_CHECKSIG
("t1", CScript([pubs[1], OP_CHECKSIG])),
# 2) normal OP_CHECKSIGVERIFY
("t2", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1])),
# 3) Hypothetical OP_CHECKMULTISIG script that takes a single sig as input
("t3", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIG])),
# 4) Hypothetical OP_CHECKMULTISIGVERIFY script that takes a single sig as input
("t4", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIGVERIFY, OP_1])),
# 5) OP_IF script that needs a true input
("t5", CScript([OP_IF, pubs[1], OP_CHECKSIG, OP_ELSE, OP_RETURN, OP_ENDIF])),
# 6) OP_NOTIF script that needs a true input
("t6", CScript([OP_NOTIF, OP_RETURN, OP_ELSE, pubs[1], OP_CHECKSIG, OP_ENDIF])),
# 7) OP_CHECKSIG with an empty key
("t7", CScript([OP_0, OP_CHECKSIG])),
# 8) OP_CHECKSIGVERIFY with an empty key
("t8", CScript([OP_0, OP_CHECKSIGVERIFY, OP_1])),
# 9) normal OP_CHECKSIGADD that also ensures return value is correct
("t9", CScript([csa_low_val, pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 10) OP_CHECKSIGADD with empty key
("t10", CScript([csa_low_val, OP_0, OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 11) OP_CHECKSIGADD with missing counter stack element
("t11", CScript([pubs[1], OP_CHECKSIGADD, OP_1, OP_EQUAL])),
# 12) OP_CHECKSIG that needs invalid signature
("t12", CScript([pubs[1], OP_CHECKSIGVERIFY, pubs[0], OP_CHECKSIG, OP_NOT])),
# 13) OP_CHECKSIG with empty key that needs invalid signature
("t13", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_CHECKSIG, OP_NOT])),
# 14) OP_CHECKSIGADD that needs invalid signature
("t14", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, pubs[0], OP_CHECKSIGADD, OP_NOT])),
# 15) OP_CHECKSIGADD with empty key that needs invalid signature
("t15", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGADD, OP_NOT])),
# 16) OP_CHECKSIG with unknown pubkey type
("t16", CScript([OP_1, OP_CHECKSIG])),
# 17) OP_CHECKSIGADD with unknown pubkey type
("t17", CScript([OP_0, OP_1, OP_CHECKSIGADD])),
# 18) OP_CHECKSIGVERIFY with unknown pubkey type
("t18", CScript([OP_1, OP_CHECKSIGVERIFY, OP_1])),
# 19) script longer than 10000 bytes and over 201 non-push opcodes
("t19", CScript([OP_0, OP_0, OP_2DROP] * 10001 + [pubs[1], OP_CHECKSIG])),
# 20) OP_CHECKSIGVERIFY with empty key
("t20", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGVERIFY, OP_1])),
# 21) Script that grows the stack to 1000 elements
("t21", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 999 + [OP_DROP] * 999)),
# 22) Script that grows the stack to 1001 elements
("t22", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 1000 + [OP_DROP] * 1000)),
# 23) Script that expects an input stack of 1000 elements
("t23", CScript([OP_DROP] * 999 + [pubs[1], OP_CHECKSIG])),
# 24) Script that expects an input stack of 1001 elements
("t24", CScript([OP_DROP] * 1000 + [pubs[1], OP_CHECKSIG])),
# 25) Script that pushes a MAX_SCRIPT_ELEMENT_SIZE-bytes element
("t25", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE), OP_DROP, pubs[1], OP_CHECKSIG])),
# 26) Script that pushes a (MAX_SCRIPT_ELEMENT_SIZE+1)-bytes element
("t26", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, pubs[1], OP_CHECKSIG])),
# 27) CHECKSIGADD that must fail because its numeric argument is >4 bytes
("t27", CScript([CScriptNum(OVERSIZE_NUMBER), pubs[1], OP_CHECKSIGADD])),
# 28) Pushes random CScriptNum value, checks OP_CHECKSIGADD result
("t28", CScript([csa_high_val, pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])),
# 29) CHECKSIGADD that succeeds with proper sig because its numeric argument is <=4 bytes
("t29", CScript([CScriptNum(OVERSIZE_NUMBER-1), pubs[1], OP_CHECKSIGADD])),
# 30) Variant of t1 with "normal" 33-byte pubkey
("t30", CScript([b'\x03' + pubs[1], OP_CHECKSIG])),
# 31) Variant of t2 with "normal" 33-byte pubkey
("t31", CScript([b'\x02' + pubs[1], OP_CHECKSIGVERIFY, OP_1])),
# 32) Variant of t28 with "normal" 33-byte pubkey
("t32", CScript([csa_high_val, b'\x03' + pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])),
# 33) 999-of-999 multisig
("t33", CScript(big_scriptops[:1998] + [OP_1])),
# 34) 1000-of-1000 multisig
("t34", CScript(big_scriptops[:2000] + [OP_1])),
# 35) Variant of t9 that uses a non-minimally encoded input arg
("t35", CScript([bytes([csa_low_val]), pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 36) Empty script
("t36", CScript([])),
]
# Add many dummies to test huge trees
for j in range(100000):
scripts.append((None, CScript([OP_RETURN, random.randrange(100000)])))
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
common = {
"hashtype": hashtype,
"key": secs[1],
"tap": tap,
}
# Test that MAX_SCRIPT_ELEMENT_SIZE byte stack element inputs are valid, but not one more (and 80 bytes is standard but 81 is not).
add_spender(spenders, "tapscript/inputmaxlimit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random_bytes(MAX_SCRIPT_ELEMENT_SIZE)], failure={"inputs": [getter("sign"), random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1)]}, **ERR_PUSH_LIMIT)
add_spender(spenders, "tapscript/input80limit", leaf="t0", **common, inputs=[getter("sign"), random_bytes(80)])
add_spender(spenders, "tapscript/input81limit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random_bytes(81)])
# Test that OP_CHECKMULTISIG and OP_CHECKMULTISIGVERIFY cause failure, but OP_CHECKSIG and OP_CHECKSIGVERIFY work.
add_spender(spenders, "tapscript/disabled_checkmultisig", leaf="t1", **common, **SINGLE_SIG, failure={"leaf": "t3"}, **ERR_TAPSCRIPT_CHECKMULTISIG)
add_spender(spenders, "tapscript/disabled_checkmultisigverify", leaf="t2", **common, **SINGLE_SIG, failure={"leaf": "t4"}, **ERR_TAPSCRIPT_CHECKMULTISIG)
# Test that OP_IF and OP_NOTIF do not accept non-0x01 as truth value (the MINIMALIF rule is consensus in Tapscript)
add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x02']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x03']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0001']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0100']}, **ERR_MINIMALIF)
# Test that 1-byte public keys (which are unknown) are acceptable but nonstandard with unrelated signatures, but 0-byte public keys are not valid.
add_spender(spenders, "tapscript/unkpk/checksig", leaf="t16", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/unkpk/checksigadd", leaf="t17", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/unkpk/checksigverify", leaf="t18", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY)
# Test that 33-byte public keys (which are unknown) are acceptable but nonstandard with valid signatures, but normal pubkeys are not valid in that case.
add_spender(spenders, "tapscript/oldpk/checksig", leaf="t30", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t1"}, **ERR_SIG_SCHNORR)
add_spender(spenders, "tapscript/oldpk/checksigadd", leaf="t31", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t2"}, **ERR_SIG_SCHNORR)
add_spender(spenders, "tapscript/oldpk/checksigverify", leaf="t32", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t28"}, **ERR_SIG_SCHNORR)
# Test that 0-byte public keys are not acceptable.
add_spender(spenders, "tapscript/emptypk/checksig", leaf="t1", **SINGLE_SIG, **common, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigverify", leaf="t2", **SINGLE_SIG, **common, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t35", standard=False, **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
# Test that OP_CHECKSIGADD results are as expected
add_spender(spenders, "tapscript/checksigaddresults", leaf="t28", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error")
add_spender(spenders, "tapscript/checksigaddoversize", leaf="t29", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error")
# Test that OP_CHECKSIGADD requires 3 stack elements.
add_spender(spenders, "tapscript/checksigadd3args", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t11"}, **ERR_STACK_EMPTY)
# Test that empty signatures do not cause script failure in OP_CHECKSIG and OP_CHECKSIGADD (but do fail with empty pubkey, and do fail OP_CHECKSIGVERIFY)
add_spender(spenders, "tapscript/emptysigs/checksig", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t13"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptysigs/nochecksigverify", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t20"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptysigs/checksigadd", leaf="t14", **common, inputs=[b'', getter("sign")], failure={"leaf": "t15"}, **ERR_UNKNOWN_PUBKEY)
# Test that scripts over 10000 bytes (and over 201 non-push ops) are acceptable.
add_spender(spenders, "tapscript/no10000limit", leaf="t19", **SINGLE_SIG, **common)
# Test that a stack size of 1000 elements is permitted, but 1001 isn't.
add_spender(spenders, "tapscript/1000stack", leaf="t21", **SINGLE_SIG, **common, failure={"leaf": "t22"}, **ERR_STACK_SIZE)
# Test that an input stack size of 1000 elements is permitted, but 1001 isn't.
add_spender(spenders, "tapscript/1000inputs", leaf="t23", **common, inputs=[getter("sign")] + [b'' for _ in range(999)], failure={"leaf": "t24", "inputs": [getter("sign")] + [b'' for _ in range(1000)]}, **ERR_STACK_SIZE)
# Test that pushing a MAX_SCRIPT_ELEMENT_SIZE byte stack element is valid, but one longer is not.
add_spender(spenders, "tapscript/pushmaxlimit", leaf="t25", **common, **SINGLE_SIG, failure={"leaf": "t26"}, **ERR_PUSH_LIMIT)
# Test that 999-of-999 multisig works (but 1000-of-1000 triggers stack size limits)
add_spender(spenders, "tapscript/bigmulti", leaf="t33", **common, inputs=big_spend_inputs, num=999, failure={"leaf": "t34", "num": 1000}, **ERR_STACK_SIZE)
# Test that the CLEANSTACK rule is consensus critical in tapscript
add_spender(spenders, "tapscript/cleanstack", leaf="t36", tap=tap, inputs=[b'\x01'], failure={"inputs": [b'\x01', b'\x01']}, **ERR_CLEANSTACK)
# == Test for sigops ratio limit ==
# Given a number n, and a public key pk, functions that produce a (CScript, sigops). Each script takes as
# input a valid signature with the passed pk followed by a dummy push of bytes that are to be dropped, and
# will execute sigops signature checks.
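# Whether such a script passes depends on the BIP342 sigops budget: an input's budget is its witness size plus 50, and every executed signature check with a non-empty signature costs 50, so padding the dummy push can turn a failing spend into a valid one.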
SIGOPS_RATIO_SCRIPTS = [
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIG.
lambda n, pk: (CScript([OP_DROP, pk] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_CHECKSIG]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGVERIFY.
lambda n, pk: (CScript([OP_DROP, pk, OP_0, OP_IF, OP_2DUP, OP_CHECKSIGVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_2, OP_SWAP, OP_CHECKSIGADD, OP_3, OP_EQUAL]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIG.
lambda n, pk: (CScript([random_bytes(220), OP_2DROP, pk, OP_1, OP_NOTIF, OP_2DUP, OP_CHECKSIG, OP_VERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_4, OP_SWAP, OP_CHECKSIGADD, OP_5, OP_EQUAL]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGADD.
lambda n, pk: (CScript([OP_DROP, pk, OP_1, OP_IF, OP_ELSE, OP_2DUP, OP_6, OP_SWAP, OP_CHECKSIGADD, OP_7, OP_EQUALVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_8, OP_SWAP, OP_CHECKSIGADD, OP_9, OP_EQUAL]), n + 1),
# n+1 OP_CHECKSIGs, but also one OP_CHECKSIG with an empty signature.
lambda n, pk: (CScript([OP_DROP, OP_0, pk, OP_CHECKSIG, OP_NOT, OP_VERIFY, pk] + [OP_2DUP, OP_CHECKSIG, OP_VERIFY] * n + [OP_CHECKSIG]), n + 1),
# n OP_CHECKSIGADDs and 1 OP_CHECKSIG, but also an OP_CHECKSIGADD with an empty signature.
lambda n, pk: (CScript([OP_DROP, OP_0, OP_10, pk, OP_CHECKSIGADD, OP_10, OP_EQUALVERIFY, pk] + [OP_2DUP, OP_16, OP_SWAP, OP_CHECKSIGADD, b'\x11', OP_EQUALVERIFY] * n + [OP_CHECKSIG]), n + 1),
]
for annex in [None, bytes([ANNEX_TAG]) + random_bytes(random.randrange(1000))]:
for hashtype in [SIGHASH_DEFAULT, SIGHASH_ALL]:
for pubkey in [pubs[1], random_bytes(random.choice([x for x in range(2, 81) if x != 32]))]:
for fn_num, fn in enumerate(SIGOPS_RATIO_SCRIPTS):
merkledepth = random.randrange(129)
def predict_sigops_ratio(n, dummy_size):
"""Predict whether spending fn(n, pubkey) with dummy_size will pass the ratio test."""
script, sigops = fn(n, pubkey)
# Predict the size of the witness for a given choice of n
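# Length prefixes use compact-size encoding (1 byte below 253, 3 bytes for 253..65535); a Schnorr signature is 64 bytes plus a hashtype byte when it is not SIGHASH_DEFAULT, and the control block is 33 + 32*merkledepth bytes.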
stacklen_size = 1
sig_size = 64 + (hashtype != SIGHASH_DEFAULT)
siglen_size = 1
dummylen_size = 1 + 2 * (dummy_size >= 253)
script_size = len(script)
scriptlen_size = 1 + 2 * (script_size >= 253)
control_size = 33 + 32 * merkledepth
controllen_size = 1 + 2 * (control_size >= 253)
annex_size = 0 if annex is None else len(annex)
annexlen_size = 0 if annex is None else 1 + 2 * (annex_size >= 253)
witsize = stacklen_size + sig_size + siglen_size + dummy_size + dummylen_size + script_size + scriptlen_size + control_size + controllen_size + annex_size + annexlen_size
# sigops ratio test
return witsize + 50 >= 50 * sigops
# Make sure n is high enough that with empty dummy, the script is not valid
n = 0
while predict_sigops_ratio(n, 0):
n += 1
# But allow picking a bit higher still
n += random.randrange(5)
# Now pick dummy size *just* large enough that the overall construction passes
dummylen = 0
while not predict_sigops_ratio(n, dummylen):
dummylen += 1
scripts = [("s", fn(n, pubkey)[0])]
for _ in range(merkledepth):
scripts = [scripts, random.choice(PARTNER_MERKLE_FN)]
tap = taproot_construct(pubs[0], scripts)
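# Relay policy: spends that carry an annex, push a dummy element larger than 80 bytes, or use a non-32-byte (unknown type) pubkey are nonstandard.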
standard = annex is None and dummylen <= 80 and len(pubkey) == 32
add_spender(spenders, "tapscript/sigopsratio_%i" % fn_num, tap=tap, leaf="s", annex=annex, hashtype=hashtype, key=secs[1], inputs=[getter("sign"), random_bytes(dummylen)], standard=standard, failure={"inputs": [getter("sign"), random_bytes(dummylen - 1)]}, **ERR_SIGOPS_RATIO)
# Future leaf versions
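# Leaf versions are even: the lowest bit of the control block's first byte encodes the output key parity, so only even values can appear as leaf versions.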
for leafver in range(0, 0x100, 2):
if leafver == LEAF_VERSION_TAPSCRIPT or leafver == ANNEX_TAG:
# Skip the defined LEAF_VERSION_TAPSCRIPT, and the ANNEX_TAG which is not usable as leaf version
continue
scripts = [
("bare_c0", CScript([OP_NOP])),
("bare_unkver", CScript([OP_NOP]), leafver),
("return_c0", CScript([OP_RETURN])),
("return_unkver", CScript([OP_RETURN]), leafver),
("undecodable_c0", CScript([OP_PUSHDATA1])),
("undecodable_unkver", CScript([OP_PUSHDATA1]), leafver),
("bigpush_c0", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP])),
("bigpush_unkver", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP]), leafver),
("1001push_c0", CScript([OP_0] * 1001)),
("1001push_unkver", CScript([OP_0] * 1001), leafver),
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "unkver/bare", standard=False, tap=tap, leaf="bare_unkver", failure={"leaf": "bare_c0"}, **ERR_CLEANSTACK)
add_spender(spenders, "unkver/return", standard=False, tap=tap, leaf="return_unkver", failure={"leaf": "return_c0"}, **ERR_OP_RETURN)
add_spender(spenders, "unkver/undecodable", standard=False, tap=tap, leaf="undecodable_unkver", failure={"leaf": "undecodable_c0"}, **ERR_UNDECODABLE)
add_spender(spenders, "unkver/bigpush", standard=False, tap=tap, leaf="bigpush_unkver", failure={"leaf": "bigpush_c0"}, **ERR_PUSH_LIMIT)
add_spender(spenders, "unkver/1001push", standard=False, tap=tap, leaf="1001push_unkver", failure={"leaf": "1001push_c0"}, **ERR_STACK_SIZE)
add_spender(spenders, "unkver/1001inputs", standard=False, tap=tap, leaf="bare_unkver", inputs=[b'']*1001, failure={"leaf": "bare_c0"}, **ERR_STACK_SIZE)
# OP_SUCCESSx tests.
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
for opval in range(76, 0x100):
opcode = CScriptOp(opval)
if not is_op_success(opcode):
continue
scripts = [
("bare_success", CScript([opcode])),
("bare_nop", CScript([OP_NOP])),
("unexecif_success", CScript([OP_0, OP_IF, opcode, OP_ENDIF])),
("unexecif_nop", CScript([OP_0, OP_IF, OP_NOP, OP_ENDIF])),
("return_success", CScript([OP_RETURN, opcode])),
("return_nop", CScript([OP_RETURN, OP_NOP])),
("undecodable_success", CScript([opcode, OP_PUSHDATA1])),
("undecodable_nop", CScript([OP_NOP, OP_PUSHDATA1])),
("undecodable_bypassed_success", CScript([OP_PUSHDATA1, OP_2, opcode])),
("bigpush_success", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, opcode])),
("bigpush_nop", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, OP_NOP])),
("1001push_success", CScript([OP_0] * 1001 + [opcode])),
("1001push_nop", CScript([OP_0] * 1001 + [OP_NOP])),
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "opsuccess/bare", standard=False, tap=tap, leaf="bare_success", failure={"leaf": "bare_nop"}, **ERR_CLEANSTACK)
add_spender(spenders, "opsuccess/unexecif", standard=False, tap=tap, leaf="unexecif_success", failure={"leaf": "unexecif_nop"}, **ERR_CLEANSTACK)
add_spender(spenders, "opsuccess/return", standard=False, tap=tap, leaf="return_success", failure={"leaf": "return_nop"}, **ERR_OP_RETURN)
add_spender(spenders, "opsuccess/undecodable", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_nop"}, **ERR_UNDECODABLE)
add_spender(spenders, "opsuccess/undecodable_bypass", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_bypassed_success"}, **ERR_UNDECODABLE)
add_spender(spenders, "opsuccess/bigpush", standard=False, tap=tap, leaf="bigpush_success", failure={"leaf": "bigpush_nop"}, **ERR_PUSH_LIMIT)
add_spender(spenders, "opsuccess/1001push", standard=False, tap=tap, leaf="1001push_success", failure={"leaf": "1001push_nop"}, **ERR_STACK_SIZE)
add_spender(spenders, "opsuccess/1001inputs", standard=False, tap=tap, leaf="bare_success", inputs=[b'']*1001, failure={"leaf": "bare_nop"}, **ERR_STACK_SIZE)
# Non-OP_SUCCESSx (verify that those aren't accidentally treated as OP_SUCCESSx)
for opval in range(0, 0x100):
opcode = CScriptOp(opval)
if is_op_success(opcode):
continue
scripts = [
("normal", CScript([OP_RETURN, opcode] + [OP_NOP] * 75)),
("op_success", CScript([OP_RETURN, CScriptOp(0x50)]))
]
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "alwaysvalid/notsuccessx", tap=tap, leaf="op_success", inputs=[], standard=False, failure={"leaf": "normal"}) # err_msg differs based on opcode
# == Legacy tests ==
# Also add a few legacy spends into the mix, so that transactions which combine taproot and pre-taproot spends get tested too.
for compressed in [False, True]:
eckey1 = ECKey()
eckey1.set(generate_privkey(), compressed)
pubkey1 = eckey1.get_pubkey().get_bytes()
eckey2 = ECKey()
eckey2.set(generate_privkey(), compressed)
for p2sh in [False, True]:
for witv0 in [False, True]:
for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]:
standard = (hashtype in VALID_SIGHASHES_ECDSA) and (compressed or not witv0)
add_spender(spenders, "legacy/pk-wrongkey", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([pubkey1, OP_CHECKSIG]), **SINGLE_SIG, key=eckey1, failure={"key": eckey2}, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
add_spender(spenders, "legacy/pkh-sighashflip", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, pkh=pubkey1, key=eckey1, **SIGHASH_BITFLIP, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
# Verify that OP_CHECKSIGADD wasn't accidentally added to pre-taproot validation logic.
for p2sh in [False, True]:
for witv0 in [False, True]:
for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]:
standard = hashtype in VALID_SIGHASHES_ECDSA and (p2sh or witv0)
add_spender(spenders, "compat/nocsa", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([OP_IF, OP_11, pubkey1, OP_CHECKSIGADD, OP_12, OP_EQUAL, OP_ELSE, pubkey1, OP_CHECKSIG, OP_ENDIF]), key=eckey1, sigops_weight=4-3*witv0, inputs=[getter("sign"), b''], failure={"inputs": [getter("sign"), b'\x01']}, **ERR_UNDECODABLE)
return spenders
def spenders_taproot_inactive():
"""Spenders for testing that pre-activation Taproot rules don't apply."""
spenders = []
sec = generate_privkey()
pub, _ = compute_xonly_pubkey(sec)
scripts = [
("pk", CScript([pub, OP_CHECKSIG])),
("future_leaf", CScript([pub, OP_CHECKSIG]), 0xc2),
("op_success", CScript([pub, OP_CHECKSIG, OP_0, OP_IF, CScriptOp(0x50), OP_ENDIF])),
]
tap = taproot_construct(pub, scripts)
# Test that keypath spending is valid & non-standard, regardless of validity.
add_spender(spenders, "inactive/keypath_valid", key=sec, tap=tap, standard=False)
add_spender(spenders, "inactive/keypath_invalidsig", key=sec, tap=tap, standard=False, sighash=bitflipper(default_sighash))
add_spender(spenders, "inactive/keypath_empty", key=sec, tap=tap, standard=False, witness=[])
# Same for scriptpath spending (and features like annex, leaf versions, or OP_SUCCESS don't change this)
add_spender(spenders, "inactive/scriptpath_valid", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")])
add_spender(spenders, "inactive/scriptpath_invalidsig", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
add_spender(spenders, "inactive/scriptpath_invalidcb", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")], controlblock=bitflipper(default_controlblock))
add_spender(spenders, "inactive/scriptpath_valid_unkleaf", key=sec, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")])
add_spender(spenders, "inactive/scriptpath_invalid_unkleaf", key=sec, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
add_spender(spenders, "inactive/scriptpath_valid_opsuccess", key=sec, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")])
add_spender(spenders, "inactive/scriptpath_valid_opsuccess", key=sec, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
return spenders
# Consensus validation flags to use in dumps for tests with "legacy/" or "inactive/" prefix.
LEGACY_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY"
# Consensus validation flags to use in dumps for all other tests.
TAPROOT_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY,TAPROOT"
def dump_json_test(tx, input_utxos, idx, success, failure):
spender = input_utxos[idx].spender
# Determine flags to dump
flags = LEGACY_FLAGS if spender.comment.startswith("legacy/") or spender.comment.startswith("inactive/") else TAPROOT_FLAGS
fields = [
("tx", tx.serialize().hex()),
("prevouts", [x.output.serialize().hex() for x in input_utxos]),
("index", idx),
("flags", flags),
("comment", spender.comment)
]
# The "final" field indicates that a spend should be always valid, even with more validation flags enabled
# than the listed ones. Use standardness as a proxy for this (which gives a conservative underestimate).
if spender.is_standard:
fields.append(("final", True))
def dump_witness(wit):
return OrderedDict([("scriptSig", wit[0].hex()), ("witness", [x.hex() for x in wit[1]])])
if success is not None:
fields.append(("success", dump_witness(success)))
if failure is not None:
fields.append(("failure", dump_witness(failure)))
# Write the dump to $TEST_DUMP_DIR/x/xyz... where x,y,z,... are the SHA1 sum of the dump (which makes the
# file naming scheme compatible with fuzzing infrastructure).
dump = json.dumps(OrderedDict(fields)) + ",\n"
sha1 = hashlib.sha1(dump.encode("utf-8")).hexdigest()
dirname = os.environ.get("TEST_DUMP_DIR", ".") + ("/%s" % sha1[0])
os.makedirs(dirname, exist_ok=True)
with open(dirname + ("/%s" % sha1), 'w', encoding="utf8") as f:
f.write(dump)
# Data type to keep track of UTXOs, where they were created, and how to spend them.
UTXOData = namedtuple('UTXOData', 'outpoint,output,spender')
class TaprootTest(DevcoinTestFramework):
def add_options(self, parser):
parser.add_argument("--dumptests", dest="dump_tests", default=False, action="store_true",
help="Dump generated test cases to directory set by TEST_DUMP_DIR environment variable")
parser.add_argument("--previous_release", dest="previous_release", default=False, action="store_true",
help="Use a previous release as taproot-inactive node")
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
if self.options.previous_release:
self.skip_if_no_previous_releases()
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
# Node 0 has Taproot inactive, Node 1 active.
self.extra_args = [["-par=1"], ["-par=1"]]
if self.options.previous_release:
self.wallet_names = [None, self.default_wallet_name]
else:
self.extra_args[0].append("-vbparams=taproot:1:1")
def setup_nodes(self):
self.add_nodes(self.num_nodes, self.extra_args, versions=[
200100 if self.options.previous_release else None,
None,
])
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
def block_submit(self, node, txs, msg, err_msg, cb_pubkey=None, fees=0, sigops_weight=0, witness=False, accept=False):
# Deplete block of any non-tapscript sigops using a single additional 0-value coinbase output.
# It is not impossible to fit enough tapscript sigops to hit the old 80k limit without
# busting txin-level limits. We simply have to account for the p2pk outputs in all
# transactions.
extra_output_script = CScript([OP_CHECKSIG]*((MAX_BLOCK_SIGOPS_WEIGHT - sigops_weight) // WITNESS_SCALE_FACTOR))
block = create_block(self.tip, create_coinbase(self.lastblockheight + 1, pubkey=cb_pubkey, extra_output_script=extra_output_script, fees=fees), self.lastblocktime + 1)
block.nVersion = 4
for tx in txs:
tx.rehash()
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
witness and add_witness_commitment(block)
block.rehash()
block.solve()
block_response = node.submitblock(block.serialize().hex())
if err_msg is not None:
assert block_response is not None and err_msg in block_response, "Missing error message '%s' from block response '%s': %s" % (err_msg, "(None)" if block_response is None else block_response, msg)
if accept:
assert node.getbestblockhash() == block.hash, "Failed to accept: %s (response: %s)" % (msg, block_response)
self.tip = block.sha256
self.lastblockhash = block.hash
self.lastblocktime += 1
self.lastblockheight += 1
else:
assert node.getbestblockhash() == self.lastblockhash, "Failed to reject: " + msg
def test_spenders(self, node, spenders, input_counts):
"""Run randomized tests with a number of "spenders".
Steps:
1) Generate an appropriate UTXO for each spender to test spend conditions
2) Generate a batch of random addresses of all wallet types: pkh/sh_wpkh/wpkh
3) Select random number of inputs from (1)
4) Select random number of addresses from (2) as outputs
Each spender embodies a test; in a large randomized test, it is verified
that toggling the valid argument to each lambda toggles the validity of
the transaction. This is accomplished by constructing transactions consisting
of all valid inputs, except one invalid one.
"""
# Construct a bunch of sPKs that send coins back to the host wallet
self.log.info("- Constructing addresses for returning coins")
host_spks = []
host_pubkeys = []
for i in range(16):
addr = node.getnewaddress(address_type=random.choice(["legacy", "p2sh-segwit", "bech32"]))
info = node.getaddressinfo(addr)
spk = bytes.fromhex(info['scriptPubKey'])
host_spks.append(spk)
host_pubkeys.append(bytes.fromhex(info['pubkey']))
# Initialize variables used by block_submit().
self.lastblockhash = node.getbestblockhash()
self.tip = int(self.lastblockhash, 16)
block = node.getblock(self.lastblockhash)
self.lastblockheight = block['height']
self.lastblocktime = block['time']
# Create transactions spending up to 50 of the wallet's inputs, with one output for each spender, and
# one change output at the end. The transaction is constructed on the Python side to enable
# having multiple outputs to the same address and outputs with no assigned address. The wallet
# is then asked to sign it through signrawtransactionwithwallet, and then added to a block on the
# Python side (to bypass standardness rules).
self.log.info("- Creating test UTXOs...")
random.shuffle(spenders)
normal_utxos = []
mismatching_utxos = [] # UTXOs with input that requires mismatching output position
done = 0
while done < len(spenders):
# Compute how many UTXOs to create with this transaction
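# Aim for roughly a fifth of all spenders per funding transaction, capped by what is still needed and by 10000 outputs.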
count_this_tx = min(len(spenders) - done, (len(spenders) + 4) // 5, 10000)
fund_tx = CTransaction()
# Add the 50 highest-value inputs
unspents = node.listunspent()
random.shuffle(unspents)
unspents.sort(key=lambda x: int(x["amount"] * 100000000), reverse=True)
if len(unspents) > 50:
unspents = unspents[:50]
random.shuffle(unspents)
balance = 0
for unspent in unspents:
balance += int(unspent["amount"] * 100000000)
txid = int(unspent["txid"], 16)
fund_tx.vin.append(CTxIn(COutPoint(txid, int(unspent["vout"])), CScript()))
# Add outputs
cur_progress = done / len(spenders)
next_progress = (done + count_this_tx) / len(spenders)
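# The change targets telescope across funding transactions so that roughly 40% of the starting balance is still left as change once every spender has been funded.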
change_goal = (1.0 - 0.6 * next_progress) / (1.0 - 0.6 * cur_progress) * balance
self.log.debug("Create %i UTXOs in a transaction spending %i inputs worth %.8f (sending ~%.8f to change)" % (count_this_tx, len(unspents), balance * 0.00000001, change_goal * 0.00000001))
for i in range(count_this_tx):
avg = (balance - change_goal) / (count_this_tx - i)
amount = int(random.randrange(int(avg*0.85 + 0.5), int(avg*1.15 + 0.5)) + 0.5)
balance -= amount
fund_tx.vout.append(CTxOut(amount, spenders[done + i].script))
# Add change
fund_tx.vout.append(CTxOut(balance - 10000, random.choice(host_spks)))
# Ask the wallet to sign
ss = BytesIO(bytes.fromhex(node.signrawtransactionwithwallet(fund_tx.serialize().hex())["hex"]))
fund_tx.deserialize(ss)
# Construct UTXOData entries
fund_tx.rehash()
for i in range(count_this_tx):
utxodata = UTXOData(outpoint=COutPoint(fund_tx.sha256, i), output=fund_tx.vout[i], spender=spenders[done])
if utxodata.spender.need_vin_vout_mismatch:
mismatching_utxos.append(utxodata)
else:
normal_utxos.append(utxodata)
done += 1
# Mine into a block
self.block_submit(node, [fund_tx], "Funding tx", None, random.choice(host_pubkeys), 10000, MAX_BLOCK_SIGOPS_WEIGHT, True, True)
# Consume groups of choice(input_coins) from utxos in a tx, testing the spenders.
self.log.info("- Running %i spending tests" % done)
random.shuffle(normal_utxos)
random.shuffle(mismatching_utxos)
assert done == len(normal_utxos) + len(mismatching_utxos)
left = done
while left:
# Construct CTransaction with random nVersion, nLocktime
tx = CTransaction()
tx.nVersion = random.choice([1, 2, random.randint(-0x80000000, 0x7fffffff)])
min_sequence = (tx.nVersion != 1 and tx.nVersion != 0) * 0x80000000 # The minimum sequence number to disable relative locktime
if random.choice([True, False]):
tx.nLockTime = random.randrange(LOCKTIME_THRESHOLD, self.lastblocktime - 7200) # all absolute locktimes in the past
else:
tx.nLockTime = random.randrange(self.lastblockheight + 1) # all block heights in the past
# Decide how many UTXOs to test with.
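# Only allow counts that leave either more than a full batch of UTXOs behind, or a remainder that is itself zero or an allowed count, so every UTXO ends up being consumed.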
acceptable = [n for n in input_counts if n <= left and (left - n > max(input_counts) or (left - n) in [0] + input_counts)]
num_inputs = random.choice(acceptable)
# If we have UTXOs that require mismatching inputs/outputs left, include exactly one of those
# unless there is only one normal UTXO left (as tests with mismatching UTXOs require at least one
# normal UTXO to go in the first position), and we don't want to run out of normal UTXOs.
input_utxos = []
while len(mismatching_utxos) and (len(input_utxos) == 0 or len(normal_utxos) == 1):
input_utxos.append(mismatching_utxos.pop())
left -= 1
# Top up until we hit num_inputs (but include at least one normal UTXO always).
for _ in range(max(1, num_inputs - len(input_utxos))):
input_utxos.append(normal_utxos.pop())
left -= 1
# The first input cannot require a mismatching output (as there is at least one output).
while True:
random.shuffle(input_utxos)
if not input_utxos[0].spender.need_vin_vout_mismatch:
break
first_mismatch_input = None
for i in range(len(input_utxos)):
if input_utxos[i].spender.need_vin_vout_mismatch:
first_mismatch_input = i
assert first_mismatch_input is None or first_mismatch_input > 0
# Decide fee, and add CTxIns to tx.
amount = sum(utxo.output.nValue for utxo in input_utxos)
fee = min(random.randrange(MIN_FEE * 2, MIN_FEE * 4), amount - DUST_LIMIT) # 10000-20000 sat fee
in_value = amount - fee
tx.vin = [CTxIn(outpoint=utxo.outpoint, nSequence=random.randint(min_sequence, 0xffffffff)) for utxo in input_utxos]
tx.wit.vtxinwit = [CTxInWitness() for _ in range(len(input_utxos))]
sigops_weight = sum(utxo.spender.sigops_weight for utxo in input_utxos)
self.log.debug("Test: %s" % (", ".join(utxo.spender.comment for utxo in input_utxos)))
# Add 1 to 4 random outputs (but constrained by inputs that require mismatching outputs)
num_outputs = random.choice(range(1, 1 + min(4, 4 if first_mismatch_input is None else first_mismatch_input)))
assert in_value >= 0 and fee - num_outputs * DUST_LIMIT >= MIN_FEE
for i in range(num_outputs):
tx.vout.append(CTxOut())
if in_value <= DUST_LIMIT:
tx.vout[-1].nValue = DUST_LIMIT
elif i < num_outputs - 1:
tx.vout[-1].nValue = in_value
else:
tx.vout[-1].nValue = random.randint(DUST_LIMIT, in_value)
in_value -= tx.vout[-1].nValue
tx.vout[-1].scriptPubKey = random.choice(host_spks)
sigops_weight += CScript(tx.vout[-1].scriptPubKey).GetSigOpCount(False) * WITNESS_SCALE_FACTOR
fee += in_value
assert fee >= 0
# Select coinbase pubkey
cb_pubkey = random.choice(host_pubkeys)
sigops_weight += 1 * WITNESS_SCALE_FACTOR
# Precompute one satisfying and one failing scriptSig/witness for each input.
input_data = []
for i in range(len(input_utxos)):
fn = input_utxos[i].spender.sat_function
fail = None
success = fn(tx, i, [utxo.output for utxo in input_utxos], True)
if not input_utxos[i].spender.no_fail:
fail = fn(tx, i, [utxo.output for utxo in input_utxos], False)
input_data.append((fail, success))
if self.options.dump_tests:
dump_json_test(tx, input_utxos, i, success, fail)
# Sign each input incorrectly once on each complete signing pass, except the very last.
for fail_input in list(range(len(input_utxos))) + [None]:
# Skip trying to fail at spending something that can't be made to fail.
if fail_input is not None and input_utxos[fail_input].spender.no_fail:
continue
# Expected message with each input failure; may be None (which is ignored)
expected_fail_msg = None if fail_input is None else input_utxos[fail_input].spender.err_msg
# Fill inputs/witnesses
for i in range(len(input_utxos)):
tx.vin[i].scriptSig = input_data[i][i != fail_input][0]
tx.wit.vtxinwit[i].scriptWitness.stack = input_data[i][i != fail_input][1]
# Submit to mempool to check standardness
is_standard_tx = fail_input is None and all(utxo.spender.is_standard for utxo in input_utxos) and tx.nVersion >= 1 and tx.nVersion <= 2
tx.rehash()
msg = ','.join(utxo.spender.comment + ("*" if n == fail_input else "") for n, utxo in enumerate(input_utxos))
if is_standard_tx:
node.sendrawtransaction(tx.serialize().hex(), 0)
assert node.getmempoolentry(tx.hash) is not None, "Failed to accept into mempool: " + msg
else:
assert_raises_rpc_error(-26, None, node.sendrawtransaction, tx.serialize().hex(), 0)
# Submit in a block
self.block_submit(node, [tx], msg, witness=True, accept=fail_input is None, cb_pubkey=cb_pubkey, fees=fee, sigops_weight=sigops_weight, err_msg=expected_fail_msg)
if (len(spenders) - left) // 200 > (len(spenders) - left - len(input_utxos)) // 200:
self.log.info(" - %i tests done" % (len(spenders) - left))
assert left == 0
assert len(normal_utxos) == 0
assert len(mismatching_utxos) == 0
self.log.info(" - Done")
def run_test(self):
# Post-taproot activation tests go first (pre-taproot tests' blocks are invalid post-taproot).
self.log.info("Post-activation tests...")
self.nodes[1].generate(COINBASE_MATURITY + 1)
self.test_spenders(self.nodes[1], spenders_taproot_active(), input_counts=[1, 2, 2, 2, 2, 3])
# Re-connect nodes in case they have been disconnected
self.disconnect_nodes(0, 1)
self.connect_nodes(0, 1)
# Transfer value of the largest 500 coins to pre-taproot node.
addr = self.nodes[0].getnewaddress()
unsp = self.nodes[1].listunspent()
unsp = sorted(unsp, key=lambda i: i['amount'], reverse=True)
unsp = unsp[:500]
rawtx = self.nodes[1].createrawtransaction(
inputs=[{
'txid': i['txid'],
'vout': i['vout']
} for i in unsp],
outputs={addr: sum(i['amount'] for i in unsp)}
)
rawtx = self.nodes[1].signrawtransactionwithwallet(rawtx)['hex']
# Mine a block with the transaction
block = create_block(tmpl=self.nodes[1].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS), txlist=[rawtx])
add_witness_commitment(block)
block.rehash()
block.solve()
assert_equal(None, self.nodes[1].submitblock(block.serialize().hex()))
self.sync_blocks()
# Pre-taproot activation tests.
self.log.info("Pre-activation tests...")
# Run each test twice; once in isolation, and once combined with others. Testing in isolation
# means that the standardness is verified in every test (as combined transactions are only standard
# when all their inputs are standard).
self.test_spenders(self.nodes[0], spenders_taproot_inactive(), input_counts=[1])
self.test_spenders(self.nodes[0], spenders_taproot_inactive(), input_counts=[2, 3])
if __name__ == '__main__':
TaprootTest().main()
|
py | 1a4e10375c3142a0f83e8da0bdd7d81443201d16 | from .trie import * # noqa: F401, F403
|
py | 1a4e123a6ad29edf25bd3c8385368076c482fdb9 | # Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cupy as cp
from string import Template
from ..utils._caches import _cupy_kernel_cache
_SUPPORTED_TYPES = ["float32", "float64"]
# Custom Cupy raw kernel
# Matthew Nicely - [email protected]
cuda_code_cupy_v7 = Template(
"""
extern "C" {
// Compute linalg.inv(S)
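// Gauss-Jordan elimination in shared memory: swap rows based on the first column, eliminate
// the off-diagonal entries of every column, then divide each row by its diagonal element.
// The same row operations are applied to an identity matrix (s_ZZ_I), which ends up holding the inverse.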
__device__ ${T} inverse(
const int & ltx,
const int & lty,
const int & ltz,
${T}(&s_ZZ_A)[${BLOCKS}][${DIM_Z}][${DIM_Z}],
${T}(&s_ZZ_I)[${BLOCKS}][${DIM_Z}][${DIM_Z}]) {
${T} temp {};
// Interchange the row of matrix
if ( lty == 0 && ltx < ${DIM_Z}) {
#pragma unroll ( ${DIM_Z} - 1 )
for ( int i = ${DIM_Z} - 1; i > 0; i-- ) {
if ( s_ZZ_A[ltz][i - 1][0] < s_ZZ_A[ltz][i][0] ) {
temp = s_ZZ_A[ltz][i][ltx];
s_ZZ_A[ltz][i][ltx] = s_ZZ_A[ltz][i - 1][ltx];
s_ZZ_A[ltz][i - 1][ltx] = temp;
temp = s_ZZ_I[ltz][i][ltx];
s_ZZ_I[ltz][i][ltx] = s_ZZ_I[ltz][i - 1][ltx];
s_ZZ_I[ltz][i - 1][ltx] = temp;
}
}
}
// Replace a row by sum of itself and a
// constant multiple of another row of the matrix
#pragma unroll ${DIM_Z}
for ( int i = 0; i < ${DIM_Z}; i++ ) {
if ( lty < ${DIM_Z} && ltx < ${DIM_Z} ) {
if ( lty != i ) {
temp = s_ZZ_A[ltz][lty][i] / s_ZZ_A[ltz][i][i];
}
}
__syncthreads();
if ( lty < ${DIM_Z} && ltx < ${DIM_Z} ) {
if ( lty != i ) {
s_ZZ_A[ltz][lty][ltx] -= s_ZZ_A[ltz][i][ltx] * temp;
s_ZZ_I[ltz][lty][ltx] -= s_ZZ_I[ltz][i][ltx] * temp;
}
}
__syncthreads();
}
if ( lty < ${DIM_Z} && ltx < ${DIM_Z} ) {
// Multiply each row by a nonzero integer.
// Divide row element by the diagonal element
temp = s_ZZ_A[ltz][lty][lty];
}
__syncthreads();
if ( lty < ${DIM_Z} && ltx < ${DIM_Z} ) {
s_ZZ_A[ltz][lty][ltx] = s_ZZ_A[ltz][lty][ltx] / temp;
s_ZZ_I[ltz][lty][ltx] = s_ZZ_I[ltz][lty][ltx] / temp;
}
__syncthreads();
return ( s_ZZ_I[ltz][lty][ltx] );
}
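// Predict step: x = F * x (the B * u control term is currently commented out) and
// P = alpha_sq * F * P * F^T + Q, i.e. a fading-memory variant of the Kalman predict.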
__global__ void __launch_bounds__(${MAX_TPB}) _cupy_predict(
const int num_points,
const ${T} * __restrict__ alpha_sq,
${T} * __restrict__ x_in,
const ${T} * __restrict__ u,
const ${T} * __restrict__ B,
const ${T} * __restrict__ F,
${T} * __restrict__ P,
const ${T} * __restrict__ Q,
const bool skip
) {
__shared__ ${T} s_XX_A[${BLOCKS}][${DIM_X}][${DIM_X}];
__shared__ ${T} s_XX_F[${BLOCKS}][${DIM_X}][${DIM_X}];
__shared__ ${T} s_XX_P[${BLOCKS}][${DIM_X}][${DIM_X}];
const auto ltx = threadIdx.x;
const auto lty = threadIdx.y;
const auto ltz = threadIdx.z;
const int btz {
static_cast<int>(blockIdx.z * blockDim.z + threadIdx.z) };
const int stride_z { static_cast<int>( blockDim.z * gridDim.z ) };
const int x_value { lty * ${DIM_X} + ltx };
for ( int gtz = btz; gtz < num_points; gtz += stride_z ) {
s_XX_F[ltz][lty][ltx] = F[gtz * ${DIM_X} * ${DIM_X} + x_value];
__syncthreads();
${T} alpha2 { alpha_sq[gtz] };
${T} localQ { Q[gtz * ${DIM_X} * ${DIM_X} + x_value] };
${T} localP { P[gtz * ${DIM_X} * ${DIM_X} + x_value] };
${T} temp {};
//${T} temp2 {};
/*
if ( !skip ) {
// Compute self.x = dot(B, u)
if ( ltx == 0 ) {
#pragma unroll ${DIM_U}
for ( int j = 0; j < ${DIM_U}; j++ ) {
temp2 += B[gtz * ${DIM_X} * ${DIM_U} +
lty * ${DIM_U} + j] *
u[gtz * ${DIM_U} + j];
}
printf("%d: %f\\n", lty, temp2);
}
}
*/
// Compute self.x = dot(F, self.x)
if ( ltx == 0 ) {
#pragma unroll ${DIM_X}
for ( int j = 0; j < ${DIM_X}; j++ ) {
temp += s_XX_F[ltz][lty][j] *
x_in[gtz * ${DIM_X} + j + ltx];
}
// x_in[gtz * ${DIM_X} * 1 + lty * 1 + ltx]
//x_in[gtz * ${DIM_X} + lty + ltx] = temp + temp2;
x_in[gtz * ${DIM_X} + lty + ltx] = temp;
}
s_XX_P[ltz][lty][ltx] = localP;
__syncthreads();
// Compute dot(F, self.P)
temp = 0.0;
#pragma unroll ${DIM_X}
for ( int j = 0; j < ${DIM_X}; j++ ) {
temp += s_XX_F[ltz][lty][j] *
s_XX_P[ltz][j][ltx];
}
s_XX_A[ltz][lty][ltx] = temp;
__syncthreads();
// Compute dot(dot(F, self.P), F.T)
temp = 0.0;
#pragma unroll ${DIM_X}
for ( int j = 0; j < ${DIM_X}; j++ ) {
temp += s_XX_A[ltz][lty][j] * //133
s_XX_F[ltz][ltx][j];
}
__syncthreads();
// Compute self._alpha_sq * dot(dot(F, self.P), F.T) + Q
// Where temp = dot(dot(F, self.P), F.T)
P[gtz * ${DIM_X} * ${DIM_X} + x_value] =
alpha2 * temp + localQ;
}
}
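// Update step: y = z - H * x, S = H * P * H^T + R, K = P * H^T * S^-1, x += K * y, and
// P = (I - K * H) * P * (I - K * H)^T + K * R * K^T (the Joseph-form covariance update).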
__global__ void __launch_bounds__(${MAX_TPB}) _cupy_update(
const int num_points,
${T} * __restrict__ x_in,
const ${T} * __restrict__ z_in,
const ${T} * __restrict__ H,
${T} * __restrict__ P,
const ${T} * __restrict__ R
) {
__shared__ ${T} s_XX_A[${BLOCKS}][${DIM_X}][${DIM_X}];
__shared__ ${T} s_XX_B[${BLOCKS}][${DIM_X}][${DIM_X}];
__shared__ ${T} s_XX_P[${BLOCKS}][${DIM_X}][${DIM_X}];
__shared__ ${T} s_ZX_H[${BLOCKS}][${DIM_Z}][${DIM_X}];
__shared__ ${T} s_XZ_K[${BLOCKS}][${DIM_X}][${DIM_Z}];
__shared__ ${T} s_XZ_A[${BLOCKS}][${DIM_X}][${DIM_Z}];
__shared__ ${T} s_ZZ_A[${BLOCKS}][${DIM_Z}][${DIM_Z}];
__shared__ ${T} s_ZZ_R[${BLOCKS}][${DIM_Z}][${DIM_Z}];
__shared__ ${T} s_ZZ_I[${BLOCKS}][${DIM_Z}][${DIM_Z}];
__shared__ ${T} s_Z1_y[${BLOCKS}][${DIM_Z}][1];
const auto ltx = threadIdx.x;
const auto lty = threadIdx.y;
const auto ltz = threadIdx.z;
const int btz {
static_cast<int>( blockIdx.z * blockDim.z + threadIdx.z ) };
const int stride_z { static_cast<int>( blockDim.z * gridDim.z ) };
const int x_value { lty * ${DIM_X} + ltx };
const int z_value { lty * ${DIM_Z} + ltx };
for ( int gtz = btz; gtz < num_points; gtz += stride_z ) {
if ( lty < ${DIM_Z} ) {
s_ZX_H[ltz][lty][ltx] =
H[gtz * ${DIM_Z} * ${DIM_X} + x_value];
}
__syncthreads();
s_XX_P[ltz][lty][ltx] = P[gtz * ${DIM_X} * ${DIM_X} + x_value];
if ( ( lty < ${DIM_Z} ) && ( ltx < ${DIM_Z} ) ) {
s_ZZ_R[ltz][lty][ltx] =
R[gtz * ${DIM_Z} * ${DIM_Z} + z_value];
if ( lty == ltx ) {
s_ZZ_I[ltz][lty][ltx] = 1.0;
} else {
s_ZZ_I[ltz][lty][ltx] = 0.0;
}
}
${T} temp {};
// Compute self.y : z = dot(self.H, self.x) --> Z1
if ( ( ltx == 0 ) && ( lty < ${DIM_Z} ) ) {
${T} temp_z { z_in[gtz * ${DIM_Z} + lty] };
#pragma unroll ${DIM_X}
for ( int j = 0; j < ${DIM_X}; j++ ) {
temp += s_ZX_H[ltz][lty][j] *
x_in[gtz * ${DIM_X} + j];
}
s_Z1_y[ltz][lty][ltx] = temp_z - temp;
}
__syncthreads();
// Compute PHT : dot(self.P, self.H.T) --> XZ
temp = 0.0;
if ( ltx < ${DIM_Z} ) {
#pragma unroll ${DIM_X}
for ( int j = 0; j < ${DIM_X}; j++ ) {
temp += s_XX_P[ltz][lty][j] *
s_ZX_H[ltz][ltx][j];
}
// s_XX_A holds PHT
s_XZ_A[ltz][lty][ltx] = temp;
}
__syncthreads();
// Compute self.S : dot(self.H, PHT) + self.R --> ZZ
temp = 0.0;
if ( ( ltx < ${DIM_Z} ) && ( lty < ${DIM_Z} ) ) {
#pragma unroll ${DIM_X}
for ( int j = 0; j < ${DIM_X}; j++ ) {
temp += s_ZX_H[ltz][lty][j] *
s_XZ_A[ltz][j][ltx];
}
// s_XX_B holds S - system uncertainty
s_ZZ_A[ltz][lty][ltx] = temp + s_ZZ_R[ltz][lty][ltx];
}
__syncthreads();
// Compute matrix inversion
temp = inverse(ltx, lty, ltz, s_ZZ_A, s_ZZ_I);
__syncthreads();
if ( ( ltx < ${DIM_Z} ) && ( lty < ${DIM_Z} ) ) {
// s_XX_B hold SI - inverse system uncertainty
s_ZZ_A[ltz][lty][ltx] = temp;
}
__syncthreads();
// Compute self.K : dot(PHT, self.SI) --> ZZ
// kalman gain
temp = 0.0;
if ( ltx < ${DIM_Z} ) {
#pragma unroll ${DIM_Z}
for ( int j = 0; j < ${DIM_Z}; j++ ) {
temp += s_XZ_A[ltz][lty][j] *
s_ZZ_A[ltz][ltx][j];
}
s_XZ_K[ltz][lty][ltx] = temp;
}
__syncthreads();
// Compute self.x : self.x + cp.dot(self.K, self.y) --> X1
temp = 0.0;
if ( ltx == 0 ) {
#pragma unroll ${DIM_Z}
for ( int j = 0; j < ${DIM_Z}; j++ ) {
temp += s_XZ_K[ltz][lty][j] *
s_Z1_y[ltz][j][ltx];
}
x_in[gtz * ${DIM_X} * 1 + lty * 1 + ltx] += temp;
}
// Compute I_KH = self_I - dot(self.K, self.H) --> XX
temp = 0.0;
#pragma unroll ${DIM_Z}
for ( int j = 0; j < ${DIM_Z}; j++ ) {
temp += s_XZ_K[ltz][lty][j] *
s_ZX_H[ltz][j][ltx];
}
// s_XX_A holds I_KH
s_XX_A[ltz][lty][ltx] = ( ( ltx == lty ) ? 1 : 0 ) - temp;
__syncthreads();
// Compute self.P = dot(dot(I_KH, self.P), I_KH.T) +
// dot(dot(self.K, self.R), self.K.T)
// Compute dot(I_KH, self.P) --> XX
temp = 0.0;
#pragma unroll ${DIM_X}
for ( int j = 0; j < ${DIM_X}; j++ ) {
temp += s_XX_A[ltz][lty][j] *
s_XX_P[ltz][j][ltx];
}
s_XX_B[ltz][lty][ltx] = temp;
__syncthreads();
// Compute dot(dot(I_KH, self.P), I_KH.T) --> XX
temp = 0.0;
#pragma unroll ${DIM_X}
for ( int j = 0; j < ${DIM_X}; j++ ) {
temp += s_XX_B[ltz][lty][j] *
s_XX_A[ltz][ltx][j];
}
s_XX_P[ltz][lty][ltx] = temp;
// Compute dot(self.K, self.R) --> XZ
temp = 0.0;
if ( ltx < ${DIM_Z} ) {
#pragma unroll ${DIM_Z}
for ( int j = 0; j < ${DIM_Z}; j++ ) {
temp += s_XZ_K[ltz][lty][j] *
s_ZZ_R[ltz][j][ltx];
}
// s_XZ_A holds dot(self.K, self.R)
s_XZ_A[ltz][lty][ltx] = temp;
}
__syncthreads();
// Compute dot(dot(self.K, self.R), self.K.T) --> XX
temp = 0.0;
#pragma unroll ${DIM_Z}
for ( int j = 0; j < ${DIM_Z}; j++ ) {
temp += s_XZ_A[ltz][lty][j] *
s_XZ_K[ltz][ltx][j];
}
P[gtz * ${DIM_X} * ${DIM_X} + x_value] =
s_XX_P[ltz][lty][ltx] + temp;
}
}
}
"""
)
cuda_code_cupy_v8 = """
// Compute linalg.inv(S)
template<typename T, int BLOCKS, int DIM_Z>
__device__ T inverse(
const int & ltx,
const int & lty,
const int & ltz,
T(&s_ZZ_A)[BLOCKS][DIM_Z][DIM_Z],
T(&s_ZZ_I)[BLOCKS][DIM_Z][DIM_Z]) {
T temp {};
// Interchange the row of matrix
if ( lty == 0 && ltx < DIM_Z) {
#pragma unroll ( DIM_Z - 1 )
for ( int i = DIM_Z - 1; i > 0; i-- ) {
if ( s_ZZ_A[ltz][i - 1][0] < s_ZZ_A[ltz][i][0] ) {
temp = s_ZZ_A[ltz][i][ltx];
s_ZZ_A[ltz][i][ltx] = s_ZZ_A[ltz][i - 1][ltx];
s_ZZ_A[ltz][i - 1][ltx] = temp;
temp = s_ZZ_I[ltz][i][ltx];
s_ZZ_I[ltz][i][ltx] = s_ZZ_I[ltz][i - 1][ltx];
s_ZZ_I[ltz][i - 1][ltx] = temp;
}
}
}
// Replace a row by sum of itself and a
// constant multiple of another row of the matrix
#pragma unroll DIM_Z
for ( int i = 0; i < DIM_Z; i++ ) {
if ( lty < DIM_Z && ltx < DIM_Z ) {
if ( lty != i ) {
temp = s_ZZ_A[ltz][lty][i] / s_ZZ_A[ltz][i][i];
}
}
__syncthreads();
if ( lty < DIM_Z && ltx < DIM_Z ) {
if ( lty != i ) {
s_ZZ_A[ltz][lty][ltx] -= s_ZZ_A[ltz][i][ltx] * temp;
s_ZZ_I[ltz][lty][ltx] -= s_ZZ_I[ltz][i][ltx] * temp;
}
}
__syncthreads();
}
if ( lty < DIM_Z && ltx < DIM_Z ) {
// Multiply each row by a nonzero scalar.
// Divide row element by the diagonal element
temp = s_ZZ_A[ltz][lty][lty];
}
__syncthreads();
if ( lty < DIM_Z && ltx < DIM_Z ) {
s_ZZ_A[ltz][lty][ltx] = s_ZZ_A[ltz][lty][ltx] / temp;
s_ZZ_I[ltz][lty][ltx] = s_ZZ_I[ltz][lty][ltx] / temp;
}
__syncthreads();
return ( s_ZZ_I[ltz][lty][ltx] );
}
template<typename T, int BLOCKS, int DIM_X, int DIM_U, int MAX_TPB>
__global__ void __launch_bounds__(MAX_TPB) _cupy_predict(
const int num_points,
const T * __restrict__ alpha_sq,
T * __restrict__ x_in,
const T * __restrict__ u,
const T * __restrict__ B,
const T * __restrict__ F,
T * __restrict__ P,
const T * __restrict__ Q,
const bool skip
) {
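// Kalman predict step, one filter per (block z-slot, grid z) pair:
//   x <- F x                      (the B u term exists only as commented-out code below)
//   P <- alpha_sq * (F P F') + Q
// F and P for the current filter are staged in shared memory; threads with
// ltx == 0 update the state vector, and every thread computes one
// (lty, ltx) element of the DIM_X x DIM_X covariance products.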
__shared__ T s_XX_A[BLOCKS][DIM_X][DIM_X];
__shared__ T s_XX_F[BLOCKS][DIM_X][DIM_X];
__shared__ T s_XX_P[BLOCKS][DIM_X][DIM_X];
const auto ltx = threadIdx.x;
const auto lty = threadIdx.y;
const auto ltz = threadIdx.z;
const int btz { static_cast<int>(blockIdx.z * blockDim.z + threadIdx.z) };
const int stride_z { static_cast<int>( blockDim.z * gridDim.z ) };
const int x_value { lty * DIM_X + ltx };
for ( int gtz = btz; gtz < num_points; gtz += stride_z ) {
s_XX_F[ltz][lty][ltx] = F[gtz * DIM_X * DIM_X + x_value];
__syncthreads();
T alpha2 { alpha_sq[gtz] };
T localQ { Q[gtz * DIM_X * DIM_X + x_value] };
T localP { P[gtz * DIM_X * DIM_X + x_value] };
T temp {};
//T temp2 {};
/*
if ( !skip ) {
// Compute self.x = dot(B, u)
if ( ltx == 0 ) {
#pragma unroll DIM_U
for ( int j = 0; j < DIM_U; j++ ) {
temp2 += B[gtz * DIM_X * DIM_U + lty * DIM_U + j] *
u[gtz * DIM_U + j];
}
printf("%d: %f\\n", lty, temp2);
}
}
*/
// Compute self.x = dot(F, self.x)
if ( ltx == 0 ) {
#pragma unroll DIM_X
for ( int j = 0; j < DIM_X; j++ ) {
temp += s_XX_F[ltz][lty][j] *
x_in[gtz * DIM_X + j + ltx];
}
// x_in[gtz * DIM_X * 1 + lty * 1 + ltx]
//x_in[gtz * DIM_X + lty + ltx] = temp + temp2;
x_in[gtz * DIM_X + lty + ltx] = temp;
}
s_XX_P[ltz][lty][ltx] = localP;
__syncthreads();
// Compute dot(F, self.P)
temp = 0.0;
#pragma unroll DIM_X
for ( int j = 0; j < DIM_X; j++ ) {
temp += s_XX_F[ltz][lty][j] *
s_XX_P[ltz][j][ltx];
}
s_XX_A[ltz][lty][ltx] = temp;
__syncthreads();
// Compute dot(dot(F, self.P), F.T)
temp = 0.0;
#pragma unroll DIM_X
for ( int j = 0; j < DIM_X; j++ ) {
temp += s_XX_A[ltz][lty][j] *
s_XX_F[ltz][ltx][j];
}
__syncthreads();
// Compute self._alpha_sq * dot(dot(F, self.P), F.T) + Q
// Where temp = dot(dot(F, self.P), F.T)
P[gtz * DIM_X * DIM_X + x_value] =
alpha2 * temp + localQ;
}
}
template<typename T, int BLOCKS, int DIM_X, int DIM_Z, int MAX_TPB>
__global__ void __launch_bounds__(MAX_TPB) _cupy_update(
const int num_points,
T * __restrict__ x_in,
const T * __restrict__ z_in,
const T * __restrict__ H,
T * __restrict__ P,
const T * __restrict__ R
) {
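// Kalman update step (Joseph-form covariance update):
//   y  = z - H x
//   S  = H P H' + R
//   K  = P H' inv(S)
//   x <- x + K y
//   P <- (I - K H) P (I - K H)' + K R K'
// Intermediate products are staged in the shared-memory tiles declared
// below; threads outside the DIM_Z range sit out the Z-sized steps.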
__shared__ T s_XX_A[BLOCKS][DIM_X][DIM_X];
__shared__ T s_XX_B[BLOCKS][DIM_X][DIM_X];
__shared__ T s_XX_P[BLOCKS][DIM_X][DIM_X];
__shared__ T s_ZX_H[BLOCKS][DIM_Z][DIM_X];
__shared__ T s_XZ_K[BLOCKS][DIM_X][DIM_Z];
__shared__ T s_XZ_A[BLOCKS][DIM_X][DIM_Z];
__shared__ T s_ZZ_A[BLOCKS][DIM_Z][DIM_Z];
__shared__ T s_ZZ_R[BLOCKS][DIM_Z][DIM_Z];
__shared__ T s_ZZ_I[BLOCKS][DIM_Z][DIM_Z];
__shared__ T s_Z1_y[BLOCKS][DIM_Z][1];
const auto ltx = threadIdx.x;
const auto lty = threadIdx.y;
const auto ltz = threadIdx.z;
const int btz {
static_cast<int>( blockIdx.z * blockDim.z + threadIdx.z ) };
const int stride_z { static_cast<int>( blockDim.z * gridDim.z ) };
const int x_value { lty * DIM_X + ltx };
const int z_value { lty * DIM_Z + ltx };
for ( int gtz = btz; gtz < num_points; gtz += stride_z ) {
if ( lty < DIM_Z ) {
s_ZX_H[ltz][lty][ltx] =
H[gtz * DIM_Z * DIM_X + x_value];
}
__syncthreads();
s_XX_P[ltz][lty][ltx] = P[gtz * DIM_X * DIM_X + x_value];
if ( ( lty < DIM_Z ) && ( ltx < DIM_Z ) ) {
s_ZZ_R[ltz][lty][ltx] =
R[gtz * DIM_Z * DIM_Z + z_value];
if ( lty == ltx ) {
s_ZZ_I[ltz][lty][ltx] = 1.0;
} else {
s_ZZ_I[ltz][lty][ltx] = 0.0;
}
}
T temp {};
// Compute self.y : z - dot(self.H, self.x) --> Z1
if ( ( ltx == 0 ) && ( lty < DIM_Z ) ) {
T temp_z { z_in[gtz * DIM_Z + lty] };
#pragma unroll DIM_X
for ( int j = 0; j < DIM_X; j++ ) {
temp += s_ZX_H[ltz][lty][j] *
x_in[gtz * DIM_X + j];
}
s_Z1_y[ltz][lty][ltx] = temp_z - temp;
}
__syncthreads();
// Compute PHT : dot(self.P, self.H.T) --> XZ
temp = 0.0;
if ( ltx < DIM_Z ) {
#pragma unroll DIM_X
for ( int j = 0; j < DIM_X; j++ ) {
temp += s_XX_P[ltz][lty][j] *
s_ZX_H[ltz][ltx][j];
}
// s_XZ_A holds PHT
s_XZ_A[ltz][lty][ltx] = temp;
}
__syncthreads();
// Compute self.S : dot(self.H, PHT) + self.R --> ZZ
temp = 0.0;
if ( ( ltx < DIM_Z ) && ( lty < DIM_Z ) ) {
#pragma unroll DIM_X
for ( int j = 0; j < DIM_X; j++ ) {
temp += s_ZX_H[ltz][lty][j] *
s_XZ_A[ltz][j][ltx];
}
// s_ZZ_A holds S - system uncertainty
s_ZZ_A[ltz][lty][ltx] = temp + s_ZZ_R[ltz][lty][ltx];
}
__syncthreads();
// Compute matrix inversion
temp = inverse(ltx, lty, ltz, s_ZZ_A, s_ZZ_I);
__syncthreads();
if ( ( ltx < DIM_Z ) && ( lty < DIM_Z ) ) {
// s_ZZ_A holds SI - inverse system uncertainty
s_ZZ_A[ltz][lty][ltx] = temp;
}
__syncthreads();
// Compute self.K : dot(PHT, self.SI) --> XZ
// kalman gain
temp = 0.0;
if ( ltx < DIM_Z ) {
#pragma unroll DIM_Z
for ( int j = 0; j < DIM_Z; j++ ) {
temp += s_XZ_A[ltz][lty][j] *
s_ZZ_A[ltz][ltx][j];
}
s_XZ_K[ltz][lty][ltx] = temp;
}
__syncthreads();
// Compute self.x : self.x + cp.dot(self.K, self.y) --> X1
temp = 0.0;
if ( ltx == 0 ) {
#pragma unroll DIM_Z
for ( int j = 0; j < DIM_Z; j++ ) {
temp += s_XZ_K[ltz][lty][j] *
s_Z1_y[ltz][j][ltx];
}
x_in[gtz * DIM_X * 1 + lty * 1 + ltx] += temp;
}
// Compute I_KH = self_I - dot(self.K, self.H) --> XX
temp = 0.0;
#pragma unroll DIM_Z
for ( int j = 0; j < DIM_Z; j++ ) {
temp += s_XZ_K[ltz][lty][j] *
s_ZX_H[ltz][j][ltx];
}
// s_XX_A holds I_KH
s_XX_A[ltz][lty][ltx] = ( ( ltx == lty ) ? 1 : 0 ) - temp;
__syncthreads();
// Compute self.P = dot(dot(I_KH, self.P), I_KH.T) +
// dot(dot(self.K, self.R), self.K.T)
// Compute dot(I_KH, self.P) --> XX
temp = 0.0;
#pragma unroll DIM_X
for ( int j = 0; j < DIM_X; j++ ) {
temp += s_XX_A[ltz][lty][j] *
s_XX_P[ltz][j][ltx];
}
s_XX_B[ltz][lty][ltx] = temp;
__syncthreads();
// Compute dot(dot(I_KH, self.P), I_KH.T) --> XX
temp = 0.0;
#pragma unroll DIM_X
for ( int j = 0; j < DIM_X; j++ ) {
temp += s_XX_B[ltz][lty][j] *
s_XX_A[ltz][ltx][j];
}
s_XX_P[ltz][lty][ltx] = temp;
// Compute dot(self.K, self.R) --> XZ
temp = 0.0;
if ( ltx < DIM_Z ) {
#pragma unroll DIM_Z
for ( int j = 0; j < DIM_Z; j++ ) {
temp += s_XZ_K[ltz][lty][j] *
s_ZZ_R[ltz][j][ltx];
}
// s_XZ_A holds dot(self.K, self.R)
s_XZ_A[ltz][lty][ltx] = temp;
}
__syncthreads();
// Compute dot(dot(self.K, self.R), self.K.T) --> XX
temp = 0.0;
#pragma unroll DIM_Z
for ( int j = 0; j < DIM_Z; j++ ) {
temp += s_XZ_A[ltz][lty][j] *
s_XZ_K[ltz][ltx][j];
}
P[gtz * DIM_X * DIM_X + x_value] =
s_XX_P[ltz][lty][ltx] + temp;
}
}
"""
class _cupy_predict_wrapper(object):
def __init__(self, grid, block, kernel):
if isinstance(grid, int):
grid = (grid,)
if isinstance(block, int):
block = (block,)
self.grid = grid
self.block = block
self.kernel = kernel
def __call__(
self,
alpha_sq,
x,
u,
B,
F,
P,
Q,
):
if B is not None and u is not None:
skip = False
else:
skip = True
kernel_args = (x.shape[0], alpha_sq, x, u, B, F, P, Q, skip)
self.kernel(self.grid, self.block, kernel_args)
class _cupy_update_wrapper(object):
def __init__(self, grid, block, kernel):
if isinstance(grid, int):
grid = (grid,)
if isinstance(block, int):
block = (block,)
self.grid = grid
self.block = block
self.kernel = kernel
def __call__(self, x, z, H, P, R):
kernel_args = (x.shape[0], x, z, H, P, R)
self.kernel(self.grid, self.block, kernel_args)
def _populate_kernel_cache(np_type, blocks, dim_x, dim_z, dim_u, max_tpb):
# Check if np_type is a supported option
if np_type not in _SUPPORTED_TYPES:
raise ValueError(
"Datatype {} not found for Kalman Filter".format(np_type)
)
if np_type == "float32":
c_type = "float"
else:
c_type = "double"
# Check CuPy version
# Update to only check for v8.X in cuSignal 0.16
valid = ["8.0.0b4", "8.0.0b5", "8.0.0rc1", "8.0.0"]
ver = cp.__version__
if ver not in valid:
src = cuda_code_cupy_v7.substitute(
T=c_type,
BLOCKS=blocks,
DIM_X=dim_x,
DIM_Z=dim_z,
DIM_U=dim_u,
MAX_TPB=max_tpb,
)
module = cp.RawModule(code=src, options=("-std=c++11",))
_cupy_kernel_cache[(str(np_type), "predict")] = module.get_function(
"_cupy_predict"
)
_cupy_kernel_cache[(str(np_type), "update")] = module.get_function(
"_cupy_update"
)
else:
# Instantiate the cupy kernel for this type and compile
specializations = (
"_cupy_predict<{}, {}, {}, {}, {}>".format(
c_type, blocks, dim_x, dim_u, max_tpb
),
"_cupy_update<{}, {}, {}, {}, {}>".format(
c_type, blocks, dim_x, dim_z, max_tpb
),
)
module = cp.RawModule(
code=cuda_code_cupy_v8,
options=(
"-std=c++11",
"-fmad=true",
),
name_expressions=specializations,
)
_cupy_kernel_cache[(str(np_type), "predict")] = module.get_function(
specializations[0]
)
_cupy_kernel_cache[(str(np_type), "update")] = module.get_function(
specializations[1]
)
def _get_backend_kernel(dtype, grid, block, k_type):
kernel = _cupy_kernel_cache[(str(dtype), k_type)]
if kernel:
if k_type == "predict":
return _cupy_predict_wrapper(grid, block, kernel)
elif k_type == "update":
return _cupy_update_wrapper(grid, block, kernel)
else:
raise NotImplementedError(
"No CuPY kernel found for k_type {}, datatype {}".format(
k_type, dtype
)
)
else:
raise ValueError(
"Kernel {} not found in _cupy_kernel_cache".format(k_type)
)
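# Illustrative usage sketch (not part of the original module). The grid/block
# shapes, dtype, and dimensions below are assumptions; a real caller must size
# them against the BLOCKS / MAX_TPB values used when the kernels were compiled.
#
#   _populate_kernel_cache(np_type="float64", blocks=32, dim_x=4,
#                          dim_z=2, dim_u=1, max_tpb=512)
#   predict = _get_backend_kernel("float64", grid=(1, 1, 8),
#                                 block=(4, 4, 32), k_type="predict")
#   predict(alpha_sq, x, u, B, F, P, Q)   # CuPy arrays, one filter per point
#   update = _get_backend_kernel("float64", grid=(1, 1, 8),
#                                block=(4, 4, 32), k_type="update")
#   update(x, z, H, P, R)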
|
py | 1a4e12d699589b017b8323da5df24b979ce2a21a | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""
"""
__docformat__ = 'restructuredtext'
|
py | 1a4e138779f692df295f05999756fadc66fd0f16 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCrossConnectionPeeringsOperations:
"""ExpressRouteCrossConnectionPeeringsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
cross_connection_name: str,
**kwargs
) -> AsyncIterable["models.ExpressRouteCrossConnectionPeeringList"]:
"""Gets all peerings in a specified ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCrossConnectionPeeringList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_06_01.models.ExpressRouteCrossConnectionPeeringList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCrossConnectionPeeringList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCrossConnectionPeeringList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified peering from the ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
peering_name=peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
async def get(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
**kwargs
) -> "models.ExpressRouteCrossConnectionPeering":
"""Gets the specified peering for the ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCrossConnectionPeering, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_06_01.models.ExpressRouteCrossConnectionPeering
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCrossConnectionPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
peering_parameters: "models.ExpressRouteCrossConnectionPeering",
**kwargs
) -> "models.ExpressRouteCrossConnectionPeering":
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCrossConnectionPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(peering_parameters, 'ExpressRouteCrossConnectionPeering')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
peering_parameters: "models.ExpressRouteCrossConnectionPeering",
**kwargs
) -> AsyncLROPoller["models.ExpressRouteCrossConnectionPeering"]:
"""Creates or updates a peering in the specified ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param peering_parameters: Parameters supplied to the create or update
ExpressRouteCrossConnection peering operation.
:type peering_parameters: ~azure.mgmt.network.v2019_06_01.models.ExpressRouteCrossConnectionPeering
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCrossConnectionPeering or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_06_01.models.ExpressRouteCrossConnectionPeering]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCrossConnectionPeering"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
peering_name=peering_name,
peering_parameters=peering_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
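# Illustrative usage sketch (not part of the generated file). The client class,
# attribute name, and credential wiring below are assumptions about the typical
# async multi-API client setup for this package version.
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.network.v2019_06_01.aio import NetworkManagementClient
#
#   async with NetworkManagementClient(DefaultAzureCredential(), subscription_id) as client:
#       ops = client.express_route_cross_connection_peerings
#       async for peering in ops.list("my-rg", "my-cross-connection"):
#           print(peering.name)
#       poller = await ops.begin_create_or_update(
#           "my-rg", "my-cross-connection", "AzurePrivatePeering", peering_parameters)
#       result = await poller.result()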
|
py | 1a4e1469289cf91873473fb21a233d6e65bcb83e | import itertools
import logging
from collections import defaultdict, deque
from BaseClasses import DoorType
from Regions import dungeon_events
from Dungeons import dungeon_keys, dungeon_bigs
from DungeonGenerator import ExplorationState, special_big_key_doors
class KeyLayout(object):
def __init__(self, sector, starts, proposal):
self.sector = sector
self.start_regions = starts
self.proposal = proposal
self.key_logic = KeyLogic(sector.name)
self.key_counters = None
self.flat_prop = None
self.max_chests = None
self.max_drops = None
self.all_chest_locations = {}
self.big_key_special = False
self.all_locations = set()
self.item_locations = set()
# bk special?
# bk required? True if big chests or big doors exist
def reset(self, proposal, builder, world, player):
self.proposal = proposal
self.flat_prop = flatten_pair_list(self.proposal)
self.key_logic = KeyLogic(self.sector.name)
self.max_chests = calc_max_chests(builder, self, world, player)
self.all_locations = set()
self.item_locations = set()
class KeyLogic(object):
def __init__(self, dungeon_name):
self.door_rules = {}
self.bk_restricted = set() # subset of free locations
self.bk_locked = set() # includes potentially other locations and key only locations
self.sm_restricted = set()
self.small_key_name = dungeon_keys[dungeon_name]
self.bk_name = dungeon_bigs[dungeon_name]
self.bk_doors = set()
self.bk_chests = set()
self.logic_min = {}
self.logic_max = {}
self.placement_rules = []
self.location_rules = {}
self.outside_keys = 0
self.dungeon = dungeon_name
def check_placement(self, unplaced_keys, big_key_loc=None):
for rule in self.placement_rules:
if not rule.is_satisfiable(self.outside_keys, unplaced_keys):
return False
if big_key_loc:
for rule_a, rule_b in itertools.combinations(self.placement_rules, 2):
if rule_a.contradicts(rule_b, unplaced_keys, big_key_loc):
return False
return True
class DoorRules(object):
def __init__(self, number, is_valid):
self.small_key_num = number
self.is_valid = is_valid
# allowing a different number if bk is behind this door in a set of locations
self.alternate_small_key = None
self.alternate_big_key_loc = set()
# for a place with only 1 free location/key_only_location behind it ... no goals and locations
self.allow_small = False
self.small_location = None
self.opposite = None
class LocationRule(object):
def __init__(self):
self.small_key_num = 0
self.conditional_sets = []
class ConditionalLocationRule(object):
def __init__(self, conditional_set):
self.conditional_set = conditional_set
self.small_key_num = 0
class PlacementRule(object):
def __init__(self):
self.door_reference = None
self.small_key = None
self.bk_conditional_set = None # locations that, if one of them holds the big key, force the wo_bk variant of this rule
self.needed_keys_w_bk = None
self.needed_keys_wo_bk = None
self.check_locations_w_bk = None
self.check_locations_wo_bk = None
self.bk_relevant = True
self.key_reduced = False
def contradicts(self, rule, unplaced_keys, big_key_loc):
bk_blocked = big_key_loc in self.bk_conditional_set if self.bk_conditional_set else False
rule_blocked = big_key_loc in rule.bk_conditional_set if rule.bk_conditional_set else False
check_locations = self.check_locations_wo_bk if bk_blocked else self.check_locations_w_bk
rule_locations = rule.check_locations_wo_bk if rule_blocked else rule.check_locations_w_bk
if check_locations is None or rule_locations is None:
return False
check_locations = check_locations - {big_key_loc}
rule_locations = rule_locations - {big_key_loc}
threshold = self.needed_keys_wo_bk if bk_blocked else self.needed_keys_w_bk
rule_threshold = rule.needed_keys_wo_bk if rule_blocked else rule.needed_keys_w_bk
common_locations = rule_locations & check_locations
shared = len(common_locations)
if min(rule_threshold, threshold) - shared > 0:
left = unplaced_keys - shared
check_locations = check_locations - common_locations
check_needed = threshold - shared
if len(check_locations) < check_needed or left < check_needed:
return True
else:
left -= check_needed
rule_locations = rule_locations - common_locations
rule_needed = rule_threshold - shared
if len(rule_locations) < rule_needed or left < rule_needed:
return True
else:
left -= rule_needed
return False
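# Worked example (hypothetical locations): rule A needs 2 keys among
# {a, b, c}, rule B needs 2 keys among {c, d}, and the big key sits
# elsewhere. They share c, so after the shared key each still needs one
# more; with unplaced_keys=3 there is a key left for each and no
# contradiction is reported, but with unplaced_keys=2 rule B runs out of
# keys and contradicts() returns True.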
def is_satisfiable(self, outside_keys, unplaced_keys):
bk_blocked = False
if self.bk_conditional_set:
for loc in self.bk_conditional_set:
if loc.item and loc.item.bigkey:
bk_blocked = True
break
check_locations = self.check_locations_wo_bk if bk_blocked else self.check_locations_w_bk
if not bk_blocked and check_locations is None:
return True
available_keys = outside_keys
empty_chests = 0
threshold = self.needed_keys_wo_bk if bk_blocked else self.needed_keys_w_bk
for loc in check_locations:
if not loc.item:
empty_chests += 1
elif loc.item and loc.item.name == self.small_key:
available_keys += 1
place_able_keys = min(empty_chests, unplaced_keys)
available_keys += place_able_keys
return available_keys >= threshold
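# Worked example of the arithmetic above (hypothetical numbers): with
# outside_keys=1, unplaced_keys=2, a threshold of 3, and check_locations
# holding one small key plus two empty chests, available_keys becomes
# 1 + 1 + min(2, 2) = 4 >= 3, so the rule is satisfiable.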
class KeyCounter(object):
def __init__(self, max_chests):
self.max_chests = max_chests
self.free_locations = {}
self.key_only_locations = {}
self.child_doors = {}
self.open_doors = {}
self.used_keys = 0
self.big_key_opened = False
self.important_location = False
self.other_locations = {}
self.important_locations = {}
def used_smalls_loc(self, reserve=0):
return max(self.used_keys + reserve - len(self.key_only_locations), 0)
def build_key_layout(builder, start_regions, proposal, world, player):
key_layout = KeyLayout(builder.master_sector, start_regions, proposal)
key_layout.flat_prop = flatten_pair_list(key_layout.proposal)
key_layout.max_drops = count_key_drops(key_layout.sector)
key_layout.max_chests = calc_max_chests(builder, key_layout, world, player)
key_layout.big_key_special = check_bk_special(key_layout.sector.region_set(), world, player)
key_layout.all_locations = find_all_locations(key_layout.sector)
return key_layout
def count_key_drops(sector):
cnt = 0
for region in sector.regions:
for loc in region.locations:
if loc.forced_item and 'Small Key' in loc.item.name:
cnt += 1
return cnt
def find_all_locations(sector):
all_locations = set()
for region in sector.regions:
for loc in region.locations:
all_locations.add(loc)
return all_locations
def calc_max_chests(builder, key_layout, world, player):
if world.doorShuffle[player] != 'crossed':
return len(world.get_dungeon(key_layout.sector.name, player).small_keys)
return max(0, builder.key_doors_num - key_layout.max_drops)
def analyze_dungeon(key_layout, world, player):
key_layout.key_counters = create_key_counters(key_layout, world, player)
key_logic = key_layout.key_logic
find_bk_locked_sections(key_layout, world, player)
key_logic.bk_chests.update(find_big_chest_locations(key_layout.all_chest_locations))
key_logic.bk_chests.update(find_big_key_locked_locations(key_layout.all_chest_locations))
if world.retro[player] and world.mode[player] != 'standard':
return
original_key_counter = find_counter({}, False, key_layout)
queue = deque([(None, original_key_counter)])
doors_completed = set()
visited_cid = set()
visited_cid.add(cid(original_key_counter, key_layout))
while len(queue) > 0:
queue = deque(sorted(queue, key=queue_sorter))
parent_door, key_counter = queue.popleft()
chest_keys = available_chest_small_keys(key_counter, world, player)
raw_avail = chest_keys + len(key_counter.key_only_locations)
available = raw_avail - key_counter.used_keys
possible_smalls = count_unique_small_doors(key_counter, key_layout.flat_prop)
avail_bigs = exist_relevant_big_doors(key_counter, key_layout) or exist_big_chest(key_counter)
non_big_locs = count_locations_big_optional(key_counter.free_locations)
big_avail = key_counter.big_key_opened or (key_layout.big_key_special and any(x for x in key_counter.other_locations.keys() if x.forced_item and x.forced_item.bigkey))
if not big_avail:
if chest_keys == non_big_locs and chest_keys > 0 and available <= possible_smalls and not avail_bigs:
key_logic.bk_restricted.update(filter_big_chest(key_counter.free_locations))
# try to relax the rules here? - smallest requirement that doesn't force a softlock
child_queue = deque()
for child in key_counter.child_doors.keys():
if not child.bigKey or not key_layout.big_key_special or big_avail:
odd_counter = create_odd_key_counter(child, key_counter, key_layout, world, player)
empty_flag = empty_counter(odd_counter)
child_queue.append((child, odd_counter, empty_flag))
while len(child_queue) > 0:
child, odd_counter, empty_flag = child_queue.popleft()
if not child.bigKey and child not in doors_completed:
best_counter = find_best_counter(child, odd_counter, key_counter, key_layout, world, player, False, empty_flag)
rule = create_rule(best_counter, key_counter, key_layout, world, player)
check_for_self_lock_key(rule, child, best_counter, key_layout, world, player)
bk_restricted_rules(rule, child, odd_counter, empty_flag, key_counter, key_layout, world, player)
key_logic.door_rules[child.name] = rule
doors_completed.add(child)
next_counter = find_next_counter(child, key_counter, key_layout)
ctr_id = cid(next_counter, key_layout)
if ctr_id not in visited_cid:
queue.append((child, next_counter))
visited_cid.add(ctr_id)
check_rules(original_key_counter, key_layout, world, player)
# Flip bk rules if more restrictive, to prevent placing a big key in a softlocking location
for rule in key_logic.door_rules.values():
if rule.alternate_small_key is not None and rule.alternate_small_key > rule.small_key_num:
max_counter = find_max_counter(key_layout)
rule.alternate_big_key_loc = set(max_counter.free_locations.keys()).difference(rule.alternate_big_key_loc)
rule.small_key_num, rule.alternate_small_key = rule.alternate_small_key, rule.small_key_num
create_exhaustive_placement_rules(key_layout, world, player)
set_paired_rules(key_logic, world, player)
def create_exhaustive_placement_rules(key_layout, world, player):
key_logic = key_layout.key_logic
max_ctr = find_max_counter(key_layout)
for code, key_counter in key_layout.key_counters.items():
accessible_loc = set()
accessible_loc.update(key_counter.free_locations)
accessible_loc.update(key_counter.key_only_locations)
blocked_loc = key_layout.item_locations.difference(accessible_loc)
valid_rule = True
# min_keys = max(count_unique_sm_doors(key_counter.child_doors), key_counter.used_keys + 1)
min_keys = key_counter.used_keys + 1
if len(blocked_loc) > 0 and len(key_counter.key_only_locations) < min_keys:
rule = PlacementRule()
rule.door_reference = code
rule.small_key = key_logic.small_key_name
if key_counter.big_key_opened or not big_key_progress(key_counter):
rule.needed_keys_w_bk = min_keys
rule.bk_relevant = key_counter.big_key_opened
if key_counter.big_key_opened and rule.needed_keys_w_bk + 1 > len(accessible_loc):
valid_rule = False # indicates that the big key cannot be in the accessible locations
key_logic.bk_restricted.update(accessible_loc.difference(max_ctr.key_only_locations))
else:
placement_self_lock_adjustment(rule, max_ctr, blocked_loc, key_counter, world, player)
rule.check_locations_w_bk = accessible_loc
check_sm_restriction_needed(key_layout, max_ctr, rule, blocked_loc)
else:
if big_key_progress(key_counter) and only_sm_doors(key_counter):
create_inclusive_rule(key_layout, max_ctr, code, key_counter, blocked_loc, accessible_loc, min_keys, world, player)
rule.bk_conditional_set = blocked_loc
rule.needed_keys_wo_bk = min_keys
rule.check_locations_wo_bk = set(filter_big_chest(accessible_loc))
if valid_rule:
key_logic.placement_rules.append(rule)
adjust_locations_rules(key_logic, rule, accessible_loc, key_layout, key_counter, max_ctr)
refine_placement_rules(key_layout, max_ctr)
refine_location_rules(key_layout)
def placement_self_lock_adjustment(rule, max_ctr, blocked_loc, ctr, world, player):
if len(blocked_loc) == 1 and world.accessibility[player] != 'locations':
blocked_others = set(max_ctr.other_locations).difference(set(ctr.other_locations))
important_found = False
for loc in blocked_others:
if important_location(loc, world, player):
important_found = True
break
if not important_found:
rule.needed_keys_w_bk -= 1
def check_sm_restriction_needed(key_layout, max_ctr, rule, blocked):
if rule.needed_keys_w_bk == key_layout.max_chests + len(max_ctr.key_only_locations):
key_layout.key_logic.sm_restricted.update(blocked.difference(max_ctr.key_only_locations))
return True
return False
def adjust_locations_rules(key_logic, rule, accessible_loc, key_layout, key_counter, max_ctr):
if rule.bk_conditional_set:
test_set = (rule.bk_conditional_set - key_logic.bk_locked) - set(max_ctr.key_only_locations.keys())
needed = rule.needed_keys_wo_bk if test_set else 0
else:
test_set = None
needed = rule.needed_keys_w_bk
if needed > 0:
all_accessible = set(accessible_loc)
all_accessible.update(key_counter.other_locations)
blocked_loc = key_layout.all_locations-all_accessible
for location in blocked_loc:
if location not in key_logic.location_rules.keys():
loc_rule = LocationRule()
key_logic.location_rules[location] = loc_rule
else:
loc_rule = key_logic.location_rules[location]
if test_set:
if location not in key_logic.bk_locked:
cond_rule = None
for other in loc_rule.conditional_sets:
if other.conditional_set == test_set:
cond_rule = other
break
if not cond_rule:
cond_rule = ConditionalLocationRule(test_set)
loc_rule.conditional_sets.append(cond_rule)
cond_rule.small_key_num = max(needed, cond_rule.small_key_num)
else:
loc_rule.small_key_num = max(needed, loc_rule.small_key_num)
def refine_placement_rules(key_layout, max_ctr):
key_logic = key_layout.key_logic
changed = True
while changed:
changed = False
rules_to_remove = []
for rule in key_logic.placement_rules:
if rule.check_locations_w_bk:
rule.check_locations_w_bk.difference_update(key_logic.sm_restricted)
key_onlys = rule.check_locations_w_bk.intersection(max_ctr.key_only_locations)
if len(key_onlys) > 0:
rule.check_locations_w_bk.difference_update(key_onlys)
rule.needed_keys_w_bk -= len(key_onlys)
if rule.needed_keys_w_bk == 0:
rules_to_remove.append(rule)
# todo: evaluate this usage
# if rule.bk_relevant and len(rule.check_locations_w_bk) == rule.needed_keys_w_bk + 1:
# new_restricted = set(max_ctr.free_locations) - rule.check_locations_w_bk
# if len(new_restricted | key_logic.bk_restricted) < len(key_layout.all_chest_locations):
# if len(new_restricted - key_logic.bk_restricted) > 0:
# key_logic.bk_restricted.update(new_restricted) # bk must be in one of the check_locations
# changed = True
# else:
# rules_to_remove.append(rule)
# changed = True
if rule.needed_keys_w_bk > key_layout.max_chests or len(rule.check_locations_w_bk) < rule.needed_keys_w_bk:
logging.getLogger('').warning('Invalid rule - what went wrong here??')
rules_to_remove.append(rule)
changed = True
if rule.bk_conditional_set is not None:
rule.bk_conditional_set.difference_update(key_logic.bk_restricted)
rule.bk_conditional_set.difference_update(max_ctr.key_only_locations)
if len(rule.bk_conditional_set) == 0:
rules_to_remove.append(rule)
if rule.check_locations_wo_bk:
rule.check_locations_wo_bk.difference_update(key_logic.sm_restricted)
key_onlys = rule.check_locations_wo_bk.intersection(max_ctr.key_only_locations)
if len(key_onlys) > 0:
rule.check_locations_wo_bk.difference_update(key_onlys)
rule.needed_keys_wo_bk -= len(key_onlys)
if rule.needed_keys_wo_bk == 0:
rules_to_remove.append(rule)
if len(rule.check_locations_wo_bk) < rule.needed_keys_wo_bk or rule.needed_keys_wo_bk > key_layout.max_chests:
if len(rule.bk_conditional_set) > 0:
key_logic.bk_restricted.update(rule.bk_conditional_set)
rules_to_remove.append(rule)
changed = True # impossible for bk to be here, I think
for rule_a, rule_b in itertools.combinations([x for x in key_logic.placement_rules if x not in rules_to_remove], 2):
if rule_b.bk_conditional_set and rule_a.check_locations_w_bk:
temp = rule_a
rule_a = rule_b
rule_b = temp
if rule_a.bk_conditional_set and rule_b.check_locations_w_bk:
common_needed = min(rule_a.needed_keys_wo_bk, rule_b.needed_keys_w_bk)
if len(rule_b.check_locations_w_bk & rule_a.check_locations_wo_bk) < common_needed:
key_logic.bk_restricted.update(rule_a.bk_conditional_set)
rules_to_remove.append(rule_a)
changed = True
break
equivalent_rules = []
for rule in key_logic.placement_rules:
for rule2 in key_logic.placement_rules:
if rule != rule2:
if rule.check_locations_w_bk and rule2.check_locations_w_bk:
if rule2.check_locations_w_bk == rule.check_locations_w_bk and rule2.needed_keys_w_bk > rule.needed_keys_w_bk:
rules_to_remove.append(rule)
elif rule2.needed_keys_w_bk == rule.needed_keys_w_bk and rule2.check_locations_w_bk < rule.check_locations_w_bk:
rules_to_remove.append(rule)
elif rule2.check_locations_w_bk == rule.check_locations_w_bk and rule2.needed_keys_w_bk == rule.needed_keys_w_bk:
equivalent_rules.append((rule, rule2))
if rule.check_locations_wo_bk and rule2.check_locations_wo_bk and rule.bk_conditional_set == rule2.bk_conditional_set:
if rule2.check_locations_wo_bk == rule.check_locations_wo_bk and rule2.needed_keys_wo_bk > rule.needed_keys_wo_bk:
rules_to_remove.append(rule)
elif rule2.needed_keys_wo_bk == rule.needed_keys_wo_bk and rule2.check_locations_wo_bk < rule.check_locations_wo_bk:
rules_to_remove.append(rule)
elif rule2.check_locations_wo_bk == rule.check_locations_wo_bk and rule2.needed_keys_wo_bk == rule.needed_keys_wo_bk:
equivalent_rules.append((rule, rule2))
if len(rules_to_remove) > 0:
key_logic.placement_rules = [x for x in key_logic.placement_rules if x not in rules_to_remove]
equivalent_rules = [x for x in equivalent_rules if x[0] not in rules_to_remove and x[1] not in rules_to_remove]
if len(equivalent_rules) > 0:
removed_rules = {}
for r1, r2 in equivalent_rules:
if r1 in removed_rules.keys():
r1 = removed_rules[r1]
if r2 in removed_rules.keys():
r2 = removed_rules[r2]
if r1 != r2:
r1.door_reference += ','+r2.door_reference
key_logic.placement_rules.remove(r2)
removed_rules[r2] = r1
def refine_location_rules(key_layout):
locs_to_remove = []
for loc, rule in key_layout.key_logic.location_rules.items():
conditions_to_remove = []
for cond_rule in rule.conditional_sets:
if cond_rule.small_key_num <= rule.small_key_num:
conditions_to_remove.append(cond_rule)
rule.conditional_sets = [x for x in rule.conditional_sets if x not in conditions_to_remove]
if rule.small_key_num == 0 and len(rule.conditional_sets) == 0:
locs_to_remove.append(loc)
for loc in locs_to_remove:
del key_layout.key_logic.location_rules[loc]
def create_inclusive_rule(key_layout, max_ctr, code, key_counter, blocked_loc, accessible_loc, min_keys, world, player):
key_logic = key_layout.key_logic
rule = PlacementRule()
rule.door_reference = code
rule.small_key = key_logic.small_key_name
rule.needed_keys_w_bk = min_keys
if key_counter.big_key_opened and rule.needed_keys_w_bk + 1 > len(accessible_loc):
# indicates that the big key cannot be in the accessible locations
key_logic.bk_restricted.update(accessible_loc.difference(max_ctr.key_only_locations))
else:
placement_self_lock_adjustment(rule, max_ctr, blocked_loc, key_counter, world, player)
rule.check_locations_w_bk = accessible_loc
check_sm_restriction_needed(key_layout, max_ctr, rule, blocked_loc)
key_logic.placement_rules.append(rule)
adjust_locations_rules(key_logic, rule, accessible_loc, key_layout, key_counter, max_ctr)
def queue_sorter(queue_item):
door, counter = queue_item
if door is None:
return 0
return 1 if door.bigKey else 0
def queue_sorter_2(queue_item):
door, counter, key_only = queue_item
if door is None:
return 0
return 1 if door.bigKey else 0
def find_bk_locked_sections(key_layout, world, player):
key_counters = key_layout.key_counters
key_logic = key_layout.key_logic
bk_not_required = set()
big_chest_allowed_big_key = world.accessibility[player] != 'locations'
for counter in key_counters.values():
key_layout.all_chest_locations.update(counter.free_locations)
key_layout.item_locations.update(counter.free_locations)
key_layout.item_locations.update(counter.key_only_locations)
key_layout.all_locations.update(key_layout.item_locations)
key_layout.all_locations.update(counter.other_locations)
if counter.big_key_opened and counter.important_location:
big_chest_allowed_big_key = False
if not counter.big_key_opened:
bk_not_required.update(counter.free_locations)
bk_not_required.update(counter.key_only_locations)
bk_not_required.update(counter.other_locations)
# todo?: handle bk special differently in cross dungeon
# notably: things behind bk doors - relying on the bk door logic atm
if not key_layout.big_key_special:
key_logic.bk_restricted.update(dict.fromkeys(set(key_layout.all_chest_locations).difference(bk_not_required)))
key_logic.bk_locked.update(dict.fromkeys(set(key_layout.all_locations) - bk_not_required))
if not big_chest_allowed_big_key:
bk_required_locations = find_big_chest_locations(key_layout.all_chest_locations)
bk_required_locations += find_big_key_locked_locations(key_layout.all_chest_locations)
key_logic.bk_restricted.update(bk_required_locations)
key_logic.bk_locked.update(bk_required_locations)
def empty_counter(counter):
if len(counter.key_only_locations) != 0 or len(counter.free_locations) != 0 or len(counter.child_doors) != 0:
return False
return not counter.important_location
def relative_empty_counter(odd_counter, key_counter):
if len(set(odd_counter.key_only_locations).difference(key_counter.key_only_locations)) > 0:
return False
if len(set(odd_counter.free_locations).difference(key_counter.free_locations)) > 0:
return False
# important only
if len(set(odd_counter.important_locations).difference(key_counter.important_locations)) > 0:
return False
new_child_door = False
for child in odd_counter.child_doors:
if unique_child_door(child, key_counter):
new_child_door = True
break
if new_child_door:
return False
return True
def relative_empty_counter_2(odd_counter, key_counter):
if len(set(odd_counter.key_only_locations).difference(key_counter.key_only_locations)) > 0:
return False
if len(set(odd_counter.free_locations).difference(key_counter.free_locations)) > 0:
return False
# important only
if len(set(odd_counter.important_locations).difference(key_counter.important_locations)) > 0:
return False
for child in odd_counter.child_doors:
if unique_child_door_2(child, key_counter):
return False
return True
def progressive_ctr(new_counter, last_counter):
if len(set(new_counter.key_only_locations).difference(last_counter.key_only_locations)) > 0:
return True
if len(set(new_counter.free_locations).difference(last_counter.free_locations)) > 0:
return True
for child in new_counter.child_doors:
if unique_child_door_2(child, last_counter):
return True
return False
def unique_child_door(child, key_counter):
if child in key_counter.child_doors or child.dest in key_counter.child_doors:
return False
if child in key_counter.open_doors or child.dest in key_counter.open_doors:
return False
if child.bigKey and key_counter.big_key_opened:
return False
return True
def unique_child_door_2(child, key_counter):
if child in key_counter.child_doors or child.dest in key_counter.child_doors:
return False
if child in key_counter.open_doors or child.dest in key_counter.open_doors:
return False
return True
def find_best_counter(door, odd_counter, key_counter, key_layout, world, player, skip_bk, empty_flag): # try to waste as many keys as possible?
ignored_doors = {door, door.dest} if door is not None else {}
finished = False
opened_doors = dict(key_counter.open_doors)
bk_opened = key_counter.big_key_opened
# new_counter = key_counter
last_counter = key_counter
while not finished:
door_set = find_potential_open_doors(last_counter, ignored_doors, key_layout, skip_bk)
if door_set is None or len(door_set) == 0:
finished = True
continue
for new_door in door_set:
proposed_doors = {**opened_doors, **dict.fromkeys([new_door, new_door.dest])}
bk_open = bk_opened or new_door.bigKey
new_counter = find_counter(proposed_doors, bk_open, key_layout)
bk_open = new_counter.big_key_opened
# this means the new_door invalidates the door / leads to the same stuff
if not empty_flag and relative_empty_counter(odd_counter, new_counter):
ignored_doors.add(new_door)
elif empty_flag or key_wasted(new_door, door, last_counter, new_counter, key_layout, world, player):
last_counter = new_counter
opened_doors = proposed_doors
bk_opened = bk_open
else:
ignored_doors.add(new_door)
return last_counter
def find_worst_counter(door, odd_counter, key_counter, key_layout, skip_bk): # try to waste as many keys as possible?
ignored_doors = {door, door.dest} if door is not None else {}
finished = False
opened_doors = dict(key_counter.open_doors)
bk_opened = key_counter.big_key_opened
# new_counter = key_counter
last_counter = key_counter
while not finished:
door_set = find_potential_open_doors(last_counter, ignored_doors, key_layout, skip_bk, 0)
if door_set is None or len(door_set) == 0:
finished = True
continue
for new_door in door_set:
proposed_doors = {**opened_doors, **dict.fromkeys([new_door, new_door.dest])}
bk_open = bk_opened or new_door.bigKey
new_counter = find_counter(proposed_doors, bk_open, key_layout)
bk_open = new_counter.big_key_opened
if not new_door.bigKey and progressive_ctr(new_counter, last_counter) and relative_empty_counter_2(odd_counter, new_counter):
ignored_doors.add(new_door)
else:
last_counter = new_counter
opened_doors = proposed_doors
bk_opened = bk_open
# this means the new_door invalidates the door / leads to the same stuff
return last_counter
def find_potential_open_doors(key_counter, ignored_doors, key_layout, skip_bk, reserve=1):
small_doors = []
big_doors = []
if key_layout.big_key_special:
big_key_available = any(x for x in key_counter.other_locations.keys() if x.forced_item and x.forced_item.bigkey)
else:
big_key_available = len(key_counter.free_locations) - key_counter.used_smalls_loc(reserve) > 0
for other in key_counter.child_doors:
if other not in ignored_doors and other.dest not in ignored_doors:
if other.bigKey:
if not skip_bk and (not key_layout.big_key_special or big_key_available):
big_doors.append(other)
elif other.dest not in small_doors:
small_doors.append(other)
if len(small_doors) == 0 and (not skip_bk and (len(big_doors) == 0 or not big_key_available)):
return None
return small_doors + big_doors
def key_wasted(new_door, old_door, old_counter, new_counter, key_layout, world, player):
if new_door.bigKey: # big keys are not wastes - it uses up a location
return True
chest_keys = available_chest_small_keys(old_counter, world, player)
old_key_diff = len(old_counter.key_only_locations) - old_counter.used_keys
old_avail = chest_keys + old_key_diff
new_chest_keys = available_chest_small_keys(new_counter, world, player)
new_key_diff = len(new_counter.key_only_locations) - new_counter.used_keys
new_avail = new_chest_keys + new_key_diff
if new_key_diff < old_key_diff or new_avail < old_avail:
return True
if new_avail >= old_avail:
wasted_keys = 0
old_children = old_counter.child_doors.keys()
new_children = [x for x in new_counter.child_doors.keys() if x != old_door and x.dest != old_door and (not x.bigKey or x not in old_children)]
current_counter = new_counter
opened_doors = dict(current_counter.open_doors)
bk_opened = current_counter.big_key_opened
for new_child in new_children:
proposed_doors = {**opened_doors, **dict.fromkeys([new_child, new_child.dest])}
bk_open = bk_opened or new_door.bigKey
new_counter = find_counter(proposed_doors, bk_open, key_layout)
if key_wasted(new_child, old_door, current_counter, new_counter, key_layout, world, player):
wasted_keys += 1
if new_avail - wasted_keys < old_avail:
return True # waste is possible
return False
def find_next_counter(new_door, old_counter, key_layout):
proposed_doors = {**old_counter.open_doors, **dict.fromkeys([new_door, new_door.dest])}
bk_open = old_counter.big_key_opened or new_door.bigKey
return find_counter(proposed_doors, bk_open, key_layout)
def check_special_locations(locations):
for loc in locations:
if loc.name == 'Hyrule Castle - Zelda\'s Chest':
return True
return False
def calc_avail_keys(key_counter, world, player):
chest_keys = available_chest_small_keys(key_counter, world, player)
raw_avail = chest_keys + len(key_counter.key_only_locations)
return raw_avail - key_counter.used_keys
def create_rule(key_counter, prev_counter, key_layout, world, player):
# prev_chest_keys = available_chest_small_keys(prev_counter, world)
# prev_avail = prev_chest_keys + len(prev_counter.key_only_locations)
chest_keys = available_chest_small_keys(key_counter, world, player)
key_gain = len(key_counter.key_only_locations) - len(prev_counter.key_only_locations)
# previous method
# raw_avail = chest_keys + len(key_counter.key_only_locations)
# available = raw_avail - key_counter.used_keys
# possible_smalls = count_unique_small_doors(key_counter, key_layout.flat_prop)
# required_keys = min(available, possible_smalls) + key_counter.used_keys
required_keys = key_counter.used_keys + 1 # this makes more sense, if key_counter has wasted all keys
adj_chest_keys = min(chest_keys, required_keys)
needed_chests = required_keys - len(key_counter.key_only_locations)
is_valid = needed_chests <= chest_keys
unneeded_chests = min(key_gain, max(0, adj_chest_keys - needed_chests))
rule_num = required_keys - unneeded_chests
return DoorRules(rule_num, is_valid)
def check_for_self_lock_key(rule, door, parent_counter, key_layout, world, player):
if world.accessibility[player] != 'locations':
counter = find_inverted_counter(door, parent_counter, key_layout, world, player)
if not self_lock_possible(counter):
return
if len(counter.free_locations) == 1 and len(counter.key_only_locations) == 0 and not counter.important_location:
rule.allow_small = True
rule.small_location = next(iter(counter.free_locations))
def find_inverted_counter(door, parent_counter, key_layout, world, player):
# open all doors in counter
counter = open_all_counter(parent_counter, key_layout, door=door)
max_counter = find_max_counter(key_layout)
# find the difference
inverted_counter = KeyCounter(key_layout.max_chests)
inverted_counter.free_locations = dict_difference(max_counter.free_locations, counter.free_locations)
inverted_counter.key_only_locations = dict_difference(max_counter.key_only_locations, counter.key_only_locations)
# child doors? used_keys?
# inverted_counter.child_doors = dict_difference(max_counter.child_doors, counter.child_doors)
inverted_counter.open_doors = dict_difference(max_counter.open_doors, counter.open_doors)
inverted_counter.other_locations = dict_difference(max_counter.other_locations, counter.other_locations)
for loc in inverted_counter.other_locations:
if important_location(loc, world, player):
inverted_counter.important_location = True
return inverted_counter
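# opens every reachable child door (optionally skipping a specific door and/or big key
# doors) until the counter stops changing, and returns the fully expanded counter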
def open_all_counter(parent_counter, key_layout, door=None, skipBk=False):
changed = True
counter = parent_counter
proposed_doors = dict.fromkeys(parent_counter.open_doors.keys())
while changed:
changed = False
doors_to_open = {}
for child in counter.child_doors:
if door is None or (child != door and child != door.dest):
if skipBk:
if not child.bigKey:
doors_to_open[child] = None
elif not child.bigKey or not key_layout.big_key_special or counter.big_key_opened:
doors_to_open[child] = None
if len(doors_to_open.keys()) > 0:
proposed_doors = {**proposed_doors, **doors_to_open}
bk_hint = counter.big_key_opened
for d in doors_to_open.keys():
bk_hint = bk_hint or d.bigKey
counter = find_counter(proposed_doors, bk_hint, key_layout)
changed = True
return counter
def open_some_counter(parent_counter, key_layout, ignored_doors):
changed = True
counter = parent_counter
proposed_doors = dict.fromkeys(parent_counter.open_doors.keys())
while changed:
changed = False
doors_to_open = {}
for child in counter.child_doors:
if child not in ignored_doors:
if not child.bigKey:
doors_to_open[child] = None
if len(doors_to_open.keys()) > 0:
proposed_doors = {**proposed_doors, **doors_to_open}
bk_hint = counter.big_key_opened
for d in doors_to_open.keys():
bk_hint = bk_hint or d.bigKey
counter = find_counter(proposed_doors, bk_hint, key_layout)
changed = True
return counter
def self_lock_possible(counter):
return len(counter.free_locations) <= 1 and len(counter.key_only_locations) == 0 and not counter.important_location
def available_chest_small_keys(key_counter, world, player):
if not world.keyshuffle[player] and not world.retro[player]:
cnt = 0
for loc in key_counter.free_locations:
if key_counter.big_key_opened or '- Big Chest' not in loc.name:
cnt += 1
return min(cnt, key_counter.max_chests)
else:
return key_counter.max_chests
def available_chest_small_keys_logic(key_counter, world, player, sm_restricted):
if not world.keyshuffle[player] and not world.retro[player]:
cnt = 0
for loc in key_counter.free_locations:
if loc not in sm_restricted and (key_counter.big_key_opened or '- Big Chest' not in loc.name):
cnt += 1
return min(cnt, key_counter.max_chests)
else:
return key_counter.max_chests
def big_key_drop_available(key_counter):
for loc in key_counter.other_locations:
if loc.forced_big_key():
return True
return False
def bk_restricted_rules(rule, door, odd_counter, empty_flag, key_counter, key_layout, world, player):
if key_counter.big_key_opened:
return
best_counter = find_best_counter(door, odd_counter, key_counter, key_layout, world, player, True, empty_flag)
bk_rule = create_rule(best_counter, key_counter, key_layout, world, player)
if bk_rule.small_key_num >= rule.small_key_num:
return
door_open = find_next_counter(door, best_counter, key_layout)
ignored_doors = dict_intersection(best_counter.child_doors, door_open.child_doors)
dest_ignored = []
for door in ignored_doors.keys():
if door.dest not in ignored_doors:
dest_ignored.append(door.dest)
ignored_doors = {**ignored_doors, **dict.fromkeys(dest_ignored)}
post_counter = open_some_counter(door_open, key_layout, ignored_doors.keys())
unique_loc = dict_difference(post_counter.free_locations, best_counter.free_locations)
    # todo: figure out the intention behind this change - a better way to detect that the big key is blocking needed key-only locations?
if len(unique_loc) > 0: # and bk_rule.is_valid
rule.alternate_small_key = bk_rule.small_key_num
rule.alternate_big_key_loc.update(unique_loc)
# elif not bk_rule.is_valid:
# key_layout.key_logic.bk_restricted.update(unique_loc)
def find_worst_counter_wo_bk(small_key_num, accessible_set, door, odd_ctr, key_counter, key_layout):
if key_counter.big_key_opened:
return None, None, None
worst_counter = find_worst_counter(door, odd_ctr, key_counter, key_layout, True)
bk_rule_num = worst_counter.used_keys + 1
bk_access_set = set()
bk_access_set.update(worst_counter.free_locations)
bk_access_set.update(worst_counter.key_only_locations)
if bk_rule_num == small_key_num and len(bk_access_set ^ accessible_set) == 0:
return None, None, None
door_open = find_next_counter(door, worst_counter, key_layout)
ignored_doors = dict_intersection(worst_counter.child_doors, door_open.child_doors)
dest_ignored = []
for door in ignored_doors.keys():
if door.dest not in ignored_doors:
dest_ignored.append(door.dest)
ignored_doors = {**ignored_doors, **dict.fromkeys(dest_ignored)}
post_counter = open_some_counter(door_open, key_layout, ignored_doors.keys())
return worst_counter, post_counter, bk_rule_num
def open_a_door(door, child_state, flat_proposal):
if door.bigKey or door.name in special_big_key_doors:
child_state.big_key_opened = True
child_state.avail_doors.extend(child_state.big_doors)
child_state.opened_doors.extend(set([d.door for d in child_state.big_doors]))
child_state.big_doors.clear()
else:
child_state.opened_doors.append(door)
doors_to_open = [x for x in child_state.small_doors if x.door == door]
child_state.small_doors[:] = [x for x in child_state.small_doors if x.door != door]
child_state.avail_doors.extend(doors_to_open)
dest_door = door.dest
if dest_door in flat_proposal and door.type != DoorType.SpiralStairs:
child_state.opened_doors.append(dest_door)
if child_state.in_door_list_ic(dest_door, child_state.small_doors):
now_available = [x for x in child_state.small_doors if x.door == dest_door]
child_state.small_doors[:] = [x for x in child_state.small_doors if x.door != dest_door]
child_state.avail_doors.extend(now_available)
# allows dest doors
def unique_doors(doors):
unique_d_set = []
for d in doors:
if d.door not in unique_d_set:
unique_d_set.append(d.door)
return unique_d_set
# does not allow dest doors
def count_unique_sm_doors(doors):
unique_d_set = set()
for d in doors:
if d not in unique_d_set and (d.dest not in unique_d_set or d.type == DoorType.SpiralStairs) and not d.bigKey:
unique_d_set.add(d)
return len(unique_d_set)
def big_key_progress(key_counter):
return not only_sm_doors(key_counter) or exist_big_chest(key_counter)
def only_sm_doors(key_counter):
for door in key_counter.child_doors:
if door.bigKey:
return False
return True
# doesn't count dest doors
def count_unique_small_doors(key_counter, proposal):
cnt = 0
counted = set()
for door in key_counter.child_doors:
if door in proposal and door not in counted:
cnt += 1
counted.add(door)
if door.type != DoorType.SpiralStairs:
counted.add(door.dest)
return cnt
def exist_relevant_big_doors(key_counter, key_layout):
bk_counter = find_counter(key_counter.open_doors, True, key_layout, False)
if bk_counter is not None:
diff = dict_difference(bk_counter.free_locations, key_counter.free_locations)
if len(diff) > 0:
return True
diff = dict_difference(bk_counter.key_only_locations, key_counter.key_only_locations)
if len(diff) > 0:
return True
diff = dict_difference(bk_counter.child_doors, key_counter.child_doors)
if len(diff) > 0:
return True
return False
def exist_big_chest(key_counter):
for loc in key_counter.free_locations:
if '- Big Chest' in loc.name:
return True
return False
def count_locations_big_optional(locations, bk=False):
cnt = 0
for loc in locations:
if bk or '- Big Chest' not in loc.name:
cnt += 1
return cnt
def filter_big_chest(locations):
return [x for x in locations if '- Big Chest' not in x.name]
def count_locations_exclude_logic(locations, key_logic):
cnt = 0
for loc in locations:
if not location_is_bk_locked(loc, key_logic) and not loc.forced_item and not prize_or_event(loc):
cnt += 1
return cnt
def location_is_bk_locked(loc, key_logic):
return loc in key_logic.bk_chests or loc in key_logic.bk_locked
def prize_or_event(loc):
return loc.name in dungeon_events or '- Prize' in loc.name or loc.name in ['Agahnim 1', 'Agahnim 2']
def count_free_locations(state):
cnt = 0
for loc in state.found_locations:
if not prize_or_event(loc) and not loc.forced_item:
cnt += 1
return cnt
def count_locations_exclude_big_chest(state):
cnt = 0
for loc in state.found_locations:
if '- Big Chest' not in loc.name and not loc.forced_item and not prize_or_event(loc):
cnt += 1
return cnt
def count_small_key_only_locations(state):
cnt = 0
for loc in state.found_locations:
if loc.forced_item and loc.item.smallkey:
cnt += 1
return cnt
def big_chest_in_locations(locations):
return len(find_big_chest_locations(locations)) > 0
def find_big_chest_locations(locations):
ret = []
for loc in locations:
if 'Big Chest' in loc.name:
ret.append(loc)
return ret
def find_big_key_locked_locations(locations):
ret = []
for loc in locations:
if loc.name in ["Thieves' Town - Blind's Cell", "Hyrule Castle - Zelda's Chest"]:
ret.append(loc)
return ret
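# explores all currently available doors, visiting regions whose traversal rules validate
# and queueing their doors until nothing new is reachable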
def expand_key_state(state, flat_proposal, world, player):
while len(state.avail_doors) > 0:
exp_door = state.next_avail_door()
door = exp_door.door
connect_region = world.get_entrance(door.name, player).connected_region
if state.validate(door, connect_region, world, player):
state.visit_region(connect_region, key_checks=True)
state.add_all_doors_check_keys(connect_region, flat_proposal, world, player)
def flatten_pair_list(paired_list):
flat_list = []
for d in paired_list:
if type(d) is tuple:
flat_list.append(d[0])
flat_list.append(d[1])
else:
flat_list.append(d)
return flat_list
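# records, for every key-only location, the set of door rules that can grant access to it,
# then relaxes rule minimums via adjust_key_location_mins (with and without the big-key
# alternates) and finishes with the deep progression check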
def check_rules(original_counter, key_layout, world, player):
all_key_only = set()
key_only_map = {}
queue = deque([(None, original_counter, original_counter.key_only_locations)])
completed = set()
completed.add(cid(original_counter, key_layout))
while len(queue) > 0:
queue = deque(sorted(queue, key=queue_sorter_2))
access_door, counter, key_only_loc = queue.popleft()
for loc in key_only_loc:
if loc not in all_key_only:
all_key_only.add(loc)
access_rules = []
key_only_map[loc] = access_rules
else:
access_rules = key_only_map[loc]
if access_door is None or access_door.name not in key_layout.key_logic.door_rules.keys():
if access_door is None or not access_door.bigKey:
access_rules.append(DoorRules(0, True))
else:
rule = key_layout.key_logic.door_rules[access_door.name]
if rule not in access_rules:
access_rules.append(rule)
for child in counter.child_doors.keys():
if not child.bigKey or not key_layout.big_key_special or counter.big_key_opened:
next_counter = find_next_counter(child, counter, key_layout)
c_id = cid(next_counter, key_layout)
if c_id not in completed:
completed.add(c_id)
new_key_only = dict_difference(next_counter.key_only_locations, counter.key_only_locations)
queue.append((child, next_counter, new_key_only))
min_rule_bk = defaultdict(list)
min_rule_non_bk = defaultdict(list)
check_non_bk = False
for loc, rule_list in key_only_map.items():
m_bk = None
m_nbk = None
for rule in rule_list:
if m_bk is None or rule.small_key_num <= m_bk:
min_rule_bk[loc].append(rule)
m_bk = rule.small_key_num
if rule.alternate_small_key is None:
ask = rule.small_key_num
else:
check_non_bk = True
ask = rule.alternate_small_key
if m_nbk is None or ask <= m_nbk:
min_rule_non_bk[loc].append(rule)
m_nbk = rule.alternate_small_key
adjust_key_location_mins(key_layout, min_rule_bk, lambda r: r.small_key_num, lambda r, v: setattr(r, 'small_key_num', v))
if check_non_bk:
adjust_key_location_mins(key_layout, min_rule_non_bk, lambda r: r.small_key_num if r.alternate_small_key is None else r.alternate_small_key,
lambda r, v: r if r.alternate_small_key is None else setattr(r, 'alternate_small_key', v))
check_rules_deep(original_counter, key_layout, world, player)
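# simulates collecting small keys: key-only locations whose cheapest rule is already
# satisfiable count as collected keys, and any rules left unsatisfied are lowered to the
# number of keys that can actually be collected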
def adjust_key_location_mins(key_layout, min_rules, getter, setter):
collected_keys = key_layout.max_chests
collected_locs = set()
changed = True
while changed:
changed = False
for_removal = []
for loc, rules in min_rules.items():
if loc in collected_locs:
for_removal.append(loc)
for rule in rules:
if getter(rule) <= collected_keys and loc not in collected_locs:
changed = True
collected_keys += 1
collected_locs.add(loc)
for_removal.append(loc)
for loc in for_removal:
del min_rules[loc]
if len(min_rules) > 0:
for loc, rules in min_rules.items():
for rule in rules:
setter(rule, collected_keys)
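# breadth-first check over all counter states: verifies that from every reachable state at
# least one child door can be opened with the keys and big key obtainable there, reducing
# overly strict small-key rules when a state would otherwise be stuck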
def check_rules_deep(original_counter, key_layout, world, player):
key_logic = key_layout.key_logic
big_locations = {x for x in key_layout.all_chest_locations if x not in key_logic.bk_restricted}
queue = deque([original_counter])
completed = set()
completed.add(cid(original_counter, key_layout))
last_counter = None
bail = 0
while len(queue) > 0:
counter = queue.popleft()
if counter == last_counter:
bail += 1
if bail > 10:
raise Exception('Key logic issue, during deep rule check: %s' % key_layout.sector.name)
else:
bail = 0
last_counter = counter
chest_keys = available_chest_small_keys_logic(counter, world, player, key_logic.sm_restricted)
bk_drop = big_key_drop_available(counter)
big_avail = counter.big_key_opened or bk_drop
big_maybe_not_found = not counter.big_key_opened and not bk_drop # better named as big_missing?
if not key_layout.big_key_special and not big_avail:
if world.bigkeyshuffle[player]:
big_avail = True
else:
for location in counter.free_locations:
if location not in key_logic.bk_restricted:
big_avail = True
break
outstanding_big_locs = {x for x in big_locations if x not in counter.free_locations}
if big_maybe_not_found:
if len(outstanding_big_locs) == 0 and not key_layout.big_key_special:
big_maybe_not_found = False
big_uses_chest = big_avail and not key_layout.big_key_special
collected_alt = len(counter.key_only_locations) + chest_keys
if big_uses_chest and chest_keys == count_locations_big_optional(counter.free_locations, counter.big_key_opened):
chest_keys -= 1
collected = len(counter.key_only_locations) + chest_keys
can_progress = len(counter.child_doors) == 0
smalls_opened, big_opened = False, False
small_rules = []
for door in counter.child_doors.keys():
can_open = False
if door.bigKey and big_avail:
can_open = True
elif door.name in key_logic.door_rules.keys():
rule = key_logic.door_rules[door.name]
small_rules.append(rule)
if rule_satisfied(rule, collected, collected_alt, outstanding_big_locs, chest_keys, key_layout):
can_open = True
smalls_opened = True
elif not door.bigKey:
can_open = True
if can_open:
can_progress = smalls_opened or not big_maybe_not_found
next_counter = find_next_counter(door, counter, key_layout)
c_id = cid(next_counter, key_layout)
if c_id not in completed:
completed.add(c_id)
queue.append(next_counter)
if not can_progress:
if len(small_rules) > 0: # zero could be indicative of a problem, but also, the big key is now required
reduce_rules(small_rules, collected, collected_alt)
queue.append(counter) # run it through again
else:
raise Exception('Possible problem with generation or bk rules')
def rule_satisfied(rule, collected, collected_alt, outstanding_big_locs, chest_keys, key_layout):
if collected >= rule.small_key_num:
return True
if rule.allow_small and collected >= rule.small_key_num-1 and chest_keys < key_layout.max_chests:
return True
rule_diff = outstanding_big_locs.difference(rule.alternate_big_key_loc)
if rule.alternate_small_key is not None and len(rule_diff) == 0 and collected >= rule.alternate_small_key:
return True
if collected_alt > collected:
if collected_alt >= rule.small_key_num:
return True
if rule.allow_small and collected_alt >= rule.small_key_num-1 and chest_keys+1 < key_layout.max_chests:
return True
if rule.alternate_small_key is not None and len(rule_diff) == 0 and collected_alt >= rule.alternate_small_key:
return True
return False
def reduce_rules(small_rules, collected, collected_alt):
smallest_rules = []
min_num = None
for rule in small_rules:
if min_num is None or rule.small_key_num <= min_num:
if min_num is not None and rule.small_key_num < min_num:
min_num = rule.small_key_num
smallest_rules.clear()
elif min_num is None:
min_num = rule.small_key_num
smallest_rules.append(rule)
for rule in smallest_rules:
if rule.allow_small: # we are already reducing it
rule.allow_small = False
if min_num > collected_alt > collected:
rule.small_key_num = collected_alt
else:
rule.small_key_num = collected
def set_paired_rules(key_logic, world, player):
for d_name, rule in key_logic.door_rules.items():
door = world.get_door(d_name, player)
if door.dest.name in key_logic.door_rules.keys():
rule.opposite = key_logic.door_rules[door.dest.name]
def check_bk_special(regions, world, player):
for r_name in regions:
region = world.get_region(r_name, player)
for loc in region.locations:
if loc.forced_big_key():
return True
return False
# Soft lock stuff
def validate_key_layout(key_layout, world, player):
# retro is all good - except for hyrule castle in standard mode
if (world.retro[player] and (world.mode[player] != 'standard' or key_layout.sector.name != 'Hyrule Castle')) or world.logic[player] == 'nologic':
return True
flat_proposal = key_layout.flat_prop
state = ExplorationState(dungeon=key_layout.sector.name)
state.key_locations = key_layout.max_chests
state.big_key_special = check_bk_special(key_layout.sector.regions, world, player)
for region in key_layout.start_regions:
state.visit_region(region, key_checks=True)
state.add_all_doors_check_keys(region, flat_proposal, world, player)
return validate_key_layout_sub_loop(key_layout, state, {}, flat_proposal, None, 0, world, player)
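# recursively tries every order of opening small and big key doors; returns False if any
# reachable state runs out of locations to hold the keys it still needs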
def validate_key_layout_sub_loop(key_layout, state, checked_states, flat_proposal, prev_state, prev_avail, world, player):
expand_key_state(state, flat_proposal, world, player)
smalls_avail = len(state.small_doors) > 0 # de-dup crystal repeats
num_bigs = 1 if len(state.big_doors) > 0 else 0 # all or nothing
if not smalls_avail and num_bigs == 0:
return True # I think that's the end
# todo: fix state to separate out these types
ttl_locations = count_free_locations(state) if state.big_key_opened else count_locations_exclude_big_chest(state)
ttl_small_key_only = count_small_key_only_locations(state)
available_small_locations = cnt_avail_small_locations(ttl_locations, ttl_small_key_only, state, world, player)
available_big_locations = cnt_avail_big_locations(ttl_locations, state, world, player)
if invalid_self_locking_key(key_layout, state, prev_state, prev_avail, world, player):
return False
# todo: allow more key shuffles - refine placement rules
# if (not smalls_avail or available_small_locations == 0) and (state.big_key_opened or num_bigs == 0 or available_big_locations == 0):
found_forced_bk = state.found_forced_bk()
smalls_done = not smalls_avail or not enough_small_locations(state, available_small_locations)
bk_done = state.big_key_opened or num_bigs == 0 or (available_big_locations == 0 and not found_forced_bk)
if smalls_done and bk_done:
return False
else:
if smalls_avail and available_small_locations > 0:
for exp_door in state.small_doors:
state_copy = state.copy()
open_a_door(exp_door.door, state_copy, flat_proposal)
state_copy.used_smalls += 1
if state_copy.used_smalls > ttl_small_key_only:
state_copy.used_locations += 1
code = state_id(state_copy, flat_proposal)
if code not in checked_states.keys():
valid = validate_key_layout_sub_loop(key_layout, state_copy, checked_states, flat_proposal,
state, available_small_locations, world, player)
checked_states[code] = valid
else:
valid = checked_states[code]
if not valid:
return False
if not state.big_key_opened and (available_big_locations >= num_bigs > 0 or (found_forced_bk and num_bigs > 0)):
state_copy = state.copy()
open_a_door(state.big_doors[0].door, state_copy, flat_proposal)
if not found_forced_bk:
state_copy.used_locations += 1
code = state_id(state_copy, flat_proposal)
if code not in checked_states.keys():
valid = validate_key_layout_sub_loop(key_layout, state_copy, checked_states, flat_proposal,
state, available_small_locations, world, player)
checked_states[code] = valid
else:
valid = checked_states[code]
if not valid:
return False
return True
def invalid_self_locking_key(key_layout, state, prev_state, prev_avail, world, player):
if prev_state is None or state.used_smalls == prev_state.used_smalls:
return False
new_bk_doors = set(state.big_doors).difference(set(prev_state.big_doors))
state_copy = state.copy()
while len(new_bk_doors) > 0:
for door in new_bk_doors:
open_a_door(door.door, state_copy, key_layout.flat_prop)
new_bk_doors = set(state_copy.big_doors).difference(set(prev_state.big_doors))
expand_key_state(state_copy, key_layout.flat_prop, world, player)
new_locations = set(state_copy.found_locations).difference(set(prev_state.found_locations))
important_found = False
for loc in new_locations:
important_found |= important_location(loc, world, player)
if not important_found:
return False
new_small_doors = set(state.small_doors).difference(set(prev_state.small_doors))
if len(new_small_doors) > 0:
return False
return prev_avail - 1 == 0
def enough_small_locations(state, avail_small_loc):
unique_d_set = set()
for exp_door in state.small_doors:
door = exp_door.door
if door not in unique_d_set and door.dest not in unique_d_set:
unique_d_set.add(door)
return avail_small_loc >= len(unique_d_set)
def cnt_avail_small_locations(free_locations, key_only, state, world, player):
if not world.keyshuffle[player] and not world.retro[player]:
bk_adj = 1 if state.big_key_opened and not state.big_key_special else 0
avail_chest_keys = min(free_locations - bk_adj, state.key_locations - key_only)
return max(0, avail_chest_keys + key_only - state.used_smalls)
return state.key_locations - state.used_smalls
def cnt_avail_big_locations(ttl_locations, state, world, player):
if not world.bigkeyshuffle[player]:
return max(0, ttl_locations - state.used_locations) if not state.big_key_special else 0
return 1 if not state.big_key_special else 0
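# breadth-first search over door-opening order: starting from the dungeon entrance state,
# generates a KeyCounter for every reachable combination of opened key doors, keyed by
# state_id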
def create_key_counters(key_layout, world, player):
key_counters = {}
flat_proposal = key_layout.flat_prop
state = ExplorationState(dungeon=key_layout.sector.name)
if world.doorShuffle[player] == 'vanilla':
state.key_locations = len(world.get_dungeon(key_layout.sector.name, player).small_keys)
else:
state.key_locations = world.dungeon_layouts[player][key_layout.sector.name].key_doors_num
state.big_key_special, special_region = False, None
for region in key_layout.sector.regions:
for location in region.locations:
if location.forced_big_key():
state.big_key_special = True
special_region = region
for region in key_layout.start_regions:
state.visit_region(region, key_checks=True)
state.add_all_doors_check_keys(region, flat_proposal, world, player)
expand_key_state(state, flat_proposal, world, player)
code = state_id(state, key_layout.flat_prop)
key_counters[code] = create_key_counter(state, key_layout, world, player)
queue = deque([(key_counters[code], state)])
while len(queue) > 0:
next_key_counter, parent_state = queue.popleft()
for door in next_key_counter.child_doors:
child_state = parent_state.copy()
if door.bigKey or door.name in special_big_key_doors:
key_layout.key_logic.bk_doors.add(door)
# open the door, if possible
if not door.bigKey or not child_state.big_key_special or child_state.visited_at_all(special_region):
open_a_door(door, child_state, flat_proposal)
expand_key_state(child_state, flat_proposal, world, player)
code = state_id(child_state, key_layout.flat_prop)
if code not in key_counters.keys():
child_kr = create_key_counter(child_state, key_layout, world, player)
key_counters[code] = child_kr
queue.append((child_kr, child_state))
return key_counters
def create_key_counter(state, key_layout, world, player):
key_counter = KeyCounter(key_layout.max_chests)
key_counter.child_doors.update(dict.fromkeys(unique_doors(state.small_doors+state.big_doors)))
for loc in state.found_locations:
if important_location(loc, world, player):
key_counter.important_location = True
key_counter.other_locations[loc] = None
key_counter.important_locations[loc] = None
elif loc.forced_item and loc.item.name == key_layout.key_logic.small_key_name:
key_counter.key_only_locations[loc] = None
elif loc.forced_item and loc.item.name == key_layout.key_logic.bk_name:
key_counter.other_locations[loc] = None
elif loc.name not in dungeon_events:
key_counter.free_locations[loc] = None
else:
key_counter.other_locations[loc] = None
key_counter.open_doors.update(dict.fromkeys(state.opened_doors))
key_counter.used_keys = count_unique_sm_doors(state.opened_doors)
key_counter.big_key_opened = state.big_key_opened
return key_counter
imp_locations = None
def imp_locations_factory(world, player):
global imp_locations
if imp_locations:
return imp_locations
imp_locations = ['Agahnim 1', 'Agahnim 2', 'Attic Cracked Floor', 'Suspicious Maiden']
if world.mode[player] == 'standard':
imp_locations.append('Zelda Pickup')
imp_locations.append('Zelda Dropoff')
return imp_locations
def important_location(loc, world, player):
return '- Prize' in loc.name or loc.name in imp_locations_factory(world, player) or (loc.forced_big_key())
def create_odd_key_counter(door, parent_counter, key_layout, world, player):
odd_counter = KeyCounter(key_layout.max_chests)
next_counter = find_next_counter(door, parent_counter, key_layout)
odd_counter.free_locations = dict_difference(next_counter.free_locations, parent_counter.free_locations)
odd_counter.key_only_locations = dict_difference(next_counter.key_only_locations, parent_counter.key_only_locations)
odd_counter.child_doors = {}
for d in next_counter.child_doors:
if d not in parent_counter.child_doors and (d.type == DoorType.SpiralStairs or d.dest not in parent_counter.child_doors):
odd_counter.child_doors[d] = None
odd_counter.other_locations = dict_difference(next_counter.other_locations, parent_counter.other_locations)
odd_counter.important_locations = dict_difference(next_counter.important_locations, parent_counter.important_locations)
for loc in odd_counter.other_locations:
if important_location(loc, world, player):
odd_counter.important_location = True
return odd_counter
def dict_difference(dict_a, dict_b):
return dict.fromkeys([x for x in dict_a.keys() if x not in dict_b.keys()])
def dict_intersection(dict_a, dict_b):
return dict.fromkeys([x for x in dict_a.keys() if x in dict_b.keys()])
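# state_id / counter_id encode a state as a bit string: the first character is the big key
# flag, followed by one bit per door in flat_proposal - e.g. with three proposed doors and
# only the second one opened, the id is '0010'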
def state_id(state, flat_proposal):
s_id = '1' if state.big_key_opened else '0'
for d in flat_proposal:
s_id += '1' if d in state.opened_doors else '0'
return s_id
def find_counter(opened_doors, bk_hint, key_layout, raise_on_error=True):
counter = find_counter_hint(opened_doors, bk_hint, key_layout)
if counter is not None:
return counter
more_doors = []
for door in opened_doors.keys():
more_doors.append(door)
if door.dest not in opened_doors.keys():
more_doors.append(door.dest)
if len(more_doors) > len(opened_doors.keys()):
counter = find_counter_hint(dict.fromkeys(more_doors), bk_hint, key_layout)
if counter is not None:
return counter
if raise_on_error:
raise Exception('Unable to find door permutation. Init CID: %s' % counter_id(opened_doors, bk_hint, key_layout.flat_prop))
return None
def find_counter_hint(opened_doors, bk_hint, key_layout):
cid = counter_id(opened_doors, bk_hint, key_layout.flat_prop)
if cid in key_layout.key_counters.keys():
return key_layout.key_counters[cid]
if not bk_hint:
cid = counter_id(opened_doors, True, key_layout.flat_prop)
if cid in key_layout.key_counters.keys():
return key_layout.key_counters[cid]
return None
def find_max_counter(key_layout):
max_counter = find_counter_hint(dict.fromkeys(key_layout.flat_prop), False, key_layout)
if max_counter is None:
raise Exception("Max Counter is none - something is amiss")
if len(max_counter.child_doors) > 0:
max_counter = find_counter_hint(dict.fromkeys(key_layout.flat_prop), True, key_layout)
return max_counter
def counter_id(opened_doors, bk_unlocked, flat_proposal):
s_id = '1' if bk_unlocked else '0'
for d in flat_proposal:
s_id += '1' if d in opened_doors.keys() else '0'
return s_id
def cid(counter, key_layout):
return counter_id(counter.open_doors, counter.big_key_opened, key_layout.flat_prop)
# class SoftLockException(Exception):
# pass
# vanilla validation code
def validate_vanilla_key_logic(world, player):
validators = {
'Hyrule Castle': val_hyrule,
'Eastern Palace': val_eastern,
'Desert Palace': val_desert,
'Tower of Hera': val_hera,
'Agahnims Tower': val_tower,
'Palace of Darkness': val_pod,
'Swamp Palace': val_swamp,
'Skull Woods': val_skull,
'Thieves Town': val_thieves,
'Ice Palace': val_ice,
'Misery Mire': val_mire,
'Turtle Rock': val_turtle,
'Ganons Tower': val_ganons
}
key_logic_dict = world.key_logic[player]
for key, key_logic in key_logic_dict.items():
validators[key](key_logic, world, player)
def val_hyrule(key_logic, world, player):
if world.mode[player] == 'standard':
val_rule(key_logic.door_rules['Hyrule Dungeon Map Room Key Door S'], 1)
val_rule(key_logic.door_rules['Hyrule Dungeon Armory Interior Key Door N'], 2)
val_rule(key_logic.door_rules['Sewers Dark Cross Key Door N'], 3)
val_rule(key_logic.door_rules['Sewers Key Rat Key Door N'], 4)
else:
val_rule(key_logic.door_rules['Sewers Secret Room Key Door S'], 2)
val_rule(key_logic.door_rules['Sewers Dark Cross Key Door N'], 2)
val_rule(key_logic.door_rules['Hyrule Dungeon Map Room Key Door S'], 2)
val_rule(key_logic.door_rules['Hyrule Dungeon Armory Interior Key Door N'], 4)
def val_eastern(key_logic, world, player):
val_rule(key_logic.door_rules['Eastern Dark Square Key Door WN'], 2, True, 'Eastern Palace - Big Key Chest', 1, {'Eastern Palace - Big Key Chest'})
val_rule(key_logic.door_rules['Eastern Darkness Up Stairs'], 2)
assert world.get_location('Eastern Palace - Big Chest', player) in key_logic.bk_restricted
assert world.get_location('Eastern Palace - Boss', player) in key_logic.bk_restricted
assert len(key_logic.bk_restricted) == 2
def val_desert(key_logic, world, player):
val_rule(key_logic.door_rules['Desert East Wing Key Door EN'], 4)
val_rule(key_logic.door_rules['Desert Tiles 1 Up Stairs'], 2)
val_rule(key_logic.door_rules['Desert Beamos Hall NE'], 3)
val_rule(key_logic.door_rules['Desert Tiles 2 NE'], 4)
assert world.get_location('Desert Palace - Big Chest', player) in key_logic.bk_restricted
assert world.get_location('Desert Palace - Boss', player) in key_logic.bk_restricted
assert len(key_logic.bk_restricted) == 2
def val_hera(key_logic, world, player):
val_rule(key_logic.door_rules['Hera Lobby Key Stairs'], 1, True, 'Tower of Hera - Big Key Chest')
assert world.get_location('Tower of Hera - Big Chest', player) in key_logic.bk_restricted
assert world.get_location('Tower of Hera - Compass Chest', player) in key_logic.bk_restricted
assert world.get_location('Tower of Hera - Boss', player) in key_logic.bk_restricted
assert len(key_logic.bk_restricted) == 3
def val_tower(key_logic, world, player):
val_rule(key_logic.door_rules['Tower Room 03 Up Stairs'], 1)
val_rule(key_logic.door_rules['Tower Dark Maze ES'], 2)
val_rule(key_logic.door_rules['Tower Dark Archers Up Stairs'], 3)
val_rule(key_logic.door_rules['Tower Circle of Pots ES'], 4)
def val_pod(key_logic, world, player):
val_rule(key_logic.door_rules['PoD Arena Main NW'], 4)
val_rule(key_logic.door_rules['PoD Basement Ledge Up Stairs'], 6, True, 'Palace of Darkness - Big Key Chest')
val_rule(key_logic.door_rules['PoD Compass Room SE'], 6, True, 'Palace of Darkness - Harmless Hellway')
val_rule(key_logic.door_rules['PoD Falling Bridge WN'], 6)
val_rule(key_logic.door_rules['PoD Dark Pegs WN'], 6)
assert world.get_location('Palace of Darkness - Big Chest', player) in key_logic.bk_restricted
assert world.get_location('Palace of Darkness - Boss', player) in key_logic.bk_restricted
assert len(key_logic.bk_restricted) == 2
def val_swamp(key_logic, world, player):
val_rule(key_logic.door_rules['Swamp Entrance Down Stairs'], 1)
val_rule(key_logic.door_rules['Swamp Pot Row WS'], 2)
val_rule(key_logic.door_rules['Swamp Trench 1 Key Ledge NW'], 3)
val_rule(key_logic.door_rules['Swamp Hub North Ledge N'], 5)
val_rule(key_logic.door_rules['Swamp Hub WN'], 6)
val_rule(key_logic.door_rules['Swamp Waterway NW'], 6)
assert world.get_location('Swamp Palace - Entrance', player) in key_logic.bk_restricted
assert len(key_logic.bk_restricted) == 1
def val_skull(key_logic, world, player):
val_rule(key_logic.door_rules['Skull 3 Lobby NW'], 4)
val_rule(key_logic.door_rules['Skull Spike Corner ES'], 5)
def val_thieves(key_logic, world, player):
val_rule(key_logic.door_rules['Thieves Hallway WS'], 1)
val_rule(key_logic.door_rules['Thieves Spike Switch Up Stairs'], 3)
val_rule(key_logic.door_rules['Thieves Conveyor Bridge WS'], 3, True, 'Thieves\' Town - Big Chest')
assert world.get_location('Thieves\' Town - Attic', player) in key_logic.bk_restricted
assert world.get_location('Thieves\' Town - Boss', player) in key_logic.bk_restricted
assert world.get_location('Thieves\' Town - Blind\'s Cell', player) in key_logic.bk_restricted
assert world.get_location('Thieves\' Town - Big Chest', player) in key_logic.bk_restricted
assert len(key_logic.bk_restricted) == 4
def val_ice(key_logic, world, player):
val_rule(key_logic.door_rules['Ice Jelly Key Down Stairs'], 1)
val_rule(key_logic.door_rules['Ice Conveyor SW'], 2)
val_rule(key_logic.door_rules['Ice Backwards Room Down Stairs'], 5)
assert world.get_location('Ice Palace - Boss', player) in key_logic.bk_restricted
assert world.get_location('Ice Palace - Big Chest', player) in key_logic.bk_restricted
assert len(key_logic.bk_restricted) == 2
def val_mire(key_logic, world, player):
mire_west_wing = {'Misery Mire - Big Key Chest', 'Misery Mire - Compass Chest'}
val_rule(key_logic.door_rules['Mire Spikes NW'], 3) # todo: is sometimes 3 or 5? best_counter order matters
# val_rule(key_logic.door_rules['Mire Spike Barrier NE'], 4) # kind of a waste mostly
val_rule(key_logic.door_rules['Mire Hub WS'], 5, False, None, 3, mire_west_wing)
val_rule(key_logic.door_rules['Mire Conveyor Crystal WS'], 6, False, None, 4, mire_west_wing)
assert world.get_location('Misery Mire - Boss', player) in key_logic.bk_restricted
assert world.get_location('Misery Mire - Big Chest', player) in key_logic.bk_restricted
assert len(key_logic.bk_restricted) == 2
def val_turtle(key_logic, world, player):
# todo: check vanilla key logic when TR back doors are accessible
if world.shuffle[player] == 'vanilla' and world.mode[player] != 'inverted':
val_rule(key_logic.door_rules['TR Hub NW'], 1)
val_rule(key_logic.door_rules['TR Pokey 1 NW'], 2)
val_rule(key_logic.door_rules['TR Chain Chomps Down Stairs'], 3)
val_rule(key_logic.door_rules['TR Pokey 2 ES'], 6, True, 'Turtle Rock - Big Key Chest', 4, {'Turtle Rock - Big Key Chest'})
val_rule(key_logic.door_rules['TR Crystaroller Down Stairs'], 5)
val_rule(key_logic.door_rules['TR Dash Bridge WS'], 6)
assert world.get_location('Turtle Rock - Eye Bridge - Bottom Right', player) in key_logic.bk_restricted
assert world.get_location('Turtle Rock - Eye Bridge - Top Left', player) in key_logic.bk_restricted
assert world.get_location('Turtle Rock - Eye Bridge - Top Right', player) in key_logic.bk_restricted
assert world.get_location('Turtle Rock - Eye Bridge - Bottom Left', player) in key_logic.bk_restricted
assert world.get_location('Turtle Rock - Boss', player) in key_logic.bk_restricted
assert world.get_location('Turtle Rock - Crystaroller Room', player) in key_logic.bk_restricted
assert world.get_location('Turtle Rock - Big Chest', player) in key_logic.bk_restricted
assert len(key_logic.bk_restricted) == 7
def val_ganons(key_logic, world, player):
rando_room = {'Ganons Tower - Randomizer Room - Top Left', 'Ganons Tower - Randomizer Room - Top Right', 'Ganons Tower - Randomizer Room - Bottom Left', 'Ganons Tower - Randomizer Room - Bottom Right'}
compass_room = {'Ganons Tower - Compass Room - Top Left', 'Ganons Tower - Compass Room - Top Right', 'Ganons Tower - Compass Room - Bottom Left', 'Ganons Tower - Compass Room - Bottom Right'}
gt_middle = {'Ganons Tower - Big Key Room - Left', 'Ganons Tower - Big Key Chest', 'Ganons Tower - Big Key Room - Right', 'Ganons Tower - Bob\'s Chest', 'Ganons Tower - Big Chest'}
val_rule(key_logic.door_rules['GT Double Switch EN'], 6, False, None, 4, rando_room.union({'Ganons Tower - Firesnake Room'}))
val_rule(key_logic.door_rules['GT Hookshot ES'], 7, False, 'Ganons Tower - Map Chest', 5, {'Ganons Tower - Map Chest'})
val_rule(key_logic.door_rules['GT Tile Room EN'], 6, False, None, 5, compass_room)
val_rule(key_logic.door_rules['GT Firesnake Room SW'], 7, False, None, 5, rando_room)
val_rule(key_logic.door_rules['GT Conveyor Star Pits EN'], 6, False, None, 5, gt_middle) # should be 7?
val_rule(key_logic.door_rules['GT Mini Helmasaur Room WN'], 6) # not sure about this 6...
val_rule(key_logic.door_rules['GT Crystal Circles SW'], 8)
assert world.get_location('Ganons Tower - Mini Helmasaur Room - Left', player) in key_logic.bk_restricted
assert world.get_location('Ganons Tower - Mini Helmasaur Room - Right', player) in key_logic.bk_restricted
assert world.get_location('Ganons Tower - Big Chest', player) in key_logic.bk_restricted
assert world.get_location('Ganons Tower - Pre-Moldorm Chest', player) in key_logic.bk_restricted
assert world.get_location('Ganons Tower - Validation Chest', player) in key_logic.bk_restricted
assert len(key_logic.bk_restricted) == 5
def val_rule(rule, skn, allow=False, loc=None, askn=None, setCheck=None):
if setCheck is None:
setCheck = set()
assert rule.small_key_num == skn
assert rule.allow_small == allow
assert rule.small_location == loc or rule.small_location.name == loc
assert rule.alternate_small_key == askn
assert len(setCheck) == len(rule.alternate_big_key_loc)
for loc in rule.alternate_big_key_loc:
assert loc.name in setCheck
# Soft lock stuff
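# verifies that the items actually placed cannot key-lock the dungeon: every reachable
# counter state must either expose the big key or have enough findable small keys to open
# another door, otherwise the unopenable locations are logged and validation fails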
def validate_key_placement(key_layout, world, player):
if world.retro[player] or world.accessibility[player] == 'none':
return True # Can't keylock in retro. Expected if beatable only.
max_counter = find_max_counter(key_layout)
keys_outside = 0
big_key_outside = False
smallkey_name = dungeon_keys[key_layout.sector.name]
bigkey_name = dungeon_bigs[key_layout.sector.name]
if world.keyshuffle[player]:
keys_outside = key_layout.max_chests - sum(1 for i in max_counter.free_locations if i.item is not None and i.item.name == smallkey_name and i.item.player == player)
if world.bigkeyshuffle[player]:
max_counter = find_max_counter(key_layout)
big_key_outside = bigkey_name not in (l.item.name for l in max_counter.free_locations if l.item)
for code, counter in key_layout.key_counters.items():
if len(counter.child_doors) == 0:
continue
if key_layout.big_key_special:
big_found = any(i.forced_item is not None and i.item.bigkey for i in counter.other_locations) or big_key_outside
else:
big_found = any(i.item is not None and i.item.name == bigkey_name for i in counter.free_locations if "- Big Chest" not in i.name) or big_key_outside
if counter.big_key_opened and not big_found:
continue # Can't get to this state
found_locations = set(i for i in counter.free_locations if big_found or "- Big Chest" not in i.name)
found_keys = sum(1 for i in found_locations if i.item is not None and i.item.name == smallkey_name and i.item.player == player) + \
len(counter.key_only_locations) + keys_outside
can_progress = (not counter.big_key_opened and big_found and any(d.bigKey for d in counter.child_doors)) or \
found_keys > counter.used_keys and any(not d.bigKey for d in counter.child_doors)
if not can_progress:
missing_locations = set(max_counter.free_locations.keys()).difference(found_locations)
missing_items = [l for l in missing_locations if l.item is None or (l.item.name != smallkey_name and l.item.name != bigkey_name) or "- Boss" in l.name]
# missing_key_only = set(max_counter.key_only_locations.keys()).difference(counter.key_only_locations.keys()) # do freestanding keys matter for locations?
if len(missing_items) > 0: # world.accessibility[player]=='locations' and (len(missing_locations)>0 or len(missing_key_only) > 0):
logging.getLogger('').error("Keylock - can't open locations: ")
logging.getLogger('').error("code: " + code)
for i in missing_locations:
logging.getLogger('').error(i)
return False
return True
|
py | 1a4e14c4d2e4db5e683b4e651fdbe6f0d4382bf1 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'GooglePrivacyDlpV2ActionArgs',
'GooglePrivacyDlpV2AuxiliaryTableArgs',
'GooglePrivacyDlpV2BigQueryFieldArgs',
'GooglePrivacyDlpV2BigQueryOptionsArgs',
'GooglePrivacyDlpV2BigQueryTableArgs',
'GooglePrivacyDlpV2BucketingConfigArgs',
'GooglePrivacyDlpV2BucketArgs',
'GooglePrivacyDlpV2CategoricalStatsConfigArgs',
'GooglePrivacyDlpV2CharacterMaskConfigArgs',
'GooglePrivacyDlpV2CharsToIgnoreArgs',
'GooglePrivacyDlpV2CloudStorageFileSetArgs',
'GooglePrivacyDlpV2CloudStorageOptionsArgs',
'GooglePrivacyDlpV2CloudStoragePathArgs',
'GooglePrivacyDlpV2CloudStorageRegexFileSetArgs',
'GooglePrivacyDlpV2ConditionsArgs',
'GooglePrivacyDlpV2ConditionArgs',
'GooglePrivacyDlpV2CryptoDeterministicConfigArgs',
'GooglePrivacyDlpV2CryptoHashConfigArgs',
'GooglePrivacyDlpV2CryptoKeyArgs',
'GooglePrivacyDlpV2CryptoReplaceFfxFpeConfigArgs',
'GooglePrivacyDlpV2CustomInfoTypeArgs',
'GooglePrivacyDlpV2DatastoreOptionsArgs',
'GooglePrivacyDlpV2DateShiftConfigArgs',
'GooglePrivacyDlpV2DeidentifyConfigArgs',
'GooglePrivacyDlpV2DeltaPresenceEstimationConfigArgs',
'GooglePrivacyDlpV2DetectionRuleArgs',
'GooglePrivacyDlpV2DictionaryArgs',
'GooglePrivacyDlpV2EntityIdArgs',
'GooglePrivacyDlpV2ExcludeInfoTypesArgs',
'GooglePrivacyDlpV2ExclusionRuleArgs',
'GooglePrivacyDlpV2ExpressionsArgs',
'GooglePrivacyDlpV2FieldIdArgs',
'GooglePrivacyDlpV2FieldTransformationArgs',
'GooglePrivacyDlpV2FileSetArgs',
'GooglePrivacyDlpV2FindingLimitsArgs',
'GooglePrivacyDlpV2FixedSizeBucketingConfigArgs',
'GooglePrivacyDlpV2HotwordRuleArgs',
'GooglePrivacyDlpV2HybridOptionsArgs',
'GooglePrivacyDlpV2InfoTypeLimitArgs',
'GooglePrivacyDlpV2InfoTypeTransformationsArgs',
'GooglePrivacyDlpV2InfoTypeTransformationArgs',
'GooglePrivacyDlpV2InfoTypeArgs',
'GooglePrivacyDlpV2InspectConfigArgs',
'GooglePrivacyDlpV2InspectJobConfigArgs',
'GooglePrivacyDlpV2InspectionRuleSetArgs',
'GooglePrivacyDlpV2InspectionRuleArgs',
'GooglePrivacyDlpV2JobNotificationEmailsArgs',
'GooglePrivacyDlpV2KAnonymityConfigArgs',
'GooglePrivacyDlpV2KMapEstimationConfigArgs',
'GooglePrivacyDlpV2KindExpressionArgs',
'GooglePrivacyDlpV2KmsWrappedCryptoKeyArgs',
'GooglePrivacyDlpV2LDiversityConfigArgs',
'GooglePrivacyDlpV2LargeCustomDictionaryConfigArgs',
'GooglePrivacyDlpV2LeaveUntransformedArgs',
'GooglePrivacyDlpV2LikelihoodAdjustmentArgs',
'GooglePrivacyDlpV2ManualArgs',
'GooglePrivacyDlpV2NumericalStatsConfigArgs',
'GooglePrivacyDlpV2OutputStorageConfigArgs',
'GooglePrivacyDlpV2PartitionIdArgs',
'GooglePrivacyDlpV2PrimitiveTransformationArgs',
'GooglePrivacyDlpV2PrivacyMetricArgs',
'GooglePrivacyDlpV2ProximityArgs',
'GooglePrivacyDlpV2PublishFindingsToCloudDataCatalogArgs',
'GooglePrivacyDlpV2PublishSummaryToCsccArgs',
'GooglePrivacyDlpV2PublishToPubSubArgs',
'GooglePrivacyDlpV2PublishToStackdriverArgs',
'GooglePrivacyDlpV2QuasiIdFieldArgs',
'GooglePrivacyDlpV2QuasiIdentifierFieldArgs',
'GooglePrivacyDlpV2QuasiIdArgs',
'GooglePrivacyDlpV2RecordConditionArgs',
'GooglePrivacyDlpV2RecordSuppressionArgs',
'GooglePrivacyDlpV2RecordTransformationsArgs',
'GooglePrivacyDlpV2RedactConfigArgs',
'GooglePrivacyDlpV2RegexArgs',
'GooglePrivacyDlpV2ReplaceDictionaryConfigArgs',
'GooglePrivacyDlpV2ReplaceValueConfigArgs',
'GooglePrivacyDlpV2ReplaceWithInfoTypeConfigArgs',
'GooglePrivacyDlpV2RiskAnalysisJobConfigArgs',
'GooglePrivacyDlpV2SaveFindingsArgs',
'GooglePrivacyDlpV2ScheduleArgs',
'GooglePrivacyDlpV2StatisticalTableArgs',
'GooglePrivacyDlpV2StorageConfigArgs',
'GooglePrivacyDlpV2StoredInfoTypeConfigArgs',
'GooglePrivacyDlpV2StoredTypeArgs',
'GooglePrivacyDlpV2SurrogateTypeArgs',
'GooglePrivacyDlpV2TableOptionsArgs',
'GooglePrivacyDlpV2TaggedFieldArgs',
'GooglePrivacyDlpV2ThrowErrorArgs',
'GooglePrivacyDlpV2TimePartConfigArgs',
'GooglePrivacyDlpV2TimespanConfigArgs',
'GooglePrivacyDlpV2TransformationErrorHandlingArgs',
'GooglePrivacyDlpV2TransientCryptoKeyArgs',
'GooglePrivacyDlpV2TriggerArgs',
'GooglePrivacyDlpV2UnwrappedCryptoKeyArgs',
'GooglePrivacyDlpV2ValueArgs',
'GooglePrivacyDlpV2WordListArgs',
'GoogleProtobufEmptyArgs',
'GoogleTypeDateArgs',
'GoogleTypeTimeOfDayArgs',
]
@pulumi.input_type
class GooglePrivacyDlpV2ActionArgs:
def __init__(__self__, *,
job_notification_emails: Optional[pulumi.Input['GooglePrivacyDlpV2JobNotificationEmailsArgs']] = None,
pub_sub: Optional[pulumi.Input['GooglePrivacyDlpV2PublishToPubSubArgs']] = None,
publish_findings_to_cloud_data_catalog: Optional[pulumi.Input['GooglePrivacyDlpV2PublishFindingsToCloudDataCatalogArgs']] = None,
publish_summary_to_cscc: Optional[pulumi.Input['GooglePrivacyDlpV2PublishSummaryToCsccArgs']] = None,
publish_to_stackdriver: Optional[pulumi.Input['GooglePrivacyDlpV2PublishToStackdriverArgs']] = None,
save_findings: Optional[pulumi.Input['GooglePrivacyDlpV2SaveFindingsArgs']] = None):
"""
A task to execute on the completion of a job. See https://cloud.google.com/dlp/docs/concepts-actions to learn more.
:param pulumi.Input['GooglePrivacyDlpV2JobNotificationEmailsArgs'] job_notification_emails: Enable email notification for project owners and editors on job's completion/failure.
:param pulumi.Input['GooglePrivacyDlpV2PublishToPubSubArgs'] pub_sub: Publish a notification to a pubsub topic.
:param pulumi.Input['GooglePrivacyDlpV2PublishFindingsToCloudDataCatalogArgs'] publish_findings_to_cloud_data_catalog: Publish findings to Cloud Datahub.
:param pulumi.Input['GooglePrivacyDlpV2PublishSummaryToCsccArgs'] publish_summary_to_cscc: Publish summary to Cloud Security Command Center (Alpha).
:param pulumi.Input['GooglePrivacyDlpV2PublishToStackdriverArgs'] publish_to_stackdriver: Enable Stackdriver metric dlp.googleapis.com/finding_count.
:param pulumi.Input['GooglePrivacyDlpV2SaveFindingsArgs'] save_findings: Save resulting findings in a provided location.
"""
if job_notification_emails is not None:
pulumi.set(__self__, "job_notification_emails", job_notification_emails)
if pub_sub is not None:
pulumi.set(__self__, "pub_sub", pub_sub)
if publish_findings_to_cloud_data_catalog is not None:
pulumi.set(__self__, "publish_findings_to_cloud_data_catalog", publish_findings_to_cloud_data_catalog)
if publish_summary_to_cscc is not None:
pulumi.set(__self__, "publish_summary_to_cscc", publish_summary_to_cscc)
if publish_to_stackdriver is not None:
pulumi.set(__self__, "publish_to_stackdriver", publish_to_stackdriver)
if save_findings is not None:
pulumi.set(__self__, "save_findings", save_findings)
@property
@pulumi.getter(name="jobNotificationEmails")
def job_notification_emails(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2JobNotificationEmailsArgs']]:
"""
Enable email notification for project owners and editors on job's completion/failure.
"""
return pulumi.get(self, "job_notification_emails")
@job_notification_emails.setter
def job_notification_emails(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2JobNotificationEmailsArgs']]):
pulumi.set(self, "job_notification_emails", value)
@property
@pulumi.getter(name="pubSub")
def pub_sub(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2PublishToPubSubArgs']]:
"""
Publish a notification to a pubsub topic.
"""
return pulumi.get(self, "pub_sub")
@pub_sub.setter
def pub_sub(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2PublishToPubSubArgs']]):
pulumi.set(self, "pub_sub", value)
@property
@pulumi.getter(name="publishFindingsToCloudDataCatalog")
def publish_findings_to_cloud_data_catalog(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2PublishFindingsToCloudDataCatalogArgs']]:
"""
Publish findings to Cloud Datahub.
"""
return pulumi.get(self, "publish_findings_to_cloud_data_catalog")
@publish_findings_to_cloud_data_catalog.setter
def publish_findings_to_cloud_data_catalog(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2PublishFindingsToCloudDataCatalogArgs']]):
pulumi.set(self, "publish_findings_to_cloud_data_catalog", value)
@property
@pulumi.getter(name="publishSummaryToCscc")
def publish_summary_to_cscc(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2PublishSummaryToCsccArgs']]:
"""
Publish summary to Cloud Security Command Center (Alpha).
"""
return pulumi.get(self, "publish_summary_to_cscc")
@publish_summary_to_cscc.setter
def publish_summary_to_cscc(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2PublishSummaryToCsccArgs']]):
pulumi.set(self, "publish_summary_to_cscc", value)
@property
@pulumi.getter(name="publishToStackdriver")
def publish_to_stackdriver(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2PublishToStackdriverArgs']]:
"""
Enable Stackdriver metric dlp.googleapis.com/finding_count.
"""
return pulumi.get(self, "publish_to_stackdriver")
@publish_to_stackdriver.setter
def publish_to_stackdriver(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2PublishToStackdriverArgs']]):
pulumi.set(self, "publish_to_stackdriver", value)
@property
@pulumi.getter(name="saveFindings")
def save_findings(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2SaveFindingsArgs']]:
"""
Save resulting findings in a provided location.
"""
return pulumi.get(self, "save_findings")
@save_findings.setter
def save_findings(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2SaveFindingsArgs']]):
pulumi.set(self, "save_findings", value)
@pulumi.input_type
class GooglePrivacyDlpV2AuxiliaryTableArgs:
def __init__(__self__, *,
quasi_ids: pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2QuasiIdFieldArgs']]],
relative_frequency: pulumi.Input['GooglePrivacyDlpV2FieldIdArgs'],
table: pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs']):
"""
An auxiliary table contains statistical information on the relative frequency of different quasi-identifiers values. It has one or several quasi-identifiers columns, and one column that indicates the relative frequency of each quasi-identifier tuple. If a tuple is present in the data but not in the auxiliary table, the corresponding relative frequency is assumed to be zero (and thus, the tuple is highly reidentifiable).
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2QuasiIdFieldArgs']]] quasi_ids: Quasi-identifier columns.
:param pulumi.Input['GooglePrivacyDlpV2FieldIdArgs'] relative_frequency: The relative frequency column must contain a floating-point number between 0 and 1 (inclusive). Null values are assumed to be zero.
:param pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs'] table: Auxiliary table location.
"""
pulumi.set(__self__, "quasi_ids", quasi_ids)
pulumi.set(__self__, "relative_frequency", relative_frequency)
pulumi.set(__self__, "table", table)
@property
@pulumi.getter(name="quasiIds")
def quasi_ids(self) -> pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2QuasiIdFieldArgs']]]:
"""
Quasi-identifier columns.
"""
return pulumi.get(self, "quasi_ids")
@quasi_ids.setter
def quasi_ids(self, value: pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2QuasiIdFieldArgs']]]):
pulumi.set(self, "quasi_ids", value)
@property
@pulumi.getter(name="relativeFrequency")
def relative_frequency(self) -> pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']:
"""
The relative frequency column must contain a floating-point number between 0 and 1 (inclusive). Null values are assumed to be zero.
"""
return pulumi.get(self, "relative_frequency")
@relative_frequency.setter
def relative_frequency(self, value: pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']):
pulumi.set(self, "relative_frequency", value)
@property
@pulumi.getter
def table(self) -> pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs']:
"""
Auxiliary table location.
"""
return pulumi.get(self, "table")
@table.setter
def table(self, value: pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs']):
pulumi.set(self, "table", value)
@pulumi.input_type
class GooglePrivacyDlpV2BigQueryFieldArgs:
def __init__(__self__, *,
field: Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']] = None,
table: Optional[pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs']] = None):
"""
Message defining a field of a BigQuery table.
:param pulumi.Input['GooglePrivacyDlpV2FieldIdArgs'] field: Designated field in the BigQuery table.
:param pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs'] table: Source table of the field.
"""
if field is not None:
pulumi.set(__self__, "field", field)
if table is not None:
pulumi.set(__self__, "table", table)
@property
@pulumi.getter
def field(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]:
"""
Designated field in the BigQuery table.
"""
return pulumi.get(self, "field")
@field.setter
def field(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]):
pulumi.set(self, "field", value)
@property
@pulumi.getter
def table(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs']]:
"""
Source table of the field.
"""
return pulumi.get(self, "table")
@table.setter
def table(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs']]):
pulumi.set(self, "table", value)
@pulumi.input_type
class GooglePrivacyDlpV2BigQueryOptionsArgs:
def __init__(__self__, *,
excluded_fields: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]]] = None,
identifying_fields: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]]] = None,
included_fields: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]]] = None,
rows_limit: Optional[pulumi.Input[str]] = None,
rows_limit_percent: Optional[pulumi.Input[int]] = None,
sample_method: Optional[pulumi.Input['GooglePrivacyDlpV2BigQueryOptionsSampleMethod']] = None,
table_reference: Optional[pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs']] = None):
"""
Options defining BigQuery table and row identifiers.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]] excluded_fields: References to fields excluded from scanning. This allows you to skip inspection of entire columns which you know have no findings.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]] identifying_fields: Table fields that may uniquely identify a row within the table. When `actions.saveFindings.outputConfig.table` is specified, the values of columns specified here are available in the output table under `location.content_locations.record_location.record_key.id_values`. Nested fields such as `person.birthdate.year` are allowed.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]] included_fields: Limit scanning only to these fields.
:param pulumi.Input[str] rows_limit: Max number of rows to scan. If the table has more rows than this value, the rest of the rows are omitted. If not set, or if set to 0, all rows will be scanned. Only one of rows_limit and rows_limit_percent can be specified. Cannot be used in conjunction with TimespanConfig.
:param pulumi.Input[int] rows_limit_percent: Max percentage of rows to scan. The rest are omitted. The number of rows scanned is rounded down. Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. Defaults to 0. Only one of rows_limit and rows_limit_percent can be specified. Cannot be used in conjunction with TimespanConfig.
:param pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs'] table_reference: Complete BigQuery table reference.
"""
if excluded_fields is not None:
pulumi.set(__self__, "excluded_fields", excluded_fields)
if identifying_fields is not None:
pulumi.set(__self__, "identifying_fields", identifying_fields)
if included_fields is not None:
pulumi.set(__self__, "included_fields", included_fields)
if rows_limit is not None:
pulumi.set(__self__, "rows_limit", rows_limit)
if rows_limit_percent is not None:
pulumi.set(__self__, "rows_limit_percent", rows_limit_percent)
if sample_method is not None:
pulumi.set(__self__, "sample_method", sample_method)
if table_reference is not None:
pulumi.set(__self__, "table_reference", table_reference)
@property
@pulumi.getter(name="excludedFields")
def excluded_fields(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]]]:
"""
References to fields excluded from scanning. This allows you to skip inspection of entire columns which you know have no findings.
"""
return pulumi.get(self, "excluded_fields")
@excluded_fields.setter
def excluded_fields(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]]]):
pulumi.set(self, "excluded_fields", value)
@property
@pulumi.getter(name="identifyingFields")
def identifying_fields(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]]]:
"""
Table fields that may uniquely identify a row within the table. When `actions.saveFindings.outputConfig.table` is specified, the values of columns specified here are available in the output table under `location.content_locations.record_location.record_key.id_values`. Nested fields such as `person.birthdate.year` are allowed.
"""
return pulumi.get(self, "identifying_fields")
@identifying_fields.setter
def identifying_fields(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]]]):
pulumi.set(self, "identifying_fields", value)
@property
@pulumi.getter(name="includedFields")
def included_fields(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]]]:
"""
Limit scanning only to these fields.
"""
return pulumi.get(self, "included_fields")
@included_fields.setter
def included_fields(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]]]):
pulumi.set(self, "included_fields", value)
@property
@pulumi.getter(name="rowsLimit")
def rows_limit(self) -> Optional[pulumi.Input[str]]:
"""
Max number of rows to scan. If the table has more rows than this value, the rest of the rows are omitted. If not set, or if set to 0, all rows will be scanned. Only one of rows_limit and rows_limit_percent can be specified. Cannot be used in conjunction with TimespanConfig.
"""
return pulumi.get(self, "rows_limit")
@rows_limit.setter
def rows_limit(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "rows_limit", value)
@property
@pulumi.getter(name="rowsLimitPercent")
def rows_limit_percent(self) -> Optional[pulumi.Input[int]]:
"""
        Max percentage of rows to scan. The rest are omitted. The number of rows scanned is rounded down. Must be between 0 and 100, inclusively. Both 0 and 100 mean no limit. Defaults to 0. Only one of rows_limit and rows_limit_percent can be specified. Cannot be used in conjunction with TimespanConfig.
"""
return pulumi.get(self, "rows_limit_percent")
@rows_limit_percent.setter
def rows_limit_percent(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "rows_limit_percent", value)
@property
@pulumi.getter(name="sampleMethod")
def sample_method(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2BigQueryOptionsSampleMethod']]:
return pulumi.get(self, "sample_method")
@sample_method.setter
def sample_method(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2BigQueryOptionsSampleMethod']]):
pulumi.set(self, "sample_method", value)
@property
@pulumi.getter(name="tableReference")
def table_reference(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs']]:
"""
Complete BigQuery table reference.
"""
return pulumi.get(self, "table_reference")
@table_reference.setter
def table_reference(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs']]):
pulumi.set(self, "table_reference", value)
@pulumi.input_type
class GooglePrivacyDlpV2BigQueryTableArgs:
def __init__(__self__, *,
dataset_id: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
table_id: Optional[pulumi.Input[str]] = None):
"""
        Message defining the location of a BigQuery table. A table is uniquely identified by its project_id, dataset_id, and table_name. Within a query a table is often referenced with a string in the format of: `<project_id>:<dataset_id>.<table_id>` or `<project_id>.<dataset_id>.<table_id>`.
:param pulumi.Input[str] dataset_id: Dataset ID of the table.
:param pulumi.Input[str] project: The Google Cloud Platform project ID of the project containing the table. If omitted, project ID is inferred from the API call.
:param pulumi.Input[str] table_id: Name of the table.
"""
if dataset_id is not None:
pulumi.set(__self__, "dataset_id", dataset_id)
if project is not None:
pulumi.set(__self__, "project", project)
if table_id is not None:
pulumi.set(__self__, "table_id", table_id)
@property
@pulumi.getter(name="datasetId")
def dataset_id(self) -> Optional[pulumi.Input[str]]:
"""
Dataset ID of the table.
"""
return pulumi.get(self, "dataset_id")
@dataset_id.setter
def dataset_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dataset_id", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The Google Cloud Platform project ID of the project containing the table. If omitted, project ID is inferred from the API call.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="tableId")
def table_id(self) -> Optional[pulumi.Input[str]]:
"""
Name of the table.
"""
return pulumi.get(self, "table_id")
@table_id.setter
def table_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "table_id", value)
@pulumi.input_type
class GooglePrivacyDlpV2BucketingConfigArgs:
def __init__(__self__, *,
buckets: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2BucketArgs']]]] = None):
"""
Generalization function that buckets values based on ranges. The ranges and replacement values are dynamically provided by the user for custom behavior, such as 1-30 -> LOW 31-65 -> MEDIUM 66-100 -> HIGH This can be used on data of type: number, long, string, timestamp. If the bound `Value` type differs from the type of data being transformed, we will first attempt converting the type of the data to be transformed to match the type of the bound before comparing. See https://cloud.google.com/dlp/docs/concepts-bucketing to learn more.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2BucketArgs']]] buckets: Set of buckets. Ranges must be non-overlapping.
"""
if buckets is not None:
pulumi.set(__self__, "buckets", buckets)
@property
@pulumi.getter
def buckets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2BucketArgs']]]]:
"""
Set of buckets. Ranges must be non-overlapping.
"""
return pulumi.get(self, "buckets")
@buckets.setter
def buckets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2BucketArgs']]]]):
pulumi.set(self, "buckets", value)
@pulumi.input_type
class GooglePrivacyDlpV2BucketArgs:
def __init__(__self__, *,
replacement_value: pulumi.Input['GooglePrivacyDlpV2ValueArgs'],
max: Optional[pulumi.Input['GooglePrivacyDlpV2ValueArgs']] = None,
min: Optional[pulumi.Input['GooglePrivacyDlpV2ValueArgs']] = None):
"""
Bucket is represented as a range, along with replacement values.
:param pulumi.Input['GooglePrivacyDlpV2ValueArgs'] replacement_value: Replacement value for this bucket.
:param pulumi.Input['GooglePrivacyDlpV2ValueArgs'] max: Upper bound of the range, exclusive; type must match min.
:param pulumi.Input['GooglePrivacyDlpV2ValueArgs'] min: Lower bound of the range, inclusive. Type should be the same as max if used.
"""
pulumi.set(__self__, "replacement_value", replacement_value)
if max is not None:
pulumi.set(__self__, "max", max)
if min is not None:
pulumi.set(__self__, "min", min)
@property
@pulumi.getter(name="replacementValue")
def replacement_value(self) -> pulumi.Input['GooglePrivacyDlpV2ValueArgs']:
"""
Replacement value for this bucket.
"""
return pulumi.get(self, "replacement_value")
@replacement_value.setter
def replacement_value(self, value: pulumi.Input['GooglePrivacyDlpV2ValueArgs']):
pulumi.set(self, "replacement_value", value)
@property
@pulumi.getter
def max(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2ValueArgs']]:
"""
Upper bound of the range, exclusive; type must match min.
"""
return pulumi.get(self, "max")
@max.setter
def max(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2ValueArgs']]):
pulumi.set(self, "max", value)
@property
@pulumi.getter
def min(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2ValueArgs']]:
"""
Lower bound of the range, inclusive. Type should be the same as max if used.
"""
return pulumi.get(self, "min")
@min.setter
def min(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2ValueArgs']]):
pulumi.set(self, "min", value)
@pulumi.input_type
class GooglePrivacyDlpV2CategoricalStatsConfigArgs:
def __init__(__self__, *,
field: Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']] = None):
"""
Compute numerical stats over an individual column, including number of distinct values and value count distribution.
:param pulumi.Input['GooglePrivacyDlpV2FieldIdArgs'] field: Field to compute categorical stats on. All column types are supported except for arrays and structs. However, it may be more informative to use NumericalStats when the field type is supported, depending on the data.
"""
if field is not None:
pulumi.set(__self__, "field", field)
@property
@pulumi.getter
def field(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]:
"""
Field to compute categorical stats on. All column types are supported except for arrays and structs. However, it may be more informative to use NumericalStats when the field type is supported, depending on the data.
"""
return pulumi.get(self, "field")
@field.setter
def field(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]):
pulumi.set(self, "field", value)
@pulumi.input_type
class GooglePrivacyDlpV2CharacterMaskConfigArgs:
def __init__(__self__, *,
characters_to_ignore: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2CharsToIgnoreArgs']]]] = None,
masking_character: Optional[pulumi.Input[str]] = None,
number_to_mask: Optional[pulumi.Input[int]] = None,
reverse_order: Optional[pulumi.Input[bool]] = None):
"""
        Partially mask a string by replacing a given number of characters with a fixed character. Masking can start from the beginning or end of the string. This can be used on data of any type (numbers, longs, and so on) and when de-identifying structured data we'll attempt to preserve the original data's type. (This allows you to take a long like 123 and modify it to a string like **3.)
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2CharsToIgnoreArgs']]] characters_to_ignore: When masking a string, items in this list will be skipped when replacing characters. For example, if the input string is `555-555-5555` and you instruct Cloud DLP to skip `-` and mask 5 characters with `*`, Cloud DLP returns `***-**5-5555`.
:param pulumi.Input[str] masking_character: Character to use to mask the sensitive values—for example, `*` for an alphabetic string such as a name, or `0` for a numeric string such as ZIP code or credit card number. This string must have a length of 1. If not supplied, this value defaults to `*` for strings, and `0` for digits.
:param pulumi.Input[int] number_to_mask: Number of characters to mask. If not set, all matching chars will be masked. Skipped characters do not count towards this tally.
:param pulumi.Input[bool] reverse_order: Mask characters in reverse order. For example, if `masking_character` is `0`, `number_to_mask` is `14`, and `reverse_order` is `false`, then the input string `1234-5678-9012-3456` is masked as `00000000000000-3456`. If `masking_character` is `*`, `number_to_mask` is `3`, and `reverse_order` is `true`, then the string `12345` is masked as `12***`.
"""
if characters_to_ignore is not None:
pulumi.set(__self__, "characters_to_ignore", characters_to_ignore)
if masking_character is not None:
pulumi.set(__self__, "masking_character", masking_character)
if number_to_mask is not None:
pulumi.set(__self__, "number_to_mask", number_to_mask)
if reverse_order is not None:
pulumi.set(__self__, "reverse_order", reverse_order)
@property
@pulumi.getter(name="charactersToIgnore")
def characters_to_ignore(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2CharsToIgnoreArgs']]]]:
"""
When masking a string, items in this list will be skipped when replacing characters. For example, if the input string is `555-555-5555` and you instruct Cloud DLP to skip `-` and mask 5 characters with `*`, Cloud DLP returns `***-**5-5555`.
"""
return pulumi.get(self, "characters_to_ignore")
@characters_to_ignore.setter
def characters_to_ignore(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2CharsToIgnoreArgs']]]]):
pulumi.set(self, "characters_to_ignore", value)
@property
@pulumi.getter(name="maskingCharacter")
def masking_character(self) -> Optional[pulumi.Input[str]]:
"""
Character to use to mask the sensitive values—for example, `*` for an alphabetic string such as a name, or `0` for a numeric string such as ZIP code or credit card number. This string must have a length of 1. If not supplied, this value defaults to `*` for strings, and `0` for digits.
"""
return pulumi.get(self, "masking_character")
@masking_character.setter
def masking_character(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "masking_character", value)
@property
@pulumi.getter(name="numberToMask")
def number_to_mask(self) -> Optional[pulumi.Input[int]]:
"""
Number of characters to mask. If not set, all matching chars will be masked. Skipped characters do not count towards this tally.
"""
return pulumi.get(self, "number_to_mask")
@number_to_mask.setter
def number_to_mask(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "number_to_mask", value)
@property
@pulumi.getter(name="reverseOrder")
def reverse_order(self) -> Optional[pulumi.Input[bool]]:
"""
Mask characters in reverse order. For example, if `masking_character` is `0`, `number_to_mask` is `14`, and `reverse_order` is `false`, then the input string `1234-5678-9012-3456` is masked as `00000000000000-3456`. If `masking_character` is `*`, `number_to_mask` is `3`, and `reverse_order` is `true`, then the string `12345` is masked as `12***`.
"""
return pulumi.get(self, "reverse_order")
@reverse_order.setter
def reverse_order(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "reverse_order", value)
@pulumi.input_type
class GooglePrivacyDlpV2CharsToIgnoreArgs:
def __init__(__self__, *,
characters_to_skip: Optional[pulumi.Input[str]] = None,
common_characters_to_ignore: Optional[pulumi.Input['GooglePrivacyDlpV2CharsToIgnoreCommonCharactersToIgnore']] = None):
"""
Characters to skip when doing deidentification of a value. These will be left alone and skipped.
:param pulumi.Input[str] characters_to_skip: Characters to not transform when masking.
:param pulumi.Input['GooglePrivacyDlpV2CharsToIgnoreCommonCharactersToIgnore'] common_characters_to_ignore: Common characters to not transform when masking. Useful to avoid removing punctuation.
"""
if characters_to_skip is not None:
pulumi.set(__self__, "characters_to_skip", characters_to_skip)
if common_characters_to_ignore is not None:
pulumi.set(__self__, "common_characters_to_ignore", common_characters_to_ignore)
@property
@pulumi.getter(name="charactersToSkip")
def characters_to_skip(self) -> Optional[pulumi.Input[str]]:
"""
Characters to not transform when masking.
"""
return pulumi.get(self, "characters_to_skip")
@characters_to_skip.setter
def characters_to_skip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "characters_to_skip", value)
@property
@pulumi.getter(name="commonCharactersToIgnore")
def common_characters_to_ignore(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2CharsToIgnoreCommonCharactersToIgnore']]:
"""
Common characters to not transform when masking. Useful to avoid removing punctuation.
"""
return pulumi.get(self, "common_characters_to_ignore")
@common_characters_to_ignore.setter
def common_characters_to_ignore(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2CharsToIgnoreCommonCharactersToIgnore']]):
pulumi.set(self, "common_characters_to_ignore", value)
@pulumi.input_type
class GooglePrivacyDlpV2CloudStorageFileSetArgs:
def __init__(__self__, *,
url: Optional[pulumi.Input[str]] = None):
"""
Message representing a set of files in Cloud Storage.
        :param pulumi.Input[str] url: The url, in the format `gs://<bucket>/<path>`. Trailing wildcard in the path is allowed.
"""
if url is not None:
pulumi.set(__self__, "url", url)
@property
@pulumi.getter
def url(self) -> Optional[pulumi.Input[str]]:
"""
        The url, in the format `gs://<bucket>/<path>`. Trailing wildcard in the path is allowed.
"""
return pulumi.get(self, "url")
@url.setter
def url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "url", value)
@pulumi.input_type
class GooglePrivacyDlpV2CloudStorageOptionsArgs:
def __init__(__self__, *,
bytes_limit_per_file: Optional[pulumi.Input[str]] = None,
bytes_limit_per_file_percent: Optional[pulumi.Input[int]] = None,
file_set: Optional[pulumi.Input['GooglePrivacyDlpV2FileSetArgs']] = None,
file_types: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2CloudStorageOptionsFileTypesItem']]]] = None,
files_limit_percent: Optional[pulumi.Input[int]] = None,
sample_method: Optional[pulumi.Input['GooglePrivacyDlpV2CloudStorageOptionsSampleMethod']] = None):
"""
Options defining a file or a set of files within a Google Cloud Storage bucket.
:param pulumi.Input[str] bytes_limit_per_file: Max number of bytes to scan from a file. If a scanned file's size is bigger than this value then the rest of the bytes are omitted. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified. Cannot be set if de-identification is requested.
        :param pulumi.Input[int] bytes_limit_per_file_percent: Max percentage of bytes to scan from a file. The rest are omitted. The number of bytes scanned is rounded down. Must be between 0 and 100, inclusively. Both 0 and 100 mean no limit. Defaults to 0. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified. Cannot be set if de-identification is requested.
:param pulumi.Input['GooglePrivacyDlpV2FileSetArgs'] file_set: The set of one or more files to scan.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2CloudStorageOptionsFileTypesItem']]] file_types: List of file type groups to include in the scan. If empty, all files are scanned and available data format processors are applied. In addition, the binary content of the selected files is always scanned as well. Images are scanned only as binary if the specified region does not support image inspection and no file_types were specified. Image inspection is restricted to 'global', 'us', 'asia', and 'europe'.
        :param pulumi.Input[int] files_limit_percent: Limits the number of files to scan to this percentage of the input FileSet. Number of files scanned is rounded down. Must be between 0 and 100, inclusively. Both 0 and 100 mean no limit. Defaults to 0.
"""
if bytes_limit_per_file is not None:
pulumi.set(__self__, "bytes_limit_per_file", bytes_limit_per_file)
if bytes_limit_per_file_percent is not None:
pulumi.set(__self__, "bytes_limit_per_file_percent", bytes_limit_per_file_percent)
if file_set is not None:
pulumi.set(__self__, "file_set", file_set)
if file_types is not None:
pulumi.set(__self__, "file_types", file_types)
if files_limit_percent is not None:
pulumi.set(__self__, "files_limit_percent", files_limit_percent)
if sample_method is not None:
pulumi.set(__self__, "sample_method", sample_method)
@property
@pulumi.getter(name="bytesLimitPerFile")
def bytes_limit_per_file(self) -> Optional[pulumi.Input[str]]:
"""
Max number of bytes to scan from a file. If a scanned file's size is bigger than this value then the rest of the bytes are omitted. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified. Cannot be set if de-identification is requested.
"""
return pulumi.get(self, "bytes_limit_per_file")
@bytes_limit_per_file.setter
def bytes_limit_per_file(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "bytes_limit_per_file", value)
@property
@pulumi.getter(name="bytesLimitPerFilePercent")
def bytes_limit_per_file_percent(self) -> Optional[pulumi.Input[int]]:
"""
        Max percentage of bytes to scan from a file. The rest are omitted. The number of bytes scanned is rounded down. Must be between 0 and 100, inclusively. Both 0 and 100 mean no limit. Defaults to 0. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified. Cannot be set if de-identification is requested.
"""
return pulumi.get(self, "bytes_limit_per_file_percent")
@bytes_limit_per_file_percent.setter
def bytes_limit_per_file_percent(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "bytes_limit_per_file_percent", value)
@property
@pulumi.getter(name="fileSet")
def file_set(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2FileSetArgs']]:
"""
The set of one or more files to scan.
"""
return pulumi.get(self, "file_set")
@file_set.setter
def file_set(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2FileSetArgs']]):
pulumi.set(self, "file_set", value)
@property
@pulumi.getter(name="fileTypes")
def file_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2CloudStorageOptionsFileTypesItem']]]]:
"""
List of file type groups to include in the scan. If empty, all files are scanned and available data format processors are applied. In addition, the binary content of the selected files is always scanned as well. Images are scanned only as binary if the specified region does not support image inspection and no file_types were specified. Image inspection is restricted to 'global', 'us', 'asia', and 'europe'.
"""
return pulumi.get(self, "file_types")
@file_types.setter
def file_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2CloudStorageOptionsFileTypesItem']]]]):
pulumi.set(self, "file_types", value)
@property
@pulumi.getter(name="filesLimitPercent")
def files_limit_percent(self) -> Optional[pulumi.Input[int]]:
"""
        Limits the number of files to scan to this percentage of the input FileSet. Number of files scanned is rounded down. Must be between 0 and 100, inclusively. Both 0 and 100 mean no limit. Defaults to 0.
"""
return pulumi.get(self, "files_limit_percent")
@files_limit_percent.setter
def files_limit_percent(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "files_limit_percent", value)
@property
@pulumi.getter(name="sampleMethod")
def sample_method(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2CloudStorageOptionsSampleMethod']]:
return pulumi.get(self, "sample_method")
@sample_method.setter
def sample_method(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2CloudStorageOptionsSampleMethod']]):
pulumi.set(self, "sample_method", value)
@pulumi.input_type
class GooglePrivacyDlpV2CloudStoragePathArgs:
def __init__(__self__, *,
path: Optional[pulumi.Input[str]] = None):
"""
Message representing a single file or path in Cloud Storage.
:param pulumi.Input[str] path: A url representing a file or path (no wildcards) in Cloud Storage. Example: gs://[BUCKET_NAME]/dictionary.txt
"""
if path is not None:
pulumi.set(__self__, "path", path)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
A url representing a file or path (no wildcards) in Cloud Storage. Example: gs://[BUCKET_NAME]/dictionary.txt
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@pulumi.input_type
class GooglePrivacyDlpV2CloudStorageRegexFileSetArgs:
def __init__(__self__, *,
bucket_name: Optional[pulumi.Input[str]] = None,
exclude_regex: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
include_regex: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Message representing a set of files in a Cloud Storage bucket. Regular expressions are used to allow fine-grained control over which files in the bucket to include. Included files are those that match at least one item in `include_regex` and do not match any items in `exclude_regex`. Note that a file that matches items from both lists will _not_ be included. For a match to occur, the entire file path (i.e., everything in the url after the bucket name) must match the regular expression. For example, given the input `{bucket_name: "mybucket", include_regex: ["directory1/.*"], exclude_regex: ["directory1/excluded.*"]}`: * `gs://mybucket/directory1/myfile` will be included * `gs://mybucket/directory1/directory2/myfile` will be included (`.*` matches across `/`) * `gs://mybucket/directory0/directory1/myfile` will _not_ be included (the full path doesn't match any items in `include_regex`) * `gs://mybucket/directory1/excludedfile` will _not_ be included (the path matches an item in `exclude_regex`) If `include_regex` is left empty, it will match all files by default (this is equivalent to setting `include_regex: [".*"]`). Some other common use cases: * `{bucket_name: "mybucket", exclude_regex: [".*\.pdf"]}` will include all files in `mybucket` except for .pdf files * `{bucket_name: "mybucket", include_regex: ["directory/[^/]+"]}` will include all files directly under `gs://mybucket/directory/`, without matching across `/`
:param pulumi.Input[str] bucket_name: The name of a Cloud Storage bucket. Required.
:param pulumi.Input[Sequence[pulumi.Input[str]]] exclude_regex: A list of regular expressions matching file paths to exclude. All files in the bucket that match at least one of these regular expressions will be excluded from the scan. Regular expressions use RE2 [syntax](https://github.com/google/re2/wiki/Syntax); a guide can be found under the google/re2 repository on GitHub.
:param pulumi.Input[Sequence[pulumi.Input[str]]] include_regex: A list of regular expressions matching file paths to include. All files in the bucket that match at least one of these regular expressions will be included in the set of files, except for those that also match an item in `exclude_regex`. Leaving this field empty will match all files by default (this is equivalent to including `.*` in the list). Regular expressions use RE2 [syntax](https://github.com/google/re2/wiki/Syntax); a guide can be found under the google/re2 repository on GitHub.
"""
if bucket_name is not None:
pulumi.set(__self__, "bucket_name", bucket_name)
if exclude_regex is not None:
pulumi.set(__self__, "exclude_regex", exclude_regex)
if include_regex is not None:
pulumi.set(__self__, "include_regex", include_regex)
@property
@pulumi.getter(name="bucketName")
def bucket_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of a Cloud Storage bucket. Required.
"""
return pulumi.get(self, "bucket_name")
@bucket_name.setter
def bucket_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "bucket_name", value)
@property
@pulumi.getter(name="excludeRegex")
def exclude_regex(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of regular expressions matching file paths to exclude. All files in the bucket that match at least one of these regular expressions will be excluded from the scan. Regular expressions use RE2 [syntax](https://github.com/google/re2/wiki/Syntax); a guide can be found under the google/re2 repository on GitHub.
"""
return pulumi.get(self, "exclude_regex")
@exclude_regex.setter
def exclude_regex(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "exclude_regex", value)
@property
@pulumi.getter(name="includeRegex")
def include_regex(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of regular expressions matching file paths to include. All files in the bucket that match at least one of these regular expressions will be included in the set of files, except for those that also match an item in `exclude_regex`. Leaving this field empty will match all files by default (this is equivalent to including `.*` in the list). Regular expressions use RE2 [syntax](https://github.com/google/re2/wiki/Syntax); a guide can be found under the google/re2 repository on GitHub.
"""
return pulumi.get(self, "include_regex")
@include_regex.setter
def include_regex(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "include_regex", value)
@pulumi.input_type
class GooglePrivacyDlpV2ConditionsArgs:
def __init__(__self__, *,
conditions: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2ConditionArgs']]]] = None):
"""
A collection of conditions.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2ConditionArgs']]] conditions: A collection of conditions.
"""
if conditions is not None:
pulumi.set(__self__, "conditions", conditions)
@property
@pulumi.getter
def conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2ConditionArgs']]]]:
"""
A collection of conditions.
"""
return pulumi.get(self, "conditions")
@conditions.setter
def conditions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2ConditionArgs']]]]):
pulumi.set(self, "conditions", value)
@pulumi.input_type
class GooglePrivacyDlpV2ConditionArgs:
def __init__(__self__, *,
field: pulumi.Input['GooglePrivacyDlpV2FieldIdArgs'],
operator: pulumi.Input['GooglePrivacyDlpV2ConditionOperator'],
value: Optional[pulumi.Input['GooglePrivacyDlpV2ValueArgs']] = None):
"""
        The field type of `value` and `field` do not need to match to be considered equal, but not all comparisons are possible. EQUAL_TO and NOT_EQUAL_TO attempt to compare even with incompatible types, but all other comparisons are invalid with incompatible types. A `value` of type: - `string` can be compared against all other types - `boolean` can only be compared against other booleans - `integer` can be compared against doubles or a string if the string value can be parsed as an integer. - `double` can be compared against integers or a string if the string can be parsed as a double. - `Timestamp` can be compared against strings in RFC 3339 date string format. - `TimeOfDay` can be compared against timestamps and strings in the format of 'HH:mm:ss'. If we fail to compare due to type mismatch, a warning will be given and the condition will evaluate to false.
:param pulumi.Input['GooglePrivacyDlpV2FieldIdArgs'] field: Field within the record this condition is evaluated against.
:param pulumi.Input['GooglePrivacyDlpV2ConditionOperator'] operator: Operator used to compare the field or infoType to the value.
:param pulumi.Input['GooglePrivacyDlpV2ValueArgs'] value: Value to compare against. [Mandatory, except for `EXISTS` tests.]
"""
pulumi.set(__self__, "field", field)
pulumi.set(__self__, "operator", operator)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def field(self) -> pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']:
"""
Field within the record this condition is evaluated against.
"""
return pulumi.get(self, "field")
@field.setter
def field(self, value: pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']):
pulumi.set(self, "field", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input['GooglePrivacyDlpV2ConditionOperator']:
"""
Operator used to compare the field or infoType to the value.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input['GooglePrivacyDlpV2ConditionOperator']):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2ValueArgs']]:
"""
Value to compare against. [Mandatory, except for `EXISTS` tests.]
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2ValueArgs']]):
pulumi.set(self, "value", value)
@pulumi.input_type
class GooglePrivacyDlpV2CryptoDeterministicConfigArgs:
def __init__(__self__, *,
context: Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']] = None,
crypto_key: Optional[pulumi.Input['GooglePrivacyDlpV2CryptoKeyArgs']] = None,
surrogate_info_type: Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']] = None):
"""
Pseudonymization method that generates deterministic encryption for the given input. Outputs a base64 encoded representation of the encrypted output. Uses AES-SIV based on the RFC https://tools.ietf.org/html/rfc5297.
:param pulumi.Input['GooglePrivacyDlpV2FieldIdArgs'] context: A context may be used for higher security and maintaining referential integrity such that the same identifier in two different contexts will be given a distinct surrogate. The context is appended to plaintext value being encrypted. On decryption the provided context is validated against the value used during encryption. If a context was provided during encryption, same context must be provided during decryption as well. If the context is not set, plaintext would be used as is for encryption. If the context is set but: 1. there is no record present when transforming a given value or 2. the field is not present when transforming a given value, plaintext would be used as is for encryption. Note that case (1) is expected when an `InfoTypeTransformation` is applied to both structured and non-structured `ContentItem`s.
:param pulumi.Input['GooglePrivacyDlpV2CryptoKeyArgs'] crypto_key: The key used by the encryption function. For deterministic encryption using AES-SIV, the provided key is internally expanded to 64 bytes prior to use.
:param pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs'] surrogate_info_type: The custom info type to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom info type followed by the number of characters comprising the surrogate. The following scheme defines the format: {info type name}({surrogate character count}):{surrogate} For example, if the name of custom info type is 'MY_TOKEN_INFO_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY_TOKEN_INFO_TYPE(3):abc' This annotation identifies the surrogate when inspecting content using the custom info type 'Surrogate'. This facilitates reversal of the surrogate when it occurs in free text. Note: For record transformations where the entire cell in a table is being transformed, surrogates are not mandatory. Surrogates are used to denote the location of the token and are necessary for re-identification in free form text. In order for inspection to work properly, the name of this info type must not occur naturally anywhere in your data; otherwise, inspection may either - reverse a surrogate that does not correspond to an actual identifier - be unable to parse the surrogate and result in an error Therefore, choose your custom info type name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY_TOKEN_TYPE.
"""
if context is not None:
pulumi.set(__self__, "context", context)
if crypto_key is not None:
pulumi.set(__self__, "crypto_key", crypto_key)
if surrogate_info_type is not None:
pulumi.set(__self__, "surrogate_info_type", surrogate_info_type)
@property
@pulumi.getter
def context(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]:
"""
A context may be used for higher security and maintaining referential integrity such that the same identifier in two different contexts will be given a distinct surrogate. The context is appended to plaintext value being encrypted. On decryption the provided context is validated against the value used during encryption. If a context was provided during encryption, same context must be provided during decryption as well. If the context is not set, plaintext would be used as is for encryption. If the context is set but: 1. there is no record present when transforming a given value or 2. the field is not present when transforming a given value, plaintext would be used as is for encryption. Note that case (1) is expected when an `InfoTypeTransformation` is applied to both structured and non-structured `ContentItem`s.
"""
return pulumi.get(self, "context")
@context.setter
def context(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]):
pulumi.set(self, "context", value)
@property
@pulumi.getter(name="cryptoKey")
def crypto_key(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2CryptoKeyArgs']]:
"""
The key used by the encryption function. For deterministic encryption using AES-SIV, the provided key is internally expanded to 64 bytes prior to use.
"""
return pulumi.get(self, "crypto_key")
@crypto_key.setter
def crypto_key(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2CryptoKeyArgs']]):
pulumi.set(self, "crypto_key", value)
@property
@pulumi.getter(name="surrogateInfoType")
def surrogate_info_type(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]:
"""
The custom info type to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom info type followed by the number of characters comprising the surrogate. The following scheme defines the format: {info type name}({surrogate character count}):{surrogate} For example, if the name of custom info type is 'MY_TOKEN_INFO_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY_TOKEN_INFO_TYPE(3):abc' This annotation identifies the surrogate when inspecting content using the custom info type 'Surrogate'. This facilitates reversal of the surrogate when it occurs in free text. Note: For record transformations where the entire cell in a table is being transformed, surrogates are not mandatory. Surrogates are used to denote the location of the token and are necessary for re-identification in free form text. In order for inspection to work properly, the name of this info type must not occur naturally anywhere in your data; otherwise, inspection may either - reverse a surrogate that does not correspond to an actual identifier - be unable to parse the surrogate and result in an error Therefore, choose your custom info type name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY_TOKEN_TYPE.
"""
return pulumi.get(self, "surrogate_info_type")
@surrogate_info_type.setter
def surrogate_info_type(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]):
pulumi.set(self, "surrogate_info_type", value)
@pulumi.input_type
class GooglePrivacyDlpV2CryptoHashConfigArgs:
def __init__(__self__, *,
crypto_key: Optional[pulumi.Input['GooglePrivacyDlpV2CryptoKeyArgs']] = None):
"""
Pseudonymization method that generates surrogates via cryptographic hashing. Uses SHA-256. The key size must be either 32 or 64 bytes. Outputs a base64 encoded representation of the hashed output (for example, L7k0BHmF1ha5U3NfGykjro4xWi1MPVQPjhMAZbSV9mM=). Currently, only string and integer values can be hashed. See https://cloud.google.com/dlp/docs/pseudonymization to learn more.
:param pulumi.Input['GooglePrivacyDlpV2CryptoKeyArgs'] crypto_key: The key used by the hash function.
"""
if crypto_key is not None:
pulumi.set(__self__, "crypto_key", crypto_key)
@property
@pulumi.getter(name="cryptoKey")
def crypto_key(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2CryptoKeyArgs']]:
"""
The key used by the hash function.
"""
return pulumi.get(self, "crypto_key")
@crypto_key.setter
def crypto_key(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2CryptoKeyArgs']]):
pulumi.set(self, "crypto_key", value)
@pulumi.input_type
class GooglePrivacyDlpV2CryptoKeyArgs:
def __init__(__self__, *,
kms_wrapped: Optional[pulumi.Input['GooglePrivacyDlpV2KmsWrappedCryptoKeyArgs']] = None,
transient: Optional[pulumi.Input['GooglePrivacyDlpV2TransientCryptoKeyArgs']] = None,
unwrapped: Optional[pulumi.Input['GooglePrivacyDlpV2UnwrappedCryptoKeyArgs']] = None):
"""
This is a data encryption key (DEK) (as opposed to a key encryption key (KEK) stored by Cloud Key Management Service (Cloud KMS). When using Cloud KMS to wrap or unwrap a DEK, be sure to set an appropriate IAM policy on the KEK to ensure an attacker cannot unwrap the DEK.
:param pulumi.Input['GooglePrivacyDlpV2KmsWrappedCryptoKeyArgs'] kms_wrapped: Key wrapped using Cloud KMS
:param pulumi.Input['GooglePrivacyDlpV2TransientCryptoKeyArgs'] transient: Transient crypto key
:param pulumi.Input['GooglePrivacyDlpV2UnwrappedCryptoKeyArgs'] unwrapped: Unwrapped crypto key
"""
if kms_wrapped is not None:
pulumi.set(__self__, "kms_wrapped", kms_wrapped)
if transient is not None:
pulumi.set(__self__, "transient", transient)
if unwrapped is not None:
pulumi.set(__self__, "unwrapped", unwrapped)
@property
@pulumi.getter(name="kmsWrapped")
def kms_wrapped(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2KmsWrappedCryptoKeyArgs']]:
"""
Key wrapped using Cloud KMS
"""
return pulumi.get(self, "kms_wrapped")
@kms_wrapped.setter
def kms_wrapped(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2KmsWrappedCryptoKeyArgs']]):
pulumi.set(self, "kms_wrapped", value)
@property
@pulumi.getter
def transient(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2TransientCryptoKeyArgs']]:
"""
Transient crypto key
"""
return pulumi.get(self, "transient")
@transient.setter
def transient(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2TransientCryptoKeyArgs']]):
pulumi.set(self, "transient", value)
@property
@pulumi.getter
def unwrapped(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2UnwrappedCryptoKeyArgs']]:
"""
Unwrapped crypto key
"""
return pulumi.get(self, "unwrapped")
@unwrapped.setter
def unwrapped(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2UnwrappedCryptoKeyArgs']]):
pulumi.set(self, "unwrapped", value)
@pulumi.input_type
class GooglePrivacyDlpV2CryptoReplaceFfxFpeConfigArgs:
def __init__(__self__, *,
crypto_key: pulumi.Input['GooglePrivacyDlpV2CryptoKeyArgs'],
common_alphabet: Optional[pulumi.Input['GooglePrivacyDlpV2CryptoReplaceFfxFpeConfigCommonAlphabet']] = None,
context: Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']] = None,
custom_alphabet: Optional[pulumi.Input[str]] = None,
radix: Optional[pulumi.Input[int]] = None,
surrogate_info_type: Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']] = None):
"""
Replaces an identifier with a surrogate using Format Preserving Encryption (FPE) with the FFX mode of operation; however when used in the `ReidentifyContent` API method, it serves the opposite function by reversing the surrogate back into the original identifier. The identifier must be encoded as ASCII. For a given crypto key and context, the same identifier will be replaced with the same surrogate. Identifiers must be at least two characters long. In the case that the identifier is the empty string, it will be skipped. See https://cloud.google.com/dlp/docs/pseudonymization to learn more. Note: We recommend using CryptoDeterministicConfig for all use cases which do not require preserving the input alphabet space and size, plus warrant referential integrity.
:param pulumi.Input['GooglePrivacyDlpV2CryptoKeyArgs'] crypto_key: The key used by the encryption algorithm.
:param pulumi.Input['GooglePrivacyDlpV2CryptoReplaceFfxFpeConfigCommonAlphabet'] common_alphabet: Common alphabets.
:param pulumi.Input['GooglePrivacyDlpV2FieldIdArgs'] context: The 'tweak', a context may be used for higher security since the same identifier in two different contexts won't be given the same surrogate. If the context is not set, a default tweak will be used. If the context is set but: 1. there is no record present when transforming a given value or 1. the field is not present when transforming a given value, a default tweak will be used. Note that case (1) is expected when an `InfoTypeTransformation` is applied to both structured and non-structured `ContentItem`s. Currently, the referenced field may be of value type integer or string. The tweak is constructed as a sequence of bytes in big endian byte order such that: - a 64 bit integer is encoded followed by a single byte of value 1 - a string is encoded in UTF-8 format followed by a single byte of value 2
:param pulumi.Input[str] custom_alphabet: This is supported by mapping these to the alphanumeric characters that the FFX mode natively supports. This happens before/after encryption/decryption. Each character listed must appear only once. Number of characters must be in the range [2, 95]. This must be encoded as ASCII. The order of characters does not matter. The full list of allowed characters is: 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz ~`!@#$%^&*()_-+={[}]|\:;"'<,>.?/
:param pulumi.Input[int] radix: The native way to select the alphabet. Must be in the range [2, 95].
:param pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs'] surrogate_info_type: The custom infoType to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom infoType followed by the number of characters comprising the surrogate. The following scheme defines the format: info_type_name(surrogate_character_count):surrogate For example, if the name of custom infoType is 'MY_TOKEN_INFO_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY_TOKEN_INFO_TYPE(3):abc' This annotation identifies the surrogate when inspecting content using the custom infoType [`SurrogateType`](https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#surrogatetype). This facilitates reversal of the surrogate when it occurs in free text. In order for inspection to work properly, the name of this infoType must not occur naturally anywhere in your data; otherwise, inspection may find a surrogate that does not correspond to an actual identifier. Therefore, choose your custom infoType name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY_TOKEN_TYPE
"""
pulumi.set(__self__, "crypto_key", crypto_key)
if common_alphabet is not None:
pulumi.set(__self__, "common_alphabet", common_alphabet)
if context is not None:
pulumi.set(__self__, "context", context)
if custom_alphabet is not None:
pulumi.set(__self__, "custom_alphabet", custom_alphabet)
if radix is not None:
pulumi.set(__self__, "radix", radix)
if surrogate_info_type is not None:
pulumi.set(__self__, "surrogate_info_type", surrogate_info_type)
@property
@pulumi.getter(name="cryptoKey")
def crypto_key(self) -> pulumi.Input['GooglePrivacyDlpV2CryptoKeyArgs']:
"""
The key used by the encryption algorithm.
"""
return pulumi.get(self, "crypto_key")
@crypto_key.setter
def crypto_key(self, value: pulumi.Input['GooglePrivacyDlpV2CryptoKeyArgs']):
pulumi.set(self, "crypto_key", value)
@property
@pulumi.getter(name="commonAlphabet")
def common_alphabet(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2CryptoReplaceFfxFpeConfigCommonAlphabet']]:
"""
Common alphabets.
"""
return pulumi.get(self, "common_alphabet")
@common_alphabet.setter
def common_alphabet(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2CryptoReplaceFfxFpeConfigCommonAlphabet']]):
pulumi.set(self, "common_alphabet", value)
@property
@pulumi.getter
def context(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]:
"""
The 'tweak', a context may be used for higher security since the same identifier in two different contexts won't be given the same surrogate. If the context is not set, a default tweak will be used. If the context is set but: 1. there is no record present when transforming a given value or 1. the field is not present when transforming a given value, a default tweak will be used. Note that case (1) is expected when an `InfoTypeTransformation` is applied to both structured and non-structured `ContentItem`s. Currently, the referenced field may be of value type integer or string. The tweak is constructed as a sequence of bytes in big endian byte order such that: - a 64 bit integer is encoded followed by a single byte of value 1 - a string is encoded in UTF-8 format followed by a single byte of value 2
"""
return pulumi.get(self, "context")
@context.setter
def context(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]):
pulumi.set(self, "context", value)
@property
@pulumi.getter(name="customAlphabet")
def custom_alphabet(self) -> Optional[pulumi.Input[str]]:
"""
This is supported by mapping these to the alphanumeric characters that the FFX mode natively supports. This happens before/after encryption/decryption. Each character listed must appear only once. Number of characters must be in the range [2, 95]. This must be encoded as ASCII. The order of characters does not matter. The full list of allowed characters is: 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz ~`!@#$%^&*()_-+={[}]|\:;"'<,>.?/
"""
return pulumi.get(self, "custom_alphabet")
@custom_alphabet.setter
def custom_alphabet(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_alphabet", value)
@property
@pulumi.getter
def radix(self) -> Optional[pulumi.Input[int]]:
"""
The native way to select the alphabet. Must be in the range [2, 95].
"""
return pulumi.get(self, "radix")
@radix.setter
def radix(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "radix", value)
@property
@pulumi.getter(name="surrogateInfoType")
def surrogate_info_type(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]:
"""
The custom infoType to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom infoType followed by the number of characters comprising the surrogate. The following scheme defines the format: info_type_name(surrogate_character_count):surrogate For example, if the name of custom infoType is 'MY_TOKEN_INFO_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY_TOKEN_INFO_TYPE(3):abc' This annotation identifies the surrogate when inspecting content using the custom infoType [`SurrogateType`](https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#surrogatetype). This facilitates reversal of the surrogate when it occurs in free text. In order for inspection to work properly, the name of this infoType must not occur naturally anywhere in your data; otherwise, inspection may find a surrogate that does not correspond to an actual identifier. Therefore, choose your custom infoType name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY_TOKEN_TYPE
"""
return pulumi.get(self, "surrogate_info_type")
@surrogate_info_type.setter
def surrogate_info_type(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]):
pulumi.set(self, "surrogate_info_type", value)
@pulumi.input_type
class GooglePrivacyDlpV2CustomInfoTypeArgs:
def __init__(__self__, *,
detection_rules: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2DetectionRuleArgs']]]] = None,
dictionary: Optional[pulumi.Input['GooglePrivacyDlpV2DictionaryArgs']] = None,
exclusion_type: Optional[pulumi.Input['GooglePrivacyDlpV2CustomInfoTypeExclusionType']] = None,
info_type: Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']] = None,
likelihood: Optional[pulumi.Input['GooglePrivacyDlpV2CustomInfoTypeLikelihood']] = None,
regex: Optional[pulumi.Input['GooglePrivacyDlpV2RegexArgs']] = None,
stored_type: Optional[pulumi.Input['GooglePrivacyDlpV2StoredTypeArgs']] = None,
surrogate_type: Optional[pulumi.Input['GooglePrivacyDlpV2SurrogateTypeArgs']] = None):
"""
Custom information type provided by the user. Used to find domain-specific sensitive information configurable to the data in question.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2DetectionRuleArgs']]] detection_rules: Set of detection rules to apply to all findings of this CustomInfoType. Rules are applied in order that they are specified. Not supported for the `surrogate_type` CustomInfoType.
:param pulumi.Input['GooglePrivacyDlpV2DictionaryArgs'] dictionary: A list of phrases to detect as a CustomInfoType.
:param pulumi.Input['GooglePrivacyDlpV2CustomInfoTypeExclusionType'] exclusion_type: If set to EXCLUSION_TYPE_EXCLUDE this infoType will not cause a finding to be returned. It still can be used for rules matching.
:param pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs'] info_type: CustomInfoType can either be a new infoType, or an extension of built-in infoType, when the name matches one of existing infoTypes and that infoType is specified in `InspectContent.info_types` field. Specifying the latter adds findings to the one detected by the system. If built-in info type is not specified in `InspectContent.info_types` list then the name is treated as a custom info type.
:param pulumi.Input['GooglePrivacyDlpV2CustomInfoTypeLikelihood'] likelihood: Likelihood to return for this CustomInfoType. This base value can be altered by a detection rule if the finding meets the criteria specified by the rule. Defaults to `VERY_LIKELY` if not specified.
:param pulumi.Input['GooglePrivacyDlpV2RegexArgs'] regex: Regular expression based CustomInfoType.
:param pulumi.Input['GooglePrivacyDlpV2StoredTypeArgs'] stored_type: Load an existing `StoredInfoType` resource for use in `InspectDataSource`. Not currently supported in `InspectContent`.
:param pulumi.Input['GooglePrivacyDlpV2SurrogateTypeArgs'] surrogate_type: Message for detecting output from deidentification transformations that support reversing.
"""
if detection_rules is not None:
pulumi.set(__self__, "detection_rules", detection_rules)
if dictionary is not None:
pulumi.set(__self__, "dictionary", dictionary)
if exclusion_type is not None:
pulumi.set(__self__, "exclusion_type", exclusion_type)
if info_type is not None:
pulumi.set(__self__, "info_type", info_type)
if likelihood is not None:
pulumi.set(__self__, "likelihood", likelihood)
if regex is not None:
pulumi.set(__self__, "regex", regex)
if stored_type is not None:
pulumi.set(__self__, "stored_type", stored_type)
if surrogate_type is not None:
pulumi.set(__self__, "surrogate_type", surrogate_type)
@property
@pulumi.getter(name="detectionRules")
def detection_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2DetectionRuleArgs']]]]:
"""
Set of detection rules to apply to all findings of this CustomInfoType. Rules are applied in order that they are specified. Not supported for the `surrogate_type` CustomInfoType.
"""
return pulumi.get(self, "detection_rules")
@detection_rules.setter
def detection_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2DetectionRuleArgs']]]]):
pulumi.set(self, "detection_rules", value)
@property
@pulumi.getter
def dictionary(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2DictionaryArgs']]:
"""
A list of phrases to detect as a CustomInfoType.
"""
return pulumi.get(self, "dictionary")
@dictionary.setter
def dictionary(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2DictionaryArgs']]):
pulumi.set(self, "dictionary", value)
@property
@pulumi.getter(name="exclusionType")
def exclusion_type(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2CustomInfoTypeExclusionType']]:
"""
If set to EXCLUSION_TYPE_EXCLUDE this infoType will not cause a finding to be returned. It still can be used for rules matching.
"""
return pulumi.get(self, "exclusion_type")
@exclusion_type.setter
def exclusion_type(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2CustomInfoTypeExclusionType']]):
pulumi.set(self, "exclusion_type", value)
@property
@pulumi.getter(name="infoType")
def info_type(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]:
"""
CustomInfoType can either be a new infoType, or an extension of a built-in infoType, when the name matches one of the existing infoTypes and that infoType is specified in the `InspectContent.info_types` field. Specifying the latter adds findings to the ones detected by the system. If the built-in info type is not specified in the `InspectContent.info_types` list, then the name is treated as a custom info type.
"""
return pulumi.get(self, "info_type")
@info_type.setter
def info_type(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]):
pulumi.set(self, "info_type", value)
@property
@pulumi.getter
def likelihood(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2CustomInfoTypeLikelihood']]:
"""
Likelihood to return for this CustomInfoType. This base value can be altered by a detection rule if the finding meets the criteria specified by the rule. Defaults to `VERY_LIKELY` if not specified.
"""
return pulumi.get(self, "likelihood")
@likelihood.setter
def likelihood(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2CustomInfoTypeLikelihood']]):
pulumi.set(self, "likelihood", value)
@property
@pulumi.getter
def regex(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2RegexArgs']]:
"""
Regular expression based CustomInfoType.
"""
return pulumi.get(self, "regex")
@regex.setter
def regex(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2RegexArgs']]):
pulumi.set(self, "regex", value)
@property
@pulumi.getter(name="storedType")
def stored_type(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2StoredTypeArgs']]:
"""
Load an existing `StoredInfoType` resource for use in `InspectDataSource`. Not currently supported in `InspectContent`.
"""
return pulumi.get(self, "stored_type")
@stored_type.setter
def stored_type(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2StoredTypeArgs']]):
pulumi.set(self, "stored_type", value)
@property
@pulumi.getter(name="surrogateType")
def surrogate_type(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2SurrogateTypeArgs']]:
"""
Message for detecting output from deidentification transformations that support reversing.
"""
return pulumi.get(self, "surrogate_type")
@surrogate_type.setter
def surrogate_type(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2SurrogateTypeArgs']]):
pulumi.set(self, "surrogate_type", value)
@pulumi.input_type
class GooglePrivacyDlpV2DatastoreOptionsArgs:
def __init__(__self__, *,
kind: Optional[pulumi.Input['GooglePrivacyDlpV2KindExpressionArgs']] = None,
partition_id: Optional[pulumi.Input['GooglePrivacyDlpV2PartitionIdArgs']] = None):
"""
Options defining a data set within Google Cloud Datastore.
:param pulumi.Input['GooglePrivacyDlpV2KindExpressionArgs'] kind: The kind to process.
:param pulumi.Input['GooglePrivacyDlpV2PartitionIdArgs'] partition_id: A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty.
"""
if kind is not None:
pulumi.set(__self__, "kind", kind)
if partition_id is not None:
pulumi.set(__self__, "partition_id", partition_id)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2KindExpressionArgs']]:
"""
The kind to process.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2KindExpressionArgs']]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter(name="partitionId")
def partition_id(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2PartitionIdArgs']]:
"""
A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty.
"""
return pulumi.get(self, "partition_id")
@partition_id.setter
def partition_id(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2PartitionIdArgs']]):
pulumi.set(self, "partition_id", value)
@pulumi.input_type
class GooglePrivacyDlpV2DateShiftConfigArgs:
def __init__(__self__, *,
lower_bound_days: pulumi.Input[int],
upper_bound_days: pulumi.Input[int],
context: Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']] = None,
crypto_key: Optional[pulumi.Input['GooglePrivacyDlpV2CryptoKeyArgs']] = None):
"""
Shifts dates by random number of days, with option to be consistent for the same context. See https://cloud.google.com/dlp/docs/concepts-date-shifting to learn more.
:param pulumi.Input[int] lower_bound_days: For example, -5 means shift date to at most 5 days back in the past.
:param pulumi.Input[int] upper_bound_days: Range of shift in days. Actual shift will be selected at random within this range (inclusive ends). Negative means shift to earlier in time. Must not be more than 365250 days (1000 years) each direction. For example, 3 means shift date to at most 3 days into the future.
:param pulumi.Input['GooglePrivacyDlpV2FieldIdArgs'] context: Points to the field that contains the context, for example, an entity id. If set, must also set cryptoKey. If set, shift will be consistent for the given context.
:param pulumi.Input['GooglePrivacyDlpV2CryptoKeyArgs'] crypto_key: Causes the shift to be computed based on this key and the context. This results in the same shift for the same context and crypto_key. If set, must also set context. Can only be applied to table items.
"""
pulumi.set(__self__, "lower_bound_days", lower_bound_days)
pulumi.set(__self__, "upper_bound_days", upper_bound_days)
if context is not None:
pulumi.set(__self__, "context", context)
if crypto_key is not None:
pulumi.set(__self__, "crypto_key", crypto_key)
@property
@pulumi.getter(name="lowerBoundDays")
def lower_bound_days(self) -> pulumi.Input[int]:
"""
For example, -5 means shift date to at most 5 days back in the past.
"""
return pulumi.get(self, "lower_bound_days")
@lower_bound_days.setter
def lower_bound_days(self, value: pulumi.Input[int]):
pulumi.set(self, "lower_bound_days", value)
@property
@pulumi.getter(name="upperBoundDays")
def upper_bound_days(self) -> pulumi.Input[int]:
"""
Range of shift in days. Actual shift will be selected at random within this range (inclusive ends). Negative means shift to earlier in time. Must not be more than 365250 days (1000 years) each direction. For example, 3 means shift date to at most 3 days into the future.
"""
return pulumi.get(self, "upper_bound_days")
@upper_bound_days.setter
def upper_bound_days(self, value: pulumi.Input[int]):
pulumi.set(self, "upper_bound_days", value)
@property
@pulumi.getter
def context(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]:
"""
Points to the field that contains the context, for example, an entity id. If set, must also set cryptoKey. If set, shift will be consistent for the given context.
"""
return pulumi.get(self, "context")
@context.setter
def context(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]):
pulumi.set(self, "context", value)
@property
@pulumi.getter(name="cryptoKey")
def crypto_key(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2CryptoKeyArgs']]:
"""
Causes the shift to be computed based on this key and the context. This results in the same shift for the same context and crypto_key. If set, must also set context. Can only be applied to table items.
"""
return pulumi.get(self, "crypto_key")
@crypto_key.setter
def crypto_key(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2CryptoKeyArgs']]):
pulumi.set(self, "crypto_key", value)
@pulumi.input_type
class GooglePrivacyDlpV2DeidentifyConfigArgs:
def __init__(__self__, *,
info_type_transformations: Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeTransformationsArgs']] = None,
record_transformations: Optional[pulumi.Input['GooglePrivacyDlpV2RecordTransformationsArgs']] = None,
transformation_error_handling: Optional[pulumi.Input['GooglePrivacyDlpV2TransformationErrorHandlingArgs']] = None):
"""
The configuration that controls how the data will change.
:param pulumi.Input['GooglePrivacyDlpV2InfoTypeTransformationsArgs'] info_type_transformations: Treat the dataset as free-form text and apply the same free text transformation everywhere.
:param pulumi.Input['GooglePrivacyDlpV2RecordTransformationsArgs'] record_transformations: Treat the dataset as structured. Transformations can be applied to specific locations within structured datasets, such as transforming a column within a table.
:param pulumi.Input['GooglePrivacyDlpV2TransformationErrorHandlingArgs'] transformation_error_handling: Mode for handling transformation errors. If left unspecified, the default mode is `TransformationErrorHandling.ThrowError`.
"""
if info_type_transformations is not None:
pulumi.set(__self__, "info_type_transformations", info_type_transformations)
if record_transformations is not None:
pulumi.set(__self__, "record_transformations", record_transformations)
if transformation_error_handling is not None:
pulumi.set(__self__, "transformation_error_handling", transformation_error_handling)
@property
@pulumi.getter(name="infoTypeTransformations")
def info_type_transformations(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeTransformationsArgs']]:
"""
Treat the dataset as free-form text and apply the same free text transformation everywhere.
"""
return pulumi.get(self, "info_type_transformations")
@info_type_transformations.setter
def info_type_transformations(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeTransformationsArgs']]):
pulumi.set(self, "info_type_transformations", value)
@property
@pulumi.getter(name="recordTransformations")
def record_transformations(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2RecordTransformationsArgs']]:
"""
Treat the dataset as structured. Transformations can be applied to specific locations within structured datasets, such as transforming a column within a table.
"""
return pulumi.get(self, "record_transformations")
@record_transformations.setter
def record_transformations(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2RecordTransformationsArgs']]):
pulumi.set(self, "record_transformations", value)
@property
@pulumi.getter(name="transformationErrorHandling")
def transformation_error_handling(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2TransformationErrorHandlingArgs']]:
"""
Mode for handling transformation errors. If left unspecified, the default mode is `TransformationErrorHandling.ThrowError`.
"""
return pulumi.get(self, "transformation_error_handling")
@transformation_error_handling.setter
def transformation_error_handling(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2TransformationErrorHandlingArgs']]):
pulumi.set(self, "transformation_error_handling", value)
@pulumi.input_type
class GooglePrivacyDlpV2DeltaPresenceEstimationConfigArgs:
def __init__(__self__, *,
quasi_ids: pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2QuasiIdArgs']]],
auxiliary_tables: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2StatisticalTableArgs']]]] = None,
region_code: Optional[pulumi.Input[str]] = None):
"""
δ-presence metric, used to estimate how likely it is for an attacker to figure out that one given individual appears in a de-identified dataset. Similarly to the k-map metric, we cannot compute δ-presence exactly without knowing the attack dataset, so we use a statistical model instead.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2QuasiIdArgs']]] quasi_ids: Fields considered to be quasi-identifiers. No two fields can have the same tag.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2StatisticalTableArgs']]] auxiliary_tables: Several auxiliary tables can be used in the analysis. Each custom_tag used to tag a quasi-identifiers field must appear in exactly one field of one auxiliary table.
:param pulumi.Input[str] region_code: ISO 3166-1 alpha-2 region code to use in the statistical modeling. Set if no column is tagged with a region-specific InfoType (like US_ZIP_5) or a region code.
"""
pulumi.set(__self__, "quasi_ids", quasi_ids)
if auxiliary_tables is not None:
pulumi.set(__self__, "auxiliary_tables", auxiliary_tables)
if region_code is not None:
pulumi.set(__self__, "region_code", region_code)
@property
@pulumi.getter(name="quasiIds")
def quasi_ids(self) -> pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2QuasiIdArgs']]]:
"""
Fields considered to be quasi-identifiers. No two fields can have the same tag.
"""
return pulumi.get(self, "quasi_ids")
@quasi_ids.setter
def quasi_ids(self, value: pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2QuasiIdArgs']]]):
pulumi.set(self, "quasi_ids", value)
@property
@pulumi.getter(name="auxiliaryTables")
def auxiliary_tables(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2StatisticalTableArgs']]]]:
"""
Several auxiliary tables can be used in the analysis. Each custom_tag used to tag a quasi-identifiers field must appear in exactly one field of one auxiliary table.
"""
return pulumi.get(self, "auxiliary_tables")
@auxiliary_tables.setter
def auxiliary_tables(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2StatisticalTableArgs']]]]):
pulumi.set(self, "auxiliary_tables", value)
@property
@pulumi.getter(name="regionCode")
def region_code(self) -> Optional[pulumi.Input[str]]:
"""
ISO 3166-1 alpha-2 region code to use in the statistical modeling. Set if no column is tagged with a region-specific InfoType (like US_ZIP_5) or a region code.
"""
return pulumi.get(self, "region_code")
@region_code.setter
def region_code(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region_code", value)
@pulumi.input_type
class GooglePrivacyDlpV2DetectionRuleArgs:
def __init__(__self__, *,
hotword_rule: Optional[pulumi.Input['GooglePrivacyDlpV2HotwordRuleArgs']] = None):
"""
Deprecated; use `InspectionRuleSet` instead. Rule for modifying a `CustomInfoType` to alter behavior under certain circumstances, depending on the specific details of the rule. Not supported for the `surrogate_type` custom infoType.
:param pulumi.Input['GooglePrivacyDlpV2HotwordRuleArgs'] hotword_rule: Hotword-based detection rule.
"""
if hotword_rule is not None:
pulumi.set(__self__, "hotword_rule", hotword_rule)
@property
@pulumi.getter(name="hotwordRule")
def hotword_rule(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2HotwordRuleArgs']]:
"""
Hotword-based detection rule.
"""
return pulumi.get(self, "hotword_rule")
@hotword_rule.setter
def hotword_rule(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2HotwordRuleArgs']]):
pulumi.set(self, "hotword_rule", value)
@pulumi.input_type
class GooglePrivacyDlpV2DictionaryArgs:
def __init__(__self__, *,
cloud_storage_path: Optional[pulumi.Input['GooglePrivacyDlpV2CloudStoragePathArgs']] = None,
word_list: Optional[pulumi.Input['GooglePrivacyDlpV2WordListArgs']] = None):
"""
Custom information type based on a dictionary of words or phrases. This can be used to match sensitive information specific to the data, such as a list of employee IDs or job titles. Dictionary words are case-insensitive and all characters other than letters and digits in the unicode [Basic Multilingual Plane](https://en.wikipedia.org/wiki/Plane_%28Unicode%29#Basic_Multilingual_Plane) will be replaced with whitespace when scanning for matches, so the dictionary phrase "Sam Johnson" will match all three phrases "sam johnson", "Sam, Johnson", and "Sam (Johnson)". Additionally, the characters surrounding any match must be of a different type than the adjacent characters within the word, so letters must be next to non-letters and digits next to non-digits. For example, the dictionary word "jen" will match the first three letters of the text "jen123" but will return no matches for "jennifer". Dictionary words containing a large number of characters that are not letters or digits may result in unexpected findings because such characters are treated as whitespace. The [limits](https://cloud.google.com/dlp/limits) page contains details about the size limits of dictionaries. For dictionaries that do not fit within these constraints, consider using `LargeCustomDictionaryConfig` in the `StoredInfoType` API.
:param pulumi.Input['GooglePrivacyDlpV2CloudStoragePathArgs'] cloud_storage_path: Newline-delimited file of words in Cloud Storage. Only a single file is accepted.
:param pulumi.Input['GooglePrivacyDlpV2WordListArgs'] word_list: List of words or phrases to search for.
"""
if cloud_storage_path is not None:
pulumi.set(__self__, "cloud_storage_path", cloud_storage_path)
if word_list is not None:
pulumi.set(__self__, "word_list", word_list)
@property
@pulumi.getter(name="cloudStoragePath")
def cloud_storage_path(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2CloudStoragePathArgs']]:
"""
Newline-delimited file of words in Cloud Storage. Only a single file is accepted.
"""
return pulumi.get(self, "cloud_storage_path")
@cloud_storage_path.setter
def cloud_storage_path(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2CloudStoragePathArgs']]):
pulumi.set(self, "cloud_storage_path", value)
@property
@pulumi.getter(name="wordList")
def word_list(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2WordListArgs']]:
"""
List of words or phrases to search for.
"""
return pulumi.get(self, "word_list")
@word_list.setter
def word_list(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2WordListArgs']]):
pulumi.set(self, "word_list", value)
@pulumi.input_type
class GooglePrivacyDlpV2EntityIdArgs:
def __init__(__self__, *,
field: Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']] = None):
"""
An entity in a dataset is a field or set of fields that correspond to a single person. For example, in medical records the `EntityId` might be a patient identifier, or for financial records it might be an account identifier. This message is used when generalizations or analysis must take into account that multiple rows correspond to the same entity.
:param pulumi.Input['GooglePrivacyDlpV2FieldIdArgs'] field: Composite key indicating which field contains the entity identifier.
"""
if field is not None:
pulumi.set(__self__, "field", field)
@property
@pulumi.getter
def field(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]:
"""
Composite key indicating which field contains the entity identifier.
"""
return pulumi.get(self, "field")
@field.setter
def field(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]):
pulumi.set(self, "field", value)
@pulumi.input_type
class GooglePrivacyDlpV2ExcludeInfoTypesArgs:
def __init__(__self__, *,
info_types: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]]] = None):
"""
List of exclude infoTypes.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]] info_types: InfoType list in an ExclusionRule drops a finding when it overlaps with, or is contained within, a finding of an infoType from this list. For example, for `InspectionRuleSet.info_types` containing "PHONE_NUMBER" and `exclusion_rule` containing `exclude_info_types.info_types` with "EMAIL_ADDRESS", phone number findings are dropped if they overlap with an EMAIL_ADDRESS finding. That causes "[email protected]" to generate only a single finding, namely the email address.
"""
if info_types is not None:
pulumi.set(__self__, "info_types", info_types)
@property
@pulumi.getter(name="infoTypes")
def info_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]]]:
"""
InfoType list in an ExclusionRule drops a finding when it overlaps with, or is contained within, a finding of an infoType from this list. For example, for `InspectionRuleSet.info_types` containing "PHONE_NUMBER" and `exclusion_rule` containing `exclude_info_types.info_types` with "EMAIL_ADDRESS", phone number findings are dropped if they overlap with an EMAIL_ADDRESS finding. That causes "[email protected]" to generate only a single finding, namely the email address.
"""
return pulumi.get(self, "info_types")
@info_types.setter
def info_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]]]):
pulumi.set(self, "info_types", value)
@pulumi.input_type
class GooglePrivacyDlpV2ExclusionRuleArgs:
def __init__(__self__, *,
dictionary: Optional[pulumi.Input['GooglePrivacyDlpV2DictionaryArgs']] = None,
exclude_info_types: Optional[pulumi.Input['GooglePrivacyDlpV2ExcludeInfoTypesArgs']] = None,
matching_type: Optional[pulumi.Input['GooglePrivacyDlpV2ExclusionRuleMatchingType']] = None,
regex: Optional[pulumi.Input['GooglePrivacyDlpV2RegexArgs']] = None):
"""
The rule that specifies conditions when findings of infoTypes specified in `InspectionRuleSet` are removed from results.
:param pulumi.Input['GooglePrivacyDlpV2DictionaryArgs'] dictionary: Dictionary which defines the rule.
:param pulumi.Input['GooglePrivacyDlpV2ExcludeInfoTypesArgs'] exclude_info_types: Set of infoTypes for which findings would affect this rule.
:param pulumi.Input['GooglePrivacyDlpV2ExclusionRuleMatchingType'] matching_type: How the rule is applied, see MatchingType documentation for details.
:param pulumi.Input['GooglePrivacyDlpV2RegexArgs'] regex: Regular expression which defines the rule.
"""
if dictionary is not None:
pulumi.set(__self__, "dictionary", dictionary)
if exclude_info_types is not None:
pulumi.set(__self__, "exclude_info_types", exclude_info_types)
if matching_type is not None:
pulumi.set(__self__, "matching_type", matching_type)
if regex is not None:
pulumi.set(__self__, "regex", regex)
@property
@pulumi.getter
def dictionary(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2DictionaryArgs']]:
"""
Dictionary which defines the rule.
"""
return pulumi.get(self, "dictionary")
@dictionary.setter
def dictionary(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2DictionaryArgs']]):
pulumi.set(self, "dictionary", value)
@property
@pulumi.getter(name="excludeInfoTypes")
def exclude_info_types(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2ExcludeInfoTypesArgs']]:
"""
Set of infoTypes for which findings would affect this rule.
"""
return pulumi.get(self, "exclude_info_types")
@exclude_info_types.setter
def exclude_info_types(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2ExcludeInfoTypesArgs']]):
pulumi.set(self, "exclude_info_types", value)
@property
@pulumi.getter(name="matchingType")
def matching_type(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2ExclusionRuleMatchingType']]:
"""
How the rule is applied, see MatchingType documentation for details.
"""
return pulumi.get(self, "matching_type")
@matching_type.setter
def matching_type(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2ExclusionRuleMatchingType']]):
pulumi.set(self, "matching_type", value)
@property
@pulumi.getter
def regex(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2RegexArgs']]:
"""
Regular expression which defines the rule.
"""
return pulumi.get(self, "regex")
@regex.setter
def regex(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2RegexArgs']]):
pulumi.set(self, "regex", value)
@pulumi.input_type
class GooglePrivacyDlpV2ExpressionsArgs:
def __init__(__self__, *,
conditions: Optional[pulumi.Input['GooglePrivacyDlpV2ConditionsArgs']] = None,
logical_operator: Optional[pulumi.Input['GooglePrivacyDlpV2ExpressionsLogicalOperator']] = None):
"""
An expression, consisting of an operator and conditions.
:param pulumi.Input['GooglePrivacyDlpV2ConditionsArgs'] conditions: Conditions to apply to the expression.
:param pulumi.Input['GooglePrivacyDlpV2ExpressionsLogicalOperator'] logical_operator: The operator to apply to the result of conditions. Default and currently only supported value is `AND`.
"""
if conditions is not None:
pulumi.set(__self__, "conditions", conditions)
if logical_operator is not None:
pulumi.set(__self__, "logical_operator", logical_operator)
@property
@pulumi.getter
def conditions(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2ConditionsArgs']]:
"""
Conditions to apply to the expression.
"""
return pulumi.get(self, "conditions")
@conditions.setter
def conditions(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2ConditionsArgs']]):
pulumi.set(self, "conditions", value)
@property
@pulumi.getter(name="logicalOperator")
def logical_operator(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2ExpressionsLogicalOperator']]:
"""
The operator to apply to the result of conditions. Default and currently only supported value is `AND`.
"""
return pulumi.get(self, "logical_operator")
@logical_operator.setter
def logical_operator(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2ExpressionsLogicalOperator']]):
pulumi.set(self, "logical_operator", value)
@pulumi.input_type
class GooglePrivacyDlpV2FieldIdArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None):
"""
General identifier of a data field in a storage service.
:param pulumi.Input[str] name: Name describing the field.
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name describing the field.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class GooglePrivacyDlpV2FieldTransformationArgs:
def __init__(__self__, *,
fields: pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]],
condition: Optional[pulumi.Input['GooglePrivacyDlpV2RecordConditionArgs']] = None,
info_type_transformations: Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeTransformationsArgs']] = None,
primitive_transformation: Optional[pulumi.Input['GooglePrivacyDlpV2PrimitiveTransformationArgs']] = None):
"""
The transformation to apply to the field.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]] fields: Input field(s) to apply the transformation to. When you have columns that reference their position within a list, omit the index from the FieldId. FieldId name matching ignores the index. For example, instead of "contact.nums[0].type", use "contact.nums.type".
:param pulumi.Input['GooglePrivacyDlpV2RecordConditionArgs'] condition: Only apply the transformation if the condition evaluates to true for the given `RecordCondition`. The conditions are allowed to reference fields that are not used in the actual transformation. Example Use Cases: - Apply a different bucket transformation to an age column if the zip code column for the same record is within a specific range. - Redact a field if the date of birth field is greater than 85.
:param pulumi.Input['GooglePrivacyDlpV2InfoTypeTransformationsArgs'] info_type_transformations: Treat the contents of the field as free text, and selectively transform content that matches an `InfoType`.
:param pulumi.Input['GooglePrivacyDlpV2PrimitiveTransformationArgs'] primitive_transformation: Apply the transformation to the entire field.
"""
pulumi.set(__self__, "fields", fields)
if condition is not None:
pulumi.set(__self__, "condition", condition)
if info_type_transformations is not None:
pulumi.set(__self__, "info_type_transformations", info_type_transformations)
if primitive_transformation is not None:
pulumi.set(__self__, "primitive_transformation", primitive_transformation)
@property
@pulumi.getter
def fields(self) -> pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]]:
"""
Input field(s) to apply the transformation to. When you have columns that reference their position within a list, omit the index from the FieldId. FieldId name matching ignores the index. For example, instead of "contact.nums[0].type", use "contact.nums.type".
"""
return pulumi.get(self, "fields")
@fields.setter
def fields(self, value: pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]]):
pulumi.set(self, "fields", value)
@property
@pulumi.getter
def condition(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2RecordConditionArgs']]:
"""
Only apply the transformation if the condition evaluates to true for the given `RecordCondition`. The conditions are allowed to reference fields that are not used in the actual transformation. Example Use Cases: - Apply a different bucket transformation to an age column if the zip code column for the same record is within a specific range. - Redact a field if the date of birth field is greater than 85.
"""
return pulumi.get(self, "condition")
@condition.setter
def condition(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2RecordConditionArgs']]):
pulumi.set(self, "condition", value)
@property
@pulumi.getter(name="infoTypeTransformations")
def info_type_transformations(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeTransformationsArgs']]:
"""
Treat the contents of the field as free text, and selectively transform content that matches an `InfoType`.
"""
return pulumi.get(self, "info_type_transformations")
@info_type_transformations.setter
def info_type_transformations(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeTransformationsArgs']]):
pulumi.set(self, "info_type_transformations", value)
@property
@pulumi.getter(name="primitiveTransformation")
def primitive_transformation(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2PrimitiveTransformationArgs']]:
"""
Apply the transformation to the entire field.
"""
return pulumi.get(self, "primitive_transformation")
@primitive_transformation.setter
def primitive_transformation(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2PrimitiveTransformationArgs']]):
pulumi.set(self, "primitive_transformation", value)
@pulumi.input_type
class GooglePrivacyDlpV2FileSetArgs:
def __init__(__self__, *,
regex_file_set: Optional[pulumi.Input['GooglePrivacyDlpV2CloudStorageRegexFileSetArgs']] = None,
url: Optional[pulumi.Input[str]] = None):
"""
Set of files to scan.
:param pulumi.Input['GooglePrivacyDlpV2CloudStorageRegexFileSetArgs'] regex_file_set: The regex-filtered set of files to scan. Exactly one of `url` or `regex_file_set` must be set.
:param pulumi.Input[str] url: The Cloud Storage url of the file(s) to scan, in the format `gs://<bucket>/<path>`. Trailing wildcard in the path is allowed. If the url ends in a trailing slash, the bucket or directory represented by the url will be scanned non-recursively (content in sub-directories will not be scanned). This means that `gs://mybucket/` is equivalent to `gs://mybucket/*`, and `gs://mybucket/directory/` is equivalent to `gs://mybucket/directory/*`. Exactly one of `url` or `regex_file_set` must be set.
"""
if regex_file_set is not None:
pulumi.set(__self__, "regex_file_set", regex_file_set)
if url is not None:
pulumi.set(__self__, "url", url)
@property
@pulumi.getter(name="regexFileSet")
def regex_file_set(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2CloudStorageRegexFileSetArgs']]:
"""
The regex-filtered set of files to scan. Exactly one of `url` or `regex_file_set` must be set.
"""
return pulumi.get(self, "regex_file_set")
@regex_file_set.setter
def regex_file_set(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2CloudStorageRegexFileSetArgs']]):
pulumi.set(self, "regex_file_set", value)
@property
@pulumi.getter
def url(self) -> Optional[pulumi.Input[str]]:
"""
The Cloud Storage url of the file(s) to scan, in the format `gs://<bucket>/<path>`. Trailing wildcard in the path is allowed. If the url ends in a trailing slash, the bucket or directory represented by the url will be scanned non-recursively (content in sub-directories will not be scanned). This means that `gs://mybucket/` is equivalent to `gs://mybucket/*`, and `gs://mybucket/directory/` is equivalent to `gs://mybucket/directory/*`. Exactly one of `url` or `regex_file_set` must be set.
"""
return pulumi.get(self, "url")
@url.setter
def url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "url", value)
@pulumi.input_type
class GooglePrivacyDlpV2FindingLimitsArgs:
def __init__(__self__, *,
max_findings_per_info_type: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeLimitArgs']]]] = None,
max_findings_per_item: Optional[pulumi.Input[int]] = None,
max_findings_per_request: Optional[pulumi.Input[int]] = None):
"""
Configuration to control the number of findings returned. Cannot be set if de-identification is requested.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeLimitArgs']]] max_findings_per_info_type: Configuration of findings limit given for specified infoTypes.
:param pulumi.Input[int] max_findings_per_item: Max number of findings that will be returned for each item scanned. When set within `InspectJobConfig`, the maximum returned is 2000 regardless of whether this is set higher. When set within `InspectContentRequest`, this field is ignored.
:param pulumi.Input[int] max_findings_per_request: Max number of findings that will be returned per request/job. When set within `InspectContentRequest`, the maximum returned is 2000 regardless of whether this is set higher.
"""
if max_findings_per_info_type is not None:
pulumi.set(__self__, "max_findings_per_info_type", max_findings_per_info_type)
if max_findings_per_item is not None:
pulumi.set(__self__, "max_findings_per_item", max_findings_per_item)
if max_findings_per_request is not None:
pulumi.set(__self__, "max_findings_per_request", max_findings_per_request)
@property
@pulumi.getter(name="maxFindingsPerInfoType")
def max_findings_per_info_type(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeLimitArgs']]]]:
"""
Configuration of findings limit given for specified infoTypes.
"""
return pulumi.get(self, "max_findings_per_info_type")
@max_findings_per_info_type.setter
def max_findings_per_info_type(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeLimitArgs']]]]):
pulumi.set(self, "max_findings_per_info_type", value)
@property
@pulumi.getter(name="maxFindingsPerItem")
def max_findings_per_item(self) -> Optional[pulumi.Input[int]]:
"""
Max number of findings that will be returned for each item scanned. When set within `InspectJobConfig`, the maximum returned is 2000 regardless of whether this is set higher. When set within `InspectContentRequest`, this field is ignored.
"""
return pulumi.get(self, "max_findings_per_item")
@max_findings_per_item.setter
def max_findings_per_item(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_findings_per_item", value)
@property
@pulumi.getter(name="maxFindingsPerRequest")
def max_findings_per_request(self) -> Optional[pulumi.Input[int]]:
"""
Max number of findings that will be returned per request/job. When set within `InspectContentRequest`, the maximum returned is 2000 regardless of whether this is set higher.
"""
return pulumi.get(self, "max_findings_per_request")
@max_findings_per_request.setter
def max_findings_per_request(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_findings_per_request", value)
@pulumi.input_type
class GooglePrivacyDlpV2FixedSizeBucketingConfigArgs:
def __init__(__self__, *,
bucket_size: pulumi.Input[float],
lower_bound: pulumi.Input['GooglePrivacyDlpV2ValueArgs'],
upper_bound: pulumi.Input['GooglePrivacyDlpV2ValueArgs']):
"""
Buckets values based on fixed size ranges. The Bucketing transformation can provide all of this functionality, but requires more configuration. This message is provided as a convenience to the user for simple bucketing strategies. The transformed value will be a hyphenated string of {lower_bound}-{upper_bound}. For example, if lower_bound = 10 and upper_bound = 20, all values that are within this bucket will be replaced with "10-20". This can be used on data of type: double, long. If the bound Value type differs from the type of data being transformed, we will first attempt converting the type of the data to be transformed to match the type of the bound before comparing. See https://cloud.google.com/dlp/docs/concepts-bucketing to learn more.
:param pulumi.Input[float] bucket_size: Size of each bucket (except for minimum and maximum buckets). So if `lower_bound` = 10, `upper_bound` = 89, and `bucket_size` = 10, then the following buckets would be used: -10, 10-20, 20-30, 30-40, 40-50, 50-60, 60-70, 70-80, 80-89, 89+. Precision up to 2 decimals works.
:param pulumi.Input['GooglePrivacyDlpV2ValueArgs'] lower_bound: Lower bound value of buckets. All values less than `lower_bound` are grouped together into a single bucket; for example if `lower_bound` = 10, then all values less than 10 are replaced with the value "-10".
:param pulumi.Input['GooglePrivacyDlpV2ValueArgs'] upper_bound: Upper bound value of buckets. All values greater than upper_bound are grouped together into a single bucket; for example if `upper_bound` = 89, then all values greater than 89 are replaced with the value "89+".
"""
pulumi.set(__self__, "bucket_size", bucket_size)
pulumi.set(__self__, "lower_bound", lower_bound)
pulumi.set(__self__, "upper_bound", upper_bound)
@property
@pulumi.getter(name="bucketSize")
def bucket_size(self) -> pulumi.Input[float]:
"""
Size of each bucket (except for minimum and maximum buckets). So if `lower_bound` = 10, `upper_bound` = 89, and `bucket_size` = 10, then the following buckets would be used: -10, 10-20, 20-30, 30-40, 40-50, 50-60, 60-70, 70-80, 80-89, 89+. Precision up to 2 decimals works.
"""
return pulumi.get(self, "bucket_size")
@bucket_size.setter
def bucket_size(self, value: pulumi.Input[float]):
pulumi.set(self, "bucket_size", value)
@property
@pulumi.getter(name="lowerBound")
def lower_bound(self) -> pulumi.Input['GooglePrivacyDlpV2ValueArgs']:
"""
Lower bound value of buckets. All values less than `lower_bound` are grouped together into a single bucket; for example if `lower_bound` = 10, then all values less than 10 are replaced with the value "-10".
"""
return pulumi.get(self, "lower_bound")
@lower_bound.setter
def lower_bound(self, value: pulumi.Input['GooglePrivacyDlpV2ValueArgs']):
pulumi.set(self, "lower_bound", value)
@property
@pulumi.getter(name="upperBound")
def upper_bound(self) -> pulumi.Input['GooglePrivacyDlpV2ValueArgs']:
"""
Upper bound value of buckets. All values greater than upper_bound are grouped together into a single bucket; for example if `upper_bound` = 89, then all values greater than 89 are replaced with the value "89+".
"""
return pulumi.get(self, "upper_bound")
@upper_bound.setter
def upper_bound(self, value: pulumi.Input['GooglePrivacyDlpV2ValueArgs']):
pulumi.set(self, "upper_bound", value)
@pulumi.input_type
class GooglePrivacyDlpV2HotwordRuleArgs:
def __init__(__self__, *,
hotword_regex: Optional[pulumi.Input['GooglePrivacyDlpV2RegexArgs']] = None,
likelihood_adjustment: Optional[pulumi.Input['GooglePrivacyDlpV2LikelihoodAdjustmentArgs']] = None,
proximity: Optional[pulumi.Input['GooglePrivacyDlpV2ProximityArgs']] = None):
"""
The rule that adjusts the likelihood of findings within a certain proximity of hotwords.
:param pulumi.Input['GooglePrivacyDlpV2RegexArgs'] hotword_regex: Regular expression pattern defining what qualifies as a hotword.
:param pulumi.Input['GooglePrivacyDlpV2LikelihoodAdjustmentArgs'] likelihood_adjustment: Likelihood adjustment to apply to all matching findings.
:param pulumi.Input['GooglePrivacyDlpV2ProximityArgs'] proximity: Proximity of the finding within which the entire hotword must reside. The total length of the window cannot exceed 1000 characters. Note that the finding itself will be included in the window, so that hotwords may be used to match substrings of the finding itself. For example, the certainty of a phone number regex "\(\d{3}\) \d{3}-\d{4}" could be adjusted upwards if the area code is known to be the local area code of a company office using the hotword regex "\(xxx\)", where "xxx" is the area code in question.
"""
if hotword_regex is not None:
pulumi.set(__self__, "hotword_regex", hotword_regex)
if likelihood_adjustment is not None:
pulumi.set(__self__, "likelihood_adjustment", likelihood_adjustment)
if proximity is not None:
pulumi.set(__self__, "proximity", proximity)
@property
@pulumi.getter(name="hotwordRegex")
def hotword_regex(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2RegexArgs']]:
"""
Regular expression pattern defining what qualifies as a hotword.
"""
return pulumi.get(self, "hotword_regex")
@hotword_regex.setter
def hotword_regex(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2RegexArgs']]):
pulumi.set(self, "hotword_regex", value)
@property
@pulumi.getter(name="likelihoodAdjustment")
def likelihood_adjustment(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2LikelihoodAdjustmentArgs']]:
"""
Likelihood adjustment to apply to all matching findings.
"""
return pulumi.get(self, "likelihood_adjustment")
@likelihood_adjustment.setter
def likelihood_adjustment(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2LikelihoodAdjustmentArgs']]):
pulumi.set(self, "likelihood_adjustment", value)
@property
@pulumi.getter
def proximity(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2ProximityArgs']]:
"""
Proximity of the finding within which the entire hotword must reside. The total length of the window cannot exceed 1000 characters. Note that the finding itself will be included in the window, so that hotwords may be used to match substrings of the finding itself. For example, the certainty of a phone number regex "\(\d{3}\) \d{3}-\d{4}" could be adjusted upwards if the area code is known to be the local area code of a company office using the hotword regex "\(xxx\)", where "xxx" is the area code in question.
"""
return pulumi.get(self, "proximity")
@proximity.setter
def proximity(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2ProximityArgs']]):
pulumi.set(self, "proximity", value)
@pulumi.input_type
class GooglePrivacyDlpV2HybridOptionsArgs:
def __init__(__self__, *,
description: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
required_finding_label_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
table_options: Optional[pulumi.Input['GooglePrivacyDlpV2TableOptionsArgs']] = None):
"""
Configuration to control jobs where the content being inspected is outside of Google Cloud Platform.
:param pulumi.Input[str] description: A short description of where the data is coming from. Will be stored once in the job. 256 max length.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: To organize findings, these labels will be added to each finding. Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. Label values must be between 0 and 63 characters long and must conform to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. No more than 10 labels can be associated with a given finding. Examples: * `"environment" : "production"` * `"pipeline" : "etl"`
:param pulumi.Input[Sequence[pulumi.Input[str]]] required_finding_label_keys: These are labels that each inspection request must include within its 'finding_labels' map. A request may contain other labels, but a request missing any of these will be rejected. Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. No more than 10 keys can be required.
:param pulumi.Input['GooglePrivacyDlpV2TableOptionsArgs'] table_options: If the container is a table, additional information to make findings meaningful such as the columns that are primary keys.
"""
if description is not None:
pulumi.set(__self__, "description", description)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if required_finding_label_keys is not None:
pulumi.set(__self__, "required_finding_label_keys", required_finding_label_keys)
if table_options is not None:
pulumi.set(__self__, "table_options", table_options)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A short description of where the data is coming from. Will be stored once in the job. 256 max length.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
To organize findings, these labels will be added to each finding. Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. Label values must be between 0 and 63 characters long and must conform to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. No more than 10 labels can be associated with a given finding. Examples: * `"environment" : "production"` * `"pipeline" : "etl"`
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter(name="requiredFindingLabelKeys")
def required_finding_label_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
These are labels that each inspection request must include within its 'finding_labels' map. A request may contain other labels, but a request missing any of these will be rejected. Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. No more than 10 keys can be required.
"""
return pulumi.get(self, "required_finding_label_keys")
@required_finding_label_keys.setter
def required_finding_label_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "required_finding_label_keys", value)
@property
@pulumi.getter(name="tableOptions")
def table_options(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2TableOptionsArgs']]:
"""
If the container is a table, additional information to make findings meaningful such as the columns that are primary keys.
"""
return pulumi.get(self, "table_options")
@table_options.setter
def table_options(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2TableOptionsArgs']]):
pulumi.set(self, "table_options", value)
@pulumi.input_type
class GooglePrivacyDlpV2InfoTypeLimitArgs:
def __init__(__self__, *,
info_type: Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']] = None,
max_findings: Optional[pulumi.Input[int]] = None):
"""
Max findings configuration per infoType, per content item or long running DlpJob.
:param pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs'] info_type: Type of information the findings limit applies to. Only one limit per info_type should be provided. If InfoTypeLimit does not have an info_type, the DLP API applies the limit against all info_types that are found but not specified in another InfoTypeLimit.
:param pulumi.Input[int] max_findings: Max findings limit for the given infoType.
"""
if info_type is not None:
pulumi.set(__self__, "info_type", info_type)
if max_findings is not None:
pulumi.set(__self__, "max_findings", max_findings)
@property
@pulumi.getter(name="infoType")
def info_type(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]:
"""
Type of information the findings limit applies to. Only one limit per info_type should be provided. If InfoTypeLimit does not have an info_type, the DLP API applies the limit against all info_types that are found but not specified in another InfoTypeLimit.
"""
return pulumi.get(self, "info_type")
@info_type.setter
def info_type(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]):
pulumi.set(self, "info_type", value)
@property
@pulumi.getter(name="maxFindings")
def max_findings(self) -> Optional[pulumi.Input[int]]:
"""
Max findings limit for the given infoType.
"""
return pulumi.get(self, "max_findings")
@max_findings.setter
def max_findings(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_findings", value)
@pulumi.input_type
class GooglePrivacyDlpV2InfoTypeTransformationsArgs:
def __init__(__self__, *,
transformations: pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeTransformationArgs']]]):
"""
A type of transformation that will scan unstructured text and apply various `PrimitiveTransformation`s to each finding, where the transformation is applied to only values that were identified as a specific info_type.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeTransformationArgs']]] transformations: Transformation for each infoType. Cannot specify more than one for a given infoType.
"""
pulumi.set(__self__, "transformations", transformations)
@property
@pulumi.getter
def transformations(self) -> pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeTransformationArgs']]]:
"""
Transformation for each infoType. Cannot specify more than one for a given infoType.
"""
return pulumi.get(self, "transformations")
@transformations.setter
def transformations(self, value: pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeTransformationArgs']]]):
pulumi.set(self, "transformations", value)
@pulumi.input_type
class GooglePrivacyDlpV2InfoTypeTransformationArgs:
def __init__(__self__, *,
primitive_transformation: pulumi.Input['GooglePrivacyDlpV2PrimitiveTransformationArgs'],
info_types: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]]] = None):
"""
A transformation to apply to text that is identified as a specific info_type.
:param pulumi.Input['GooglePrivacyDlpV2PrimitiveTransformationArgs'] primitive_transformation: Primitive transformation to apply to the infoType.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]] info_types: InfoTypes to apply the transformation to. An empty list will cause this transformation to apply to all findings that correspond to infoTypes that were requested in `InspectConfig`.
"""
pulumi.set(__self__, "primitive_transformation", primitive_transformation)
if info_types is not None:
pulumi.set(__self__, "info_types", info_types)
@property
@pulumi.getter(name="primitiveTransformation")
def primitive_transformation(self) -> pulumi.Input['GooglePrivacyDlpV2PrimitiveTransformationArgs']:
"""
Primitive transformation to apply to the infoType.
"""
return pulumi.get(self, "primitive_transformation")
@primitive_transformation.setter
def primitive_transformation(self, value: pulumi.Input['GooglePrivacyDlpV2PrimitiveTransformationArgs']):
pulumi.set(self, "primitive_transformation", value)
@property
@pulumi.getter(name="infoTypes")
def info_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]]]:
"""
InfoTypes to apply the transformation to. An empty list will cause this transformation to apply to all findings that correspond to infoTypes that were requested in `InspectConfig`.
"""
return pulumi.get(self, "info_types")
@info_types.setter
def info_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]]]):
pulumi.set(self, "info_types", value)
@pulumi.input_type
class GooglePrivacyDlpV2InfoTypeArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[str]] = None):
"""
Type of information detected by the API.
:param pulumi.Input[str] name: Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.
:param pulumi.Input[str] version: Optional version name for this InfoType.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[str]]:
"""
Optional version name for this InfoType.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "version", value)
@pulumi.input_type
class GooglePrivacyDlpV2InspectConfigArgs:
def __init__(__self__, *,
content_options: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InspectConfigContentOptionsItem']]]] = None,
custom_info_types: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2CustomInfoTypeArgs']]]] = None,
exclude_info_types: Optional[pulumi.Input[bool]] = None,
include_quote: Optional[pulumi.Input[bool]] = None,
info_types: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]]] = None,
limits: Optional[pulumi.Input['GooglePrivacyDlpV2FindingLimitsArgs']] = None,
min_likelihood: Optional[pulumi.Input['GooglePrivacyDlpV2InspectConfigMinLikelihood']] = None,
rule_set: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InspectionRuleSetArgs']]]] = None):
"""
Configuration description of the scanning process. When used with redactContent only info_types and min_likelihood are currently used.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InspectConfigContentOptionsItem']]] content_options: List of options defining data content to scan. If empty, text, images, and other content will be included.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2CustomInfoTypeArgs']]] custom_info_types: CustomInfoTypes provided by the user. See https://cloud.google.com/dlp/docs/creating-custom-infotypes to learn more.
:param pulumi.Input[bool] exclude_info_types: When true, excludes type information of the findings.
:param pulumi.Input[bool] include_quote: When true, a contextual quote from the data that triggered a finding is included in the response; see Finding.quote.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]] info_types: Restricts what info_types to look for. The values must correspond to InfoType values returned by ListInfoTypes or listed at https://cloud.google.com/dlp/docs/infotypes-reference. When no InfoTypes or CustomInfoTypes are specified in a request, the system may automatically choose what detectors to run. By default this may be all types, but may change over time as detectors are updated. If you need precise control and predictability as to what detectors are run, you should specify the specific InfoTypes listed in the reference; otherwise a default list will be used, which may change over time.
:param pulumi.Input['GooglePrivacyDlpV2FindingLimitsArgs'] limits: Configuration to control the number of findings returned.
:param pulumi.Input['GooglePrivacyDlpV2InspectConfigMinLikelihood'] min_likelihood: Only returns findings equal or above this threshold. The default is POSSIBLE. See https://cloud.google.com/dlp/docs/likelihood to learn more.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InspectionRuleSetArgs']]] rule_set: Set of rules to apply to the findings for this InspectConfig. Exclusion rules contained in the set are executed last; other rules are executed in the order they are specified for each info type.
"""
if content_options is not None:
pulumi.set(__self__, "content_options", content_options)
if custom_info_types is not None:
pulumi.set(__self__, "custom_info_types", custom_info_types)
if exclude_info_types is not None:
pulumi.set(__self__, "exclude_info_types", exclude_info_types)
if include_quote is not None:
pulumi.set(__self__, "include_quote", include_quote)
if info_types is not None:
pulumi.set(__self__, "info_types", info_types)
if limits is not None:
pulumi.set(__self__, "limits", limits)
if min_likelihood is not None:
pulumi.set(__self__, "min_likelihood", min_likelihood)
if rule_set is not None:
pulumi.set(__self__, "rule_set", rule_set)
@property
@pulumi.getter(name="contentOptions")
def content_options(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InspectConfigContentOptionsItem']]]]:
"""
List of options defining data content to scan. If empty, text, images, and other content will be included.
"""
return pulumi.get(self, "content_options")
@content_options.setter
def content_options(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InspectConfigContentOptionsItem']]]]):
pulumi.set(self, "content_options", value)
@property
@pulumi.getter(name="customInfoTypes")
def custom_info_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2CustomInfoTypeArgs']]]]:
"""
CustomInfoTypes provided by the user. See https://cloud.google.com/dlp/docs/creating-custom-infotypes to learn more.
"""
return pulumi.get(self, "custom_info_types")
@custom_info_types.setter
def custom_info_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2CustomInfoTypeArgs']]]]):
pulumi.set(self, "custom_info_types", value)
@property
@pulumi.getter(name="excludeInfoTypes")
def exclude_info_types(self) -> Optional[pulumi.Input[bool]]:
"""
When true, excludes type information of the findings.
"""
return pulumi.get(self, "exclude_info_types")
@exclude_info_types.setter
def exclude_info_types(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "exclude_info_types", value)
@property
@pulumi.getter(name="includeQuote")
def include_quote(self) -> Optional[pulumi.Input[bool]]:
"""
When true, a contextual quote from the data that triggered a finding is included in the response; see Finding.quote.
"""
return pulumi.get(self, "include_quote")
@include_quote.setter
def include_quote(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "include_quote", value)
@property
@pulumi.getter(name="infoTypes")
def info_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]]]:
"""
Restricts what info_types to look for. The values must correspond to InfoType values returned by ListInfoTypes or listed at https://cloud.google.com/dlp/docs/infotypes-reference. When no InfoTypes or CustomInfoTypes are specified in a request, the system may automatically choose what detectors to run. By default, this may be all types, but may change over time as detectors are updated. If you need precise control and predictability as to what detectors are run, you should specify specific InfoTypes listed in the reference; otherwise a default list will be used, which may change over time.
"""
return pulumi.get(self, "info_types")
@info_types.setter
def info_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]]]):
pulumi.set(self, "info_types", value)
@property
@pulumi.getter
def limits(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2FindingLimitsArgs']]:
"""
Configuration to control the number of findings returned.
"""
return pulumi.get(self, "limits")
@limits.setter
def limits(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2FindingLimitsArgs']]):
pulumi.set(self, "limits", value)
@property
@pulumi.getter(name="minLikelihood")
def min_likelihood(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2InspectConfigMinLikelihood']]:
"""
Only returns findings equal to or above this threshold. The default is POSSIBLE. See https://cloud.google.com/dlp/docs/likelihood to learn more.
"""
return pulumi.get(self, "min_likelihood")
@min_likelihood.setter
def min_likelihood(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2InspectConfigMinLikelihood']]):
pulumi.set(self, "min_likelihood", value)
@property
@pulumi.getter(name="ruleSet")
def rule_set(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InspectionRuleSetArgs']]]]:
"""
Set of rules to apply to the findings for this InspectConfig. Exclusion rules contained in the set are executed last; other rules are executed in the order they are specified for each info type.
"""
return pulumi.get(self, "rule_set")
@rule_set.setter
def rule_set(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InspectionRuleSetArgs']]]]):
pulumi.set(self, "rule_set", value)
@pulumi.input_type
class GooglePrivacyDlpV2InspectJobConfigArgs:
def __init__(__self__, *,
actions: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2ActionArgs']]]] = None,
inspect_config: Optional[pulumi.Input['GooglePrivacyDlpV2InspectConfigArgs']] = None,
inspect_template_name: Optional[pulumi.Input[str]] = None,
storage_config: Optional[pulumi.Input['GooglePrivacyDlpV2StorageConfigArgs']] = None):
"""
Controls what and how to inspect for findings.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2ActionArgs']]] actions: Actions to execute at the completion of the job.
:param pulumi.Input['GooglePrivacyDlpV2InspectConfigArgs'] inspect_config: How and what to scan for.
:param pulumi.Input[str] inspect_template_name: If provided, will be used as the default for all values in InspectConfig. `inspect_config` will be merged into the values persisted as part of the template.
:param pulumi.Input['GooglePrivacyDlpV2StorageConfigArgs'] storage_config: The data to scan.
"""
if actions is not None:
pulumi.set(__self__, "actions", actions)
if inspect_config is not None:
pulumi.set(__self__, "inspect_config", inspect_config)
if inspect_template_name is not None:
pulumi.set(__self__, "inspect_template_name", inspect_template_name)
if storage_config is not None:
pulumi.set(__self__, "storage_config", storage_config)
@property
@pulumi.getter
def actions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2ActionArgs']]]]:
"""
Actions to execute at the completion of the job.
"""
return pulumi.get(self, "actions")
@actions.setter
def actions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2ActionArgs']]]]):
pulumi.set(self, "actions", value)
@property
@pulumi.getter(name="inspectConfig")
def inspect_config(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2InspectConfigArgs']]:
"""
How and what to scan for.
"""
return pulumi.get(self, "inspect_config")
@inspect_config.setter
def inspect_config(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2InspectConfigArgs']]):
pulumi.set(self, "inspect_config", value)
@property
@pulumi.getter(name="inspectTemplateName")
def inspect_template_name(self) -> Optional[pulumi.Input[str]]:
"""
If provided, will be used as the default for all values in InspectConfig. `inspect_config` will be merged into the values persisted as part of the template.
"""
return pulumi.get(self, "inspect_template_name")
@inspect_template_name.setter
def inspect_template_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "inspect_template_name", value)
@property
@pulumi.getter(name="storageConfig")
def storage_config(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2StorageConfigArgs']]:
"""
The data to scan.
"""
return pulumi.get(self, "storage_config")
@storage_config.setter
def storage_config(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2StorageConfigArgs']]):
pulumi.set(self, "storage_config", value)
@pulumi.input_type
class GooglePrivacyDlpV2InspectionRuleSetArgs:
def __init__(__self__, *,
info_types: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InspectionRuleArgs']]]] = None):
"""
Rule set for modifying a set of infoTypes to alter behavior under certain circumstances, depending on the specific details of the rules within the set.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]] info_types: List of infoTypes this rule set is applied to.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InspectionRuleArgs']]] rules: Set of rules to be applied to infoTypes. The rules are applied in order.
"""
if info_types is not None:
pulumi.set(__self__, "info_types", info_types)
if rules is not None:
pulumi.set(__self__, "rules", rules)
@property
@pulumi.getter(name="infoTypes")
def info_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]]]:
"""
List of infoTypes this rule set is applied to.
"""
return pulumi.get(self, "info_types")
@info_types.setter
def info_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]]]):
pulumi.set(self, "info_types", value)
@property
@pulumi.getter
def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InspectionRuleArgs']]]]:
"""
Set of rules to be applied to infoTypes. The rules are applied in order.
"""
return pulumi.get(self, "rules")
@rules.setter
def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2InspectionRuleArgs']]]]):
pulumi.set(self, "rules", value)
@pulumi.input_type
class GooglePrivacyDlpV2InspectionRuleArgs:
def __init__(__self__, *,
exclusion_rule: Optional[pulumi.Input['GooglePrivacyDlpV2ExclusionRuleArgs']] = None,
hotword_rule: Optional[pulumi.Input['GooglePrivacyDlpV2HotwordRuleArgs']] = None):
"""
A single inspection rule to be applied to infoTypes, specified in `InspectionRuleSet`.
:param pulumi.Input['GooglePrivacyDlpV2ExclusionRuleArgs'] exclusion_rule: Exclusion rule.
:param pulumi.Input['GooglePrivacyDlpV2HotwordRuleArgs'] hotword_rule: Hotword-based detection rule.
"""
if exclusion_rule is not None:
pulumi.set(__self__, "exclusion_rule", exclusion_rule)
if hotword_rule is not None:
pulumi.set(__self__, "hotword_rule", hotword_rule)
@property
@pulumi.getter(name="exclusionRule")
def exclusion_rule(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2ExclusionRuleArgs']]:
"""
Exclusion rule.
"""
return pulumi.get(self, "exclusion_rule")
@exclusion_rule.setter
def exclusion_rule(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2ExclusionRuleArgs']]):
pulumi.set(self, "exclusion_rule", value)
@property
@pulumi.getter(name="hotwordRule")
def hotword_rule(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2HotwordRuleArgs']]:
"""
Hotword-based detection rule.
"""
return pulumi.get(self, "hotword_rule")
@hotword_rule.setter
def hotword_rule(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2HotwordRuleArgs']]):
pulumi.set(self, "hotword_rule", value)
@pulumi.input_type
class GooglePrivacyDlpV2JobNotificationEmailsArgs:
def __init__(__self__):
"""
Enable email notification to project owners and editors on the job's completion/failure.
"""
pass
@pulumi.input_type
class GooglePrivacyDlpV2KAnonymityConfigArgs:
def __init__(__self__, *,
entity_id: Optional[pulumi.Input['GooglePrivacyDlpV2EntityIdArgs']] = None,
quasi_ids: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]]] = None):
"""
k-anonymity metric, used for analysis of reidentification risk.
:param pulumi.Input['GooglePrivacyDlpV2EntityIdArgs'] entity_id: Message indicating that multiple rows might be associated with a single individual. If the same entity_id is associated with multiple quasi-identifier tuples over distinct rows, we consider the entire collection of tuples as the composite quasi-identifier. This collection is a multiset: the order in which the different tuples appear in the dataset is ignored, but their frequency is taken into account. Important note: a maximum of 1000 rows can be associated with a single entity ID. If more rows are associated with the same entity ID, some might be ignored.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]] quasi_ids: Set of fields to compute k-anonymity over. When multiple fields are specified, they are considered a single composite key. Structs and repeated data types are not supported; however, nested fields are supported so long as they are not structs themselves or nested within a repeated field.
"""
if entity_id is not None:
pulumi.set(__self__, "entity_id", entity_id)
if quasi_ids is not None:
pulumi.set(__self__, "quasi_ids", quasi_ids)
@property
@pulumi.getter(name="entityId")
def entity_id(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2EntityIdArgs']]:
"""
Message indicating that multiple rows might be associated with a single individual. If the same entity_id is associated with multiple quasi-identifier tuples over distinct rows, we consider the entire collection of tuples as the composite quasi-identifier. This collection is a multiset: the order in which the different tuples appear in the dataset is ignored, but their frequency is taken into account. Important note: a maximum of 1000 rows can be associated with a single entity ID. If more rows are associated with the same entity ID, some might be ignored.
"""
return pulumi.get(self, "entity_id")
@entity_id.setter
def entity_id(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2EntityIdArgs']]):
pulumi.set(self, "entity_id", value)
@property
@pulumi.getter(name="quasiIds")
def quasi_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]]]:
"""
Set of fields to compute k-anonymity over. When multiple fields are specified, they are considered a single composite key. Structs and repeated data types are not supported; however, nested fields are supported so long as they are not structs themselves or nested within a repeated field.
"""
return pulumi.get(self, "quasi_ids")
@quasi_ids.setter
def quasi_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]]]):
pulumi.set(self, "quasi_ids", value)
@pulumi.input_type
class GooglePrivacyDlpV2KMapEstimationConfigArgs:
def __init__(__self__, *,
quasi_ids: pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2TaggedFieldArgs']]],
auxiliary_tables: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2AuxiliaryTableArgs']]]] = None,
region_code: Optional[pulumi.Input[str]] = None):
"""
Reidentifiability metric. This corresponds to a risk model similar to what is called "journalist risk" in the literature, except the attack dataset is statistically modeled instead of being perfectly known. This can be done using publicly available data (like the US Census), or using a custom statistical model (indicated as one or several BigQuery tables), or by extrapolating from the distribution of values in the input dataset.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2TaggedFieldArgs']]] quasi_ids: Fields considered to be quasi-identifiers. No two columns can have the same tag.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2AuxiliaryTableArgs']]] auxiliary_tables: Several auxiliary tables can be used in the analysis. Each custom_tag used to tag a quasi-identifier column must appear in exactly one column of one auxiliary table.
:param pulumi.Input[str] region_code: ISO 3166-1 alpha-2 region code to use in the statistical modeling. Set if no column is tagged with a region-specific InfoType (like US_ZIP_5) or a region code.
"""
pulumi.set(__self__, "quasi_ids", quasi_ids)
if auxiliary_tables is not None:
pulumi.set(__self__, "auxiliary_tables", auxiliary_tables)
if region_code is not None:
pulumi.set(__self__, "region_code", region_code)
@property
@pulumi.getter(name="quasiIds")
def quasi_ids(self) -> pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2TaggedFieldArgs']]]:
"""
Fields considered to be quasi-identifiers. No two columns can have the same tag.
"""
return pulumi.get(self, "quasi_ids")
@quasi_ids.setter
def quasi_ids(self, value: pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2TaggedFieldArgs']]]):
pulumi.set(self, "quasi_ids", value)
@property
@pulumi.getter(name="auxiliaryTables")
def auxiliary_tables(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2AuxiliaryTableArgs']]]]:
"""
Several auxiliary tables can be used in the analysis. Each custom_tag used to tag a quasi-identifier column must appear in exactly one column of one auxiliary table.
"""
return pulumi.get(self, "auxiliary_tables")
@auxiliary_tables.setter
def auxiliary_tables(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2AuxiliaryTableArgs']]]]):
pulumi.set(self, "auxiliary_tables", value)
@property
@pulumi.getter(name="regionCode")
def region_code(self) -> Optional[pulumi.Input[str]]:
"""
ISO 3166-1 alpha-2 region code to use in the statistical modeling. Set if no column is tagged with a region-specific InfoType (like US_ZIP_5) or a region code.
"""
return pulumi.get(self, "region_code")
@region_code.setter
def region_code(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region_code", value)
@pulumi.input_type
class GooglePrivacyDlpV2KindExpressionArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None):
"""
A representation of a Datastore kind.
:param pulumi.Input[str] name: The name of the kind.
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the kind.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class GooglePrivacyDlpV2KmsWrappedCryptoKeyArgs:
def __init__(__self__, *,
crypto_key_name: pulumi.Input[str],
wrapped_key: pulumi.Input[str]):
"""
Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt. For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing).
:param pulumi.Input[str] crypto_key_name: The resource name of the KMS CryptoKey to use for unwrapping.
:param pulumi.Input[str] wrapped_key: The wrapped data crypto key.
"""
pulumi.set(__self__, "crypto_key_name", crypto_key_name)
pulumi.set(__self__, "wrapped_key", wrapped_key)
@property
@pulumi.getter(name="cryptoKeyName")
def crypto_key_name(self) -> pulumi.Input[str]:
"""
The resource name of the KMS CryptoKey to use for unwrapping.
"""
return pulumi.get(self, "crypto_key_name")
@crypto_key_name.setter
def crypto_key_name(self, value: pulumi.Input[str]):
pulumi.set(self, "crypto_key_name", value)
@property
@pulumi.getter(name="wrappedKey")
def wrapped_key(self) -> pulumi.Input[str]:
"""
The wrapped data crypto key.
"""
return pulumi.get(self, "wrapped_key")
@wrapped_key.setter
def wrapped_key(self, value: pulumi.Input[str]):
pulumi.set(self, "wrapped_key", value)
@pulumi.input_type
class GooglePrivacyDlpV2LDiversityConfigArgs:
def __init__(__self__, *,
quasi_ids: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]]] = None,
sensitive_attribute: Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']] = None):
"""
l-diversity metric, used for analysis of reidentification risk.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]] quasi_ids: Set of quasi-identifiers indicating how equivalence classes are defined for the l-diversity computation. When multiple fields are specified, they are considered a single composite key.
:param pulumi.Input['GooglePrivacyDlpV2FieldIdArgs'] sensitive_attribute: Sensitive field for computing the l-value.
"""
if quasi_ids is not None:
pulumi.set(__self__, "quasi_ids", quasi_ids)
if sensitive_attribute is not None:
pulumi.set(__self__, "sensitive_attribute", sensitive_attribute)
@property
@pulumi.getter(name="quasiIds")
def quasi_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]]]:
"""
Set of quasi-identifiers indicating how equivalence classes are defined for the l-diversity computation. When multiple fields are specified, they are considered a single composite key.
"""
return pulumi.get(self, "quasi_ids")
@quasi_ids.setter
def quasi_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]]]):
pulumi.set(self, "quasi_ids", value)
@property
@pulumi.getter(name="sensitiveAttribute")
def sensitive_attribute(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]:
"""
Sensitive field for computing the l-value.
"""
return pulumi.get(self, "sensitive_attribute")
@sensitive_attribute.setter
def sensitive_attribute(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]):
pulumi.set(self, "sensitive_attribute", value)
@pulumi.input_type
class GooglePrivacyDlpV2LargeCustomDictionaryConfigArgs:
def __init__(__self__, *,
big_query_field: Optional[pulumi.Input['GooglePrivacyDlpV2BigQueryFieldArgs']] = None,
cloud_storage_file_set: Optional[pulumi.Input['GooglePrivacyDlpV2CloudStorageFileSetArgs']] = None,
output_path: Optional[pulumi.Input['GooglePrivacyDlpV2CloudStoragePathArgs']] = None):
"""
Configuration for a custom dictionary created from a data source of any size up to the maximum size defined in the [limits](https://cloud.google.com/dlp/limits) page. The artifacts of dictionary creation are stored in the specified Google Cloud Storage location. Consider using `CustomInfoType.Dictionary` for smaller dictionaries that satisfy the size requirements.
:param pulumi.Input['GooglePrivacyDlpV2BigQueryFieldArgs'] big_query_field: Field in a BigQuery table where each cell represents a dictionary phrase.
:param pulumi.Input['GooglePrivacyDlpV2CloudStorageFileSetArgs'] cloud_storage_file_set: Set of files containing newline-delimited lists of dictionary phrases.
:param pulumi.Input['GooglePrivacyDlpV2CloudStoragePathArgs'] output_path: Location to store dictionary artifacts in Google Cloud Storage. These files will only be accessible by project owners and the DLP API. If any of these artifacts are modified, the dictionary is considered invalid and can no longer be used.
"""
if big_query_field is not None:
pulumi.set(__self__, "big_query_field", big_query_field)
if cloud_storage_file_set is not None:
pulumi.set(__self__, "cloud_storage_file_set", cloud_storage_file_set)
if output_path is not None:
pulumi.set(__self__, "output_path", output_path)
@property
@pulumi.getter(name="bigQueryField")
def big_query_field(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2BigQueryFieldArgs']]:
"""
Field in a BigQuery table where each cell represents a dictionary phrase.
"""
return pulumi.get(self, "big_query_field")
@big_query_field.setter
def big_query_field(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2BigQueryFieldArgs']]):
pulumi.set(self, "big_query_field", value)
@property
@pulumi.getter(name="cloudStorageFileSet")
def cloud_storage_file_set(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2CloudStorageFileSetArgs']]:
"""
Set of files containing newline-delimited lists of dictionary phrases.
"""
return pulumi.get(self, "cloud_storage_file_set")
@cloud_storage_file_set.setter
def cloud_storage_file_set(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2CloudStorageFileSetArgs']]):
pulumi.set(self, "cloud_storage_file_set", value)
@property
@pulumi.getter(name="outputPath")
def output_path(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2CloudStoragePathArgs']]:
"""
Location to store dictionary artifacts in Google Cloud Storage. These files will only be accessible by project owners and the DLP API. If any of these artifacts are modified, the dictionary is considered invalid and can no longer be used.
"""
return pulumi.get(self, "output_path")
@output_path.setter
def output_path(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2CloudStoragePathArgs']]):
pulumi.set(self, "output_path", value)
@pulumi.input_type
class GooglePrivacyDlpV2LeaveUntransformedArgs:
def __init__(__self__):
"""
Skips the data without modifying it if the requested transformation would cause an error. For example, if a `DateShift` transformation were applied to an IP address, this mode would leave the IP address unchanged in the response.
"""
pass
@pulumi.input_type
class GooglePrivacyDlpV2LikelihoodAdjustmentArgs:
def __init__(__self__, *,
fixed_likelihood: Optional[pulumi.Input['GooglePrivacyDlpV2LikelihoodAdjustmentFixedLikelihood']] = None,
relative_likelihood: Optional[pulumi.Input[int]] = None):
"""
Message for specifying an adjustment to the likelihood of a finding as part of a detection rule.
:param pulumi.Input['GooglePrivacyDlpV2LikelihoodAdjustmentFixedLikelihood'] fixed_likelihood: Set the likelihood of a finding to a fixed value.
:param pulumi.Input[int] relative_likelihood: Increase or decrease the likelihood by the specified number of levels. For example, if a finding would be `POSSIBLE` without the detection rule and `relative_likelihood` is 1, then it is upgraded to `LIKELY`, while a value of -1 would downgrade it to `UNLIKELY`. Likelihood may never drop below `VERY_UNLIKELY` or exceed `VERY_LIKELY`, so applying an adjustment of 1 followed by an adjustment of -1 when base likelihood is `VERY_LIKELY` will result in a final likelihood of `LIKELY`.
"""
if fixed_likelihood is not None:
pulumi.set(__self__, "fixed_likelihood", fixed_likelihood)
if relative_likelihood is not None:
pulumi.set(__self__, "relative_likelihood", relative_likelihood)
@property
@pulumi.getter(name="fixedLikelihood")
def fixed_likelihood(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2LikelihoodAdjustmentFixedLikelihood']]:
"""
Set the likelihood of a finding to a fixed value.
"""
return pulumi.get(self, "fixed_likelihood")
@fixed_likelihood.setter
def fixed_likelihood(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2LikelihoodAdjustmentFixedLikelihood']]):
pulumi.set(self, "fixed_likelihood", value)
@property
@pulumi.getter(name="relativeLikelihood")
def relative_likelihood(self) -> Optional[pulumi.Input[int]]:
"""
Increase or decrease the likelihood by the specified number of levels. For example, if a finding would be `POSSIBLE` without the detection rule and `relative_likelihood` is 1, then it is upgraded to `LIKELY`, while a value of -1 would downgrade it to `UNLIKELY`. Likelihood may never drop below `VERY_UNLIKELY` or exceed `VERY_LIKELY`, so applying an adjustment of 1 followed by an adjustment of -1 when base likelihood is `VERY_LIKELY` will result in a final likelihood of `LIKELY`.
"""
return pulumi.get(self, "relative_likelihood")
@relative_likelihood.setter
def relative_likelihood(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "relative_likelihood", value)
@pulumi.input_type
class GooglePrivacyDlpV2ManualArgs:
def __init__(__self__):
"""
Job trigger option for hybrid jobs. Jobs must be manually created and finished.
"""
pass
@pulumi.input_type
class GooglePrivacyDlpV2NumericalStatsConfigArgs:
def __init__(__self__, *,
field: Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']] = None):
"""
Compute numerical stats over an individual column, including min, max, and quantiles.
:param pulumi.Input['GooglePrivacyDlpV2FieldIdArgs'] field: Field to compute numerical stats on. Supported types are integer, float, date, datetime, timestamp, time.
"""
if field is not None:
pulumi.set(__self__, "field", field)
@property
@pulumi.getter
def field(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]:
"""
Field to compute numerical stats on. Supported types are integer, float, date, datetime, timestamp, time.
"""
return pulumi.get(self, "field")
@field.setter
def field(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]):
pulumi.set(self, "field", value)
@pulumi.input_type
class GooglePrivacyDlpV2OutputStorageConfigArgs:
def __init__(__self__, *,
output_schema: Optional[pulumi.Input['GooglePrivacyDlpV2OutputStorageConfigOutputSchema']] = None,
table: Optional[pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs']] = None):
"""
Cloud repository for storing output.
:param pulumi.Input['GooglePrivacyDlpV2OutputStorageConfigOutputSchema'] output_schema: Schema used for writing the findings for Inspect jobs. This field is only used for Inspect and must be unspecified for Risk jobs. Columns are derived from the `Finding` object. If appending to an existing table, any columns from the predefined schema that are missing will be added. No columns in the existing table will be deleted. If unspecified, then all available columns will be used for a new table or an (existing) table with no schema, and no changes will be made to an existing table that has a schema. Only for use with external storage.
:param pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs'] table: Store findings in an existing table or a new table in an existing dataset. If table_id is not set, a new one will be generated for you with the following format: dlp_googleapis_yyyy_mm_dd_[dlp_job_id]. Pacific timezone will be used for generating the date details. For Inspect, each column in an existing output table must have the same name, type, and mode as a field in the `Finding` object. For Risk, an existing output table should be the output of a previous Risk analysis job run on the same source table, with the same privacy metric and quasi-identifiers. Risk jobs that analyze the same table but compute a different privacy metric, or use different sets of quasi-identifiers, cannot store their results in the same table.
"""
if output_schema is not None:
pulumi.set(__self__, "output_schema", output_schema)
if table is not None:
pulumi.set(__self__, "table", table)
@property
@pulumi.getter(name="outputSchema")
def output_schema(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2OutputStorageConfigOutputSchema']]:
"""
Schema used for writing the findings for Inspect jobs. This field is only used for Inspect and must be unspecified for Risk jobs. Columns are derived from the `Finding` object. If appending to an existing table, any columns from the predefined schema that are missing will be added. No columns in the existing table will be deleted. If unspecified, then all available columns will be used for a new table or an (existing) table with no schema, and no changes will be made to an existing table that has a schema. Only for use with external storage.
"""
return pulumi.get(self, "output_schema")
@output_schema.setter
def output_schema(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2OutputStorageConfigOutputSchema']]):
pulumi.set(self, "output_schema", value)
@property
@pulumi.getter
def table(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs']]:
"""
Store findings in an existing table or a new table in an existing dataset. If table_id is not set, a new one will be generated for you with the following format: dlp_googleapis_yyyy_mm_dd_[dlp_job_id]. Pacific timezone will be used for generating the date details. For Inspect, each column in an existing output table must have the same name, type, and mode as a field in the `Finding` object. For Risk, an existing output table should be the output of a previous Risk analysis job run on the same source table, with the same privacy metric and quasi-identifiers. Risk jobs that analyze the same table but compute a different privacy metric, or use different sets of quasi-identifiers, cannot store their results in the same table.
"""
return pulumi.get(self, "table")
@table.setter
def table(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs']]):
pulumi.set(self, "table", value)
@pulumi.input_type
class GooglePrivacyDlpV2PartitionIdArgs:
def __init__(__self__, *,
namespace_id: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None):
"""
Datastore partition ID. A partition ID identifies a grouping of entities. The grouping is always by project and namespace; however, the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID.
:param pulumi.Input[str] namespace_id: If not empty, the ID of the namespace to which the entities belong.
:param pulumi.Input[str] project: The ID of the project to which the entities belong.
"""
if namespace_id is not None:
pulumi.set(__self__, "namespace_id", namespace_id)
if project is not None:
pulumi.set(__self__, "project", project)
@property
@pulumi.getter(name="namespaceId")
def namespace_id(self) -> Optional[pulumi.Input[str]]:
"""
If not empty, the ID of the namespace to which the entities belong.
"""
return pulumi.get(self, "namespace_id")
@namespace_id.setter
def namespace_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "namespace_id", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project to which the entities belong.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@pulumi.input_type
class GooglePrivacyDlpV2PrimitiveTransformationArgs:
def __init__(__self__, *,
bucketing_config: Optional[pulumi.Input['GooglePrivacyDlpV2BucketingConfigArgs']] = None,
character_mask_config: Optional[pulumi.Input['GooglePrivacyDlpV2CharacterMaskConfigArgs']] = None,
crypto_deterministic_config: Optional[pulumi.Input['GooglePrivacyDlpV2CryptoDeterministicConfigArgs']] = None,
crypto_hash_config: Optional[pulumi.Input['GooglePrivacyDlpV2CryptoHashConfigArgs']] = None,
crypto_replace_ffx_fpe_config: Optional[pulumi.Input['GooglePrivacyDlpV2CryptoReplaceFfxFpeConfigArgs']] = None,
date_shift_config: Optional[pulumi.Input['GooglePrivacyDlpV2DateShiftConfigArgs']] = None,
fixed_size_bucketing_config: Optional[pulumi.Input['GooglePrivacyDlpV2FixedSizeBucketingConfigArgs']] = None,
redact_config: Optional[pulumi.Input['GooglePrivacyDlpV2RedactConfigArgs']] = None,
replace_config: Optional[pulumi.Input['GooglePrivacyDlpV2ReplaceValueConfigArgs']] = None,
replace_dictionary_config: Optional[pulumi.Input['GooglePrivacyDlpV2ReplaceDictionaryConfigArgs']] = None,
replace_with_info_type_config: Optional[pulumi.Input['GooglePrivacyDlpV2ReplaceWithInfoTypeConfigArgs']] = None,
time_part_config: Optional[pulumi.Input['GooglePrivacyDlpV2TimePartConfigArgs']] = None):
"""
A rule for transforming a value.
:param pulumi.Input['GooglePrivacyDlpV2BucketingConfigArgs'] bucketing_config: Bucketing
:param pulumi.Input['GooglePrivacyDlpV2CharacterMaskConfigArgs'] character_mask_config: Mask
:param pulumi.Input['GooglePrivacyDlpV2CryptoDeterministicConfigArgs'] crypto_deterministic_config: Deterministic Crypto
:param pulumi.Input['GooglePrivacyDlpV2CryptoHashConfigArgs'] crypto_hash_config: Crypto
:param pulumi.Input['GooglePrivacyDlpV2CryptoReplaceFfxFpeConfigArgs'] crypto_replace_ffx_fpe_config: Ffx-Fpe
:param pulumi.Input['GooglePrivacyDlpV2DateShiftConfigArgs'] date_shift_config: Date Shift
:param pulumi.Input['GooglePrivacyDlpV2FixedSizeBucketingConfigArgs'] fixed_size_bucketing_config: Fixed size bucketing
:param pulumi.Input['GooglePrivacyDlpV2RedactConfigArgs'] redact_config: Redact
:param pulumi.Input['GooglePrivacyDlpV2ReplaceValueConfigArgs'] replace_config: Replace with a specified value.
:param pulumi.Input['GooglePrivacyDlpV2ReplaceDictionaryConfigArgs'] replace_dictionary_config: Replace with a value randomly drawn (with replacement) from a dictionary.
:param pulumi.Input['GooglePrivacyDlpV2ReplaceWithInfoTypeConfigArgs'] replace_with_info_type_config: Replace with infotype
:param pulumi.Input['GooglePrivacyDlpV2TimePartConfigArgs'] time_part_config: Time extraction
"""
if bucketing_config is not None:
pulumi.set(__self__, "bucketing_config", bucketing_config)
if character_mask_config is not None:
pulumi.set(__self__, "character_mask_config", character_mask_config)
if crypto_deterministic_config is not None:
pulumi.set(__self__, "crypto_deterministic_config", crypto_deterministic_config)
if crypto_hash_config is not None:
pulumi.set(__self__, "crypto_hash_config", crypto_hash_config)
if crypto_replace_ffx_fpe_config is not None:
pulumi.set(__self__, "crypto_replace_ffx_fpe_config", crypto_replace_ffx_fpe_config)
if date_shift_config is not None:
pulumi.set(__self__, "date_shift_config", date_shift_config)
if fixed_size_bucketing_config is not None:
pulumi.set(__self__, "fixed_size_bucketing_config", fixed_size_bucketing_config)
if redact_config is not None:
pulumi.set(__self__, "redact_config", redact_config)
if replace_config is not None:
pulumi.set(__self__, "replace_config", replace_config)
if replace_dictionary_config is not None:
pulumi.set(__self__, "replace_dictionary_config", replace_dictionary_config)
if replace_with_info_type_config is not None:
pulumi.set(__self__, "replace_with_info_type_config", replace_with_info_type_config)
if time_part_config is not None:
pulumi.set(__self__, "time_part_config", time_part_config)
@property
@pulumi.getter(name="bucketingConfig")
def bucketing_config(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2BucketingConfigArgs']]:
"""
Bucketing
"""
return pulumi.get(self, "bucketing_config")
@bucketing_config.setter
def bucketing_config(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2BucketingConfigArgs']]):
pulumi.set(self, "bucketing_config", value)
@property
@pulumi.getter(name="characterMaskConfig")
def character_mask_config(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2CharacterMaskConfigArgs']]:
"""
Mask
"""
return pulumi.get(self, "character_mask_config")
@character_mask_config.setter
def character_mask_config(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2CharacterMaskConfigArgs']]):
pulumi.set(self, "character_mask_config", value)
@property
@pulumi.getter(name="cryptoDeterministicConfig")
def crypto_deterministic_config(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2CryptoDeterministicConfigArgs']]:
"""
Deterministic Crypto
"""
return pulumi.get(self, "crypto_deterministic_config")
@crypto_deterministic_config.setter
def crypto_deterministic_config(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2CryptoDeterministicConfigArgs']]):
pulumi.set(self, "crypto_deterministic_config", value)
@property
@pulumi.getter(name="cryptoHashConfig")
def crypto_hash_config(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2CryptoHashConfigArgs']]:
"""
Crypto
"""
return pulumi.get(self, "crypto_hash_config")
@crypto_hash_config.setter
def crypto_hash_config(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2CryptoHashConfigArgs']]):
pulumi.set(self, "crypto_hash_config", value)
@property
@pulumi.getter(name="cryptoReplaceFfxFpeConfig")
def crypto_replace_ffx_fpe_config(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2CryptoReplaceFfxFpeConfigArgs']]:
"""
Ffx-Fpe
"""
return pulumi.get(self, "crypto_replace_ffx_fpe_config")
@crypto_replace_ffx_fpe_config.setter
def crypto_replace_ffx_fpe_config(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2CryptoReplaceFfxFpeConfigArgs']]):
pulumi.set(self, "crypto_replace_ffx_fpe_config", value)
@property
@pulumi.getter(name="dateShiftConfig")
def date_shift_config(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2DateShiftConfigArgs']]:
"""
Date Shift
"""
return pulumi.get(self, "date_shift_config")
@date_shift_config.setter
def date_shift_config(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2DateShiftConfigArgs']]):
pulumi.set(self, "date_shift_config", value)
@property
@pulumi.getter(name="fixedSizeBucketingConfig")
def fixed_size_bucketing_config(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2FixedSizeBucketingConfigArgs']]:
"""
Fixed size bucketing
"""
return pulumi.get(self, "fixed_size_bucketing_config")
@fixed_size_bucketing_config.setter
def fixed_size_bucketing_config(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2FixedSizeBucketingConfigArgs']]):
pulumi.set(self, "fixed_size_bucketing_config", value)
@property
@pulumi.getter(name="redactConfig")
def redact_config(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2RedactConfigArgs']]:
"""
Redact
"""
return pulumi.get(self, "redact_config")
@redact_config.setter
def redact_config(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2RedactConfigArgs']]):
pulumi.set(self, "redact_config", value)
@property
@pulumi.getter(name="replaceConfig")
def replace_config(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2ReplaceValueConfigArgs']]:
"""
Replace with a specified value.
"""
return pulumi.get(self, "replace_config")
@replace_config.setter
def replace_config(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2ReplaceValueConfigArgs']]):
pulumi.set(self, "replace_config", value)
@property
@pulumi.getter(name="replaceDictionaryConfig")
def replace_dictionary_config(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2ReplaceDictionaryConfigArgs']]:
"""
Replace with a value randomly drawn (with replacement) from a dictionary.
"""
return pulumi.get(self, "replace_dictionary_config")
@replace_dictionary_config.setter
def replace_dictionary_config(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2ReplaceDictionaryConfigArgs']]):
pulumi.set(self, "replace_dictionary_config", value)
@property
@pulumi.getter(name="replaceWithInfoTypeConfig")
def replace_with_info_type_config(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2ReplaceWithInfoTypeConfigArgs']]:
"""
Replace with infotype
"""
return pulumi.get(self, "replace_with_info_type_config")
@replace_with_info_type_config.setter
def replace_with_info_type_config(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2ReplaceWithInfoTypeConfigArgs']]):
pulumi.set(self, "replace_with_info_type_config", value)
@property
@pulumi.getter(name="timePartConfig")
def time_part_config(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2TimePartConfigArgs']]:
"""
Time extraction
"""
return pulumi.get(self, "time_part_config")
@time_part_config.setter
def time_part_config(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2TimePartConfigArgs']]):
pulumi.set(self, "time_part_config", value)
@pulumi.input_type
class GooglePrivacyDlpV2PrivacyMetricArgs:
def __init__(__self__, *,
categorical_stats_config: Optional[pulumi.Input['GooglePrivacyDlpV2CategoricalStatsConfigArgs']] = None,
delta_presence_estimation_config: Optional[pulumi.Input['GooglePrivacyDlpV2DeltaPresenceEstimationConfigArgs']] = None,
k_anonymity_config: Optional[pulumi.Input['GooglePrivacyDlpV2KAnonymityConfigArgs']] = None,
k_map_estimation_config: Optional[pulumi.Input['GooglePrivacyDlpV2KMapEstimationConfigArgs']] = None,
l_diversity_config: Optional[pulumi.Input['GooglePrivacyDlpV2LDiversityConfigArgs']] = None,
numerical_stats_config: Optional[pulumi.Input['GooglePrivacyDlpV2NumericalStatsConfigArgs']] = None):
"""
Privacy metric to compute for reidentification risk analysis.
:param pulumi.Input['GooglePrivacyDlpV2CategoricalStatsConfigArgs'] categorical_stats_config: Categorical stats
:param pulumi.Input['GooglePrivacyDlpV2DeltaPresenceEstimationConfigArgs'] delta_presence_estimation_config: delta-presence
:param pulumi.Input['GooglePrivacyDlpV2KAnonymityConfigArgs'] k_anonymity_config: K-anonymity
:param pulumi.Input['GooglePrivacyDlpV2KMapEstimationConfigArgs'] k_map_estimation_config: k-map
:param pulumi.Input['GooglePrivacyDlpV2LDiversityConfigArgs'] l_diversity_config: l-diversity
:param pulumi.Input['GooglePrivacyDlpV2NumericalStatsConfigArgs'] numerical_stats_config: Numerical stats
"""
if categorical_stats_config is not None:
pulumi.set(__self__, "categorical_stats_config", categorical_stats_config)
if delta_presence_estimation_config is not None:
pulumi.set(__self__, "delta_presence_estimation_config", delta_presence_estimation_config)
if k_anonymity_config is not None:
pulumi.set(__self__, "k_anonymity_config", k_anonymity_config)
if k_map_estimation_config is not None:
pulumi.set(__self__, "k_map_estimation_config", k_map_estimation_config)
if l_diversity_config is not None:
pulumi.set(__self__, "l_diversity_config", l_diversity_config)
if numerical_stats_config is not None:
pulumi.set(__self__, "numerical_stats_config", numerical_stats_config)
@property
@pulumi.getter(name="categoricalStatsConfig")
def categorical_stats_config(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2CategoricalStatsConfigArgs']]:
"""
Categorical stats
"""
return pulumi.get(self, "categorical_stats_config")
@categorical_stats_config.setter
def categorical_stats_config(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2CategoricalStatsConfigArgs']]):
pulumi.set(self, "categorical_stats_config", value)
@property
@pulumi.getter(name="deltaPresenceEstimationConfig")
def delta_presence_estimation_config(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2DeltaPresenceEstimationConfigArgs']]:
"""
delta-presence
"""
return pulumi.get(self, "delta_presence_estimation_config")
@delta_presence_estimation_config.setter
def delta_presence_estimation_config(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2DeltaPresenceEstimationConfigArgs']]):
pulumi.set(self, "delta_presence_estimation_config", value)
@property
@pulumi.getter(name="kAnonymityConfig")
def k_anonymity_config(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2KAnonymityConfigArgs']]:
"""
K-anonymity
"""
return pulumi.get(self, "k_anonymity_config")
@k_anonymity_config.setter
def k_anonymity_config(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2KAnonymityConfigArgs']]):
pulumi.set(self, "k_anonymity_config", value)
@property
@pulumi.getter(name="kMapEstimationConfig")
def k_map_estimation_config(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2KMapEstimationConfigArgs']]:
"""
k-map
"""
return pulumi.get(self, "k_map_estimation_config")
@k_map_estimation_config.setter
def k_map_estimation_config(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2KMapEstimationConfigArgs']]):
pulumi.set(self, "k_map_estimation_config", value)
@property
@pulumi.getter(name="lDiversityConfig")
def l_diversity_config(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2LDiversityConfigArgs']]:
"""
l-diversity
"""
return pulumi.get(self, "l_diversity_config")
@l_diversity_config.setter
def l_diversity_config(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2LDiversityConfigArgs']]):
pulumi.set(self, "l_diversity_config", value)
@property
@pulumi.getter(name="numericalStatsConfig")
def numerical_stats_config(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2NumericalStatsConfigArgs']]:
"""
Numerical stats
"""
return pulumi.get(self, "numerical_stats_config")
@numerical_stats_config.setter
def numerical_stats_config(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2NumericalStatsConfigArgs']]):
pulumi.set(self, "numerical_stats_config", value)
@pulumi.input_type
class GooglePrivacyDlpV2ProximityArgs:
def __init__(__self__, *,
window_after: Optional[pulumi.Input[int]] = None,
window_before: Optional[pulumi.Input[int]] = None):
"""
Message for specifying a window around a finding to apply a detection rule.
:param pulumi.Input[int] window_after: Number of characters after the finding to consider.
:param pulumi.Input[int] window_before: Number of characters before the finding to consider.
"""
if window_after is not None:
pulumi.set(__self__, "window_after", window_after)
if window_before is not None:
pulumi.set(__self__, "window_before", window_before)
@property
@pulumi.getter(name="windowAfter")
def window_after(self) -> Optional[pulumi.Input[int]]:
"""
Number of characters after the finding to consider.
"""
return pulumi.get(self, "window_after")
@window_after.setter
def window_after(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "window_after", value)
@property
@pulumi.getter(name="windowBefore")
def window_before(self) -> Optional[pulumi.Input[int]]:
"""
Number of characters before the finding to consider.
"""
return pulumi.get(self, "window_before")
@window_before.setter
def window_before(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "window_before", value)
@pulumi.input_type
class GooglePrivacyDlpV2PublishFindingsToCloudDataCatalogArgs:
def __init__(__self__):
"""
Publish findings of a DlpJob to Data Catalog. Labels summarizing the results of the DlpJob will be applied to the entry for the resource scanned in Data Catalog. Any labels previously written by another DlpJob will be deleted. InfoType naming patterns are strictly enforced when using this feature. Note that the findings will be persisted in Data Catalog storage and are governed by Data Catalog service-specific policy; see https://cloud.google.com/terms/service-terms. Only a single instance of this action can be specified, and it is only allowed if all resources being scanned are BigQuery tables. Compatible with: Inspect
"""
pass
@pulumi.input_type
class GooglePrivacyDlpV2PublishSummaryToCsccArgs:
def __init__(__self__):
"""
Publish the result summary of a DlpJob to the Cloud Security Command Center (CSCC Alpha). This action is only available for projects which are part of an organization and whitelisted for the alpha Cloud Security Command Center. The action will publish the count of finding instances and their info types. The summary of findings will be persisted in CSCC and is governed by CSCC service-specific policy; see https://cloud.google.com/terms/service-terms. Only a single instance of this action can be specified. Compatible with: Inspect
"""
pass
@pulumi.input_type
class GooglePrivacyDlpV2PublishToPubSubArgs:
def __init__(__self__, *,
topic: Optional[pulumi.Input[str]] = None):
"""
Publish a message into a given Pub/Sub topic when the DlpJob has completed. The message contains a single field, `DlpJobName`, which is equal to the finished job's [`DlpJob.name`](https://cloud.google.com/dlp/docs/reference/rest/v2/projects.dlpJobs#DlpJob). Compatible with: Inspect, Risk
:param pulumi.Input[str] topic: Cloud Pub/Sub topic to send notifications to. The topic must have granted publishing access rights to the DLP API service account executing the long-running DlpJob sending the notifications. Format is projects/{project}/topics/{topic}.
"""
if topic is not None:
pulumi.set(__self__, "topic", topic)
@property
@pulumi.getter
def topic(self) -> Optional[pulumi.Input[str]]:
"""
Cloud Pub/Sub topic to send notifications to. The topic must have granted publishing access rights to the DLP API service account executing the long-running DlpJob sending the notifications. Format is projects/{project}/topics/{topic}.
"""
return pulumi.get(self, "topic")
@topic.setter
def topic(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "topic", value)
@pulumi.input_type
class GooglePrivacyDlpV2PublishToStackdriverArgs:
def __init__(__self__):
"""
Enable Stackdriver metric dlp.googleapis.com/finding_count. This will publish a metric to Stackdriver for each infotype requested, indicating how many findings were found for it. CustomDetectors will be bucketed as 'Custom' under the Stackdriver label 'info_type'.
"""
pass
@pulumi.input_type
class GooglePrivacyDlpV2QuasiIdFieldArgs:
def __init__(__self__, *,
custom_tag: Optional[pulumi.Input[str]] = None,
field: Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']] = None):
"""
A quasi-identifier column has a custom_tag, used to know which column in the data corresponds to which column in the statistical model.
:param pulumi.Input[str] custom_tag: An auxiliary field.
:param pulumi.Input['GooglePrivacyDlpV2FieldIdArgs'] field: Identifies the column.
"""
if custom_tag is not None:
pulumi.set(__self__, "custom_tag", custom_tag)
if field is not None:
pulumi.set(__self__, "field", field)
@property
@pulumi.getter(name="customTag")
def custom_tag(self) -> Optional[pulumi.Input[str]]:
"""
An auxiliary field.
"""
return pulumi.get(self, "custom_tag")
@custom_tag.setter
def custom_tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_tag", value)
@property
@pulumi.getter
def field(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]:
"""
Identifies the column.
"""
return pulumi.get(self, "field")
@field.setter
def field(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]):
pulumi.set(self, "field", value)
@pulumi.input_type
class GooglePrivacyDlpV2QuasiIdentifierFieldArgs:
def __init__(__self__, *,
custom_tag: Optional[pulumi.Input[str]] = None,
field: Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']] = None):
"""
A quasi-identifier column has a custom_tag, used to know which column in the data corresponds to which column in the statistical model.
:param pulumi.Input[str] custom_tag: A column can be tagged with a custom tag. In this case, the user must indicate an auxiliary table that contains statistical information on the possible values of this column (below).
:param pulumi.Input['GooglePrivacyDlpV2FieldIdArgs'] field: Identifies the column.
"""
if custom_tag is not None:
pulumi.set(__self__, "custom_tag", custom_tag)
if field is not None:
pulumi.set(__self__, "field", field)
@property
@pulumi.getter(name="customTag")
def custom_tag(self) -> Optional[pulumi.Input[str]]:
"""
A column can be tagged with a custom tag. In this case, the user must indicate an auxiliary table that contains statistical information on the possible values of this column (below).
"""
return pulumi.get(self, "custom_tag")
@custom_tag.setter
def custom_tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_tag", value)
@property
@pulumi.getter
def field(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]:
"""
Identifies the column.
"""
return pulumi.get(self, "field")
@field.setter
def field(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]):
pulumi.set(self, "field", value)
@pulumi.input_type
class GooglePrivacyDlpV2QuasiIdArgs:
def __init__(__self__, *,
field: pulumi.Input['GooglePrivacyDlpV2FieldIdArgs'],
custom_tag: Optional[pulumi.Input[str]] = None,
inferred: Optional[pulumi.Input['GoogleProtobufEmptyArgs']] = None,
info_type: Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']] = None):
"""
A column with a semantic tag attached.
:param pulumi.Input['GooglePrivacyDlpV2FieldIdArgs'] field: Identifies the column.
:param pulumi.Input[str] custom_tag: A column can be tagged with a custom tag. In this case, the user must indicate an auxiliary table that contains statistical information on the possible values of this column (below).
:param pulumi.Input['GoogleProtobufEmptyArgs'] inferred: If no semantic tag is indicated, we infer the statistical model from the distribution of values in the input data
:param pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs'] info_type: A column can be tagged with an InfoType to use the relevant public dataset as a statistical model of population, if available. We currently support US ZIP codes, region codes, ages, and genders. To programmatically obtain the list of supported InfoTypes, use ListInfoTypes with the supported_by=RISK_ANALYSIS filter.
"""
pulumi.set(__self__, "field", field)
if custom_tag is not None:
pulumi.set(__self__, "custom_tag", custom_tag)
if inferred is not None:
pulumi.set(__self__, "inferred", inferred)
if info_type is not None:
pulumi.set(__self__, "info_type", info_type)
@property
@pulumi.getter
def field(self) -> pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']:
"""
Identifies the column.
"""
return pulumi.get(self, "field")
@field.setter
def field(self, value: pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']):
pulumi.set(self, "field", value)
@property
@pulumi.getter(name="customTag")
def custom_tag(self) -> Optional[pulumi.Input[str]]:
"""
A column can be tagged with a custom tag. In this case, the user must indicate an auxiliary table that contains statistical information on the possible values of this column (below).
"""
return pulumi.get(self, "custom_tag")
@custom_tag.setter
def custom_tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_tag", value)
@property
@pulumi.getter
def inferred(self) -> Optional[pulumi.Input['GoogleProtobufEmptyArgs']]:
"""
If no semantic tag is indicated, we infer the statistical model from the distribution of values in the input data
"""
return pulumi.get(self, "inferred")
@inferred.setter
def inferred(self, value: Optional[pulumi.Input['GoogleProtobufEmptyArgs']]):
pulumi.set(self, "inferred", value)
@property
@pulumi.getter(name="infoType")
def info_type(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]:
"""
A column can be tagged with an InfoType to use the relevant public dataset as a statistical model of population, if available. We currently support US ZIP codes, region codes, ages, and genders. To programmatically obtain the list of supported InfoTypes, use ListInfoTypes with the supported_by=RISK_ANALYSIS filter.
"""
return pulumi.get(self, "info_type")
@info_type.setter
def info_type(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]):
pulumi.set(self, "info_type", value)
@pulumi.input_type
class GooglePrivacyDlpV2RecordConditionArgs:
def __init__(__self__, *,
expressions: Optional[pulumi.Input['GooglePrivacyDlpV2ExpressionsArgs']] = None):
"""
A condition for determining whether a transformation should be applied to a field.
:param pulumi.Input['GooglePrivacyDlpV2ExpressionsArgs'] expressions: An expression.
"""
if expressions is not None:
pulumi.set(__self__, "expressions", expressions)
@property
@pulumi.getter
def expressions(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2ExpressionsArgs']]:
"""
An expression.
"""
return pulumi.get(self, "expressions")
@expressions.setter
def expressions(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2ExpressionsArgs']]):
pulumi.set(self, "expressions", value)
@pulumi.input_type
class GooglePrivacyDlpV2RecordSuppressionArgs:
def __init__(__self__, *,
condition: Optional[pulumi.Input['GooglePrivacyDlpV2RecordConditionArgs']] = None):
"""
Configuration to suppress records whose suppression conditions evaluate to true.
:param pulumi.Input['GooglePrivacyDlpV2RecordConditionArgs'] condition: A condition that, when it evaluates to true, will result in the record being suppressed from the transformed content.
"""
if condition is not None:
pulumi.set(__self__, "condition", condition)
@property
@pulumi.getter
def condition(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2RecordConditionArgs']]:
"""
A condition that when it evaluates to true will result in the record being evaluated to be suppressed from the transformed content.
"""
return pulumi.get(self, "condition")
@condition.setter
def condition(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2RecordConditionArgs']]):
pulumi.set(self, "condition", value)
@pulumi.input_type
class GooglePrivacyDlpV2RecordTransformationsArgs:
def __init__(__self__, *,
field_transformations: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldTransformationArgs']]]] = None,
record_suppressions: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2RecordSuppressionArgs']]]] = None):
"""
A type of transformation that is applied over structured data such as a table.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldTransformationArgs']]] field_transformations: Transform the record by applying various field transformations.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2RecordSuppressionArgs']]] record_suppressions: Configuration defining which records get suppressed entirely. Records that match any suppression rule are omitted from the output.
"""
if field_transformations is not None:
pulumi.set(__self__, "field_transformations", field_transformations)
if record_suppressions is not None:
pulumi.set(__self__, "record_suppressions", record_suppressions)
@property
@pulumi.getter(name="fieldTransformations")
def field_transformations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldTransformationArgs']]]]:
"""
Transform the record by applying various field transformations.
"""
return pulumi.get(self, "field_transformations")
@field_transformations.setter
def field_transformations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldTransformationArgs']]]]):
pulumi.set(self, "field_transformations", value)
@property
@pulumi.getter(name="recordSuppressions")
def record_suppressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2RecordSuppressionArgs']]]]:
"""
Configuration defining which records get suppressed entirely. Records that match any suppression rule are omitted from the output.
"""
return pulumi.get(self, "record_suppressions")
@record_suppressions.setter
def record_suppressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2RecordSuppressionArgs']]]]):
pulumi.set(self, "record_suppressions", value)
@pulumi.input_type
class GooglePrivacyDlpV2RedactConfigArgs:
def __init__(__self__):
"""
Redact a given value. For example, if used with an `InfoTypeTransformation` transforming PHONE_NUMBER, and input 'My phone number is 206-555-0123', the output would be 'My phone number is '.
"""
pass
@pulumi.input_type
class GooglePrivacyDlpV2RegexArgs:
def __init__(__self__, *,
group_indexes: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
pattern: Optional[pulumi.Input[str]] = None):
"""
Message defining a custom regular expression.
:param pulumi.Input[Sequence[pulumi.Input[int]]] group_indexes: The index of the submatch to extract as findings. When not specified, the entire match is returned. No more than 3 may be included.
:param pulumi.Input[str] pattern: Pattern defining the regular expression. Its syntax (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub.
"""
if group_indexes is not None:
pulumi.set(__self__, "group_indexes", group_indexes)
if pattern is not None:
pulumi.set(__self__, "pattern", pattern)
@property
@pulumi.getter(name="groupIndexes")
def group_indexes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
"""
The index of the submatch to extract as findings. When not specified, the entire match is returned. No more than 3 may be included.
"""
return pulumi.get(self, "group_indexes")
@group_indexes.setter
def group_indexes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
pulumi.set(self, "group_indexes", value)
@property
@pulumi.getter
def pattern(self) -> Optional[pulumi.Input[str]]:
"""
Pattern defining the regular expression. Its syntax (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub.
"""
return pulumi.get(self, "pattern")
@pattern.setter
def pattern(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pattern", value)
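# Illustrative sketch (added for documentation; not part of the generated SDK):
# a custom regular expression that reports only its first capture group as the
# finding. The pattern below is a hypothetical example.
def _example_regex_args() -> 'GooglePrivacyDlpV2RegexArgs':
    return GooglePrivacyDlpV2RegexArgs(
        pattern=r"(\d{3})-\d{2}-\d{4}",  # capture group 1 holds the leading digits
        group_indexes=[1],               # extract only the first submatch
    )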
@pulumi.input_type
class GooglePrivacyDlpV2ReplaceDictionaryConfigArgs:
def __init__(__self__, *,
word_list: Optional[pulumi.Input['GooglePrivacyDlpV2WordListArgs']] = None):
"""
Replace each input value with a value randomly selected from the dictionary.
:param pulumi.Input['GooglePrivacyDlpV2WordListArgs'] word_list: A list of words to select from for random replacement. The [limits](https://cloud.google.com/dlp/limits) page contains details about the size limits of dictionaries.
"""
if word_list is not None:
pulumi.set(__self__, "word_list", word_list)
@property
@pulumi.getter(name="wordList")
def word_list(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2WordListArgs']]:
"""
A list of words to select from for random replacement. The [limits](https://cloud.google.com/dlp/limits) page contains details about the size limits of dictionaries.
"""
return pulumi.get(self, "word_list")
@word_list.setter
def word_list(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2WordListArgs']]):
pulumi.set(self, "word_list", value)
@pulumi.input_type
class GooglePrivacyDlpV2ReplaceValueConfigArgs:
def __init__(__self__, *,
new_value: Optional[pulumi.Input['GooglePrivacyDlpV2ValueArgs']] = None):
"""
Replace each input value with a given `Value`.
:param pulumi.Input['GooglePrivacyDlpV2ValueArgs'] new_value: Value to replace it with.
"""
if new_value is not None:
pulumi.set(__self__, "new_value", new_value)
@property
@pulumi.getter(name="newValue")
def new_value(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2ValueArgs']]:
"""
Value to replace it with.
"""
return pulumi.get(self, "new_value")
@new_value.setter
def new_value(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2ValueArgs']]):
pulumi.set(self, "new_value", value)
@pulumi.input_type
class GooglePrivacyDlpV2ReplaceWithInfoTypeConfigArgs:
def __init__(__self__):
"""
Replace each matching finding with the name of the info_type.
"""
pass
@pulumi.input_type
class GooglePrivacyDlpV2RiskAnalysisJobConfigArgs:
def __init__(__self__, *,
actions: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2ActionArgs']]]] = None,
privacy_metric: Optional[pulumi.Input['GooglePrivacyDlpV2PrivacyMetricArgs']] = None,
source_table: Optional[pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs']] = None):
"""
Configuration for a risk analysis job. See https://cloud.google.com/dlp/docs/concepts-risk-analysis to learn more.
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2ActionArgs']]] actions: Actions to execute at the completion of the job. Are executed in the order provided.
:param pulumi.Input['GooglePrivacyDlpV2PrivacyMetricArgs'] privacy_metric: Privacy metric to compute.
:param pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs'] source_table: Input dataset to compute metrics over.
"""
if actions is not None:
pulumi.set(__self__, "actions", actions)
if privacy_metric is not None:
pulumi.set(__self__, "privacy_metric", privacy_metric)
if source_table is not None:
pulumi.set(__self__, "source_table", source_table)
@property
@pulumi.getter
def actions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2ActionArgs']]]]:
"""
Actions to execute at the completion of the job. Are executed in the order provided.
"""
return pulumi.get(self, "actions")
@actions.setter
def actions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2ActionArgs']]]]):
pulumi.set(self, "actions", value)
@property
@pulumi.getter(name="privacyMetric")
def privacy_metric(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2PrivacyMetricArgs']]:
"""
Privacy metric to compute.
"""
return pulumi.get(self, "privacy_metric")
@privacy_metric.setter
def privacy_metric(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2PrivacyMetricArgs']]):
pulumi.set(self, "privacy_metric", value)
@property
@pulumi.getter(name="sourceTable")
def source_table(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs']]:
"""
Input dataset to compute metrics over.
"""
return pulumi.get(self, "source_table")
@source_table.setter
def source_table(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs']]):
pulumi.set(self, "source_table", value)
@pulumi.input_type
class GooglePrivacyDlpV2SaveFindingsArgs:
def __init__(__self__, *,
output_config: Optional[pulumi.Input['GooglePrivacyDlpV2OutputStorageConfigArgs']] = None):
"""
If set, the detailed findings will be persisted to the specified OutputStorageConfig. Only a single instance of this action can be specified. Compatible with: Inspect, Risk
:param pulumi.Input['GooglePrivacyDlpV2OutputStorageConfigArgs'] output_config: Location to store findings outside of DLP.
"""
if output_config is not None:
pulumi.set(__self__, "output_config", output_config)
@property
@pulumi.getter(name="outputConfig")
def output_config(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2OutputStorageConfigArgs']]:
"""
Location to store findings outside of DLP.
"""
return pulumi.get(self, "output_config")
@output_config.setter
def output_config(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2OutputStorageConfigArgs']]):
pulumi.set(self, "output_config", value)
@pulumi.input_type
class GooglePrivacyDlpV2ScheduleArgs:
def __init__(__self__, *,
recurrence_period_duration: Optional[pulumi.Input[str]] = None):
"""
Schedule for inspect job triggers.
        :param pulumi.Input[str] recurrence_period_duration: With this option a job is started on a regular periodic basis. For example: every day (86400 seconds). A scheduled start time will be skipped if the previous execution has not ended when its scheduled time occurs. This value must be set to a time duration greater than or equal to 1 day and can be no longer than 60 days.
"""
if recurrence_period_duration is not None:
pulumi.set(__self__, "recurrence_period_duration", recurrence_period_duration)
@property
@pulumi.getter(name="recurrencePeriodDuration")
def recurrence_period_duration(self) -> Optional[pulumi.Input[str]]:
"""
        With this option a job is started on a regular periodic basis. For example: every day (86400 seconds). A scheduled start time will be skipped if the previous execution has not ended when its scheduled time occurs. This value must be set to a time duration greater than or equal to 1 day and can be no longer than 60 days.
"""
return pulumi.get(self, "recurrence_period_duration")
@recurrence_period_duration.setter
def recurrence_period_duration(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "recurrence_period_duration", value)
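# Illustrative sketch (added for documentation; not part of the generated SDK):
# a daily schedule wrapped in a Trigger. The "86400s" string is an assumption
# based on the protobuf Duration JSON encoding (whole seconds with an 's'
# suffix); one day is the minimum allowed recurrence period.
def _example_daily_trigger() -> 'GooglePrivacyDlpV2TriggerArgs':
    daily = GooglePrivacyDlpV2ScheduleArgs(recurrence_period_duration="86400s")
    return GooglePrivacyDlpV2TriggerArgs(schedule=daily)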
@pulumi.input_type
class GooglePrivacyDlpV2StatisticalTableArgs:
def __init__(__self__, *,
quasi_ids: pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2QuasiIdentifierFieldArgs']]],
relative_frequency: pulumi.Input['GooglePrivacyDlpV2FieldIdArgs'],
table: pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs']):
"""
An auxiliary table containing statistical information on the relative frequency of different quasi-identifiers values. It has one or several quasi-identifiers columns, and one column that indicates the relative frequency of each quasi-identifier tuple. If a tuple is present in the data but not in the auxiliary table, the corresponding relative frequency is assumed to be zero (and thus, the tuple is highly reidentifiable).
:param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2QuasiIdentifierFieldArgs']]] quasi_ids: Quasi-identifier columns.
:param pulumi.Input['GooglePrivacyDlpV2FieldIdArgs'] relative_frequency: The relative frequency column must contain a floating-point number between 0 and 1 (inclusive). Null values are assumed to be zero.
:param pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs'] table: Auxiliary table location.
"""
pulumi.set(__self__, "quasi_ids", quasi_ids)
pulumi.set(__self__, "relative_frequency", relative_frequency)
pulumi.set(__self__, "table", table)
@property
@pulumi.getter(name="quasiIds")
def quasi_ids(self) -> pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2QuasiIdentifierFieldArgs']]]:
"""
Quasi-identifier columns.
"""
return pulumi.get(self, "quasi_ids")
@quasi_ids.setter
def quasi_ids(self, value: pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2QuasiIdentifierFieldArgs']]]):
pulumi.set(self, "quasi_ids", value)
@property
@pulumi.getter(name="relativeFrequency")
def relative_frequency(self) -> pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']:
"""
The relative frequency column must contain a floating-point number between 0 and 1 (inclusive). Null values are assumed to be zero.
"""
return pulumi.get(self, "relative_frequency")
@relative_frequency.setter
def relative_frequency(self, value: pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']):
pulumi.set(self, "relative_frequency", value)
@property
@pulumi.getter
def table(self) -> pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs']:
"""
Auxiliary table location.
"""
return pulumi.get(self, "table")
@table.setter
def table(self, value: pulumi.Input['GooglePrivacyDlpV2BigQueryTableArgs']):
pulumi.set(self, "table", value)
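# Illustrative sketch (added for documentation; not part of the generated SDK):
# assembling a statistical auxiliary table from pre-built field and table
# references. The FieldId and BigQueryTable arguments are passed in rather than
# constructed here, since their own signatures are defined elsewhere in this
# module; the quasi-identifier wrapper is assumed to take the field/custom_tag
# arguments shown at the top of this module.
def _example_statistical_table(
        table: 'GooglePrivacyDlpV2BigQueryTableArgs',
        quasi_id_column: 'GooglePrivacyDlpV2FieldIdArgs',
        frequency_column: 'GooglePrivacyDlpV2FieldIdArgs') -> 'GooglePrivacyDlpV2StatisticalTableArgs':
    zip_code_column = GooglePrivacyDlpV2QuasiIdentifierFieldArgs(
        field=quasi_id_column,
        custom_tag="zip_code",  # ties the column to the auxiliary statistics
    )
    return GooglePrivacyDlpV2StatisticalTableArgs(
        quasi_ids=[zip_code_column],
        relative_frequency=frequency_column,  # column holding values in [0, 1]
        table=table,
    )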
@pulumi.input_type
class GooglePrivacyDlpV2StorageConfigArgs:
def __init__(__self__, *,
big_query_options: Optional[pulumi.Input['GooglePrivacyDlpV2BigQueryOptionsArgs']] = None,
cloud_storage_options: Optional[pulumi.Input['GooglePrivacyDlpV2CloudStorageOptionsArgs']] = None,
datastore_options: Optional[pulumi.Input['GooglePrivacyDlpV2DatastoreOptionsArgs']] = None,
hybrid_options: Optional[pulumi.Input['GooglePrivacyDlpV2HybridOptionsArgs']] = None,
timespan_config: Optional[pulumi.Input['GooglePrivacyDlpV2TimespanConfigArgs']] = None):
"""
Shared message indicating Cloud storage type.
:param pulumi.Input['GooglePrivacyDlpV2BigQueryOptionsArgs'] big_query_options: BigQuery options.
:param pulumi.Input['GooglePrivacyDlpV2CloudStorageOptionsArgs'] cloud_storage_options: Google Cloud Storage options.
:param pulumi.Input['GooglePrivacyDlpV2DatastoreOptionsArgs'] datastore_options: Google Cloud Datastore options.
:param pulumi.Input['GooglePrivacyDlpV2HybridOptionsArgs'] hybrid_options: Hybrid inspection options.
"""
if big_query_options is not None:
pulumi.set(__self__, "big_query_options", big_query_options)
if cloud_storage_options is not None:
pulumi.set(__self__, "cloud_storage_options", cloud_storage_options)
if datastore_options is not None:
pulumi.set(__self__, "datastore_options", datastore_options)
if hybrid_options is not None:
pulumi.set(__self__, "hybrid_options", hybrid_options)
if timespan_config is not None:
pulumi.set(__self__, "timespan_config", timespan_config)
@property
@pulumi.getter(name="bigQueryOptions")
def big_query_options(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2BigQueryOptionsArgs']]:
"""
BigQuery options.
"""
return pulumi.get(self, "big_query_options")
@big_query_options.setter
def big_query_options(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2BigQueryOptionsArgs']]):
pulumi.set(self, "big_query_options", value)
@property
@pulumi.getter(name="cloudStorageOptions")
def cloud_storage_options(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2CloudStorageOptionsArgs']]:
"""
Google Cloud Storage options.
"""
return pulumi.get(self, "cloud_storage_options")
@cloud_storage_options.setter
def cloud_storage_options(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2CloudStorageOptionsArgs']]):
pulumi.set(self, "cloud_storage_options", value)
@property
@pulumi.getter(name="datastoreOptions")
def datastore_options(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2DatastoreOptionsArgs']]:
"""
Google Cloud Datastore options.
"""
return pulumi.get(self, "datastore_options")
@datastore_options.setter
def datastore_options(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2DatastoreOptionsArgs']]):
pulumi.set(self, "datastore_options", value)
@property
@pulumi.getter(name="hybridOptions")
def hybrid_options(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2HybridOptionsArgs']]:
"""
Hybrid inspection options.
"""
return pulumi.get(self, "hybrid_options")
@hybrid_options.setter
def hybrid_options(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2HybridOptionsArgs']]):
pulumi.set(self, "hybrid_options", value)
@property
@pulumi.getter(name="timespanConfig")
def timespan_config(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2TimespanConfigArgs']]:
return pulumi.get(self, "timespan_config")
@timespan_config.setter
def timespan_config(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2TimespanConfigArgs']]):
pulumi.set(self, "timespan_config", value)
@pulumi.input_type
class GooglePrivacyDlpV2StoredInfoTypeConfigArgs:
def __init__(__self__, *,
description: Optional[pulumi.Input[str]] = None,
dictionary: Optional[pulumi.Input['GooglePrivacyDlpV2DictionaryArgs']] = None,
display_name: Optional[pulumi.Input[str]] = None,
large_custom_dictionary: Optional[pulumi.Input['GooglePrivacyDlpV2LargeCustomDictionaryConfigArgs']] = None,
regex: Optional[pulumi.Input['GooglePrivacyDlpV2RegexArgs']] = None):
"""
        Configuration for stored infoTypes. All fields and subfields are provided by the user. For more information, see https://cloud.google.com/dlp/docs/creating-custom-infotypes.
:param pulumi.Input[str] description: Description of the StoredInfoType (max 256 characters).
:param pulumi.Input['GooglePrivacyDlpV2DictionaryArgs'] dictionary: Store dictionary-based CustomInfoType.
:param pulumi.Input[str] display_name: Display name of the StoredInfoType (max 256 characters).
:param pulumi.Input['GooglePrivacyDlpV2LargeCustomDictionaryConfigArgs'] large_custom_dictionary: StoredInfoType where findings are defined by a dictionary of phrases.
:param pulumi.Input['GooglePrivacyDlpV2RegexArgs'] regex: Store regular expression-based StoredInfoType.
"""
if description is not None:
pulumi.set(__self__, "description", description)
if dictionary is not None:
pulumi.set(__self__, "dictionary", dictionary)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if large_custom_dictionary is not None:
pulumi.set(__self__, "large_custom_dictionary", large_custom_dictionary)
if regex is not None:
pulumi.set(__self__, "regex", regex)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of the StoredInfoType (max 256 characters).
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def dictionary(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2DictionaryArgs']]:
"""
Store dictionary-based CustomInfoType.
"""
return pulumi.get(self, "dictionary")
@dictionary.setter
def dictionary(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2DictionaryArgs']]):
pulumi.set(self, "dictionary", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
Display name of the StoredInfoType (max 256 characters).
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="largeCustomDictionary")
def large_custom_dictionary(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2LargeCustomDictionaryConfigArgs']]:
"""
StoredInfoType where findings are defined by a dictionary of phrases.
"""
return pulumi.get(self, "large_custom_dictionary")
@large_custom_dictionary.setter
def large_custom_dictionary(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2LargeCustomDictionaryConfigArgs']]):
pulumi.set(self, "large_custom_dictionary", value)
@property
@pulumi.getter
def regex(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2RegexArgs']]:
"""
Store regular expression-based StoredInfoType.
"""
return pulumi.get(self, "regex")
@regex.setter
def regex(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2RegexArgs']]):
pulumi.set(self, "regex", value)
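# Illustrative sketch (added for documentation; not part of the generated SDK):
# a stored infoType backed by the regex message defined earlier in this module.
# The display name, description and pattern are hypothetical placeholders.
def _example_stored_info_type_config() -> 'GooglePrivacyDlpV2StoredInfoTypeConfigArgs':
    return GooglePrivacyDlpV2StoredInfoTypeConfigArgs(
        display_name="Ticket number",
        description="Matches hypothetical internal ticket identifiers.",
        regex=GooglePrivacyDlpV2RegexArgs(pattern=r"TICKET-\d{6}"),
    )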
@pulumi.input_type
class GooglePrivacyDlpV2StoredTypeArgs:
def __init__(__self__, *,
create_time: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
A reference to a StoredInfoType to use with scanning.
:param pulumi.Input[str] create_time: Timestamp indicating when the version of the `StoredInfoType` used for inspection was created. Output-only field, populated by the system.
:param pulumi.Input[str] name: Resource name of the requested `StoredInfoType`, for example `organizations/433245324/storedInfoTypes/432452342` or `projects/project-id/storedInfoTypes/432452342`.
"""
if create_time is not None:
pulumi.set(__self__, "create_time", create_time)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="createTime")
def create_time(self) -> Optional[pulumi.Input[str]]:
"""
Timestamp indicating when the version of the `StoredInfoType` used for inspection was created. Output-only field, populated by the system.
"""
return pulumi.get(self, "create_time")
@create_time.setter
def create_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "create_time", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Resource name of the requested `StoredInfoType`, for example `organizations/433245324/storedInfoTypes/432452342` or `projects/project-id/storedInfoTypes/432452342`.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class GooglePrivacyDlpV2SurrogateTypeArgs:
def __init__(__self__):
"""
Message for detecting output from deidentification transformations such as [`CryptoReplaceFfxFpeConfig`](https://cloud.google.com/dlp/docs/reference/rest/v2/organizations.deidentifyTemplates#cryptoreplaceffxfpeconfig). These types of transformations are those that perform pseudonymization, thereby producing a "surrogate" as output. This should be used in conjunction with a field on the transformation such as `surrogate_info_type`. This CustomInfoType does not support the use of `detection_rules`.
"""
pass
@pulumi.input_type
class GooglePrivacyDlpV2TableOptionsArgs:
def __init__(__self__, *,
identifying_fields: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]]] = None):
"""
Instructions regarding the table content being inspected.
        :param pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]] identifying_fields: The columns that are the primary keys for table objects included in ContentItem. A copy of this cell's value will be stored alongside each finding so that the finding can be traced to the specific row it came from. No more than 3 may be provided.
"""
if identifying_fields is not None:
pulumi.set(__self__, "identifying_fields", identifying_fields)
@property
@pulumi.getter(name="identifyingFields")
def identifying_fields(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]]]:
"""
        The columns that are the primary keys for table objects included in ContentItem. A copy of this cell's value will be stored alongside each finding so that the finding can be traced to the specific row it came from. No more than 3 may be provided.
"""
return pulumi.get(self, "identifying_fields")
@identifying_fields.setter
def identifying_fields(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]]]):
pulumi.set(self, "identifying_fields", value)
@pulumi.input_type
class GooglePrivacyDlpV2TaggedFieldArgs:
def __init__(__self__, *,
field: pulumi.Input['GooglePrivacyDlpV2FieldIdArgs'],
custom_tag: Optional[pulumi.Input[str]] = None,
inferred: Optional[pulumi.Input['GoogleProtobufEmptyArgs']] = None,
info_type: Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']] = None):
"""
A column with a semantic tag attached.
:param pulumi.Input['GooglePrivacyDlpV2FieldIdArgs'] field: Identifies the column.
:param pulumi.Input[str] custom_tag: A column can be tagged with a custom tag. In this case, the user must indicate an auxiliary table that contains statistical information on the possible values of this column (below).
:param pulumi.Input['GoogleProtobufEmptyArgs'] inferred: If no semantic tag is indicated, we infer the statistical model from the distribution of values in the input data
        :param pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs'] info_type: A column can be tagged with an InfoType to use the relevant public dataset as a statistical model of population, if available. We currently support US ZIP codes, region codes, ages and genders. To programmatically obtain the list of supported InfoTypes, use ListInfoTypes with the supported_by=RISK_ANALYSIS filter.
"""
pulumi.set(__self__, "field", field)
if custom_tag is not None:
pulumi.set(__self__, "custom_tag", custom_tag)
if inferred is not None:
pulumi.set(__self__, "inferred", inferred)
if info_type is not None:
pulumi.set(__self__, "info_type", info_type)
@property
@pulumi.getter
def field(self) -> pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']:
"""
Identifies the column.
"""
return pulumi.get(self, "field")
@field.setter
def field(self, value: pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']):
pulumi.set(self, "field", value)
@property
@pulumi.getter(name="customTag")
def custom_tag(self) -> Optional[pulumi.Input[str]]:
"""
A column can be tagged with a custom tag. In this case, the user must indicate an auxiliary table that contains statistical information on the possible values of this column (below).
"""
return pulumi.get(self, "custom_tag")
@custom_tag.setter
def custom_tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_tag", value)
@property
@pulumi.getter
def inferred(self) -> Optional[pulumi.Input['GoogleProtobufEmptyArgs']]:
"""
If no semantic tag is indicated, we infer the statistical model from the distribution of values in the input data
"""
return pulumi.get(self, "inferred")
@inferred.setter
def inferred(self, value: Optional[pulumi.Input['GoogleProtobufEmptyArgs']]):
pulumi.set(self, "inferred", value)
@property
@pulumi.getter(name="infoType")
def info_type(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]:
"""
        A column can be tagged with an InfoType to use the relevant public dataset as a statistical model of population, if available. We currently support US ZIP codes, region codes, ages and genders. To programmatically obtain the list of supported InfoTypes, use ListInfoTypes with the supported_by=RISK_ANALYSIS filter.
"""
return pulumi.get(self, "info_type")
@info_type.setter
def info_type(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2InfoTypeArgs']]):
pulumi.set(self, "info_type", value)
@pulumi.input_type
class GooglePrivacyDlpV2ThrowErrorArgs:
def __init__(__self__):
"""
Throw an error and fail the request when a transformation error occurs.
"""
pass
@pulumi.input_type
class GooglePrivacyDlpV2TimePartConfigArgs:
def __init__(__self__, *,
part_to_extract: Optional[pulumi.Input['GooglePrivacyDlpV2TimePartConfigPartToExtract']] = None):
"""
For use with `Date`, `Timestamp`, and `TimeOfDay`, extract or preserve a portion of the value.
:param pulumi.Input['GooglePrivacyDlpV2TimePartConfigPartToExtract'] part_to_extract: The part of the time to keep.
"""
if part_to_extract is not None:
pulumi.set(__self__, "part_to_extract", part_to_extract)
@property
@pulumi.getter(name="partToExtract")
def part_to_extract(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2TimePartConfigPartToExtract']]:
"""
The part of the time to keep.
"""
return pulumi.get(self, "part_to_extract")
@part_to_extract.setter
def part_to_extract(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2TimePartConfigPartToExtract']]):
pulumi.set(self, "part_to_extract", value)
@pulumi.input_type
class GooglePrivacyDlpV2TimespanConfigArgs:
def __init__(__self__, *,
enable_auto_population_of_timespan_config: Optional[pulumi.Input[bool]] = None,
end_time: Optional[pulumi.Input[str]] = None,
start_time: Optional[pulumi.Input[str]] = None,
timestamp_field: Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']] = None):
"""
Configuration of the timespan of the items to include in scanning. Currently only supported when inspecting Google Cloud Storage and BigQuery.
:param pulumi.Input[bool] enable_auto_population_of_timespan_config: When the job is started by a JobTrigger we will automatically figure out a valid start_time to avoid scanning files that have not been modified since the last time the JobTrigger executed. This will be based on the time of the execution of the last run of the JobTrigger.
:param pulumi.Input[str] end_time: Exclude files, tables, or rows newer than this value. If not set, no upper time limit is applied.
:param pulumi.Input[str] start_time: Exclude files, tables, or rows older than this value. If not set, no lower time limit is applied.
:param pulumi.Input['GooglePrivacyDlpV2FieldIdArgs'] timestamp_field: Specification of the field containing the timestamp of scanned items. Used for data sources like Datastore and BigQuery. For BigQuery: If this value is not specified and the table was modified between the given start and end times, the entire table will be scanned. If this value is specified, then rows are filtered based on the given start and end times. Rows with a `NULL` value in the provided BigQuery column are skipped. Valid data types of the provided BigQuery column are: `INTEGER`, `DATE`, `TIMESTAMP`, and `DATETIME`. For Datastore: If this value is specified, then entities are filtered based on the given start and end times. If an entity does not contain the provided timestamp property or contains empty or invalid values, then it is included. Valid data types of the provided timestamp property are: `TIMESTAMP`.
"""
if enable_auto_population_of_timespan_config is not None:
pulumi.set(__self__, "enable_auto_population_of_timespan_config", enable_auto_population_of_timespan_config)
if end_time is not None:
pulumi.set(__self__, "end_time", end_time)
if start_time is not None:
pulumi.set(__self__, "start_time", start_time)
if timestamp_field is not None:
pulumi.set(__self__, "timestamp_field", timestamp_field)
@property
@pulumi.getter(name="enableAutoPopulationOfTimespanConfig")
def enable_auto_population_of_timespan_config(self) -> Optional[pulumi.Input[bool]]:
"""
When the job is started by a JobTrigger we will automatically figure out a valid start_time to avoid scanning files that have not been modified since the last time the JobTrigger executed. This will be based on the time of the execution of the last run of the JobTrigger.
"""
return pulumi.get(self, "enable_auto_population_of_timespan_config")
@enable_auto_population_of_timespan_config.setter
def enable_auto_population_of_timespan_config(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_auto_population_of_timespan_config", value)
@property
@pulumi.getter(name="endTime")
def end_time(self) -> Optional[pulumi.Input[str]]:
"""
Exclude files, tables, or rows newer than this value. If not set, no upper time limit is applied.
"""
return pulumi.get(self, "end_time")
@end_time.setter
def end_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "end_time", value)
@property
@pulumi.getter(name="startTime")
def start_time(self) -> Optional[pulumi.Input[str]]:
"""
Exclude files, tables, or rows older than this value. If not set, no lower time limit is applied.
"""
return pulumi.get(self, "start_time")
@start_time.setter
def start_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "start_time", value)
@property
@pulumi.getter(name="timestampField")
def timestamp_field(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]:
"""
Specification of the field containing the timestamp of scanned items. Used for data sources like Datastore and BigQuery. For BigQuery: If this value is not specified and the table was modified between the given start and end times, the entire table will be scanned. If this value is specified, then rows are filtered based on the given start and end times. Rows with a `NULL` value in the provided BigQuery column are skipped. Valid data types of the provided BigQuery column are: `INTEGER`, `DATE`, `TIMESTAMP`, and `DATETIME`. For Datastore: If this value is specified, then entities are filtered based on the given start and end times. If an entity does not contain the provided timestamp property or contains empty or invalid values, then it is included. Valid data types of the provided timestamp property are: `TIMESTAMP`.
"""
return pulumi.get(self, "timestamp_field")
@timestamp_field.setter
def timestamp_field(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2FieldIdArgs']]):
pulumi.set(self, "timestamp_field", value)
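# Illustrative sketch (added for documentation; not part of the generated SDK):
# restricting a scan to rows modified during 2021, keyed on a timestamp column.
# The RFC 3339 timestamp strings and the FieldId 'name' argument are assumptions
# about how the underlying API encodes these values.
def _example_timespan_config() -> 'GooglePrivacyDlpV2TimespanConfigArgs':
    return GooglePrivacyDlpV2TimespanConfigArgs(
        start_time="2021-01-01T00:00:00Z",   # exclude anything older
        end_time="2022-01-01T00:00:00Z",     # exclude anything newer
        timestamp_field=GooglePrivacyDlpV2FieldIdArgs(name="update_time"),
    )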
@pulumi.input_type
class GooglePrivacyDlpV2TransformationErrorHandlingArgs:
def __init__(__self__, *,
leave_untransformed: Optional[pulumi.Input['GooglePrivacyDlpV2LeaveUntransformedArgs']] = None,
throw_error: Optional[pulumi.Input['GooglePrivacyDlpV2ThrowErrorArgs']] = None):
"""
How to handle transformation errors during de-identification. A transformation error occurs when the requested transformation is incompatible with the data. For example, trying to de-identify an IP address using a `DateShift` transformation would result in a transformation error, since date info cannot be extracted from an IP address. Information about any incompatible transformations, and how they were handled, is returned in the response as part of the `TransformationOverviews`.
:param pulumi.Input['GooglePrivacyDlpV2LeaveUntransformedArgs'] leave_untransformed: Ignore errors
:param pulumi.Input['GooglePrivacyDlpV2ThrowErrorArgs'] throw_error: Throw an error
"""
if leave_untransformed is not None:
pulumi.set(__self__, "leave_untransformed", leave_untransformed)
if throw_error is not None:
pulumi.set(__self__, "throw_error", throw_error)
@property
@pulumi.getter(name="leaveUntransformed")
def leave_untransformed(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2LeaveUntransformedArgs']]:
"""
Ignore errors
"""
return pulumi.get(self, "leave_untransformed")
@leave_untransformed.setter
def leave_untransformed(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2LeaveUntransformedArgs']]):
pulumi.set(self, "leave_untransformed", value)
@property
@pulumi.getter(name="throwError")
def throw_error(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2ThrowErrorArgs']]:
"""
Throw an error
"""
return pulumi.get(self, "throw_error")
@throw_error.setter
def throw_error(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2ThrowErrorArgs']]):
pulumi.set(self, "throw_error", value)
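# Illustrative sketch (added for documentation; not part of the generated SDK):
# fail the whole de-identification request on the first incompatible
# transformation instead of silently leaving values untouched.
def _example_strict_error_handling() -> 'GooglePrivacyDlpV2TransformationErrorHandlingArgs':
    return GooglePrivacyDlpV2TransformationErrorHandlingArgs(
        throw_error=GooglePrivacyDlpV2ThrowErrorArgs(),
    )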
@pulumi.input_type
class GooglePrivacyDlpV2TransientCryptoKeyArgs:
def __init__(__self__, *,
name: pulumi.Input[str]):
"""
Use this to have a random data crypto key generated. It will be discarded after the request finishes.
:param pulumi.Input[str] name: Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate `TransientCryptoKey` protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated).
"""
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate `TransientCryptoKey` protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated).
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@pulumi.input_type
class GooglePrivacyDlpV2TriggerArgs:
def __init__(__self__, *,
manual: Optional[pulumi.Input['GooglePrivacyDlpV2ManualArgs']] = None,
schedule: Optional[pulumi.Input['GooglePrivacyDlpV2ScheduleArgs']] = None):
"""
What event needs to occur for a new job to be started.
:param pulumi.Input['GooglePrivacyDlpV2ManualArgs'] manual: For use with hybrid jobs. Jobs must be manually created and finished.
:param pulumi.Input['GooglePrivacyDlpV2ScheduleArgs'] schedule: Create a job on a repeating basis based on the elapse of time.
"""
if manual is not None:
pulumi.set(__self__, "manual", manual)
if schedule is not None:
pulumi.set(__self__, "schedule", schedule)
@property
@pulumi.getter
def manual(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2ManualArgs']]:
"""
For use with hybrid jobs. Jobs must be manually created and finished.
"""
return pulumi.get(self, "manual")
@manual.setter
def manual(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2ManualArgs']]):
pulumi.set(self, "manual", value)
@property
@pulumi.getter
def schedule(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2ScheduleArgs']]:
"""
Create a job on a repeating basis based on the elapse of time.
"""
return pulumi.get(self, "schedule")
@schedule.setter
def schedule(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2ScheduleArgs']]):
pulumi.set(self, "schedule", value)
@pulumi.input_type
class GooglePrivacyDlpV2UnwrappedCryptoKeyArgs:
def __init__(__self__, *,
key: pulumi.Input[str]):
"""
Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible.
:param pulumi.Input[str] key: A 128/192/256 bit key.
"""
pulumi.set(__self__, "key", key)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
A 128/192/256 bit key.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@pulumi.input_type
class GooglePrivacyDlpV2ValueArgs:
def __init__(__self__, *,
boolean_value: Optional[pulumi.Input[bool]] = None,
date_value: Optional[pulumi.Input['GoogleTypeDateArgs']] = None,
day_of_week_value: Optional[pulumi.Input['GooglePrivacyDlpV2ValueDayOfWeekValue']] = None,
float_value: Optional[pulumi.Input[float]] = None,
integer_value: Optional[pulumi.Input[str]] = None,
string_value: Optional[pulumi.Input[str]] = None,
time_value: Optional[pulumi.Input['GoogleTypeTimeOfDayArgs']] = None,
timestamp_value: Optional[pulumi.Input[str]] = None):
"""
Set of primitive values supported by the system. Note that for the purposes of inspection or transformation, the number of bytes considered to comprise a 'Value' is based on its representation as a UTF-8 encoded string. For example, if 'integer_value' is set to 123456789, the number of bytes would be counted as 9, even though an int64 only holds up to 8 bytes of data.
:param pulumi.Input[bool] boolean_value: boolean
:param pulumi.Input['GoogleTypeDateArgs'] date_value: date
:param pulumi.Input['GooglePrivacyDlpV2ValueDayOfWeekValue'] day_of_week_value: day of week
:param pulumi.Input[float] float_value: float
:param pulumi.Input[str] integer_value: integer
:param pulumi.Input[str] string_value: string
:param pulumi.Input['GoogleTypeTimeOfDayArgs'] time_value: time of day
:param pulumi.Input[str] timestamp_value: timestamp
"""
if boolean_value is not None:
pulumi.set(__self__, "boolean_value", boolean_value)
if date_value is not None:
pulumi.set(__self__, "date_value", date_value)
if day_of_week_value is not None:
pulumi.set(__self__, "day_of_week_value", day_of_week_value)
if float_value is not None:
pulumi.set(__self__, "float_value", float_value)
if integer_value is not None:
pulumi.set(__self__, "integer_value", integer_value)
if string_value is not None:
pulumi.set(__self__, "string_value", string_value)
if time_value is not None:
pulumi.set(__self__, "time_value", time_value)
if timestamp_value is not None:
pulumi.set(__self__, "timestamp_value", timestamp_value)
@property
@pulumi.getter(name="booleanValue")
def boolean_value(self) -> Optional[pulumi.Input[bool]]:
"""
boolean
"""
return pulumi.get(self, "boolean_value")
@boolean_value.setter
def boolean_value(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "boolean_value", value)
@property
@pulumi.getter(name="dateValue")
def date_value(self) -> Optional[pulumi.Input['GoogleTypeDateArgs']]:
"""
date
"""
return pulumi.get(self, "date_value")
@date_value.setter
def date_value(self, value: Optional[pulumi.Input['GoogleTypeDateArgs']]):
pulumi.set(self, "date_value", value)
@property
@pulumi.getter(name="dayOfWeekValue")
def day_of_week_value(self) -> Optional[pulumi.Input['GooglePrivacyDlpV2ValueDayOfWeekValue']]:
"""
day of week
"""
return pulumi.get(self, "day_of_week_value")
@day_of_week_value.setter
def day_of_week_value(self, value: Optional[pulumi.Input['GooglePrivacyDlpV2ValueDayOfWeekValue']]):
pulumi.set(self, "day_of_week_value", value)
@property
@pulumi.getter(name="floatValue")
def float_value(self) -> Optional[pulumi.Input[float]]:
"""
float
"""
return pulumi.get(self, "float_value")
@float_value.setter
def float_value(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "float_value", value)
@property
@pulumi.getter(name="integerValue")
def integer_value(self) -> Optional[pulumi.Input[str]]:
"""
integer
"""
return pulumi.get(self, "integer_value")
@integer_value.setter
def integer_value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "integer_value", value)
@property
@pulumi.getter(name="stringValue")
def string_value(self) -> Optional[pulumi.Input[str]]:
"""
string
"""
return pulumi.get(self, "string_value")
@string_value.setter
def string_value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "string_value", value)
@property
@pulumi.getter(name="timeValue")
def time_value(self) -> Optional[pulumi.Input['GoogleTypeTimeOfDayArgs']]:
"""
time of day
"""
return pulumi.get(self, "time_value")
@time_value.setter
def time_value(self, value: Optional[pulumi.Input['GoogleTypeTimeOfDayArgs']]):
pulumi.set(self, "time_value", value)
@property
@pulumi.getter(name="timestampValue")
def timestamp_value(self) -> Optional[pulumi.Input[str]]:
"""
timestamp
"""
return pulumi.get(self, "timestamp_value")
@timestamp_value.setter
def timestamp_value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "timestamp_value", value)
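# Illustrative sketch (added for documentation; not part of the generated SDK):
# a date-typed Value, for example as the replacement in a ReplaceValueConfig.
# Only one of the *_value fields is expected to be populated at a time.
def _example_date_value() -> 'GooglePrivacyDlpV2ValueArgs':
    return GooglePrivacyDlpV2ValueArgs(
        date_value=GoogleTypeDateArgs(year=2000, month=1, day=1),
    )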
@pulumi.input_type
class GooglePrivacyDlpV2WordListArgs:
def __init__(__self__, *,
words: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Message defining a list of words or phrases to search for in the data.
:param pulumi.Input[Sequence[pulumi.Input[str]]] words: Words or phrases defining the dictionary. The dictionary must contain at least one phrase and every phrase must contain at least 2 characters that are letters or digits. [required]
"""
if words is not None:
pulumi.set(__self__, "words", words)
@property
@pulumi.getter
def words(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Words or phrases defining the dictionary. The dictionary must contain at least one phrase and every phrase must contain at least 2 characters that are letters or digits. [required]
"""
return pulumi.get(self, "words")
@words.setter
def words(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "words", value)
@pulumi.input_type
class GoogleProtobufEmptyArgs:
def __init__(__self__):
"""
A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.
"""
pass
@pulumi.input_type
class GoogleTypeDateArgs:
def __init__(__self__, *,
day: Optional[pulumi.Input[int]] = None,
month: Optional[pulumi.Input[int]] = None,
year: Optional[pulumi.Input[int]] = None):
"""
Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values * A month and day value, with a zero year, such as an anniversary * A year on its own, with zero month and day values * A year and month value, with a zero day, such as a credit card expiration date Related types are google.type.TimeOfDay and `google.protobuf.Timestamp`.
:param pulumi.Input[int] day: Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
:param pulumi.Input[int] month: Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
:param pulumi.Input[int] year: Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
"""
if day is not None:
pulumi.set(__self__, "day", day)
if month is not None:
pulumi.set(__self__, "month", month)
if year is not None:
pulumi.set(__self__, "year", year)
@property
@pulumi.getter
def day(self) -> Optional[pulumi.Input[int]]:
"""
Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
"""
return pulumi.get(self, "day")
@day.setter
def day(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "day", value)
@property
@pulumi.getter
def month(self) -> Optional[pulumi.Input[int]]:
"""
Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
"""
return pulumi.get(self, "month")
@month.setter
def month(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "month", value)
@property
@pulumi.getter
def year(self) -> Optional[pulumi.Input[int]]:
"""
Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
"""
return pulumi.get(self, "year")
@year.setter
def year(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "year", value)
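# Illustrative sketch (added for documentation; not part of the generated SDK):
# a partial date carrying only a year, with month and day set to zero to mark
# them as unspecified, as described in the docstring above.
def _example_year_only_date() -> 'GoogleTypeDateArgs':
    return GoogleTypeDateArgs(year=1999, month=0, day=0)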
@pulumi.input_type
class GoogleTypeTimeOfDayArgs:
def __init__(__self__, *,
hours: Optional[pulumi.Input[int]] = None,
minutes: Optional[pulumi.Input[int]] = None,
nanos: Optional[pulumi.Input[int]] = None,
seconds: Optional[pulumi.Input[int]] = None):
"""
Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`.
:param pulumi.Input[int] hours: Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.
:param pulumi.Input[int] minutes: Minutes of hour of day. Must be from 0 to 59.
:param pulumi.Input[int] nanos: Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.
:param pulumi.Input[int] seconds: Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.
"""
if hours is not None:
pulumi.set(__self__, "hours", hours)
if minutes is not None:
pulumi.set(__self__, "minutes", minutes)
if nanos is not None:
pulumi.set(__self__, "nanos", nanos)
if seconds is not None:
pulumi.set(__self__, "seconds", seconds)
@property
@pulumi.getter
def hours(self) -> Optional[pulumi.Input[int]]:
"""
Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.
"""
return pulumi.get(self, "hours")
@hours.setter
def hours(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "hours", value)
@property
@pulumi.getter
def minutes(self) -> Optional[pulumi.Input[int]]:
"""
Minutes of hour of day. Must be from 0 to 59.
"""
return pulumi.get(self, "minutes")
@minutes.setter
def minutes(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "minutes", value)
@property
@pulumi.getter
def nanos(self) -> Optional[pulumi.Input[int]]:
"""
Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.
"""
return pulumi.get(self, "nanos")
@nanos.setter
def nanos(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "nanos", value)
@property
@pulumi.getter
def seconds(self) -> Optional[pulumi.Input[int]]:
"""
Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.
"""
return pulumi.get(self, "seconds")
@seconds.setter
def seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "seconds", value)
|
py | 1a4e15bdf05105e1cd4d928ddeced21c34111d4d |
import os
import re
import numpy
import numpy as np
from speech_manip import get_speech, put_speech, rate2fftlength, rate2worldapsize
# [http://stackoverflow.com/questions/1208118/using-numpy-to-build-an-array-of-all-combinations-of-two-arrays]
def cartesian(arrays, out=None):
"""
Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
dtype = arrays[0].dtype
n = np.prod([x.size for x in arrays])
if out is None:
out = np.zeros([n, len(arrays)], dtype=dtype)
m = n / arrays[0].size
out[:,0] = np.repeat(arrays[0], m)
if arrays[1:]:
cartesian(arrays[1:], out=out[0:m,1:])
for j in xrange(1, arrays[0].size):
out[j*m:(j+1)*m,1:] = out[0:m,1:]
return out
def destandardise(norm_speech, config):
    ### TEMP -- copied from standardise.py
statsdir = os.path.join(config['workdir'], 'stats')
bap_size = rate2worldapsize(config['sample_rate'])
w = config['weights']
weight_vec = [w['mgc']] * (config['mcc_order']+1) + \
[w['lf0']] + \
[w['vuv']] + \
[w['bap']] * bap_size
weight_vec = np.array(weight_vec)
dim = config['mcc_order'] + 3 + bap_size
## 3 = f0, vuv, energy
mean_vec = np.loadtxt(os.path.join(statsdir, 'mean.txt'))
std_vec = np.loadtxt(os.path.join(statsdir, 'std.txt'))
vuv_dim = config['mcc_order'] + 2
m,n = np.shape(norm_speech)
mean_mat = np.tile(mean_vec,(m,1))
std_mat = np.tile(std_vec,(m,1))
weight_mat = np.tile(weight_vec,(m,1))
speech = norm_speech / weight_mat
speech *= std_mat
speech += mean_mat
speech[:,vuv_dim] = norm_speech[:,vuv_dim]
return speech
def safe_makedir(dir):
if not os.path.isdir(dir):
os.makedirs(dir)
def writelist(seq, fname):
f = open(fname, 'w')
f.write('\n'.join(seq) + '\n')
f.close()
def readlist(fname):
f = open(fname, 'r')
data = f.readlines()
f.close()
return [line.strip('\n') for line in data]
def read_norm_data(fname, stream_names):
out = {}
vals = np.loadtxt(fname)
mean_ix = 0
for stream in stream_names:
std_ix = mean_ix + 1
out[stream] = (vals[mean_ix], vals[std_ix])
mean_ix += 2
return out
def makedirecs(direcs):
for direc in direcs:
if not os.path.isdir(direc):
os.makedirs(direc)
def basename(fname):
path, name = os.path.split(fname)
base = re.sub('\.[^\.]+\Z','',name)
return base
def split_into_streams(speech, stream_names, datadims):
# if 'vuv' not in datadims:
# dim += 1 ## add 1 for vuv
# speech = get_speech(cmpfile, dim)
start = 0
outputs = {}
for stream in stream_names:
stream_dim = datadims[stream]
if stream == 'mgc':
stream_dim += 1
end = start + stream_dim
print stream
outputs[stream] = speech[:,start:end]
start = end
return outputs
def world_synth(cmpfile, wavefile, config, denorm=False):
stream_names = config['stream_names']
datadims = dict(zip(stream_names, config['datadims_list']))
datadims['vuv'] = 1
speech = get_speech(cmpfile, sum(datadims.values())+1)
#print config
if denorm:
speech = destandardise(speech, config)
streams = split_into_streams(speech, stream_names, datadims)
#print streams
if 'lf0' in streams:
fzero = numpy.exp(streams['lf0'])
vuv_thresh = 0.5
if 'vuv' in streams:
vuv = streams['vuv']
lf0 = streams['lf0']
fzero[vuv <= vuv_thresh] = 0.0
#fzero *= fzero_scale
streams['lf0'] = fzero
streams2wav(streams, wavefile, config)
def denorm_data(streams, config):
stream_names = config['stream_names']
norm_data = read_norm_data(config['norm_data_file'] , stream_names)
denorm_data = {}
weights = config['weights']
for (stream_name, stream_data) in streams.items():
(mean_val, std_val) = norm_data[stream_name]
stream_data /= weights[stream_name]
stream_data *= std_val
stream_data += mean_val
denorm_data[stream_name] = stream_data
return denorm_data
def streams2wav(streams, outfile, config):
bin_dir = config['bindir']
alpha = config['mcc_alpha']
order = config['mcc_order']
sr = config['sample_rate']
fftl = rate2fftlength(sr)
## TODO -- handle tmp better
os.system('rm /tmp/tmp*')
for (stream, data) in streams.items():
put_speech(data, '/tmp/tmp.%s'%(stream))
comm=bin_dir+"/x2x +fd /tmp/tmp."+stream+" >/tmp/tmp_d."+stream
print comm
os.system(comm)
comm = "%s/mgc2sp -a %s -g 0 -m %s -l %s -o 2 /tmp/tmp.mgc | %s/sopr -d 32768.0 -P | %s/x2x +fd -o > /tmp/tmp.spec"%(bin_dir, alpha, order, fftl, bin_dir, bin_dir)
print comm
os.system(comm)
'''Avoid: x2x : error: input data is over the range of type 'double'!
-o : clip by minimum and maximum of output data
type if input data is over the range of
output data type.
'''
comm = "%s/synth %s %s /tmp/tmp_d.lf0 /tmp/tmp.spec /tmp/tmp_d.bap %s"%(bin_dir, fftl, sr, outfile)
print comm
res = os.system(comm)
if res != 0:
print
print 'trouble with resynth command:'
print comm
print
else:
# os.system("mv /tmp/tmp.resyn.wav "+outfile)
print 'Produced %s'%(outfile)
def splice_data(data, splice_weights):
assert len(splice_weights) % 2 == 1, 'no. of weights should be odd'
middle = (len(splice_weights) - 1) / 2
assert splice_weights[middle] == 1.0, 'middle weight must be 1!'
#print data
#print '===='
offset = len(splice_weights)-1
stacked = []
for (i,w) in enumerate(splice_weights):
if offset == 0:
#print data[i:, :] #* w
stacked.append(data[i:, :])
else:
#print data[i:-offset, :] #* w
stacked.append(data[i:-offset, :])
#print i
#print offset
offset -= 1
stacked = np.hstack(stacked)
#print stacked
return stacked
def unsplice(data, splice_weights):
assert len(splice_weights) % 2 == 1, 'no. of weights should be odd'
middle = (len(splice_weights) - 1) / 2
# print splice_weights
# print splice_weights[middle]
# print middle
m,n = np.shape(data)
dim = n / len(splice_weights)
return data[:,(dim*middle):(dim*middle)+dim]
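# Illustrative example (added for clarity; not called anywhere in this module):
# splicing a 4-frame, 2-dimensional feature matrix with a 3-frame context
# window stacks each frame with its neighbours, and unsplice() recovers the
# centre block of the stacked features.
def _splice_demo():
    feats = np.arange(8).reshape((4, 2))
    weights = [1.0, 1.0, 1.0]              # odd length; middle weight must be 1.0
    stacked = splice_data(feats, weights)  # shape (2, 6): edge frames are dropped
    centre = unsplice(stacked, weights)    # shape (2, 2): just the middle block
    return stacked, centre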
def comm(comm_line, quiet=False):
if not quiet:
print comm_line
os.system(comm_line)
def latex_matrix(X, integer=False):
"""
Print a numpy array to a string that will compile when pasted into latex .
Arbitary matric name M -- change this after.
See http://selinap.com/2009/05/how-to-create-a-matrix-in-latex/ on latex matrices
"""
m,n = X.shape
align_pattern = "c" * n
outstring="""\\begin{pmatrix}\n"""
for i in range(m):
for j in range(n):
if integer:
outstring += str(int(X[i,j]))
else:
outstring += "%.2f"%(X[i,j]) ### 2 decimal places for floats
if j == (n-1):
outstring += " \\\\ \n"
else:
outstring += " & "
outstring += """\end{pmatrix} \n"""
return outstring
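# Illustrative example (added for clarity; not called anywhere in this module):
# render a 2x2 identity matrix as a LaTeX pmatrix with integer formatting.
def _latex_matrix_demo():
    X = np.eye(2)
    return latex_matrix(X, integer=True)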
def vector_to_string(vect):
'''
Version of vector suitable for use in filenames. Try to drop values after decimal point if possible
'''
string_values = []
for value in vect:
if int(value) == value:
string_values.append(str(int(value)))
else:
string_values.append(str(value))
## 'run length encode' repeated values to get sensible size string:
unique_values = []
counts = []
prev_val = ''
assert '' not in string_values
for val in string_values:
if val != prev_val:
unique_values.append(val)
if prev_val != '':
counts.append(count)
count = 1
else:
count += 1
prev_val = val
counts.append(count)
assert len(unique_values) == len(counts)
dedup_string_values = []
for (value, count) in zip(unique_values, counts):
dedup_string_values.append('%sx%s'%(count, value))
dedup_string_values = '-'.join(dedup_string_values)
return dedup_string_values |
py | 1a4e1661004272d9f8bb54997953ee4ee33c02be | """
Test calling user defined functions using expression evaluation.
This test checks that typesystem lookup works correctly for typedefs of
untagged structures.
Ticket: https://llvm.org/bugs/show_bug.cgi?id=26790
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestExprLookupAnonStructTypedef(TestBase):
mydir = TestBase.compute_mydir(__file__)
@expectedFailureAll(
oslist=['linux'],
archs=['arm'],
bugnumber="llvm.org/pr27868")
def test(self):
"""Test typedeffed untagged struct arguments for function call expressions"""
self.build()
lldbutil.run_to_source_breakpoint(self, "// break here", lldb.SBFileSpec("main.cpp"))
self.expect_expr("multiply(&s)", result_type="double", result_value="1")
|
py | 1a4e167906f5c03b491ad2d66a884008d8b1dd3e | from random import randint
from pygame import *
class GameSprite(sprite.Sprite):
def __init__(self, player_image, player_speed, player_x, player_y):
super().__init__()
self.image = transform.scale(image.load(player_image),(65, 65))
self.speed = player_speed
self.rect = self.image.get_rect()
self.rect.x = player_x
self.rect.y = player_y
def reset(self):
window.blit(self.image, (self.rect.x, self.rect.y))
background = transform.scale(image.load('desert.jpg'),(1100, 900))
class Player(GameSprite):
def update(self):
keys_pressed = key.get_pressed()
if keys_pressed[K_a] and self.rect.x > 0:
self.rect.x -= self.speed
if keys_pressed[K_d] and self.rect.x < 595:
self.rect.x += self.speed
if keys_pressed[K_w] and self.rect.y > 0:
self.rect.y -= self.speed
if keys_pressed[K_s] and self.rect.y < 395:
self.rect.y += self.speed
class Ball (GameSprite):
def hod(self):
self.rect.x += self.speed
self.rect.y += self.speed
window = display.set_mode((1100,900))
display.set_caption('Shooter')
background = transform.scale(image.load('desert.jpg'),(1100, 900))
run = True
FPS = 60
clock = time.Clock()
finish = False
score = 0
rocket = Player('raket.png', 30, 50, 450)
rocket1 = Player('raket.png', 30, 1050, 450)
shar = Ball('bol.png', 40, 550, 450)
while run:
    for e in event.get():
        if e.type == QUIT:
run = False
rocket.reset()
rocket1.reset()
time.delay(50) |
py | 1a4e16b151e8f74f0917b8f1273fddfabd4b6e30 | from . import _nnls
from numpy import asarray_chkfinite, zeros, double
__all__ = ['nnls']
def nnls(A, b, maxiter=None):
"""
Solve ``argmin_x || Ax - b ||_2`` for ``x>=0``. This is a wrapper
for a FORTRAN non-negative least squares solver.
Parameters
----------
A : ndarray
Matrix ``A`` as shown above.
b : ndarray
Right-hand side vector.
maxiter: int, optional
Maximum number of iterations, optional.
Default is ``3 * A.shape[1]``.
Returns
-------
x : ndarray
Solution vector.
rnorm : float
The residual, ``|| Ax-b ||_2``.
See Also
--------
lsq_linear : Linear least squares with bounds on the variables
Notes
-----
The FORTRAN code was published in the book below. The algorithm
is an active set method. It solves the KKT (Karush-Kuhn-Tucker)
conditions for the non-negative least squares problem.
References
----------
Lawson C., Hanson R.J., (1987) Solving Least Squares Problems, SIAM
Examples
--------
>>> from scipy.optimize import nnls
...
>>> A = np.array([[1, 0], [1, 0], [0, 1]])
>>> b = np.array([2, 1, 1])
>>> nnls(A, b)
(array([1.5, 1. ]), 0.7071067811865475)
>>> b = np.array([-1, -1, -1])
>>> nnls(A, b)
(array([0., 0.]), 1.7320508075688772)
"""
A, b = map(asarray_chkfinite, (A, b))
if len(A.shape) != 2:
raise ValueError("Expected a two-dimensional array (matrix)" +
", but the shape of A is %s" % (A.shape, ))
if len(b.shape) != 1:
raise ValueError("Expected a one-dimensional array (vector" +
", but the shape of b is %s" % (b.shape, ))
m, n = A.shape
if m != b.shape[0]:
raise ValueError(
"Incompatible dimensions. The first dimension of " +
"A is %s, while the shape of b is %s" % (m, (b.shape[0], )))
maxiter = -1 if maxiter is None else int(maxiter)
w = zeros((n,), dtype=double)
zz = zeros((m,), dtype=double)
index = zeros((n,), dtype=int)
x, rnorm, mode = _nnls.nnls(A, m, n, b, w, zz, index, maxiter)
if mode != 1:
raise RuntimeError("too many iterations")
return x, rnorm
|
py | 1a4e16fb224361ee20f623c6a68bb5f0e3bf072e | #!/usr/bin/env python3
# coding: utf8
"""
Loads and handles training and validation data collections.
"""
__author__ = 'David Flury, Andreas Kaufmann, Raphael Müller'
__email__ = "[email protected]"
import hashlib
import glob
import os
import random
from unmix.source.configuration import Configuration
from unmix.source.data.song import Song
from unmix.source.logging.logger import Logger
class DataLoader(object):
@staticmethod
def load(path=None, test_data_count=None):
if path is None:
folders = Configuration.get('collection.folders')
if folders is None:
return DataLoader.loadDataset(Configuration.get('collection.folder', optional=False), test_data_count)
else:
return DataLoader.loadMultipleDatasets(folders, test_data_count)
else:
return DataLoader.loadDataset(path, test_data_count)
@staticmethod
def loadDataset(path, test_data_count):
files = DataLoader.loadFiles(path)
training_files, validation_files, test_files = DataLoader.splitDataset(files, test_data_count)
Logger.debug(
"Found %d songs for training and %d songs for validation." % (len(training_files), len(validation_files)))
if test_files is not None:
test_frequency = Configuration.get('collection.test_frequency', default=0)
Logger.debug("Use %d songs for tests after every %d epoch." % (len(test_files), test_frequency))
if len(training_files) == 0:
Logger.warn("No training files assigned.")
if len(validation_files) == 0:
Logger.warn("No validation files assigned.")
return training_files, validation_files, test_files
@staticmethod
def loadFiles(path, ignore_song_limit=False):
if path is None:
path = Configuration.get_path('collection.folder', False)
instrument_filter = os.path.join(path, '**', '%s*.wav' % Song.PREFIX_INSTRUMENT)
files_instrument = [os.path.dirname(file) for file in glob.iglob(instrument_filter, recursive=True)]
rest_filter = os.path.join(path, '**', '%s*.wav' % Song.PREFIX_REST)
files_rest = [os.path.dirname(file) for file in glob.iglob(rest_filter, recursive=True)]
files = [f for f in files_instrument if f in files_rest] # make sure both instrument and rest file exists
skipped_count = len(set(files_instrument) - set(files_rest)) + len(set(files_rest) - set(files_instrument))
Logger.debug(f"Skipped {skipped_count} files (incomplete instrument/rest pair)")
# Sort files by hash value of folder to guarantee a consistent order
files.sort(key=lambda x: hashlib.md5(os.path.basename(x).encode('utf-8', 'surrogatepass')).hexdigest())
song_limit = Configuration.get('collection.song_limit', default=0)
if not ignore_song_limit and song_limit > 0:
if song_limit <= 1: # Configuration as percentage share
song_limit = song_limit * len(files)
song_limit = min(int(song_limit), len(files))
files = files[:song_limit]
return files
@staticmethod
def splitDataset(files, test_data_count):
test_files = None
test_frequency = Configuration.get('collection.test_frequency', default=0)
if not test_data_count:
test_data_count = Configuration.get('collection.test_data_count', default=0)
if test_data_count > 0:
test_data_count = int(test_data_count)
test_files = files[-test_data_count:]
files = files[:len(files) - test_data_count]
validation_ratio = Configuration.get('collection.validation_ratio', default=0.2)
validation_files = files[:int(len(files) * validation_ratio)]
training_files = files[len(validation_files):]
return training_files, validation_files, test_files
@staticmethod
def loadMultipleDatasets(folders, test_data_count):
datasets = []
ratio_sum = 0
smallest_dataset_length = None
smallest_dataset_ratio = None
for folder in folders:
ratio = folder['ratio']
dataset = DataLoader.loadFiles(folder['path'], True)
datasets.append((dataset, ratio, folder['path']))
ratio_sum = ratio_sum + ratio
dataset_length = len(dataset)
if smallest_dataset_length is None or dataset_length < smallest_dataset_length:
smallest_dataset_length = dataset_length
smallest_dataset_ratio = ratio
target_song_count = ratio_sum / smallest_dataset_ratio * smallest_dataset_length
song_limit = Configuration.get('collection.song_limit', default=0)
if song_limit < target_song_count:
if song_limit >= 1:
target_song_count = song_limit
elif song_limit > 0:
target_song_count = target_song_count * song_limit
training_files = []
validation_files = []
test_files = []
for dataset, ratio, folder in datasets:
requested_file_count = int(ratio / ratio_sum * target_song_count)
files = dataset[:requested_file_count]
print('Loaded %s files from %s' % (len(files), folder))
training, validation, test = DataLoader.splitDataset(files, test_data_count)
training_files.extend(training)
validation_files.extend(validation)
if test is not None:
test_files.extend(test)
return training_files, validation_files, test_files
|
py | 1a4e190d1a510e8d92675773c991c8fe0fc4ff89 | import cPickle
import os
import time
import mxnet as mx
import numpy as np
import sys
from scipy.io import savemat
from module import MutableModule
from rcnn.logger import logger
from rcnn.config import config, default
from rcnn.fio import image
from rcnn.fio.load_ct_img import map_box_back
from rcnn.processing.bbox_transform import bbox_pred, clip_boxes
from rcnn.processing.nms import py_nms_wrapper, cpu_nms_wrapper, gpu_nms_wrapper
from rcnn.utils.timer import Timer
from rcnn.utils.evaluation import recall_all
class Predictor(object):
def __init__(self, symbol, data_names, label_names,
context=mx.cpu(), max_data_shapes=None,
provide_data=None, provide_label=None,
arg_params=None, aux_params=None):
self._mod = MutableModule(symbol, data_names, label_names,
context=context, max_data_shapes=max_data_shapes)
self._mod.bind(provide_data, provide_label, for_training=False)
self._mod.init_params(arg_params=arg_params, aux_params=aux_params)
def predict(self, data_batch):
self._mod.forward(data_batch)
return dict(zip(self._mod.output_names, self._mod.get_outputs()))
def im_proposal(predictor, data_batch, data_names, scale):
data_dict = dict(zip(data_names, data_batch.data))
output = predictor.predict(data_batch)
# drop the batch index
boxes = output['rois_output'].asnumpy()[:, 1:]
scores = output['rois_score'].asnumpy()
# transform to original scale
boxes = boxes / scale
return scores, boxes, data_dict
def generate_proposals(predictor, test_data, imdb, vis=False, thresh=0.):
"""
    Generate detection results using RPN.
:param predictor: Predictor
:param test_data: data iterator, must be non-shuffled
:param imdb: image database
:param vis: controls visualization
:param thresh: thresh for valid detections
:return: list of detected boxes
"""
assert vis or not test_data.shuffle
data_names = [k[0] for k in test_data.provide_data]
i = 0
t = time.time()
imdb_boxes = list()
original_boxes = list()
for im_info, data_batch in test_data:
t1 = time.time() - t
t = time.time()
scale = im_info[0, 2]
scores, boxes, data_dict = im_proposal(predictor, data_batch, data_names, scale)
t2 = time.time() - t
t = time.time()
# assemble proposals
dets = np.hstack((boxes, scores))
original_boxes.append(dets)
# filter proposals
keep = np.where(dets[:, 4:] > thresh)[0]
dets = dets[keep, :]
imdb_boxes.append(dets)
if vis:
vis_all_detection(data_dict['data'].asnumpy(), [dets], ['obj'], scale)
logger.info('generating %d/%d ' % (i + 1, imdb.num_images) +
'proposal %d ' % (dets.shape[0]) +
'data %.4fs net %.4fs' % (t1, t2))
i += 1
assert len(imdb_boxes) == imdb.num_images, 'calculations not complete'
# save results
rpn_folder = os.path.join(imdb.root_path, 'rpn_data')
if not os.path.exists(rpn_folder):
os.mkdir(rpn_folder)
rpn_file = os.path.join(rpn_folder, imdb.name + '_rpn.pkl')
with open(rpn_file, 'wb') as f:
cPickle.dump(imdb_boxes, f, cPickle.HIGHEST_PROTOCOL)
if thresh > 0:
full_rpn_file = os.path.join(rpn_folder, imdb.name + '_full_rpn.pkl')
with open(full_rpn_file, 'wb') as f:
cPickle.dump(original_boxes, f, cPickle.HIGHEST_PROTOCOL)
logger.info('wrote rpn proposals to %s' % rpn_file)
return imdb_boxes
def im_detect(predictor, data_batch, data_names, scale):
output = predictor.predict(data_batch)
data_dict = dict(zip(data_names, data_batch.data))
if config.TEST.HAS_RPN:
rois = output['rois_output'].asnumpy()[:, 1:]
else:
rois = data_dict['rois'].asnumpy().reshape((-1, 5))[:, 1:]
im_shape = data_dict['data'].shape
# save output
scores = output['cls_prob_reshape_output'].asnumpy()[0]
bbox_deltas = output['bbox_pred_reshape_output'].asnumpy()[0]
# print 'im_det', scores[:,1]
# post processing
pred_boxes = bbox_pred(rois, bbox_deltas)
pred_boxes = clip_boxes(pred_boxes, im_shape[-2:])
# we used scaled image & roi to train, so it is necessary to transform them back
pred_boxes = pred_boxes / scale
return scores, pred_boxes, data_dict
from rcnn.utils.evaluation import sens_at_FP
def my_evaluate_detections(all_boxes, all_gts):
print 'Sensitivity @', default.val_avg_fp, 'average FPs per image:',
res = sens_at_FP(all_boxes[1], all_gts[1], default.val_avg_fp, default.val_iou_th) # cls 0 is background
print res
return res[3] # sens@4FP
def pred_eval(predictor, test_data, imdb, vis=False, max_box=-1, thresh=1e-3):
"""
    wrapper for calculating offline validation for faster data analysis
    in this example, all thresholds are set by hand
    :param predictor: Predictor
    :param test_data: data iterator, must be non-shuffled
:param imdb: image database
:param vis: controls visualization
:param max_box: maximum number of boxes detected in each image
:param thresh: valid detection threshold
:return:
"""
# assert vis or not test_data.shuffle
data_names = [k[0] for k in test_data.provide_data]
nms = py_nms_wrapper(config.TEST.NMS)
# limit detections to max_per_image over all classes
max_per_image = max_box
num_images = imdb.num_images
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
kept_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
all_gts = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
all_iminfos = []
all_imnames = []
all_crops = []
i = 0
_t = {'data': Timer(), 'im_detect' : Timer(), 'misc' : Timer()}
_t['data'].tic()
num_image = config.NUM_IMAGES_3DCE
key_idx = (num_image - 1) / 2 # adjust image for 3DCE
for im_info, imname, crop, data_batch in test_data:
_t['data'].toc()
_t['im_detect'].tic()
all_iminfos.append(im_info)
all_imnames.append(imname)
all_crops.append(crop)
# scale = im_info[0, 2]
scale = 1. # we have scaled the label in get_image(), so no need to scale the pred_box
gt_boxes = data_batch.label[0].asnumpy()[key_idx, :, :]
data_batch.label = None
scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, scale)
_t['im_detect'].toc()
_t['misc'].tic()
for j in range(1, imdb.num_classes):
indexes = np.where(scores[:, j] > thresh)[0]
cls_scores = scores[indexes, j, np.newaxis]
cls_boxes = boxes[indexes, j * 4:(j + 1) * 4]
cls_boxes = map_box_back(cls_boxes, crop[2], crop[0], im_info[0,2])
cls_dets = np.hstack((cls_boxes, cls_scores))
keep = nms(cls_dets)
all_boxes[j][i] = cls_dets[keep, :]
all_gts[j][i] = map_box_back(gt_boxes, crop[2], crop[0], im_info[0,2])
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in range(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in range(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
kept_boxes[j][i] = all_boxes[j][i][keep, :]
if vis:
boxes_this_image = [[]] + [kept_boxes[j][i] for j in range(1, imdb.num_classes)]
vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image, imdb.classes, scale)
_t['misc'].toc()
if i % 200 == 0:
if i <= 400:
logger.info('im_detect: {:d}/{:d} data {:.3f}s im_detect {:.3f}s misc {:.3f}s'
.format(i, imdb.num_images, _t['data'].average_time, _t['im_detect'].average_time,
_t['misc'].average_time))
else:
print i,
sys.stdout.flush()
# logger.info('testing %d/%d data %.4fs net %.4fs post %.4fs' % (i, imdb.num_images, t1, t2, t3))
i += 1
_t['data'].tic()
print
sys.stdout.flush()
det_file = os.path.join(imdb.cache_path, imdb.name + '_detections.pkl')
with open(det_file, 'wb') as f:
cPickle.dump(kept_boxes, f, protocol=cPickle.HIGHEST_PROTOCOL)
default.res_dict = {'imname': all_imnames, 'boxes': all_boxes[1], 'gts': all_gts[1]}
# default.res_dict = {'imname': all_imnames, 'im_info': all_iminfos, 'crops': all_crops, 'boxes': all_boxes[1], 'gts': all_gts[1]}
acc = my_evaluate_detections(all_boxes, all_gts)
sys.stdout.flush()
return acc
def vis_all_boxes(im_array, boxes):
"""
visualize all boxes in one image
:param im_array: [b=1 c h w] in rgb
    :param boxes: numpy.ndarray([[x1 y1 x2 y2 (score)]])
:return:
"""
import matplotlib.pyplot as plt
from ..fio.load_ct_img import windowing_rev, windowing
im = windowing_rev(im_array+config.PIXEL_MEANS, config.WINDOWING)
im = windowing(im, [-175,275]).astype(np.uint8) # soft tissue window
plt.imshow(im)
color = (0.,1.,0.)
for bbox in boxes:
rect = plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor=color, linewidth=2)
plt.gca().add_patch(rect)
if boxes.shape[1] == 5:
score = bbox[-1]
plt.gca().text(bbox[0], bbox[1] - 2,
                           '{:.3f}'.format(score),
bbox=dict(facecolor=color, alpha=0.5), fontsize=12, color='white')
plt.show()
def vis_all_detection(im_array, detections, class_names, scale):
"""
visualize all detections in one image
:param im_array: [b=1 c h w] in rgb
:param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
:param class_names: list of names in imdb
:param scale: visualize the scaled image
:return:
"""
import matplotlib.pyplot as plt
import random
im = image.transform_inverse(im_array, config.PIXEL_MEANS)
plt.imshow(im)
for j, name in enumerate(class_names):
if name == '__background__':
continue
color = (random.random(), random.random(), random.random()) # generate a random color
dets = detections[j]
for det in dets:
bbox = det[:4] * scale
score = det[-1]
rect = plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor=color, linewidth=3.5)
plt.gca().add_patch(rect)
plt.gca().text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(name, score),
bbox=dict(facecolor=color, alpha=0.5), fontsize=12, color='white')
plt.show()
def draw_all_detection(im_array, detections, class_names, scale):
"""
visualize all detections in one image
:param im_array: [b=1 c h w] in rgb
:param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
:param class_names: list of names in imdb
:param scale: visualize the scaled image
:return:
"""
import cv2
import random
color_white = (255, 255, 255)
im = image.transform_inverse(im_array, config.PIXEL_MEANS)
# change to bgr
im = cv2.cvtColor(im, cv2.cv.CV_RGB2BGR)
for j, name in enumerate(class_names):
if name == '__background__':
continue
color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256)) # generate a random color
dets = detections[j]
for det in dets:
bbox = det[:4] * scale
score = det[-1]
bbox = map(int, bbox)
cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
return im
|
py | 1a4e198097c066367983585650962ae02be05b98 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPybigwig(PythonPackage):
"""A package for accessing bigWig files using libBigWig."""
pypi = "pyBigWig/pyBigWig-0.3.4.tar.gz"
version('0.3.12', sha256='e01991790ece496bf6d3f00778dcfb136dd9ca0fd28acc1b3fb43051ad9b8403')
version('0.3.4', sha256='8c97a19218023190041c0e426f1544f7a4944a7bb4568faca1d85f1975af9ee2')
variant('numpy', default=True,
description='Enable support for numpy integers and vectors')
patch('python3_curl.patch', when='@:0.3.12 ^python@3:')
depends_on('curl', type=('build', 'link', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-numpy', type=('build', 'run'), when='+numpy')
|
py | 1a4e1ae66a58b4827c29d33bfc0f9b74080f76a9 | import subprocess
import os
def arg():
try:
import sys
return sys.argv[1]
except:
None
inputfile = input("Enter the file to parse:")
outputfile = input("Enter the file to output to: ")
if os.path.exists("my_filters_001"):
os.chdir("my_filters_001")
subprocess.call(["git pull"],shell=True)
os.chdir("..")
else:
subprocess.call(["git clone https://github.com/iam-py-test/my_filters_001.git"],shell=True)
alt = open("my_filters_001/Alternative list formats/{}".format(outputfile),"w")
with open("my_filters_001/{}".format(inputfile)) as f:
lines = f.read().split("\n")
for line in lines:
if line.startswith("||"):
continue
elif line.startswith("!"):
if arg() != "--nocomment":
alt.write(line.replace("!","#"))
alt.write("\n")
elif line != "":
alt.write("127.0.0.1 {}".format(line.split("$")[0]))
alt.write("\n")
alt.close()
os.chdir("my_filters_001")
subprocess.call(["git add ."],shell=True)
subprocess.call(["git commit -m \"[bot] add alt list\""],shell=True)
subprocess.call(["git push"],shell=True)
|
py | 1a4e1d1c4fb2e286ff120ecf2c31d8325afee48e | """
Created on 27 Feb 2018
@author: Bruno Beloff ([email protected])
"""
import optparse
from scs_dfe.interface.interface_conf import InterfaceConf
# --------------------------------------------------------------------------------------------------------------------
class CmdInterfaceConf(object):
"""
unix command line handler
"""
# ----------------------------------------------------------------------------------------------------------------
def __init__(self):
self.__parser = optparse.OptionParser(usage="%prog [{ [-m MODEL] | -d }] [-v]",
version="%prog 1.0")
models = ' | '.join(InterfaceConf.models())
# optional...
self.__parser.add_option("--model", "-m", type="string", nargs=1, action="store", dest="model",
help="interface model { %s }" % models)
self.__parser.add_option("--delete", "-d", action="store_true", dest="delete", default=False,
help="delete the interface configuration")
self.__parser.add_option("--verbose", "-v", action="store_true", dest="verbose", default=False,
help="report narrative to stderr")
self.__opts, self.__args = self.__parser.parse_args()
# ----------------------------------------------------------------------------------------------------------------
def is_valid(self):
if self.set() and self.delete:
return False
if self.set() and self.model is not None and self.model not in InterfaceConf.models():
return False
return True
def set(self):
return self.model is not None
# ----------------------------------------------------------------------------------------------------------------
@property
def model(self):
return self.__opts.model
@property
def delete(self):
return self.__opts.delete
@property
def verbose(self):
return self.__opts.verbose
# ----------------------------------------------------------------------------------------------------------------
def print_help(self, file):
self.__parser.print_help(file)
def __str__(self, *args, **kwargs):
return "CmdInterfaceConf:{source:%s, delete:%s, verbose:%s}" % \
(self.__opts.source, self.delete, self.verbose)
|
py | 1a4e1e5211e5c1c5674f53c622e2a5b7dc8b5706 | #!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage: ./check_marathon_services_replication.py [options]
This is a script that checks the number of HAProxy backends via Synapse against
the expected amount that should've been deployed via Marathon in a mesos cluster.
Basically, the script checks smartstack.yaml for listed namespaces, and then queries
Synapse for the number of available backends for that namespace. It then goes through
the Marathon service configuration file for that cluster, and sees how many instances
are expected to be available for that namespace based on the number of instances deployed
on that namespace.
After retrieving that information, a fraction of available instances is calculated
(available/expected), and then compared against a threshold. The default threshold is
50, meaning if less than 50% of a service's backends are available, the script sends
CRITICAL. If replication_threshold is defined in the yelpsoa config for a service
instance then it will be used instead.
"""
import logging
from datetime import datetime
from datetime import timedelta
from datetime import timezone
from typing import Optional
from typing import Sequence
from marathon.models.task import MarathonTask
from paasta_tools import marathon_tools
from paasta_tools import monitoring_tools
from paasta_tools.check_services_replication_tools import main
from paasta_tools.long_running_service_tools import get_proxy_port_for_instance
from paasta_tools.marathon_tools import format_job_id
from paasta_tools.marathon_tools import MarathonServiceConfig
from paasta_tools.smartstack_tools import MesosSmartstackEnvoyReplicationChecker
log = logging.getLogger(__name__)
def filter_healthy_marathon_instances_for_short_app_id(all_tasks, app_id):
tasks_for_app = [
task for task in all_tasks if task.app_id.startswith("/%s" % app_id)
]
one_minute_ago = datetime.now(timezone.utc) - timedelta(minutes=1)
healthy_tasks = []
for task in tasks_for_app:
if (
marathon_tools.is_task_healthy(task, default_healthy=True)
and task.started_at is not None
and task.started_at < one_minute_ago
):
healthy_tasks.append(task)
return len(healthy_tasks)
def check_healthy_marathon_tasks_for_service_instance(
instance_config, expected_count, all_tasks, dry_run=False,
):
app_id = format_job_id(instance_config.service, instance_config.instance)
num_healthy_tasks = filter_healthy_marathon_instances_for_short_app_id(
all_tasks=all_tasks, app_id=app_id
)
log.info("Checking %s in marathon as it is not in smartstack" % app_id)
monitoring_tools.send_replication_event_if_under_replication(
instance_config=instance_config,
expected_count=expected_count,
num_available=num_healthy_tasks,
dry_run=dry_run,
)
def check_service_replication(
instance_config: MarathonServiceConfig,
all_tasks_or_pods: Sequence[MarathonTask],
replication_checker: MesosSmartstackEnvoyReplicationChecker,
dry_run: bool = False,
) -> Optional[bool]:
"""Checks a service's replication levels based on how the service's replication
should be monitored. (smartstack/envoy or mesos)
:param instance_config: an instance of MarathonServiceConfig
:param replication_checker: an instance of MesosSmartstackEnvoyReplicationChecker
"""
expected_count = instance_config.get_instances()
log.info(
"Expecting %d total tasks for %s" % (expected_count, instance_config.job_id)
)
proxy_port = get_proxy_port_for_instance(instance_config)
registrations = instance_config.get_registrations()
# if the primary registration does not match the service_instance name then
# the best we can do is check marathon for replication (for now).
if proxy_port is not None and registrations[0] == instance_config.job_id:
is_well_replicated = monitoring_tools.check_replication_for_instance(
instance_config=instance_config,
expected_count=expected_count,
replication_checker=replication_checker,
dry_run=dry_run,
)
return is_well_replicated
else:
check_healthy_marathon_tasks_for_service_instance(
instance_config=instance_config,
expected_count=expected_count,
all_tasks=all_tasks_or_pods,
dry_run=dry_run,
)
return None
if __name__ == "__main__":
main(
instance_type_class=marathon_tools.MarathonServiceConfig,
check_service_replication=check_service_replication,
namespace=None, # not relevant for mesos
mesos=True,
)
|
py | 1a4e1e91de0ff3121767ebe72e238421d5ca0f0b | from event_manager import event_actions, event_subjects
from event_manager.event import Attribute, Event
SUPERUSER_ROLE_GRANTED = '{}.{}'.format(event_subjects.SUPERUSER, event_actions.GRANTED)
SUPERUSER_ROLE_REVOKED = '{}.{}'.format(event_subjects.SUPERUSER, event_actions.REVOKED)
class SuperUserRoleGrantedEvent(Event):
event_type = SUPERUSER_ROLE_GRANTED
actor_id = 'actor_id'
attributes = (
Attribute('id'),
Attribute('actor_id')
)
class SuperUserRoleRevokedEvent(Event):
event_type = SUPERUSER_ROLE_REVOKED
actor_id = 'actor_id'
attributes = (
Attribute('id'),
Attribute('actor_id')
)
|
py | 1a4e1f375a3c13fdf053b946e2a16e3273908091 | __all__ = ['ArcGIS']
from .arcgis import ArcGIS |
py | 1a4e1f9725a190e1541cdca8c155f10a2713fa90 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from aria.orchestrator.workflows.api import task
from aria.orchestrator.workflows.builtin.install import install
from tests import mock
from tests import storage
from . import assert_node_install_operations
@pytest.fixture
def ctx(tmpdir):
context = mock.context.simple(str(tmpdir),
topology=mock.topology.create_simple_topology_three_nodes)
yield context
storage.release_sqlite_storage(context.model)
def test_install(ctx):
install_tasks = list(task.WorkflowTask(install, ctx=ctx).topological_order(True))
assert len(install_tasks) == 3
dependency_node_subgraph1, dependency_node_subgraph2, dependent_node_subgraph = install_tasks
dependent_node_tasks = list(dependent_node_subgraph.topological_order(reverse=True))
dependency_node1_tasks = list(dependency_node_subgraph1.topological_order(reverse=True))
dependency_node2_tasks = list(dependency_node_subgraph2.topological_order(reverse=True))
assert_node_install_operations(dependency_node1_tasks)
assert_node_install_operations(dependency_node2_tasks)
assert_node_install_operations(dependent_node_tasks, relationships=2)
|
py | 1a4e1fee9d9aff4de6fea37e9f4e7ebedc279afc | from logging import Logger
from typing import Optional
from widgetastic.browser import Browser
from widgetastic.types import ViewParent
from widgetastic.utils import ParametrizedLocator
from widgetastic.widget.base import ClickableMixin
from widgetastic.widget.base import View
from widgetastic.widget.base import Widget
from widgetastic.xpath import quote
class OUIABase:
"""
Base class for ``OUIA`` support. According to the spec ``OUIA`` compatible components may
have the following attributes in the root level HTML element:
* data-ouia-component-type
* data-ouia-component-id
* data-ouia-safe
https://ouia.readthedocs.io/en/latest/README.html#ouia-component
"""
ROOT = ParametrizedLocator(
".//*[@data-ouia-component-type={@component_type}{@component_id_suffix}]"
)
browser: Browser
def _set_attrs(
self,
component_type: str,
component_id: Optional[str] = None,
) -> None:
self.component_type = quote(component_type)
self.component_id = quote(component_id)
component_id = f" and @data-ouia-component-id={quote(component_id)}" if component_id else ""
self.component_id_suffix = component_id
self.locator = self.ROOT.locator
@property
def is_safe(self) -> bool:
"""
An attribute called data-ouia-safe, which is True only when the component is in a static
state, i.e. no animations are occurring. At all other times, this value MUST be False.
"""
return "true" in self.browser.get_attribute("data-ouia-safe", self)
def __locator__(self) -> ParametrizedLocator:
return self.ROOT
def __repr__(self):
component_id_suffix = f"; ouia id: {self.component_id}" if self.component_id else ""
desc = f"ouia type: {self.component_type}{component_id_suffix}"
return f"<{type(self).__name__}; {desc}>"
class OUIAGenericView(OUIABase, View):
"""A base class for any OUIA compatible view.
Children classes must have the same name as the value of ``data-ouia-component-type`` attribute
of the root HTML element.
Args:
component_id: value of data-ouia-component-id attribute.
component_type: value of data-ouia-component-type attribute.
"""
OUIA_COMPONENT_TYPE: str
OUIA_ID: Optional[str]
def __init__(
self,
parent: ViewParent,
component_id: str = "",
logger: Optional[Logger] = None,
**kwargs,
) -> None:
component_type: Optional[str] = kwargs.pop("component_type", None)
self._set_attrs(
component_type=component_type or self.OUIA_COMPONENT_TYPE or type(self).__name__,
component_id=getattr(self, "OUIA_ID", component_id),
)
super().__init__(
parent=parent,
logger=logger,
**kwargs,
)
class OUIAGenericWidget(OUIABase, Widget, ClickableMixin):
"""A base class for any OUIA compatible widget.
Children classes must have the same name as the value of ``data-ouia-component-type`` attribute
of the root HTML element.
Args:
component_id: value of data-ouia-component-id attribute.
component_type: value of data-ouia-component-type attribute.
"""
OUIA_COMPONENT_TYPE: str
def __init__(
self,
parent: ViewParent,
component_id: Optional[str] = None,
logger: Optional[Logger] = None,
component_type: Optional[str] = None,
) -> None:
self._set_attrs(
component_type=component_type or self.OUIA_COMPONENT_TYPE or type(self).__name__,
component_id=component_id,
)
super().__init__(parent=parent, logger=logger)
|
py | 1a4e208260ad27aa7b8caa4119ff5af0665a0d26 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""Base test cases."""
import os
from typing import Any, Dict, List, Optional
import toml
class TestDetails:
"""Holds the test details."""
repository_id: str
token: str
username: str
tenant: str
project_id: str
def __init__(self) -> None:
self.token = os.environ["SIMPLE_ADO_BASE_TOKEN"]
self.username = os.environ["SIMPLE_ADO_USERNAME"]
self.tenant = os.environ["SIMPLE_ADO_TENANT"]
self.repository_id = os.environ["SIMPLE_ADO_REPO_ID"]
self.project_id = os.environ["SIMPLE_ADO_PROJECT_ID"]
|
py | 1a4e20bb64f50c0e72179e294e26a17107229e10 | DEPS = [
'recipe_engine/path',
'recipe_engine/properties',
]
# TODO(phajdan): provide coverage (http://crbug.com/693058).
DISABLE_STRICT_COVERAGE = True
|
py | 1a4e223d483d5a6388c2bac11fdad42dac08960f | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = [
'GetCertificateAuthorityResult',
'AwaitableGetCertificateAuthorityResult',
'get_certificate_authority',
]
@pulumi.output_type
class GetCertificateAuthorityResult:
"""
A collection of values returned by getCertificateAuthority.
"""
def __init__(__self__, arn=None, certificate=None, certificate_chain=None, certificate_signing_request=None, id=None, not_after=None, not_before=None, revocation_configurations=None, serial=None, status=None, tags=None, type=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if certificate and not isinstance(certificate, str):
raise TypeError("Expected argument 'certificate' to be a str")
pulumi.set(__self__, "certificate", certificate)
if certificate_chain and not isinstance(certificate_chain, str):
raise TypeError("Expected argument 'certificate_chain' to be a str")
pulumi.set(__self__, "certificate_chain", certificate_chain)
if certificate_signing_request and not isinstance(certificate_signing_request, str):
raise TypeError("Expected argument 'certificate_signing_request' to be a str")
pulumi.set(__self__, "certificate_signing_request", certificate_signing_request)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if not_after and not isinstance(not_after, str):
raise TypeError("Expected argument 'not_after' to be a str")
pulumi.set(__self__, "not_after", not_after)
if not_before and not isinstance(not_before, str):
raise TypeError("Expected argument 'not_before' to be a str")
pulumi.set(__self__, "not_before", not_before)
if revocation_configurations and not isinstance(revocation_configurations, list):
raise TypeError("Expected argument 'revocation_configurations' to be a list")
pulumi.set(__self__, "revocation_configurations", revocation_configurations)
if serial and not isinstance(serial, str):
raise TypeError("Expected argument 'serial' to be a str")
pulumi.set(__self__, "serial", serial)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def arn(self) -> str:
return pulumi.get(self, "arn")
@property
@pulumi.getter
def certificate(self) -> str:
"""
Base64-encoded certificate authority (CA) certificate. Only available after the certificate authority certificate has been imported.
"""
return pulumi.get(self, "certificate")
@property
@pulumi.getter(name="certificateChain")
def certificate_chain(self) -> str:
"""
Base64-encoded certificate chain that includes any intermediate certificates and chains up to root on-premises certificate that you used to sign your private CA certificate. The chain does not include your private CA certificate. Only available after the certificate authority certificate has been imported.
"""
return pulumi.get(self, "certificate_chain")
@property
@pulumi.getter(name="certificateSigningRequest")
def certificate_signing_request(self) -> str:
"""
The base64 PEM-encoded certificate signing request (CSR) for your private CA certificate.
"""
return pulumi.get(self, "certificate_signing_request")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="notAfter")
def not_after(self) -> str:
"""
Date and time after which the certificate authority is not valid. Only available after the certificate authority certificate has been imported.
"""
return pulumi.get(self, "not_after")
@property
@pulumi.getter(name="notBefore")
def not_before(self) -> str:
"""
Date and time before which the certificate authority is not valid. Only available after the certificate authority certificate has been imported.
"""
return pulumi.get(self, "not_before")
@property
@pulumi.getter(name="revocationConfigurations")
def revocation_configurations(self) -> Sequence['outputs.GetCertificateAuthorityRevocationConfigurationResult']:
"""
Nested attribute containing revocation configuration.
* `revocation_configuration.0.crl_configuration` - Nested attribute containing configuration of the certificate revocation list (CRL), if any, maintained by the certificate authority.
* `revocation_configuration.0.crl_configuration.0.custom_cname` - Name inserted into the certificate CRL Distribution Points extension that enables the use of an alias for the CRL distribution point.
* `revocation_configuration.0.crl_configuration.0.enabled` - Boolean value that specifies whether certificate revocation lists (CRLs) are enabled.
* `revocation_configuration.0.crl_configuration.0.expiration_in_days` - Number of days until a certificate expires.
* `revocation_configuration.0.crl_configuration.0.s3_bucket_name` - Name of the S3 bucket that contains the CRL.
"""
return pulumi.get(self, "revocation_configurations")
@property
@pulumi.getter
def serial(self) -> str:
"""
Serial number of the certificate authority. Only available after the certificate authority certificate has been imported.
"""
return pulumi.get(self, "serial")
@property
@pulumi.getter
def status(self) -> str:
"""
Status of the certificate authority.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
Specifies a key-value map of user-defined tags that are attached to the certificate authority.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the certificate authority.
"""
return pulumi.get(self, "type")
class AwaitableGetCertificateAuthorityResult(GetCertificateAuthorityResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCertificateAuthorityResult(
arn=self.arn,
certificate=self.certificate,
certificate_chain=self.certificate_chain,
certificate_signing_request=self.certificate_signing_request,
id=self.id,
not_after=self.not_after,
not_before=self.not_before,
revocation_configurations=self.revocation_configurations,
serial=self.serial,
status=self.status,
tags=self.tags,
type=self.type)
def get_certificate_authority(arn: Optional[str] = None,
revocation_configurations: Optional[Sequence[pulumi.InputType['GetCertificateAuthorityRevocationConfigurationArgs']]] = None,
tags: Optional[Mapping[str, str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCertificateAuthorityResult:
"""
Get information on a AWS Certificate Manager Private Certificate Authority (ACM PCA Certificate Authority).
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.acmpca.get_certificate_authority(arn="arn:aws:acm-pca:us-east-1:123456789012:certificate-authority/12345678-1234-1234-1234-123456789012")
```
:param str arn: Amazon Resource Name (ARN) of the certificate authority.
:param Sequence[pulumi.InputType['GetCertificateAuthorityRevocationConfigurationArgs']] revocation_configurations: Nested attribute containing revocation configuration.
* `revocation_configuration.0.crl_configuration` - Nested attribute containing configuration of the certificate revocation list (CRL), if any, maintained by the certificate authority.
* `revocation_configuration.0.crl_configuration.0.custom_cname` - Name inserted into the certificate CRL Distribution Points extension that enables the use of an alias for the CRL distribution point.
* `revocation_configuration.0.crl_configuration.0.enabled` - Boolean value that specifies whether certificate revocation lists (CRLs) are enabled.
* `revocation_configuration.0.crl_configuration.0.expiration_in_days` - Number of days until a certificate expires.
* `revocation_configuration.0.crl_configuration.0.s3_bucket_name` - Name of the S3 bucket that contains the CRL.
:param Mapping[str, str] tags: Specifies a key-value map of user-defined tags that are attached to the certificate authority.
"""
__args__ = dict()
__args__['arn'] = arn
__args__['revocationConfigurations'] = revocation_configurations
__args__['tags'] = tags
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:acmpca/getCertificateAuthority:getCertificateAuthority', __args__, opts=opts, typ=GetCertificateAuthorityResult).value
return AwaitableGetCertificateAuthorityResult(
arn=__ret__.arn,
certificate=__ret__.certificate,
certificate_chain=__ret__.certificate_chain,
certificate_signing_request=__ret__.certificate_signing_request,
id=__ret__.id,
not_after=__ret__.not_after,
not_before=__ret__.not_before,
revocation_configurations=__ret__.revocation_configurations,
serial=__ret__.serial,
status=__ret__.status,
tags=__ret__.tags,
type=__ret__.type)
|
py | 1a4e22da8b7927f66582d98b3a67fd4fc23a91e4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#unit tests for ACR functions
from .test_ObjectPath import op_test
from .test_utils import utils_test
import unittest
def doTests():
print ('Started ObjectPath Python implementation testing.\n')
unittest.TextTestRunner(verbosity = 2).run(op_test)
unittest.TextTestRunner(verbosity = 2).run(utils_test)
|
py | 1a4e22ddb9f1055d8be9f66e8b38bebcdc1e6a63 | import os
import jinja2
import logging
from mkdocs import utils
from mkdocs.utils import filters
from mkdocs.config.base import ValidationError
log = logging.getLogger(__name__)
log.addFilter(utils.warning_filter)
class Theme:
"""
A Theme object.
Keywords:
name: The name of the theme as defined by its entrypoint.
custom_dir: User defined directory for custom templates.
static_templates: A list of templates to render as static pages.
All other keywords are passed as-is and made available as a key/value mapping.
"""
def __init__(self, name=None, **user_config):
self.name = name
self._vars = {}
# MkDocs provided static templates are always included
package_dir = os.path.abspath(os.path.dirname(__file__))
mkdocs_templates = os.path.join(package_dir, 'templates')
self.static_templates = set(os.listdir(mkdocs_templates))
# Build self.dirs from various sources in order of precedence
self.dirs = []
if 'custom_dir' in user_config:
self.dirs.append(user_config.pop('custom_dir'))
if self.name:
self._load_theme_config(name)
# Include templates provided directly by MkDocs (outside any theme)
self.dirs.append(mkdocs_templates)
# Handle remaining user configs. Override theme configs (if set)
self.static_templates.update(user_config.pop('static_templates', []))
self._vars.update(user_config)
def __repr__(self):
return "{}(name='{}', dirs={}, static_templates={}, {})".format(
self.__class__.__name__, self.name, self.dirs, list(self.static_templates),
', '.join('{}={}'.format(k, repr(v)) for k, v in self._vars.items())
)
def __getitem__(self, key):
return self._vars[key]
def __setitem__(self, key, value):
self._vars[key] = value
def __contains__(self, item):
return item in self._vars
def __iter__(self):
return iter(self._vars)
def _load_theme_config(self, name):
""" Recursively load theme and any parent themes. """
theme_dir = utils.get_theme_dir(name)
self.dirs.append(theme_dir)
try:
file_path = os.path.join(theme_dir, 'mkdocs_theme.yml')
with open(file_path, 'rb') as f:
theme_config = utils.yaml_load(f)
if theme_config is None:
theme_config = {}
except OSError as e:
log.debug(e)
raise ValidationError(
"The theme '{}' does not appear to have a configuration file. "
"Please upgrade to a current version of the theme.".format(name)
)
log.debug("Loaded theme configuration for '%s' from '%s': %s", name, file_path, theme_config)
parent_theme = theme_config.pop('extends', None)
if parent_theme:
themes = utils.get_theme_names()
if parent_theme not in themes:
raise ValidationError(
"The theme '{}' inherits from '{}', which does not appear to be installed. "
"The available installed themes are: {}".format(name, parent_theme, ', '.join(themes))
)
self._load_theme_config(parent_theme)
self.static_templates.update(theme_config.pop('static_templates', []))
self._vars.update(theme_config)
def get_env(self):
""" Return a Jinja environment for the theme. """
loader = jinja2.FileSystemLoader(self.dirs)
# No autoreload because editing a template in the middle of a build is not useful.
env = jinja2.Environment(loader=loader, auto_reload=False)
env.filters['tojson'] = filters.tojson
env.filters['url'] = filters.url_filter
return env
|
py | 1a4e2300442730cb67dd38e362e1097fa3f976bd | """Tools for processing Texas PUDF in conjunction with HCUP data
Texas does not participate in HCUP, but does provide instead its own Inpatient Public Use Data Files (PUDF) for similar purposes.
More information on Texas Inpatient PUDF at http://www.dshs.state.tx.us/thcic/hospitals/Inpatientpudf.shtm.
"""
import os, re
def meta_from_txt(target):
"""Parses target text file containing Texas PUDF metadata and builds a pandas DataFrame object.
"""
pattern = '(?P<field_number>\d+\w?)\s+(?P<field>\S+)\s+(?:(?P<label>.*?)\s*)(?P<position>\d+)\s+(?P<width>\d+)\s+(?P<data_type>\w+)'
joined = ' '.join([x for x in open(target)])
captured = [x.groupdict() for x in re.finditer(pattern, joined)]
#avoid circular imports
from pandas import DataFrame
meta = DataFrame(captured)
if len(meta[ meta.field == 'SPEC_UNIT_1' ]) == 1:
#we have individual spec unit cols, so drop any combined one
#otherwise this throws off the parsing of the real files
meta = meta[ meta.field != 'SPEC_UNIT' ]
# Occasionally there's a cute field with a hyphen in it. This breaks SQL since it's a disallowed character for SQL object names due to ambiguity with the subtraction operator.
meta.field = meta.field.map(lambda x: x.replace('-', '_'))
return meta
def get_meta(year, variety='base', split_base_portion=None):
"""Retrieves a meta DataFrame object for a given year of Texas PUDF data
split_base_portion should only be used for 2011 and 2012 base years, which are split into two chunks by Texas
"""
year = int(year)#sometimes this is passed a string and I'd rather it work anyways
varieties = ['base', 'charges', 'facility']
years = xrange(1999, 2013)
assert variety.lower() in varieties, "No Texas PUDF definitions available for variety %s" % variety
assert year in years, "No Texas PUDF definitions available for year %s" % year
    if not (year > 2010 and variety.lower() == 'base'):
filename = 'tx_pudf_%d_%s_definition.txt' % (year, variety.lower())
else:
assert split_base_portion in [1, 2], "For 2011 and 2012 base files, must specify which portion (1 or 2)"
filename = 'tx_pudf_%d_%s_definition_%d.txt' % (year, variety.lower(), split_base_portion)
from .hachoir import BUNDLED_LOADFILE_DIR
target = os.path.join(BUNDLED_LOADFILE_DIR, 'tx_pudf', filename)
return meta_from_txt(target)
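# Illustrative usage (relies on the bundled definition files described above):
#   meta_2005_base = get_meta(2005, variety='base')
#   meta_2012_base_part1 = get_meta(2012, variety='base', split_base_portion=1)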
def meta_augment(meta_df):
"""Akin to sas.meta_augment(), but for use with meta derived from Texas Inpatient Public Use Data Files.
"""
meta_df['length'] = meta_df['width']
meta_df['scale'] = meta_df['field'].map(lambda x: 2 if x.find('CHARGES') > -1 or x.find('AMOUNT') > -1 else 0)
return meta_df
|
py | 1a4e235a85718e5868c7c092a5d61f9e4480695d | from .solution import matrix_to_sycamore_operations
|
py | 1a4e2388bbc2e1ef70f1caa29d2723ded0a218a5 | an = int(input())
a = set(map(int, input().split()))
N = int(input())
for i in range(N):
cmd, n = input().split(" ")
s = set(map(int, input().split(" ")))
getattr(a, cmd)(s)
print(sum(a))
|
py | 1a4e23b6b1ae6e7401f9f507d850fa8a780543ce | import json
import vidservers
from utils import gen_client, getLink, process_xpath
# -----------------------------------------------------------------------
def get_server_link(ep_number, server_id, episodes, servers, c):
client = gen_client(referer=f"{c['scheme']}{c['host']}")
sourceId = episodes[ep_number][server_id]
url = f"{c['scheme']}{c['host']}/ajax/anime/episode?id={sourceId}"
res = client.get(url).json()
encryptedURL = res['url']
server_link = getLink(encryptedURL)
return server_link
# -----------------------------------------------------------------------
def get_dl(server_link: str, server_id, servers):
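    # Look up the extractor in vidservers that matches the server's name, e.g.
    # servers[server_id] == "Doodstream" -> vidservers.doodstream(server_link) (name illustrative).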
dl = getattr(vidservers, servers[server_id].lower())(server_link)
return dl
# -----------------------------------------------------------------------
def parse_servers(data: str):
# server_id [ {server_id: server_name},... ]
servers = process_xpath("//*[contains(@id, 'server')]", data)
server_id = {}
server_choices = []
server_lookup = {}
for server in servers:
server_name = server.text_content().strip()
id = server.get('data-id')
server_id[id] = server_name
server_choices.append(server_name)
server_lookup[server_name] = id
return server_id, server_choices, server_lookup
# -----------------------------------------------------------------------
def parse_episodes(data: str):
# [ ep_num: { server_id: 'episode_id',... },... ]
episodes_parsed = {}
episodes = process_xpath("//a[@data-sources]", data)
for ep in episodes:
episodes_parsed[ep.get('data-base')] = json.loads(ep.get('data-sources'))
return episodes_parsed
# -----------------------------------------------------------------------
|
py | 1a4e23e1cb178e495f2c59f483e8ad5479d45f3d | import logging
RANDOM_SEED = 20201234
import argparse
import openml
import os
import numpy as np
import string
import pandas as pd
import scipy
import math
OPENML_REGRESSION_LIST = [201, 1191, 215, 344, 537, 564, 1196, 1199, 1203, 1206,
5648, 23515, 41506, 41539, 42729, 42496]
NS_LIST = list(string.ascii_lowercase) + list(string.ascii_uppercase)
# NS_LIST = list(string.ascii_lowercase)[:10]
OML_target_attribute_dict = {
42236: 'pm2.5'
}
# from ..vw_benchmark.config import QW_OML_API_KEY, VW_DS_DIR
VW_DS_DIR = './test/vw/vw_benchmark/data/openml_vwdatasets/'
QW_OML_API_KEY = '8c4eebcda506ae1065902c2b224369b9'
# TODO: how to get this info from config.py
class OpenML2VWData:
VW_DS_DIR = VW_DS_DIR
def __init__(self, did, max_ns_num, task_type='regression'):
self._did = did
self._task_type = task_type
self._is_regression = False
self.vw_x_dic_list = []
self.Y = []
if 'regression' in self._task_type:
self._is_regression = True
self.vw_examples = self.load_vw_dataset(did, OpenML2VWData.VW_DS_DIR, self._is_regression, max_ns_num)
print( 'number of samples', len(self.vw_examples))
for i, e in enumerate(self.vw_examples):
self.Y.append(float(e.split('|')[0]))
print( self.Y[0:5])
logging.info('y label%s', self.Y[0:5])
@staticmethod
def load_vw_dataset(did, ds_dir, is_regression, max_ns_num):
import os
data_list = []
if is_regression:
fname = 'ds_{}_{}_{}.vw'.format(did, max_ns_num, 0) # the second field specifies the largest number of namespaces using.
vw_dataset_file = os.path.join(ds_dir, fname)
if not os.path.exists(vw_dataset_file) or os.stat(vw_dataset_file).st_size < 1000:
get_oml_to_vw(did, max_ns_num)
print(ds_dir, vw_dataset_file)
if not os.path.exists(ds_dir): os.makedirs(ds_dir)
with open(os.path.join(ds_dir, fname), 'r') as f:
vw_content = f.read().splitlines()
print(type(vw_content), len(vw_content))
return vw_content
# target # of ns: 10-26.
# TODO: split features into 10-26 ns:(1) look at the prefix (10<# of unique prefix< 26); (2) sequentially.
def oml_to_vw_no_grouping(X, y, ds_dir, fname):
print('no feature grouping')
with open(os.path.join(ds_dir, fname), 'w') as f:
if isinstance(X, pd.DataFrame):
for i in range(len(X)):
ns_line = '{} |{}'.format(str(y[i]), '|'.join('{} {}:{:.6f}'.format(NS_LIST[j], j, val) for
j, val in enumerate(X.iloc[i].to_list()) ))
f.write(ns_line)
f.write('\n')
elif isinstance(X, np.ndarray):
for i in range(len(X)):
ns_line = '{} |{}'.format(str(y[i]), '|'.join('{} {}:{:.6f}'.format(NS_LIST[j], j, val) for
j, val in enumerate(X[i]) ))
f.write(ns_line)
f.write('\n')
elif isinstance(X, scipy.sparse.csr_matrix):
print('NotImplementedError for sparse data')
            raise NotImplementedError
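# Illustrative line written by oml_to_vw_no_grouping for y=3.2 and two features (values hypothetical):
#   3.2 |a 0:1.500000|b 1:0.250000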
def oml_to_vw_w_grouping(X, y, ds_dir, fname, orginal_dim, group_num, grouping_method='sequential'):
all_indexes = [i for i in range(orginal_dim)]
print('grouping', group_num)
# split all_indexes into # group_num of groups
# max_size_per_group = math.ceil(orginal_dim/float(group_num))
max_size_per_group = int(np.ceil(orginal_dim / float(group_num)))
# Option 1: sequential grouping
if grouping_method == 'sequential':
group_indexes = [] # lists of lists
print('indexes', group_num)
for i in range(group_num):
print('indexes', group_num, max_size_per_group)
indexes = [ind for ind in range(i*max_size_per_group, min( (i+1)*max_size_per_group, orginal_dim)) ]
print('indexes', group_num, indexes)
if len(indexes)>0: group_indexes.append(indexes)
print(group_indexes)
print(group_indexes)
else:
        raise NotImplementedError
if group_indexes:
print('group_indexes')
with open(os.path.join(ds_dir, fname), 'w') as f:
if isinstance(X, pd.DataFrame):
raise NotImplementedError
elif isinstance(X, np.ndarray):
for i in range(len(X)):
# ns_content = '{} {}:{:.6f}'.format(NS_LIST[j], j, val) for j, val in enumerate(X[i])
NS_content = []
for zz in range(len(group_indexes)):
ns_features = ' '.join('{}:{:.6f}'.format(ind, X[i][ind]) for ind in group_indexes[zz])
NS_content.append(ns_features)
ns_line = '{} |{}'.format(str(y[i]), '|'.join('{} {}'.format(NS_LIST[j], NS_content[j]) for
j in range(len(group_indexes)) ))
f.write(ns_line)
f.write('\n')
elif isinstance(X, scipy.sparse.csr_matrix):
print('NotImplementedError for sparse data')
            raise NotImplementedError
def save_vw_dataset_w_ns(X, y, did, ds_dir, max_ns_num, is_regression):
""" convert openml dataset to vw example and save to file
"""
print('is_regression',is_regression)
if is_regression:
fname = 'ds_{}_{}_{}.vw'.format(did, max_ns_num, 0)
print('dataset size', X.shape[0], X.shape[1])
print('saving data', did, ds_dir, fname)
dim = X.shape[1]
# do not do feature grouping
from os import path
# if not path.exists(os.path.join(ds_dir, fname)):
# TODO: remove no_grouping code
if dim < max_ns_num:
oml_to_vw_no_grouping(X, y, ds_dir, fname)
else:
oml_to_vw_w_grouping(X, y, ds_dir, fname, dim, group_num=max_ns_num)
def shuffle_data(X, y, seed):
try:
n = len(X)
    except TypeError:
n = X.getnnz()
perm = np.random.RandomState(seed=seed).permutation(n)
X_shuf = X[perm, :]
y_shuf = y[perm]
return X_shuf, y_shuf
def get_oml_to_vw(did, max_ns_num, ds_dir=VW_DS_DIR):
success = False
print('-----getting oml dataset-------', did)
ds = openml.datasets.get_dataset(did)
target_attribute = ds.default_target_attribute
if target_attribute is None and did in OML_target_attribute_dict:
target_attribute = OML_target_attribute_dict[did]
print('target=ds.default_target_attribute', target_attribute)
data = ds.get_data(target=target_attribute, dataset_format='array')
    X, y = data[0], data[1]  # with dataset_format='array', X is a numpy array (or scipy sparse matrix) and y a numpy array
import scipy
if scipy.sparse.issparse(X):
X = scipy.sparse.csr_matrix.toarray(X)
print('is sparse matrix')
if data and isinstance(X, np.ndarray):
        print('-----converting oml to vw and saving oml dataset-------')
save_vw_dataset_w_ns(X, y, did, ds_dir, max_ns_num, is_regression=True)
success = True
else:
print('---failed to convert/save oml dataset to vw!!!----')
try:
X, y = data[0], data[1] # return X: pd DataFrame, y: pd series
if data and isinstance(X, np.ndarray):
            print('-----converting oml to vw and saving oml dataset-------')
            save_vw_dataset_w_ns(X, y, did, ds_dir, max_ns_num, is_regression=True)
success = True
else:
print('---failed to convert/save oml dataset to vw!!!----')
    except Exception:
print('-------------failed to get oml dataset!!!', did)
return success
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='openML to vw converter')
parser.add_argument('-dataset', type=int, default=None, help='dataset id')
    parser.add_argument('-ns_num', '--ns_num', metavar='ns_num', type=int,
                        default=10, help="maximum number of namespaces")
parser.add_argument('-min_sample_size', type=int, default=10000, help='minimum sample size')
parser.add_argument('-max_sample_size', type=int, default=None, help='maximum sample size')
args = parser.parse_args()
openml.config.apikey = QW_OML_API_KEY
openml.config.set_cache_directory('./data/omlcache/')
print('loaded openML')
if not os.path.exists(VW_DS_DIR): os.makedirs(VW_DS_DIR)
if args.dataset is not None:
dids = [args.dataset]
else:
if args.min_sample_size >=10000 and args.max_sample_size is None:
dids = OPENML_REGRESSION_LIST
failed_datasets = []
for did in sorted(dids):
print('processing did', did)
print('getting data,', did)
success = get_oml_to_vw(did, args.ns_num)
if not success:
failed_datasets.append(did)
print('-----------failed datasets', failed_datasets)
## command line:
# python openml_data_helper.py -min_sample_size 10000
# failed datasets [1414, 5572, 40753, 41463, 42080, 42092, 42125, 42130, 42131, 42160, 42183, 42207,
# 42208, 42362, 42367, 42464, 42559, 42635, 42672, 42673, 42677, 42688, 42720, 42721, 42726, 42728, 42729, 42731] |
py | 1a4e25a331fcaa281b56a3bcef1f3ab1ec23670f | from rlalgos.pytorch.mf import dqn as dqn_pytorch, sac as sac_pytorch, td3 as td3_pytorch, \
categorical_dqn as c51_pytorch, qr_dqn as qr_dqn_pytorch
from rlalgos.pytorch.mf.atari import categorical_dqn as atari_c51_pytorch, dqn as atari_dqn_pytorch, \
    qr_dqn as atari_qr_dqn_pytorch
from rlalgos.pytorch.offline import cql as cql_pytorch
from rlutils.infra.runner import get_argparser_from_func
# from rlutils.tf.algos.mb import pets
# from rlutils.tf.algos.mf import td3, ppo, trpo, sac, ddpg, dqn
# from rlutils.tf.algos.offline import cql, plas
__tf__ = ['ppo', 'td3', 'trpo', 'sac', 'ddpg', 'cql', 'plas', 'dqn', 'pets']
__all__ = ['sac_pytorch', 'td3_pytorch', 'atari_dqn_pytorch', 'dqn_pytorch', 'cql_pytorch', 'c51_pytorch',
           'atari_c51_pytorch', 'qr_dqn_pytorch', 'atari_qr_dqn_pytorch']
def main():
import argparse
parser = argparse.ArgumentParser('Running rl algorithms', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
algorithm_parsers = parser.add_subparsers(title='algorithm', help='algorithm specific parser', dest='algo')
for algo in __all__:
algo_parser = algorithm_parsers.add_parser(algo, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
get_argparser_from_func(eval(f'{algo}.Runner.main'), algo_parser)
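        # get_argparser_from_func (from rlutils) is assumed to populate each sub-parser
        # with flags derived from the corresponding Runner.main signature.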
kwargs = vars(parser.parse_args())
algo = kwargs.pop('algo')
eval(f'{algo}.Runner.main')(**kwargs)
if __name__ == '__main__':
main()
|
py | 1a4e26315d026f2c14fb1692db1fab10f345dae4 | # coding: utf-8
"""
vautoscaling
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from ncloud_vautoscaling.model.launch_configuration import LaunchConfiguration # noqa: F401,E501
class DeleteLaunchConfigurationResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'request_id': 'str',
'return_code': 'str',
'return_message': 'str',
'total_rows': 'int',
'launch_configuration_list': 'list[LaunchConfiguration]'
}
attribute_map = {
'request_id': 'requestId',
'return_code': 'returnCode',
'return_message': 'returnMessage',
'total_rows': 'totalRows',
'launch_configuration_list': 'launchConfigurationList'
}
def __init__(self, request_id=None, return_code=None, return_message=None, total_rows=None, launch_configuration_list=None): # noqa: E501
"""DeleteLaunchConfigurationResponse - a model defined in Swagger""" # noqa: E501
self._request_id = None
self._return_code = None
self._return_message = None
self._total_rows = None
self._launch_configuration_list = None
self.discriminator = None
if request_id is not None:
self.request_id = request_id
if return_code is not None:
self.return_code = return_code
if return_message is not None:
self.return_message = return_message
if total_rows is not None:
self.total_rows = total_rows
if launch_configuration_list is not None:
self.launch_configuration_list = launch_configuration_list
@property
def request_id(self):
"""Gets the request_id of this DeleteLaunchConfigurationResponse. # noqa: E501
:return: The request_id of this DeleteLaunchConfigurationResponse. # noqa: E501
:rtype: str
"""
return self._request_id
@request_id.setter
def request_id(self, request_id):
"""Sets the request_id of this DeleteLaunchConfigurationResponse.
:param request_id: The request_id of this DeleteLaunchConfigurationResponse. # noqa: E501
:type: str
"""
self._request_id = request_id
@property
def return_code(self):
"""Gets the return_code of this DeleteLaunchConfigurationResponse. # noqa: E501
:return: The return_code of this DeleteLaunchConfigurationResponse. # noqa: E501
:rtype: str
"""
return self._return_code
@return_code.setter
def return_code(self, return_code):
"""Sets the return_code of this DeleteLaunchConfigurationResponse.
:param return_code: The return_code of this DeleteLaunchConfigurationResponse. # noqa: E501
:type: str
"""
self._return_code = return_code
@property
def return_message(self):
"""Gets the return_message of this DeleteLaunchConfigurationResponse. # noqa: E501
:return: The return_message of this DeleteLaunchConfigurationResponse. # noqa: E501
:rtype: str
"""
return self._return_message
@return_message.setter
def return_message(self, return_message):
"""Sets the return_message of this DeleteLaunchConfigurationResponse.
:param return_message: The return_message of this DeleteLaunchConfigurationResponse. # noqa: E501
:type: str
"""
self._return_message = return_message
@property
def total_rows(self):
"""Gets the total_rows of this DeleteLaunchConfigurationResponse. # noqa: E501
:return: The total_rows of this DeleteLaunchConfigurationResponse. # noqa: E501
:rtype: int
"""
return self._total_rows
@total_rows.setter
def total_rows(self, total_rows):
"""Sets the total_rows of this DeleteLaunchConfigurationResponse.
:param total_rows: The total_rows of this DeleteLaunchConfigurationResponse. # noqa: E501
:type: int
"""
self._total_rows = total_rows
@property
def launch_configuration_list(self):
"""Gets the launch_configuration_list of this DeleteLaunchConfigurationResponse. # noqa: E501
:return: The launch_configuration_list of this DeleteLaunchConfigurationResponse. # noqa: E501
:rtype: list[LaunchConfiguration]
"""
return self._launch_configuration_list
@launch_configuration_list.setter
def launch_configuration_list(self, launch_configuration_list):
"""Sets the launch_configuration_list of this DeleteLaunchConfigurationResponse.
:param launch_configuration_list: The launch_configuration_list of this DeleteLaunchConfigurationResponse. # noqa: E501
:type: list[LaunchConfiguration]
"""
self._launch_configuration_list = launch_configuration_list
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeleteLaunchConfigurationResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 1a4e26b01d374d77e82874f56f3b585c497f33a7 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('communities', '0006_remove_community_conn_ssl_cert_pass'),
]
operations = [
migrations.AlterField(
model_name='community',
name='conn_host',
field=models.IPAddressField(verbose_name=b'Community Host'),
preserve_default=True,
),
migrations.AlterField(
model_name='community',
name='conn_pass',
field=models.CharField(max_length=1024, verbose_name=b'Community Password'),
preserve_default=True,
),
migrations.AlterField(
model_name='community',
name='conn_user',
field=models.CharField(max_length=1024, verbose_name=b'Community Username'),
preserve_default=True,
),
migrations.AlterField(
model_name='community',
name='name',
field=models.CharField(max_length=1024, verbose_name=b'Community Name'),
preserve_default=True,
),
migrations.AlterField(
model_name='community',
name='sightings_anonymous',
field=models.BooleanField(default=False, verbose_name=b'Report Sightings Anonymously?'),
preserve_default=True,
),
migrations.AlterField(
model_name='community',
name='sightings_configuration',
field=models.CharField(default=b'Auto', max_length=255, verbose_name=b'Sightings Mode', choices=[(b'Auto', b'Auto'), (b'Manual', b'Manual')]),
preserve_default=True,
),
]
|
py | 1a4e2784782e8807bca6109a6f1f51b1da392bcc | import os
import random
import numpy as np
import vec_noise
from PIL import Image
from tqdm import tqdm
WORLD_SIZE = [2000, 2000, 3]
WORKING_DIR = os.getcwd()
DATA_DIR = WORKING_DIR[:-8]
os.system("cls")
# +------------------------------------------------------------+
# | Made by Jonáš Erlebach |
# | Thanks to third party libraries from https://pypi.org/ |
# +------------------------------------------------------------+
class WorldGeneration:
def __init__(self, DATA_DIR):
self.DATA_DIR = DATA_DIR
        self.NOISE_SCALE = 0.002  # default: 0.002
self.octaves_devider = 1
def CreateImage(self):
x = [[[0, 0, 0] for x in range(WORLD_SIZE[0])] for _y in range(WORLD_SIZE[1])]
startx, starty = random.randint(0, 50000), random.randint(0, 50000)
for x_ in tqdm(range(WORLD_SIZE[0])):
for y in range(WORLD_SIZE[1]):
value = vec_noise.snoise2(startx + x_ * self.NOISE_SCALE, starty + y * self.NOISE_SCALE,
12 // self.octaves_devider)
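                # Map the noise value onto a fixed RGB terrain palette
                # (the thresholds below read as water, sand, grass, rock and snow).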
if value < -0.45:
x[x_][y][0] = 128
x[x_][y][1] = 197
x[x_][y][2] = 222
continue
if value < -0.35:
x[x_][y][0] = 248
x[x_][y][1] = 240
x[x_][y][2] = 164
continue
if value < 0.35:
x[x_][y][0] = 126
x[x_][y][1] = 200
x[x_][y][2] = 80
continue
if value < 0.53:
x[x_][y][0] = 200
x[x_][y][1] = 200
x[x_][y][2] = 200
continue
else:
x[x_][y][0] = 255
x[x_][y][1] = 255
x[x_][y][2] = 255
continue
self.to_image(x)
def to_image(self, array):
print("Creating Image")
array = np.array(array).astype(np.uint8)
img = Image.fromarray(array)
img.save(self.DATA_DIR + "\\Maps\\BG.png")
print("Image Created")
if __name__ == '__main__':
WorldGeneration(DATA_DIR).CreateImage()
|
py | 1a4e286e43074ed0896465a0c9f77fdd60e3ec68 | # DIP - Dependency Injection Principle
# High level modules should not depend on low level modules.
# Both should depend upon abstractions.
# **BAD
class CarWashService(object):
def __init__(self):
self.paymentMethod = Bank()
# CarWashService depends on the Bank() class.
# What if you want to add another payment method?
# CarWashService is tightly coupled with Bank().
# **GOOD
class CarWashService(object):
def __init__(self, paymentMethod):
self.paymentMethod = paymentMethod
# Changes to the payment method don't affect
# CarWashService since paymentMethod is injected.
# You can pass any payment method.
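# A minimal usage sketch: Bank and CreditCard below are hypothetical payment
# methods (not part of the original example); any object exposing the expected
# interface can be injected into CarWashService.
class Bank(object):
    def pay(self, amount):
        print('paid {} via bank transfer'.format(amount))
class CreditCard(object):
    def pay(self, amount):
        print('paid {} via credit card'.format(amount))
# CarWashService(Bank()).paymentMethod.pay(25)
# CarWashService(CreditCard()).paymentMethod.pay(25)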
|
py | 1a4e28f825b15ffc23f6d489086336cd63cd71f3 | # -*- coding: utf-8 -*-
#
# League of Code server implementation
# https://github.com/guluc3m/loc-server
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Grupo de Usuarios de Linux UC3M <http://gul.es>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Mails."""
from flask_babel import lazy_gettext as t
# Welcome
WELCOME_SUBJECT = t('Welcome to League of Code')
WELCOME_BODY = t(
'Welcome to the League of Code, %(username)s!\n\n'
'You can now login from %(link)s.\n\n'
'Happy Hacking!'
)
WELCOME_HTML = t(
'Welcome to the League of Code, %(username)s!<br/><br/>'
'You can now login from <a href="%(link)s">the application</a>.<br/><br/>'
'Happy Hacking!'
)
# Forgot password
FORGOT_PASSWORD_SUBJECT = t('League of Code - Reset your password')
FORGOT_PASSWORD_BODY = t(
'Hello %(username)s,\n\n'
'We have received a request to reset your password. If you have not done '
'this, then it is safe to ignore this email.\n\n'
'You can reset your password through the following link: %(link)s'
)
FORGOT_PASSWORD_HTML = t(
'Hello %(username)s,<br/><br/>'
'We have received a request to reset your password. If you have not done '
'this, then it is safe to ignore this email.<br/><br/>'
'You can reset your password through the <a href="%(link)s">following link</a>.'
)
# Kicked from party
KICKED_SUBJECT = t('League of Code - Kicked from party')
KICKED_BODY = t(
'Hello %(username)s,\n\n'
'This is a notification to let you know that you have been kicked from a '
'party for the match: %(match)s.\n\n'
'You are still signed up for the match, but are now alone in your own party.'
)
KICKED_HTML = t(
'Hello %(username)s,<br/><br/>'
'This is a notification to let you know that you have been kicked from a '
'party for the match: %(match)s.<br/><br/>'
'You are still signed up for the match, but are now alone in your own party.'
)
|
py | 1a4e295d0ea8d1c4879c5a86815dbe32d766a651 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
import sys
import time
import traceback
import json
import certifi
import urllib.parse
import aiohttp
try:
from . import paymentrequest_pb2 as pb2
except ImportError:
sys.exit("Error: could not find paymentrequest_pb2.py. Create it with 'protoc --proto_path=electrum_audax/ --python_out=electrum_audax/ electrum_audax/paymentrequest.proto'")
from . import bitcoin, ecc, util, transaction, x509, rsakey
from .util import bh2u, bfh, export_meta, import_meta, make_aiohttp_session
from .crypto import sha256
from .bitcoin import TYPE_ADDRESS
from .transaction import TxOutput
from .network import Network
from .logging import get_logger, Logger
_logger = get_logger(__name__)
REQUEST_HEADERS = {'Accept': 'application/audax-paymentrequest', 'User-Agent': 'Electrum'}
ACK_HEADERS = {'Content-Type':'application/audax-payment','Accept':'application/audax-paymentack','User-Agent':'Electrum'}
ca_path = certifi.where()
ca_list = None
ca_keyID = None
def load_ca_list():
global ca_list, ca_keyID
if ca_list is None:
ca_list, ca_keyID = x509.load_certificates(ca_path)
# status of payment requests
PR_UNPAID = 0
PR_EXPIRED = 1
PR_UNKNOWN = 2 # sent but not propagated
PR_PAID = 3 # sent and propagated
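# Fetch a serialized (BIP70-style protobuf) payment request over http(s) or from a
# local file and wrap it in a PaymentRequest object.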
async def get_payment_request(url: str) -> 'PaymentRequest':
u = urllib.parse.urlparse(url)
error = None
if u.scheme in ('http', 'https'):
resp_content = None
try:
proxy = Network.get_instance().proxy
async with make_aiohttp_session(proxy, headers=REQUEST_HEADERS) as session:
async with session.get(url) as response:
resp_content = await response.read()
response.raise_for_status()
# Guard against `audax:`-URIs with invalid payment request URLs
if "Content-Type" not in response.headers \
or response.headers["Content-Type"] != "application/audax-paymentrequest":
data = None
error = "payment URL not pointing to a payment request handling server"
else:
data = resp_content
data_len = len(data) if data is not None else None
_logger.info(f'fetched payment request {url} {data_len}')
except aiohttp.ClientError as e:
error = f"Error while contacting payment URL:\n{repr(e)}"
if isinstance(e, aiohttp.ClientResponseError) and e.status == 400 and resp_content:
error += "\n" + resp_content.decode("utf8")
data = None
elif u.scheme == 'file':
try:
with open(u.path, 'r', encoding='utf-8') as f:
data = f.read()
except IOError:
data = None
error = "payment URL not pointing to a valid file"
else:
data = None
error = f"Unknown scheme for payment request. URL: {url}"
pr = PaymentRequest(data, error)
return pr
class PaymentRequest:
def __init__(self, data, error=None):
self.raw = data
self.error = error
self.parse(data)
self.requestor = None # known after verify
self.tx = None
def __str__(self):
return str(self.raw)
def parse(self, r):
if self.error:
return
self.id = bh2u(sha256(r)[0:16])
try:
self.data = pb2.PaymentRequest()
self.data.ParseFromString(r)
except:
self.error = "cannot parse payment request"
return
self.details = pb2.PaymentDetails()
self.details.ParseFromString(self.data.serialized_payment_details)
self.outputs = []
for o in self.details.outputs:
type_, addr = transaction.get_address_from_output_script(o.script)
if type_ != TYPE_ADDRESS:
# TODO maybe rm restriction but then get_requestor and get_id need changes
self.error = "only addresses are allowed as outputs"
return
self.outputs.append(TxOutput(type_, addr, o.amount))
self.memo = self.details.memo
self.payment_url = self.details.payment_url
def is_pr(self):
return self.get_amount() != 0
#return self.get_outputs() != [(TYPE_ADDRESS, self.get_requestor(), self.get_amount())]
def verify(self, contacts):
if self.error:
return False
if not self.raw:
self.error = "Empty request"
return False
pr = pb2.PaymentRequest()
try:
pr.ParseFromString(self.raw)
except:
self.error = "Error: Cannot parse payment request"
return False
if not pr.signature:
# the address will be displayed as requestor
self.requestor = None
return True
if pr.pki_type in ["x509+sha256", "x509+sha1"]:
return self.verify_x509(pr)
elif pr.pki_type in ["dnssec+audax", "dnssec+ecdsa"]:
return self.verify_dnssec(pr, contacts)
else:
self.error = "ERROR: Unsupported PKI Type for Message Signature"
return False
def verify_x509(self, paymntreq):
load_ca_list()
if not ca_list:
self.error = "Trusted certificate authorities list not found"
return False
cert = pb2.X509Certificates()
cert.ParseFromString(paymntreq.pki_data)
# verify the chain of certificates
try:
x, ca = verify_cert_chain(cert.certificate)
except BaseException as e:
_logger.exception('')
self.error = str(e)
return False
# get requestor name
self.requestor = x.get_common_name()
if self.requestor.startswith('*.'):
self.requestor = self.requestor[2:]
# verify the BIP70 signature
pubkey0 = rsakey.RSAKey(x.modulus, x.exponent)
sig = paymntreq.signature
paymntreq.signature = b''
s = paymntreq.SerializeToString()
sigBytes = bytearray(sig)
msgBytes = bytearray(s)
if paymntreq.pki_type == "x509+sha256":
hashBytes = bytearray(hashlib.sha256(msgBytes).digest())
verify = pubkey0.verify(sigBytes, x509.PREFIX_RSA_SHA256 + hashBytes)
elif paymntreq.pki_type == "x509+sha1":
verify = pubkey0.hashAndVerify(sigBytes, msgBytes)
else:
self.error = f"ERROR: unknown pki_type {paymntreq.pki_type} in Payment Request"
return False
if not verify:
self.error = "ERROR: Invalid Signature for Payment Request Data"
return False
### SIG Verified
self.error = 'Signed by Trusted CA: ' + ca.get_common_name()
return True
def verify_dnssec(self, pr, contacts):
sig = pr.signature
alias = pr.pki_data
info = contacts.resolve(alias)
if info.get('validated') is not True:
self.error = "Alias verification failed (DNSSEC)"
return False
if pr.pki_type == "dnssec+audax":
self.requestor = alias
address = info.get('address')
pr.signature = b''
message = pr.SerializeToString()
if ecc.verify_message_with_address(address, sig, message):
self.error = 'Verified with DNSSEC'
return True
else:
self.error = "verify failed"
return False
else:
self.error = "unknown algo"
return False
def has_expired(self):
return self.details.expires and self.details.expires < int(time.time())
def get_expiration_date(self):
return self.details.expires
def get_amount(self):
return sum(map(lambda x:x[2], self.outputs))
def get_address(self):
o = self.outputs[0]
assert o.type == TYPE_ADDRESS
return o.address
def get_requestor(self):
return self.requestor if self.requestor else self.get_address()
def get_verify_status(self):
return self.error if self.requestor else "No Signature"
def get_memo(self):
return self.memo
def get_dict(self):
return {
'requestor': self.get_requestor(),
'memo':self.get_memo(),
'exp': self.get_expiration_date(),
'amount': self.get_amount(),
'signature': self.get_verify_status(),
'txid': self.tx,
'outputs': self.get_outputs()
}
def get_id(self):
return self.id if self.requestor else self.get_address()
def get_outputs(self):
return self.outputs[:]
async def send_payment_and_receive_paymentack(self, raw_tx, refund_addr):
pay_det = self.details
if not self.details.payment_url:
return False, "no url"
paymnt = pb2.Payment()
paymnt.merchant_data = pay_det.merchant_data
paymnt.transactions.append(bfh(raw_tx))
ref_out = paymnt.refund_to.add()
ref_out.script = util.bfh(transaction.Transaction.pay_script(TYPE_ADDRESS, refund_addr))
paymnt.memo = "Paid using Electrum"
pm = paymnt.SerializeToString()
payurl = urllib.parse.urlparse(pay_det.payment_url)
resp_content = None
try:
proxy = Network.get_instance().proxy
async with make_aiohttp_session(proxy, headers=ACK_HEADERS) as session:
async with session.post(payurl.geturl(), data=pm) as response:
resp_content = await response.read()
response.raise_for_status()
try:
paymntack = pb2.PaymentACK()
paymntack.ParseFromString(resp_content)
except Exception:
return False, "PaymentACK could not be processed. Payment was sent; please manually verify that payment was received."
print(f"PaymentACK message received: {paymntack.memo}")
return True, paymntack.memo
except aiohttp.ClientError as e:
error = f"Payment Message/PaymentACK Failed:\n{repr(e)}"
if isinstance(e, aiohttp.ClientResponseError) and e.status == 400 and resp_content:
error += "\n" + resp_content.decode("utf8")
return False, error
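# Build an unsigned BIP70 PaymentRequest protobuf from a request dict
# (address, amount, memo, and optional time/exp fields).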
def make_unsigned_request(req):
from .transaction import Transaction
addr = req['address']
time = req.get('time', 0)
exp = req.get('exp', 0)
if time and type(time) != int:
time = 0
if exp and type(exp) != int:
exp = 0
amount = req['amount']
if amount is None:
amount = 0
memo = req['memo']
script = bfh(Transaction.pay_script(TYPE_ADDRESS, addr))
outputs = [(script, amount)]
pd = pb2.PaymentDetails()
for script, amount in outputs:
pd.outputs.add(amount=amount, script=script)
pd.time = time
pd.expires = time + exp if exp else 0
pd.memo = memo
pr = pb2.PaymentRequest()
pr.serialized_payment_details = pd.SerializeToString()
pr.signature = util.to_bytes('')
return pr
def sign_request_with_alias(pr, alias, alias_privkey):
pr.pki_type = 'dnssec+audax'
pr.pki_data = str(alias)
message = pr.SerializeToString()
ec_key = ecc.ECPrivkey(alias_privkey)
compressed = bitcoin.is_compressed_privkey(alias_privkey)
pr.signature = ec_key.sign_message(message, compressed)
def verify_cert_chain(chain):
""" Verify a chain of certificates. The last certificate is the CA"""
load_ca_list()
# parse the chain
cert_num = len(chain)
x509_chain = []
for i in range(cert_num):
x = x509.X509(bytearray(chain[i]))
x509_chain.append(x)
if i == 0:
x.check_date()
else:
if not x.check_ca():
raise Exception("ERROR: Supplied CA Certificate Error")
if not cert_num > 1:
raise Exception("ERROR: CA Certificate Chain Not Provided by Payment Processor")
# if the root CA is not supplied, add it to the chain
ca = x509_chain[cert_num-1]
if ca.getFingerprint() not in ca_list:
keyID = ca.get_issuer_keyID()
f = ca_keyID.get(keyID)
if f:
root = ca_list[f]
x509_chain.append(root)
else:
raise Exception("Supplied CA Not Found in Trusted CA Store.")
# verify the chain of signatures
cert_num = len(x509_chain)
for i in range(1, cert_num):
x = x509_chain[i]
prev_x = x509_chain[i-1]
algo, sig, data = prev_x.get_signature()
sig = bytearray(sig)
pubkey = rsakey.RSAKey(x.modulus, x.exponent)
if algo == x509.ALGO_RSA_SHA1:
verify = pubkey.hashAndVerify(sig, data)
elif algo == x509.ALGO_RSA_SHA256:
hashBytes = bytearray(hashlib.sha256(data).digest())
verify = pubkey.verify(sig, x509.PREFIX_RSA_SHA256 + hashBytes)
elif algo == x509.ALGO_RSA_SHA384:
hashBytes = bytearray(hashlib.sha384(data).digest())
verify = pubkey.verify(sig, x509.PREFIX_RSA_SHA384 + hashBytes)
elif algo == x509.ALGO_RSA_SHA512:
hashBytes = bytearray(hashlib.sha512(data).digest())
verify = pubkey.verify(sig, x509.PREFIX_RSA_SHA512 + hashBytes)
else:
raise Exception("Algorithm not supported: {}".format(algo))
if not verify:
raise Exception("Certificate not Signed by Provided CA Certificate Chain")
return x509_chain[0], ca
def check_ssl_config(config):
from . import pem
key_path = config.get('ssl_privkey')
cert_path = config.get('ssl_chain')
with open(key_path, 'r', encoding='utf-8') as f:
params = pem.parse_private_key(f.read())
with open(cert_path, 'r', encoding='utf-8') as f:
s = f.read()
bList = pem.dePemList(s, "CERTIFICATE")
# verify chain
x, ca = verify_cert_chain(bList)
# verify that privkey and pubkey match
privkey = rsakey.RSAKey(*params)
pubkey = rsakey.RSAKey(x.modulus, x.exponent)
assert x.modulus == params[0]
assert x.exponent == params[1]
# return requestor
requestor = x.get_common_name()
if requestor.startswith('*.'):
requestor = requestor[2:]
return requestor
def sign_request_with_x509(pr, key_path, cert_path):
from . import pem
with open(key_path, 'r', encoding='utf-8') as f:
params = pem.parse_private_key(f.read())
privkey = rsakey.RSAKey(*params)
with open(cert_path, 'r', encoding='utf-8') as f:
s = f.read()
bList = pem.dePemList(s, "CERTIFICATE")
certificates = pb2.X509Certificates()
certificates.certificate.extend(map(bytes, bList))
pr.pki_type = 'x509+sha256'
pr.pki_data = certificates.SerializeToString()
msgBytes = bytearray(pr.SerializeToString())
hashBytes = bytearray(hashlib.sha256(msgBytes).digest())
sig = privkey.sign(x509.PREFIX_RSA_SHA256 + hashBytes)
pr.signature = bytes(sig)
def serialize_request(req):
pr = make_unsigned_request(req)
signature = req.get('sig')
requestor = req.get('name')
if requestor and signature:
pr.signature = bfh(signature)
pr.pki_type = 'dnssec+audax'
pr.pki_data = str(requestor)
return pr
def make_request(config, req):
pr = make_unsigned_request(req)
key_path = config.get('ssl_privkey')
cert_path = config.get('ssl_chain')
if key_path and cert_path:
sign_request_with_x509(pr, key_path, cert_path)
return pr
class InvoiceStore(Logger):
def __init__(self, storage):
Logger.__init__(self)
self.storage = storage
self.invoices = {}
self.paid = {}
d = self.storage.get('invoices', {})
self.load(d)
def set_paid(self, pr, txid):
pr.tx = txid
pr_id = pr.get_id()
self.paid[txid] = pr_id
if pr_id not in self.invoices:
# in case the user had deleted it previously
self.add(pr)
def load(self, d):
for k, v in d.items():
try:
pr = PaymentRequest(bfh(v.get('hex')))
pr.tx = v.get('txid')
pr.requestor = v.get('requestor')
self.invoices[k] = pr
if pr.tx:
self.paid[pr.tx] = k
except:
continue
def import_file(self, path):
def validate(data):
return data # TODO
import_meta(path, validate, self.on_import)
def on_import(self, data):
self.load(data)
self.save()
def export_file(self, filename):
export_meta(self.dump(), filename)
def dump(self):
d = {}
for k, pr in self.invoices.items():
d[k] = {
'hex': bh2u(pr.raw),
'requestor': pr.requestor,
'txid': pr.tx
}
return d
def save(self):
self.storage.put('invoices', self.dump())
def get_status(self, key):
pr = self.get(key)
if pr is None:
self.logger.info(f"get_status() can't find pr for {key}")
return
if pr.tx is not None:
return PR_PAID
if pr.has_expired():
return PR_EXPIRED
return PR_UNPAID
def add(self, pr):
key = pr.get_id()
self.invoices[key] = pr
self.save()
return key
def remove(self, key):
self.invoices.pop(key)
self.save()
def get(self, k):
return self.invoices.get(k)
def sorted_list(self):
# sort
return self.invoices.values()
def unpaid_invoices(self):
return [self.invoices[k] for k in
filter(lambda x: self.get_status(x) not in (PR_PAID, None),
self.invoices.keys())
]
|
py | 1a4e2985a39b71b6cb9efc82e8cc214589f05a26 | def run():
"""
Run the nose test scripts for rb_cqed.
"""
import nose
# runs tests in rb_cqed.tests module only
nose.run(defaultTest="rb_cqed.tests", argv=['nosetests', '-v']) |
py | 1a4e29d0530bdffbb67ae01f7fbda75966a43954 | """Add multi-column indexes to appropriation and object class program activity table
Revision ID: 51b1bbc0bfde
Revises: 4ebc7a781b31
Create Date: 2019-07-26 12:03:39.154057
"""
# revision identifiers, used by Alembic.
revision = '51b1bbc0bfde'
down_revision = '4ebc7a781b31'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.create_index('ix_appropriation_tas_id_submission_id', 'appropriation', ['tas_id', 'submission_id'], unique=False)
op.create_index('ix_oc_pa_tas_id_submission_id', 'object_class_program_activity', ['tas_id', 'submission_id'], unique=False)
# ### end Alembic commands ###
def downgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_oc_pa_tas_id_submission_id', table_name='object_class_program_activity')
op.drop_index('ix_appropriation_tas_id_submission_id', table_name='appropriation')
# ### end Alembic commands ###
|
py | 1a4e2b7db4de95b978b183696c1fecd840efb4c3 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('example', '0002_relatedsubscription'),
]
operations = [
migrations.CreateModel(
name='Summary',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('subscriptions', models.ManyToManyField(related_name='summaries', to='example.StockSubscription')),
],
),
]
|
py | 1a4e2b91d8e2a070f6a44b048df3312b511d9ced | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.aiplatform_v1beta1.types import entity_type
from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type
from google.cloud.aiplatform_v1beta1.types import feature
from google.cloud.aiplatform_v1beta1.types import feature as gca_feature
from google.cloud.aiplatform_v1beta1.types import featurestore
from google.cloud.aiplatform_v1beta1.types import featurestore_service
from google.longrunning import operations_pb2 # type: ignore
from .base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import FeaturestoreServiceGrpcTransport
class FeaturestoreServiceGrpcAsyncIOTransport(FeaturestoreServiceTransport):
"""gRPC AsyncIO backend transport for FeaturestoreService.
The service that handles CRUD and List for resources for
Featurestore.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def create_featurestore(
self,
) -> Callable[
[featurestore_service.CreateFeaturestoreRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the create featurestore method over gRPC.
Creates a new Featurestore in a given project and
location.
Returns:
Callable[[~.CreateFeaturestoreRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_featurestore" not in self._stubs:
self._stubs["create_featurestore"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeaturestore",
request_serializer=featurestore_service.CreateFeaturestoreRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_featurestore"]
@property
def get_featurestore(
self,
) -> Callable[
[featurestore_service.GetFeaturestoreRequest],
Awaitable[featurestore.Featurestore],
]:
r"""Return a callable for the get featurestore method over gRPC.
Gets details of a single Featurestore.
Returns:
Callable[[~.GetFeaturestoreRequest],
Awaitable[~.Featurestore]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_featurestore" not in self._stubs:
self._stubs["get_featurestore"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeaturestore",
request_serializer=featurestore_service.GetFeaturestoreRequest.serialize,
response_deserializer=featurestore.Featurestore.deserialize,
)
return self._stubs["get_featurestore"]
@property
def list_featurestores(
self,
) -> Callable[
[featurestore_service.ListFeaturestoresRequest],
Awaitable[featurestore_service.ListFeaturestoresResponse],
]:
r"""Return a callable for the list featurestores method over gRPC.
Lists Featurestores in a given project and location.
Returns:
Callable[[~.ListFeaturestoresRequest],
Awaitable[~.ListFeaturestoresResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_featurestores" not in self._stubs:
self._stubs["list_featurestores"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeaturestores",
request_serializer=featurestore_service.ListFeaturestoresRequest.serialize,
response_deserializer=featurestore_service.ListFeaturestoresResponse.deserialize,
)
return self._stubs["list_featurestores"]
@property
def update_featurestore(
self,
) -> Callable[
[featurestore_service.UpdateFeaturestoreRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the update featurestore method over gRPC.
Updates the parameters of a single Featurestore.
Returns:
Callable[[~.UpdateFeaturestoreRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_featurestore" not in self._stubs:
self._stubs["update_featurestore"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeaturestore",
request_serializer=featurestore_service.UpdateFeaturestoreRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_featurestore"]
@property
def delete_featurestore(
self,
) -> Callable[
[featurestore_service.DeleteFeaturestoreRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the delete featurestore method over gRPC.
Deletes a single Featurestore. The Featurestore must not contain
any EntityTypes or ``force`` must be set to true for the request
to succeed.
Returns:
Callable[[~.DeleteFeaturestoreRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_featurestore" not in self._stubs:
self._stubs["delete_featurestore"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeaturestore",
request_serializer=featurestore_service.DeleteFeaturestoreRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_featurestore"]
@property
def create_entity_type(
self,
) -> Callable[
[featurestore_service.CreateEntityTypeRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the create entity type method over gRPC.
Creates a new EntityType in a given Featurestore.
Returns:
Callable[[~.CreateEntityTypeRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_entity_type" not in self._stubs:
self._stubs["create_entity_type"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateEntityType",
request_serializer=featurestore_service.CreateEntityTypeRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_entity_type"]
@property
def get_entity_type(
self,
) -> Callable[
[featurestore_service.GetEntityTypeRequest], Awaitable[entity_type.EntityType]
]:
r"""Return a callable for the get entity type method over gRPC.
Gets details of a single EntityType.
Returns:
Callable[[~.GetEntityTypeRequest],
Awaitable[~.EntityType]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_entity_type" not in self._stubs:
self._stubs["get_entity_type"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetEntityType",
request_serializer=featurestore_service.GetEntityTypeRequest.serialize,
response_deserializer=entity_type.EntityType.deserialize,
)
return self._stubs["get_entity_type"]
@property
def list_entity_types(
self,
) -> Callable[
[featurestore_service.ListEntityTypesRequest],
Awaitable[featurestore_service.ListEntityTypesResponse],
]:
r"""Return a callable for the list entity types method over gRPC.
Lists EntityTypes in a given Featurestore.
Returns:
Callable[[~.ListEntityTypesRequest],
Awaitable[~.ListEntityTypesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_entity_types" not in self._stubs:
self._stubs["list_entity_types"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListEntityTypes",
request_serializer=featurestore_service.ListEntityTypesRequest.serialize,
response_deserializer=featurestore_service.ListEntityTypesResponse.deserialize,
)
return self._stubs["list_entity_types"]
@property
def update_entity_type(
self,
) -> Callable[
[featurestore_service.UpdateEntityTypeRequest],
Awaitable[gca_entity_type.EntityType],
]:
r"""Return a callable for the update entity type method over gRPC.
Updates the parameters of a single EntityType.
Returns:
Callable[[~.UpdateEntityTypeRequest],
Awaitable[~.EntityType]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_entity_type" not in self._stubs:
self._stubs["update_entity_type"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateEntityType",
request_serializer=featurestore_service.UpdateEntityTypeRequest.serialize,
response_deserializer=gca_entity_type.EntityType.deserialize,
)
return self._stubs["update_entity_type"]
@property
def delete_entity_type(
self,
) -> Callable[
[featurestore_service.DeleteEntityTypeRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the delete entity type method over gRPC.
Deletes a single EntityType. The EntityType must not have any
Features or ``force`` must be set to true for the request to
succeed.
Returns:
Callable[[~.DeleteEntityTypeRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_entity_type" not in self._stubs:
self._stubs["delete_entity_type"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteEntityType",
request_serializer=featurestore_service.DeleteEntityTypeRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_entity_type"]
@property
def create_feature(
self,
) -> Callable[
[featurestore_service.CreateFeatureRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the create feature method over gRPC.
Creates a new Feature in a given EntityType.
Returns:
Callable[[~.CreateFeatureRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_feature" not in self._stubs:
self._stubs["create_feature"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeature",
request_serializer=featurestore_service.CreateFeatureRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_feature"]
@property
def batch_create_features(
self,
) -> Callable[
[featurestore_service.BatchCreateFeaturesRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the batch create features method over gRPC.
Creates a batch of Features in a given EntityType.
Returns:
Callable[[~.BatchCreateFeaturesRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_create_features" not in self._stubs:
self._stubs["batch_create_features"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchCreateFeatures",
request_serializer=featurestore_service.BatchCreateFeaturesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["batch_create_features"]
@property
def get_feature(
self,
) -> Callable[[featurestore_service.GetFeatureRequest], Awaitable[feature.Feature]]:
r"""Return a callable for the get feature method over gRPC.
Gets details of a single Feature.
Returns:
Callable[[~.GetFeatureRequest],
Awaitable[~.Feature]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_feature" not in self._stubs:
self._stubs["get_feature"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeature",
request_serializer=featurestore_service.GetFeatureRequest.serialize,
response_deserializer=feature.Feature.deserialize,
)
return self._stubs["get_feature"]
@property
def list_features(
self,
) -> Callable[
[featurestore_service.ListFeaturesRequest],
Awaitable[featurestore_service.ListFeaturesResponse],
]:
r"""Return a callable for the list features method over gRPC.
Lists Features in a given EntityType.
Returns:
Callable[[~.ListFeaturesRequest],
Awaitable[~.ListFeaturesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_features" not in self._stubs:
self._stubs["list_features"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeatures",
request_serializer=featurestore_service.ListFeaturesRequest.serialize,
response_deserializer=featurestore_service.ListFeaturesResponse.deserialize,
)
return self._stubs["list_features"]
@property
def update_feature(
self,
) -> Callable[
[featurestore_service.UpdateFeatureRequest], Awaitable[gca_feature.Feature]
]:
r"""Return a callable for the update feature method over gRPC.
Updates the parameters of a single Feature.
Returns:
Callable[[~.UpdateFeatureRequest],
Awaitable[~.Feature]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_feature" not in self._stubs:
self._stubs["update_feature"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeature",
request_serializer=featurestore_service.UpdateFeatureRequest.serialize,
response_deserializer=gca_feature.Feature.deserialize,
)
return self._stubs["update_feature"]
@property
def delete_feature(
self,
) -> Callable[
[featurestore_service.DeleteFeatureRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the delete feature method over gRPC.
Deletes a single Feature.
Returns:
Callable[[~.DeleteFeatureRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_feature" not in self._stubs:
self._stubs["delete_feature"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeature",
request_serializer=featurestore_service.DeleteFeatureRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_feature"]
@property
def import_feature_values(
self,
) -> Callable[
[featurestore_service.ImportFeatureValuesRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the import feature values method over gRPC.
Imports Feature values into the Featurestore from a
source storage.
The progress of the import is tracked by the returned
operation. The imported features are guaranteed to be
visible to subsequent read operations after the
operation is marked as successfully done.
If an import operation fails, the Feature values
returned from reads and exports may be inconsistent. If
consistency is required, the caller must retry the same
import request again and wait till the new operation
returned is marked as successfully done.
There are also scenarios where the caller can cause
inconsistency.
- Source data for import contains multiple distinct
Feature values for the same entity ID and timestamp.
- Source is modified during an import. This includes
adding, updating, or removing source data and/or
metadata. Examples of updating metadata include but are
not limited to changing storage location, storage class,
or retention policy.
- Online serving cluster is under-provisioned.
Returns:
Callable[[~.ImportFeatureValuesRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "import_feature_values" not in self._stubs:
self._stubs["import_feature_values"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/ImportFeatureValues",
request_serializer=featurestore_service.ImportFeatureValuesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["import_feature_values"]
@property
def batch_read_feature_values(
self,
) -> Callable[
[featurestore_service.BatchReadFeatureValuesRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the batch read feature values method over gRPC.
Batch reads Feature values from a Featurestore.
This API enables batch reading Feature values, where
each read instance in the batch may read Feature values
of entities from one or more EntityTypes. Point-in-time
correctness is guaranteed for Feature values of each
read instance as of each instance's read timestamp.
Returns:
Callable[[~.BatchReadFeatureValuesRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_read_feature_values" not in self._stubs:
self._stubs["batch_read_feature_values"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchReadFeatureValues",
request_serializer=featurestore_service.BatchReadFeatureValuesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["batch_read_feature_values"]
@property
def export_feature_values(
self,
) -> Callable[
[featurestore_service.ExportFeatureValuesRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the export feature values method over gRPC.
Exports Feature values from all the entities of a
target EntityType.
Returns:
Callable[[~.ExportFeatureValuesRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "export_feature_values" not in self._stubs:
self._stubs["export_feature_values"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/ExportFeatureValues",
request_serializer=featurestore_service.ExportFeatureValuesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["export_feature_values"]
@property
def search_features(
self,
) -> Callable[
[featurestore_service.SearchFeaturesRequest],
Awaitable[featurestore_service.SearchFeaturesResponse],
]:
r"""Return a callable for the search features method over gRPC.
Searches Features matching a query in a given
project.
Returns:
Callable[[~.SearchFeaturesRequest],
Awaitable[~.SearchFeaturesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "search_features" not in self._stubs:
self._stubs["search_features"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/SearchFeatures",
request_serializer=featurestore_service.SearchFeaturesRequest.serialize,
response_deserializer=featurestore_service.SearchFeaturesResponse.deserialize,
)
return self._stubs["search_features"]
def close(self):
return self.grpc_channel.close()
__all__ = ("FeaturestoreServiceGrpcAsyncIOTransport",)
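# Illustrative usage sketch (not part of the generated module; the resource
# `name` value and the surrounding asyncio plumbing are assumptions):
#
#   async def fetch_feature(transport):
#       # Each property above returns a callable that performs the unary-unary RPC.
#       request = featurestore_service.GetFeatureRequest(
#           name="projects/p/locations/l/featurestores/f/entityTypes/e/features/x"
#       )
#       return await transport.get_feature(request)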
|
py | 1a4e2c0281571b8c7bbc7da8dcb377835adbb091 | """HTML handling.
Copyright 2003-2006 John J. Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it under
the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import codecs
import copy
import htmlentitydefs
import re
import _sgmllib_copy as sgmllib
import _beautifulsoup
import _form
from _headersutil import split_header_words, is_html as _is_html
import _request
import _rfc3986
DEFAULT_ENCODING = "latin-1"
COMPRESS_RE = re.compile(r"\s+")
class CachingGeneratorFunction(object):
"""Caching wrapper around a no-arguments iterable."""
def __init__(self, iterable):
self._cache = []
# wrap iterable to make it non-restartable (otherwise, repeated
# __call__ would give incorrect results)
self._iterator = iter(iterable)
def __call__(self):
cache = self._cache
for item in cache:
yield item
for item in self._iterator:
cache.append(item)
yield item
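# Illustrative usage sketch (assumed example, not part of the original module):
# the wrapper replays cached items on every call, then keeps draining the
# underlying iterator, so repeated calls see the same sequence.
#
#   links = CachingGeneratorFunction([1, 2, 3])
#   list(links())   # -> [1, 2, 3]  (fills the cache)
#   list(links())   # -> [1, 2, 3]  (served from the cache)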
class EncodingFinder:
def __init__(self, default_encoding):
self._default_encoding = default_encoding
def encoding(self, response):
# HTTPEquivProcessor may be in use, so both HTTP and HTTP-EQUIV
# headers may be in the response. HTTP-EQUIV headers come last,
# so try in order from first to last.
for ct in response.info().getheaders("content-type"):
for k, v in split_header_words([ct])[0]:
if k == "charset":
encoding = v
try:
codecs.lookup(v)
except LookupError:
continue
else:
return encoding
return self._default_encoding
class ResponseTypeFinder:
def __init__(self, allow_xhtml):
self._allow_xhtml = allow_xhtml
def is_html(self, response, encoding):
ct_hdrs = response.info().getheaders("content-type")
url = response.geturl()
# XXX encoding
return _is_html(ct_hdrs, url, self._allow_xhtml)
class Args(object):
# idea for this argument-processing trick is from Peter Otten
def __init__(self, args_map):
self.__dict__["dictionary"] = dict(args_map)
def __getattr__(self, key):
try:
return self.dictionary[key]
except KeyError:
return getattr(self.__class__, key)
def __setattr__(self, key, value):
if key == "dictionary":
raise AttributeError()
self.dictionary[key] = value
def form_parser_args(
select_default=False,
form_parser_class=None,
request_class=None,
backwards_compat=False,
):
return Args(locals())
class Link:
def __init__(self, base_url, url, text, tag, attrs):
assert None not in [url, tag, attrs]
self.base_url = base_url
self.absolute_url = _rfc3986.urljoin(base_url, url)
self.url, self.text, self.tag, self.attrs = url, text, tag, attrs
def __cmp__(self, other):
try:
for name in "url", "text", "tag", "attrs":
if getattr(self, name) != getattr(other, name):
return -1
except AttributeError:
return -1
return 0
def __repr__(self):
return "Link(base_url=%r, url=%r, text=%r, tag=%r, attrs=%r)" % (
self.base_url, self.url, self.text, self.tag, self.attrs)
class LinksFactory:
def __init__(self,
link_parser_class=None,
link_class=Link,
urltags=None,
):
import _pullparser
if link_parser_class is None:
link_parser_class = _pullparser.TolerantPullParser
self.link_parser_class = link_parser_class
self.link_class = link_class
if urltags is None:
urltags = {
"a": "href",
"area": "href",
"frame": "src",
"iframe": "src",
}
self.urltags = urltags
self._response = None
self._encoding = None
def set_response(self, response, base_url, encoding):
self._response = response
self._encoding = encoding
self._base_url = base_url
def links(self):
"""Return an iterator that provides links of the document."""
response = self._response
encoding = self._encoding
base_url = self._base_url
p = self.link_parser_class(response, encoding=encoding)
try:
for token in p.tags(*(self.urltags.keys()+["base"])):
if token.type == "endtag":
continue
if token.data == "base":
base_href = dict(token.attrs).get("href")
if base_href is not None:
base_url = base_href
continue
attrs = dict(token.attrs)
tag = token.data
text = None
# XXX use attr_encoding for ref'd doc if that doc does not
# provide one by other means
#attr_encoding = attrs.get("charset")
url = attrs.get(self.urltags[tag]) # XXX is "" a valid URL?
if not url:
# Probably an <A NAME="blah"> link or <AREA NOHREF...>.
# For our purposes a link is something with a URL, so
# ignore this.
continue
url = _rfc3986.clean_url(url, encoding)
if tag == "a":
if token.type != "startendtag":
# hmm, this'd break if end tag is missing
text = p.get_compressed_text(("endtag", tag))
# but this doesn't work for e.g.
# <a href="blah"><b>Andy</b></a>
#text = p.get_compressed_text()
yield Link(base_url, url, text, tag, token.attrs)
except sgmllib.SGMLParseError, exc:
raise _form.ParseError(exc)
class FormsFactory:
"""Makes a sequence of objects satisfying HTMLForm interface.
After calling .forms(), the .global_form attribute is a form object
containing all controls not a descendant of any FORM element.
For constructor argument docs, see ParseResponse argument docs.
"""
def __init__(self,
select_default=False,
form_parser_class=None,
request_class=None,
backwards_compat=False,
):
self.select_default = select_default
if form_parser_class is None:
form_parser_class = _form.FormParser
self.form_parser_class = form_parser_class
if request_class is None:
request_class = _request.Request
self.request_class = request_class
self.backwards_compat = backwards_compat
self._response = None
self.encoding = None
self.global_form = None
def set_response(self, response, encoding):
self._response = response
self.encoding = encoding
self.global_form = None
def forms(self):
encoding = self.encoding
forms = _form.ParseResponseEx(
self._response,
select_default=self.select_default,
form_parser_class=self.form_parser_class,
request_class=self.request_class,
encoding=encoding,
_urljoin=_rfc3986.urljoin,
_urlparse=_rfc3986.urlsplit,
_urlunparse=_rfc3986.urlunsplit,
)
self.global_form = forms[0]
return forms[1:]
class TitleFactory:
def __init__(self):
self._response = self._encoding = None
def set_response(self, response, encoding):
self._response = response
self._encoding = encoding
def _get_title_text(self, parser):
import _pullparser
text = []
tok = None
while 1:
try:
tok = parser.get_token()
except _pullparser.NoMoreTokensError:
break
if tok.type == "data":
text.append(str(tok))
elif tok.type == "entityref":
t = unescape("&%s;" % tok.data,
parser._entitydefs, parser.encoding)
text.append(t)
elif tok.type == "charref":
t = unescape_charref(tok.data, parser.encoding)
text.append(t)
elif tok.type in ["starttag", "endtag", "startendtag"]:
tag_name = tok.data
if tok.type == "endtag" and tag_name == "title":
break
text.append(str(tok))
return COMPRESS_RE.sub(" ", "".join(text).strip())
def title(self):
import _pullparser
p = _pullparser.TolerantPullParser(
self._response, encoding=self._encoding)
try:
try:
p.get_tag("title")
except _pullparser.NoMoreTokensError:
return None
else:
return self._get_title_text(p)
except sgmllib.SGMLParseError, exc:
raise _form.ParseError(exc)
def unescape(data, entities, encoding):
if data is None or "&" not in data:
return data
def replace_entities(match):
ent = match.group()
if ent[1] == "#":
return unescape_charref(ent[2:-1], encoding)
repl = entities.get(ent[1:-1])
if repl is not None:
repl = unichr(repl)
if type(repl) != type(""):
try:
repl = repl.encode(encoding)
except UnicodeError:
repl = ent
else:
repl = ent
return repl
return re.sub(r"&#?[A-Za-z0-9]+?;", replace_entities, data)
def unescape_charref(data, encoding):
name, base = data, 10
if name.startswith("x"):
        name, base = name[1:], 16
uc = unichr(int(name, base))
if encoding is None:
return uc
else:
try:
repl = uc.encode(encoding)
except UnicodeError:
repl = "&#%s;" % data
return repl
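# Illustrative examples (assumed, not part of the original module):
#
#   unescape_charref("65", None)        # -> u"A"  (decimal character reference)
#   unescape_charref("x20AC", "utf-8")  # -> "\xe2\x82\xac"  (euro sign as UTF-8 bytes)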
class MechanizeBs(_beautifulsoup.BeautifulSoup):
_entitydefs = htmlentitydefs.name2codepoint
# don't want the magic Microsoft-char workaround
PARSER_MASSAGE = [(re.compile('(<[^<>]*)/>'),
lambda(x):x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda(x):'<!' + x.group(1) + '>')
]
def __init__(self, encoding, text=None, avoidParserProblems=True,
initialTextIsEverything=True):
self._encoding = encoding
_beautifulsoup.BeautifulSoup.__init__(
self, text, avoidParserProblems, initialTextIsEverything)
def handle_charref(self, ref):
t = unescape("&#%s;"%ref, self._entitydefs, self._encoding)
self.handle_data(t)
def handle_entityref(self, ref):
t = unescape("&%s;"%ref, self._entitydefs, self._encoding)
self.handle_data(t)
def unescape_attrs(self, attrs):
escaped_attrs = []
for key, val in attrs:
val = unescape(val, self._entitydefs, self._encoding)
escaped_attrs.append((key, val))
return escaped_attrs
class RobustLinksFactory:
compress_re = COMPRESS_RE
def __init__(self,
link_parser_class=None,
link_class=Link,
urltags=None,
):
if link_parser_class is None:
link_parser_class = MechanizeBs
self.link_parser_class = link_parser_class
self.link_class = link_class
if urltags is None:
urltags = {
"a": "href",
"area": "href",
"frame": "src",
"iframe": "src",
}
self.urltags = urltags
self._bs = None
self._encoding = None
self._base_url = None
def set_soup(self, soup, base_url, encoding):
self._bs = soup
self._base_url = base_url
self._encoding = encoding
def links(self):
bs = self._bs
base_url = self._base_url
encoding = self._encoding
for ch in bs.recursiveChildGenerator():
if (isinstance(ch, _beautifulsoup.Tag) and
ch.name in self.urltags.keys()+["base"]):
link = ch
attrs = bs.unescape_attrs(link.attrs)
attrs_dict = dict(attrs)
if link.name == "base":
base_href = attrs_dict.get("href")
if base_href is not None:
base_url = base_href
continue
url_attr = self.urltags[link.name]
url = attrs_dict.get(url_attr)
if not url:
continue
url = _rfc3986.clean_url(url, encoding)
text = link.fetchText(lambda t: True)
if not text:
# follow _pullparser's weird behaviour rigidly
if link.name == "a":
text = ""
else:
text = None
else:
text = self.compress_re.sub(" ", " ".join(text).strip())
yield Link(base_url, url, text, link.name, attrs)
class RobustFormsFactory(FormsFactory):
def __init__(self, *args, **kwds):
args = form_parser_args(*args, **kwds)
if args.form_parser_class is None:
args.form_parser_class = _form.RobustFormParser
FormsFactory.__init__(self, **args.dictionary)
def set_response(self, response, encoding):
self._response = response
self.encoding = encoding
class RobustTitleFactory:
def __init__(self):
self._bs = self._encoding = None
def set_soup(self, soup, encoding):
self._bs = soup
self._encoding = encoding
def title(self):
title = self._bs.first("title")
if title == _beautifulsoup.Null:
return None
else:
inner_html = "".join([str(node) for node in title.contents])
return COMPRESS_RE.sub(" ", inner_html.strip())
class Factory:
"""Factory for forms, links, etc.
This interface may expand in future.
Public methods:
set_request_class(request_class)
set_response(response)
forms()
links()
Public attributes:
Note that accessing these attributes may raise ParseError.
encoding: string specifying the encoding of response if it contains a text
document (this value is left unspecified for documents that do not have
an encoding, e.g. an image file)
is_html: true if response contains an HTML document (XHTML may be
regarded as HTML too)
title: page title, or None if no title or not HTML
global_form: form object containing all controls that are not descendants
of any FORM element, or None if the forms_factory does not support
supplying a global form
"""
LAZY_ATTRS = ["encoding", "is_html", "title", "global_form"]
def __init__(self, forms_factory, links_factory, title_factory,
encoding_finder=EncodingFinder(DEFAULT_ENCODING),
response_type_finder=ResponseTypeFinder(allow_xhtml=False),
):
"""
Pass keyword arguments only.
default_encoding: character encoding to use if encoding cannot be
determined (or guessed) from the response. You should turn on
HTTP-EQUIV handling if you want the best chance of getting this right
without resorting to this default. The default value of this
parameter (currently latin-1) may change in future.
"""
self._forms_factory = forms_factory
self._links_factory = links_factory
self._title_factory = title_factory
self._encoding_finder = encoding_finder
self._response_type_finder = response_type_finder
self.set_response(None)
def set_request_class(self, request_class):
"""Set request class (mechanize.Request by default).
HTMLForm instances returned by .forms() will return instances of this
class when .click()ed.
"""
self._forms_factory.request_class = request_class
def set_response(self, response):
"""Set response.
The response must either be None or implement the same interface as
objects returned by mechanize.urlopen().
"""
self._response = response
self._forms_genf = self._links_genf = None
self._get_title = None
for name in self.LAZY_ATTRS:
try:
delattr(self, name)
except AttributeError:
pass
def __getattr__(self, name):
if name not in self.LAZY_ATTRS:
return getattr(self.__class__, name)
if name == "encoding":
self.encoding = self._encoding_finder.encoding(
copy.copy(self._response))
return self.encoding
elif name == "is_html":
self.is_html = self._response_type_finder.is_html(
copy.copy(self._response), self.encoding)
return self.is_html
elif name == "title":
if self.is_html:
self.title = self._title_factory.title()
else:
self.title = None
return self.title
elif name == "global_form":
self.forms()
return self.global_form
def forms(self):
"""Return iterable over HTMLForm-like objects.
Raises mechanize.ParseError on failure.
"""
# this implementation sets .global_form as a side-effect, for benefit
# of __getattr__ impl
if self._forms_genf is None:
try:
self._forms_genf = CachingGeneratorFunction(
self._forms_factory.forms())
except: # XXXX define exception!
self.set_response(self._response)
raise
self.global_form = getattr(
self._forms_factory, "global_form", None)
return self._forms_genf()
def links(self):
"""Return iterable over mechanize.Link-like objects.
Raises mechanize.ParseError on failure.
"""
if self._links_genf is None:
try:
self._links_genf = CachingGeneratorFunction(
self._links_factory.links())
except: # XXXX define exception!
self.set_response(self._response)
raise
return self._links_genf()
class DefaultFactory(Factory):
"""Based on sgmllib."""
def __init__(self, i_want_broken_xhtml_support=False):
Factory.__init__(
self,
forms_factory=FormsFactory(),
links_factory=LinksFactory(),
title_factory=TitleFactory(),
response_type_finder=ResponseTypeFinder(
allow_xhtml=i_want_broken_xhtml_support),
)
def set_response(self, response):
Factory.set_response(self, response)
if response is not None:
self._forms_factory.set_response(
copy.copy(response), self.encoding)
self._links_factory.set_response(
copy.copy(response), response.geturl(), self.encoding)
self._title_factory.set_response(
copy.copy(response), self.encoding)
class RobustFactory(Factory):
"""Based on BeautifulSoup, hopefully a bit more robust to bad HTML than is
DefaultFactory.
"""
def __init__(self, i_want_broken_xhtml_support=False,
soup_class=None):
Factory.__init__(
self,
forms_factory=RobustFormsFactory(),
links_factory=RobustLinksFactory(),
title_factory=RobustTitleFactory(),
response_type_finder=ResponseTypeFinder(
allow_xhtml=i_want_broken_xhtml_support),
)
if soup_class is None:
soup_class = MechanizeBs
self._soup_class = soup_class
def set_response(self, response):
Factory.set_response(self, response)
if response is not None:
data = response.read()
soup = self._soup_class(self.encoding, data)
self._forms_factory.set_response(
copy.copy(response), self.encoding)
self._links_factory.set_soup(
soup, response.geturl(), self.encoding)
self._title_factory.set_soup(soup, self.encoding)
|
py | 1a4e2db38e8bfb322c1eb669756e5f0b3c750aec | from django.core.mail import send_mail
from rest_framework.decorators import api_view
from rest_framework.pagination import PageNumberPagination
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from companies.models import Company
from companies.serializers import CompanySerializer
class CompanyViewSet(ModelViewSet):
serializer_class = CompanySerializer
queryset = Company.objects.all().order_by("-last_update")
pagination_class = PageNumberPagination
@api_view(http_method_names=["POST"])
def send_company_email(request: Request) -> Response:
"""
    Sends an email with the request payload.
sender: [email protected]
receiver: [email protected]
"""
send_mail(
subject=request.data.get("subject"),
message=request.data.get("message"),
from_email="[email protected]",
recipient_list=["[email protected]"],
)
return Response(
{"status": "success", "info": "email sent successfully"}, status=200
)
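# Illustrative usage sketch (assumptions: the view is routed at "/send-email/";
# the subject/message keys mirror the payload read above):
#
#   from rest_framework.test import APIClient
#
#   client = APIClient()
#   response = client.post(
#       "/send-email/",
#       {"subject": "Hello", "message": "Test body"},
#       format="json",
#   )
#   assert response.status_code == 200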
|
py | 1a4e2e69cf2f762aeb6d216b155a534b12bc6662 | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiple RPC users."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import str_to_b64str, assert_equal
import os
import http.client
import urllib.parse
class HTTPBasicsTest (BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def setup_chain(self):
super().setup_chain()
#Append rpcauth to oblivion.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
rpcuser = "rpcuser=rpcuser💻"
rpcpassword = "rpcpassword=rpcpassword🔑"
with open(os.path.join(self.options.tmpdir+"/node0", "oblivion.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
with open(os.path.join(self.options.tmpdir+"/node1", "oblivion.conf"), 'a', encoding='utf8') as f:
f.write(rpcuser+"\n")
f.write(rpcpassword+"\n")
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urllib.parse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcuser tool
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
#Second authpair with different username
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
authpairnew = "rt:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Use new authpair to confirm both work
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong login name with rt's password
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Wrong password for rt
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Correct for rt2
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong password for rt2
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
###############################################################
# Check correctness of the rpcuser/rpcpassword config options #
###############################################################
url = urllib.parse.urlparse(self.nodes[1].url)
# rpcuser and rpcpassword authpair
rpcuserauthpair = "rpcuser💻:rpcpassword🔑"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong login name with rpcuser's password
rpcuserauthpair = "rpcuserwrong:rpcpassword"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Wrong password for rpcuser
rpcuserauthpair = "rpcuser:rpcpasswordwrong"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
|
py | 1a4e32b4d8cace386858d3d7d0551d3e4168647b | ###############################################################################
#
# ChartScatter - A class for writing the Excel XLSX Scatter charts.
#
# Copyright 2013-2016, John McNamara, [email protected]
#
from warnings import warn
from . import chart
class ChartScatter(chart.Chart):
"""
A class for writing the Excel XLSX Scatter charts.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self, options=None):
"""
Constructor.
"""
super(ChartScatter, self).__init__()
if options is None:
options = {}
self.subtype = options.get('subtype')
if not self.subtype:
self.subtype = 'marker_only'
self.cross_between = 'midCat'
self.horiz_val_axis = 0
self.val_axis_position = 'b'
self.smooth_allowed = True
self.requires_category = True
# Set the available data label positions for this chart type.
self.label_position_default = 'right'
self.label_positions = {
'center': 'ctr',
'right': 'r',
'left': 'l',
'above': 't',
'below': 'b',
# For backward compatibility.
'top': 't',
'bottom': 'b'}
def combine(self, chart=None):
"""
Create a combination chart with a secondary chart.
Note: Override parent method to add a warning.
Args:
chart: The secondary chart to combine with the primary chart.
Returns:
Nothing.
"""
if chart is None:
return
warn('Combined chart not currently supported with scatter chart '
'as the primary chart')
###########################################################################
#
# Private API.
#
###########################################################################
def _write_chart_type(self, args):
# Override the virtual superclass method with a chart specific method.
# Write the c:scatterChart element.
self._write_scatter_chart(args)
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_scatter_chart(self, args):
# Write the <c:scatterChart> element.
if args['primary_axes']:
series = self._get_primary_axes_series()
else:
series = self._get_secondary_axes_series()
if not len(series):
return
style = 'lineMarker'
subtype = self.subtype
# Set the user defined chart subtype.
if subtype == 'marker_only':
style = 'lineMarker'
if subtype == 'straight_with_markers':
style = 'lineMarker'
if subtype == 'straight':
style = 'lineMarker'
if subtype == 'smooth_with_markers':
style = 'smoothMarker'
if subtype == 'smooth':
style = 'smoothMarker'
# Add default formatting to the series data.
self._modify_series_formatting()
self._xml_start_tag('c:scatterChart')
# Write the c:scatterStyle element.
self._write_scatter_style(style)
# Write the series elements.
for data in series:
self._write_ser(data)
# Write the c:marker element.
self._write_marker_value()
# Write the c:axId elements
self._write_axis_ids(args)
self._xml_end_tag('c:scatterChart')
def _write_ser(self, series):
# Over-ridden to write c:xVal/c:yVal instead of c:cat/c:val elements.
# Write the <c:ser> element.
index = self.series_index
self.series_index += 1
self._xml_start_tag('c:ser')
# Write the c:idx element.
self._write_idx(index)
# Write the c:order element.
self._write_order(index)
# Write the series name.
self._write_series_name(series)
# Write the c:spPr element.
self._write_sp_pr(series)
# Write the c:marker element.
self._write_marker(series.get('marker'))
# Write the c:dPt element.
self._write_d_pt(series.get('points'))
# Write the c:dLbls element.
self._write_d_lbls(series.get('labels'))
# Write the c:trendline element.
self._write_trendline(series.get('trendline'))
# Write the c:errBars element.
self._write_error_bars(series.get('error_bars'))
# Write the c:xVal element.
self._write_x_val(series)
# Write the c:yVal element.
self._write_y_val(series)
# Write the c:smooth element.
if 'smooth' in self.subtype and series['smooth'] is None:
# Default is on for smooth scatter charts.
self._write_c_smooth(True)
else:
self._write_c_smooth(series['smooth'])
self._xml_end_tag('c:ser')
def _write_plot_area(self):
# Over-ridden to have 2 valAx elements for scatter charts instead
# of catAx/valAx.
#
# Write the <c:plotArea> element.
self._xml_start_tag('c:plotArea')
# Write the c:layout element.
self._write_layout(self.plotarea.get('layout'), 'plot')
# Write the subclass chart elements for primary and secondary axes.
self._write_chart_type({'primary_axes': 1})
self._write_chart_type({'primary_axes': 0})
# Write c:catAx and c:valAx elements for series using primary axes.
self._write_cat_val_axis({'x_axis': self.x_axis,
'y_axis': self.y_axis,
'axis_ids': self.axis_ids,
'position': 'b',
})
tmp = self.horiz_val_axis
self.horiz_val_axis = 1
self._write_val_axis({'x_axis': self.x_axis,
'y_axis': self.y_axis,
'axis_ids': self.axis_ids,
'position': 'l',
})
self.horiz_val_axis = tmp
# Write c:valAx and c:catAx elements for series using secondary axes
self._write_cat_val_axis({'x_axis': self.x2_axis,
'y_axis': self.y2_axis,
'axis_ids': self.axis2_ids,
'position': 'b',
})
self.horiz_val_axis = 1
self._write_val_axis({'x_axis': self.x2_axis,
'y_axis': self.y2_axis,
'axis_ids': self.axis2_ids,
'position': 'l',
})
# Write the c:spPr element for the plotarea formatting.
self._write_sp_pr(self.plotarea)
self._xml_end_tag('c:plotArea')
def _write_x_val(self, series):
# Write the <c:xVal> element.
formula = series.get('categories')
data_id = series.get('cat_data_id')
data = self.formula_data[data_id]
self._xml_start_tag('c:xVal')
# Check the type of cached data.
data_type = self._get_data_type(data)
# TODO. Can a scatter plot have non-numeric data.
if data_type == 'str':
# Write the c:numRef element.
self._write_str_ref(formula, data, data_type)
else:
# Write the c:numRef element.
self._write_num_ref(formula, data, data_type)
self._xml_end_tag('c:xVal')
def _write_y_val(self, series):
# Write the <c:yVal> element.
formula = series.get('values')
data_id = series.get('val_data_id')
data = self.formula_data[data_id]
self._xml_start_tag('c:yVal')
# Unlike Cat axes data should only be numeric.
# Write the c:numRef element.
self._write_num_ref(formula, data, 'num')
self._xml_end_tag('c:yVal')
def _write_scatter_style(self, val):
# Write the <c:scatterStyle> element.
attributes = [('val', val)]
self._xml_empty_tag('c:scatterStyle', attributes)
def _modify_series_formatting(self):
# Add default formatting to the series data unless it has already been
# specified by the user.
subtype = self.subtype
# The default scatter style "markers only" requires a line type.
if subtype == 'marker_only':
# Go through each series and define default values.
for series in self.series:
# Set a line type unless there is already a user defined type.
if not series['line']['defined']:
series['line'] = {'width': 2.25,
'none': 1,
'defined': 1,
}
# Turn markers off for subtypes that don't have them.
if 'marker' not in subtype:
# Go through each series and define default values.
for series in self.series:
# Set a marker type unless there is a user defined type.
if not series.get('marker'):
series['marker'] = {'type': 'none', 'defined': 1}
def _write_d_pt_point(self, index, point):
# Write an individual <c:dPt> element. Override the parent method to
# add markers.
self._xml_start_tag('c:dPt')
# Write the c:idx element.
self._write_idx(index)
self._xml_start_tag('c:marker')
# Write the c:spPr element.
self._write_sp_pr(point)
self._xml_end_tag('c:marker')
self._xml_end_tag('c:dPt')
|
py | 1a4e343e349f6aa32b67415b3c734f62535ed7db | #encoding: utf-8
import json
from django import template
from django.conf import settings
register = template.Library()
@register.inclusion_tag('laws/bill_full_name.html')
def bill_full_name(bill):
return { 'bill': bill }
@register.inclusion_tag('laws/bill_list_item.html')
def bill_list_item(bill, add_li=True, show_tags=True):
return { 'bill': bill, 'add_li': add_li, 'show_tags': show_tags }
@register.inclusion_tag('laws/item_tags.html')
def item_tags(tags):
return { 'tags': tags }
def split_member_vote_list_by_party(member_vote_list):
''' create a party partitioned list of "for" voters and "against" voters '''
list_by_party = []
if len(member_vote_list) > 0:
''' first party, first member '''
curr_party = { 'party' : member_vote_list[0].member.current_party.name,
'members' : []}
for vote in member_vote_list:
member = {'name' : vote.member.name,
'url' : vote.member.get_absolute_url(),
'img_url' : vote.member.img_url,
'id' : vote.member.id}
if vote.member.current_party.name == curr_party['party']:
curr_party['members'].append(member)
else:
list_by_party.append(curr_party)
curr_party = { 'party' : vote.member.current_party.name,
'members' : [member]}
''' last party '''
list_by_party.append(curr_party)
return list_by_party
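# Illustrative shape of the returned structure (assumed example data):
#
#   [
#       {"party": "Party A",
#        "members": [{"name": "...", "url": "...", "img_url": "...", "id": 1}, ...]},
#       {"party": "Party B", "members": [...]},
#   ]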
def create_vote_dict(vote):
for_vote_sorted = vote.for_votes()\
.order_by('member__current_party')\
.select_related('member','member__current_party')
for_vote_sorted = list(for_vote_sorted)
for_votes_grouped = split_member_vote_list_by_party(for_vote_sorted)
against_vote_sorted = vote.against_votes()\
.order_by('member__current_party')\
.select_related('member','member__current_party')
against_vote_sorted = list(against_vote_sorted)
against_votes_grouped = split_member_vote_list_by_party(against_vote_sorted)
vote_drill_data = dict({'against': dict({'count': len(against_vote_sorted),
'votes' : against_votes_grouped}),
'for': dict({ 'count' : len(for_vote_sorted),
'votes' : for_votes_grouped})})
vote_dict = dict({'vote' : vote,
'vote_drill_data' : json.dumps(vote_drill_data),
'vote_passed' : vote.for_votes_count > vote.against_votes_count,
'vote_time' : {'day' : vote.time.day,
'month' : vote.time.month,
'year' : vote.time.year}})
return vote_dict
def get_explanation(bill, proposals):
if hasattr(bill, 'knesset_proposal'):
if bill.knesset_proposal.get_explanation() != '':
return bill.knesset_proposal.get_explanation()
if hasattr(bill, 'gov_proposal'):
if bill.gov_proposal.get_explanation() != '':
return bill.gov_proposal.get_explanation()
for proposal in proposals:
if proposal.get_explanation() != '':
return proposal.get_explanation()
@register.inclusion_tag('laws/bill_inabox.html')
def bill_inabox(bill):
""" TODO: firstX and not first3"""
proposals = list(bill.proposals.all())
proposers = bill.proposers.all()
bill_inabox_dict = {
'bill': bill,
'billurl': 'http://oknesset.org%s' % bill.get_absolute_url(),
'proposers_first3': proposers[:3],
'proposers_count_minus3': len(proposers) - 3,
'explanation': get_explanation(bill, proposals),
}
#proposal
if proposals:
proposal = proposals[-1]
bill_inabox_dict['proposal'] = dict({'day' : proposal.date.day,
'month' : proposal.date.month,
'year' : proposal.date.year})
#pre vote
pre_votes = list(bill.pre_votes.all())
pre_vote = None
if pre_votes:
pre_vote = pre_votes[-1]
bill_inabox_dict['pre_vote'] = create_vote_dict(pre_vote)
#first_committee_meetings
cms = list(bill.first_committee_meetings.all())
if cms:
first_committee_meetings = cms[-1]
bill_inabox_dict['first_committee_meetings'] = dict({'day' : first_committee_meetings.date.day,
'month' : first_committee_meetings.date.month,
'year' : first_committee_meetings.date.year,
'url' : first_committee_meetings.get_absolute_url()})
#first vote
fv = bill.first_vote
if fv:
bill_inabox_dict['first_vote'] = create_vote_dict(fv)
#second_committee_meetings
cms = list(bill.second_committee_meetings.all())
if cms:
second_committee_meetings = cms[-1]
bill_inabox_dict['second_committee_meetings'] = dict({'day' : second_committee_meetings.date.day,
'month' : second_committee_meetings.date.month,
'year' : second_committee_meetings.date.year,
'url' : second_committee_meetings.get_absolute_url()})
#second+third vote (approval_vote)
av = bill.approval_vote
if av:
bill_inabox_dict['approval_vote'] = create_vote_dict(av)
return bill_inabox_dict
|
py | 1a4e344589b6ad36f9f10396002766d6e96964cd | # This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Wrappers for primitive Neural Net (NN) Operations.
"""
from __future__ import print_function as _print_function
from tensorflow._api.v1.compat.v1.nn import rnn_cell
from tensorflow.python import depth_to_space
from tensorflow.python import sigmoid
from tensorflow.python import space_to_batch
from tensorflow.python import space_to_depth
from tensorflow.python import tanh
from tensorflow.python.ops.candidate_sampling_ops import all_candidate_sampler
from tensorflow.python.ops.candidate_sampling_ops import compute_accidental_hits
from tensorflow.python.ops.candidate_sampling_ops import fixed_unigram_candidate_sampler
from tensorflow.python.ops.candidate_sampling_ops import learned_unigram_candidate_sampler
from tensorflow.python.ops.candidate_sampling_ops import log_uniform_candidate_sampler
from tensorflow.python.ops.candidate_sampling_ops import uniform_candidate_sampler
from tensorflow.python.ops.ctc_ops import collapse_repeated
from tensorflow.python.ops.ctc_ops import ctc_beam_search_decoder
from tensorflow.python.ops.ctc_ops import ctc_beam_search_decoder_v2
from tensorflow.python.ops.ctc_ops import ctc_greedy_decoder
from tensorflow.python.ops.ctc_ops import ctc_loss
from tensorflow.python.ops.ctc_ops import ctc_loss_v2
from tensorflow.python.ops.ctc_ops import ctc_unique_labels
from tensorflow.python.ops.embedding_ops import embedding_lookup
from tensorflow.python.ops.embedding_ops import embedding_lookup_sparse
from tensorflow.python.ops.embedding_ops import safe_embedding_lookup_sparse
from tensorflow.python.ops.gen_nn_ops import conv3d_backprop_filter_v2
from tensorflow.python.ops.gen_nn_ops import conv3d_backprop_filter_v2 as conv3d_backprop_filter
from tensorflow.python.ops.gen_nn_ops import depthwise_conv2d_native
from tensorflow.python.ops.gen_nn_ops import depthwise_conv2d_native_backprop_filter
from tensorflow.python.ops.gen_nn_ops import depthwise_conv2d_native_backprop_filter as depthwise_conv2d_backprop_filter
from tensorflow.python.ops.gen_nn_ops import depthwise_conv2d_native_backprop_input
from tensorflow.python.ops.gen_nn_ops import depthwise_conv2d_native_backprop_input as depthwise_conv2d_backprop_input
from tensorflow.python.ops.gen_nn_ops import elu
from tensorflow.python.ops.gen_nn_ops import l2_loss
from tensorflow.python.ops.gen_nn_ops import lrn
from tensorflow.python.ops.gen_nn_ops import lrn as local_response_normalization
from tensorflow.python.ops.gen_nn_ops import quantized_avg_pool
from tensorflow.python.ops.gen_nn_ops import quantized_conv2d
from tensorflow.python.ops.gen_nn_ops import quantized_max_pool
from tensorflow.python.ops.gen_nn_ops import quantized_relu_x
from tensorflow.python.ops.gen_nn_ops import relu
from tensorflow.python.ops.gen_nn_ops import selu
from tensorflow.python.ops.gen_nn_ops import softplus
from tensorflow.python.ops.gen_nn_ops import softsign
from tensorflow.python.ops.nn import atrous_conv2d
from tensorflow.python.ops.nn import atrous_conv2d_transpose
from tensorflow.python.ops.nn import avg_pool
from tensorflow.python.ops.nn import avg_pool as avg_pool2d
from tensorflow.python.ops.nn import avg_pool1d
from tensorflow.python.ops.nn import avg_pool3d
from tensorflow.python.ops.nn import avg_pool_v2
from tensorflow.python.ops.nn import batch_norm_with_global_normalization
from tensorflow.python.ops.nn import batch_normalization
from tensorflow.python.ops.nn import bias_add
from tensorflow.python.ops.nn import bidirectional_dynamic_rnn
from tensorflow.python.ops.nn import conv1d
from tensorflow.python.ops.nn import conv1d_transpose
from tensorflow.python.ops.nn import conv2d
from tensorflow.python.ops.nn import conv2d_backprop_filter
from tensorflow.python.ops.nn import conv2d_backprop_input
from tensorflow.python.ops.nn import conv2d_transpose
from tensorflow.python.ops.nn import conv3d_transpose
from tensorflow.python.ops.nn import conv3d_v1 as conv3d
from tensorflow.python.ops.nn import conv_transpose
from tensorflow.python.ops.nn import convolution
from tensorflow.python.ops.nn import crelu
from tensorflow.python.ops.nn import depthwise_conv2d
from tensorflow.python.ops.nn import dilation2d_v1 as dilation2d
from tensorflow.python.ops.nn import dropout
from tensorflow.python.ops.nn import dynamic_rnn
from tensorflow.python.ops.nn import erosion2d
from tensorflow.python.ops.nn import fractional_avg_pool
from tensorflow.python.ops.nn import fractional_max_pool
from tensorflow.python.ops.nn import fused_batch_norm
from tensorflow.python.ops.nn import in_top_k
from tensorflow.python.ops.nn import l2_normalize
from tensorflow.python.ops.nn import leaky_relu
from tensorflow.python.ops.nn import log_poisson_loss
from tensorflow.python.ops.nn import log_softmax
from tensorflow.python.ops.nn import max_pool
from tensorflow.python.ops.nn import max_pool1d
from tensorflow.python.ops.nn import max_pool2d
from tensorflow.python.ops.nn import max_pool3d
from tensorflow.python.ops.nn import max_pool_v2
from tensorflow.python.ops.nn import max_pool_with_argmax_v1 as max_pool_with_argmax
from tensorflow.python.ops.nn import moments
from tensorflow.python.ops.nn import nce_loss
from tensorflow.python.ops.nn import normalize_moments
from tensorflow.python.ops.nn import pool
from tensorflow.python.ops.nn import raw_rnn
from tensorflow.python.ops.nn import relu6
from tensorflow.python.ops.nn import relu_layer
from tensorflow.python.ops.nn import sampled_softmax_loss
from tensorflow.python.ops.nn import separable_conv2d
from tensorflow.python.ops.nn import sigmoid_cross_entropy_with_logits
from tensorflow.python.ops.nn import softmax
from tensorflow.python.ops.nn import softmax_cross_entropy_with_logits
from tensorflow.python.ops.nn import softmax_cross_entropy_with_logits_v2_helper as softmax_cross_entropy_with_logits_v2
from tensorflow.python.ops.nn import sparse_softmax_cross_entropy_with_logits
from tensorflow.python.ops.nn import static_rnn
from tensorflow.python.ops.nn import static_state_saving_rnn
from tensorflow.python.ops.nn import sufficient_statistics
from tensorflow.python.ops.nn import swish
from tensorflow.python.ops.nn import top_k
from tensorflow.python.ops.nn import weighted_cross_entropy_with_logits
from tensorflow.python.ops.nn import weighted_moments
from tensorflow.python.ops.nn import with_space_to_batch
from tensorflow.python.ops.nn import xw_plus_b
from tensorflow.python.ops.nn import zero_fraction
from tensorflow.python.ops.rnn import static_bidirectional_rnn
del _print_function
|
py | 1a4e34659d43ea754a721b7f08454d9ffc097894 | """
Top-level entries in a TOML file.
"""
from .prettify import elements
from .prettify.elements import TableElement, TableHeaderElement
from .peekableit import PeekableIterator
class TopLevel:
"""
    An abstract top-level entry.
"""
def __init__(self, names, table_element):
self._table_element = table_element
self._names = Name(names)
@property
def table_element(self):
return self._table_element
@property
def name(self):
"""
        The distinct name of a table entry as a Name instance.
"""
return self._names
class Name:
def __init__(self, names):
self._names = names
@property
def sub_names(self):
return self._names
def drop(self, n=0):
"""
Returns the name after dropping the first n entries of it.
"""
return Name(names=self._names[n:])
def is_prefixed_with(self, names):
if isinstance(names, Name):
return self.is_prefixed_with(names.sub_names)
for i, name in enumerate(names):
if self._names[i] != name:
return False
return True
def without_prefix(self, names):
if isinstance(names, Name):
return self.without_prefix(names.sub_names)
for i, name in enumerate(names):
if name != self._names[i]:
return Name(self._names[i:])
return Name(names=self.sub_names[len(names) :])
@property
def is_qualified(self):
return len(self._names) > 1
def __str__(self):
return ".".join(self.sub_names)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return not self.__eq__(other)
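# Illustrative sketch of Name semantics (assumed example, not part of the module):
#
#   n = Name(("fruit", "apple", "color"))
#   n.is_qualified                      # -> True
#   n.is_prefixed_with(("fruit",))      # -> True
#   str(n.without_prefix(("fruit",)))   # -> "apple.color"
#   str(n.drop(1))                      # -> "apple.color"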
class AnonymousTable(TopLevel):
def __init__(self, table_element):
TopLevel.__init__(self, ("",), table_element)
class Table(TopLevel):
def __init__(self, names, table_element):
TopLevel.__init__(self, names=names, table_element=table_element)
class ArrayOfTables(TopLevel):
def __init__(self, names, table_element):
TopLevel.__init__(self, names=names, table_element=table_element)
def _validate_file_elements(file_elements):
pass
def identify(file_elements):
"""
Outputs an ordered sequence of instances of TopLevel types.
Elements start with an optional TableElement, followed by zero or more pairs of (TableHeaderElement, TableElement).
"""
if not file_elements:
return
_validate_file_elements(file_elements)
# An iterator over enumerate(the non-metadata) elements
iterator = PeekableIterator(
(element_i, element)
for (element_i, element) in enumerate(file_elements)
if element.type != elements.TYPE_METADATA
)
try:
_, first_element = iterator.peek()
if isinstance(first_element, TableElement):
iterator.next()
yield AnonymousTable(first_element)
except KeyError:
pass
except StopIteration:
return
for element_i, element in iterator:
if not isinstance(element, TableHeaderElement):
continue
# If TableHeader of a regular table, return Table following it
if not element.is_array_of_tables:
table_element_i, table_element = next(iterator)
yield Table(names=element.names, table_element=table_element)
# If TableHeader of an array of tables, do your thing
else:
table_element_i, table_element = next(iterator)
yield ArrayOfTables(names=element.names, table_element=table_element)
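# Illustrative sketch (assumed element sequence, not part of the module): for a
# file parsed into
#   [TableElement(t0), TableHeaderElement(names=("a",)), TableElement(t1)]
# identify() yields AnonymousTable(t0) followed by Table(names=("a",), table_element=t1).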
|
py | 1a4e3566c3cde6d108cfb95c6e9a27d5b40a1ea0 | ################################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""TensorFlow Quantization"""
from ft_tensorflow_quantization.python.ops.fake_quantize import *
from ft_tensorflow_quantization.python.layers.tensor_quantizer import *
from ft_tensorflow_quantization.python.layers.dense import *
from ft_tensorflow_quantization.python.calib.max import *
from ft_tensorflow_quantization.python.calib.histogram import *
from ft_tensorflow_quantization.python.calib.calibrator import *
|
py | 1a4e35ce63c20ef056ef397d23545c2398fb6b89 | # -*- coding: utf-8 -*-
# Copyright (c) 2020 Nekokatt
# Copyright (c) 2021-present davfsa
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Pytest integration."""
import os
from pipelines import config
from pipelines import nox
RUN_FLAGS = [
"-c",
config.PYPROJECT_TOML,
"--showlocals",
]
COVERAGE_FLAGS = [
"--cov",
config.MAIN_PACKAGE,
"--cov-config",
config.PYPROJECT_TOML,
"--cov-report",
"term",
"--cov-report",
f"html:{config.COVERAGE_HTML_PATH}",
"--cov-report",
"xml",
]
@nox.session(reuse_venv=True)
def pytest(session: nox.Session) -> None:
"""Run unit tests and measure code coverage.
Coverage can be disabled with the `--skip-coverage` flag.
"""
session.install("-r", "requirements.txt", "-r", "dev-requirements.txt")
_pytest(session)
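# Illustrative invocations (assumed nox CLI usage, not part of the module):
#
#   nox -s pytest                        # run tests with coverage
#   nox -s pytest -- --skip-coverage     # run tests without coverage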
@nox.session(reuse_venv=True)
def pytest_all_features(session: nox.Session) -> None:
"""Run unit tests and measure code coverage, using speedup modules.
Coverage can be disabled with the `--skip-coverage` flag.
"""
session.install(
"-r",
"requirements.txt",
"-r",
"server-requirements.txt",
"-r",
"speedup-requirements.txt",
"-r",
"dev-requirements.txt",
)
_pytest(session, "-OO")
def _pytest(session: nox.Session, *py_flags: str) -> None:
if "--skip-coverage" in session.posargs:
session.posargs.remove("--skip-coverage")
flags = RUN_FLAGS
else:
flags = [*RUN_FLAGS, *COVERAGE_FLAGS]
session.run("python", *py_flags, "-m", "pytest", *flags, *session.posargs, config.TEST_PACKAGE)
|
py | 1a4e3646c65b735881362d6aeaae2897d6b7e452 | # -*- coding: utf-8 -*-
#
# straight.plugin documentation build configuration file, created by
# sphinx-quickstart on Wed Jan 25 22:49:22 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"straight.plugin"
copyright = u"2012, Calvin Spealman"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.4"
# The full version, including alpha/beta/rc tags.
release = "1.4.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "straightplugindoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
"index",
"straightplugin.tex",
u"straight.plugin Documentation",
u"Calvin Spealman",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"index",
"straightplugin",
u"straight.plugin Documentation",
[u"Calvin Spealman"],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"straightplugin",
u"straight.plugin Documentation",
u"Calvin Spealman",
"straightplugin",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
py | 1a4e36e7c65cf39f317f173105ce01aa4ce7d5fb | from time import time
from datetime import datetime
from apscheduler.schedulers.background import BackgroundScheduler
from app import create_flask_app
from Database.redis import check_actual_users_redis
scheduler = BackgroundScheduler()
scheduler.add_job(func=check_actual_users_redis, trigger="interval", minutes=60)
scheduler.start()
if __name__ == "__main__":
create_flask_app().run(host='0.0.0.0')
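# Editorial note (assumption, not in the original file): BackgroundScheduler runs
# check_actual_users_redis in a daemon thread next to the Flask app; a common companion
# pattern is to register a clean shutdown, e.g.
#   import atexit
#   atexit.register(lambda: scheduler.shutdown())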
|
py | 1a4e36eb0e5746d27c6e946a9ea5cb4fb09df5b6 | # flake8: noqa
from .base import Settings
__version__ = '0.2'
__all__ = ['Settings']
|
py | 1a4e372b89b00deb6d11c1c4e42c2dd5079e1ac8 | import copy
import argparse
import json
import pickle
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import egg.core as core
from egg.core.util import find_lengths
from egg.core import EarlyStopperAccuracy
from egg.core import CheckpointSaver
from egg.zoo.imitation_learning.archs import (
PlusOneWrapper,
Receiver,
Sender,
)
from egg.zoo.compo_vs_generalization import train as compo_vs_generalization
from egg.zoo.compo_vs_generalization.data import (
ScaledDataset,
enumerate_attribute_value,
one_hotify,
select_subset_V1,
select_subset_V2,
split_holdout,
split_train_test,
)
from egg.zoo.imitation_learning.loader import *
from egg.zoo.imitation_learning.util import *
def eval_expert(metadata_path: str):
checkpoint_wrapper = load_metadata_from_pkl(metadata_path)
params = checkpoint_wrapper['params']
params.append('--load_from_checkpoint={}'.format(checkpoint_wrapper['checkpoint_path']))
compo_vs_generalization.main(params, train_mode=False)
def eval_bc_prediction(new_sender, new_receiver, trainer, t=None, checkpoint_path=None):
_, interaction = trainer.eval()
r_loss, r_acc, _ = new_receiver.score(interaction, val=True)
s_loss, s_acc, _ = new_sender.score(interaction, val=True)
print('Epoch: {}; Receiver val loss: {}; Sender val loss: {}'.format(t, r_loss, s_loss))
return r_loss, s_loss, r_acc, s_acc
def eval_expert_original_task(trainer):
# print('About to evaluate og agents on og task')
mean_loss, interaction = trainer.eval()
acc_or, acc = interaction.aux['acc_or'].mean(), interaction.aux['acc'].mean()
print('Expert Loss: {}. Acc_or: {}. Acc: {}'.format(mean_loss, acc_or, acc))
# input()
return mean_loss, acc, acc_or
def eval_bc_original_task(new_trainer, t=None, checkpoint_path=None):
mean_loss, interaction = new_trainer.eval()
acc_or, acc = interaction.aux['acc_or'].mean(), interaction.aux['acc'].mean()
print('Epoch: {}; Original Task Loss: {}. Acc_or: {}. Acc: {}'.format(t, mean_loss, acc_or, acc))
return mean_loss, acc, acc_or # new results
def train_bc(bc_args, new_sender, new_receiver, optimizer_s, optimizer_r, trainer,
new_trainer=None, imitation=False, perf_log=None, sender_aware_weight=0.0):
new_receiver_converged = False
new_sender_converged = False
receiver_converged_epoch, sender_converged_epoch = 0, 0
cumu_r_loss, cumu_s_loss = torch.zeros(bc_args.n_epochs_bc), torch.zeros(bc_args.n_epochs_bc)
cumu_r_acc, cumu_s_acc = torch.empty(bc_args.n_epochs_bc), torch.empty(bc_args.n_epochs_bc)
reinforce_loss_for_sender = torch.zeros(bc_args.n_epochs_bc)
for t in range(bc_args.n_epochs_bc):
val = t % bc_args.val_interval == 0
if val:
new_sender.eval()
new_receiver.eval()
r_loss, s_loss, r_acc, s_acc = eval_bc_prediction(new_sender, new_receiver, trainer, t)
if new_trainer is not None: mean_loss, acc, acc_or = eval_bc_original_task(new_trainer, t)
if perf_log is not None:
log_performance(perf_log, r_loss.item(),
s_loss.item(), r_acc.item(), s_acc.item(), mean_loss,
acc.item(), acc_or.item(), sender_converged_epoch, receiver_converged_epoch)
_, interaction = trainer.eval(trainer.train_data)
trainer.game.train()
if not new_receiver_converged:
new_receiver.train()
r_loss, r_acc, aux_info = train_epoch(
optimizer_r,
new_receiver,
interaction,
expert=trainer.game.receiver,
imitation=imitation,
aux_info={'expert_sender': trainer.game.sender if sender_aware_weight > 0 else None,
'sender_aware': sender_aware_weight > 0}
)
reinforce_loss_for_sender[t] = aux_info['reinforce_loss']
cumu_r_loss[t] = r_loss
cumu_r_acc[t] = r_acc
new_receiver_converged = get_grad_norm(new_receiver) < bc_args.convergence_epsilon
receiver_converged_epoch = t
if not new_sender_converged:
new_sender.train()
s_loss, s_acc, _ = train_epoch(
optimizer_s,
new_sender,
interaction,
expert=trainer.game.sender,
imitation=imitation
)
cumu_s_loss[t] = s_loss
cumu_s_acc[t] = s_acc
new_sender_converged = get_grad_norm(new_sender) < bc_args.convergence_epsilon
sender_converged_epoch = t
if new_receiver_converged and new_sender_converged:
print('Both receiver and sender gradients < epsilon={}'.format(bc_args.convergence_epsilon))
break
print('Epoch: {}; Receiver loss: {}; Sender loss: {}; R acc: {}; S acc: {}'.format(t, r_loss, s_loss, r_acc, s_acc))
cumu_s_loss += sender_aware_weight * reinforce_loss_for_sender
cumu_s_loss = cumu_s_loss.sum()
cumu_r_loss = cumu_r_loss.sum()
return cumu_s_loss, cumu_r_loss, t, s_acc, r_acc, cumu_s_acc, cumu_r_acc
def train_epoch(optimizer, agent, interaction, expert=None, imitation=False, aux_info={}):
optimizer.zero_grad()
loss, acc, aux = agent.score(interaction, expert=expert, imitation=imitation, aux_info=aux_info)
loss.backward()
optimizer.step()
return loss, acc, aux
def main(metadata_path: str, bc_params, expert_seed):
bc_args = get_bc_params(bc_params)
checkpoint_wrapper = load_metadata_from_pkl(metadata_path)
params = checkpoint_wrapper['params']
params.append('--load_from_checkpoint={}'.format(checkpoint_wrapper['checkpoint_path']))
params = list(filter(lambda x: 'random_seed' not in x, params))
params.append('--random_seed={}'.format(bc_args.bc_random_seed))
opts = get_params(params)
device = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu')
# New agents
new_sender, new_receiver = bc_agents_setup(opts, device, *define_agents(opts))
optimizer_r = torch.optim.Adam(new_receiver.parameters(), lr=opts.lr)
optimizer_s = torch.optim.Adam(new_sender.parameters(), lr=opts.lr)
# Dataloader
trainer = expert_setup(opts)
new_trainer = copy.deepcopy(trainer)
new_trainer.game.sender, new_trainer.game.receiver = new_sender.agent, new_receiver.agent
# Logging
perf_log = {
'r_loss': [],
's_loss': [],
'r_acc': [],
's_acc': [],
'mean_loss': [],
'acc': [],
'acc_or': [],
'epoch': [],
'epoch_speaker': [],
'epoch_receiver': []
}
    _, _, t, s_acc, r_acc, _, _ = train_bc(bc_args, new_sender, new_receiver, optimizer_s, optimizer_r, trainer,
                                           new_trainer=new_trainer, perf_log=perf_log)
# Last validation score
print('==============================================')
print('Last validation score')
r_loss, s_loss, r_acc, s_acc = eval_bc_prediction(new_sender, new_receiver, trainer, t=t)
# Integrate with og environment on validation
print('Last validation score on original task')
mean_loss, acc, acc_or = eval_bc_original_task(new_trainer, t=t)
# Original model score
print('Expert validation on original task')
eval_expert_original_task(trainer)
log_performance(perf_log, r_loss.item(), s_loss.item(), r_acc.item(), s_acc.item(), mean_loss, acc.item(), acc_or.item(), sender_converged_epoch,
receiver_converged_epoch)
# Save BC model
if bc_args.save_bc:
save_behavioral_clones(bc_args, params, new_receiver, new_sender,
optimizer_r, optimizer_s, metadata_path, perf_log, expert_seed)
core.close()
if __name__=='__main__':
import sys
import random
# for i in range(100):
# try:
# resave_compo_metrics_on_whole_dataset('saved_models/' +
# 'n_val_10_n_att_2_vocab_100_max_len_3_hidden_500/' +
# 'checkpoint_wrapper_randomseed{}.pkl'.format(i))
# except:
# continue
# # run program for all the things
for seed in range(101, 131):
print('Random seed: ', seed)
random.seed(seed)
params = sys.argv[1:].copy()
params.append('--bc_random_seed={}'.format(seed))
for i in range(100):
try:
main('saved_models/' +
'n_val_10_n_att_2_vocab_100_max_len_3_hidden_500/' +
'checkpoint_wrapper_randomseed{}.pkl'.format(i), params, i)
except(FileNotFoundError):
continue
|
py | 1a4e37a56186a1ddb16acaee9d37c0d13aa9f347 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_human_female.iff"
result.attribute_template_id = 9
result.stfName("npc_name","human_base_female")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
py | 1a4e37b3577443cee6cb31950534e7241026f38b | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Pig(Package):
"""
Pig is a dataflow programming environment for processing very large files.
Pig's language is called Pig Latin. A Pig Latin program consists of a
directed acyclic graph where each node represents an operation that
transforms data.
"""
homepage = "http://archive.apache.org"
url = "http://archive.apache.org/dist/hadoop/pig/stable/pig-0.7.0.tar.gz"
version('0.7.0', sha256='fa7211fb339f547f679a3dd90055f1ddc45d5754d88463e4cc39c380ddf8b02a')
def install(self, spec, prefix):
install_tree('.', prefix)
|
py | 1a4e38020b44b19d4a5fe2c24e9329033f2cf80c | # Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `utils/monte_carlo.py`."""
from jax import test_util as jtu
from jax.config import config as jax_config
from jax.lib import xla_bridge
import jax.numpy as np
import jax.random as random
from neural_tangents import stax
from neural_tangents.utils import batch
from neural_tangents.utils import empirical
from neural_tangents.utils import monte_carlo
from neural_tangents.utils import test_utils
jax_config.parse_flags_with_absl()
BATCH_SIZES = [
1,
2,
4,
]
DEVICE_COUNTS = [0, 1, 2]
STORE_ON_DEVICE = [True, False]
N_SAMPLES = 4
ALL_GET = ('nngp', 'ntk', ('nngp', 'ntk'), None)
test_utils.update_test_tolerance()
def _get_inputs_and_model(width=1, n_classes=2, use_conv=True):
key = random.PRNGKey(1)
key, split = random.split(key)
x1 = random.normal(key, (8, 4, 3, 2))
x2 = random.normal(split, (4, 4, 3, 2))
if not use_conv:
x1 = np.reshape(x1, (x1.shape[0], -1))
x2 = np.reshape(x2, (x2.shape[0], -1))
init_fn, apply_fn, kernel_fn = stax.serial(
stax.Conv(width, (3, 3)) if use_conv else stax.Dense(width),
stax.Relu(),
stax.Flatten(),
stax.Dense(n_classes, 2., 0.5))
return x1, x2, init_fn, apply_fn, kernel_fn, key
class MonteCarloTest(jtu.JaxTestCase):
@jtu.parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name': '[batch_size={}, '
'device_count={} '
'store_on_device={} '
'get={} '
']'.format(batch_size, device_count, store_on_device,
get),
'batch_size': batch_size,
'device_count': device_count,
'store_on_device': store_on_device,
'get': get,
} for batch_size in BATCH_SIZES for device_count in DEVICE_COUNTS
for store_on_device in STORE_ON_DEVICE
for get in ALL_GET))
def test_sample_once_batch(self, batch_size, device_count, store_on_device,
get):
test_utils.stub_out_pmap(batch, device_count)
x1, x2, init_fn, apply_fn, _, key = _get_inputs_and_model()
kernel_fn = empirical.empirical_kernel_fn(apply_fn)
sample_once_fn = monte_carlo._sample_once_kernel_fn(kernel_fn, init_fn)
sample_once_batch_fn = monte_carlo._sample_once_kernel_fn(
kernel_fn, init_fn, batch_size, device_count, store_on_device)
one_sample = sample_once_fn(x1, x2, key, get)
one_sample_batch = sample_once_batch_fn(x1, x2, key, get)
self.assertAllClose(one_sample, one_sample_batch, True)
@jtu.parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name': '[batch_size={}, '
'device_count={} '
'store_on_device={} '
'get={} '
']'.format(batch_size, device_count, store_on_device,
get),
'batch_size': batch_size,
'device_count': device_count,
'store_on_device': store_on_device,
'get': get,
} for batch_size in BATCH_SIZES for device_count in DEVICE_COUNTS
for store_on_device in STORE_ON_DEVICE
for get in ALL_GET))
def test_batch_sample_once(self, batch_size, device_count, store_on_device,
get):
test_utils.stub_out_pmap(batch, device_count)
x1, x2, init_fn, apply_fn, _, key = _get_inputs_and_model()
kernel_fn = empirical.empirical_kernel_fn(apply_fn)
sample_once_fn = monte_carlo._sample_once_kernel_fn(
kernel_fn, init_fn, device_count=0)
batch_sample_once_fn = batch.batch(sample_once_fn, batch_size,
device_count, store_on_device)
one_sample = sample_once_fn(x1, x2, key, get)
one_batch_sample = batch_sample_once_fn(x1, x2, key, get)
self.assertAllClose(one_sample, one_batch_sample, True)
@jtu.parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name': '[batch_size={}, '
'device_count={} '
'store_on_device={} '
']'.format(batch_size, device_count, store_on_device
),
'batch_size': batch_size,
'device_count': device_count,
'store_on_device': store_on_device,
} for batch_size in BATCH_SIZES for device_count in DEVICE_COUNTS
for store_on_device in STORE_ON_DEVICE))
def test_sample_vs_analytic_nngp(self, batch_size, device_count,
store_on_device):
test_utils.stub_out_pmap(batch, device_count)
x1, x2, init_fn, apply_fn, stax_kernel_fn, key = _get_inputs_and_model(
1024, 256, xla_bridge.get_backend().platform == 'tpu')
sample = monte_carlo.monte_carlo_kernel_fn(init_fn, apply_fn, key, 200,
batch_size, device_count,
store_on_device)
ker_empirical = sample(x1, x2, 'nngp')
ker_analytic = stax_kernel_fn(x1, x2, 'nngp')
test_utils.assert_close_matrices(self, ker_analytic, ker_empirical, 2e-2)
@jtu.parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name': '[batch_size={}, '
'device_count={} '
'store_on_device={} '
']'.format(batch_size, device_count, store_on_device
),
'batch_size': batch_size,
'device_count': device_count,
'store_on_device': store_on_device,
} for batch_size in BATCH_SIZES for device_count in DEVICE_COUNTS
for store_on_device in STORE_ON_DEVICE))
def test_monte_carlo_vs_analytic_ntk(self, batch_size, device_count,
store_on_device):
test_utils.stub_out_pmap(batch, device_count)
x1, x2, init_fn, apply_fn, stax_kernel_fn, key = _get_inputs_and_model(
256, 2, xla_bridge.get_backend().platform == 'tpu')
sample = monte_carlo.monte_carlo_kernel_fn(init_fn, apply_fn, key, 100,
batch_size, device_count,
store_on_device)
ker_empirical = sample(x1, x2, 'ntk')
ker_empirical = (
np.sum(ker_empirical, axis=(-1, -2)) / ker_empirical.shape[-1])
ker_analytic = stax_kernel_fn(x1, x2, 'ntk')
test_utils.assert_close_matrices(self, ker_analytic, ker_empirical, 2e-2)
@jtu.parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name': '[batch_size={}, '
'device_count={} '
'store_on_device={} '
'get={}'
']'.format(batch_size, device_count, store_on_device,
get),
'batch_size': batch_size,
'device_count': device_count,
'store_on_device': store_on_device,
'get': get
} for batch_size in BATCH_SIZES for device_count in DEVICE_COUNTS
for store_on_device in STORE_ON_DEVICE
for get in ALL_GET))
def test_monte_carlo_generator(self, batch_size, device_count,
store_on_device, get):
test_utils.stub_out_pmap(batch, device_count)
x1, x2, init_fn, apply_fn, stax_kernel_fn, key = _get_inputs_and_model(8, 1)
x3, x4, _, _, _, _ = _get_inputs_and_model(8, 1)
log_n_max = 4
n_samples = [2**k for k in range(log_n_max)]
sample_generator = monte_carlo.monte_carlo_kernel_fn(
init_fn, apply_fn, key, n_samples, batch_size, device_count,
store_on_device)
if get is None:
samples_12 = sample_generator(x1, x2)
samples_34 = sample_generator(x3, x4)
count = 0
for n, s_12, s_34 in zip(n_samples, samples_12, samples_34):
sample_fn = monte_carlo.monte_carlo_kernel_fn(init_fn, apply_fn, key,
n, batch_size,
device_count,
store_on_device)
sample_12 = sample_fn(x1, x2)
sample_34 = sample_fn(x3, x4)
self.assertAllClose(s_12, sample_12, True)
self.assertAllClose(s_12, s_34, True)
self.assertAllClose(s_12, sample_34, True)
count += 1
self.assertEqual(log_n_max, count)
ker_analytic_12 = stax_kernel_fn(x1, x2, ('nngp', 'ntk'))
ker_analytic_34 = stax_kernel_fn(x3, x4, ('nngp', 'ntk'))
else:
samples_12 = sample_generator(x1, x2, get)
samples_34 = sample_generator(x3, x4, get)
count = 0
for n, s_12, s_34 in zip(n_samples, samples_12, samples_34):
sample_fn = monte_carlo.monte_carlo_kernel_fn(
init_fn, apply_fn, key, n, batch_size,
device_count, store_on_device)
sample_12 = sample_fn(x1, x2, get)
sample_34 = sample_fn(x3, x4, get)
self.assertAllClose(s_12, sample_12, True)
self.assertAllClose(s_12, s_34, True)
self.assertAllClose(s_12, sample_34, True)
count += 1
self.assertEqual(log_n_max, count)
ker_analytic_12 = stax_kernel_fn(x1, x2, get)
ker_analytic_34 = stax_kernel_fn(x3, x4, get)
if get == 'ntk':
s_12 = np.squeeze(s_12, (-1, -2))
elif get is None or 'ntk' in get:
s_12 = s_12._replace(ntk=np.squeeze(s_12.ntk, (-1, -2)))
self.assertAllClose(ker_analytic_12, s_12, True, 2., 2.)
self.assertAllClose(ker_analytic_12, ker_analytic_34, True)
if __name__ == '__main__':
jtu.absltest.main()
|
py | 1a4e38cddfe6a8c1fe09d9baae76dd1a6ee650dd | # Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Dash developers
# Copyright (c) 2015-2017 The PIVX developers
# Copyright (c) 2017 The Peps developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
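# Editorial note (interpretation, not from the original comments): offsetting the ports by
# os.getpid() % 999 keeps concurrent test runs on the same machine from colliding, since
# node n of one run ends up on different p2p/rpc ports than node n of another run.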
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "peps.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
pepsd and peps-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run pepsd:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BITCOIND", "pepsd"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
subprocess.check_call([ os.getenv("BITCOINCLI", "peps-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:[email protected]:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].setgenerate(True, 1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in peps.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
    match = re.match(r'(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
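# Illustrative examples (editorial addition, derived from the regex above):
#   _rpchost_to_args(None)            -> []
#   _rpchost_to_args("10.0.0.5")      -> ['-rpcconnect=10.0.0.5']
#   _rpchost_to_args("[::1]:19000")   -> ['-rpcconnect=::1', '-rpcport=19000']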
def start_node(i, dirname, extra_args=None, rpchost=None):
"""
Start a pepsd and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
args = [ os.getenv("BITCOIND", "pepsd"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
subprocess.check_call([ os.getenv("BITCOINCLI", "peps-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
"""
Start multiple pepsds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
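# Worked example (editorial sketch, not from the original tests): with amount_in=10,
# amount_out=2 and fee=0.001, amount=2.001 and change=7.999; since 7.999 > 2*2.001 the
# change is split into two outputs of roughly 3.9995 each, sent to two fresh addresses.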
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using it's output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
|
py | 1a4e393a5e54ee06f846bd2a9a4dea653a876e63 | # Copyright 2018 Databricks, Inc.
import re
VERSION = "1.13.1.dev0"
def is_release_version():
return bool(re.match(r"^\d+\.\d+\.\d+$", VERSION))
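# Illustrative behaviour (editorial note): with VERSION = "1.13.1.dev0" this returns False;
# a plain release string such as "1.13.1" would match the regex and return True.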
|
py | 1a4e39e1b321cb96558f69766ad4fffe4cc9504c | # # ⚠ Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [🥭 Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:[email protected])
import enum
import logging
import typing
from decimal import Decimal
from ..account import Account, AccountSlot
from ..accountinstrumentvalues import AccountInstrumentValues, PricedAccountInstrumentValues
from ..cache import Cache, MarketCache
from ..context import Context
from ..group import GroupSlotSpotMarket, GroupSlotPerpMarket, GroupSlot, Group
from ..instrumentvalue import InstrumentValue
from ..lotsizeconverter import NullLotSizeConverter
from ..openorders import OpenOrders
from ..perpaccount import PerpAccount
from ..token import Instrument
# # 🥭 HealthType enum
#
# Is the health calculation Initial or Maintenance?
#
class HealthType(enum.Enum):
# We use strings here so that argparse can work with these as parameters.
INITIAL = "INITIAL"
MAINTENANCE = "MAINTENANCE"
def __str__(self) -> str:
return self.value
def __repr__(self) -> str:
return f"{self}"
class HealthCalculator:
def __init__(self, context: Context, health_type: HealthType) -> None:
self.logger: logging.Logger = logging.getLogger(self.__class__.__name__)
self.context: Context = context
self.health_type: HealthType = health_type
def _calculate_pessimistic_spot_value(self, values: PricedAccountInstrumentValues) -> typing.Tuple[InstrumentValue, InstrumentValue]:
# base total if all bids were executed
if_all_bids_executed: InstrumentValue = values.quote_token_locked + values.base_token_total
# base total if all asks were executed
if_all_asks_executed: InstrumentValue = values.base_token_free
base: InstrumentValue
quote: InstrumentValue
if if_all_bids_executed > if_all_asks_executed:
base = values.net_value + if_all_bids_executed
quote = values.quote_token_free
return base, quote
else:
base = values.net_value + if_all_asks_executed
quote = values.base_token_locked + values.quote_token_total
return base, quote
def _calculate_pessimistic_perp_value(self, values: PricedAccountInstrumentValues) -> typing.Tuple[InstrumentValue, InstrumentValue]:
return values.perp_base_position, values.perp_quote_position
def _calculate_perp_value(self, basket_token: AccountSlot, token_price: InstrumentValue, market_index: int, cache: Cache, unadjustment_factor: Decimal) -> typing.Tuple[Decimal, Decimal]:
if basket_token.perp_account is None or basket_token.perp_account.empty:
return Decimal(0), Decimal(0)
perp_market_cache = cache.perp_market_cache[market_index]
if perp_market_cache is None:
raise Exception(f"Cache contains no perp market cache for market index {market_index}.")
perp_account: PerpAccount = basket_token.perp_account
token: Instrument = basket_token.base_instrument
base_lot_size: Decimal = perp_account.lot_size_converter.base_lot_size
quote_lot_size: Decimal = perp_account.lot_size_converter.quote_lot_size
takerQuote: Decimal = perp_account.taker_quote * quote_lot_size
base_position: Decimal = (perp_account.base_position + perp_account.taker_base) * base_lot_size
bids_quantity: Decimal = perp_account.bids_quantity * base_lot_size
asks_quantity: Decimal = perp_account.asks_quantity * base_lot_size
if_all_bids_executed = token.shift_to_decimals(base_position + bids_quantity) * unadjustment_factor
if_all_asks_executed = token.shift_to_decimals(base_position - asks_quantity) * unadjustment_factor
if abs(if_all_bids_executed) > abs(if_all_asks_executed):
quote_position = perp_account.quote_position - perp_account.unsettled_funding(perp_market_cache)
full_quote_position = quote_position + takerQuote - (bids_quantity * token_price.value)
return if_all_bids_executed, full_quote_position
else:
quote_position = perp_account.quote_position - perp_account.unsettled_funding(perp_market_cache)
full_quote_position = quote_position + takerQuote + (asks_quantity * token_price.value)
return if_all_asks_executed, full_quote_position
def calculate(self, account: Account, open_orders_by_address: typing.Dict[str, OpenOrders], group: Group, cache: Cache) -> Decimal:
priced_reports: typing.List[PricedAccountInstrumentValues] = []
for asset in account.base_slots:
# if (asset.deposit.value != 0) or (asset.borrow.value != 0) or (asset.net_value.value != 0):
report: AccountInstrumentValues = AccountInstrumentValues.from_account_basket_base_token(
asset, open_orders_by_address, group)
# print("report", report)
# price: InstrumentValue = group.token_price_from_cache(cache, report.base_token)
market_cache: MarketCache = group.market_cache_from_cache(cache, report.base_token)
# print("Market cache", market_cache)
priced_report: PricedAccountInstrumentValues = report.priced(market_cache)
# print("priced_report", priced_report)
priced_reports += [priced_report]
quote_token_free_in_open_orders: InstrumentValue = InstrumentValue(group.shared_quote_token, Decimal(0))
quote_token_total_in_open_orders: InstrumentValue = InstrumentValue(group.shared_quote_token, Decimal(0))
for priced_report in priced_reports:
quote_token_free_in_open_orders += priced_report.quote_token_free
quote_token_total_in_open_orders += priced_report.quote_token_total
# print("quote_token_free_in_open_orders", quote_token_free_in_open_orders)
# print("quote_token_total_in_open_orders", quote_token_total_in_open_orders)
quote_report: AccountInstrumentValues = AccountInstrumentValues(account.shared_quote_token,
account.shared_quote_token,
account.shared_quote.raw_deposit,
account.shared_quote.deposit,
account.shared_quote.raw_borrow,
account.shared_quote.borrow,
InstrumentValue(
group.shared_quote_token, Decimal(0)),
InstrumentValue(
group.shared_quote_token, Decimal(0)),
quote_token_free_in_open_orders,
quote_token_total_in_open_orders,
InstrumentValue(
group.shared_quote_token, Decimal(0)),
Decimal(0), Decimal(0),
InstrumentValue(
group.shared_quote_token, Decimal(0)),
InstrumentValue(
group.shared_quote_token, Decimal(0)),
Decimal(0), Decimal(0),
NullLotSizeConverter())
# print("quote_report", quote_report)
health: Decimal = quote_report.net_value.value
# print("Health (start)", health)
for priced_report in priced_reports:
slot: GroupSlot = group.slot_by_instrument(priced_report.base_token)
spot_market: typing.Optional[GroupSlotSpotMarket] = slot.spot_market
if spot_market is None:
raise Exception(f"Could not find market for spot token {priced_report.base_token.symbol}.")
base_value, quote_value = self._calculate_pessimistic_spot_value(priced_report)
spot_weight = spot_market.init_asset_weight if base_value > 0 else spot_market.init_liab_weight
spot_health = base_value.value * spot_weight
# print("Weights", base_value.value, "*", spot_weight, spot_health)
perp_base, perp_quote = priced_report.if_worst_execution()
perp_market: typing.Optional[GroupSlotPerpMarket] = slot.perp_market
perp_health: Decimal = Decimal(0)
if perp_market is not None:
perp_weight = perp_market.init_asset_weight if perp_base > 0 else perp_market.init_liab_weight
perp_health = perp_base.value * perp_weight
health += spot_health
health += perp_health
health += quote_value.value
health += perp_quote.value
health += priced_report.raw_perp_quote_position
# print("Health (now)", health, spot_health, perp_health, quote_value.value,
# perp_quote.value, priced_report.raw_perp_quote_position)
# print("Health (returning)", health)
return health
def __str__(self) -> str:
return f"« 𝙷𝚎𝚊𝚕𝚝𝚑𝙲𝚊𝚕𝚌𝚞𝚕𝚊𝚝𝚘𝚛 [{self.health_type}] »"
def __repr__(self) -> str:
return f"{self}"
|
py | 1a4e3a01f972499b2801f6c09fadd6baaccd155b | n = 10
m = 4
stack = []
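# Editorial description (added; the original script is uncommented): this is a generic
# backtracking loop. expand_solution() grows the partial solution on the stack,
# is_acceptable() prints completed candidates, and try_next_solution()/backtrack() step
# through the alternatives. With n = 10 and m = 4 it appears to enumerate the strictly
# decreasing 4-tuples drawn from 1..n (i.e. 4-element combinations printed in descending
# order), terminating by raising once the leading element exceeds n.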
def main():
while True:
if is_full_solution():
is_acceptable()
if has_next_solution():
try_next_solution()
else:
backtrack()
continue
if can_expand_solution():
expand_solution()
continue
break
def is_full_solution():
return len(stack) == m
def is_acceptable():
if len(stack) == m and stack[len(stack) - 1] < stack[len(stack) - 2]:
print(stack)
def can_expand_solution():
if len(stack) < m:
return True
def expand_solution():
stack.append(m - len(stack))
def has_next_solution():
return stack[len(stack) - 1] + 1 < stack[len(stack) - 2]
def try_next_solution():
stack[len(stack) - 1] += 1
def backtrack():
global stack
cursor = len(stack) - 1
while stack[cursor] - stack[cursor - 1] == -1 and cursor - 1 >= 0:
cursor -= 1
stack = stack[:cursor+1]
# increase one
stack[-1] += 1
if stack[0] > n:
raise
main()
|
py | 1a4e3a799a262c54b287009a7020efa39f118553 | from .base import *
from .policy_iteration import *
from .value_iteration import *
from .q_learner import *
__all__ = ['policy_iteration', 'value_iteration', 'q_learner']
|
py | 1a4e3c53f9187ad8d1561407b65814231c7dd840 | import os
import warnings
from collections import OrderedDict
from itertools import product
from typing import Any, Dict, List, Optional, Union
import torch
from torch.nn.functional import interpolate
from torch.nn.modules import LSTM
from torch.nn.modules.conv import Conv2d
from torch.nn.modules.linear import Linear
import delve
from delve.logger import log
from delve.metrics import *
from delve.torch_utils import TorchCovarianceMatrix
from delve.writers import STATMAP, WRITERS, CompositWriter, NPYWriter
class CheckLayerSat(object):
"""Takes PyTorch module and records layer saturation,
intrinsic dimensionality and other scalars.
Args:
savefile (str) : destination for summaries
save_to (str, List[Union[str, delve.writers.AbstractWriter]]:
Specify one or multiple save strategies.
You can use preimplemented save strategies or inherit from
the AbstractWriter in order to implement your
own preferred saving strategy.
pre-existing saving strategies are:
csv : stores all stats in a csv-file with one
row for each epoch.
plot : produces plots from intrinsic dimensionality
and / or layer saturation
tensorboard : saves all stats to tensorboard
print : print all metrics on console
as soon as they are logged
npy : creates a folder-structure with npy-files
containing the logged values. This is the only
save strategy that can save the
full covariance matrix.
This strategy is useful if you want to reproduce
intrinsic dimensionality and saturation values
with other thresholds without re-evaluating
model checkpoints.
modules (torch modules or list of modules) : layer-containing object.
Per default, only Conv2D,
Linear and LSTM-Cells
are recorded
writers_args (dict) : contains additional arguments passed over to the
writers. This is only used, when a writer is
initialized through a string-key.
log_interval (int) : distances between two batches used for updating the
covariance matrix. Default value is 1, which means
that all data is used for computing
intrinsic dimensionality and saturation.
Increasing the log interval is useful on very
large datasets to reduce numeric instability.
max_samples (int) : (optional) the covariance matrix in each layer
will halt updating itself when max_samples
are reached. The use case is similar to log_interval,
when datasets are very large.
stats (list of str): list of stats to compute
supported stats are:
idim : intrinsic dimensionality
lsat : layer saturation (intrinsic dimensionality divided by feature space dimensionality)
cov : the covariance-matrix (only saveable using the 'npy' save strategy)
det : the determinant of the covariance matrix (also known as generalized variance)
trc : the trace of the covariance matrix, generally a more useful metric than det for determining
the total variance of the data than the determinant.
However note that this does not take the correlation between
features into account. On the other hand, in most cases the determinent will be zero, since
there will be very strongly correlated features, so trace might be the better option.
dtrc : the trace of the diagonalmatrix, another way of measuring the dispersion of the data.
lsat : layer saturation (intrinsic dimensionality
divided by feature space dimensionality)
embed : samples embedded in the eigenspace of dimension 2
layerwise_sat (bool): whether or not to include
layerwise saturation when saving
reset_covariance (bool): True by default, resets the covariance
every time the stats are computed. Disabling
this option will strongly bias covariance
since the gradient will influence the model.
We recommend computing saturation at the
end of training and testing.
include_conv : setting to False includes only linear layers
conv_method (str) : how to subsample convolutional layers. Default is
channelwise, which means that each position of
the filter tensor is considered a datapoint,
effectively yielding a data matrix of shape
(height*width*batch_size, num_filters)
supported methods are:
channelwise : treats every depth vector of the tensor as a
datapoint, effectively reshaping the data tensor
from shape (batch_size, height, width, channel)
into (batch_size*height*width, channel).
mean : applies global average pooling on
each feature map
max : applies global max pooling on
each feature map
median : applies global median pooling on
each feature map
flatten : flattens the entire feature map to a vector,
reshaping the data tensor into a data matrix
of shape (batch_size, height*width*channel).
This strategy for dealing with convolutions is
extremely memory intensive and will likely cause
memory and performance problems for any
non toy-problem
timeseries_method (str) : how to subsample timeseries methods. Default
is last_timestep.
supported methods are:
timestepwise : stacks each sample timestep-by-timestep
last_timestep : selects the last timestep's output
nosave (bool) : If True, disables saving artifacts (images), default is False
verbose (bool) : print saturation for every layer during training
sat_threshold (float): threshold used to determine the number of
eigendirections belonging to the latent space.
In effect, this is the threshold determining
the intrinsic dimensionality. Default value
is 0.99 (99% of the explained variance), which
is a compromise between a good and interpretable
approximation. From experience the threshold
should be between 0.97 and 0.9995 for
meaningful results.
verbose (bool) : Change verbosity level (default is 0)
device (str) : Device to do the computations on.
Default is cuda:0. Generally it is recommended
to do the computations
on the gpu in order to get maximum performance.
Using the cpu is generally slower but it lets
delve use regular RAM instead of the generally
more limited VRAM of the GPU.
Not having delve run on the same device as the
network causes slight performance decrease due
to copying memory between devices during each
forward pass.
Delve can handle models distributed on multiple
GPUs, however delve itself will always
run on a single device.
initial_epoch (int) : The initial epoch to start with. Default is 0,
which corresponds to a new run.
If initial_epoch != 0 the writers will
look for save states that they can resume.
If set to zero, all existing states
will be overwritten. If set to a lower epoch
than actually recorded the behavior of the
writers is undefined and may result in crashes,
loss of data or corrupted data.
interpolation_strategy (str) : Default is None (disabled). If set to a
string key accepted by the
model-argument of
torch.nn.functional.interpolate, the
feature map will be resized to match the
interpolated size. This is useful if
you work with large resolutions and want
to save up on computation time. No interpolation
is done if the resolution is smaller.
interpolation_downsampling (int): Default is 32. The target resolution
if downsampling is enabled.
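Example (illustrative sketch; the paths, stats and training loop are assumptions,
not taken from a documented run):
    tracker = CheckLayerSat(savefile='./stats', save_to='csv',
                            modules=model, stats=['lsat', 'idim'])
    # ... run training / evaluation so the forward hooks see activations ...
    tracker.add_saturations()
    tracker.close()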
"""
def __init__(self,
savefile: str,
save_to: Union[str, delve.writers.AbstractWriter],
modules: torch.nn.Module,
writer_args: Optional[Dict[str, Any]] = None,
log_interval=1,
max_samples=None,
stats: list = ['lsat'],
layerwise_sat: bool = True,
reset_covariance: bool = True,
average_sat: bool = False,
ignore_layer_names: List[str] = [],
include_conv: bool = True,
conv_method: str = 'channelwise',
timeseries_method: str = 'last_timestep',
sat_threshold: str = .99,
nosave=False,
verbose: bool = False,
device='cuda:0',
initial_epoch: int = 0,
interpolation_strategy: Optional[str] = None,
interpolation_downsampling: int = 32):
self.nosave = nosave
self.verbose = verbose
# self.disable_compute: bool = False
self.include_conv = include_conv
self.conv_method = conv_method
self.timeseries_method = timeseries_method
self.threshold = sat_threshold
self.layers = self.get_layers_recursive(modules)
self.max_samples = max_samples
self.log_interval = log_interval
self.reset_covariance = reset_covariance
self.initial_epoch = initial_epoch
self.interpolation_strategy = interpolation_strategy
self.interpolation_downsampling = interpolation_downsampling
writer_args = writer_args or {}
writer_args['savepath'] = savefile
os.makedirs(savefile, exist_ok=True)
self.writer = self._get_writer(save_to, writer_args)
self.interval = log_interval
self._warn_if_covariance_not_saveable(stats)
self.logs, self.stats = self._check_stats(stats)
self.layerwise_sat = layerwise_sat
self.average_sat = average_sat
self.ignore_layer_names = ignore_layer_names
self.seen_samples = {'train': {}, 'eval': {}}
self.global_steps = 0
self.global_hooks_registered = False
self.is_notebook = None
self.device = device
self.record = True
for name, layer in self.layers.items():
if isinstance(layer, Conv2d) or isinstance(layer, Linear) \
or isinstance(layer, LSTM):
self._register_hooks(layer=layer,
layer_name=name,
interval=log_interval)
if self.initial_epoch != 0:
self.writer.resume_from_saved_state(self.initial_epoch)
def _warn_if_covariance_not_saveable(self, stats: List[str]):
warn = False
if 'cov' in stats:
if isinstance(self.writer, CompositWriter):
for writer in self.writer.writers:
if isinstance(writer, NPYWriter):
return
warn = True
elif not isinstance(self.writer, NPYWriter):
warn = True
if warn:
warnings.warn("'cov' was selected as stat, but 'npy' (NPYWriter)"
"is not used as a save strategy, which is the only"
"writer able to save the covariance matrix. The"
"training and logging will run normally, but the"
"covariance matrix will not be saved. Note that you"
"can add multiple writers by passing a list.")
def __getattr__(self, name):
if name.startswith('add_') and name != 'add_saturations':
if not self.nosave:
return getattr(self.writer, name)
else:
def noop(*args, **kwargs):
log.info(
f'Logging disabled, not logging: {args}, {kwargs}')
pass
return noop
else:
try:
# Redirect to writer object
return self.writer.__getattribute__(name)
except Exception:
# Default behaviour
return self.__getattribute__(name)
def __repr__(self):
return self.layers.keys().__repr__()
def is_recording(self) -> bool:
return self.record
def stop(self):
self.record = False
def resume(self):
self.record = True
def close(self):
"""User endpoint to close writer and progress bars."""
return self.writer.close()
def _format_saturation(self, saturation_status):
raise NotImplementedError
def _check_stats(self, stats: list):
if not isinstance(stats, list):
stats = list(stats)
supported_stats = [
'lsat',
'idim',
'cov',
'det',
'trc',
'dtrc',
'embed',
]
compatible = [
stat in supported_stats
if "_" not in stat else stat.split("_")[0] in stats
for stat in stats
]
incompatible = [i for i, x in enumerate(compatible) if not x]
assert all(compatible), "Stat {} is not supported".format(
stats[incompatible[0]])
name_mapper = STATMAP
logs = {
f'{mode}-{name_mapper[stat]}': OrderedDict()
for mode, stat in product(['train', 'eval'], ['cov'])
}
return logs, stats
def _add_conv_layer(self, layer: torch.nn.Module):
layer.out_features = layer.out_channels
layer.conv_method = self.conv_method
def _add_lstm_layer(self, layer: torch.nn.Module):
layer.out_features = layer.hidden_size
layer.timeseries_method = self.timeseries_method
def get_layer_from_submodule(self,
submodule: torch.nn.Module,
layers: dict,
name_prefix: str = ''):
if len(submodule._modules) > 0:
for idx, (name, subsubmodule) in \
enumerate(submodule._modules.items()):
new_prefix = name if name_prefix == '' else name_prefix + \
'-' + name
self.get_layer_from_submodule(subsubmodule, layers, new_prefix)
return layers
else:
layer_name = name_prefix
layer_type = layer_name
if not self._check_is_supported_layer(submodule):
log.info(f"Skipping {layer_type}")
return layers
if isinstance(submodule, Conv2d) and self.include_conv:
self._add_conv_layer(submodule)
layers[layer_name] = submodule
log.info('added layer {}'.format(layer_name))
return layers
def _check_is_supported_layer(self, layer: torch.nn.Module) -> bool:
return isinstance(layer, Conv2d) or isinstance(
layer, Linear) or isinstance(layer, LSTM)
def get_layers_recursive(self, modules: Union[list, torch.nn.Module]):
layers = {}
if not isinstance(modules, list) and not hasattr(
modules, 'out_features'):
# submodules = modules._modules # OrderedDict
layers = self.get_layer_from_submodule(modules, layers, '')
elif self._check_is_supported_layer(modules):
for module in modules:
layers = self.get_layer_from_submodule(module, layers,
type(module))
else:
for i, module in enumerate(modules):
layers = self.get_layer_from_submodule(
module, layers,
'' if not self._check_is_supported_layer(module) else
f'Module-{i}-{type(module).__name__}')
return layers
def _get_writer(self, save_to, writers_args) -> \
delve.writers.AbstractWriter:
"""Create a writer to log history to `writer_dir`."""
if issubclass(type(save_to), delve.writers.AbstractWriter):
return save_to
if isinstance(save_to, list):
all_writers = []
for saver in save_to:
all_writers.append(
self._get_writer(save_to=saver, writers_args=writers_args))
return CompositWriter(all_writers)
if save_to in WRITERS:
writer = WRITERS[save_to](**writers_args)
else:
raise ValueError(
'Illegal argument for save_to "{}"'.format(save_to))
return writer
def _register_hooks(self, layer: torch.nn.Module, layer_name: str,
interval):
layer.eval_layer_history = getattr(layer, 'eval_layer_history', list())
layer.train_layer_history = getattr(layer, 'train_layer_history',
list())
layer.layer_svd = getattr(layer, 'layer_svd', None)
layer.forward_iter = getattr(layer, 'forward_iter', 0)
layer.interval = getattr(layer, 'interval', interval)
layer.writer = getattr(layer, 'writer', self.writer)
layer.name = getattr(layer, 'name', layer_name)
self.register_forward_hooks(layer, self.stats)
return self
def _record_stat(self, activations_batch: torch.Tensor, lstm_ae: bool,
layer: torch.nn.Module, training_state: str, stat: str):
if activations_batch.dim() == 4: # conv layer (B x C x H x W)
if self.interpolation_strategy is not None and (
activations_batch.shape[3] >
self.interpolation_downsampling
or activations_batch.shape[2] >
self.interpolation_downsampling):
activations_batch = interpolate(
activations_batch,
size=self.interpolation_downsampling,
mode=self.interpolation_strategy)
if self.conv_method == 'median':
shape = activations_batch.shape
reshaped_batch = activations_batch.reshape(
shape[0], shape[1], shape[2] * shape[3])
activations_batch, _ = torch.median(reshaped_batch,
dim=2) # channel median
elif self.conv_method == 'max':
shape = activations_batch.shape
reshaped_batch = activations_batch.reshape(
shape[0], shape[1], shape[2] * shape[3])
                activations_batch, _ = torch.max(reshaped_batch,
                                                 dim=2)  # channel max
elif self.conv_method == 'mean':
activations_batch = torch.mean(activations_batch, dim=(2, 3))
elif self.conv_method == 'flatten':
activations_batch = activations_batch.view(
activations_batch.size(0), -1)
elif self.conv_method == 'channelwise':
reshaped_batch: torch.Tensor = activations_batch.permute(
[1, 0, 2, 3])
shape = reshaped_batch.shape
reshaped_batch: torch.Tensor = reshaped_batch.flatten(1)
reshaped_batch: torch.Tensor = reshaped_batch.permute([1, 0])
activations_batch = reshaped_batch
elif activations_batch.dim() == 3: # LSTM layer (B x T x U)
if self.timeseries_method == 'timestepwise':
activations_batch = activations_batch.flatten(1)
elif self.timeseries_method == 'last_timestep':
activations_batch = activations_batch[:, -1, :]
if layer.name not in self.logs[f'{training_state}-{stat}'] or (
not isinstance(self.logs[f'{training_state}-{stat}'],
TorchCovarianceMatrix) and self.record):
save_data = 'embed' in self.stats
self.logs[f'{training_state}-{stat}'][
layer.name] = TorchCovarianceMatrix(device=self.device,
save_data=save_data)
self.logs[f'{training_state}-{stat}'][layer.name].update(
activations_batch, lstm_ae)
def register_forward_hooks(self, layer: torch.nn.Module, stats: list):
"""Register hook to show `stats` in `layer`."""
def record_layer_saturation(layer: torch.nn.Module, input, output):
"""Hook to register in `layer` module."""
if not self.record:
if layer.name not in self.logs[
f'{"train" if layer.training else "eval"}-{"covariance-matrix"}']:
# save_data = 'embed' in self.stats
self.logs[
f'{"train" if layer.training else "eval"}-{"covariance-matrix"}'][
layer.name] = np.nan
return
# Increment step counter
layer.forward_iter += 1
# VAE output is a tuple; Hence output.data throw exception
lstm_ae = False
if layer.name in [
'encoder_lstm', 'encoder_output', 'decoder_lstm',
'decoder_output'
]:
output = output[1][0]
lstm_ae = True
elif isinstance(layer, torch.nn.LSTM):
output = output[0]
training_state = 'train' if layer.training else 'eval'
if layer.name not in self.seen_samples[training_state]:
self.seen_samples[training_state][layer.name] = 0
if (self.max_samples is None
or self.seen_samples[training_state][layer.name] <
self.max_samples
) and layer.forward_iter % self.log_interval == 0:
num_samples = min(
output.data.shape[0], self.max_samples -
self.seen_samples[training_state][layer.name]
) if self.max_samples is not None else output.data.shape[0]
activations_batch = output.data[:num_samples]
self.seen_samples[training_state][layer.name] += num_samples
self._record_stat(activations_batch, lstm_ae, layer,
training_state, 'covariance-matrix')
layer.register_forward_hook(record_layer_saturation)
def add_saturations(self, save=True):
"""
Computes saturation and saves all stats
:return:
"""
for key in self.logs:
train_sats = []
val_sats = []
for i, layer_name in enumerate(self.logs[key]):
if layer_name in self.ignore_layer_names:
continue
if self.record and self.logs[key][layer_name]._cov_mtx is None:
raise ValueError("Attempting to compute intrinsic"
"dimensionality when covariance"
"is not initialized")
if self.record:
cov_mat = self.logs[key][layer_name].fix()
log_values = {}
sample_log_values = {}
for stat in self.stats:
if stat == 'lsat':
log_values[key.replace(STATMAP['cov'], STATMAP['lsat'])
+ '_' + layer_name] = compute_saturation(
cov_mat, thresh=self.threshold
) if self.record else np.nan
elif stat == 'idim':
log_values[
key.replace(STATMAP['cov'], STATMAP['idim']) +
'_' +
layer_name] = compute_intrinsic_dimensionality(
cov_mat, thresh=self.threshold
) if self.record else np.nan
elif stat == 'cov':
log_values[key + '_' +
layer_name] = cov_mat.cpu().numpy()
elif stat == 'det':
log_values[key.replace(STATMAP['cov'], STATMAP['det'])
+ '_' +
layer_name] = compute_cov_determinant(
cov_mat) if self.record else np.nan
elif stat == 'trc':
log_values[key.replace(STATMAP['cov'], STATMAP['trc'])
+ '_' +
layer_name] = compute_cov_trace(cov_mat)
elif stat == 'dtrc':
log_values[key.replace(STATMAP['cov'], STATMAP['dtrc'])
+ '_' +
layer_name] = compute_diag_trace(cov_mat)
elif stat == 'embed':
transformation_matrix = torch.mm(
cov_mat[0:2].transpose(0, 1), cov_mat[0:2])
saved_samples = self.logs[key][
layer_name].saved_samples
sample_log_values['embed'] = list()
for (index, sample) in enumerate(saved_samples):
coord = torch.matmul(transformation_matrix, sample)
sample_log_values['embed'].append(
(coord[0], coord[1]))
self.seen_samples[key.split('-')[0]][layer_name] = 0
if self.reset_covariance and self.record:
self.logs[key][layer_name]._cov_mtx = None
if self.layerwise_sat:
self.writer.add_scalars(
prefix='',
value_dict=log_values,
sample_value_dict=sample_log_values)
if self.average_sat:
self.writer.add_scalar('average-train-sat', np.mean(train_sats))
self.writer.add_scalar('average-eval-sat', np.mean(val_sats))
if save:
self.save()
def save(self):
self.writer.save()
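# --- Illustrative usage (added for clarity; not part of the original module) ---
# The tracker above is driven from an ordinary training loop: forward passes feed
# the registered hooks, and add_saturations() turns the accumulated covariance
# matrices into logged statistics. The constructor name and arguments below are an
# assumption based on delve's public API (CheckLayerSat) and may differ here:
#
#     import torch
#     from delve import CheckLayerSat
#
#     model = torch.nn.Sequential(torch.nn.Linear(10, 5), torch.nn.ReLU(),
#                                 torch.nn.Linear(5, 2))
#     tracker = CheckLayerSat(savefile="run1", save_to="csv", modules=model,
#                             stats=["lsat"])
#     for epoch in range(3):
#         model(torch.randn(32, 10))   # hooks record activations
#         tracker.add_saturations()    # compute saturation and write it out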
|
py | 1a4e3c8aaf1724471a045f698119469708c30cfb | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Module gathering all abstract base classes"""
from abc import ABCMeta, abstractmethod, abstractproperty
import re
import time
import weakref
from .catch23 import make_abc, BYTE_TYPES
from .conversion import MySQLConverterBase
from .constants import ClientFlag, CharacterSet, DEFAULT_CONFIGURATION
from .optionfiles import MySQLOptionsParser
from . import errors
NAMED_TUPLE_CACHE = weakref.WeakValueDictionary()
@make_abc(ABCMeta)
class MySQLConnectionAbstract(object):
"""Abstract class for classes connecting to a MySQL server"""
def __init__(self, **kwargs):
"""Initialize"""
self._client_flags = ClientFlag.get_default()
self._charset_id = 33
self._sql_mode = None
self._time_zone = None
self._autocommit = False
self._server_version = None
self._handshake = None
self._user = ''
self._password = ''
self._database = ''
self._host = '127.0.0.1'
self._port = 3306
self._unix_socket = None
self._client_host = ''
self._client_port = 0
self._ssl = {}
self._ssl_disabled = DEFAULT_CONFIGURATION["ssl_disabled"]
self._force_ipv6 = False
self._use_unicode = True
self._get_warnings = False
self._raise_on_warnings = False
self._connection_timeout = DEFAULT_CONFIGURATION["connect_timeout"]
self._buffered = False
self._unread_result = False
self._have_next_result = False
self._raw = False
self._in_transaction = False
self._prepared_statements = None
self._ssl_active = False
self._auth_plugin = None
self._pool_config_version = None
self.converter = None
self._converter_class = None
self._compress = False
self._consume_results = False
def _get_self(self):
"""Return self for weakref.proxy
This method is used when the original object is needed when using
weakref.proxy.
"""
return self
def _read_option_files(self, config):
"""
Read option files for connection parameters.
Checks if connection arguments contain option file arguments, and then
reads option files accordingly.
"""
if 'option_files' in config:
try:
if isinstance(config['option_groups'], str):
config['option_groups'] = [config['option_groups']]
groups = config['option_groups']
del config['option_groups']
except KeyError:
groups = ['client', 'connector_python']
if isinstance(config['option_files'], str):
config['option_files'] = [config['option_files']]
option_parser = MySQLOptionsParser(list(config['option_files']),
keep_dashes=False)
del config['option_files']
config_from_file = option_parser.get_groups_as_dict_with_priority(
*groups)
config_options = {}
for group in groups:
try:
for option, value in config_from_file[group].items():
try:
if option == 'socket':
option = 'unix_socket'
# pylint: disable=W0104
DEFAULT_CONFIGURATION[option]
# pylint: enable=W0104
if (option not in config_options or
config_options[option][1] <= value[1]):
config_options[option] = value
except KeyError:
                            if group == 'connector_python':
raise AttributeError("Unsupported argument "
"'{0}'".format(option))
except KeyError:
continue
for option, value in config_options.items():
if option not in config:
try:
config[option] = eval(value[0]) # pylint: disable=W0123
except (NameError, SyntaxError):
config[option] = value[0]
return config
@property
def user(self):
"""User used while connecting to MySQL"""
return self._user
@property
def server_host(self):
"""MySQL server IP address or name"""
return self._host
@property
def server_port(self):
"MySQL server TCP/IP port"
return self._port
@property
def unix_socket(self):
"MySQL Unix socket file location"
return self._unix_socket
@abstractproperty
def database(self):
"""Get the current database"""
pass
@database.setter
def database(self, value):
"""Set the current database"""
self.cmd_query("USE %s" % value)
@property
def can_consume_results(self):
"""Returns whether to consume results"""
return self._consume_results
def config(self, **kwargs):
"""Configure the MySQL Connection
This method allows you to configure the MySQLConnection instance.
Raises on errors.
"""
config = kwargs.copy()
if 'dsn' in config:
raise errors.NotSupportedError("Data source name is not supported")
# Read option files
self._read_option_files(config)
# Configure how we handle MySQL warnings
try:
self.get_warnings = config['get_warnings']
del config['get_warnings']
except KeyError:
pass # Leave what was set or default
try:
self.raise_on_warnings = config['raise_on_warnings']
del config['raise_on_warnings']
except KeyError:
pass # Leave what was set or default
# Configure client flags
try:
default = ClientFlag.get_default()
self.set_client_flags(config['client_flags'] or default)
del config['client_flags']
except KeyError:
pass # Missing client_flags-argument is OK
try:
if config['compress']:
self._compress = True
self.set_client_flags([ClientFlag.COMPRESS])
except KeyError:
pass # Missing compress argument is OK
try:
if not config['allow_local_infile']:
self.set_client_flags([-ClientFlag.LOCAL_FILES])
except KeyError:
pass # Missing allow_local_infile argument is OK
try:
if not config['consume_results']:
self._consume_results = False
else:
self._consume_results = True
except KeyError:
self._consume_results = False
# Configure auth_plugin
try:
self._auth_plugin = config['auth_plugin']
del config['auth_plugin']
except KeyError:
self._auth_plugin = ''
# Configure character set and collation
if 'charset' in config or 'collation' in config:
try:
charset = config['charset']
del config['charset']
except KeyError:
charset = None
try:
collation = config['collation']
del config['collation']
except KeyError:
collation = None
self._charset_id = CharacterSet.get_charset_info(charset,
collation)[0]
# Set converter class
try:
self.set_converter_class(config['converter_class'])
except KeyError:
pass # Using default converter class
except TypeError:
raise AttributeError("Converter class should be a subclass "
"of conversion.MySQLConverterBase.")
# Compatible configuration with other drivers
compat_map = [
# (<other driver argument>,<translates to>)
('db', 'database'),
('passwd', 'password'),
('connect_timeout', 'connection_timeout'),
]
for compat, translate in compat_map:
try:
if translate not in config:
config[translate] = config[compat]
del config[compat]
except KeyError:
pass # Missing compat argument is OK
# Configure login information
if 'user' in config or 'password' in config:
try:
user = config['user']
del config['user']
except KeyError:
user = self._user
try:
password = config['password']
del config['password']
except KeyError:
password = self._password
self.set_login(user, password)
# Check network locations
try:
self._port = int(config['port'])
del config['port']
except KeyError:
pass # Missing port argument is OK
except ValueError:
raise errors.InterfaceError(
"TCP/IP port number should be an integer")
if "ssl_disabled" in config:
self._ssl_disabled = config.pop("ssl_disabled")
# Other configuration
set_ssl_flag = False
for key, value in config.items():
try:
DEFAULT_CONFIGURATION[key]
except KeyError:
raise AttributeError("Unsupported argument '{0}'".format(key))
# SSL Configuration
if key.startswith('ssl_'):
set_ssl_flag = True
self._ssl.update({key.replace('ssl_', ''): value})
else:
attribute = '_' + key
try:
setattr(self, attribute, value.strip())
except AttributeError:
setattr(self, attribute, value)
if set_ssl_flag:
if 'verify_cert' not in self._ssl:
self._ssl['verify_cert'] = \
DEFAULT_CONFIGURATION['ssl_verify_cert']
# Make sure both ssl_key/ssl_cert are set, or neither (XOR)
if 'ca' not in self._ssl or self._ssl['ca'] is None:
raise AttributeError(
"Missing ssl_ca argument.")
if bool('key' in self._ssl) != bool('cert' in self._ssl):
raise AttributeError(
"ssl_key and ssl_cert need to be both "
"specified, or neither."
)
# Make sure key/cert are set to None
elif not set(('key', 'cert')) <= set(self._ssl):
self._ssl['key'] = None
self._ssl['cert'] = None
elif (self._ssl['key'] is None) != (self._ssl['cert'] is None):
raise AttributeError(
"ssl_key and ssl_cert need to be both "
"set, or neither."
)
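    # Example (illustration only; not part of the original source): typical keyword
    # arguments handled by config() on a concrete connection object:
    #
    #     cnx.config(user='scott', password='secret', host='127.0.0.1', port=3306,
    #                database='test', charset='utf8', get_warnings=True)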
def _check_server_version(self, server_version):
"""Check the MySQL version
This method will check the MySQL version and raise an InterfaceError
when it is not supported or invalid. It will return the version
as a tuple with major, minor and patch.
Raises InterfaceError if invalid server version.
Returns tuple
"""
if isinstance(server_version, BYTE_TYPES):
server_version = server_version.decode()
# pylint: disable=W1401
regex_ver = re.compile(r"^(\d{1,2})\.(\d{1,2})\.(\d{1,3})(.*)")
# pylint: enable=W1401
match = regex_ver.match(server_version)
if not match:
raise errors.InterfaceError("Failed parsing MySQL version")
version = tuple([int(v) for v in match.groups()[0:3]])
if 'fabric' in match.group(4).lower():
if version < (1, 5):
raise errors.InterfaceError(
"MySQL Fabric '{0}' is not supported".format(
server_version))
elif version < (4, 1):
raise errors.InterfaceError(
"MySQL Version '{0}' is not supported.".format(server_version))
return version
def get_server_version(self):
"""Get the MySQL version
This method returns the MySQL server version as a tuple. If not
previously connected, it will return None.
Returns a tuple or None.
"""
return self._server_version
def get_server_info(self):
"""Get the original MySQL version information
This method returns the original MySQL server as text. If not
previously connected, it will return None.
Returns a string or None.
"""
try:
return self._handshake['server_version_original']
except (TypeError, KeyError):
return None
@abstractproperty
def in_transaction(self):
"""MySQL session has started a transaction"""
pass
def set_client_flags(self, flags):
"""Set the client flags
The flags-argument can be either an int or a list (or tuple) of
ClientFlag-values. If it is an integer, it will set client_flags
to flags as is.
If flags is a list (or tuple), each flag will be set or unset
when it's negative.
set_client_flags([ClientFlag.FOUND_ROWS,-ClientFlag.LONG_FLAG])
Raises ProgrammingError when the flags argument is not a set or
an integer bigger than 0.
Returns self.client_flags
"""
if isinstance(flags, int) and flags > 0:
self._client_flags = flags
elif isinstance(flags, (tuple, list)):
for flag in flags:
if flag < 0:
self._client_flags &= ~abs(flag)
else:
self._client_flags |= flag
else:
raise errors.ProgrammingError(
"set_client_flags expect integer (>0) or set")
return self._client_flags
def isset_client_flag(self, flag):
"""Check if a client flag is set"""
if (self._client_flags & flag) > 0:
return True
return False
@property
def time_zone(self):
"""Get the current time zone"""
return self.info_query("SELECT @@session.time_zone")[0]
@time_zone.setter
def time_zone(self, value):
"""Set the time zone"""
self.cmd_query("SET @@session.time_zone = '{0}'".format(value))
self._time_zone = value
@property
def sql_mode(self):
"""Get the SQL mode"""
return self.info_query("SELECT @@session.sql_mode")[0]
@sql_mode.setter
def sql_mode(self, value):
"""Set the SQL mode
This method sets the SQL Mode for the current connection. The value
argument can be either a string with comma separate mode names, or
a sequence of mode names.
It is good practice to use the constants class SQLMode:
from mysql.connector.constants import SQLMode
cnx.sql_mode = [SQLMode.NO_ZERO_DATE, SQLMode.REAL_AS_FLOAT]
"""
if isinstance(value, (list, tuple)):
value = ','.join(value)
self.cmd_query("SET @@session.sql_mode = '{0}'".format(value))
self._sql_mode = value
@abstractmethod
def info_query(self, query):
"""Send a query which only returns 1 row"""
pass
def set_login(self, username=None, password=None):
"""Set login information for MySQL
Set the username and/or password for the user connecting to
the MySQL Server.
"""
if username is not None:
self._user = username.strip()
else:
self._user = ''
if password is not None:
self._password = password
else:
self._password = ''
def set_unicode(self, value=True):
"""Toggle unicode mode
Set whether we return string fields as unicode or not.
Default is True.
"""
self._use_unicode = value
if self.converter:
self.converter.set_unicode(value)
@property
def autocommit(self):
"""Get whether autocommit is on or off"""
value = self.info_query("SELECT @@session.autocommit")[0]
return True if value == 1 else False
@autocommit.setter
def autocommit(self, value):
"""Toggle autocommit"""
switch = 'ON' if value else 'OFF'
self.cmd_query("SET @@session.autocommit = {0}".format(switch))
self._autocommit = value
@property
def get_warnings(self):
"""Get whether this connection retrieves warnings automatically
This method returns whether this connection retrieves warnings
automatically.
Returns True, or False when warnings are not retrieved.
"""
return self._get_warnings
@get_warnings.setter
def get_warnings(self, value):
"""Set whether warnings should be automatically retrieved
The toggle-argument must be a boolean. When True, cursors for this
connection will retrieve information about warnings (if any).
Raises ValueError on error.
"""
if not isinstance(value, bool):
raise ValueError("Expected a boolean type")
self._get_warnings = value
@property
def raise_on_warnings(self):
"""Get whether this connection raises an error on warnings
This method returns whether this connection will raise errors when
MySQL reports warnings.
Returns True or False.
"""
return self._raise_on_warnings
@raise_on_warnings.setter
def raise_on_warnings(self, value):
"""Set whether warnings raise an error
The toggle-argument must be a boolean. When True, cursors for this
connection will raise an error when MySQL reports warnings.
Raising on warnings implies retrieving warnings automatically. In
other words: warnings will be set to True. If set to False, warnings
        will also be set to False.
Raises ValueError on error.
"""
if not isinstance(value, bool):
raise ValueError("Expected a boolean type")
self._raise_on_warnings = value
self._get_warnings = value
@property
def unread_result(self):
"""Get whether there is an unread result
This method is used by cursors to check whether another cursor still
needs to retrieve its result set.
Returns True, or False when there is no unread result.
"""
return self._unread_result
@unread_result.setter
def unread_result(self, value):
"""Set whether there is an unread result
This method is used by cursors to let other cursors know there is
still a result set that needs to be retrieved.
Raises ValueError on errors.
"""
if not isinstance(value, bool):
raise ValueError("Expected a boolean type")
self._unread_result = value
@property
def charset(self):
"""Returns the character set for current connection
This property returns the character set name of the current connection.
The server is queried when the connection is active. If not connected,
the configured character set name is returned.
Returns a string.
"""
return CharacterSet.get_info(self._charset_id)[0]
@property
def python_charset(self):
"""Returns the Python character set for current connection
This property returns the character set name of the current connection.
Note that, unlike property charset, this checks if the previously set
character set is supported by Python and if not, it returns the
equivalent character set that Python supports.
Returns a string.
"""
encoding = CharacterSet.get_info(self._charset_id)[0]
if encoding in ('utf8mb4', 'binary'):
return 'utf8'
else:
return encoding
def set_charset_collation(self, charset=None, collation=None):
"""Sets the character set and collation for the current connection
This method sets the character set and collation to be used for
the current connection. The charset argument can be either the
name of a character set as a string, or the numerical equivalent
as defined in constants.CharacterSet.
When the collation is not given, the default will be looked up and
used.
For example, the following will set the collation for the latin1
character set to latin1_general_ci:
set_charset('latin1','latin1_general_ci')
"""
if charset:
if isinstance(charset, int):
(self._charset_id, charset_name, collation_name) = \
CharacterSet.get_charset_info(charset)
elif isinstance(charset, str):
(self._charset_id, charset_name, collation_name) = \
CharacterSet.get_charset_info(charset, collation)
else:
raise ValueError(
"charset should be either integer, string or None")
elif collation:
(self._charset_id, charset_name, collation_name) = \
CharacterSet.get_charset_info(collation=collation)
self._execute_query("SET NAMES '{0}' COLLATE '{1}'".format(
charset_name, collation_name))
try:
# Required for C Extension
self.set_character_set_name(charset_name) # pylint: disable=E1101
except AttributeError:
# Not required for pure Python connection
pass
if self.converter:
self.converter.set_charset(charset_name)
@property
def collation(self):
"""Returns the collation for current connection
This property returns the collation name of the current connection.
The server is queried when the connection is active. If not connected,
the configured collation name is returned.
Returns a string.
"""
return CharacterSet.get_charset_info(self._charset_id)[2]
@abstractmethod
def _do_handshake(self):
"""Gather information of the MySQL server before authentication"""
pass
@abstractmethod
def _open_connection(self):
"""Open the connection to the MySQL server"""
pass
def _post_connection(self):
"""Executes commands after connection has been established
This method executes commands after the connection has been
established. Some setting like autocommit, character set, and SQL mode
are set using this method.
"""
self.set_charset_collation(self._charset_id)
self.autocommit = self._autocommit
if self._time_zone:
self.time_zone = self._time_zone
if self._sql_mode:
self.sql_mode = self._sql_mode
@abstractmethod
def disconnect(self):
"""Disconnect from the MySQL server"""
pass
close = disconnect
def connect(self, **kwargs):
"""Connect to the MySQL server
This method sets up the connection to the MySQL server. If no
arguments are given, it will use the already configured or default
values.
"""
if len(kwargs) > 0:
self.config(**kwargs)
self.disconnect()
self._open_connection()
self._post_connection()
def reconnect(self, attempts=1, delay=0):
"""Attempt to reconnect to the MySQL server
The argument attempts should be the number of times a reconnect
is tried. The delay argument is the number of seconds to wait between
each retry.
You may want to set the number of attempts higher and use delay when
you expect the MySQL server to be down for maintenance or when you
        expect the network to be temporarily unavailable.
Raises InterfaceError on errors.
"""
counter = 0
while counter != attempts:
counter = counter + 1
try:
self.disconnect()
self.connect()
if self.is_connected():
break
except Exception as err: # pylint: disable=W0703
if counter == attempts:
msg = "Can not reconnect to MySQL after {0} "\
"attempt(s): {1}".format(attempts, str(err))
raise errors.InterfaceError(msg)
if delay > 0:
time.sleep(delay)
@abstractmethod
def is_connected(self):
"""Reports whether the connection to MySQL Server is available"""
pass
@abstractmethod
def ping(self, reconnect=False, attempts=1, delay=0):
"""Check availability of the MySQL server"""
pass
@abstractmethod
def commit(self):
"""Commit current transaction"""
pass
@abstractmethod
def cursor(self, buffered=None, raw=None, prepared=None, cursor_class=None,
dictionary=None, named_tuple=None):
"""Instantiates and returns a cursor"""
pass
@abstractmethod
def _execute_query(self, query):
"""Execute a query"""
pass
@abstractmethod
def rollback(self):
"""Rollback current transaction"""
pass
def start_transaction(self, consistent_snapshot=False,
isolation_level=None, readonly=None):
"""Start a transaction
This method explicitly starts a transaction sending the
START TRANSACTION statement to the MySQL server. You can optionally
set whether there should be a consistent snapshot, which
isolation level you need or which access mode i.e. READ ONLY or
READ WRITE.
For example, to start a transaction with isolation level SERIALIZABLE,
you would do the following:
>>> cnx = mysql.connector.connect(..)
>>> cnx.start_transaction(isolation_level='SERIALIZABLE')
        Raises ProgrammingError when a transaction is already in progress,
        and ValueError when isolation_level specifies an unknown level.
"""
if self.in_transaction:
raise errors.ProgrammingError("Transaction already in progress")
if isolation_level:
level = isolation_level.strip().replace('-', ' ').upper()
levels = ['READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ',
'SERIALIZABLE']
if level not in levels:
raise ValueError(
'Unknown isolation level "{0}"'.format(isolation_level))
self._execute_query(
"SET TRANSACTION ISOLATION LEVEL {0}".format(level))
if readonly is not None:
if self._server_version < (5, 6, 5):
raise ValueError(
"MySQL server version {0} does not support "
"this feature".format(self._server_version))
if readonly:
access_mode = 'READ ONLY'
else:
access_mode = 'READ WRITE'
self._execute_query(
"SET TRANSACTION {0}".format(access_mode))
query = "START TRANSACTION"
if consistent_snapshot:
query += " WITH CONSISTENT SNAPSHOT"
self.cmd_query(query)
def reset_session(self, user_variables=None, session_variables=None):
"""Clears the current active session
This method resets the session state, if the MySQL server is 5.7.3
or later active session will be reset without re-authenticating.
For other server versions session will be reset by re-authenticating.
It is possible to provide a sequence of variables and their values to
be set after clearing the session. This is possible for both user
defined variables and session variables.
This method takes two arguments user_variables and session_variables
which are dictionaries.
Raises OperationalError if not connected, InternalError if there are
unread results and InterfaceError on errors.
"""
if not self.is_connected():
raise errors.OperationalError("MySQL Connection not available.")
try:
self.cmd_reset_connection()
except (errors.NotSupportedError, NotImplementedError):
if self._compress:
raise errors.NotSupportedError(
"Reset session is not supported with compression for "
"MySQL server version 5.7.2 or earlier.")
else:
self.cmd_change_user(self._user, self._password,
self._database, self._charset_id)
if user_variables or session_variables:
cur = self.cursor()
if user_variables:
for key, value in user_variables.items():
cur.execute("SET @`{0}` = %s".format(key), (value,))
if session_variables:
for key, value in session_variables.items():
cur.execute("SET SESSION `{0}` = %s".format(key), (value,))
cur.close()
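    # Example (illustration only): resetting the session and re-applying variables,
    # as described in the docstring above:
    #
    #     cnx.reset_session(user_variables={'my_var': 'some_value'},
    #                       session_variables={'sql_mode': 'TRADITIONAL'})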
def set_converter_class(self, convclass):
"""
Set the converter class to be used. This should be a class overloading
methods and members of conversion.MySQLConverter.
"""
if convclass and issubclass(convclass, MySQLConverterBase):
charset_name = CharacterSet.get_info(self._charset_id)[0]
self._converter_class = convclass
self.converter = convclass(charset_name, self._use_unicode)
else:
raise TypeError("Converter class should be a subclass "
"of conversion.MySQLConverterBase.")
@abstractmethod
def get_rows(self, count=None, binary=False, columns=None):
"""Get all rows returned by the MySQL server"""
pass
def cmd_init_db(self, database):
"""Change the current database"""
raise NotImplementedError
def cmd_query(self, query, raw=False, buffered=False, raw_as_string=False):
"""Send a query to the MySQL server"""
raise NotImplementedError
def cmd_query_iter(self, statements):
"""Send one or more statements to the MySQL server"""
raise NotImplementedError
def cmd_refresh(self, options):
"""Send the Refresh command to the MySQL server"""
raise NotImplementedError
def cmd_quit(self):
"""Close the current connection with the server"""
raise NotImplementedError
def cmd_shutdown(self, shutdown_type=None):
"""Shut down the MySQL Server"""
raise NotImplementedError
def cmd_statistics(self):
"""Send the statistics command to the MySQL Server"""
raise NotImplementedError
def cmd_process_info(self):
"""Get the process list of the MySQL Server
This method is a placeholder to notify that the PROCESS_INFO command
is not supported by raising the NotSupportedError. The command
"SHOW PROCESSLIST" should be send using the cmd_query()-method or
using the INFORMATION_SCHEMA database.
Raises NotSupportedError exception
"""
raise errors.NotSupportedError(
"Not implemented. Use SHOW PROCESSLIST or INFORMATION_SCHEMA")
def cmd_process_kill(self, mysql_pid):
"""Kill a MySQL process"""
raise NotImplementedError
def cmd_debug(self):
"""Send the DEBUG command"""
raise NotImplementedError
def cmd_ping(self):
"""Send the PING command"""
raise NotImplementedError
def cmd_change_user(self, username='', password='', database='',
charset=33):
"""Change the current logged in user"""
raise NotImplementedError
def cmd_stmt_prepare(self, statement):
"""Prepare a MySQL statement"""
raise NotImplementedError
def cmd_stmt_execute(self, statement_id, data=(), parameters=(), flags=0):
"""Execute a prepared MySQL statement"""
raise NotImplementedError
def cmd_stmt_close(self, statement_id):
"""Deallocate a prepared MySQL statement"""
raise NotImplementedError
def cmd_stmt_send_long_data(self, statement_id, param_id, data):
"""Send data for a column"""
raise NotImplementedError
def cmd_stmt_reset(self, statement_id):
"""Reset data for prepared statement sent as long data"""
raise NotImplementedError
def cmd_reset_connection(self):
"""Resets the session state without re-authenticating"""
raise NotImplementedError
@make_abc(ABCMeta)
class MySQLCursorAbstract(object):
"""Abstract cursor class
Abstract class defining cursor class with method and members
required by the Python Database API Specification v2.0.
"""
def __init__(self):
"""Initialization"""
self._description = None
self._rowcount = -1
self._last_insert_id = None
self._warnings = None
self.arraysize = 1
@abstractmethod
def callproc(self, procname, args=()):
"""Calls a stored procedure with the given arguments
The arguments will be set during this session, meaning
they will be called like _<procname>__arg<nr> where
<nr> is an enumeration (+1) of the arguments.
Coding Example:
1) Defining the Stored Routine in MySQL:
CREATE PROCEDURE multiply(IN pFac1 INT, IN pFac2 INT, OUT pProd INT)
BEGIN
SET pProd := pFac1 * pFac2;
END
2) Executing in Python:
args = (5,5,0) # 0 is to hold pprod
cursor.callproc('multiply', args)
print(cursor.fetchone())
Does not return a value, but a result set will be
available when the CALL-statement execute successfully.
Raises exceptions when something is wrong.
"""
pass
@abstractmethod
def close(self):
"""Close the cursor."""
pass
@abstractmethod
def execute(self, operation, params=(), multi=False):
"""Executes the given operation
Executes the given operation substituting any markers with
the given parameters.
For example, getting all rows where id is 5:
cursor.execute("SELECT * FROM t1 WHERE id = %s", (5,))
The multi argument should be set to True when executing multiple
statements in one operation. If not set and multiple results are
found, an InterfaceError will be raised.
        If warnings were generated, and connection.get_warnings is True, then
self._warnings will be a list containing these warnings.
Returns an iterator when multi is True, otherwise None.
"""
pass
@abstractmethod
def executemany(self, operation, seqparams):
"""Execute the given operation multiple times
The executemany() method will execute the operation iterating
over the list of parameters in seq_params.
Example: Inserting 3 new employees and their phone number
data = [
('Jane','555-001'),
('Joe', '555-001'),
('John', '555-003')
]
stmt = "INSERT INTO employees (name, phone) VALUES ('%s','%s')"
cursor.executemany(stmt, data)
INSERT statements are optimized by batching the data, that is
using the MySQL multiple rows syntax.
Results are discarded. If they are needed, consider looping over
data using the execute() method.
"""
pass
@abstractmethod
def fetchone(self):
"""Returns next row of a query result set
Returns a tuple or None.
"""
pass
@abstractmethod
def fetchmany(self, size=1):
"""Returns the next set of rows of a query result, returning a
list of tuples. When no more rows are available, it returns an
empty list.
The number of rows returned can be specified using the size argument,
which defaults to one
"""
pass
@abstractmethod
def fetchall(self):
"""Returns all rows of a query result set
Returns a list of tuples.
"""
pass
def nextset(self):
"""Not Implemented."""
pass
def setinputsizes(self, sizes):
"""Not Implemented."""
pass
def setoutputsize(self, size, column=None):
"""Not Implemented."""
pass
def reset(self, free=True):
"""Reset the cursor to default"""
pass
@abstractproperty
def description(self):
"""Returns description of columns in a result
This property returns a list of tuples describing the columns in
        a result set. A tuple is described as follows::
(column_name,
type,
None,
None,
None,
None,
null_ok,
column_flags) # Addition to PEP-249 specs
Returns a list of tuples.
"""
return self._description
@abstractproperty
def rowcount(self):
"""Returns the number of rows produced or affected
This property returns the number of rows produced by queries
such as a SELECT, or affected rows when executing DML statements
like INSERT or UPDATE.
Note that for non-buffered cursors it is impossible to know the
number of rows produced before having fetched them all. For those,
the number of rows will be -1 right after execution, and
incremented when fetching rows.
Returns an integer.
"""
return self._rowcount
@abstractproperty
def lastrowid(self):
"""Returns the value generated for an AUTO_INCREMENT column
Returns the value generated for an AUTO_INCREMENT column by
the previous INSERT or UPDATE statement or None when there is
no such value available.
Returns a long value or None.
"""
return self._last_insert_id
def fetchwarnings(self):
"""Returns Warnings."""
return self._warnings
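# --- Illustrative usage (added for clarity; not part of the original module) ---
# The abstract classes above are implemented by the concrete connection and cursor
# classes of mysql.connector; a minimal round trip looks like:
#
#     import mysql.connector
#
#     cnx = mysql.connector.connect(user='scott', password='secret', database='test')
#     cur = cnx.cursor()
#     cur.execute("SELECT VERSION()")
#     print(cur.fetchone())
#     cur.close()
#     cnx.close()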
|
py | 1a4e3cca6df2f1f8f8e18baa3edbfca37647e0f2 | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import logging as log
from copy import copy
import numpy as np
from extensions.back.ConvolutionNormalizer import ConvolutionNormalizer, ConvolutionWithGroupsResolver
from extensions.back.MarkNodesWithShapeValues import MarkNodesWithShapeValues
from extensions.back.PackBinaryWeights import PackBinaryWeights
from extensions.back.SpecialNodesFinalization import RemoveConstOps, CreateConstNodesReplacement
from extensions.back.StridedSliceMasksNormalizer import StridedSliceMasksNormalizer
from extensions.back.blob_normalizer import BlobNormalizer
from mo.graph.graph import Graph
from mo.middle.passes.convert_data_type import data_type_str_to_precision
from mo.middle.pattern_match import for_graph_and_each_sub_graph_recursively
from mo.pipeline.common import prepare_emit_ir
from mo.utils.class_registration import apply_replacements_list
from mo.utils.ir_engine.ir_engine import IREngine
from mo.utils.ir_reader.layer_to_class import copy_graph_with_ops, collect_extenders, collect_ops
from mo.utils.utils import get_mo_root_dir
def restore_graph_from_ir(path_to_xml: str, path_to_bin: str = None) -> (Graph, dict):
"""
Function to make valid graph and metadata for MO back stage from IR.
:param path_to_xml:
:param path_to_bin:
:return: (restored graph, meta data)
"""
ir = IREngine(path_to_xml, path_to_bin)
assert ir.graph.graph.get('ir_version') >= 10, 'IR version {} is not supported, ' \
'please generate actual IR for your model and use it.'.format(ir.graph.graph.get('ir_version'))
path = get_mo_root_dir()
collect_ops(path)
collect_extenders(path)
# Create a new copy of graph with correct attributes (shape & type infer, backend attrs etc.)
new_graph = copy_graph_with_ops(ir.graph)
return new_graph, copy(ir.meta_data)
def save_restored_graph(graph: Graph, path: str, meta_data, name=None):
"""
Function to apply all necessary transforms from back stage to prepare and save restored graph and metadata.
:param graph: Graph to save
:param path: Path to saved IR
:param meta_data: Namespace with converting parameters restored from IR
:param name: Name for saved IR
:return:
"""
if name is None:
name = graph.name
if 'data_type' not in meta_data:
log.debug('Provided `meta_data` does not contain `data_type` parameter. Set `data_type`'
' parameter value to `FP32`.')
# Set data_type to FP32. All restored constants will be saved in provided data type.
data_type = 'FP32'
# We need to specify this attribute to pass graph transformations. This information will not be saved into IR.
# All constants and placeholders will be saved with same types as restored from IR
graph.graph['cmd_params'].data_type = data_type
else:
data_type = data_type_str_to_precision(graph.graph['cmd_params'].data_type)
assert data_type in ['FP16', 'FP32'], '`data_type` value {} is not supported by MO,' \
' cannot save graph'.format(data_type)
# List items order matters, do not change it.
transformation_list = [
ConvolutionWithGroupsResolver,
StridedSliceMasksNormalizer,
PackBinaryWeights,
BlobNormalizer,
ConvolutionNormalizer,
MarkNodesWithShapeValues,
]
# We need to run some specific passes from MO back stage.
apply_replacements_list(graph, transformation_list)
# Transformations with enabled=False should be run manually.
for_graph_and_each_sub_graph_recursively(graph, RemoveConstOps().find_and_replace_pattern)
for_graph_and_each_sub_graph_recursively(graph, CreateConstNodesReplacement().find_and_replace_pattern)
prepare_emit_ir(graph, data_type, path, name, meta_info=meta_data, used_by_ir_reader=True)
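# Example (illustration only; the file paths are hypothetical): restoring an IR and
# saving it back with the functions above:
#
#     graph, meta = restore_graph_from_ir('model.xml', 'model.bin')
#     save_restored_graph(graph, './output_dir', meta, name='model_restored')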
|
py | 1a4e3cf360b21f1e1c1a29cde163e09b5e384bbd | from input_output.Loader import Loader
from joblib import load
# Loader specific for the Titanic task
# The loader loads the data
class TitanicLoader(Loader):
def load_split(self, training_data_file, test_data_file, verbose=False):
train, test = self.load_data(training_data_file, test_data_file)
test_labels = test['PassengerId']
X_train, Y_train = self.split_data(train)
if verbose:
print( "\n" + ('-' * 40) )
print( " Original data")
print( '-' * 40)
print( X_train.head() )
print ("Loaded dataset")
return X_train, Y_train, test, test_labels
def split_data(self, train):
# split the features and predector feature
train_X = train
train_Y = train_X["Survived"]
del train_X["Survived"]
return train_X, train_Y
def load_pkl(self, file_name):
return load(file_name)
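# Example (illustration only; the CSV paths are hypothetical):
#
#     loader = TitanicLoader()
#     X_train, Y_train, test, test_labels = loader.load_split(
#         'train.csv', 'test.csv', verbose=True)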
|
py | 1a4e3d98efc9f7c2d2732b4c66ec73f2a78509bd | # coding: utf-8
import pprint
import re
import six
class StartRecyclePolicyRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'x_language': 'str',
'body': 'RecyclePolicyRequestBody'
}
attribute_map = {
'x_language': 'X-Language',
'body': 'body'
}
def __init__(self, x_language=None, body=None):
"""StartRecyclePolicyRequest - a model defined in huaweicloud sdk"""
self._x_language = None
self._body = None
self.discriminator = None
if x_language is not None:
self.x_language = x_language
if body is not None:
self.body = body
@property
def x_language(self):
"""Gets the x_language of this StartRecyclePolicyRequest.
:return: The x_language of this StartRecyclePolicyRequest.
:rtype: str
"""
return self._x_language
@x_language.setter
def x_language(self, x_language):
"""Sets the x_language of this StartRecyclePolicyRequest.
:param x_language: The x_language of this StartRecyclePolicyRequest.
:type: str
"""
self._x_language = x_language
@property
def body(self):
"""Gets the body of this StartRecyclePolicyRequest.
:return: The body of this StartRecyclePolicyRequest.
:rtype: RecyclePolicyRequestBody
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this StartRecyclePolicyRequest.
:param body: The body of this StartRecyclePolicyRequest.
:type: RecyclePolicyRequestBody
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, StartRecyclePolicyRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
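# Example (illustration only; the field values are hypothetical):
#
#     request = StartRecyclePolicyRequest(x_language='en-us')
#     print(request.to_dict())   # {'x_language': 'en-us', 'body': None}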
|
py | 1a4e3dc618def9a471572a5898b4298b9313598d | from microbit import *
import neopixel
from utime import ticks_us, sleep_us
class bitBotXL:
def __init__(self):
pass
leds = neopixel.NeoPixel(pin13, 12)
def forward(self, speed):
if (speed > 1023) or (speed < 1):
print("Error, speed must be a number 1-1023")
display.scroll("Speed must be a number 1-1023")
else:
pin16.write_analog(speed)
pin8.write_digital(0)
pin14.write_analog(speed)
pin12.write_digital(0)
def backward(self, speed):
if (speed > 1023) or (speed < 1):
print("Error, speed must be a number 1-1023")
display.scroll("Speed must be a number 1-1023")
else:
pin16.write_digital(0)
pin8.write_analog(speed)
pin14.write_digital(0)
pin12.write_analog(speed)
def left(self, speed):
if (speed > 1023) or (speed < 1):
print("Error, speed must be a number 1-1023")
display.scroll("Speed must be a number 1-1023")
else:
pin14.write_analog(0)
pin12.write_analog(0)
pin16.write_analog(speed)
pin8.write_digital(0)
def right(self, speed):
if (speed > 1023) or (speed < 1):
print("Error, speed must be a number 1-1023")
display.scroll("Speed must be a number 1-1023")
else:
pin16.write_analog(0)
pin8.write_analog(0)
pin14.write_analog(speed)
pin12.write_digital(0)
def stop(self, brake=True):
if brake==True:
pin16.write_analog(1023)
pin8.write_analog(1023)
pin14.write_analog(1023)
pin12.write_analog(1023)
else:
pin16.write_analog(0)
pin8.write_analog(0)
pin14.write_analog(0)
pin12.write_analog(0)
def sonar_cm(self):
pin15.write_digital(1)
sleep_us(10)
pin15.write_digital(0)
while pin15.read_digital() == 0:
pass
start = ticks_us()
while pin15.read_digital() == 1:
pass
end = ticks_us()
echo = end-start
distance = int(0.01715 * echo)
return distance
def sonar_mm(self):
pin15.write_digital(1)
sleep_us(10)
pin15.write_digital(0)
while pin15.read_digital() == 0:
pass
start = ticks_us()
while pin15.read_digital() == 1:
pass
end = ticks_us()
echo = end-start
distance = int(0.01715 * echo*10)
return distance |
py | 1a4e3e25f5ef18db304baafa3b6bc1f888fb1433 | from somerandomapi.sync_async_handler import SyncAsyncHandler
from somerandomapi import http
def welcome(
key: str,
image: int,
background: str,
type: str,
avatar: str,
username: str,
discriminator: int,
guild_name: str,
text_color: str,
member_count: int,
):
"""
Docs: https://some-random-api.ml/docs/canvas/welcome
- key
- This endpoint requires a key to use but even if the key is expired it is fine.
- image
- It must be between 1 and 7.
- background
- Must be one of these:
- stars
- stars2
- rainbowgradient
- rainbow
- sunset
- night
- blobday
- blobnight
- space
- gaming1
- gaming3
- gaming2
- gaming4
- type
- Could be either `join` or `leave`
- avatar
- username
- Maximum 30 characters
- discriminator
- guild_name
- text_color
- member_count
"""
return SyncAsyncHandler(
get_welcome,
async_get_welcome,
key=key,
image=image,
background=background,
type=type,
avatar=avatar,
username=username,
discriminator=discriminator,
guildName=guild_name,
textcolor=text_color,
memberCount=member_count,
)
class Query:
def __init__(self, queries):
self.__dict__.update(queries)
async def async_get_welcome(**queries):
query = Query(queries)
queries.pop("image", None)
queries.pop("background", None)
async with http.GET(
("welcome", "img", str(query.image), query.background), queries
) as response:
return response
def get_welcome(**queries):
query = Query(queries)
queries.pop("image", None)
queries.pop("background", None)
with http.GET(("img", query.image, query.background), queries) as response:
return response
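# Example (illustration only; the key and avatar URL are placeholders):
#
#     handler = welcome(
#         key='YOUR_API_KEY', image=1, background='stars', type='join',
#         avatar='https://example.com/avatar.png', username='someone',
#         discriminator=1234, guild_name='My Server', text_color='white',
#         member_count=42)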
|
py | 1a4e3e370a3f4517061d3d861f089f86845e83c4 | import os
import json
import nltk
import random
import re
classes_under_consideration = ['ynQuestion','whQuestion','Greet','Statement','Emotion']
out_dir = './../res/data/nps_chat_dataset'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
posts = nltk.corpus.nps_chat.xml_posts()[:]
dataset = {}
for post in posts:
_class = post.get('class')
if _class not in classes_under_consideration:
continue
text = " "
for word in nltk.word_tokenize(post.text):
if not re.search('user', word, re.IGNORECASE):
text = text + " " + word.lower()
text = text.strip()
if dataset.get(_class) == None:
dataset[_class] = []
if _class not in ['ynQuestion','whQuestion'] and len(text) > 3:
dataset[_class].append(text)
elif _class in ['ynQuestion','whQuestion']:
dataset[_class].append(text)
for _class, texts in dataset.items():
texts = random.sample(texts,533)
file_name = '{}.txt'.format(_class)
with open(os.path.join(out_dir,file_name), 'w') as f:
f.write('\n'.join(texts)) |
py | 1a4e3eda6cfb05ce7bee80a98e00a59c8e8c6059 | # %%
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from ..config import (
device,
experiment_folder,
second_stage,
second_stage_dataset,
)
from ..model import LanguageGenerator, SentenceDecoderWithAttention, TermEncoder
from .misc import extract_caption_len
# In case of "RuntimeError: received 0 items of ancdata"
# https://github.com/pytorch/pytorch/issues/973
# torch.multiprocessing.set_sharing_strategy("file_system")
def train(model, dataset, mapping, criterion, optimizer, writer, epoch):
dataloader = DataLoader(
dataset, batch_size=second_stage["batch_size"], num_workers=4, shuffle=True
)
model = model.train().to(device)
running_loss = 0
for i, data in enumerate(tqdm(dataloader, desc="Batches")):
caps, terms = data
caps, terms = torch.stack(caps).to(device), torch.stack(terms).to(device)
caps, clens = extract_caption_len(caps.T)
terms, tlens = extract_caption_len(terms.T)
targets = caps.detach().clone()[:, 1:]
optimizer.zero_grad()
out, hidden, attn = model(terms, tlens, caps[:, :-1], clens + 1) # add <start>
loss = criterion(out.permute(0, 2, 1), targets)
loss.backward()
optimizer.step()
running_loss += loss.item()
if i % 50 == 49:
step_number = epoch * len(dataloader) + i
writer.add_scalar("Training loss", running_loss / 50, step_number)
running_loss = 0
return model
def main():
dataset = second_stage_dataset()
writer = SummaryWriter(experiment_folder)
cmapping, tmapping = dataset.get_cap_mapping, dataset.get_term_mapping
enc = TermEncoder(len(tmapping), 2048)
dec = SentenceDecoderWithAttention(len(cmapping), 2048, len(cmapping))
lang = LanguageGenerator(enc, dec)
criterion = nn.NLLLoss(ignore_index=0)
optimizer = torch.optim.Adam(lang.parameters(), lr=second_stage["learning_rate"])
for i in range(second_stage["epochs"]):
print(f"Epoch {i}")
lang = train(lang, dataset, cmapping, criterion, optimizer, writer, i)
torch.save(lang.state_dict(), experiment_folder / f"language_ep{i:03d}.pth")
if __name__ == "__main__":
main()
# %%
|
py | 1a4e3fba02d077f0a70268a005275337e9fff65a | from __future__ import print_function, unicode_literals, absolute_import, division
import numpy as np
from .utils import _normalize_grid
def random_label_cmap(n=2**16):
import matplotlib
import colorsys
# cols = np.random.rand(n,3)
# cols = np.random.uniform(0.1,1.0,(n,3))
h,l,s = np.random.uniform(0,1,n), 0.4 + np.random.uniform(0,0.6,n), 0.2 + np.random.uniform(0,0.8,n)
cols = np.stack([colorsys.hls_to_rgb(_h,_l,_s) for _h,_l,_s in zip(h,l,s)],axis=0)
cols[0] = 0
return matplotlib.colors.ListedColormap(cols)
def _plot_polygon(x,y,score,color):
import matplotlib.pyplot as plt
a,b = list(x),list(y)
a += a[:1]
b += b[:1]
plt.plot(a,b,'--', alpha=1, linewidth=score, zorder=1, color=color)
def draw_polygons(coord, score, poly_idx, grid=(1,1), cmap=None, show_dist=False):
"""poly_idx is a N x 2 array with row-col coordinate indices"""
return _draw_polygons(polygons=coord[poly_idx[:,0],poly_idx[:,1]],
points=poly_idx,
scores=score[poly_idx[:,0],poly_idx[:,1]],
grid=grid, cmap=cmap, show_dist=show_dist)
def _draw_polygons(polygons, points=None, scores=None, grid=(1,1), cmap=None, show_dist=False):
"""
polygons is a list/array of x,y coordinate lists/arrays
points is a list/array of x,y coordinates
scores is a list/array of scalar values between 0 and 1
"""
# TODO: better name for this function?
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
grid = _normalize_grid(grid,2)
if points is None:
points = [None]*len(polygons)
if scores is None:
scores = np.ones(len(polygons))
if cmap is None:
cmap = random_label_cmap(len(polygons)+1)
assert len(polygons) == len(scores)
assert len(cmap.colors[1:]) >= len(polygons)
assert not show_dist or all(p is not None for p in points)
for point,poly,score,c in zip(points,polygons,scores,cmap.colors[1:]):
if point is not None:
plt.plot(point[1]*grid[1], point[0]*grid[0], '.', markersize=8*score, color=c)
if show_dist:
dist_lines = np.empty((poly.shape[-1],2,2))
dist_lines[:,0,0] = poly[1]
dist_lines[:,0,1] = poly[0]
dist_lines[:,1,0] = point[1]*grid[1]
dist_lines[:,1,1] = point[0]*grid[0]
plt.gca().add_collection(LineCollection(dist_lines, colors=c, linewidths=0.4))
_plot_polygon(poly[1], poly[0], 3*score, color=c)
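# Example (illustration only): drawing a few random triangles with matplotlib,
# where each polygon is a 2 x n array of coordinates as expected above:
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#
#     polys = [np.random.uniform(0, 16, (2, 3)) for _ in range(5)]
#     _draw_polygons(polys, scores=np.random.uniform(0.5, 1.0, 5))
#     plt.show()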
|
py | 1a4e427718f8bd440fbca9b663982c7af48861b0 | #!/usr/bin/env python
import colorsys
import math
import time
import unicornhathd
print("""Unicorn HAT HD: demo.py
This pixel shading demo transitions between 4 classic graphics demo effects.
Press Ctrl+C to exit!
""")
unicornhathd.rotation(0)
u_width, u_height = unicornhathd.get_shape()
# Generate a lookup table for 8bit hue to RGB conversion
hue_to_rgb = []
for i in range(0, 255):
hue_to_rgb.append(colorsys.hsv_to_rgb(i / 255.0, 1, 1))
def gradient(x, y, step):
g = x * 16
b = y * 16
r = 255 - (x * 16)
return (r, g, b)
# twisty swirly goodness
def swirl(x, y, step):
x -= (u_width / 2)
y -= (u_height / 2)
dist = math.sqrt(pow(x, 2) + pow(y, 2)) / 2.0
angle = (step / 10.0) + (dist * 1.5)
s = math.sin(angle)
c = math.cos(angle)
xs = x * c - y * s
ys = x * s + y * c
r = abs(xs + ys)
r = r * 12.0
r -= 20
return (r, r + (s * 130), r + (c * 130))
# roto-zooming checker board
def checker(x, y, step):
x -= (u_width / 2)
y -= (u_height / 2)
angle = (step / 10.0)
s = math.sin(angle)
c = math.cos(angle)
xs = x * c - y * s
ys = x * s + y * c
xs -= math.sin(step / 200.0) * 40.0
ys -= math.cos(step / 200.0) * 40.0
scale = step % 20
scale /= 20
scale = (math.sin(step / 50.0) / 8.0) + 0.25
xs *= scale
ys *= scale
xo = abs(xs) - int(abs(xs))
yo = abs(ys) - int(abs(ys))
v = 0 if (math.floor(xs) + math.floor(ys)) % 2 else 1 if xo > .1 and yo > .1 else .5
r, g, b = hue_to_rgb[step % 255]
return (r * (v * 255), g * (v * 255), b * (v * 255))
# weeee waaaah
def blues_and_twos(x, y, step):
x -= (u_width / 2)
y -= (u_height / 2)
scale = math.sin(step / 6.0) / 1.5
r = math.sin((x * scale) / 1.0) + math.cos((y * scale) / 1.0)
b = math.sin(x * scale / 2.0) + math.cos(y * scale / 2.0)
g = r - .8
g = 0 if g < 0 else g
b -= r
b /= 1.4
return (r * 255, (b + g) * 255, g * 255)
# rainbow search spotlights
def rainbow_search(x, y, step):
xs = math.sin((step) / 100.0) * 20.0
ys = math.cos((step) / 100.0) * 20.0
scale = ((math.sin(step / 60.0) + 1.0) / 5.0) + 0.2
r = math.sin((x + xs) * scale) + math.cos((y + xs) * scale)
g = math.sin((x + xs) * scale) + math.cos((y + ys) * scale)
b = math.sin((x + ys) * scale) + math.cos((y + ys) * scale)
return (r * 255, g * 255, b * 255)
# zoom tunnel
def tunnel(x, y, step):
speed = step / 100.0
x -= (u_width / 2)
y -= (u_height / 2)
xo = math.sin(step / 27.0) * 2
yo = math.cos(step / 18.0) * 2
x += xo
y += yo
if y == 0:
if x < 0:
angle = -(math.pi / 2)
else:
angle = (math.pi / 2)
else:
angle = math.atan(x / y)
if y > 0:
angle += math.pi
angle /= 2 * math.pi # convert angle to 0...1 range
hyp = math.sqrt(math.pow(x, 2) + math.pow(y, 2))
shade = hyp / 2.1
shade = 1 if shade > 1 else shade
angle += speed
depth = speed + (hyp / 10)
col1 = hue_to_rgb[step % 255]
col1 = (col1[0] * 0.8, col1[1] * 0.8, col1[2] * 0.8)
col2 = hue_to_rgb[step % 255]
col2 = (col2[0] * 0.3, col2[1] * 0.3, col2[2] * 0.3)
col = col1 if int(abs(angle * 6.0)) % 2 == 0 else col2
td = .3 if int(abs(depth * 3.0)) % 2 == 0 else 0
col = (col[0] + td, col[1] + td, col[2] + td)
col = (col[0] * shade, col[1] * shade, col[2] * shade)
return (col[0] * 255, col[1] * 255, col[2] * 255)
def current_milli_time():
return int(round(time.time() * 1000))
effects = [gradient, tunnel, rainbow_search, checker, swirl]
step = 0
try:
while True:
for i in range(100):
start = current_milli_time()
for y in range(u_height):
for x in range(u_width):
r, g, b = effects[0](x, y, step)
if i > 75:
r2, g2, b2 = effects[-1](x, y, step)
ratio = (100.00 - i) / 25.0
r = r * ratio + r2 * (1.0 - ratio)
g = g * ratio + g2 * (1.0 - ratio)
b = b * ratio + b2 * (1.0 - ratio)
r = int(max(0, min(255, r)))
g = int(max(0, min(255, g)))
b = int(max(0, min(255, b)))
unicornhathd.set_pixel(x, y, r, g, b)
step += 2
unicornhathd.show()
effect = effects.pop()
effects.insert(0, effect)
except KeyboardInterrupt:
unicornhathd.off()
|
py | 1a4e44fa1437f33e3868edd4c85aa17b157bd285 | # https://justhackerthings.com/post/building-a-dark-web-scraper/
import sys
def main():
# Disable SSL warnings
try:
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
except:
pass
START = sys.argv[1]
if __name__ == "__main__":
main()
|
py | 1a4e46a77ff266eff87274201b43bf42cd193c74 | #!/usr/bin/env python
import sys, os , socket, random, struct, time
import argparse
from scapy.all import sendp, send, get_if_list, get_if_hwaddr, bind_layers
from scapy.all import Packet
from scapy.all import Ether, IP, UDP, TCP, Raw
from scapy.fields import *
SRC = 0
DST = 1
DSCP = 2
BOS = 0
LABEL1 = 1
SWITCH_ID = 0
TIMESTAMP = 1
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('-e', '--ethernet', type=str, help='Ethernet src/dst addresses')
parser.add_argument('-m', '--mpls', type=str, help='Enable MPLS header and add parameters')
parser.add_argument('-i', '--ip', type=str, help='Add IPv4 parameters')
parser.add_argument('-t', '--tcp', type=int, action='store', help='Enable TCP header and add parameters')
parser.add_argument('-u', '--udp', type=int, action='store', help='Enable UDP header and add parameters')
parser.add_argument('-p', '--packets', type=int, action='store', help='Number of packets to send')
parser.add_argument('-b', '--bytes', type=int, action='store', help='Bytes for the payload')
parser.add_argument('-r', '--randbytes', const=True, action='store_const', help='Add random bytes to the payload')
parser.add_argument('-f', '--filename', type=str, help='Path for the filename')
parser.add_argument('-c', '--interface', type=str, help='Name of the interface to send the packet to')
parser.add_argument('-n', '--int', type=str, help='Add INT header')
args = parser.parse_args()
class MPLS(Packet):
name = "MPLS"
fields_desc = [
BitField("label", 1000, 20),
BitField("exp", 0, 3),
BitField("bos", 1, 1),
ByteField("ttl", 0)
]
class INT(Packet):
name = "INT"
fields_desc = [
BitField("egress_timestamp", 5, 64) #name, default, size
]
bind_layers(Ether, IP, type=0x0800)
bind_layers(IP, INT, protocol=0xFE)
def main():
if args.ethernet:
ethernetParams = [p for p in args.ethernet.split(',')]
if args.ip:
ipParams = [p for p in args.ip.split(',')]
#outF = open(fileName, "a")
print("Sending packets on interface %s" % (args.interface))
pkt = Ether(src=ethernetParams[SRC], dst=ethernetParams[DST])
pkt = pkt / IP(src=ipParams[SRC], dst=ipParams[DST], tos=int(ipParams[DSCP], 0) << 2)
if args.int:
        pkt = pkt / INT(egress_timestamp = 7)  # the "/" operator appends the next header
if args.udp:
pkt = pkt / UDP(sport=0, dport=args.udp)
if args.tcp:
pkt = pkt / TCP(sport=0, dport=args.tcp)
if args.bytes:
if args.randbytes:
pkt = pkt / Raw(load=bytearray(os.urandom(args.bytes)))
else:
pkt = pkt / Raw(load=bytearray([0] * args.bytes) )
for i in range(args.packets):
#pkt.show()
#t = time.time_ns()
if args.udp:
pkt[UDP].sport = i+1
if args.tcp:
pkt[TCP].sport = i+1
sendp(pkt, iface=args.interface, verbose=False)
print("Sent packet: " + str(i+1))
time.sleep(0.3)
if __name__ == '__main__':
main() |