max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
corehq/apps/builds/management/commands/add_commcare_build.py | andyasne/commcare-hq | 471 | 11101047 | from github import Github
from django.core.management.base import BaseCommand, CommandError
from corehq.apps.builds.models import CommCareBuild, CommCareBuildConfig, BuildMenuItem, BuildSpec
class Command(BaseCommand):
help = ('Adds a commcare build, labeled with the version (x.y.z) and build_number (an incrementing integer)\n'
'to get started see https://github.com/dimagi/commcare-hq/blob/master/corehq/apps/builds/README.md')
def add_arguments(self, parser):
parser.add_argument('build_path', nargs='?')
parser.add_argument('version', nargs='?')
parser.add_argument('build_number', type=int, nargs='?')
parser.add_argument(
'-l',
'--latest',
action='store_true',
help="add the latest CommCare build version from GitHub"
)
def handle(self, build_path, version, build_number, **options):
if options.get('latest'):
_create_build_with_latest_version()
else:
if build_path and version and build_number:
try:
CommCareBuild.create_from_zip(build_path, version, build_number)
except Exception as e:
raise CommandError("%s" % e)
self.stdout.write('Build %s #%s created\n' % (version, build_number))
self.stdout.write('You can see a list of builds at [your-server]/builds/\n')
else:
raise CommandError("<build_path>, <version> or <build_number> not specified!")
def _create_build_with_latest_version():
version = _get_latest_commcare_build_version()
commcare_version_build = next(
(cc_build for cc_build in CommCareBuild.all_builds() if cc_build.version == version),
None
)
if commcare_version_build is None:
CommCareBuild.create_without_artifacts(version, None)
_update_commcare_build_menu(version)
def _get_latest_commcare_build_version():
repo = Github().get_organization('dimagi').get_repo("commcare-android")
latest_release_tag = repo.get_latest_release().tag_name
return latest_release_tag.split('commcare_')[1]
def _update_commcare_build_menu(version):
build_config_doc = CommCareBuildConfig.fetch()
_add_build_menu_item(build_config_doc, version)
_update_default_build_spec_to_version(build_config_doc, version)
build_config_doc.save()
CommCareBuildConfig.clear_local_cache()
def _add_build_menu_item(build_config, version):
build_menu_items = build_config.menu
build = BuildSpec(version=version, latest=True)
build_menu_item = BuildMenuItem(build=build, label="CommCare {}".format(version), j2me_enabled=False)
build_menu_items.append(build_menu_item)
def _update_default_build_spec_to_version(build_config, version):
major_version = version[0]
defaults = build_config.defaults
major_default_build_spec = next(
(default for default in defaults if default.version.startswith(major_version)),
None
)
if major_default_build_spec and major_default_build_spec.version != version:
major_default_build_spec.version = version
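

# --- Illustrative usage sketch (not part of the original command) -----------------
# A minimal example of driving this management command programmatically, e.g. from a
# deployment script or a test. It assumes a configured Django environment; the zip
# path and the version/build numbers below are placeholders.
if __name__ == '__main__':
    from django.core.management import call_command

    # Register a specific build from a local CommCare zip artifact.
    call_command('add_commcare_build', '/path/to/commcare.zip', '2.53.0', 1)

    # Or fetch and register the latest released CommCare version from GitHub.
    call_command('add_commcare_build', latest=True)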
|
unittest/scripts/auto/py_devapi/validation/collection_create_index.py | mueller/mysql-shell | 119 | 11101080 | #@<OUT> Create an index on a single field. 1 (WL10858-FR1_1)
*************************** 1. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 1
Column_name: <<<idx_col_1>>>
Collation: A
Cardinality: 0
Sub_part: 10
Packed: NULL
Null: YES
Index_type: BTREE
Comment:
Index_comment:
Visible: YES
Expression: NULL
#@<OUT> Create an index on a single field. 2 (WL10858-FR1_1)
*************************** 1. row ***************************
Table: my_coll
Create Table: CREATE TABLE `my_coll` (
`doc` json DEFAULT NULL,
`_id` varbinary(32) GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$._id'))) STORED NOT NULL,
?{VER(>=8.0.19)}
`_json_schema` json GENERATED ALWAYS AS (_utf8mb4'{"type":"object"}') VIRTUAL,
?{}
`<<<idx_col_1>>>` text GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$.myField'))) VIRTUAL,
PRIMARY KEY (`_id`),
?{VER(<8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`(10))
?{}
?{VER(>=8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`(10)),
CONSTRAINT `$val_strict_98ECC39AA1BEFEB54F58E37A530CD5D1BD7631C5` CHECK (json_schema_valid(`_json_schema`,`doc`)) /*!80016 NOT ENFORCED */
?{}
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
#@<OUT> Create an index on a single field with all the possibles options. 1 (WL10858-FR1_2)
*************************** 1. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 1
Column_name: <<<idx_col_1>>>
Collation: A
Cardinality: 0
Sub_part: 10
Packed: NULL
Null:
Index_type: BTREE
Comment:
Index_comment:
Visible: YES
Expression: NULL
#@<OUT> Create an index on a single field with all the possibles options. 2 (WL10858-FR1_2)
*************************** 1. row ***************************
Table: my_coll
Create Table: CREATE TABLE `my_coll` (
`doc` json DEFAULT NULL,
`_id` varbinary(32) GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$._id'))) STORED NOT NULL,
?{VER(>=8.0.19)}
`_json_schema` json GENERATED ALWAYS AS (_utf8mb4'{"type":"object"}') VIRTUAL,
?{}
`<<<idx_col_1>>>` text GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$.myField'))) VIRTUAL NOT NULL,
PRIMARY KEY (`_id`),
?{VER(<8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`(10))
?{}
?{VER(>=8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`(10)),
CONSTRAINT `$val_strict_98ECC39AA1BEFEB54F58E37A530CD5D1BD7631C5` CHECK (json_schema_valid(`_json_schema`,`doc`)) /*!80016 NOT ENFORCED */
?{}
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
#@<OUT> Create an index on multiple fields 1 (WL10858-FR1_3)
*************************** 1. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 1
Column_name: <<<idx_col_1>>>
Collation: A
Cardinality: 0
Sub_part: 10
Packed: NULL
Null: YES
Index_type: BTREE
Comment:
Index_comment:
Visible: YES
Expression: NULL
*************************** 2. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 2
Column_name: <<<idx_col_2>>>
Collation: A
Cardinality: 0
Sub_part: 10
Packed: NULL
Null: YES
Index_type: BTREE
Comment:
Index_comment:
Visible: YES
Expression: NULL
*************************** 3. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 3
Column_name: <<<idx_col_3>>>
Collation: A
Cardinality: 0
Sub_part: NULL
Packed: NULL
Null: YES
Index_type: BTREE
Comment:
Index_comment:
Visible: YES
Expression: NULL
#@<OUT> Create an index on multiple fields 2 (WL10858-FR1_3)
*************************** 1. row ***************************
Table: my_coll
Create Table: CREATE TABLE `my_coll` (
`doc` json DEFAULT NULL,
`_id` varbinary(32) GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$._id'))) STORED NOT NULL,
?{VER(>=8.0.19)}
`_json_schema` json GENERATED ALWAYS AS (_utf8mb4'{"type":"object"}') VIRTUAL,
?{}
`<<<idx_col_1>>>` text GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$.myField'))) VIRTUAL,
`<<<idx_col_2>>>` text GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$.myField2'))) VIRTUAL,
?{VER(<8.0.19)}
`<<<idx_col_3>>>` int(11) GENERATED ALWAYS AS (json_extract(`doc`,_utf8mb4'$.myField3')) VIRTUAL,
?{}
?{VER(>=8.0.19)}
`<<<idx_col_3>>>` int GENERATED ALWAYS AS (json_extract(`doc`,_utf8mb4'$.myField3')) VIRTUAL,
?{}
PRIMARY KEY (`_id`),
?{VER(<8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`(10),`<<<idx_col_2>>>`(10),`<<<idx_col_3>>>`)
?{}
?{VER(>=8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`(10),`<<<idx_col_2>>>`(10),`<<<idx_col_3>>>`),
CONSTRAINT `$val_strict_98ECC39AA1BEFEB54F58E37A530CD5D1BD7631C5` CHECK (json_schema_valid(`_json_schema`,`doc`)) /*!80016 NOT ENFORCED */
?{}
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
#@<OUT> Create an index on multiple fields with all the possibles options. 1 (WL10858-FR1_4)
*************************** 1. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 1
Column_name: <<<idx_col_1>>>
Collation: A
Cardinality: 0
Sub_part: 10
Packed: NULL
Null: YES
Index_type: BTREE
Comment:
Index_comment:
Visible: YES
Expression: NULL
*************************** 2. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 2
Column_name: <<<idx_col_2>>>
Collation: A
Cardinality: 0
Sub_part: 10
Packed: NULL
Null:
Index_type: BTREE
Comment:
Index_comment:
Visible: YES
Expression: NULL
*************************** 3. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 3
Column_name: <<<idx_col_3>>>
Collation: A
Cardinality: 0
Sub_part: NULL
Packed: NULL
Null: YES
Index_type: BTREE
Comment:
Index_comment:
Visible: YES
Expression: NULL
#@<OUT> Create an index on multiple fields with all the possibles options. 2 (WL10858-FR1_4)
*************************** 1. row ***************************
Table: my_coll
Create Table: CREATE TABLE `my_coll` (
`doc` json DEFAULT NULL,
`_id` varbinary(32) GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$._id'))) STORED NOT NULL,
?{VER(>=8.0.19)}
`_json_schema` json GENERATED ALWAYS AS (_utf8mb4'{"type":"object"}') VIRTUAL,
?{}
`<<<idx_col_1>>>` text GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$.myField'))) VIRTUAL,
`<<<idx_col_2>>>` text GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$.myField2'))) VIRTUAL NOT NULL,
?{VER(<8.0.19)}
`<<<idx_col_3>>>` int(11) GENERATED ALWAYS AS (json_extract(`doc`,_utf8mb4'$.myField3')) VIRTUAL,
?{}
?{VER(>=8.0.19)}
`<<<idx_col_3>>>` int GENERATED ALWAYS AS (json_extract(`doc`,_utf8mb4'$.myField3')) VIRTUAL,
?{}
PRIMARY KEY (`_id`),
?{VER(<8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`(10),`<<<idx_col_2>>>`(10),`<<<idx_col_3>>>`)
?{}
?{VER(>=8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`(10),`<<<idx_col_2>>>`(10),`<<<idx_col_3>>>`),
CONSTRAINT `$val_strict_98ECC39AA1BEFEB54F58E37A530CD5D1BD7631C5` CHECK (json_schema_valid(`_json_schema`,`doc`)) /*!80016 NOT ENFORCED */
?{}
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
#@<OUT> Create an index using a geojson datatype field. 1 (WL10858-FR1_5)
*************************** 1. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 1
Column_name: <<<idx_col_1>>>
Collation: A
Cardinality: 0
Sub_part: 32
Packed: NULL
Null:
Index_type: SPATIAL
Comment:
Index_comment:
Visible: YES
Expression: NULL
#@<OUT> Create an index using a geojson datatype field. 2 (WL10858-FR1_5)
*************************** 1. row ***************************
Table: my_coll
Create Table: CREATE TABLE `my_coll` (
`doc` json DEFAULT NULL,
`_id` varbinary(32) GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$._id'))) STORED NOT NULL,
?{VER(>=8.0.19)}
`_json_schema` json GENERATED ALWAYS AS (_utf8mb4'{"type":"object"}') VIRTUAL,
?{}
`<<<idx_col_1>>>` geometry GENERATED ALWAYS AS (st_geomfromgeojson(json_extract(`doc`,_utf8mb4'$.myGeoJsonField'),1,4326)) STORED NOT NULL /*!80003 SRID 4326 */,
PRIMARY KEY (`_id`),
?{VER(<8.0.19)}
SPATIAL KEY `myIndex` (`<<<idx_col_1>>>`)
?{}
?{VER(>=8.0.19)}
SPATIAL KEY `myIndex` (`<<<idx_col_1>>>`),
CONSTRAINT `$val_strict_98ECC39AA1BEFEB54F58E37A530CD5D1BD7631C5` CHECK (json_schema_valid(`_json_schema`,`doc`)) /*!80016 NOT ENFORCED */
?{}
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
#@<OUT> Create an index using a geojson datatype field without specifying the required flag it should be set to True by default. 1 (WL10858-FR1_6)
*************************** 1. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 1
Column_name: <<<idx_col_1>>>
Collation: A
Cardinality: 0
Sub_part: 32
Packed: NULL
Null:
Index_type: SPATIAL
Comment:
Index_comment:
Visible: YES
Expression: NULL
#@<OUT> Create an index using a geojson datatype field without specifying the required flag it should be set to True by default. 2 (WL10858-FR1_6)
*************************** 1. row ***************************
Table: my_coll
Create Table: CREATE TABLE `my_coll` (
`doc` json DEFAULT NULL,
`_id` varbinary(32) GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$._id'))) STORED NOT NULL,
?{VER(>=8.0.19)}
`_json_schema` json GENERATED ALWAYS AS (_utf8mb4'{"type":"object"}') VIRTUAL,
?{}
`<<<idx_col_1>>>` geometry GENERATED ALWAYS AS (st_geomfromgeojson(json_extract(`doc`,_utf8mb4'$.myGeoJsonField'),1,4326)) STORED NOT NULL /*!80003 SRID 4326 */,
PRIMARY KEY (`_id`),
?{VER(<8.0.19)}
SPATIAL KEY `myIndex` (`<<<idx_col_1>>>`)
?{}
?{VER(>=8.0.19)}
SPATIAL KEY `myIndex` (`<<<idx_col_1>>>`),
CONSTRAINT `$val_strict_98ECC39AA1BEFEB54F58E37A530CD5D1BD7631C5` CHECK (json_schema_valid(`_json_schema`,`doc`)) /*!80016 NOT ENFORCED */
?{}
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
#@<OUT> Create an index using a geojson datatype field with all the possibles options. 1 (WL10858-FR1_7)
*************************** 1. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 1
Column_name: <<<idx_col_1>>>
Collation: A
Cardinality: 0
Sub_part: 32
Packed: NULL
Null:
Index_type: SPATIAL
Comment:
Index_comment:
Visible: YES
Expression: NULL
#@<OUT> Create an index using a geojson datatype field with all the possibles options. 2 (WL10858-FR1_7)
*************************** 1. row ***************************
Table: my_coll
Create Table: CREATE TABLE `my_coll` (
`doc` json DEFAULT NULL,
`_id` varbinary(32) GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$._id'))) STORED NOT NULL,
?{VER(>=8.0.19)}
`_json_schema` json GENERATED ALWAYS AS (_utf8mb4'{"type":"object"}') VIRTUAL,
?{}
`<<<idx_col_1>>>` geometry GENERATED ALWAYS AS (st_geomfromgeojson(json_extract(`doc`,_utf8mb4'$.myGeoJsonField'),2,4400)) STORED NOT NULL /*!80003 SRID 4400 */,
PRIMARY KEY (`_id`),
?{VER(<8.0.19)}
SPATIAL KEY `myIndex` (`<<<idx_col_1>>>`)
?{}
?{VER(>=8.0.19)}
SPATIAL KEY `myIndex` (`<<<idx_col_1>>>`),
CONSTRAINT `$val_strict_98ECC39AA1BEFEB54F58E37A530CD5D1BD7631C5` CHECK (json_schema_valid(`_json_schema`,`doc`)) /*!80016 NOT ENFORCED */
?{}
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
#@<OUT> Create an index using a datetime field. 1 (WL10858-FR1_8)
*************************** 1. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 1
Column_name: <<<idx_col_1>>>
Collation: A
Cardinality: 0
Sub_part: NULL
Packed: NULL
Null: YES
Index_type: BTREE
Comment:
Index_comment:
Visible: YES
Expression: NULL
#@<OUT> Create an index using a datetime field. 2 (WL10858-FR1_8)
*************************** 1. row ***************************
Table: my_coll
Create Table: CREATE TABLE `my_coll` (
`doc` json DEFAULT NULL,
`_id` varbinary(32) GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$._id'))) STORED NOT NULL,
?{VER(>=8.0.19)}
`_json_schema` json GENERATED ALWAYS AS (_utf8mb4'{"type":"object"}') VIRTUAL,
?{}
`<<<idx_col_1>>>` datetime GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$.myField'))) VIRTUAL,
PRIMARY KEY (`_id`),
?{VER(<8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`)
?{}
?{VER(>=8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`),
CONSTRAINT `$val_strict_98ECC39AA1BEFEB54F58E37A530CD5D1BD7631C5` CHECK (json_schema_valid(`_json_schema`,`doc`)) /*!80016 NOT ENFORCED */
?{}
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
#@<OUT> Create an index using a timestamp field. 1 (WL10858-FR1_9)
*************************** 1. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 1
Column_name: <<<idx_col_1>>>
Collation: A
Cardinality: 0
Sub_part: NULL
Packed: NULL
Null: YES
Index_type: BTREE
Comment:
Index_comment:
Visible: YES
Expression: NULL
#@<OUT> Create an index using a timestamp field. 2 (WL10858-FR1_9)
*************************** 1. row ***************************
Table: my_coll
Create Table: CREATE TABLE `my_coll` (
`doc` json DEFAULT NULL,
`_id` varbinary(32) GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$._id'))) STORED NOT NULL,
?{VER(>=8.0.19)}
`_json_schema` json GENERATED ALWAYS AS (_utf8mb4'{"type":"object"}') VIRTUAL,
?{}
`<<<idx_col_1>>>` timestamp GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$.myField'))) VIRTUAL NULL,
PRIMARY KEY (`_id`),
?{VER(<8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`)
?{}
?{VER(>=8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`),
CONSTRAINT `$val_strict_98ECC39AA1BEFEB54F58E37A530CD5D1BD7631C5` CHECK (json_schema_valid(`_json_schema`,`doc`)) /*!80016 NOT ENFORCED */
?{}
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
#@<OUT> Create an index using a time field. 1 (WL10858-FR1_10)
*************************** 1. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 1
Column_name: <<<idx_col_1>>>
Collation: A
Cardinality: 0
Sub_part: NULL
Packed: NULL
Null: YES
Index_type: BTREE
Comment:
Index_comment:
Visible: YES
Expression: NULL
#@<OUT> Create an index using a time field. 2 (WL10858-FR1_10)
*************************** 1. row ***************************
Table: my_coll
Create Table: CREATE TABLE `my_coll` (
`doc` json DEFAULT NULL,
`_id` varbinary(32) GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$._id'))) STORED NOT NULL,
?{VER(>=8.0.19)}
`_json_schema` json GENERATED ALWAYS AS (_utf8mb4'{"type":"object"}') VIRTUAL,
?{}
`<<<idx_col_1>>>` time GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$.myField'))) VIRTUAL,
PRIMARY KEY (`_id`),
?{VER(<8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`)
?{}
?{VER(>=8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`),
CONSTRAINT `$val_strict_98ECC39AA1BEFEB54F58E37A530CD5D1BD7631C5` CHECK (json_schema_valid(`_json_schema`,`doc`)) /*!80016 NOT ENFORCED */
?{}
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
#@<OUT> Create an index using a date field. 1 (WL10858-FR1_11)
*************************** 1. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 1
Column_name: <<<idx_col_1>>>
Collation: A
Cardinality: 0
Sub_part: NULL
Packed: NULL
Null: YES
Index_type: BTREE
Comment:
Index_comment:
Visible: YES
Expression: NULL
#@<OUT> Create an index using a date field. 2 (WL10858-FR1_11)
*************************** 1. row ***************************
Table: my_coll
Create Table: CREATE TABLE `my_coll` (
`doc` json DEFAULT NULL,
`_id` varbinary(32) GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$._id'))) STORED NOT NULL,
?{VER(>=8.0.19)}
`_json_schema` json GENERATED ALWAYS AS (_utf8mb4'{"type":"object"}') VIRTUAL,
?{}
`<<<idx_col_1>>>` date GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$.myField'))) VIRTUAL,
PRIMARY KEY (`_id`),
?{VER(<8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`)
?{}
?{VER(>=8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`),
CONSTRAINT `$val_strict_98ECC39AA1BEFEB54F58E37A530CD5D1BD7631C5` CHECK (json_schema_valid(`_json_schema`,`doc`)) /*!80016 NOT ENFORCED */
?{}
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
#@<OUT> Create an index using a numeric field. 1 (WL10858-FR1_12)
*************************** 1. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 1
Column_name: <<<idx_col_1>>>
Collation: A
Cardinality: 0
Sub_part: NULL
Packed: NULL
Null: YES
Index_type: BTREE
Comment:
Index_comment:
Visible: YES
Expression: NULL
#@<OUT> Create an index using a numeric field. 2 (WL10858-FR1_12)
*************************** 1. row ***************************
Table: my_coll
Create Table: CREATE TABLE `my_coll` (
`doc` json DEFAULT NULL,
`_id` varbinary(32) GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$._id'))) STORED NOT NULL,
?{VER(>=8.0.19)}
`_json_schema` json GENERATED ALWAYS AS (_utf8mb4'{"type":"object"}') VIRTUAL,
?{}
`<<<idx_col_1>>>` decimal(10,0) unsigned GENERATED ALWAYS AS (json_extract(`doc`,_utf8mb4'$.myField')) VIRTUAL,
PRIMARY KEY (`_id`),
?{VER(<8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`)
?{}
?{VER(>=8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`),
CONSTRAINT `$val_strict_98ECC39AA1BEFEB54F58E37A530CD5D1BD7631C5` CHECK (json_schema_valid(`_json_schema`,`doc`)) /*!80016 NOT ENFORCED */
?{}
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
#@<OUT> FR1_13 Create an index using a decimal field. 1 (WL10858-FR1_13)
*************************** 1. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 1
Column_name: <<<idx_col_1>>>
Collation: A
Cardinality: 0
Sub_part: NULL
Packed: NULL
Null: YES
Index_type: BTREE
Comment:
Index_comment:
Visible: YES
Expression: NULL
#@<OUT> FR1_13 Create an index using a decimal field. 2 (WL10858-FR1_13)
*************************** 1. row ***************************
Table: my_coll
Create Table: CREATE TABLE `my_coll` (
`doc` json DEFAULT NULL,
`_id` varbinary(32) GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$._id'))) STORED NOT NULL,
?{VER(>=8.0.19)}
`_json_schema` json GENERATED ALWAYS AS (_utf8mb4'{"type":"object"}') VIRTUAL,
?{}
`<<<idx_col_1>>>` decimal(10,0) GENERATED ALWAYS AS (json_extract(`doc`,_utf8mb4'$.myField')) VIRTUAL,
PRIMARY KEY (`_id`),
?{VER(<8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`)
?{}
?{VER(>=8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`),
CONSTRAINT `$val_strict_98ECC39AA1BEFEB54F58E37A530CD5D1BD7631C5` CHECK (json_schema_valid(`_json_schema`,`doc`)) /*!80016 NOT ENFORCED */
?{}
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
#@<OUT> Create an index using a double field. 1 (WL10858-FR1_14)
*************************** 1. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 1
Column_name: <<<idx_col_1>>>
Collation: A
Cardinality: 0
Sub_part: NULL
Packed: NULL
Null: YES
Index_type: BTREE
Comment:
Index_comment:
Visible: YES
Expression: NULL
#@<OUT> Create an index using a double field. 2 (WL10858-FR1_14)
*************************** 1. row ***************************
Table: my_coll
Create Table: CREATE TABLE `my_coll` (
`doc` json DEFAULT NULL,
`_id` varbinary(32) GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$._id'))) STORED NOT NULL,
?{VER(>=8.0.19)}
`_json_schema` json GENERATED ALWAYS AS (_utf8mb4'{"type":"object"}') VIRTUAL,
?{}
`<<<idx_col_1>>>` double GENERATED ALWAYS AS (json_extract(`doc`,_utf8mb4'$.myField')) VIRTUAL,
PRIMARY KEY (`_id`),
?{VER(<8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`)
?{}
?{VER(>=8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`),
CONSTRAINT `$val_strict_98ECC39AA1BEFEB54F58E37A530CD5D1BD7631C5` CHECK (json_schema_valid(`_json_schema`,`doc`)) /*!80016 NOT ENFORCED */
?{}
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
#@<OUT> Create an index using a float field. 1 (WL10858-FR1_15)
*************************** 1. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 1
Column_name: <<<idx_col_1>>>
Collation: A
Cardinality: 0
Sub_part: NULL
Packed: NULL
Null: YES
Index_type: BTREE
Comment:
Index_comment:
Visible: YES
Expression: NULL
#@<OUT> Create an index using a float field. 2 (WL10858-FR1_15)
*************************** 1. row ***************************
Table: my_coll
Create Table: CREATE TABLE `my_coll` (
`doc` json DEFAULT NULL,
`_id` varbinary(32) GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$._id'))) STORED NOT NULL,
?{VER(>=8.0.19)}
`_json_schema` json GENERATED ALWAYS AS (_utf8mb4'{"type":"object"}') VIRTUAL,
?{}
`<<<idx_col_1>>>` float unsigned GENERATED ALWAYS AS (json_extract(`doc`,_utf8mb4'$.myField')) VIRTUAL,
PRIMARY KEY (`_id`),
?{VER(<8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`)
?{}
?{VER(>=8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`),
CONSTRAINT `$val_strict_98ECC39AA1BEFEB54F58E37A530CD5D1BD7631C5` CHECK (json_schema_valid(`_json_schema`,`doc`)) /*!80016 NOT ENFORCED */
?{}
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
#@<OUT> Create an index using a real field. 1 (WL10858-FR1_16)
*************************** 1. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 1
Column_name: <<<idx_col_1>>>
Collation: A
Cardinality: 0
Sub_part: NULL
Packed: NULL
Null: YES
Index_type: BTREE
Comment:
Index_comment:
Visible: YES
Expression: NULL
#@<OUT> Create an index using a real field. 2 (WL10858-FR1_16)
*************************** 1. row ***************************
Table: my_coll
Create Table: CREATE TABLE `my_coll` (
`doc` json DEFAULT NULL,
`_id` varbinary(32) GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$._id'))) STORED NOT NULL,
?{VER(>=8.0.19)}
`_json_schema` json GENERATED ALWAYS AS (_utf8mb4'{"type":"object"}') VIRTUAL,
?{}
`<<<idx_col_1>>>` double unsigned GENERATED ALWAYS AS (json_extract(`doc`,_utf8mb4'$.myField')) VIRTUAL,
PRIMARY KEY (`_id`),
?{VER(<8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`)
?{}
?{VER(>=8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`),
CONSTRAINT `$val_strict_98ECC39AA1BEFEB54F58E37A530CD5D1BD7631C5` CHECK (json_schema_valid(`_json_schema`,`doc`)) /*!80016 NOT ENFORCED */
?{}
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
#@<OUT> Create an index using a bigint field. 1 (WL10858-FR1_17)
*************************** 1. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 1
Column_name: <<<idx_col_1>>>
Collation: A
Cardinality: 0
Sub_part: NULL
Packed: NULL
Null: YES
Index_type: BTREE
Comment:
Index_comment:
Visible: YES
Expression: NULL
#@<OUT> Create an index using a bigint field. 2 (WL10858-FR1_17)
*************************** 1. row ***************************
Table: my_coll
Create Table: CREATE TABLE `my_coll` (
`doc` json DEFAULT NULL,
`_id` varbinary(32) GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$._id'))) STORED NOT NULL,
?{VER(<8.0.19)}
`<<<idx_col_1>>>` bigint(20) GENERATED ALWAYS AS (json_extract(`doc`,_utf8mb4'$.myField')) VIRTUAL,
?{}
?{VER(>=8.0.19)}
`_json_schema` json GENERATED ALWAYS AS (_utf8mb4'{"type":"object"}') VIRTUAL,
`<<<idx_col_1>>>` bigint GENERATED ALWAYS AS (json_extract(`doc`,_utf8mb4'$.myField')) VIRTUAL,
?{}
PRIMARY KEY (`_id`),
?{VER(<8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`)
?{}
?{VER(>=8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`),
CONSTRAINT `$val_strict_98ECC39AA1BEFEB54F58E37A530CD5D1BD7631C5` CHECK (json_schema_valid(`_json_schema`,`doc`)) /*!80016 NOT ENFORCED */
?{}
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
#@<OUT> Create an index using a integer field. 1 (WL10858-FR1_18)
*************************** 1. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 1
Column_name: <<<idx_col_1>>>
Collation: A
Cardinality: 0
Sub_part: NULL
Packed: NULL
Null: YES
Index_type: BTREE
Comment:
Index_comment:
Visible: YES
Expression: NULL
#@<OUT> Create an index using a integer field. 2 (WL10858-FR1_18)
*************************** 1. row ***************************
Table: my_coll
Create Table: CREATE TABLE `my_coll` (
`doc` json DEFAULT NULL,
`_id` varbinary(32) GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$._id'))) STORED NOT NULL,
?{VER(<8.0.19)}
`<<<idx_col_1>>>` int(10) unsigned GENERATED ALWAYS AS (json_extract(`doc`,_utf8mb4'$.myField')) VIRTUAL,
?{}
?{VER(>=8.0.19)}
`_json_schema` json GENERATED ALWAYS AS (_utf8mb4'{"type":"object"}') VIRTUAL,
`<<<idx_col_1>>>` int unsigned GENERATED ALWAYS AS (json_extract(`doc`,_utf8mb4'$.myField')) VIRTUAL,
?{}
PRIMARY KEY (`_id`),
?{VER(<8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`)
?{}
?{VER(>=8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`),
CONSTRAINT `$val_strict_98ECC39AA1BEFEB54F58E37A530CD5D1BD7631C5` CHECK (json_schema_valid(`_json_schema`,`doc`)) /*!80016 NOT ENFORCED */
?{}
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
#@<OUT> Create an index using a mediumint field. 1 (WL10858-FR1_19)
*************************** 1. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 1
Column_name: <<<idx_col_1>>>
Collation: A
Cardinality: 0
Sub_part: NULL
Packed: NULL
Null: YES
Index_type: BTREE
Comment:
Index_comment:
Visible: YES
Expression: NULL
#@<OUT> Create an index using a mediumint field. 2 (WL10858-FR1_19)
*************************** 1. row ***************************
Table: my_coll
Create Table: CREATE TABLE `my_coll` (
`doc` json DEFAULT NULL,
`_id` varbinary(32) GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$._id'))) STORED NOT NULL,
?{VER(<8.0.19)}
`<<<idx_col_1>>>` mediumint(8) unsigned GENERATED ALWAYS AS (json_extract(`doc`,_utf8mb4'$.myField')) VIRTUAL,
?{}
?{VER(>=8.0.19)}
`_json_schema` json GENERATED ALWAYS AS (_utf8mb4'{"type":"object"}') VIRTUAL,
`<<<idx_col_1>>>` mediumint unsigned GENERATED ALWAYS AS (json_extract(`doc`,_utf8mb4'$.myField')) VIRTUAL,
?{}
PRIMARY KEY (`_id`),
?{VER(<8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`)
?{}
?{VER(>=8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`),
CONSTRAINT `$val_strict_98ECC39AA1BEFEB54F58E37A530CD5D1BD7631C5` CHECK (json_schema_valid(`_json_schema`,`doc`)) /*!80016 NOT ENFORCED */
?{}
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
#@<OUT> Create an index using a smallint field. 1 (WL10858-FR1_20)
*************************** 1. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 1
Column_name: <<<idx_col_1>>>
Collation: A
Cardinality: 0
Sub_part: NULL
Packed: NULL
Null: YES
Index_type: BTREE
Comment:
Index_comment:
Visible: YES
Expression: NULL
#@<OUT> Create an index using a smallint field. 2 (WL10858-FR1_20)
*************************** 1. row ***************************
Table: my_coll
Create Table: CREATE TABLE `my_coll` (
`doc` json DEFAULT NULL,
`_id` varbinary(32) GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$._id'))) STORED NOT NULL,
?{VER(<8.0.19)}
`<<<idx_col_1>>>` smallint(6) GENERATED ALWAYS AS (json_extract(`doc`,_utf8mb4'$.myField')) VIRTUAL,
?{}
?{VER(>=8.0.19)}
`_json_schema` json GENERATED ALWAYS AS (_utf8mb4'{"type":"object"}') VIRTUAL,
`<<<idx_col_1>>>` smallint GENERATED ALWAYS AS (json_extract(`doc`,_utf8mb4'$.myField')) VIRTUAL,
?{}
PRIMARY KEY (`_id`),
?{VER(<8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`)
?{}
?{VER(>=8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`),
CONSTRAINT `$val_strict_98ECC39AA1BEFEB54F58E37A530CD5D1BD7631C5` CHECK (json_schema_valid(`_json_schema`,`doc`)) /*!80016 NOT ENFORCED */
?{}
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
#@<OUT> Create an index using a tinyint field. 1 (WL10858-FR1_21)
*************************** 1. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 1
Column_name: <<<idx_col_1>>>
Collation: A
Cardinality: 0
Sub_part: NULL
Packed: NULL
Null: YES
Index_type: BTREE
Comment:
Index_comment:
Visible: YES
Expression: NULL
#@<OUT> Create an index using a tinyint field. 2 (WL10858-FR1_21)
*************************** 1. row ***************************
Table: my_coll
Create Table: CREATE TABLE `my_coll` (
`doc` json DEFAULT NULL,
`_id` varbinary(32) GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$._id'))) STORED NOT NULL,
?{VER(<8.0.19)}
`<<<idx_col_1>>>` tinyint(3) unsigned GENERATED ALWAYS AS (json_extract(`doc`,_utf8mb4'$.myField')) VIRTUAL,
?{}
?{VER(>=8.0.19)}
`_json_schema` json GENERATED ALWAYS AS (_utf8mb4'{"type":"object"}') VIRTUAL,
`<<<idx_col_1>>>` tinyint unsigned GENERATED ALWAYS AS (json_extract(`doc`,_utf8mb4'$.myField')) VIRTUAL,
?{}
PRIMARY KEY (`_id`),
?{VER(<8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`)
?{}
?{VER(>=8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`),
CONSTRAINT `$val_strict_98ECC39AA1BEFEB54F58E37A530CD5D1BD7631C5` CHECK (json_schema_valid(`_json_schema`,`doc`)) /*!80016 NOT ENFORCED */
?{}
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
#@<OUT> Verify that the drop_index function removes the index entry from the table schema of a collection. 1 (WL10858-FR4_1)
*************************** 1. row ***************************
Table: my_coll
Non_unique: 1
Key_name: myIndex
Seq_in_index: 1
Column_name: <<<idx_col_1>>>
Collation: A
Cardinality: 0
Sub_part: 10
Packed: NULL
Null: YES
Index_type: BTREE
Comment:
Index_comment:
Visible: YES
Expression: NULL
#@<OUT> Verify that the drop_index function removes the index entry from the table schema of a collection. 2 (WL10858-FR4_1)
*************************** 1. row ***************************
Table: my_coll
Create Table: CREATE TABLE `my_coll` (
`doc` json DEFAULT NULL,
`_id` varbinary(32) GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$._id'))) STORED NOT NULL,
?{VER(>=8.0.19)}
`_json_schema` json GENERATED ALWAYS AS (_utf8mb4'{"type":"object"}') VIRTUAL,
?{}
`<<<idx_col_1>>>` text GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$.myField'))) VIRTUAL,
PRIMARY KEY (`_id`),
?{VER(<8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`(10))
?{}
?{VER(>=8.0.19)}
KEY `myIndex` (`<<<idx_col_1>>>`(10)),
CONSTRAINT `$val_strict_98ECC39AA1BEFEB54F58E37A530CD5D1BD7631C5` CHECK (json_schema_valid(`_json_schema`,`doc`)) /*!80016 NOT ENFORCED */
?{}
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
#@ Verify that the drop_index function removes the index entry from the table schema of a collection. 3 (WL10858-FR4_1)
|Empty set|
#@<OUT> Verify that the drop_index function removes the index entry from the table schema of a collection. 4 (WL10858-FR4_1)
*************************** 1. row ***************************
Table: my_coll
Create Table: CREATE TABLE `my_coll` (
`doc` json DEFAULT NULL,
`_id` varbinary(32) GENERATED ALWAYS AS (json_unquote(json_extract(`doc`,_utf8mb4'$._id'))) STORED NOT NULL,
?{VER(<8.0.19)}
PRIMARY KEY (`_id`)
?{}
?{VER(>=8.0.19)}
`_json_schema` json GENERATED ALWAYS AS (_utf8mb4'{"type":"object"}') VIRTUAL,
PRIMARY KEY (`_id`),
CONSTRAINT `$val_strict_98ECC39AA1BEFEB54F58E37A530CD5D1BD7631C5` CHECK (json_schema_valid(`_json_schema`,`doc`)) /*!80016 NOT ENFORCED */
?{}
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
#@ Verify that the dropIndex silently succeeds if the index does not exist. (WL10858-FR4_2)
||
#@ Create an index with the name of an index that already exists. (WL10858-FR5_2)
||MySQL Error (1061): Duplicate key name 'myIndex'
#@ Create an index with a not valid JSON document definition. (WL10858-FR5_3) {sys.version_info[:2] < (3, 8)}
||coll.create_index('myIndex', {'fields': [{'field' = '$.myField', type = 'TEXT(10)'}]})
|| ^
||SyntaxError: invalid syntax
||coll.create_index('myIndex', {'fields': [{'field': '$.myField', 'type': 'TEXT(10)']})
|| ^
||SyntaxError: invalid syntax
||coll.create_index('myIndex', {'fields': [{'field': '$.myField', 'type': 'TEXT(10)'}})
|| ^
||SyntaxError: invalid syntax
#@ Create an index with a not valid JSON document definition. (WL10858-FR5_3) {sys.version_info[:2] >= (3, 8)}
||coll.create_index('myIndex', {'fields': [{'field' = '$.myField', type = 'TEXT(10)'}]})
|| ^
||SyntaxError: invalid syntax
||SyntaxError: closing parenthesis ']' does not match opening parenthesis '{'
||SyntaxError: closing parenthesis '}' does not match opening parenthesis '['
#@ Create an index where its definition is a JSON document but its structure is not valid. (WL10858-FR5_4)
||MySQL Error (5015): Invalid number of arguments, expected value for 'fields[0].field'
#@ Create an index with the index type not "INDEX" or "SPATIAL" (case insensitive). (WL10858-FR5_5)
||MySQL Error (5017): Argument value 'IDX' for index type is invalid
||MySQL Error (5017): Argument value 'SPATIAL_' for index type is invalid
||MySQL Error (5017): Argument value 'INVALID' for index type is invalid
#@ Create a 'SPATIAL' index with "required" flag set to False. (WL10858-FR5_6)
||MySQL Error (5117): GEOJSON index requires 'field.required: TRUE
#@ Create an index with an invalid "type" specified (type names are case insensitive). (WL10858-FR5_7)
||MySQL Error (5017): Invalid or unsupported type specification '_Text(10)'
||MySQL Error (5017): Invalid or unsupported type specification 'Invalid'
||MySQL Error (5017): Invalid or unsupported type specification 'Timestamps'
||MySQL Error (5017): Invalid or unsupported type specification 'Dates'
#@ Create an index specifiying geojson options for non geojson data type. (WL10858-FR5_8)
||MySQL Error (5017): Unsupported argument specification for '$.myField'
#@ Create an index with mismatched data types (WL10858-ET_1)
||MySQL Error (1292): Incorrect datetime value: '10' for column
#@ Create an index specifiying SPATIAL as the index type for a non spatial data type (WL10858-ET_2)
||MySQL Error (3106): 'Spatial index on virtual generated column' is not supported for generated columns.
#@ Create an index specifiying INDEX as the index type for a spatial data type (WL10858-ET_3)
||Column '$ix_gj_r_B4C4FDF5AD30671EF010BCE1E67FA76778A889F7' cannot be null
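# Illustrative note (not part of the original expectations): <<<idx_col_N>>> stands for
# a generated index column name and ?{VER(...)} ... ?{} marks version-conditional
# output. The expectations above validate X DevAPI calls of roughly this shape (the
# schema name below is a placeholder; collection, index and field names match the
# ones used throughout this file):
#
#   coll = session.get_schema('my_schema').get_collection('my_coll')
#   coll.create_index('myIndex', {'fields': [{'field': '$.myField', 'type': 'TEXT(10)'}]})
#   coll.drop_index('myIndex')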
|
rllib/examples/env/utils/interfaces.py | mgelbart/ray | 21,382 | 11101138 | ##########
# Contribution by the Center on Long-Term Risk:
# https://github.com/longtermrisk/marltoolbox
##########
from abc import ABC, abstractmethod
class InfoAccumulationInterface(ABC):
@abstractmethod
def _init_info(self):
raise NotImplementedError()
@abstractmethod
def _reset_info(self):
raise NotImplementedError()
@abstractmethod
def _get_episode_info(self):
raise NotImplementedError()
@abstractmethod
def _accumulate_info(self, *args, **kwargs):
raise NotImplementedError()
|
loris/utils.py | munnellg/loris | 150 | 11101145 |
import errno
import logging
import os
import shutil
import uuid
logger = logging.getLogger(__name__)
def symlink(src, dst):
"""Create a symlink from ``src`` to ``dst``.
Creates any required intermediate directories, and overrides any existing
file at ``dst``.
"""
if src == dst:
logger.warn(
'Circular symlink requested from %s to %s; not creating symlink',
src, dst)
return
os.makedirs(os.path.dirname(dst), exist_ok=True)
# Shouldn't be the case, but helps with debugging.
if os.path.lexists(dst):
os.unlink(dst)
os.symlink(src, dst)
def safe_rename(src, dst):
"""Rename a file from ``src`` to ``dst``.
We use a custom version rather than the standard library because we
have two requirements:
* Moves must be atomic. Otherwise Loris may serve a partial image from
a cache, which causes an error. ``shutil.move()`` is not atomic.
Note that multiple threads may try to write to the cache at once,
so atomicity is required to ensure the serving on one thread doesn't
pick up a partially saved image from another thread.
* Moves must work across filesystems. Often temp directories and the
cache directories live on different filesystems. ``os.rename()`` can
throw errors if run across filesystems.
So we try ``os.rename()``, but if we detect a cross-filesystem copy, we
switch to ``shutil.move()`` with some wrappers to make it atomic.
"""
logger.debug('Renaming %r to %r', src, dst)
try:
os.rename(src, dst)
except OSError as err:
logger.debug('Calling os.rename(%r, %r) failed with %r', src, dst, err)
if err.errno == errno.EXDEV:
# Generate a unique ID, and copy `<src>` to the target directory
# with a temporary name `<dst>.<ID>.tmp`. Because we're copying
# across a filesystem boundary, this initial copy may not be
# atomic. We intersperse a random UUID so if different processes
# are copying into `<dst>`, they don't overlap in their tmp copies.
mole_id = uuid.uuid4()
tmp_dst = '%s.%s.tmp' % (dst, mole_id)
shutil.copyfile(src, tmp_dst)
# Then do an atomic rename onto the new name, and clean up the
# source image.
os.rename(tmp_dst, dst)
os.unlink(src)
else:
raise
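

# --- Illustrative usage sketch (not part of the original module) ------------------
# A minimal example of the write-then-rename pattern the docstring above describes:
# write the whole payload to a temporary file first, then move it into the cache with
# safe_rename() so readers never observe a partial image. Paths here are placeholders.
def _example_cache_write(image_bytes, cache_path):
    import tempfile

    # Write the complete payload somewhere temporary (possibly another filesystem).
    fd, tmp_path = tempfile.mkstemp()
    with os.fdopen(fd, 'wb') as f:
        f.write(image_bytes)

    # Atomically move it into place; safe_rename() falls back to a copy-plus-rename
    # when the temp file and the cache live on different filesystems.
    safe_rename(tmp_path, cache_path)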
def decode_bytes(data):
try:
return data.decode('utf8')
except UnicodeDecodeError:
return data.decode('latin1')
|
omega_miya/utils/bilibili_utils/request_utils.py | rinrini001/omega-miya | 120 | 11101165 | from nonebot import get_driver
from omega_miya.utils.omega_plugin_utils import HttpFetcher, PicEncoder
from omega_miya.database import Result
__GLOBAL_CONFIG = get_driver().config
BILI_SESSDATA = __GLOBAL_CONFIG.bili_sessdata
BILI_CSRF = __GLOBAL_CONFIG.bili_csrf
BILI_UID = __GLOBAL_CONFIG.bili_uid
class BiliRequestUtils(object):
HEADERS = {'accept': 'application/json, text/plain, */*',
'accept-encoding': 'gzip, deflate',
'accept-language': 'zh-CN,zh;q=0.9',
'dnt': '1',
'origin': 'https://www.bilibili.com',
'referer': 'https://www.bilibili.com/',
'sec-ch-ua': '"Google Chrome";v="89", "Chromium";v="89", ";Not A Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-site',
'sec-gpc': '1',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/89.0.4389.114 Safari/537.36'
}
@classmethod
def get_bili_uid(cls):
return BILI_UID
@classmethod
def get_bili_csrf(cls):
return BILI_CSRF
@classmethod
def get_bili_sessdata(cls):
return BILI_SESSDATA
@classmethod
def get_cookies(cls) -> Result.DictResult:
cookies = {}
if BILI_SESSDATA and BILI_CSRF:
cookies.update({'SESSDATA': BILI_SESSDATA})
cookies.update({'bili_jct': BILI_CSRF})
return Result.DictResult(error=False, info='Success', result=cookies)
else:
return Result.DictResult(error=True, info='None', result=cookies)
async def verify_cookies(self) -> Result.TextResult:
cookies_result = self.get_cookies()
if cookies_result.error:
return Result.TextResult(error=True, info='No cookies configs', result='')
cookies_verify_url = 'https://api.bilibili.com/x/web-interface/nav'
cookies = cookies_result.result
fetcher = HttpFetcher(timeout=10, flag='bilibili_live_monitor', headers=self.HEADERS, cookies=cookies)
result = await fetcher.get_json(url=cookies_verify_url)
if result.success():
code = result.result.get('code')
data = dict(result.result.get('data'))
if code == 0 and data.get('isLogin'):
uname = data.get('uname')
mid = data.get('mid')
if mid == BILI_UID:
return Result.TextResult(error=False, info='Success login', result=uname)
else:
return Result.TextResult(error=True, info='Logged user UID does not match', result=uname)
else:
return Result.TextResult(error=True, info='Not login', result='')
else:
return Result.TextResult(error=True, info=result.info, result='')
@classmethod
    # Convert an image URL to a base64-encoded string
async def pic_to_base64(cls, url: str) -> Result.TextResult:
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/89.0.4389.114 Safari/537.36',
'origin': 'https://www.bilibili.com',
'referer': 'https://www.bilibili.com/'}
fetcher = HttpFetcher(
timeout=30, attempt_limit=2, flag='bilibili_live_monitor_get_image', headers=headers)
bytes_result = await fetcher.get_bytes(url=url)
if bytes_result.error:
return Result.TextResult(error=True, info='Image download failed', result='')
encode_result = PicEncoder.bytes_to_b64(image=bytes_result.result)
if encode_result.success():
return Result.TextResult(error=False, info='Success', result=encode_result.result)
else:
return Result.TextResult(error=True, info=encode_result.info, result='')
@classmethod
async def pic_to_file(cls, url: str) -> Result.TextResult:
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/89.0.4389.114 Safari/537.36',
'origin': 'https://www.bilibili.com',
'referer': 'https://www.bilibili.com/'}
fetcher = HttpFetcher(
timeout=30, attempt_limit=2, flag='bilibili_live_monitor_get_image', headers=headers)
bytes_result = await fetcher.get_bytes(url=url)
if bytes_result.error:
return Result.TextResult(error=True, info='Image download failed', result='')
encode_result = await PicEncoder.bytes_to_file(image=bytes_result.result, folder_flag='bilibili')
if encode_result.success():
return Result.TextResult(error=False, info='Success', result=encode_result.result)
else:
return Result.TextResult(error=True, info=encode_result.info, result='')
__all__ = [
'BiliRequestUtils'
]
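
# Illustrative usage sketch (not part of the original module): these helpers are meant
# to be awaited from plugin code once bili_sessdata / bili_csrf / bili_uid are set in
# the bot config, e.g.
#
#     utils = BiliRequestUtils()
#     verify_result = await utils.verify_cookies()
#     if verify_result.error:
#         pass  # cookies missing, expired, or logged-in UID mismatch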
|
cassiopeia/dto/league.py | artemigkh/cassiopeia | 437 | 11101235 |
from .common import DtoObject
class MiniSeriesDto(DtoObject):
pass
class LeagueEntryDto(DtoObject):
pass
class LeagueDto(DtoObject):
pass
class LeagueSummonerEntriesDto(DtoObject):
pass
class LeagueEntriesDto(DtoObject):
pass
class ChallengerLeagueListDto(DtoObject):
pass
class GrandmasterLeagueListDto(DtoObject):
pass
class MasterLeagueListDto(DtoObject):
pass
|
tests/openwisp2/sample_integration_device/apps.py | scloudvn/openwisp-network-topology | 105 | 11101258 | from openwisp_network_topology.integrations.device.apps import (
OpenwispTopologyDeviceConfig as BaseAppConfig,
)
class OpenwispTopologyDeviceConfig(BaseAppConfig):
name = 'openwisp2.sample_integration_device'
label = 'sample_integration_device'
|
venv/lib/python3.9/site-packages/pendulum/lang/pl.py | qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3 | 224 | 11101260 | # -*- coding: utf-8 -*-
translations = {
# Days
'days': {
0: 'niedziela',
1: 'poniedziałek',
2: 'wtorek',
3: 'środa',
4: 'czwartek',
5: 'piątek',
6: 'sobota'
},
'days_abbrev': {
0: 'Nd',
1: 'Pn',
2: 'Wt',
3: 'Śr',
4: 'Czw',
5: 'Pt',
6: 'So'
},
# Months
'months': {
1: 'styczeń',
2: 'luty',
3: 'marzec',
4: 'kwiecień',
5: 'maj',
6: 'czerwiec',
7: 'lipiec',
8: 'sierpień',
9: 'wrzesień',
10: 'październik',
11: 'listopad',
12: 'grudzień',
},
'months_abbrev': {
1: 'sty',
2: 'lut',
3: 'mar',
4: 'kwi',
5: 'maj',
6: 'cze',
7: 'lip',
8: 'sie',
9: 'wrz',
10: 'paź',
11: 'lis',
12: 'gru',
},
# Units of time
'year': ['{count} rok', '{count} lata', '{count} lat'],
'month': ['{count} miesiąc', '{count} miesiące', '{count} miesięcy'],
'week': ['{count} tydzień', '{count} tygodnie', '{count} tygodni'],
'day': ['{count} dzień', '{count} dni', '{count} dni'],
'hour': ['{count} godzina', '{count} godziny', '{count} godzin'],
'minute': ['{count} minuta', '{count} minuty', '{count} minut'],
'second': ['{count} sekunda', '{count} sekundy', '{count} sekund'],
# Relative time
'ago': '{time} temu',
'from_now': '{time} od teraz',
'after': '{time} po',
'before': '{time} przed',
# Date formats
'date_formats': {
'LTS': 'HH:mm:ss',
'LT': 'HH:mm',
'LLLL': 'dddd, D MMMM YYYY HH:mm',
'LLL': 'D MMMM YYYY HH:mm',
'LL': 'D MMMM YYYY',
'L': 'DD.MM.YYYY',
},
}
|
tests/classification/interpret/sst_test.py | shunk031/allennlp-models | 402 | 11101278 |
import pytest
def test_gradient_visualization():
from allennlp.predictors.predictor import Predictor
predictor = Predictor.from_path(
"https://storage.googleapis.com/allennlp-public-models/sst-roberta-large-2020.06.08.tar.gz"
)
sentence = "a very well-made, funny and entertaining picture."
inputs = {"sentence": sentence}
from allennlp.interpret.saliency_interpreters import SimpleGradient
simple_gradient_interpreter = SimpleGradient(predictor)
simple_gradient_interpretation = simple_gradient_interpreter.saliency_interpret_from_json(
inputs
)
gradients = simple_gradient_interpretation["instance_1"]["grad_input_1"]
assert max(gradients) - min(gradients) < 0.75
|
utils/maintenance.py | goztrk/django-htk | 206 | 11101299 | # HTK Imports
from htk.utils import htk_setting
def is_maintenance_mode():
maintenance_mode = htk_setting('HTK_MAINTENANCE_MODE', False)
return maintenance_mode
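

# Illustrative sketch (not part of the original module): a view or middleware could use
# the flag above to short-circuit requests during maintenance. The response body and
# status code below are placeholders, not part of HTK.
def _example_maintenance_gate(request):
    from django.http import HttpResponse

    if is_maintenance_mode():
        return HttpResponse('Temporarily down for maintenance', status=503)
    return None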
|
tekore/_client/api/player/modify.py | evanofslack/tekore | 135 | 11101301 |
from typing import Union
from tekore._auth import scope
from tekore.model import RepeatState
from tekore._convert import to_uri
from ...base import SpotifyBase
from ...decor import send_and_process, scopes
from ...process import nothing
def offset_to_dict(offset: Union[int, str]):
"""
Parse playback start offset to an appropriate payload member.
If offset is an integer, it is an index to a track position.
If it is a string, it is a URI of a specific track.
"""
if isinstance(offset, int):
return {'position': offset}
elif isinstance(offset, str):
return {'uri': to_uri('track', offset)}
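
# For illustration (not part of the original module): an integer offset selects a
# position within the tracks, while a string is treated as a track ID and expanded
# to a URI ('1a2b3c' is a placeholder ID).
#
#     offset_to_dict(5)        -> {'position': 5}
#     offset_to_dict('1a2b3c') -> {'uri': 'spotify:track:1a2b3c'}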
class SpotifyPlayerModify(SpotifyBase):
"""Player API endpoints that modify state."""
@scopes([scope.user_modify_playback_state])
@send_and_process(nothing)
def playback_transfer(self, device_id: str, force_play: bool = False) -> None:
"""
Transfer playback to another device.
Parameters
----------
device_id
device to transfer playback to
force_play
true: play after transfer, false: keep current state
"""
payload = {
'device_ids': [device_id],
'play': force_play
}
return self._put('me/player', payload=payload)
@scopes([scope.user_modify_playback_state])
@send_and_process(nothing)
def playback_resume(self, device_id: str = None) -> None:
"""
Resume user's playback.
Parameters
----------
device_id
device to start playback on
"""
return self._put('me/player/play', device_id=device_id)
@scopes([scope.user_modify_playback_state])
@send_and_process(nothing)
def playback_start_tracks(
self,
track_ids: list,
offset: Union[int, str] = None,
position_ms: int = None,
device_id: str = None
) -> None:
"""
Start playback of one or more tracks.
Parameters
----------
track_ids
track IDs to start playing
offset
offset into tracks by index or track ID
position_ms
initial position of first played track
device_id
device to start playback on
"""
payload = {
'uris': [to_uri('track', t) for t in track_ids],
'offset': offset_to_dict(offset),
'position_ms': position_ms,
}
payload = {k: v for k, v in payload.items() if v is not None}
return self._put('me/player/play', payload=payload, device_id=device_id)
@scopes([scope.user_modify_playback_state])
@send_and_process(nothing)
def playback_start_context(
self,
context_uri: str,
offset: Union[int, str] = None,
position_ms: int = None,
device_id: str = None
) -> None:
"""
Start playback of a context: an album, artist or playlist.
Parameters
----------
context_uri
context to start playing
offset
offset into context by index or track ID,
only available when context is an album or playlist
position_ms
initial position of first played track
device_id
device to start playback on
"""
payload = {
'context_uri': context_uri,
'offset': offset_to_dict(offset),
'position_ms': position_ms,
}
payload = {k: v for k, v in payload.items() if v is not None}
return self._put('me/player/play', payload=payload, device_id=device_id)
@scopes([scope.user_modify_playback_state])
@send_and_process(nothing)
def playback_queue_add(self, uri: str, device_id: str = None) -> None:
"""
Add a track or an episode to a user's queue.
Parameters
----------
uri
resource to add, track or episode
device_id
            device to extend the queue on
"""
return self._post('me/player/queue', uri=uri, device_id=device_id)
@scopes([scope.user_modify_playback_state])
@send_and_process(nothing)
def playback_pause(self, device_id: str = None) -> None:
"""
Pause a user's playback.
Parameters
----------
device_id
device to pause playback on
"""
return self._put('me/player/pause', device_id=device_id)
@scopes([scope.user_modify_playback_state])
@send_and_process(nothing)
def playback_next(self, device_id: str = None) -> None:
"""
Skip user's playback to next track.
Parameters
----------
device_id
device to skip track on
"""
return self._post('me/player/next', device_id=device_id)
@scopes([scope.user_modify_playback_state])
@send_and_process(nothing)
def playback_previous(self, device_id: str = None) -> None:
"""
Skip user's playback to previous track.
Parameters
----------
device_id
device to skip track on
"""
return self._post('me/player/previous', device_id=device_id)
@scopes([scope.user_modify_playback_state])
@send_and_process(nothing)
def playback_seek(self, position_ms: int, device_id: str = None) -> None:
"""
Seek to position in current playing track.
Parameters
----------
position_ms
position on track
device_id
device to seek on
"""
return self._put(
'me/player/seek',
position_ms=position_ms,
device_id=device_id
)
@scopes([scope.user_modify_playback_state])
@send_and_process(nothing)
def playback_repeat(
self,
state: Union[str, RepeatState],
device_id: str = None
) -> None:
"""
Set repeat mode for playback.
Parameters
----------
state
`track`, `context`, or `off`
device_id
device to set repeat on
"""
return self._put('me/player/repeat', state=str(state), device_id=device_id)
@scopes([scope.user_modify_playback_state])
@send_and_process(nothing)
def playback_shuffle(self, state: bool, device_id: str = None) -> None:
"""
Toggle shuffle for user's playback.
Parameters
----------
state
shuffle state
device_id
device to toggle shuffle on
"""
state = 'true' if state else 'false'
return self._put('me/player/shuffle', state=state, device_id=device_id)
@scopes([scope.user_modify_playback_state])
@send_and_process(nothing)
def playback_volume(self, volume_percent: int, device_id: str = None) -> None:
"""
Set volume for user's playback.
Parameters
----------
volume_percent
volume to set (0..100)
device_id
device to set volume on
"""
return self._put(
'me/player/volume',
volume_percent=volume_percent,
device_id=device_id
)
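

# Illustrative usage sketch (not part of the original module): these endpoints are
# normally reached through a tekore.Spotify client holding a user token with the
# user-modify-playback-state scope. Token acquisition is omitted and the track ID is
# a placeholder.
#
#     import tekore as tk
#
#     spotify = tk.Spotify(user_token)
#     spotify.playback_volume(50)
#     spotify.playback_shuffle(True)
#     spotify.playback_start_tracks(['1a2b3c'], position_ms=0)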
|
scripts/build_sdk_ios.py | eliatlas/unity_sdk | 111 | 11101337 | from scripting_utils import *
def build(root_dir, ios_submodule_dir, with_test_lib):
# ------------------------------------------------------------------
# Paths.
src_dir = '{0}/sdk'.format(ios_submodule_dir)
lib_out_dir = '{0}/Assets/Adjust/iOS'.format(root_dir)
lib_out_dir_test = '{0}/Assets/Adjust/iOS/Test'.format(root_dir)
sdk_static_framework = '{0}/Frameworks/Static/AdjustSdk.framework'.format(src_dir)
# ------------------------------------------------------------------
# Build AdjustStatic framework target.
debug_green('Building AdjustStatic framework target ...')
change_dir(src_dir)
xcode_build_release('AdjustStatic')
copy_file(sdk_static_framework + '/Versions/A/AdjustSdk', lib_out_dir + '/AdjustSdk.a')
copy_files('*', sdk_static_framework + '/Versions/A/Headers/', lib_out_dir)
if with_test_lib:
# ------------------------------------------------------------------
# Paths.
test_static_framework = '{0}/Frameworks/Static/AdjustTestLibrary.framework'.format(src_dir)
# ------------------------------------------------------------------
# Build AdjustTestLibraryStatic framework target.
set_log_tag('IOS-TEST-LIB-BUILD')
debug_green('Building Test Library started ...')
change_dir('{0}/AdjustTests/AdjustTestLibrary'.format(src_dir))
xcode_build_debug('AdjustTestLibraryStatic')
copy_file(test_static_framework + '/Versions/A/AdjustTestLibrary', lib_out_dir_test + '/AdjustTestLibrary.a')
copy_files('*', test_static_framework + '/Versions/A/Headers/', lib_out_dir_test)
|
PR_BCI_team/Team_StarLab/DKHan/examples/giga_cnn/model_openbmi.py | PatternRecognition/OpenBMI | 217 | 11101369 |
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
import pickle
from .cbam import *
class Base_cnn(nn.Module):
def __init__(self,use_attn = None):
super(Base_cnn, self).__init__()
self.num_filters = 112
self.num_hidden = 512
self.conv1 = nn.Conv2d(1, self.num_filters, (10,10), 1)
self.fc1 = nn.Linear(self.num_filters * 1 * 1, self.num_hidden)
#self.bn = nn.BatchNorm1d(self.num_hidden)
#self.fc2 = nn.Linear(self.num_hidden,self.num_hidden)
self.fc_fin = nn.Linear(self.num_hidden, 2)
        # Only build the attention module when requested; keep the attribute defined
        # either way so forward() can check it safely.
        self.cbam = CBAM(1, 16) if use_attn is not None else None

    def forward(self, x):
        if self.cbam is not None:
            x = self.cbam(x)
x = self.conv1(x[:, :, :, :])
x = F.elu(x) #temporal
#x = F.max_pool2d(x, (1, 10), 3)
x = x.view(-1, self.num_filters * 1 * 1)
# x = F.elu(self.bn(self.fc1(x)))
x = F.elu(self.fc1(x))
x = F.dropout(x, training=self.training,p=0.5)
#x = F.leaky_relu(self.fc2(x))
#x = F.dropout(x, training=self.training)
x = self.fc_fin(x)
x = F.log_softmax(x, dim=1)
return x
class Base_cnn_dev(nn.Module):
def __init__(self,use_attn = None):
super(Base_cnn_dev, self).__init__()
self.num_filters = 100
self.num_hidden = 512
self.conv1 = nn.Conv2d(1, self.num_filters, (1,10), 1) #temporal
self.conv2 = nn.Conv2d(self.num_filters, self.num_filters, (62, 30), 1) #spatio-temporal
self.fc1 = nn.Linear(self.num_filters * 1 * 1, self.num_hidden)
#self.bn = nn.BatchNorm1d(self.num_hidden)
#self.fc2 = nn.Linear(self.num_hidden,self.num_hidden)
self.fc_fin = nn.Linear(self.num_hidden, 2)
        # Same guard as Base_cnn: define the attribute even when attention is unused.
        self.cbam = CBAM(1, 16) if use_attn is not None else None

    def forward(self, x):
        if self.cbam is not None:
            x = self.cbam(x)
x = self.conv1(x[:, :, :, :])
x = F.elu(x) #temporal
#x = F.max_pool2d(x, (1, 10), 3)
x = x.view(-1, self.num_filters * 1 * 1)
# x = F.elu(self.bn(self.fc1(x)))
x = F.elu(self.fc1(x))
x = F.dropout(x, training=self.training,p=0.5)
#x = F.leaky_relu(self.fc2(x))
#x = F.dropout(x, training=self.training)
x = self.fc_fin(x)
x = F.log_softmax(x, dim=1)
return x
class Base_cnn_mult(nn.Module):
def __init__(self):
super(Base_cnn_mult, self).__init__()
self.num_filters = 40
self.num_hidden = 1024
self.conv1 = nn.Conv2d(1, self.num_filters, (62,45), 1)
self.fc1 = nn.Linear(self.num_filters * 1 * 83, self.num_hidden)
self.bn = nn.BatchNorm1d(self.num_hidden)
self.fc2 = nn.Linear(self.num_hidden,self.num_hidden)
self.fc_lr = nn.Linear(self.num_hidden, 2)
self.fc_subj = nn.Linear(self.num_hidden, 2)
def forward(self, x):
x = F.elu(self.conv1(x[:,:,:,:])) #temporal
x = F.max_pool2d(x, (1, 10), 3)
x = x.view(-1, self.num_filters * 1 * 83)
# x = F.elu(self.bn(self.fc1(x)))
x = F.elu(self.fc1(x))
x = F.dropout(x, training=self.training,p=0.5)
#x = F.leaky_relu(self.fc2(x))
#x = F.dropout(x, training=self.training)
x1 = self.fc_lr(x)
x2 = self.fc_subj(x)
x1 = F.log_softmax(x1, dim=1)
x2 = F.log_softmax(x2, dim=1)
return x1,x2
class depthwise_separable_conv(nn.Module):
def __init__(self):
super(depthwise_separable_conv, self).__init__()
self.num_filters = 100
self.num_hidden = 1024
self.depthwise1 = nn.Conv2d(1, 1, kernel_size=(62,45), padding=0, groups=1)
        torch.nn.init.xavier_uniform_(self.depthwise1.weight)
self.pointwise1 = nn.Conv2d(1, self.num_filters, kernel_size=1)
        torch.nn.init.xavier_uniform_(self.pointwise1.weight)
self.depthwise2 = nn.Conv2d(self.num_filters, self.num_filters, kernel_size=(1,10), padding=0, groups=self.num_filters)
self.pointwise2 = nn.Conv2d(self.num_filters, self.num_filters, kernel_size=1)
self.fc1 = nn.Linear(self.num_filters * 1 * 24, 2)
def forward(self, x):
x = self.depthwise1(x)
x = self.pointwise1(x)
x = F.elu(x)
x = self.depthwise2(x)
x = self.pointwise2(x)
x = F.elu(x)
x = F.max_pool2d(x, (1, 10), 10)
x = x.view(-1, self.num_filters * 1 * 24)
# x = F.elu(self.bn(self.fc1(x)))
x = self.fc1(x)
x = F.dropout(x, training=self.training, p=0.5)
# x = F.leaky_relu(self.fc2(x))
# x = F.dropout(x, training=self.training)
# x = self.fc_fin(x)
x = F.log_softmax(x, dim=1)
return x
class ResNet_EEG(nn.Module): #Resnet
def __init__(self,block,layers, att_type=None, use_cbam = True):
super(ResNet_EEG, self).__init__()
self.num_filters = 40
self.num_hidden = 960
self.inplanes = 1
self.layer1 = self._make_layer(block, 20, layers[0], att_type=att_type)
self.layer2 = self._make_layer(block, 40, layers[1], stride=2, att_type=att_type)
self.layer3 = self._make_layer(block, 80, layers[2], stride=2, att_type=att_type)
self.layer4 = self._make_layer(block, 160, layers[3], stride=2, att_type=att_type)
self.depthwise = nn.Conv2d(160, 160, kernel_size=(8, 8), padding=0,
groups=160)
self.pointwise = nn.Conv2d(160, 160, kernel_size=1)
self.fc = nn.Linear(self.num_hidden, 2)
#self.fc2 = nn.Linear(1024, 2)
def _make_layer(self, block, planes, blocks, stride=1, att_type=None):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, use_cbam=att_type == 'CBAM'))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, use_cbam=att_type == 'CBAM'))
return nn.Sequential(*layers)
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.depthwise(x)
x = self.pointwise(x)
x = F.max_pool2d(x,(1,5))
x = x.view(x.size(0), -1)
x = self.fc(x)
#x = F.dropout(x, training=self.training, p=0.5)
#x = self.fc2(x)
#x = F.dropout(x, training=self.training, p=0.5)
x = F.log_softmax(x, dim=1)
return x
class Base_dilated_cnn(nn.Module):
def __init__(self):
super(Base_dilated_cnn, self).__init__()
self.num_filters = 128
self.num_hidden = 1024
self.conv1 = nn.Conv2d(1, 64, (62, 10), stride=1, dilation=(1, 1))
self.conv2 = nn.Conv2d(64, 128, (1,10), stride=10, dilation=(1, 2))
self.fc1 = nn.Linear(self.num_filters * 1 * 7, self.num_hidden)
self.bn = nn.BatchNorm1d(self.num_hidden)
self.fc2 = nn.Linear(self.num_hidden,self.num_hidden)
self.fc_fin = nn.Linear(self.num_hidden, 2)
self.cbam = CBAM(self.num_filters,16)
def forward(self, x):
x = self.conv1(x)
x = F.elu(x)
x = self.conv2(x)
#x = self.cbam(x)
x = F.elu(x) #temporal
x = F.max_pool2d(x, (1, 10), 1)
x = x.view(-1, self.num_filters * 1 * 7)
# x = F.elu(self.bn(self.fc1(x)))
x = F.elu(self.fc1(x))
x = F.dropout(x, training=self.training,p=0.5)
#x = F.leaky_relu(self.fc2(x))
#x = F.dropout(x, training=self.training)
x = self.fc_fin(x)
x = F.log_softmax(x, dim=1)
return x
class ShallowCNN(nn.Module): #shallowconv
def __init__(self,use_cbam = False,ismult = False,use_bn = False):
super(ShallowCNN, self).__init__()
self.num_filters = 40
self.num_hidden = 1000
#self.SpatialGate = SpatialGate()
        # self.conv1 = nn.Conv2d(1, 25, kernel_size=(1, 10), stride=1)  # temporal
        # self.conv2 = nn.Conv2d(25, 25, kernel_size=(62, 1), stride=1)  # channel
        self.conv1 = nn.Conv2d(1, 40, kernel_size= (1,25), stride=(1, 1)) # temporal
        self.conv2 = nn.Conv2d(40,40, kernel_size = (62, 1), stride=(1, 1)) # channel
# self.cbam = CBAM(self.num_filters, 16)
if use_bn:
self.bn1 = nn.BatchNorm2d(self.num_filters)
self.bn2 = nn.BatchNorm2d(self.num_filters)
else:
self.bn1 = None
self.bn2 = None
if use_cbam:
self.cbam1 = CBAM(self.num_filters,40)
self.cbam2 = CBAM(self.num_filters, 40)
else:
self.cbam1 = None
self.cbam2 = None
#self.fc1 = nn.Linear(self.num_filters * 1 * 21, self.num_hidden)
self.fc_lr = nn.Linear(self.num_filters * 1 * 21, 2)
if ismult:
self.fc_subj = nn.Linear(self.num_filters * 1 * 21, 2)
else:
self.fc_subj = None
def forward(self, x):
x = self.conv1(x)
# x = self.SpatialGate(x)
        if self.cbam1 is not None:
            x = self.cbam1(x)
        if self.bn1 is not None:
            x = self.bn1(x)
        x = self.conv2(x)
        if self.cbam2 is not None:
            x = self.cbam2(x)
        if self.bn2 is not None:
            x = self.bn2(x)
x = x*x
x = F.avg_pool2d(x, kernel_size = (1, 75), stride = (1,15)) #1,149
x = x.view(-1, self.num_filters * 1 * 21)
x = torch.log(x)
#x = F.leaky_relu(self.fc2(x))
#x = F.dropout(x, training=self.training)
x1 = self.fc_lr(x)
x1 = F.dropout(x1, training=self.training, p=0.5)
x1 = F.log_softmax(x1, dim=1)
        if self.fc_subj is not None:
x2 = self.fc_subj(x)
x2 = F.dropout(x2, training=self.training, p=0.5)
x2 = F.log_softmax(x2, dim=1)
return x1,x2
else:
return x1
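# A quick shape-check sketch for ShallowCNN (a hypothetical helper; the 400-sample
# segment length is an assumption chosen so the (1, 75)/(1, 15) average pooling
# yields the 40 * 1 * 21 features that fc_lr expects -- adjust to the real epoch length):
def _shallowcnn_shape_check(batch_size=8):
    model = ShallowCNN(use_cbam=False, ismult=False, use_bn=True)
    dummy_eeg = torch.randn(batch_size, 1, 62, 400)  # (batch, 1, EEG channels, time samples)
    return model(dummy_eeg)  # log-probabilities of shape (batch_size, 2)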
class Deep4CNN(nn.Module): #deep convnet
def __init__(self,use_cbam = False,ismult = False,use_bn = False):
super(Deep4CNN, self).__init__()
self.num_filters = 200
self.num_hidden = 1000
        self.conv1 = nn.Conv2d(1, 25, kernel_size=(1,10), stride=1) # temporal
        self.conv2 = nn.Conv2d(25, 25, kernel_size=(62, 1), stride=1)  # channel
        self.conv3 = nn.Conv2d(25, 50, kernel_size=(1, 10), stride=1)  # channel
self.conv4 = nn.Conv2d(50,100,kernel_size=(1,10),stride=1)
self.conv5 = nn.Conv2d(100,200,kernel_size=(1,10),stride=1)
#self.conv_classifier = nn.Conv2d(200, 2, kernel_size=(9, 1), stride=(1, 1))
if use_bn:
self.bn1 = nn.BatchNorm2d(25, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.bn2 = nn.BatchNorm2d(50, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.bn3 = nn.BatchNorm2d(100, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.bn4 = nn.BatchNorm2d(200, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
else:
self.bn1 = None
self.bn2 = None
self.bn3 = None
self.bn4 = None
if use_cbam:
self.cbam1 = CBAM(25,10)
self.cbam2 = CBAM(50,10)
self.cbam3 = None #CBAM(100,10)
self.cbam4 = None # CBAM(200,10)
else:
self.cbam1 = None
self.cbam2 = None
self.cbam3 = None
self.cbam4 = None
self.fc1 = nn.Linear(self.num_filters * 1 * 10, self.num_hidden)
self.fc_lr = nn.Linear(self.num_filters * 1 * 10, 2)
if ismult:
self.fc_subj = nn.Linear(self.num_filters * 1 * 14, 2)
else:
self.fc_subj = None
def forward(self, x):
#block1
x = self.conv1(x)
x = self.conv2(x)
        if self.cbam1 is not None:
            x = self.cbam1(x)
        if self.bn1 is not None:
            x = self.bn1(x)
x = F.elu(x)
x = F.max_pool2d(x, kernel_size = (1, 3), stride = (1, 2))
#block2
x = self.conv3(x)
        if self.cbam2 is not None:
            x = self.cbam2(x)
        if self.bn2 is not None:
            x = self.bn2(x)
x = F.elu(x)
x = F.max_pool2d(x, kernel_size=(1, 3), stride=(1, 2))
#block3
x = self.conv4(x)
        if self.cbam3 is not None:
            x = self.cbam3(x)
        if self.bn3 is not None:
            x = self.bn3(x)
x = F.elu(x)
x = F.max_pool2d(x, kernel_size=(1, 3), stride=(1, 2))
#block4
x = self.conv5(x)
        if self.cbam4 is not None:
            x = self.cbam4(x)
        if self.bn4 is not None:
            x = self.bn4(x)
x = F.elu(x)
x = F.max_pool2d(x, kernel_size=(1, 3), stride=(1, 3))
x = x.view(-1, 200* 1 * 10)
#x = torch.log(x)
#x = F.leaky_relu(self.fc2(x))
#x = F.dropout(x, training=self.training)
x1 = self.fc_lr(x)
x1 = F.dropout(x1, training=self.training, p=0.5)
x1 = F.log_softmax(x1, dim=1)
        if self.fc_subj is not None:
x2 = self.fc_subj(x)
x2 = F.dropout(x2, training=self.training, p=0.5)
x2 = F.log_softmax(x2, dim=1)
return x1,x2
else:
return x1
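# The same sketch for Deep4CNN (hypothetical helper; with 62 channels and an assumed
# 400 time samples the four conv/pool blocks reduce the time axis to 10, matching the
# 200 * 1 * 10 view and fc_lr above):
def _deep4cnn_shape_check(batch_size=8):
    model = Deep4CNN(use_cbam=False, ismult=False, use_bn=True)
    dummy_eeg = torch.randn(batch_size, 1, 62, 400)  # (batch, 1, EEG channels, time samples)
    return model(dummy_eeg)  # log-probabilities of shape (batch_size, 2)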
class melCNN(nn.Module):
def __init__(self):
super(melCNN, self).__init__()
        self.conv1 = nn.Conv2d(62, 100, (6, 6), stride=1)  # temporal
        self.bn1 = nn.BatchNorm2d(100)
        self.conv2 = nn.Conv2d(100, 100, (6, 6), stride=1)  # temporal
        self.bn2 = nn.BatchNorm2d(100)
        self.conv3 = nn.Conv2d(10, 20, (3, 3), stride=1)  # temporal
self.fc1 = nn.Linear(1600, 2)
def forward(self, x):
x = x.squeeze(1)
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
x = F.max_pool2d(x, kernel_size=2)
x = self.conv2(x)
x = self.bn2(x)
x = F.relu(x)
x = F.max_pool2d(x, kernel_size=2)
#
# x = self.conv3(x)
# x = F.relu(x)
# x = F.max_pool2d(x, kernel_size=2)
x = x.view(-1,1600)
x = self.fc1(x)
x = F.log_softmax(x, dim=1)
return x
class SimpleNN(nn.Module):
def __init__(self):
super(SimpleNN, self).__init__()
self.fc1 = nn.Linear(24800, 1000)
nn.init.xavier_uniform_(self.fc1.weight)
self.fc2 = nn.Linear(1000, 1000)
nn.init.xavier_uniform_(self.fc2.weight)
self.fc3 = nn.Linear(1000, 2)
nn.init.xavier_uniform_(self.fc3.weight)
def forward(self, x):
x = x.view(-1, 24800)
x = self.fc1(x)
x = F.dropout(x, training=self.training, p=0.5)
x = F.relu(x)
x = self.fc2(x)
x = F.dropout(x, training=self.training, p=0.5)
x = F.relu(x)
x = self.fc3(x)
x = F.dropout(x, training=self.training, p=0.5)
x = F.log_softmax(x, dim=1)
return x |
hpccm/primitives/runscript.py | robertmaynard/hpc-container-maker | 340 | 11101415 | <filename>hpccm/primitives/runscript.py
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Runscript primitive"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import shlex
from six.moves import shlex_quote
import logging # pylint: disable=unused-import
import hpccm.config
from hpccm.common import container_type
class runscript(object):
"""The `runscript` primitive specifies the commands to be invoked
when the container starts.
# Parameters
_args: Boolean flag to specify whether `"$@"` should be appended
to the command. If more than one command is specified, nothing is
appended regardless of the value of this flag. The default is
True (Singularity specific).
_app: String containing the [SCI-F](https://www.sylabs.io/guides/2.6/user-guide/reproducible_scif_apps.html)
identifier. This also causes the Singularity block to named `%apprun`
rather than `%runscript` (Singularity specific).
commands: A list of commands to execute. The default is an empty
list.
_exec: Boolean flag to specify whether `exec` should be inserted
to preface the final command. The default is True (Singularity
specific).
# Examples
```python
runscript(commands=['cd /workdir', 'source env.sh'])
```
```python
runscript(commands=['/usr/local/bin/entrypoint.sh'])
```
"""
def __init__(self, **kwargs):
"""Initialize primitive"""
#super(wget, self).__init__()
self._args = kwargs.get('_args', True) # Singularity specific
self._app = kwargs.get('_app', '') # Singularity specific
self._exec = kwargs.get('_exec', True) # Singularity specific
self.commands = kwargs.get('commands', [])
def __str__(self):
"""String representation of the primitive"""
if self.commands:
if hpccm.config.g_ctype == container_type.DOCKER:
if self._app:
logging.warning('The Singularity specific %app.. syntax was '
'requested. Docker does not have an '
'equivalent: using regular ENTRYPOINT!')
if len(self.commands) > 1:
logging.warning('Multiple commands given to runscript. '
'Docker ENTRYPOINT supports just one cmd: '
'ignoring remaining commands!')
# Format:
# ENTRYPOINT ["cmd1", "arg1", "arg2", ...]
s = []
s.extend('"{}"'.format(shlex_quote(x))
for x in shlex.split(self.commands[0]))
return 'ENTRYPOINT [' + ', '.join(s) + ']'
elif hpccm.config.g_ctype == container_type.SINGULARITY:
if self._exec:
# prepend last command with exec
self.commands[-1] = 'exec {0}'.format(self.commands[-1])
if len(self.commands) == 1 and self._args:
# append "$@" to singleton command
self.commands[0] = '{} "$@"'.format(self.commands[0])
# Format:
# %runscript
# cmd1
# cmd2
# exec cmd3
if self._app:
s = ['%apprun {0}'.format(self._app)]
else:
s = ['%runscript']
s.extend([' {}'.format(x) for x in self.commands])
return '\n'.join(s)
elif hpccm.config.g_ctype == container_type.BASH:
logging.warning('runscript primitive does not map into bash')
return ''
else:
raise RuntimeError('Unknown container type')
else:
return ''
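    # For instance (a sketch of the Singularity branch above): with the default
    # options and commands=['/usr/local/bin/entrypoint.sh'], str() renders as
    #   %runscript
    #       exec /usr/local/bin/entrypoint.sh "$@"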
def merge(self, lst, _app=None):
"""Merge one or more instances of the primitive into a single
instance. Due to conflicts or option differences the merged
primitive may not be exact.
"""
if not lst: # pragma: nocover
raise RuntimeError('no items provided to merge')
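        # For example (sketch): merging runscript(commands=['a']) and
        # runscript(commands=['b']) yields runscript(commands=['a', 'b']).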
cmds = []
for item in lst:
if not item.__class__.__name__ == 'runscript': # pragma: nocover
logging.warning('item is not the correct type, skipping...')
continue
cmds.extend(item.commands)
return runscript(commands=cmds, _app=_app)
|
contrib/notebooks/deep_learning/model_scripts/ConvNet_CIFAR10.py | hebinhuang/batch-shipyard | 279 | 11101426 | <filename>contrib/notebooks/deep_learning/model_scripts/ConvNet_CIFAR10.py
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
from __future__ import print_function
import _cntk_py
import argparse
import json
import logging
import os
from uuid import uuid4
import cntk
import cntk.io.transforms as xforms
import numpy as np
from cntk import layers, Trainer, learning_rate_schedule, momentum_as_time_constant_schedule, momentum_sgd, \
UnitType, CrossValidationConfig
from cntk.io import MinibatchSource, ImageDeserializer, StreamDef, StreamDefs
from cntk.logging import ProgressPrinter, TensorBoardProgressWriter
from cntk.losses import cross_entropy_with_softmax
from cntk.metrics import classification_error
from cntk.ops import minus, element_times, constant, relu
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
_ABS_PATH = os.getcwd()
_MODEL_PATH = os.path.join(_ABS_PATH, "Models")
# model dimensions
_IMAGE_HEIGHT = 32
_IMAGE_WIDTH = 32
_NUM_CHANNELS = 3 # RGB
_NUM_CLASSES = 10
_MODEL_NAME = "ConvNet_CIFAR10_model.dnn"
_EPOCH_SIZE = 50000
def process_map_file(map_file, imgfolder):
""" Convert map file format to one required by CNTK ImageDeserializer
"""
logger.info('Processing {}...'.format(map_file))
orig_file = open(map_file, 'r')
map_path, map_name = os.path.split(map_file)
new_filename = os.path.join(map_path, 'p_{}'.format(map_name))
new_file = open(new_filename, 'w')
for line in orig_file:
fname, label = line.split('\t')
new_file.write("%s\t%s\n" % (os.path.join(imgfolder, fname), label.strip()))
orig_file.close()
new_file.close()
return new_filename
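# For example (file names are illustrative): with imgfolder '/data/cifar/train', the
# line 'airplane_0001.png\t0' in train_map.txt is rewritten as
# '/data/cifar/train/airplane_0001.png\t0' in the generated p_train_map.txt.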
def _create_env_variable_appender(env_var_name):
def env_var_appender(identifier):
env_var_value = os.environ.get(env_var_name, None)
if env_var_value is None:
return identifier
else:
return '{}_{}'.format(identifier, env_var_value)
return env_var_appender
_append_task_id = _create_env_variable_appender('AZ_BATCH_TASK_ID') # Append task id if the env variable exists
_append_job_id = _create_env_variable_appender('AZ_BATCH_JOB_ID') # Append job id if the env variable exists
def _get_unique_id():
""" Returns a unique identifier
If executed in a batch environment it will incorporate the job and task id
"""
return _append_job_id(_append_task_id(str(uuid4())[:8]))
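# e.g. outside Azure Batch this is just a uuid prefix such as '1a2b3c4d' (illustrative);
# with AZ_BATCH_TASK_ID=task0 and AZ_BATCH_JOB_ID=job1 set, it becomes '1a2b3c4d_task0_job1'.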
def _save_results(test_result, filename, **kwargs):
results_dict = {'test_metric':test_result, 'parameters': kwargs}
logger.info('Saving results {}'.format(results_dict))
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as outfile:
json.dump(results_dict, outfile)
def create_image_mb_source(map_file, mean_file, train, total_number_of_samples):
""" Creates minibatch source
"""
if not os.path.exists(map_file) or not os.path.exists(mean_file):
raise RuntimeError(
"File '%s' or '%s' does not exist. " %
(map_file, mean_file))
# transformation pipeline for the features has jitter/crop only when training
transforms = []
if train:
imgfolder = os.path.join(os.path.split(map_file)[0], 'train')
transforms += [
xforms.crop(crop_type='randomside', side_ratio=0.8, jitter_type='uniratio') # train uses jitter
]
else:
imgfolder = os.path.join(os.path.split(map_file)[0], 'test')
transforms += [
xforms.scale(width=_IMAGE_WIDTH, height=_IMAGE_HEIGHT, channels=_NUM_CHANNELS, interpolations='linear'),
xforms.mean(mean_file)
]
map_file = process_map_file(map_file, imgfolder)
# deserializer
return MinibatchSource(
ImageDeserializer(map_file, StreamDefs(
features=StreamDef(field='image', transforms=transforms),
# first column in map file is referred to as 'image'
labels=StreamDef(field='label', shape=_NUM_CLASSES))), # and second as 'label'
randomize=train,
max_samples=total_number_of_samples,
multithreaded_deserializer=True)
def create_network(num_convolution_layers):
""" Create network
"""
# Input variables denoting the features and label data
input_var = cntk.input_variable((_NUM_CHANNELS, _IMAGE_HEIGHT, _IMAGE_WIDTH))
label_var = cntk.input_variable((_NUM_CLASSES))
# create model, and configure learning parameters
# Instantiate the feedforward classification model
input_removemean = minus(input_var, constant(128))
scaled_input = element_times(constant(0.00390625), input_removemean)
print('Creating NN model')
with layers.default_options(activation=relu, pad=True):
model = layers.Sequential([
layers.For(range(num_convolution_layers), lambda: [
layers.Convolution2D((3, 3), 64),
layers.Convolution2D((3, 3), 64),
layers.MaxPooling((3, 3), (2, 2))
]),
layers.For(range(2), lambda i: [
layers.Dense([256, 128][i]),
layers.Dropout(0.5)
]),
layers.Dense(_NUM_CLASSES, activation=None)
])(scaled_input)
# loss and metric
ce = cross_entropy_with_softmax(model, label_var)
pe = classification_error(model, label_var)
return {
'name': 'convnet',
'feature': input_var,
'label': label_var,
'ce': ce,
'pe': pe,
'output': model
}
def train_and_test(network, trainer, train_source, test_source, minibatch_size, epoch_size, restore,
model_path=_MODEL_PATH, cv_config=None):
""" Train and test
"""
# define mapping from intput streams to network inputs
input_map = {
network['feature']: train_source.streams.features,
network['label']: train_source.streams.labels
}
cntk.training_session(
trainer=trainer,
mb_source=train_source,
mb_size=minibatch_size,
model_inputs_to_streams=input_map,
checkpoint_config=cntk.CheckpointConfig(filename=os.path.join(model_path, _MODEL_NAME), restore=restore),
progress_frequency=epoch_size,
cv_config=cv_config
).train()
def create_trainer(network, minibatch_size, epoch_size, progress_printer):
""" Create trainer
"""
# Set learning parameters
lr_per_sample = [0.0015625] * 10 + [0.00046875] * 10 + [0.00015625]
momentum_time_constant = [0] * 20 + [-minibatch_size / np.log(0.9)]
l2_reg_weight = 0.002
lr_schedule = learning_rate_schedule(lr_per_sample, epoch_size=epoch_size, unit=UnitType.sample)
mm_schedule = momentum_as_time_constant_schedule(momentum_time_constant)
learner = momentum_sgd(network['output'].parameters,
lr_schedule,
mm_schedule,
l2_regularization_weight=l2_reg_weight)
return Trainer(network['output'], (network['ce'], network['pe']), learner, progress_printer)
def create_results_callback(filename, **kwargs):
def simple_callback(index, average_error, cv_num_samples, cv_num_minibatches):
_save_results(average_error, filename, **kwargs)
return False
return simple_callback
def convnet_cifar10(train_source,
test_source,
epoch_size,
num_convolution_layers=2,
minibatch_size=64,
max_epochs=30,
log_file=None,
tboard_log_dir='.',
results_path=_MODEL_PATH):
_cntk_py.set_computation_network_trace_level(0)
logger.info("""Running network with:
{num_convolution_layers} convolution layers
{minibatch_size} minibatch size
for {max_epochs} epochs""".format(
num_convolution_layers=num_convolution_layers,
minibatch_size=minibatch_size,
max_epochs=max_epochs
))
network = create_network(num_convolution_layers)
progress_printer = ProgressPrinter(
tag='Training',
log_to_file=log_file,
rank=cntk.Communicator.rank(),
num_epochs=max_epochs)
tensorboard_writer = TensorBoardProgressWriter(freq=10,
log_dir=tboard_log_dir,
model=network['output'])
trainer = create_trainer(network, minibatch_size, epoch_size, [progress_printer, tensorboard_writer])
cv_config = CrossValidationConfig(minibatch_source=test_source,
minibatch_size=16,
callback=create_results_callback(os.path.join(results_path, "model_results.json"),
num_convolution_layers=num_convolution_layers,
minibatch_size=minibatch_size,
max_epochs=max_epochs))
train_and_test(network,
trainer,
train_source,
test_source,
minibatch_size,
epoch_size,
restore=False,
cv_config=cv_config)
network['output'].save(os.path.join(results_path, _MODEL_NAME))
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--datadir',
help='Data directory where the CIFAR dataset is located',
required=True)
parser.add_argument('-m', '--modeldir',
help='directory for saving model',
required=False,
default=_MODEL_PATH)
parser.add_argument('-logfile', '--logfile', help='Log file', required=False, default=None)
parser.add_argument('-tensorboard_logdir', '--tensorboard_logdir',
help='Directory where TensorBoard logs should be created',
required=False,
default='.')
parser.add_argument('-e', '--max_epochs',
help='Total number of epochs to train',
type=int,
required=False,
default='20')
parser.add_argument('--num_convolution_layers',
help='Number of convolution layers',
type=int,
required=False,
default='2')
parser.add_argument('--minibatch_size',
help='Number of examples in each minibatch',
type=int,
required=False,
default='64')
args = vars(parser.parse_args())
epochs = int(args['max_epochs'])
model_path = args['modeldir']
data_path = args['datadir']
if not os.path.exists(data_path):
raise RuntimeError("Folder %s does not exist" % data_path)
train_source = create_image_mb_source(os.path.join(data_path, 'train_map.txt'),
os.path.join(data_path, 'CIFAR-10_mean.xml'),
train=True,
total_number_of_samples=epochs * _EPOCH_SIZE)
test_source = create_image_mb_source(os.path.join(data_path, 'test_map.txt'),
os.path.join(data_path, 'CIFAR-10_mean.xml'),
train=False,
total_number_of_samples=cntk.io.FULL_DATA_SWEEP)
unique_path = os.path.join(model_path, _get_unique_id())
convnet_cifar10(train_source,
test_source,
_EPOCH_SIZE,
num_convolution_layers=args['num_convolution_layers'],
minibatch_size=args['minibatch_size'],
max_epochs=args['max_epochs'],
log_file=None,
tboard_log_dir='.',
results_path=unique_path)
|
saleor/checkout/migrations/0028_auto_20200824_1019.py | fairhopeweb/saleor | 15,337 | 11101437 | # Generated by Django 3.1 on 2020-08-24 10:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("checkout", "0027_auto_20200810_1415"),
]
operations = [
migrations.AddField(
model_name="checkout",
name="redirect_url",
field=models.URLField(blank=True, null=True),
),
migrations.AddField(
model_name="checkout",
name="tracking_code",
field=models.CharField(blank=True, max_length=255, null=True),
),
]
|
tests/utils/test_array_utils.py | llv22/baal_tf2.4_mac | 575 | 11101449 | <reponame>llv22/baal_tf2.4_mac
import numpy as np
import pytest
import torch
from scipy.special import softmax, expit
from baal.utils import array_utils
from baal.utils.array_utils import to_prob
from baal.utils.iterutils import map_on_tensor
@pytest.fixture()
def a_tensor():
return torch.randn([10, 3, 32, 32])
@pytest.fixture()
def an_array():
return np.random.randn(10, 3, 32, 32)
@pytest.fixture()
def a_binary_array():
return np.random.randn(10, 1, 32, 32)
def test_stack_in_memory_single(a_tensor):
iterations = 10
out = array_utils.stack_in_memory(a_tensor, iterations=iterations)
assert out.shape == (10 * iterations, 3, 32, 32)
def test_stack_in_memory_multi(a_tensor):
iterations = 10
t = [a_tensor, a_tensor]
out = map_on_tensor(lambda ti: array_utils.stack_in_memory(ti, iterations=iterations), t)
assert out[0].shape == (10 * iterations, 3, 32, 32)
assert out[1].shape == (10 * iterations, 3, 32, 32)
def test_to_prob(an_array, a_binary_array):
out = to_prob(an_array)
assert not np.allclose(out, an_array)
out = to_prob(a_binary_array)
assert not np.allclose(out, a_binary_array)
a_array_scaled = softmax(an_array, 1)
a_binary_array_scaled = expit(a_binary_array)
out = to_prob(a_array_scaled)
assert np.allclose(out, a_array_scaled)
out = to_prob(a_binary_array_scaled)
assert np.allclose(out, a_binary_array_scaled)
if __name__ == '__main__':
pytest.main()
|
examples/command_send_message.py | tvorogme/pytg | 385 | 11101512 | # -*- coding: utf-8 -*-
"""
Simplest way to just send a message.
Without complicated message receiving stuff.
"""
from pytg.sender import Sender
__author__ = 'luckydonald'
def main():
sender = Sender("127.0.0.1", 4458)
# you need a CLI already running in json mode on port 4458.
res = sender.msg("@username", "Hello!")
print("Response: {response}".format(response=res))
# end def main
if __name__ == '__main__':
main()
|
easyreg/ants_iter.py | ebrahimebrahim/easyreg | 107 | 11101530 | <filename>easyreg/ants_iter.py
from .base_toolkit import ToolkitBase
from .ants_utils import *
class AntsRegIter(ToolkitBase):
"""
The AntsRegIter provides an interface to [AntsPy](https://github.com/ANTsX/ANTsPy),
    the version we work with is 0.1.4, though the newest version is 0.2.0.
    AntsPy is not fully functional; support for the ants package is planned to replace AntsPy.
"""
def name(self):
return 'ants_reg iter'
def initialize(self,opt):
"""
initialize the ants registration
        methods supported: "affine", "syn"
        * "syn" includes affine as preprocessing
:param opt: task opt settings
:return: None
"""
ToolkitBase.initialize(self, opt)
if self.method_name =='affine':
self.affine_on = True
self.warp_on = False
elif self.method_name =='syn':
self.affine_on = False
self.warp_on = True
self.ants_param = opt['tsk_set']['reg']['ants']
def affine_optimization(self):
"""
run the affine optimization
        the results, including the warped image, warped label, transformation map, etc., are saved in the ants format under the record path
:return: warped image, warped label(None), transformation map(None)
"""
output, loutput, phi,_ = performAntsRegistration(self.ants_param, self.resized_moving_path,self.resized_target_path,self.method_name,self.record_path,self.resized_l_moving_path,self.resized_l_target_path,self.fname_list[0])
self.output = output
self.warped_label_map = loutput
self.phi = None
return self.output, None, None
def syn_optimization(self):
"""
run the syn optimization
the results, including warped image, warped label, transformation map, etc. take the ants format and saved in record path
:return: warped image, warped label(None), transformation map(None)
"""
output, loutput, disp,jacobian = performAntsRegistration(self.ants_param, self.resized_moving_path,self.resized_target_path,self.method_name,self.record_path,self.resized_l_moving_path,self.resized_l_target_path,self.fname_list[0])
#self.afimg_or_afparam = None
self.output = output
self.warped_label_map = loutput
self.jacobian= jacobian
self.phi = None
return self.output,None, None
def forward(self,input=None):
"""
forward the model
:param input:
:return:
"""
if self.affine_on and not self.warp_on:
return self.affine_optimization()
elif self.warp_on:
""" the syn include affine"""
return self.syn_optimization()
def compute_jacobi_map(self,jacobian):
"""
        In ants, negative jacobian values are set to zero,
        so we count the number of non-positive jacobian values instead.
        The jacobi_abs_sum is not used here.
:param jacobian:
:return:
"""
jacobi_abs = -0.0 # - np.sum(jacobian[jacobian < 0.]) #
jacobi_num = np.sum(jacobian <=0.)
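        # e.g. (sketch) a displacement-field jacobian with 12 non-positive voxels gives
        # jacobi_num == 12, while jacobi_abs_sum stays at 0.0.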
print("the jacobi_value of fold points for current batch is {}".format(jacobi_abs))
print("the number of fold points for current batch is {}".format(jacobi_num))
# np.sum(np.abs(dfx[dfx<0])) + np.sum(np.abs(dfy[dfy<0])) + np.sum(np.abs(dfz[dfz<0]))
jacobi_abs_sum = jacobi_abs # / np.prod(map.shape)
return jacobi_abs_sum, jacobi_num
|
recipes/Python/577758_Sleepsort_processes/recipe-577758.py | tdiprima/code | 2,023 | 11101531 | import os
import time
def sleepsort(l):
"""Another dumb sorting algorithm."""
pids = []
def reap():
while pids:
os.waitpid(pids.pop(), 0)
# Setup communication.
startr, startw = os.pipe()
resr, resw = os.pipe()
try:
for i, x in enumerate(l):
pid = os.fork()
if pid == 0:
# Wait for parent process to signal start.
os.read(startr, 1)
time.sleep(x)
# Notify the parent process.
os.write(resw, str(i).encode("ascii") + b" ")
# Goodbye.
os._exit(0)
else:
pids.append(pid)
# Start the sleeps.
os.write(startw, b"x" * len(l))
os.close(startw)
startw = -1
reap()
os.close(resw)
resw = -1
# Read results.
data = []
while True:
d = os.read(resr, 4096)
if len(d) == 0:
break
data.append(d)
finally:
os.close(startr)
if startw > 0:
os.close(startw)
os.close(resr)
if resw > 0:
os.close(resw)
reap()
return [l[int(c)] for c in b"".join(data)[:-1].split(b" ")]
if __name__ == "__main__":
print(sleepsort([10, 9, 7.3, 7, 6, .2, .4, 3, 2, 1.5]))
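    # Expected output, scheduling permitting: [0.2, 0.4, 1.5, 2, 3, 6, 7, 7.3, 9, 10]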
|
website/registries/utils.py | gaybro8777/osf.io | 628 | 11101601 | <gh_stars>100-1000
REG_CAMPAIGNS = {
'prereg': 'OSF Preregistration',
'osf-registered-reports': 'Registered Report Protocol Preregistration',
}
def get_campaign_schema(campaign):
from osf.models import RegistrationSchema
if campaign not in REG_CAMPAIGNS:
raise ValueError('campaign must be one of: {}'.format(', '.join(REG_CAMPAIGNS.keys())))
schema_name = REG_CAMPAIGNS[campaign]
return RegistrationSchema.objects.filter(name=schema_name).order_by('-schema_version').first()
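# e.g. get_campaign_schema('prereg') returns the highest-version RegistrationSchema
# named 'OSF Preregistration', or None if no such schema exists.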
def drafts_for_user(user, campaign=None):
from osf.models import DraftRegistration, Node
from osf.utils.permissions import ADMIN_NODE
if not user or user.is_anonymous:
return None
node_qs = Node.objects.get_nodes_for_user(user, ADMIN_NODE).values_list('id', flat=True)
drafts = DraftRegistration.objects.filter(
approval=None,
registered_node=None,
deleted__isnull=True,
branched_from__in=node_qs,
)
if campaign:
drafts = drafts.filter(
registration_schema=get_campaign_schema(campaign),
)
return drafts
|
libnd4j/include/graph/generated/nd4j/graph/ByteOrder.py | rghwer/testdocs | 13,006 | 11101621 | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: graph
class ByteOrder(object):
LE = 0
BE = 1
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_infra_fti_cfg.py | CiscoDevNet/ydk-py | 177 | 11101633 | <reponame>CiscoDevNet/ydk-py
""" Cisco_IOS_XR_infra_fti_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR infra\-fti package configuration.
This module contains definitions
for the following management objects\:
dci\-fabric\-interconnect\: Configure FTI
parameters/sub\-parameters
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
import sys
from collections import OrderedDict
from ydk.types import Entity as _Entity_
from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class DciFabricInterconnect(_Entity_):
"""
Configure FTI parameters/sub\-parameters
.. attribute:: fabrics
Configure fabric parameters
**type**\: :py:class:`Fabrics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_fti_cfg.DciFabricInterconnect.Fabrics>`
.. attribute:: acp
Configure Auto Config Pool parameters
**type**\: :py:class:`Acp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_fti_cfg.DciFabricInterconnect.Acp>`
.. attribute:: identity
Identity (Loopback IP address)<x.x.x.x>
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'infra-fti-cfg'
_revision = '2017-11-13'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(DciFabricInterconnect, self).__init__()
self._top_entity = None
self.yang_name = "dci-fabric-interconnect"
self.yang_parent_name = "Cisco-IOS-XR-infra-fti-cfg"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("fabrics", ("fabrics", DciFabricInterconnect.Fabrics)), ("acp", ("acp", DciFabricInterconnect.Acp))])
self._leafs = OrderedDict([
('identity', (YLeaf(YType.str, 'identity'), ['str'])),
])
self.identity = None
self.fabrics = DciFabricInterconnect.Fabrics()
self.fabrics.parent = self
self._children_name_map["fabrics"] = "fabrics"
self.acp = DciFabricInterconnect.Acp()
self.acp.parent = self
self._children_name_map["acp"] = "acp"
self._segment_path = lambda: "Cisco-IOS-XR-infra-fti-cfg:dci-fabric-interconnect"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(DciFabricInterconnect, ['identity'], name, value)
class Fabrics(_Entity_):
"""
Configure fabric parameters
.. attribute:: fabric
Enter fabric identifier
**type**\: list of :py:class:`Fabric <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_fti_cfg.DciFabricInterconnect.Fabrics.Fabric>`
"""
_prefix = 'infra-fti-cfg'
_revision = '2017-11-13'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(DciFabricInterconnect.Fabrics, self).__init__()
self.yang_name = "fabrics"
self.yang_parent_name = "dci-fabric-interconnect"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("fabric", ("fabric", DciFabricInterconnect.Fabrics.Fabric))])
self._leafs = OrderedDict()
self.fabric = YList(self)
self._segment_path = lambda: "fabrics"
self._absolute_path = lambda: "Cisco-IOS-XR-infra-fti-cfg:dci-fabric-interconnect/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(DciFabricInterconnect.Fabrics, [], name, value)
class Fabric(_Entity_):
"""
Enter fabric identifier
.. attribute:: id1 (key)
fabric identifier
**type**\: int
**range:** 1000..9999
.. attribute:: controllers
Enter Opflex peer info
**type**\: :py:class:`Controllers <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_fti_cfg.DciFabricInterconnect.Fabrics.Fabric.Controllers>`
.. attribute:: ssl
Disabled or Path to certificate
**type**\: str
"""
_prefix = 'infra-fti-cfg'
_revision = '2017-11-13'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(DciFabricInterconnect.Fabrics.Fabric, self).__init__()
self.yang_name = "fabric"
self.yang_parent_name = "fabrics"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['id1']
self._child_classes = OrderedDict([("controllers", ("controllers", DciFabricInterconnect.Fabrics.Fabric.Controllers))])
self._leafs = OrderedDict([
('id1', (YLeaf(YType.uint32, 'id1'), ['int'])),
('ssl', (YLeaf(YType.str, 'ssl'), ['str'])),
])
self.id1 = None
self.ssl = None
self.controllers = DciFabricInterconnect.Fabrics.Fabric.Controllers()
self.controllers.parent = self
self._children_name_map["controllers"] = "controllers"
self._segment_path = lambda: "fabric" + "[id1='" + str(self.id1) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-infra-fti-cfg:dci-fabric-interconnect/fabrics/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(DciFabricInterconnect.Fabrics.Fabric, ['id1', 'ssl'], name, value)
class Controllers(_Entity_):
"""
Enter Opflex peer info
.. attribute:: controller
Enter Spine IP address
**type**\: list of :py:class:`Controller <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_fti_cfg.DciFabricInterconnect.Fabrics.Fabric.Controllers.Controller>`
"""
_prefix = 'infra-fti-cfg'
_revision = '2017-11-13'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(DciFabricInterconnect.Fabrics.Fabric.Controllers, self).__init__()
self.yang_name = "controllers"
self.yang_parent_name = "fabric"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("controller", ("controller", DciFabricInterconnect.Fabrics.Fabric.Controllers.Controller))])
self._leafs = OrderedDict()
self.controller = YList(self)
self._segment_path = lambda: "controllers"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(DciFabricInterconnect.Fabrics.Fabric.Controllers, [], name, value)
class Controller(_Entity_):
"""
Enter Spine IP address
.. attribute:: ip1 (key)
Enter Spine IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'infra-fti-cfg'
_revision = '2017-11-13'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(DciFabricInterconnect.Fabrics.Fabric.Controllers.Controller, self).__init__()
self.yang_name = "controller"
self.yang_parent_name = "controllers"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['ip1']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('ip1', (YLeaf(YType.str, 'ip1'), ['str'])),
])
self.ip1 = None
self._segment_path = lambda: "controller" + "[ip1='" + str(self.ip1) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(DciFabricInterconnect.Fabrics.Fabric.Controllers.Controller, ['ip1'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_fti_cfg as meta
return meta._meta_table['DciFabricInterconnect.Fabrics.Fabric.Controllers.Controller']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_fti_cfg as meta
return meta._meta_table['DciFabricInterconnect.Fabrics.Fabric.Controllers']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_fti_cfg as meta
return meta._meta_table['DciFabricInterconnect.Fabrics.Fabric']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_fti_cfg as meta
return meta._meta_table['DciFabricInterconnect.Fabrics']['meta_info']
class Acp(_Entity_):
"""
Configure Auto Config Pool parameters
.. attribute:: bd_range
Specify BD pool range
**type**\: :py:class:`BdRange <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_fti_cfg.DciFabricInterconnect.Acp.BdRange>`
.. attribute:: vni_range
Specify VNI pool range
**type**\: :py:class:`VniRange <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_fti_cfg.DciFabricInterconnect.Acp.VniRange>`
.. attribute:: bvi_range
Specify BVI pool range
**type**\: :py:class:`BviRange <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_fti_cfg.DciFabricInterconnect.Acp.BviRange>`
.. attribute:: vrfs
Configure local VRF parameters
**type**\: :py:class:`Vrfs <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_fti_cfg.DciFabricInterconnect.Acp.Vrfs>`
.. attribute:: nve_id
Specify NVE interface id
**type**\: int
**range:** 0..4294967295
.. attribute:: bgp_as
Specify BGP AS number
**type**\: int
**range:** 0..4294967295
.. attribute:: bg_name
Specify Bridge\-group name
**type**\: str
"""
_prefix = 'infra-fti-cfg'
_revision = '2017-11-13'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(DciFabricInterconnect.Acp, self).__init__()
self.yang_name = "acp"
self.yang_parent_name = "dci-fabric-interconnect"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("bd-range", ("bd_range", DciFabricInterconnect.Acp.BdRange)), ("vni-range", ("vni_range", DciFabricInterconnect.Acp.VniRange)), ("bvi-range", ("bvi_range", DciFabricInterconnect.Acp.BviRange)), ("vrfs", ("vrfs", DciFabricInterconnect.Acp.Vrfs))])
self._leafs = OrderedDict([
('nve_id', (YLeaf(YType.uint32, 'nve-id'), ['int'])),
('bgp_as', (YLeaf(YType.uint32, 'bgp-as'), ['int'])),
('bg_name', (YLeaf(YType.str, 'bg-name'), ['str'])),
])
self.nve_id = None
self.bgp_as = None
self.bg_name = None
self.bd_range = DciFabricInterconnect.Acp.BdRange()
self.bd_range.parent = self
self._children_name_map["bd_range"] = "bd-range"
self.vni_range = DciFabricInterconnect.Acp.VniRange()
self.vni_range.parent = self
self._children_name_map["vni_range"] = "vni-range"
self.bvi_range = DciFabricInterconnect.Acp.BviRange()
self.bvi_range.parent = self
self._children_name_map["bvi_range"] = "bvi-range"
self.vrfs = DciFabricInterconnect.Acp.Vrfs()
self.vrfs.parent = self
self._children_name_map["vrfs"] = "vrfs"
self._segment_path = lambda: "acp"
self._absolute_path = lambda: "Cisco-IOS-XR-infra-fti-cfg:dci-fabric-interconnect/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(DciFabricInterconnect.Acp, ['nve_id', 'bgp_as', 'bg_name'], name, value)
class BdRange(_Entity_):
"""
Specify BD pool range
.. attribute:: bd_min
BD Range\:min value
**type**\: int
**range:** 1..4000
.. attribute:: bd_max
BD Range\:max value
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'infra-fti-cfg'
_revision = '2017-11-13'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(DciFabricInterconnect.Acp.BdRange, self).__init__()
self.yang_name = "bd-range"
self.yang_parent_name = "acp"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('bd_min', (YLeaf(YType.uint32, 'bd-min'), ['int'])),
('bd_max', (YLeaf(YType.uint32, 'bd-max'), ['int'])),
])
self.bd_min = None
self.bd_max = None
self._segment_path = lambda: "bd-range"
self._absolute_path = lambda: "Cisco-IOS-XR-infra-fti-cfg:dci-fabric-interconnect/acp/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(DciFabricInterconnect.Acp.BdRange, ['bd_min', 'bd_max'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_fti_cfg as meta
return meta._meta_table['DciFabricInterconnect.Acp.BdRange']['meta_info']
class VniRange(_Entity_):
"""
Specify VNI pool range
.. attribute:: vni_min
VNI Range\:min value
**type**\: int
**range:** 1..4000
.. attribute:: vni_max
VNI Range\:max value
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'infra-fti-cfg'
_revision = '2017-11-13'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(DciFabricInterconnect.Acp.VniRange, self).__init__()
self.yang_name = "vni-range"
self.yang_parent_name = "acp"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('vni_min', (YLeaf(YType.uint32, 'vni-min'), ['int'])),
('vni_max', (YLeaf(YType.uint32, 'vni-max'), ['int'])),
])
self.vni_min = None
self.vni_max = None
self._segment_path = lambda: "vni-range"
self._absolute_path = lambda: "Cisco-IOS-XR-infra-fti-cfg:dci-fabric-interconnect/acp/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(DciFabricInterconnect.Acp.VniRange, ['vni_min', 'vni_max'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_fti_cfg as meta
return meta._meta_table['DciFabricInterconnect.Acp.VniRange']['meta_info']
class BviRange(_Entity_):
"""
Specify BVI pool range
.. attribute:: bvi_min
BVI Range\:min value
**type**\: int
**range:** 1..4000
.. attribute:: bvi_max
BVI Range\:max value
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'infra-fti-cfg'
_revision = '2017-11-13'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(DciFabricInterconnect.Acp.BviRange, self).__init__()
self.yang_name = "bvi-range"
self.yang_parent_name = "acp"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('bvi_min', (YLeaf(YType.uint32, 'bvi-min'), ['int'])),
('bvi_max', (YLeaf(YType.uint32, 'bvi-max'), ['int'])),
])
self.bvi_min = None
self.bvi_max = None
self._segment_path = lambda: "bvi-range"
self._absolute_path = lambda: "Cisco-IOS-XR-infra-fti-cfg:dci-fabric-interconnect/acp/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(DciFabricInterconnect.Acp.BviRange, ['bvi_min', 'bvi_max'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_fti_cfg as meta
return meta._meta_table['DciFabricInterconnect.Acp.BviRange']['meta_info']
class Vrfs(_Entity_):
"""
Configure local VRF parameters
.. attribute:: vrf
vrf name
**type**\: list of :py:class:`Vrf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_fti_cfg.DciFabricInterconnect.Acp.Vrfs.Vrf>`
"""
_prefix = 'infra-fti-cfg'
_revision = '2017-11-13'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(DciFabricInterconnect.Acp.Vrfs, self).__init__()
self.yang_name = "vrfs"
self.yang_parent_name = "acp"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("vrf", ("vrf", DciFabricInterconnect.Acp.Vrfs.Vrf))])
self._leafs = OrderedDict()
self.vrf = YList(self)
self._segment_path = lambda: "vrfs"
self._absolute_path = lambda: "Cisco-IOS-XR-infra-fti-cfg:dci-fabric-interconnect/acp/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(DciFabricInterconnect.Acp.Vrfs, [], name, value)
class Vrf(_Entity_):
"""
vrf name
.. attribute:: vrf_name (key)
vrf name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: bvi_vrf_ip
BVI override IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'infra-fti-cfg'
_revision = '2017-11-13'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(DciFabricInterconnect.Acp.Vrfs.Vrf, self).__init__()
self.yang_name = "vrf"
self.yang_parent_name = "vrfs"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['vrf_name']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('vrf_name', (YLeaf(YType.str, 'vrf-name'), ['str'])),
('bvi_vrf_ip', (YLeaf(YType.str, 'bvi-vrf-ip'), ['str'])),
])
self.vrf_name = None
self.bvi_vrf_ip = None
self._segment_path = lambda: "vrf" + "[vrf-name='" + str(self.vrf_name) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-infra-fti-cfg:dci-fabric-interconnect/acp/vrfs/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(DciFabricInterconnect.Acp.Vrfs.Vrf, ['vrf_name', 'bvi_vrf_ip'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_fti_cfg as meta
return meta._meta_table['DciFabricInterconnect.Acp.Vrfs.Vrf']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_fti_cfg as meta
return meta._meta_table['DciFabricInterconnect.Acp.Vrfs']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_fti_cfg as meta
return meta._meta_table['DciFabricInterconnect.Acp']['meta_info']
def clone_ptr(self):
self._top_entity = DciFabricInterconnect()
return self._top_entity
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_fti_cfg as meta
return meta._meta_table['DciFabricInterconnect']['meta_info']
|
LRC/train_mixup.py | houj04/AutoDL | 155 | 11101648 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#
# Based on:
# --------------------------------------------------------
# DARTS
# Copyright (c) 2018, <NAME>.
# Licensed under the Apache License, Version 2.0;
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from learning_rate import cosine_decay
import numpy as np
import argparse
from model import NetworkCIFAR as Network
import reader_cifar as reader
import sys
import os
import time
import logging
import genotypes
import paddle.fluid as fluid
import shutil
import utils
import math
parser = argparse.ArgumentParser("cifar")
# yapf: disable
parser.add_argument('--data', type=str, default='./dataset/cifar/cifar-10-batches-py/', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained model to load')
parser.add_argument('--model_id', type=int, help='model id')
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--epochs', type=int, default=600, help='num of training epochs')
parser.add_argument('--init_channels', type=int, default=36, help='num of init channels')
parser.add_argument('--layers', type=int, default=20, help='total number of layers')
parser.add_argument('--save_model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--lr_exp_decay', action='store_true', default=False, help='use exponential_decay learning_rate')
parser.add_argument('--mix_alpha', type=float, default=0.5, help='mixup alpha')
parser.add_argument('--lrc_loss_lambda', default=0, type=float, help='lrc_loss_lambda')
# yapf: enable
args = parser.parse_args()
CIFAR_CLASSES = 10
dataset_train_size = 50000.
image_size = 32
genotypes.DARTS = genotypes.MY_DARTS_list[args.model_id]
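# Example launch (paths and flag values are illustrative):
#   CUDA_VISIBLE_DEVICES=0 python train_mixup.py --data ./dataset/cifar/cifar-10-batches-py/ \
#       --model_id 0 --auxiliary --cutout --batch_size 96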
def main():
image_shape = [3, image_size, image_size]
devices = os.getenv("CUDA_VISIBLE_DEVICES") or ""
devices_num = len(devices.split(","))
logging.info("args = %s", args)
genotype = eval("genotypes.%s" % args.arch)
model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
args.auxiliary, genotype)
steps_one_epoch = math.ceil(dataset_train_size /
(devices_num * args.batch_size))
train(model, args, image_shape, steps_one_epoch)
def build_program(main_prog, startup_prog, args, is_train, model, im_shape,
steps_one_epoch):
out = []
with fluid.program_guard(main_prog, startup_prog):
py_reader = model.build_input(im_shape, is_train)
if is_train:
with fluid.unique_name.guard():
loss = model.train_model(py_reader, args.init_channels,
args.auxiliary, args.auxiliary_weight,
args.lrc_loss_lambda)
optimizer = fluid.optimizer.Momentum(
learning_rate=cosine_decay(args.learning_rate, args.epochs,
steps_one_epoch),
regularization=fluid.regularizer.L2Decay(args.weight_decay),
momentum=args.momentum)
optimizer.minimize(loss)
out = [py_reader, loss]
else:
with fluid.unique_name.guard():
prob, acc_1, acc_5 = model.test_model(py_reader,
args.init_channels)
out = [py_reader, prob, acc_1, acc_5]
return out
def train(model, args, im_shape, steps_one_epoch):
startup_prog = fluid.Program()
train_prog = fluid.Program()
test_prog = fluid.Program()
train_py_reader, loss_train = build_program(
train_prog, startup_prog, args, True, model, im_shape, steps_one_epoch)
test_py_reader, prob, acc_1, acc_5 = build_program(
test_prog, startup_prog, args, False, model, im_shape, steps_one_epoch)
test_prog = test_prog.clone(for_test=True)
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(startup_prog)
if args.pretrained_model:
def if_exist(var):
return os.path.exists(os.path.join(args.pretrained_model, var.name))
fluid.io.load_vars(
exe,
args.pretrained_model,
main_program=train_prog,
predicate=if_exist)
exec_strategy = fluid.ExecutionStrategy()
exec_strategy.num_threads = 1
build_strategy = fluid.BuildStrategy()
build_strategy.memory_optimize = False
build_strategy.enable_inplace = True
compile_program = fluid.compiler.CompiledProgram(
train_prog).with_data_parallel(
loss_name=loss_train.name,
build_strategy=build_strategy,
exec_strategy=exec_strategy)
train_reader = reader.train10(args)
test_reader = reader.test10(args)
train_py_reader.decorate_paddle_reader(train_reader)
test_py_reader.decorate_paddle_reader(test_reader)
fluid.clip.set_gradient_clip(
fluid.clip.GradientClipByGlobalNorm(args.grad_clip), program=train_prog)
train_fetch_list = [loss_train]
def save_model(postfix, main_prog):
model_path = os.path.join(args.save_model_path, postfix)
if os.path.isdir(model_path):
shutil.rmtree(model_path)
fluid.io.save_persistables(exe, model_path, main_program=main_prog)
def test(epoch_id):
test_fetch_list = [prob, acc_1, acc_5]
top1 = utils.AvgrageMeter()
top5 = utils.AvgrageMeter()
test_py_reader.start()
test_start_time = time.time()
step_id = 0
try:
while True:
prev_test_start_time = test_start_time
test_start_time = time.time()
prob_v, acc_1_v, acc_5_v = exe.run(test_prog,
fetch_list=test_fetch_list)
top1.update(np.array(acc_1_v), np.array(prob_v).shape[0])
top5.update(np.array(acc_5_v), np.array(prob_v).shape[0])
if step_id % args.report_freq == 0:
print("Epoch {}, Step {}, acc_1 {}, acc_5 {}, time {}".
format(epoch_id, step_id,
np.array(acc_1_v),
np.array(acc_5_v), test_start_time -
prev_test_start_time))
step_id += 1
except fluid.core.EOFException:
test_py_reader.reset()
print("Epoch {0}, top1 {1}, top5 {2}".format(epoch_id, top1.avg,
top5.avg))
epoch_start_time = time.time()
for epoch_id in range(args.epochs):
model.drop_path_prob = args.drop_path_prob * epoch_id / args.epochs
train_py_reader.start()
epoch_end_time = time.time()
if epoch_id > 0:
print("Epoch {}, total time {}".format(epoch_id - 1, epoch_end_time
- epoch_start_time))
epoch_start_time = epoch_end_time
epoch_end_time
start_time = time.time()
step_id = 0
try:
while True:
prev_start_time = start_time
start_time = time.time()
loss_v, = exe.run(
compile_program,
fetch_list=[v.name for v in train_fetch_list])
print("Epoch {}, Step {}, loss {}, time {}".format(epoch_id, step_id, \
np.array(loss_v).mean(), start_time-prev_start_time))
step_id += 1
sys.stdout.flush()
except fluid.core.EOFException:
train_py_reader.reset()
if epoch_id % 50 == 0:
save_model(str(epoch_id), train_prog)
if epoch_id == args.epochs - 1:
save_model('final', train_prog)
test(epoch_id)
if __name__ == '__main__':
main()
|
colossus/apps/templates/models.py | CreativeWurks/emailerpro | 372 | 11101661 |
from django.db import models
from django.template.loader import get_template
from django.urls import reverse
from django.utils import timezone
from django.utils.html import mark_safe
from django.utils.translation import gettext_lazy as _
from .utils import wrap_blocks
class EmailTemplateManager(models.Manager):
@classmethod
def default_content(cls):
default_content = get_template('templates/default_email_template_content.html')
content = default_content.template.source
return content
class EmailTemplate(models.Model):
name = models.CharField(_('name'), max_length=100)
content = models.TextField(blank=True)
create_date = models.DateTimeField(_('create date'), auto_now_add=True)
update_date = models.DateTimeField(_('update date'), default=timezone.now)
last_used_date = models.DateTimeField(_('last used'), null=True, blank=True)
last_used_campaign = models.ForeignKey(
'campaigns.Campaign',
on_delete=models.SET_NULL,
null=True,
blank=True,
verbose_name=_('last used campaign'),
related_name='+'
)
objects = EmailTemplateManager()
class Meta:
verbose_name = _('email template')
verbose_name_plural = _('email templates')
db_table = 'colossus_email_templates'
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.pk and not self.content:
self.content = self.__class__.objects.default_content()
super().save(*args, **kwargs)
def get_absolute_url(self):
return reverse('templates:emailtemplate_editor', kwargs={'pk': self.pk})
def html_preview(self):
html = wrap_blocks(self.content)
return mark_safe(html)
|
languages/python/oso/polar/variable.py | connec/oso | 2,167 | 11101715 | class Variable(str):
"""An unbound variable type, can be used to query the KB for information"""
def __repr__(self):
return f"Variable({super().__repr__()})"
def __str__(self):
return repr(self)
def __eq__(self, other):
return super().__eq__(other)
def __hash__(self):
return super().__hash__()
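# Illustrative usage sketch (for clarity only): an unbound Variable is passed into a
# query so the engine reports what it binds to. The host-side names used below
# (Polar, load_str, query_rule, the "bindings" key) are assumptions about the
# surrounding library, not guarantees made by this module.
#
#   from polar import Polar
#   p = Polar()
#   p.load_str('parent("artemis", "zeus");')
#   results = list(p.query_rule("parent", "artemis", Variable("who")))
#   # each result is expected to expose a bindings dict, e.g.
#   # results[0]["bindings"]["who"] == "zeus"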
|
extras/mysteryHex.py | Manny27nyc/BitcoinArmory | 505 | 11101750 |
#! /usr/bin/python
from os import path
import sys
from optparse import OptionParser
sys.path.append('..')
from pybtcengine import *
HASHCODE_HEADER = 1
HASHCODE_MERKLE = 2
HASHCODE_TX = 3
################################################################################
################################################################################
def figureOutMysteryHex(hexStr, hashDict={}):
binStr = hex_to_binary(hexStr)
print '\n' + '-'*80
print '\nStarting hex data:', len(binStr), 'bytes'
hexStr.replace(' ','')
pprintHex(hexStr, ' ')
print '\n' + '-'*80
# These search terms only give us hints about where things are. We have more
# operations to determine if something is actually behind these strings
hintStr = {}
hintStr['Empty4B' ] = hex_to_binary('00000000' )
hintStr['Version' ] = hex_to_binary('01000000' )
hintStr['PkStart' ] = hex_to_binary('76a9' )
hintStr['PkEnd' ] = hex_to_binary('88ac' )
hintStr['SeqNum' ] = hex_to_binary('ffffffff' )
# These search terms are simple, self-explanatory terms. We find them, flag
# them and we're done.
simpleList = []
simpleList.append(['f9beb4d9', 'MagicNum', 'Main network magic bytes (f9beb4d9)'])
simpleList.append(['fabfb5da', 'MagicNum', 'Test network magic bytes (fabfb5da)'])
simpleList.append(['76657261636b', 'VERACK', 'Version acknowledgement message'])
simpleList.append(['76657273696f6e', 'VersionMsg', 'Version declaration message'])
simpleList.append(['61646472', 'AddressMsg', 'Address declaration message'])
# To verify a timestamp, check it's between 2009 and today + 10days
timeMin = time.mktime( (2009,1,1,0,0,0,0,0,-1))
timeMax = time.time() + 10*86400
# Exclusive list of [Name, startIdx, endIdx, hexSubstr, toPrintAfter]
   # Exclusive means that if we already identified something there, we don't
# search it again
idListExcl = []
   # Inclusive list of multiple random things. Inclusive means that even if
# we already identified a transaction somewhere, we will still ID all the
# scripts in it, even though it's technically already flagged as ID'd
idListSimple = []
# This is a running mask of what bytes have been identified already
maskAll = [0]*len(binStr)
# This method will return all indices that match the substring "findBin"
# excluding matches inside chunks already ID'd
def getIdxListNotIdYet(findBin, theMask):
versIdx = []
findIdx = binStr.find(findBin)
while not findIdx == -1:
if not theMask[findIdx] == 1:
versIdx.append(findIdx)
findIdx = binStr.find(hintStr['Version'],findIdx+1)
return versIdx
# Return all matches for the string, regardless of whether it's ID'd already
def getIdxList(findBin):
versIdx = []
findIdx = binStr.find(findBin)
while not findIdx == -1:
versIdx.append(findIdx)
findIdx = binStr.find(findBin,findIdx+1)
return versIdx
############################################################################
# Search for version numbers which will help us find Tx's and BlockHeaders
############################################################################
versIdx = getIdxListNotIdYet(hintStr['Version'], maskAll)
for idx in versIdx:
# Check for block Header: hash has leading zeros and timestamp is sane
if idx<=len(binStr)-80:
hashZeros = binStr[idx+32:idx+36] == hintStr['Empty4B']
validTime = timeMin < binary_to_int(binStr[idx+68:idx+72]) < timeMax
if hashZeros and validTime:
bin80 = binStr[idx:idx+80]
blkhead = PyBlockHeader().unserialize(bin80)
idListExcl.append(['BlockHeader', idx, idx+80, binary_to_hex(bin80), blkhead])
maskAll[idx:idx+80] = [1]*80
continue
# If not a header, check to see if it's a Tx
try:
testTx = PyTx().unserialize(binStr[idx:])
if len(testTx.inputs) < 1 or len(testTx.outputs) < 1:
raise Exception
for inp in testTx.inputs:
if not inp.intSeq==binary_to_int(hintStr['SeqNum']):
raise Exception
# If we got here, the sequence numbers should be sufficient evidence for
# declaring this is a transaction
txBin = testTx.serialize()
txLen = len(txBin)
txHex = binary_to_hex(txBin)
idListExcl.append(['Transaction', idx, idx+txLen, txHex, testTx])
maskAll[idx:idx+txLen] = [1]*txLen
except:
# Obviously wasn't a transaction, either
continue
pubkeyList = [ ]
# Try to find a PkScript
pkIdx = getIdxListNotIdYet(hintStr['PkStart'], maskAll)
for idx in pkIdx:
if binStr[idx+23:idx+25] == hintStr['PkEnd']:
addrStr = PyBtcAddress().createFromPublicKeyHash160(binStr[idx+3:idx+23])
extraInfo = addrStr.getAddrStr()
idListSimple.append(['TxOutScript', idx, idx+25, extraInfo, ''])
maskAll[idx:idx+25] = [1]*25
startCBPK = hex_to_binary('04')
pkIdx = getIdxListNotIdYet(startCBPK, maskAll)
for idx in pkIdx:
if idx > len(binStr)-65:
continue
try:
addrStr = PyBtcAddress().createFromPublicKey(binStr[idx:idx+65])
extraInfo = addrStr.calculateAddrStr()
if not idx+65==len(binStr) and binStr[idx+65] == hex_to_binary('ac'):
idListSimple.append(['CoinbaseScript', idx, idx+66, extraInfo, ''])
maskAll[idx:idx+66] = [1]*66
else:
idListSimple.append(['BarePublicKey', idx, idx+65, extraInfo, ''])
maskAll[idx:idx+65] = [1]*65
if idx>0 and binStr[idx-1] == hex_to_binary('41'):
idListSimple[-1][1] -= 1 # get the 41 that's there if it's a script
maskAll[idx-1] = 1
except:
pass # I guess this wasn't a PK after all...
############################################################################
# Random straightforward things to search for without any extra computation.
############################################################################
for triplet in simpleList:
foundIdx = getIdxList( hex_to_binary(triplet[0]))
for idx in foundIdx:
idListSimple.append([triplet[1], idx, idx+len(triplet[0])/2, triplet[2], ''])
# If we have a useful dictionary of hashes, let's use it
if len(hashDict) > 0:
for i in range(len(binStr)-31):
if maskAll[i] == 1:
continue
str32 = binStr[i:i+32]
if hashDict.has_key(str32):
hashcode = hashDict[str32]
if hashcode==HASHCODE_HEADER:
hashCode = 'HeaderHash'
elif hashcode==HASHCODE_MERKLE:
hashCode = 'MerkleRoot'
elif hashcode==HASHCODE_TX:
hashCode = 'TxHash'
else:
hashCode = 'UnknownHash'
idListSimple.append([hashCode, i, i+32, binary_to_hex(str32), ''])
elif hashDict.has_key(binary_switchEndian(str32)):
hashcode = hashDict[binary_switchEndian(str32)]
if hashcode==HASHCODE_HEADER:
hashCode = 'HeaderHash(BE)'
elif hashcode==HASHCODE_MERKLE:
hashCode = 'MerkleRoot(BE)'
elif hashcode==HASHCODE_TX:
hashCode = 'TxHash(BE)'
else:
hashCode = 'UnknownHash'
idListSimple.append([hashCode, i, i+32, binary_to_hex(str32), ''])
############################################################################
# Done searching for stuff. Print results
############################################################################
for ids in idListExcl:
print ''
print '#'*100
idx0,idx1 = ids[1], ids[2]
# If this is a Tx or BH, need to pprint the last arg
hexToPrint = ['-'] * 2*len(binStr)
if ids[0] == 'Transaction' or ids[0] == 'BlockHeader':
hexToPrint[2*ids[1]:2*ids[2]] = ids[3]
print 'Found: ', ids[0]
print 'Size:', idx1-idx0, 'bytes'
print 'Bytes: %d to %d (0x%s to 0x%s)' % (idx0, idx1, \
int_to_hex(idx0, 4, BIGENDIAN), \
int_to_hex(idx1, 4, BIGENDIAN))
pprintHex( ''.join(hexToPrint), ' ')
print ''
ids[4].pprint(1)
print ''
print '#'*100
# Print all the simple stuff onto a single bytemap
print 'Other assorted things:'
idListSimple.sort(key=lambda x: x[1])
hexToPrint = ['-'] * 2*len(binStr)
ReprList = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
for j,ids in enumerate(idListSimple):
i1 = ids[1]
i2 = ids[2]
nb = i2-i1
maskAll[i1:i2] = [1]*nb
hexToPrint[2*i1:2*i2] = ReprList[j]*2*nb
hexToPrint = ''.join(hexToPrint)
pprintHex(hexToPrint, ' ')
print ''
for j,ids in enumerate(idListSimple):
print ' ', ReprList[j] + ':', ids[0].ljust(16,' '), ':', ids[3]
print '\n\nUnidentified bytes'
maskedBytes = ['--' if maskAll[i] == 1 else hexStr[2*i:2*i+2] for i in range(len(binStr))]
pprintHex(''.join(maskedBytes));
################################################################################
################################################################################
def updateHashList(hashfile, blkfile, rescan=False):
print ''
print '\t.Updating hashlist from the blockchain file in your bitcoin directory'
print '\t.This will take 1-5 min the first time you run this script (and on rescan)'
print '\t\t.Hashfile: ', hashfile
print '\t\t.BlockFile:', blkfile
if not path.exists(hashfile) or rescan:
hf = open('knownHashes.bin','wb')
hf.write('\x00'*8)
hf.close()
hf = open(hashfile, 'rb')
startBlkByte = binary_to_int(hf.read(8))
hf.close()
assert(path.exists(blkfile))
blkfileSize = os.stat(blkfile).st_size
bf = open(blkfile, 'rb')
hf = open(hashfile, 'r+')
hf.write(int_to_binary(blkfileSize, widthBytes=8))
hf.seek(0,2) # start writing at the end of the file
   # The first 8 bytes of the hashfile tell us where to start searching
# blk0001.dat (so we don't recompute every time). We need to rewrite
# this value every time
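   # For reference, the hashfile layout produced below is simply:
   #   [8-byte blkfile offset][32-byte hash][1-byte type code] * N
   # so a file holding N hashes is 8 + 33*N bytes (matching the 33-byte reads
   # done by the consumer at the bottom of this script).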
bf.seek(startBlkByte, 0) # seek to this point in the file
binunpack = BinaryUnpacker(bf.read())
newBlocksRead = 0
newHashes = 0
while( binunpack.getRemainingSize() > 0):
binunpack.advance(4) # magic
sz = binunpack.get(UINT32) # total bytes in this block
thisHeader = PyBlockHeader().unserialize(binunpack)
hf.write(thisHeader.theHash + '\x01')
hf.write(thisHeader.merkleRoot + '\x02')
thisData = PyBlockData().unserialize(binunpack)
for tx in thisData.txList:
hf.write(tx.thisHash + '\x03')
newHashes += 2 + len(thisData.txList)
if newBlocksRead==0:
print '\n\t\tReading blocks...',
newBlocksRead += 1
if(newBlocksRead%1000==0):
if(newBlocksRead%10000==0):
print '\n\t\tRead', newBlocksRead, 'blocks',
print '.',
sys.stdout.flush()
print '\n\t.Updated hashfile with %d bytes / %d hashes / %d blocks from blkfile' % \
(blkfileSize-startBlkByte, newHashes, newBlocksRead)
hf.close()
if __name__ == '__main__':
print '\nTry to identify Bitcoin-related strings in a block of data'
parser = OptionParser(usage='USAGE: %prog [--binary|-b] -f FILE \n or: %prog unidentifiedHex')
parser.add_option('-f', '--file', dest='filename', \
help='Get unidentified data from this file')
parser.add_option('-k', '--blkfile', dest='blk0001file', default='', \
help='Update hashlist from this file (default ~/.bitcoin/blk0001.dat)')
parser.add_option('-g', '--hashfile', dest='hashfile', default='./knownHashes.bin', \
help='The file to store and retrieve header/tx hashes')
parser.add_option('-b', '--binary', action='store_false', dest='isHex', default=True, \
help='Specified file is in binary')
parser.add_option('--byterange', dest='byterange', default='all', \
help='Bytes to read, --byterange=0,100')
parser.add_option('-s', '--usehashes', action='store_true', dest='useHashes', default=False, \
help='Import header/tx hashes to be used in searching')
parser.add_option('-u', '--noupdatehashes', action='store_false', dest='updateHashes', default=True, \
help='Disable searching blk0001.dat to update hashlist (ignored without -s)')
parser.add_option('-r', '--rescanhashes', action='store_true', dest='doRescan', default=False, \
help='Rescan blkfile for header/tx hashes')
#parser.add_option('-t', '--testnet', action='store_true', dest='testnet', default=False, \
#help='Run the script using testnet data/addresses')
# Should add option for picking (start,end) bytes for files that are long
#parser.add_option('-o', '--outfile', dest='outfile', default='', \
#help='Redirect results to output file')
(opts, args) = parser.parse_args()
fn = opts.filename
isHex = opts.isHex
blkfile = opts.blk0001file
hashfile = opts.hashfile
#outfile = opts.outfile
if len(blkfile)==0 and opts.updateHashes:
import platform
opsys = platform.system()
if 'win' in opsys.lower():
blkfile = path.join(os.getenv('APPDATA'), 'Bitcoin', 'blk0001.dat')
if 'nix' in opsys.lower() or 'nux' in opsys.lower():
blkfile = path.join(os.getenv('HOME'), '.bitcoin', 'blk0001.dat')
if 'mac' in opsys.lower() or 'osx' in opsys.lower():
blkfile = os.path.expanduser('~/Library/Application Support/Bitcoin/blk0001.dat')
# A variety of error conditions
if fn == None and len(args)==0:
parser.error('Please supply hex data or a file with unidentified data\n')
if not fn == None and not path.exists(fn):
parser.error('Cannot find ' + fn)
if fn == None and not isHex:
parser.error('Cannot read binary data from command line. Please put it in a file and use -f option')
if not path.exists(blkfile) and opts.useHashes and opts.updateHashes:
print 'Cannot find blockdata file', blkfile, '... proceeding without updating hashes'
opts.updateHashes = False
if not opts.useHashes:
print '\t(use the -s option to enable search for header/tx hashes from blk0001.dat)'
byteStart,byteStop = 0,0
print opts.byterange
if not opts.byterange=='all':
byteStart,byteStop = [int(i) for i in opts.byterange.split(',')]
# Update the knownHashes.txt file, if necessary
if(opts.useHashes and opts.updateHashes):
updateHashList(hashfile, blkfile, opts.doRescan)
# If we plan to use it, populate a dictionary of hashes
hashDict = {}
if(opts.useHashes):
hfile = open(hashfile, 'rb')
skip = hfile.read(8)
binaryHashes = hfile.read()
hfile.close()
print '\t.Reading %s (%0.1f MB)' % (hashfile, len(binaryHashes)/float(1024**2))
if not opts.updateHashes:
         print '\t (remove -u flag to update hashlist with recent blocks from blk0001.dat)'
nHash = len(binaryHashes) / 33
for i in range(nHash):
loc = i*33
hash32 = binaryHashes[loc:loc+32]
code = binaryHashes[loc+32]
hashDict[hash32] = binary_to_int(code)
print '\t.Hash dictionary populated with %d hashes from %s' % (len(hashDict),hashfile)
binaryToSearch = []
if not fn == None:
if not isHex:
f = open(fn, 'rb')
binaryToSearch = ''
if byteStop<=byteStart:
binaryToSearch = f.read()
else:
f.seek(byteStart,0);
binaryToSearch = f.read(byteStop-byteStart)
f.close()
else:
f = open(fn, 'r')
hexLines = f.readlines()
hexToSearch = ''.join([l.strip().replace(' ','') for l in hexLines])
if not byteStop<=byteStart:
hexToSearch = hexToSearch[2*byteStart:2*byteStop]
try:
binaryToSearch = hex_to_binary(hexToSearch)
except:
print 'Error processing %s. If this is a binary file, please use the -b flag' % (fn,)
exit(0)
else:
# pull data from the remaining arguments (which must be hex)
hexToSearch = ''.join(args)
binaryToSearch = hex_to_binary(hexToSearch.replace(' ',''))
# Yeah, I know we just converted it to binary, now back to hex
figureOutMysteryHex(binary_to_hex(binaryToSearch), hashDict)
|
src/programy/clients/polling/twitter/config.py | cdoebler1/AIML2 | 345 | 11101768 |
"""
Copyright (c) 2016-2020 <NAME> http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.clients.config import ClientConfigurationData
from programy.utils.substitutions.substitues import Substitutions
class TwitterConfiguration(ClientConfigurationData):
def __init__(self):
ClientConfigurationData.__init__(self, "twitter")
self._description = 'ProgramY AIML2.0 Twitter Client'
self._polling_interval = 60
self._rate_limit_sleep = 900
self._follow_followers = True
self._respond_to_mentions = True
self._respond_to_directs = False
self._mentions = ['#askprogramy']
self._welcome_message = "Thanks for following me."
@property
def polling_interval(self):
return self._polling_interval
@property
def rate_limit_sleep(self):
return self._rate_limit_sleep
@property
def follow_followers(self):
return self._follow_followers
@property
def respond_to_mentions(self):
return self._respond_to_mentions
@property
def respond_to_directs(self):
return self._respond_to_directs
@property
def mentions(self):
return self._mentions
@property
def welcome_message(self):
return self._welcome_message
def load_configuration_section(self, configuration_file, section, bot_root, subs: Substitutions = None):
assert section is not None
self._polling_interval = configuration_file.get_int_option(section, "polling_interval", missing_value=60,
subs=subs)
self._rate_limit_sleep = configuration_file.get_int_option(section, "rate_limit_sleep", missing_value=900,
subs=subs)
self._follow_followers = configuration_file.get_bool_option(section, "follow_followers", missing_value=False,
subs=subs)
self._respond_to_mentions = configuration_file.get_bool_option(section, "respond_to_mentions", missing_value=False,
subs=subs)
self._respond_to_directs = configuration_file.get_bool_option(section, "respond_to_directs", missing_value=True,
subs=subs)
self._mentions = configuration_file.get_multi_option(section, "mentions", missing_value="", subs=subs)
self._welcome_message = configuration_file.get_option(section, "welcome_message", subs=subs)
super(TwitterConfiguration, self).load_configuration_section(configuration_file, section, bot_root,
subs=subs)
def to_yaml(self, data, defaults=True):
if defaults is True:
data['polling_interval'] = 60
data['rate_limit_sleep'] = 900
data['follow_followers'] = True
data['respond_to_mentions'] = True
data['respond_to_directs'] = False
data['mentions'] = ["#askprogramy"]
data['welcome_message'] = "Thanks for following me."
data['storage'] = 'file'
data['storage_location'] = './storage/twitter.data'
else:
data['polling_interval'] = self._polling_interval
data['rate_limit_sleep'] = self._rate_limit_sleep
data['follow_followers'] = self._follow_followers
data['respond_to_mentions'] = self._respond_to_mentions
data['respond_to_directs'] = self._respond_to_directs
data['mentions'] = self._mentions[:]
data['welcome_message'] = self._welcome_message
super(TwitterConfiguration, self).to_yaml(data, defaults)
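# Illustrative configuration sketch (assumed YAML layout; the key names mirror the
# options read in load_configuration_section above, the values are just examples):
#
#   twitter:
#     polling_interval: 60
#     rate_limit_sleep: 900
#     follow_followers: true
#     respond_to_mentions: true
#     respond_to_directs: false
#     mentions:
#       - "#askprogramy"
#     welcome_message: "Thanks for following me."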
|
env/Lib/site-packages/OpenGL/GL/ARB/ES3_2_compatibility.py | 5gconnectedbike/Navio2 | 210 | 11101785 |
'''OpenGL extension ARB.ES3_2_compatibility
This module customises the behaviour of the
OpenGL.raw.GL.ARB.ES3_2_compatibility to provide a more
Python-friendly API
Overview (from the spec)
This extension adds support for features of OpenGL ES 3.2 that are
missing from OpenGL 4.5. Enabling these features will ease the process
of porting applications from OpenGL ES 3.2 to OpenGL.
In particular this adds the following features:
- Bounding box used to optimization tessellation processing
(OES_primitive_bounding_box)
- query for MULTISAMPLE_LINE_WIDTH_RANGE_ARB
- support for the OpenGL ES 3.20 shading language
For full OpenGL ES 3.2 compatibility the implementation must support
KHR_blend_equation_advanced and KHR_texture_compression_astc_ldr. Those
features are not defined in this extension spec since they are already
defined at the KHR level.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/ES3_2_compatibility.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.ES3_2_compatibility import *
from OpenGL.raw.GL.ARB.ES3_2_compatibility import _EXTENSION_NAME
def glInitEs32CompatibilityARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION |
desktop/core/ext-py/odfpy-1.4.1/examples/subobject.py | yetsun/hue | 5,079 | 11101845 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2007 <NAME>, European Environment Agency
#
# This is free software. You may redistribute it under the terms
# of the Apache license and the GNU General Public License Version
# 2 or at your option any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
# This is an example of an OpenDocument Text with an embedded Chart.
#
from odf.opendocument import OpenDocumentChart, OpenDocumentText
from odf import chart, style, table, text, draw
# import a support class from the examples directory
from datatable import DataTable
class BarChart(object):
def __init__(self):
self.charttype = 'chart:bar'
self.subtype = 'normal' # 'percentage', 'stacked' or 'normal'
self.threedimensional = "false"
self.x_axis = "X"
self.y_axis = "Y"
self.values = (1,2,3)
self.title = None
self.subtitle = None
def __call__(self, doc):
chartstyle = style.Style(name="chartstyle", family="chart")
chartstyle.addElement( style.GraphicProperties(stroke="none", fillcolor="#ffffff"))
doc.automaticstyles.addElement(chartstyle)
mychart = chart.Chart(width="476pt", height="404pt",stylename=chartstyle, attributes={'class':self.charttype})
doc.chart.addElement(mychart)
# Title
if self.title:
titlestyle = style.Style(name="titlestyle", family="chart")
titlestyle.addElement( style.GraphicProperties(stroke="none", fill="none"))
titlestyle.addElement( style.TextProperties(fontfamily="'Nimbus Sans L'",
fontfamilygeneric="swiss", fontpitch="variable", fontsize="13pt"))
doc.automaticstyles.addElement(titlestyle)
mytitle = chart.Title(x="185pt", y="27pt", stylename=titlestyle)
mytitle.addElement( text.P(text=self.title))
mychart.addElement(mytitle)
# Subtitle
if self.subtitle:
subtitlestyle = style.Style(name="subtitlestyle", family="chart")
subtitlestyle.addElement( style.GraphicProperties(stroke="none", fill="none"))
subtitlestyle.addElement( style.TextProperties(fontfamily="'Nimbus Sans L'",
fontfamilygeneric="swiss", fontpitch="variable", fontsize="10pt"))
doc.automaticstyles.addElement(subtitlestyle)
subtitle = chart.Subtitle(x="50pt", y="50pt", stylename=subtitlestyle)
subtitle.addElement( text.P(text= self.subtitle))
mychart.addElement(subtitle)
# Legend
legendstyle = style.Style(name="legendstyle", family="chart")
legendstyle.addElement( style.GraphicProperties(fill="none"))
legendstyle.addElement( style.TextProperties(fontfamily="'Nimbus Sans L'",
fontfamilygeneric="swiss", fontpitch="variable", fontsize="8pt"))
doc.automaticstyles.addElement(legendstyle)
mylegend = chart.Legend(legendposition="end", legendalign="center", stylename=legendstyle)
mychart.addElement(mylegend)
# Plot area
plotstyle = style.Style(name="plotstyle", family="chart")
if self.subtype == "stacked": percentage="false"; stacked="true"
elif self.subtype == "percentage": percentage="true"; stacked="false"
else: percentage="false"; stacked="false"
plotstyle.addElement( style.ChartProperties(seriessource="columns",
percentage=percentage, stacked=stacked,
threedimensional=self.threedimensional))
doc.automaticstyles.addElement(plotstyle)
plotarea = chart.PlotArea(datasourcehaslabels=self.datasourcehaslabels, stylename=plotstyle)
mychart.addElement(plotarea)
# Style for the X,Y axes
axisstyle = style.Style(name="axisstyle", family="chart")
axisstyle.addElement( style.ChartProperties(displaylabel="true"))
axisstyle.addElement( style.TextProperties(fontfamily="'Nimbus Sans L'",
fontfamilygeneric="swiss", fontpitch="variable", fontsize="8pt"))
doc.automaticstyles.addElement(axisstyle)
# Title for the X axis
xaxis = chart.Axis(dimension="x", name="primary-x", stylename=axisstyle)
plotarea.addElement(xaxis)
xt = chart.Title()
xaxis.addElement(xt)
xt.addElement(text.P(text=self.x_axis))
# Title for the Y axis
yaxis = chart.Axis(dimension="y", name="primary-y", stylename=axisstyle)
plotarea.addElement(yaxis)
yt = chart.Title()
yaxis.addElement(yt)
yt.addElement(text.P(text=self.y_axis))
# Set up the data series. OOo doesn't show correctly without them.
s = chart.Series(valuescellrangeaddress="local-table.B2:.B6", labelcelladdress="local-table.B1")
s.addElement(chart.DataPoint(repeated=5))
plotarea.addElement(s)
s = chart.Series(valuescellrangeaddress="local-table.C2:.C6", labelcelladdress="local-table.C1")
s.addElement(chart.DataPoint(repeated=5))
plotarea.addElement(s)
# The data are placed in a table inside the chart object - but could also be a
# table in the main document
datatable = DataTable(self.values)
datatable.datasourcehaslabels = self.datasourcehaslabels
mychart.addElement(datatable())
if __name__ == "__main__":
# Create the subdocument
chartdoc = OpenDocumentChart()
mychart = BarChart()
mychart.title = "SPECTRE"
mychart.subtitle = "SPecial Executive for Counter-intelligence, Terrorism, Revenge and Extortion"
mychart.x_axis = "Divisions"
mychart.y_axis = u"€ (thousand)"
# These represent the data. Six rows in three columns
mychart.values = (
('','Expense','Revenue'),
('Counterfeit',1000,1500),
('Murder',1100,1150),
('Prostitution',3200,2350),
('Blackmail',1100,1150),
('Larceny',1000,1750)
)
mychart.datasourcehaslabels = "both"
mychart(chartdoc)
    # Create the containing document
textdoc = OpenDocumentText()
    # Create a paragraph to contain the frame. You can put the frame directly
    # as a child of textdoc.text, but both KWord and OOo have problems with
    # this approach.
p = text.P()
textdoc.text.addElement(p)
# Create the frame.
df = draw.Frame(width="476pt", height="404pt", anchortype="paragraph")
p.addElement(df)
# Here we add the subdocument to the main document. We get back a reference
# to use in the href.
objectloc = textdoc.addObject(chartdoc)
do = draw.Object(href=objectloc)
# Put the object inside the frame
df.addElement(do)
textdoc.save("spectre-balance", True)
|
eeauditor/auditors/aws/Amazon_VPC_Auditor.py | kbhagi/ElectricEye | 442 | 11101846 |
#This file is part of ElectricEye.
#SPDX-License-Identifier: Apache-2.0
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#KIND, either express or implied. See the License for the
#specific language governing permissions and limitations
#under the License.
import boto3
import datetime
from check_register import CheckRegister
registry = CheckRegister()
# create boto3 clients
ec2 = boto3.client("ec2")
# loop through vpcs
def describe_vpcs(cache):
response = cache.get("describe_vpcs")
if response:
return response
cache["describe_vpcs"] = ec2.describe_vpcs(DryRun=False)
return cache["describe_vpcs"]
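# Illustrative note: the "cache" dict is shared across checks so DescribeVpcs is only
# called once per run. A minimal driver sketch (the account id and region below are
# made-up example values):
#
#   cache = {}
#   for finding in vpc_default_check(cache, "111122223333", "us-east-1", "aws"):
#       print(finding["Id"], finding["Compliance"]["Status"])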
@registry.register_check("ec2")
def vpc_default_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[VPC.1] Consider deleting the Default VPC if unused"""
vpc = describe_vpcs(cache=cache)
for vpcs in vpc["Vpcs"]:
vpcId = str(vpcs["VpcId"])
vpcArn = f"arn:{awsPartition}:ec2:{awsRegion}:{awsAccountId}vpc/{vpcId}"
defaultVpcCheck = str(vpcs["IsDefault"])
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
if defaultVpcCheck == "True":
finding = {
"SchemaVersion": "2018-10-08",
"Id": vpcArn + "/vpc-is-default-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": vpcArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "MEDIUM"},
"Confidence": 99,
"Title": "[VPC.1] Consider deleting the Default VPC if unused",
"Description": "VPC "
+ vpcId
+ " has been identified as the Default VPC, consider deleting this VPC if it is not necessary for daily operations. The Default VPC in AWS Regions not typically used can serve as a persistence area for malicious actors, additionally, many services will automatically use this VPC which can lead to a degraded security posture. Refer to the remediation instructions if this configuration is not intended",
"Remediation": {
"Recommendation": {
"Text": "For more information on the default VPC refer to the Deleting Your Default Subnets and Default VPC section of the Amazon Virtual Private Cloud User Guide",
"Url": "https://docs.aws.amazon.com/vpc/latest/userguide/default-vpc.html#deleting-default-vpc",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEc2Vpc",
"Id": vpcArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"VpcId": vpcId}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-5",
"NIST SP 800-53 AC-4",
"NIST SP 800-53 AC-10",
"NIST SP 800-53 SC-7",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.1.3",
"ISO 27001:2013 A.13.2.1",
"ISO 27001:2013 A.14.1.2",
"ISO 27001:2013 A.14.1.3",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": vpcArn + "/vpc-is-default-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": vpcArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[VPC.1] Consider deleting the Default VPC if unused",
"Description": "VPC " + vpcId + " is not the Default VPC",
"Remediation": {
"Recommendation": {
"Text": "For more information on the default VPC refer to the Deleting Your Default Subnets and Default VPC section of the Amazon Virtual Private Cloud User Guide",
"Url": "https://docs.aws.amazon.com/vpc/latest/userguide/default-vpc.html#deleting-default-vpc",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEc2Vpc",
"Id": vpcArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"VpcId": vpcId}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.AC-5",
"NIST SP 800-53 AC-4",
"NIST SP 800-53 AC-10",
"NIST SP 800-53 SC-7",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.1.3",
"ISO 27001:2013 A.13.2.1",
"ISO 27001:2013 A.14.1.2",
"ISO 27001:2013 A.14.1.3",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
@registry.register_check("ec2")
def vpc_flow_logs_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[VPC.2] Flow Logs should be enabled for all VPCs"""
vpc = describe_vpcs(cache=cache)
for vpcs in vpc["Vpcs"]:
vpcId = str(vpcs["VpcId"])
vpcArn = f"arn:{awsPartition}:ec2:{awsRegion}:{awsAccountId}vpc/{vpcId}"
response = ec2.describe_flow_logs(
DryRun=False, Filters=[{"Name": "resource-id", "Values": [vpcId]}]
)
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
if str(response["FlowLogs"]) == "[]":
finding = {
"SchemaVersion": "2018-10-08",
"Id": vpcArn + "/vpc-flow-log-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": vpcArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "MEDIUM"},
"Confidence": 99,
"Title": "[VPC.2] Flow Logs should be enabled for all VPCs",
"Description": "VPC "
+ vpcId
+ " does not have flow logging enabled. Refer to the remediation instructions if this configuration is not intended",
"Remediation": {
"Recommendation": {
"Text": "For more information on flow logs refer to the VPC Flow Logs section of the Amazon Virtual Private Cloud User Guide",
"Url": "https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEc2Vpc",
"Id": vpcArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"VpcId": vpcId}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF DE.AE-3",
"NIST SP 800-53 AU-6",
"NIST SP 800-53 CA-7",
"NIST SP 800-53 IR-4",
"NIST SP 800-53 IR-5",
"NIST SP 800-53 IR-8",
"NIST SP 800-53 SI-4",
"AICPA TSC CC7.2",
"ISO 27001:2013 A.12.4.1",
"ISO 27001:2013 A.16.1.7",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": vpcArn + "/vpc-flow-log-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": vpcArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[VPC.2] Flow Logs should be enabled for all VPCs",
"Description": "VPC " + vpcId + " has flow logging enabled.",
"Remediation": {
"Recommendation": {
"Text": "For more information on flow logs refer to the VPC Flow Logs section of the Amazon Virtual Private Cloud User Guide",
"Url": "https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEc2Vpc",
"Id": vpcArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"VpcId": vpcId}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF DE.AE-3",
"NIST SP 800-53 AU-6",
"NIST SP 800-53 CA-7",
"NIST SP 800-53 IR-4",
"NIST SP 800-53 IR-5",
"NIST SP 800-53 IR-8",
"NIST SP 800-53 SI-4",
"AICPA TSC CC7.2",
"ISO 27001:2013 A.12.4.1",
"ISO 27001:2013 A.16.1.7",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
@registry.register_check("ec2")
def subnet_public_ip_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[VPC.3] Subnets should not automatically map Public IP addresses on launch"""
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
vpc = describe_vpcs(cache=cache)
myVpcs = vpc["Vpcs"]
for vpcs in myVpcs:
vpcId = str(vpcs["VpcId"])
# Get subnets for the VPC
for snet in ec2.describe_subnets(Filters=[{'Name': 'vpc-id','Values': [vpcId]}])["Subnets"]:
snetArn = str(snet["SubnetArn"])
snetId = str(snet["SubnetId"])
if str(snet["MapPublicIpOnLaunch"]) == "True":
# This is a failing check
finding = {
"SchemaVersion": "2018-10-08",
"Id": snetArn + "/subnet-map-public-ip-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": snetArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[VPC.3] Subnets should not automatically map Public IP addresses on launch",
"Description": "Subnet "
+ snetId
+ " maps Public IPs on Launch, consider disabling this to avoid unncessarily exposing workloads to the internet. Refer to the remediation instructions if this configuration is not intended",
"Remediation": {
"Recommendation": {
"Text": "For information on IP addressing refer to the IP Addressing in your VPC section of the Amazon Virtual Private Cloud User Guide",
"Url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-ip-addressing.html"
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEc2Subnet",
"Id": snetArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"VpcId": vpcId,
"SubnetId": snetId
}
}
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-5",
"NIST SP 800-53 AC-4",
"NIST SP 800-53 AC-10",
"NIST SP 800-53 SC-7",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.1.3",
"ISO 27001:2013 A.13.2.1",
"ISO 27001:2013 A.14.1.2",
"ISO 27001:2013 A.14.1.3",
]
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE"
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": snetArn + "/subnet-map-public-ip-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": snetArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[VPC.3] Subnets should not automatically map Public IP addresses on launch",
"Description": "Subnet "
+ snetId
+ " does not map Public IPs on Launch.",
"Remediation": {
"Recommendation": {
"Text": "For information on IP addressing refer to the IP Addressing in your VPC section of the Amazon Virtual Private Cloud User Guide",
"Url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-ip-addressing.html"
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEc2Subnet",
"Id": snetArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"VpcId": vpcId,
"SubnetId": snetId
}
}
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.AC-5",
"NIST SP 800-53 AC-4",
"NIST SP 800-53 AC-10",
"NIST SP 800-53 SC-7",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.1.3",
"ISO 27001:2013 A.13.2.1",
"ISO 27001:2013 A.14.1.2",
"ISO 27001:2013 A.14.1.3"
]
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED"
}
yield finding
@registry.register_check("ec2")
def subnet_no_ip_space_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[VPC.4] Subnets should be monitored for available IP address space"""
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
vpc = describe_vpcs(cache=cache)
myVpcs = vpc["Vpcs"]
for vpcs in myVpcs:
vpcId = str(vpcs["VpcId"])
# Get subnets for the VPC
for snet in ec2.describe_subnets(Filters=[{'Name': 'vpc-id','Values': [vpcId]}])["Subnets"]:
snetArn = str(snet["SubnetArn"])
snetId = str(snet["SubnetId"])
if int(snet["AvailableIpAddressCount"]) <= 1:
# This is a failing check
finding = {
"SchemaVersion": "2018-10-08",
"Id": snetArn + "/subnet-map-no-more-ips-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": snetArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "MEDIUM"},
"Confidence": 99,
"Title": "[VPC.4] Subnets should be monitored for available IP address space",
"Description": "Subnet "
+ snetId
+ " does not have any available IP address space, consider terminating unncessary workloads or expanding CIDR capacity to avoid availability losses. Refer to the remediation instructions if this configuration is not intended",
"Remediation": {
"Recommendation": {
"Text": "For information on IP addressing refer to the IP Addressing in your VPC section of the Amazon Virtual Private Cloud User Guide",
"Url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-ip-addressing.html"
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEc2Subnet",
"Id": snetArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"VpcId": vpcId,
"SubnetId": snetId
}
}
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF ID.BE-5",
"NIST CSF PR.PT-5",
"NIST SP 800-53 CP-2",
"NIST SP 800-53 CP-11",
"NIST SP 800-53 SA-13",
"NIST SP 800-53 SA14",
"AICPA TSC CC3.1",
"AICPA TSC A1.2",
"ISO 27001:2013 A.11.1.4",
"ISO 27001:2013 A.17.1.1",
"ISO 27001:2013 A.17.1.2",
"ISO 27001:2013 A.17.2.1",
]
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE"
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": snetArn + "/subnet-map-no-more-ips-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": snetArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[VPC.4] Subnets should be monitored for available IP address space",
"Description": "Subnet "
+ snetId
+ " has available IP address space, well, at least 2 lol...",
"Remediation": {
"Recommendation": {
"Text": "For information on IP addressing refer to the IP Addressing in your VPC section of the Amazon Virtual Private Cloud User Guide",
"Url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-ip-addressing.html"
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEc2Subnet",
"Id": snetArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"VpcId": vpcId,
"SubnetId": snetId
}
}
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF ID.BE-5",
"NIST CSF PR.PT-5",
"NIST SP 800-53 CP-2",
"NIST SP 800-53 CP-11",
"NIST SP 800-53 SA-13",
"NIST SP 800-53 SA14",
"AICPA TSC CC3.1",
"AICPA TSC A1.2",
"ISO 27001:2013 A.11.1.4",
"ISO 27001:2013 A.17.1.1",
"ISO 27001:2013 A.17.1.2",
"ISO 27001:2013 A.17.2.1",
]
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED"
}
yield finding |
Arrays/random_sample.py | techsavvyy/coding-problems | 2,647 | 11101853 |
'''
Random Sample
Given an array and length of the sample, find a random sample from that array.
Input: [1, 2, 3, 4], 2
Output: This is a nondeterministic algorithm, C(N, K) combinations exist.
In this case 4! / (2! * (4 - 2)!) = 6. All combinations are a valid solution.
[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]
=========================================
Simple solution in one pass, Reservoir sampling. It may look like the elements don't have an equal probability
of being chosen, but there is a proof that they do: https://en.wikipedia.org/wiki/Reservoir_sampling
Btw, this solution also works when we don't know the total number of elements in advance.
Time Complexity: O(N)
Space Complexity: O(K)
Another simple solution in one pass: the probability for each element to be chosen in the first draw is K/N,
after that we remove that element from consideration. In the second draw the probability for each element is (K-1)/(N-1),
in the third (K-2)/(N-2), etc... This solution can be proved correct using an induction hypothesis.
Time Complexity: O(N)
Space Complexity: O(K)
Note: Python already provides a sample method (in the random module: "from random import sample", sample(arr, k)).
Note 2: This problem can also be solved using the shuffle method (shuffle_array.py) and taking the first K elements from the shuffled array.
Note 3: Don't use solutions that iterate until K distinct elements/indices are chosen. For example:
    distinct = set()
    while len(distinct) < k:
        distinct.add(randint(0, n - 1))
Why? Because if you try it with an array with 100 000 elements and K equal to 99 999, then the code inside the "while"
could be executed more than 1 million times, that's O(10*N). So this approach doesn't work well when K is close to N,
too many duplicates will be chosen; read about the Birthday Problem (https://en.wikipedia.org/wiki/Birthday_problem).
'''
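# Worked check of the second (probabilistic) approach for arr = [1, 2, 3, 4], k = 2:
# element 1 is kept with probability 2/4 = 1/2. Element 2 is then kept with
# probability 1/3 if element 1 was taken, or 2/3 otherwise, so in total
# 1/2 * 1/3 + 1/2 * 2/3 = 1/2 = k/n. Repeating the same bookkeeping gives 1/2 for
# every element, which is the equal-probability property claimed above.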
##############
# Solution 1 #
##############
from random import randint
def reservoir_sampling(arr, k):
# fill the reservoir array
sample = []
for i in range(k):
sample.append(arr[i])
# replace elements with gradually decreasing probability
n = len(arr)
for i in range(k, n):
# randint(a, b) generates a uniform integer from the inclusive range {a, ..., b} (a <= X <= b)
j = randint(0, i)
if j < k:
sample[j] = arr[i]
return sample
##############
# Solution 2 #
##############
from random import random
def probabilistic_sampling(arr, k):
sample = []
n = len(arr)
for el in arr:
# random() generates a uniform double in this range (0 <= X < 1)
        # (k / n) is the probability for this element to be chosen (0 <= k/n <= 1)
if random() < (k / n):
sample.append(el)
            k -= 1 # elements still to be chosen
        n -= 1 # elements left to choose from
return sample
###########
# Testing #
###########
# Test 1
# Correct result => One of these: [1, 2], [1, 3], [1, 4], [2, 3], [2, 4]
arr = [1, 2, 3, 4]
k = 2
print(reservoir_sampling(arr, k))
print(probabilistic_sampling(arr, k)) |
tests/test_torch_hub.py | unitaryai/detoxify | 404 | 11101862 |
import torch
import gc
def test_torch_hub_models():
result = torch.hub.list("unitaryai/detoxify")
def test_torch_hub_bert():
model = torch.hub.load('unitaryai/detoxify', 'toxic_bert')
del model
gc.collect()
def test_torch_hub_roberta():
model = torch.hub.load('unitaryai/detoxify', 'unbiased_toxic_roberta')
del model
gc.collect()
def test_torch_hub_multilingual():
model = torch.hub.load('unitaryai/detoxify', 'multilingual_toxic_xlm_r')
del model
gc.collect()
|
sample/tensorflow/unit_test/fused_QKV_multihead_attention_unit_test.py | dujiangsu/FasterTransformer | 777 | 11101876 |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import tensorflow as tf
import numpy as np
import unittest
import sys
import os
import math
sys.path.append("./tensorflow/")
from utils.encoder import build_sequence_mask
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
USE_CACHE_BATCH_MAJOR_ATTENTION = True
class TestFusedQKVMultiheadAttention(unittest.TestCase):
def test_attn_batch_fp32(self):
for b in [1, 4, 32, 128]:
tf.reset_default_graph()
self.run_attn(b, 128, 12, 64, tf.float32)
def test_attn_batch_fp16(self):
for b in [1, 4, 32, 128]:
tf.reset_default_graph()
self.run_attn(b, 128, 12, 64, tf.float16)
def test_attn_seq_fp32(self):
for seq in [64, 96, 128, 384]:
tf.reset_default_graph()
self.run_attn(4, seq, 12, 64, tf.float32)
def test_attn_seq_fp16(self):
for seq in [64, 96, 128, 384]:
tf.reset_default_graph()
self.run_attn(4, seq, 12, 64, tf.float16)
def test_attn_head_fp32(self):
for head in [8, 12, 16]:
tf.reset_default_graph()
self.run_attn(4, 128, head, 64, tf.float32)
def test_attn_head_fp16(self):
for head in [8, 12, 16]:
tf.reset_default_graph()
self.run_attn(4, 128, head, 64, tf.float16)
def test_attn_size_fp32(self):
for size in [32, 64, 128]:
tf.reset_default_graph()
self.run_attn(4, 128, 12, size, tf.float32)
def test_attn_size_fp16(self):
for size in [32, 64, 128]:
tf.reset_default_graph()
self.run_attn(4, 128, 12, size, tf.float16)
def run_attn(self, batch_size, seq_len, head_num, size_per_head, data_type):
threshold = 3e-5
if data_type == tf.float16:
threshold = 3e-3
# Inputs: qkv_buf and k/v cache
        # Do: update k/v cache, and compute attention (Q*K, QK*V)
# Output: attention result, new k/v cache
# Notes: Only used for decoder, so seqlen of q is always 1.
np.random.seed(1)
tf.set_random_seed(1)
qkv_buf = tf.random.normal([batch_size, 3, head_num, size_per_head], dtype=data_type)
qkv_bias = tf.random.normal([3, head_num, size_per_head], dtype=data_type)
k_cache = tf.random.normal([batch_size, head_num, seq_len - 1, size_per_head], dtype=data_type)
v_cache = tf.random.normal([batch_size, head_num, seq_len - 1, size_per_head], dtype=data_type)
q, k, v = tf.split(qkv_buf + qkv_bias, 3, axis=1)
q = tf.transpose(q, [0, 2, 1, 3])
k = tf.transpose(k, [0, 2, 1, 3])
v = tf.transpose(v, [0, 2, 1, 3])
keys = tf.concat([k_cache, k], axis=2)
values = tf.concat([v_cache, v], axis=2)
tf_k_cache = keys
tf_v_cache = values
q *= (size_per_head)**-0.5
dot = tf.matmul(q, keys, transpose_b=True)
attn = tf.cast(tf.nn.softmax(tf.cast(dot, data_type)), dot.dtype)
context = tf.matmul(attn, values)
tf_attn_result = tf.transpose(context, [0, 2, 1, 3])
fused_multihead_attention_op = tf.load_op_library(os.path.join('./lib/libtf_fused_multihead_attention.so'))
# if USE_CACHE_BATCH_MAJOR_ATTENTION == True
# The layout of the cache buffer for the keys is [batch_size, head_num, size_per_head/x, seq_len, x]
# where x == 8 for FP16 and x == 4 for FP32 where the fastest moving dimension (contiguous data)
# is the rightmost one. The values for x are chosen to create chunks of 16 bytes.
# The layout of the cache buffer for the values is [batch_size, head_num, seq_len, size_per_head].
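        # Concrete example of that layout: with FP16 (x == 8) and, say, batch_size=4,
        # head_num=12, seq_len=128, size_per_head=64, the K cache is reshaped to
        # [4, 12, 64/8, 128, 8] = [4, 12, 8, 128, 8] (16-byte chunks of 8 fp16 values),
        # while the V cache keeps the plain [4, 12, 128, 64] layout.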
if USE_CACHE_BATCH_MAJOR_ATTENTION == True:
x = 8 if data_type == tf.float16 else 4
assert size_per_head % x == 0
ft_k_cache = tf.concat([k_cache, tf.zeros_like(k)], axis=2)
ft_k_cache_shape = np.array([batch_size, head_num, seq_len, size_per_head / x, x], dtype=np.int32)
ft_k_cache = tf.reshape(ft_k_cache, ft_k_cache_shape)
ft_k_cache = tf.transpose(ft_k_cache, [0, 1, 3, 2, 4])
ft_v_cache = tf.concat([v_cache, tf.zeros_like(v)], axis=2)
else :
ft_k_cache = tf.concat([k_cache, tf.zeros_like(k)], axis=2) # [batch_size, head_num, seq_len + 1, size_per_head]
ft_k_cache = tf.transpose(ft_k_cache, [2, 0, 1, 3]) # [seq_len + 1, batch_size, head_num, size_per_head]
ft_v_cache = tf.concat([v_cache, tf.zeros_like(v)], axis=2)
ft_v_cache = tf.transpose(ft_v_cache, [2, 0, 1, 3])
ft_attn_result, ft_k_cache, ft_v_cache = fused_multihead_attention_op.fused_qkv_multi_head_attention(qkv_buf,
qkv_bias,
ft_k_cache,
ft_v_cache,
batch_size,
seq_len,
head_num,
size_per_head)
if USE_CACHE_BATCH_MAJOR_ATTENTION == True:
ft_k_cache = tf.transpose(ft_k_cache, [0, 1, 3, 2, 4])
ft_k_cache_shape = np.array([batch_size, head_num, seq_len, size_per_head], dtype=np.int32)
ft_k_cache = tf.reshape(ft_k_cache, ft_k_cache_shape)
else:
ft_k_cache = tf.transpose(ft_k_cache, [1, 2, 0, 3]) # [batch_size, head_num, seq_len + 1, size_per_head]
ft_v_cache = tf.transpose(ft_v_cache, [1, 2, 0, 3])
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
print(batch_size, seq_len, head_num, size_per_head)
sess.run(tf.global_variables_initializer())
tf_attn_result_val, ft_attn_result_val, k_cache_diff_val, v_cache_diff_val = sess.run([tf_attn_result,
ft_attn_result,
tf_k_cache - ft_k_cache,
tf_v_cache - ft_v_cache])
attn_diff_val = tf_attn_result_val - ft_attn_result_val
attn_max_diff = abs(attn_diff_val).max()
attn_max_diff_id = abs(attn_diff_val).argmax()
print("attn_max_diff_id = ", attn_max_diff_id)
k_cache_max_diff = abs(k_cache_diff_val).max()
v_cache_max_diff = abs(v_cache_diff_val).max()
print("tf_attn_result_val at max diff = ", tf_attn_result_val.flatten()[attn_max_diff_id])
print("ft_attn_result_val at max diff = ", ft_attn_result_val.flatten()[attn_max_diff_id])
print("threshold = ", threshold)
print(attn_max_diff)
print(k_cache_max_diff)
print(v_cache_max_diff)
sys.stdout.flush()
assert(attn_max_diff < threshold)
assert(k_cache_max_diff < threshold)
assert(v_cache_max_diff < threshold)
if __name__ == "__main__":
unittest.main()
|
animatplot/blocks/vectors.py | ianhi/animatplot | 402 | 11101881 |
from .base import Block
from .image_like import Pcolormesh
import numpy as np
class Quiver(Block):
"""
A block for animated quiver plots
Parameters
----------
X : 1D or 2D numpy array
The x positions of the arrows. Cannot be animated.
Y : 1D or 2D numpy array
The y positions of the arrows. Cannot be animated.
U : 2D or 3D numpy array
The U displacement of the arrows. 1 dimension
higher than the X, Y arrays.
V : 2D or 3D numpy array
        The V displacement of the arrows. 1 dimension
higher than the X, Y arrays.
ax : matplotlib.axes.Axes, optional
        The matplotlib axes to attach the block to.
Defaults to matplotlib.pyplot.gca()
t_axis : int, optional
The axis of the array that represents time. Defaults to 0.
No effect if U, V are lists.
Attributes
----------
ax : matplotlib.axes.Axes
The matplotlib axes that the block is attached to.
Notes
-----
This block accepts additional keyword arguments to be passed to
:meth:`matplotlib.axes.Axes.quiver`
"""
def __init__(self, X, Y, U, V, ax=None, t_axis=0, **kwargs):
self.X = X
self.Y = Y
self.U = np.asanyarray(U)
self.V = np.asanyarray(V)
if X.shape != Y.shape:
raise ValueError("X, Y must have the same shape")
if self.U.shape != self.V.shape:
raise ValueError("U, V must have the same shape")
super().__init__(ax, t_axis)
self._dim = len(self.U.shape)
self._is_list = isinstance(U, list)
Slice = self._make_slice(0, self._dim)
self.Q = self.ax.quiver(self.X, self.Y,
self.U[Slice], self.V[Slice],
**kwargs)
def _update(self, i):
Slice = self._make_slice(i, self._dim)
self.Q.set_UVC(self.U[Slice], self.V[Slice])
return self.Q
def __len__(self):
if self._is_list:
return self.U.shape[0]
return self.U.shape[self.t_axis]
def vector_comp(X, Y, U, V, skip=5, *, t_axis=0, pcolor_kw={}, quiver_kw={}):
"""produces an animation of vector fields
    This takes a 2D vector field, plots the magnitude as a pcolormesh and the
    normalized direction as a quiver plot, and then animates it.
    This is a convenience function. It wraps around the Pcolormesh and Quiver
blocks. It will be more restrictive than using the blocks themselves. If
you need more control, or the ability to pass data in as a list, then use
the individual blocks.
Parameters
----------
X : 2D numpy array
The x location of the vectors to be animated
Y : 2D numpy array
        The y location of the vectors to be animated
U : 3D numpy array
The x components of the vectors to be animated.
V : 3D numpy array
The y components of the vectors to be animated.
skip : int, optional
        The number of values to skip over when making the quiver plot.
Higher skip means fewer arrows. For best results, the skip should
divide the length of the data-1. Defaults to 5.
t_axis : int, optional
        The axis of the U, V arrays that represents time. Defaults to 0. Note
this is different from the defaults that blocks choose. This default
is chosen to be consistent with 3D-meshgrids (meshgrid(x, y, t)).
pcolor_kw : dict, optional
A dictionary of parameters to pass to pcolormesh.
quiver_kw : dict, optional
A dictionary of parameters to pass to quiver.
Returns
-------
list of Animatplot.blocks.Block
A list of all the blocks used in the animation. The list
contains a Pcolorblock, and a Quiver block in that order.
"""
# plot the magnitude of the vectors as a pcolormesh
magnitude = np.sqrt(U**2+V**2)
pcolor_block = Pcolormesh(X, Y, magnitude, t_axis=t_axis, **pcolor_kw)
# use a subset of the data to plot the arrows as a quiver plot.
xy_slice = tuple([slice(None, None, skip)]*len(X.shape))
uv_slice = [slice(None, None, skip)]*len(U.shape)
uv_slice[t_axis] = slice(None)
uv_slice = tuple(uv_slice)
quiver_block = Quiver(X[xy_slice], Y[xy_slice],
U[uv_slice]/magnitude[uv_slice],
V[uv_slice]/magnitude[uv_slice],
t_axis=t_axis, **quiver_kw)
return [pcolor_block, quiver_block]
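# A minimal usage sketch (an assumption, not part of the original module): builds a
# unit-magnitude rotating field with t_axis=0 and hands it to vector_comp. Rendering
# the returned blocks with animatplot's Animation class is left to the caller.
if __name__ == "__main__":
    x = np.linspace(-2, 2, 21)
    y = np.linspace(-2, 2, 21)
    t = np.linspace(0, 2 * np.pi, 30)
    X, Y = np.meshgrid(x, y)                      # 2D spatial grids
    U = np.cos(X[None, :, :] + t[:, None, None])  # shape (t, y, x)
    V = np.sin(X[None, :, :] + t[:, None, None])  # same phase, so the magnitude is never 0
    blocks = vector_comp(X, Y, U, V, skip=4, t_axis=0)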
|
RecoTracker/FinalTrackSelectors/python/trackListMerger_cfi.py | ckamtsikis/cmssw | 852 | 11101883 | import FWCore.ParameterSet.Config as cms
#
# ctf tracks parameter-set entries for module
#
# TrackListMerger
#
# located in
#
# RecoTracker/FinalTrackSelectors
#
#
# sequence dependency:
#
#
#
# cleans and merges ctf and rs Track lists and puts the new list back in the Event
trackListMerger = cms.EDProducer("TrackListMerger",
# minimum shared fraction to be called duplicate for tracks between collections
ShareFrac = cms.double(0.19),
# best track chosen by chi2 modified by parameters below:
FoundHitBonus = cms.double(5.0),
LostHitPenalty = cms.double(5.0),
# minimum pT in GeV/c
MinPT = cms.double(0.05),
# minimum difference in rechit position in cm
# negative Epsilon uses sharedInput for comparison
Epsilon = cms.double(-0.001),
# maximum chisq/dof
MaxNormalizedChisq = cms.double(1000.0),
# minimum number of RecHits used in fit
MinFound = cms.int32(3),
# always override these in the clone
TrackProducers = cms.VInputTag(cms.InputTag(''),cms.InputTag('')),
hasSelector = cms.vint32(0,0),
# minimum shared fraction to be called duplicate
indivShareFrac = cms.vdouble(1.0,1.0),
selectedTrackQuals = cms.VInputTag(cms.InputTag(""),cms.InputTag("")),
setsToMerge = cms.VPSet( cms.PSet( tLists=cms.vint32(0,1), pQual=cms.bool(False)),
cms.PSet( tLists=cms.vint32(2,3), pQual=cms.bool(True) ),
cms.PSet( tLists=cms.vint32(4,5), pQual=cms.bool(True) ),
cms.PSet( tLists=cms.vint32(2,3,4,5), pQual=cms.bool(True) ),
cms.PSet( tLists=cms.vint32(0,1,2,3,4,5), pQual=cms.bool(True) )
),
trackAlgoPriorityOrder = cms.string("trackAlgoPriorityOrder"),
# set new quality for confirmed tracks for each merged pair and then for the final pair
allowFirstHitShare = cms.bool(True),
newQuality = cms.string('confirmed'),
copyExtras = cms.untracked.bool(False),
writeOnlyTrkQuals = cms.bool(False),
copyMVA = cms.bool(True)
)
|
etl/parsers/etw/Microsoft_Windows_RemoteApp_and_Desktop_Connections.py | IMULMUL/etl-parser | 104 | 11101904
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-RemoteApp and Desktop Connections
GUID : 1b8b402d-78dc-46fb-bf71-46e64aedf165
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1000, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1000_0(Etw):
pattern = Struct(
"Name" / WString,
"FeedURL" / WString,
"ErrorCode" / Int32ul,
"ErrorCodeAdditional" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1001, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1001_0(Etw):
pattern = Struct(
"Name" / WString,
"FeedURL" / WString,
"ErrorCode" / Int32ul,
"ErrorCodeAdditional" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1002, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1002_0(Etw):
pattern = Struct(
"Name" / WString,
"FeedURL" / WString,
"ErrorCode" / Int32ul,
"ErrorCodeAdditional" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1003, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1003_0(Etw):
pattern = Struct(
"Name" / WString,
"FeedURL" / WString,
"ErrorCode" / Int32ul,
"ErrorCodeAdditional" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1004, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1004_0(Etw):
pattern = Struct(
"Name" / WString,
"FeedURL" / WString,
"ErrorCode" / Int32ul,
"ErrorCodeAdditional" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1005, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1005_0(Etw):
pattern = Struct(
"Name" / WString,
"FeedURL" / WString,
"ErrorCode" / Int32ul,
"ErrorCodeAdditional" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1006, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1006_0(Etw):
pattern = Struct(
"Name" / WString,
"FeedURL" / WString,
"ErrorCode" / Int32ul,
"ErrorCodeAdditional" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1007, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1007_0(Etw):
pattern = Struct(
"Name" / WString,
"FeedURL" / WString,
"ErrorCode" / Int32ul,
"ErrorCodeAdditional" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1008, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1008_0(Etw):
pattern = Struct(
"Name" / WString,
"FeedURL" / WString,
"ErrorCode" / Int32ul,
"ErrorCodeAdditional" / Int32ul,
"NumResourcesAvailable" / Int32ul,
"NumResourcesDownloaded" / Int32ul,
"NumResourcesNotDownloaded" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1009, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1009_0(Etw):
pattern = Struct(
"Name" / WString,
"FeedURL" / WString,
"ErrorCode" / Int32ul,
"ErrorCodeAdditional" / Int32ul,
"NumResourcesAvailable" / Int32ul,
"NumResourcesDownloaded" / Int32ul,
"NumResourcesNotDownloaded" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1010, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1010_0(Etw):
pattern = Struct(
"Name" / WString,
"FeedURL" / WString,
"ErrorCode" / Int32ul,
"ErrorCodeAdditional" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1011, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1011_0(Etw):
pattern = Struct(
"Name" / WString,
"FeedURL" / WString,
"ErrorCode" / Int32ul,
"ErrorCodeAdditional" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1012, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1012_0(Etw):
pattern = Struct(
"Name" / WString,
"FeedURL" / WString,
"ErrorCode" / Int32ul,
"ErrorCodeAdditional" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1013, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1013_0(Etw):
pattern = Struct(
"Name" / WString,
"FeedURL" / WString,
"ErrorCode" / Int32ul,
"ErrorCodeAdditional" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1014, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1014_0(Etw):
pattern = Struct(
"Name" / WString,
"FeedURL" / WString,
"ErrorCode" / Int32ul,
"ErrorCodeAdditional" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1015, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1015_0(Etw):
pattern = Struct(
"Name" / WString,
"FeedURL" / WString,
"ErrorCode" / Int32ul,
"ErrorCodeAdditional" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1016, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1016_0(Etw):
pattern = Struct(
"Name" / WString,
"FeedURL" / WString,
"ErrorCode" / Int32ul,
"ErrorCodeAdditional" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1017, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1017_0(Etw):
pattern = Struct(
"Name" / WString,
"FeedURL" / WString,
"ErrorCode" / Int32ul,
"ErrorCodeAdditional" / Int32ul,
"ResourceURL" / WString
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1018, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1018_0(Etw):
pattern = Struct(
"Name" / WString,
"FeedURL" / WString,
"ErrorCode" / Int32ul,
"ErrorCodeAdditional" / Int32ul,
"ResourceURL" / WString
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1019, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1019_0(Etw):
pattern = Struct(
"Name" / WString,
"FeedURL" / WString
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1020, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1020_0(Etw):
pattern = Struct(
"Name" / WString,
"FeedURL" / WString
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1021, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1021_0(Etw):
pattern = Struct(
"String1" / WString,
"String2" / WString
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1022, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1022_0(Etw):
pattern = Struct(
"Hint" / WString,
"FeedURL" / WString
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1023, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1023_0(Etw):
pattern = Struct(
"Hint" / WString,
"FeedURL" / WString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1024, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1024_0(Etw):
pattern = Struct(
"Hint" / WString,
"FeedURL" / WString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1025, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1025_0(Etw):
pattern = Struct(
"ResourceName" / WString,
"ConnectionName" / WString,
"ConnectionURL" / WString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1026, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1026_0(Etw):
pattern = Struct(
"User" / WString
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1027, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1027_0(Etw):
pattern = Struct(
"UserName" / WString,
"ConnectionName" / WString
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1028, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1028_0(Etw):
pattern = Struct(
"UserName" / WString,
"ConnectionName" / WString
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1029, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1029_0(Etw):
pattern = Struct(
"UserName" / WString,
"ConnectionName" / WString
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1030, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1030_0(Etw):
pattern = Struct(
"UserName" / WString,
"ConnectionName" / WString
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1031, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1031_0(Etw):
pattern = Struct(
"ConnectionName" / WString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1032, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1032_0(Etw):
pattern = Struct(
"UserName" / WString,
"ConnectionName" / WString
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1033, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1033_0(Etw):
pattern = Struct(
"UserName" / WString,
"ConnectionName" / WString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1034, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1034_0(Etw):
pattern = Struct(
"ConnectionId" / Int32ul,
"ConnectionName" / WString
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1035, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1035_0(Etw):
pattern = Struct(
"ConnectionName" / WString
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1036, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1036_0(Etw):
pattern = Struct(
"ConnectionName" / WString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1037, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1037_0(Etw):
pattern = Struct(
"ConnectionId" / Int32ul,
"ConnectionName" / WString
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1038, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1038_0(Etw):
pattern = Struct(
"UserName" / WString,
"ConnectionName" / WString
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1039, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1039_0(Etw):
pattern = Struct(
"UserName" / WString,
"ConnectionName" / WString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1040, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1040_0(Etw):
pattern = Struct(
"UserName" / WString,
"ConnectionName" / WString
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1041, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1041_0(Etw):
pattern = Struct(
"RemoteAppName" / WString,
"ConnectionName" / WString,
"Reason" / WString
)
@declare(guid=guid("1b8b402d-78dc-46fb-bf71-46e64aedf165"), event_id=1042, version=0)
class Microsoft_Windows_RemoteApp_and_Desktop_Connections_1042_0(Etw):
pattern = Struct(
"UserName" / WString,
"ConnectionName" / WString,
"ErrorCode" / Int32ul
)
|
ib/ext/cfg/EWrapperMsgGenerator.py | LewisW/IbPy | 1,260 | 11101906
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" ib.ext.cfg.EWrapperMsgGenerator -> config module for EWrapperMsgGenerator.java.
"""
from java2python.config.default import modulePrologueHandlers
modulePrologueHandlers += [
'from ib.ext.AnyWrapperMsgGenerator import AnyWrapperMsgGenerator',
'from ib.ext.EClientSocket import EClientSocket',
'from ib.ext.MarketDataType import MarketDataType',
'from ib.ext.TickType import TickType',
'from ib.ext.Util import Util',
'',
'from ib.lib import Double',
]
|
project/game/ai/riichi.py | MahjongRepository/tenhou-python-bot | 201 | 11101931
from typing import List
from game.ai.defence.enemy_analyzer import EnemyAnalyzer
from game.ai.discard import DiscardOption
from game.ai.placement import Placement
from mahjong.tile import TilesConverter
from mahjong.utils import is_chi, is_honor, is_pair, is_terminal, plus_dora, simplify
class Riichi:
def __init__(self, player):
self.player = player
def should_call_riichi(self, discard_option: DiscardOption, threats: List[EnemyAnalyzer]):
assert discard_option.shanten == 0
assert not self.player.is_open_hand
hand_builder = self.player.ai.hand_builder
waiting_34 = discard_option.waiting
# empty waiting can be found in some cases
if not waiting_34:
return False
# save original hand state
# we will restore it after we have finished our routines
tiles_original, discards_original = hand_builder.emulate_discard(discard_option)
count_tiles = hand_builder.count_tiles(waiting_34, TilesConverter.to_34_array(self.player.closed_hand))
if count_tiles == 0:
# don't call karaten riichi
hand_builder.restore_after_emulate_discard(tiles_original, discards_original)
return False
# we decide if we should riichi or not before making a discard, hence we check for round step == 0
first_discard = self.player.round_step == 0
if first_discard and not self.player.table.meld_was_called:
hand_builder.restore_after_emulate_discard(tiles_original, discards_original)
# it is daburi!
return True
# regular path
if len(waiting_34) == 1:
should_riichi = self._should_call_riichi_one_sided(waiting_34, threats)
else:
should_riichi = self._should_call_riichi_many_sided(waiting_34, threats)
hand_builder.restore_after_emulate_discard(tiles_original, discards_original)
return should_riichi
def _should_call_riichi_one_sided(self, waiting_34: List[int], threats: List[EnemyAnalyzer]):
count_tiles = self.player.ai.hand_builder.count_tiles(
waiting_34, TilesConverter.to_34_array(self.player.closed_hand)
)
waiting_34 = waiting_34[0]
hand_value = self.player.ai.estimate_hand_value_or_get_from_cache(waiting_34, call_riichi=False)
hand_value_with_riichi = self.player.ai.estimate_hand_value_or_get_from_cache(waiting_34, call_riichi=True)
must_riichi = self.player.ai.placement.must_riichi(
has_yaku=(hand_value.yaku is not None and hand_value.cost is not None),
num_waits=count_tiles,
cost_with_riichi=hand_value_with_riichi.cost["main"],
cost_with_damaten=(hand_value.cost and hand_value.cost["main"] or 0),
)
if must_riichi == Placement.MUST_RIICHI:
return True
elif must_riichi == Placement.MUST_DAMATEN:
return False
tiles = self.player.closed_hand[:]
closed_melds = [x for x in self.player.melds if not x.opened]
for meld in closed_melds:
tiles.extend(meld.tiles[:3])
results, tiles_34 = self.player.ai.hand_builder.divide_hand(tiles, waiting_34)
result = results[0]
closed_tiles_34 = TilesConverter.to_34_array(self.player.closed_hand)
have_suji, have_kabe = self.player.ai.hand_builder.check_suji_and_kabe(closed_tiles_34, waiting_34)
# what if we have yaku
if hand_value.yaku is not None and hand_value.cost is not None:
min_cost = hand_value.cost["main"]
min_cost_with_riichi = hand_value_with_riichi and hand_value_with_riichi.cost["main"] or 0
# tanki honor is a good wait, let's damaten only if hand is already expensive
if is_honor(waiting_34):
if self.player.is_dealer and min_cost < 12000:
return True
if not self.player.is_dealer and min_cost < 8000:
return True
return False
is_chiitoitsu = len([x for x in result if is_pair(x)]) == 7
simplified_waiting = simplify(waiting_34)
for hand_set in result:
if waiting_34 not in hand_set:
continue
# tanki wait but not chiitoitsu
if is_pair(hand_set) and not is_chiitoitsu:
# let's not riichi tanki 4, 5, 6
if 3 <= simplified_waiting <= 5:
return False
# don't riichi tanki wait on 1, 2, 3, 7, 8, 9 if it's only 1 tile
if count_tiles == 1:
return False
# don't riichi 2378 tanki if hand has good value
if simplified_waiting != 0 and simplified_waiting != 8:
if self.player.is_dealer and min_cost >= 7700:
return False
if not self.player.is_dealer and min_cost >= 5200:
return False
# only riichi if we have suji-trap or there is kabe
if not have_suji and not have_kabe:
return False
# let's not push these bad wait against threats
if threats:
return False
return True
# tanki wait with chiitoitsu
if is_pair(hand_set) and is_chiitoitsu:
# chiitoitsu on last suit tile is not the best
if count_tiles == 1:
return False
# early riichi on 19 tanki is good
if (simplified_waiting == 0 or simplified_waiting == 8) and self.player.round_step < 7:
return True
# riichi on 19 tanki is good later too if we have 3 tiles to wait for
if (
(simplified_waiting == 0 or simplified_waiting == 8)
and self.player.round_step < 12
and count_tiles == 3
):
return True
# riichi on 28 tanki is good if we have 3 tiles to wait for
if (
(simplified_waiting == 1 or simplified_waiting == 7)
and self.player.round_step < 12
and count_tiles == 3
):
return True
                # otherwise only riichi if we have suji-trap or there is kabe
if not have_suji and not have_kabe:
return False
# let's not push these bad wait against threats
if threats:
return False
return True
# 1-sided wait means kanchan or penchan
if is_chi(hand_set):
# if we only have 1 tile to wait for, let's damaten
if count_tiles == 1:
return False
# for dealer it is always riichi
if self.player.is_dealer:
return True
# let's not push cheap hands against threats
elif threats and min_cost_with_riichi < 2600:
return False
if 3 <= simplified_waiting <= 5:
if min_cost_with_riichi >= 2600:
return True
# for not dealer let's not riichi cheap kanchan on 4, 5, 6
return False
# if we have 2 tiles to wait for and hand cost is good without riichi,
# let's damaten
if count_tiles == 2:
if self.player.is_dealer and min_cost >= 7700:
return False
if not self.player.is_dealer and min_cost >= 5200:
return False
# if we have more than two tiles to wait for and we have kabe or suji - insta riichi
if count_tiles > 2 and (have_suji or have_kabe):
return True
# 2 and 8 are good waits but not in every condition
if simplified_waiting == 1 or simplified_waiting == 7:
if self.player.round_step < 7:
if self.player.is_dealer and min_cost < 18000:
return True
if not self.player.is_dealer and min_cost < 8000:
return True
if self.player.round_step < 12:
if self.player.is_dealer and min_cost < 12000:
return True
if not self.player.is_dealer and min_cost < 5200:
return True
if self.player.round_step < 15:
if self.player.is_dealer and 2000 < min_cost < 7700:
return True
# 3 and 7 are ok waits sometimes too
if simplified_waiting == 2 or simplified_waiting == 6:
if self.player.round_step < 7:
if self.player.is_dealer and min_cost < 12000:
return True
if not self.player.is_dealer and min_cost < 5200:
return True
if self.player.round_step < 12:
if self.player.is_dealer and min_cost < 7700:
return True
if not self.player.is_dealer and min_cost < 5200:
return True
if self.player.round_step < 15:
if self.player.is_dealer and 2000 < min_cost < 7700:
return True
# otherwise only riichi if we have suji-trap or there is kabe
if not have_suji and not have_kabe:
return False
return True
# what if we don't have yaku
# our tanki wait is good, let's riichi
if is_honor(waiting_34):
return True
if count_tiles > 1:
# terminal tanki is ok, too, just should be more than one tile left
if is_terminal(waiting_34):
return True
# whatever dora wait is ok, too, just should be more than one tile left
if plus_dora(waiting_34 * 4, self.player.table.dora_indicators, add_aka_dora=False) > 0:
return True
simplified_waiting = simplify(waiting_34)
for hand_set in result:
if waiting_34 not in hand_set:
continue
if is_pair(hand_set):
# let's not riichi tanki wait without suji-trap or kabe
if not have_suji and not have_kabe:
return False
# let's not riichi tanki on last suit tile if it's early
if count_tiles == 1 and self.player.round_step < 6:
return False
# let's not riichi tanki 4, 5, 6 if it's early
if 3 <= simplified_waiting <= 5 and self.player.round_step < 6:
return False
# 1-sided wait means kanchan or penchan
# let's only riichi this bad wait if
            # it has all 4 tiles available or
# it's not too early
# and there are no threats
if not threats and is_chi(hand_set) and 4 <= simplified_waiting <= 6:
return count_tiles == 4 or self.player.round_step >= 6
return True
def _should_call_riichi_many_sided(self, waiting_34: List[int], threats: List[EnemyAnalyzer]):
count_tiles = self.player.ai.hand_builder.count_tiles(
waiting_34, TilesConverter.to_34_array(self.player.closed_hand)
)
hand_costs = []
hand_costs_with_riichi = []
waits_with_yaku = 0
for wait in waiting_34:
hand_value = self.player.ai.estimate_hand_value_or_get_from_cache(wait, call_riichi=False)
if hand_value.error is None:
hand_costs.append(hand_value.cost["main"])
if hand_value.yaku is not None and hand_value.cost is not None:
waits_with_yaku += 1
hand_value_with_riichi = self.player.ai.estimate_hand_value_or_get_from_cache(wait, call_riichi=True)
if hand_value_with_riichi.error is None:
hand_costs_with_riichi.append(hand_value_with_riichi.cost["main"])
min_cost = hand_costs and min(hand_costs) or 0
min_cost_with_riichi = hand_costs_with_riichi and min(hand_costs_with_riichi) or 0
must_riichi = self.player.ai.placement.must_riichi(
has_yaku=waits_with_yaku == len(waiting_34),
num_waits=count_tiles,
cost_with_riichi=min_cost_with_riichi,
cost_with_damaten=min_cost,
)
if must_riichi == Placement.MUST_RIICHI:
return True
elif must_riichi == Placement.MUST_DAMATEN:
return False
is_dealer_threat = any([x.enemy.is_dealer for x in threats])
# we don't want to push cheap hand against dealer
if is_dealer_threat and min_cost_with_riichi <= 1300:
return False
# if we have yaku on every wait
if waits_with_yaku == len(waiting_34):
# let's not riichi this bad wait
if count_tiles <= 2:
return False
# chasing riichi on late steps of the game is not profitable
if threats and self.player.round_step >= 9:
return False
# if wait is slightly better, we will riichi only a cheap hand
if count_tiles <= 4:
if self.player.is_dealer and min_cost >= 7700:
return False
if not self.player.is_dealer and min_cost >= 5200:
return False
return True
# wait is even better, but still don't call riichi on damaten mangan
if count_tiles <= 6:
# if it's early riichi more readily
if self.player.round_step > 6:
if self.player.is_dealer and min_cost >= 11600:
return False
if not self.player.is_dealer and min_cost >= 7700:
return False
else:
if self.player.is_dealer and min_cost >= 18000:
return False
if not self.player.is_dealer and min_cost >= 12000:
return False
return True
# if wait is good we only damaten haneman
if self.player.is_dealer and min_cost >= 18000:
return False
if not self.player.is_dealer and min_cost >= 12000:
return False
return True
# if we don't have yaku on every wait and it's two-sided or more, we call riichi
return True
|
test/visuals/test_interpolation.py | colinmford/coldtype | 142 | 11101949 | from coldtype.test import *
ov = Font("assets/ColdtypeObviously.designspace")
@test((1000, 1000), rstate=1)
def test_mouse_interp(r, rs):
ri = r.inset(100)
sx, sy = ri.ipos(rs.mouse)
return [
DATPen().rect(ri).f(None).s(hsl(0.9, a=0.3)).sw(10),
(StyledString("COLD",
Style(ov, 250+sy*100, wdth=sx))
.pens()
.align(r)
.f(0))]
|
app/core/lldbEvents.py | ant4g0nist/vegvisir | 209 | 11101991
import json
import logging
from ..config import config
from threading import Thread
verbose = config.verbose
logging.basicConfig(level=logging.DEBUG)
def logEvent(eventType, event):
if verbose:
logging.debug("[:EVENT:] Type %d (%s)\n" %(eventType, str(event)))
def msgProcess(msg):
if verbose:
logging.debug("[:MSG:] %s"%(json.dumps(msg)))
def stateTypeToString(state, lldb):
"""
Returns the state type string for the given an state.
"""
if state == lldb.eStateInvalid:
return "invalid"
elif state == lldb.eStateUnloaded:
return "unloaded"
elif state == lldb.eStateConnected:
return "connected"
elif state == lldb.eStateAttaching:
return "attaching"
elif state == lldb.eStateLaunching:
return "launching"
elif state == lldb.eStateStopped:
return "stopped"
elif state == lldb.eStateRunning:
return "running"
elif state == lldb.eStateStepping:
return "stepping"
elif state == lldb.eStateCrashed:
return "crashed"
elif state == lldb.eStateDetached:
return "detached"
elif state == lldb.eStateExited:
return "exited"
elif state == lldb.eStateSuspended:
return "suspended"
else:
raise Exception("Unknown StateType enum")
class LLDBEvents(Thread):
"""
Listens for Events from lldb process
-- modified from do_listen_for_and_print_event lldb examples
"""
def __init__(self, handler, lldb):
Thread.__init__(self)
self.lldb = lldb
self.handler = handler
def run(self):
target = self.handler.target
process = target.GetProcess()
listener = self.lldb.SBListener("LLDB events listener")
# create process broadcaster to listen for state changes,
processBroadcaster = process.GetBroadcaster()
processBroadcaster.AddListener(listener, self.lldb.SBProcess.eBroadcastBitStateChanged | self.lldb.SBProcess.eBroadcastBitSTDOUT | self.lldb.SBProcess.eBroadcastBitSTDERR)
self.done = False
event = self.lldb.SBEvent()
while not self.done:
if listener.WaitForEvent(1, event):
# get the broadcaster for this event
eBroadcaster = event.GetBroadcaster()
eventType = event.GetType()
logEvent(eventType, event)
# get details give by process broadcaster
if eBroadcaster == processBroadcaster:
# eBroadcastBitStateChanged
if eventType == self.lldb.SBProcess.eBroadcastBitStateChanged:
state = self.lldb.SBProcess.GetStateFromEvent(event)
message = {"status":"event", "type":"state", "inferior_state":state, "state_desc": stateTypeToString(state,self.lldb)}
if state == self.lldb.eStateExited:
message["exit_status"] = process.GetExitStatus()
# eBroadcastBitSTDOUT
elif eventType == self.lldb.SBProcess.eBroadcastBitSTDOUT:
stdout = process.GetSTDOUT(256)
if stdout is not None and len(stdout) > 0:
message = {"status":"event", "type":"stdout", "output": "".join(["%02x" % ord(i) for i in stdout])}
# eBroadcastBitSTDERR
elif eventType == self.lldb.SBProcess.eBroadcastBitSTDERR:
stderr = process.GetSTDERR(256)
if stderr is not None and len(stderr) > 0:
message = {"status":"event", "type":"stderr", "output": "".join(["%02x" % ord(i) for i in stderr])}
msgProcess(message)
return
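# Hypothetical wiring sketch (not part of the original app): assumes `handler` exposes
# a `.target` SBTarget created elsewhere from an lldb.SBDebugger instance.
# events = LLDBEvents(handler, lldb)
# events.start()      # listen for state/stdout/stderr events on a background thread
# ...                 # run the debug session; events are logged via logEvent/msgProcess
# events.done = True  # ask the listener loop to exit
# events.join()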
|
src/modules/python/webapp/mods/io.vertx~example-web-app~1.0/app.py | vietj/vertx-examples | 143 | 11102025 | import vertx
from core.event_bus import EventBus
# Our application config - you can maintain it here or alternatively you could
# stick it in a conf.json text file and specify that on the command line when
# starting this verticle
# Configuration for the web server
web_server_conf = {
# Normal web server stuff
'port': 8080,
'host': 'localhost',
'ssl': True,
# Configuration for the event bus client side bridge
# This bridges messages from the client side to the server side event bus
'bridge': True,
# This defines which messages from the client we will let through
# to the server side
'inbound_permitted': [
# Allow calls to login
{
'address': 'vertx.basicauthmanager.login'
},
# Allow calls to get static album data from the persistor
{
'address': 'vertx.mongopersistor',
'match': {
'action': 'find',
'collection': 'albums'
}
},
# And to place orders
{
'address': 'vertx.mongopersistor',
            'requires_auth': True, # User must be logged in to let these through
'match': {
'action': 'save',
'collection': 'orders'
}
}
],
# This defines which messages from the server we will let through to the client
'outbound_permitted': [
{}
]
}
# And when it's deployed run a script to load it with some reference
# data for the demo
def deploy_handler(err, id):
if err is None:
# Load the static data
import static_data
else:
print 'Failed to deploy %s' % err
# Now we deploy the modules that we need
# Deploy a MongoDB persistor module
vertx.deploy_module('io.vertx~mod-mongo-persistor~2.0.0-final', handler=deploy_handler)
# Deploy an auth manager to handle the authentication
vertx.deploy_module('io.vertx~mod-auth-mgr~2.0.0-final')
# Start the web server, with the config we defined above
vertx.deploy_module('io.vertx~mod-web-server~2.0.0-final', web_server_conf)
|
tests/integration/advanced/graph/fluent/test_graph_explicit_execution.py | LaudateCorpus1/python-driver | 1,163 | 11102047 | # Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cassandra.graph import Vertex, Edge
from tests.integration.advanced.graph import (
validate_classic_vertex, validate_classic_edge, validate_generic_vertex_result_type,
validate_classic_edge_properties, validate_line_edge,
validate_generic_edge_result_type, validate_path_result_type)
from tests.integration import requiredse, DSE_VERSION
from tests.integration.advanced import use_single_node_with_graph
from tests.integration.advanced.graph import GraphTestConfiguration
from tests.integration.advanced.graph.fluent import (
BaseExplicitExecutionTest, _AbstractTraversalTest, _validate_prop)
def setup_module():
if DSE_VERSION:
dse_options = {'graph': {'realtime_evaluation_timeout_in_seconds': 60}}
use_single_node_with_graph(dse_options=dse_options)
@requiredse
@GraphTestConfiguration.generate_tests(traversal=True)
class ExplicitExecutionTest(BaseExplicitExecutionTest, _AbstractTraversalTest):
"""
This test class will execute all tests of the AbstractTraversalTestClass using Explicit execution
All queries will be run by converting them to byte code, and calling execute graph explicitly with a generated ep.
"""
@staticmethod
def fetch_key_from_prop(property):
return property.label
def _validate_classic_vertex(self, g, vertex):
validate_classic_vertex(self, vertex)
def _validate_generic_vertex_result_type(self, g, vertex):
validate_generic_vertex_result_type(self, vertex)
def _validate_classic_edge_properties(self, g, edge):
validate_classic_edge_properties(self, edge)
def _validate_classic_edge(self, g, edge):
validate_classic_edge(self, edge)
def _validate_line_edge(self, g, edge):
validate_line_edge(self, edge)
def _validate_generic_edge_result_type(self, edge):
validate_generic_edge_result_type(self, edge)
def _validate_type(self, g, vertex):
for key in vertex.properties:
value = vertex.properties[key][0].value
_validate_prop(key, value, self)
def _validate_path_result_type(self, g, path_obj):
# This pre-processing is due to a change in TinkerPop
# properties are not returned automatically anymore
# with some queries.
for obj in path_obj.objects:
if not obj.properties:
props = []
if isinstance(obj, Edge):
obj.properties = {
p.key: p.value
for p in self.fetch_edge_props(g, obj)
}
elif isinstance(obj, Vertex):
obj.properties = {
p.label: p.value
for p in self.fetch_vertex_props(g, obj)
}
validate_path_result_type(self, path_obj)
def _validate_meta_property(self, g, vertex):
self.assertEqual(len(vertex.properties), 1)
self.assertEqual(len(vertex.properties['key']), 1)
p = vertex.properties['key'][0]
self.assertEqual(p.label, 'key')
self.assertEqual(p.value, 'meta_prop')
self.assertEqual(p.properties, {'k0': 'v0', 'k1': 'v1'})
|
zeus/common/util/benchmark_data.py | TianQi-777/xingtian | 240 | 11102065 | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE
"""Make database for record."""
class Data(object):
"""
    Base class of the data structure that stores train/test information, for analyzing relative performance.
    The local database will use sqlite.
    Local files will work with numpy & csv.
"""
VERSION = 0.1
def __init__(self):
self.base_fields = (
"env_name", # rl's environment
"alg_name", # algorithm
"train_index", # the index of model saved, user define
"start_time", # this event start time
"sample_step", # the total sample steps used for training,
"train_loss",
"train_reward",
"eval_reward",
"framework",
"comments", # user others' comments
)
def insert_records(self, to_record):
"""
Insert train record.
Args:
----
to_record:
"""
raise NotImplementedError
def get_version(self):
"""Get database version info."""
return self.VERSION
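# A minimal sqlite-backed sketch (an assumption, not part of the original module): one
# way a subclass could satisfy insert_records(); the table layout is illustrative only.
import sqlite3
class SqliteData(Data):
    def __init__(self, db_file="benchmark.db"):
        super().__init__()
        self.conn = sqlite3.connect(db_file)
        cols = ", ".join("{} TEXT".format(f) for f in self.base_fields)
        self.conn.execute("CREATE TABLE IF NOT EXISTS records ({})".format(cols))
    def insert_records(self, to_record):
        # keep only known fields, in the declared order
        keys = [k for k in self.base_fields if k in to_record]
        sql = "INSERT INTO records ({}) VALUES ({})".format(
            ", ".join(keys), ", ".join(["?"] * len(keys)))
        self.conn.execute(sql, [to_record[k] for k in keys])
        self.conn.commit()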
|
sknetwork/clustering/__init__.py | HerrZYZ/scikit-network | 457 | 11102082
"""clustering module"""
from sknetwork.clustering.base import BaseClustering
from sknetwork.clustering.kmeans import KMeans
from sknetwork.clustering.louvain import Louvain
from sknetwork.clustering.metrics import modularity, bimodularity, comodularity, normalized_std
from sknetwork.clustering.postprocess import reindex_labels
from sknetwork.clustering.propagation_clustering import PropagationClustering
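# Hypothetical usage sketch (not part of the package file): assumes the usual
# scikit-network API where Louvain.fit_transform takes a sparse adjacency matrix
# and returns one cluster label per node.
# from sknetwork.data import karate_club
# adjacency = karate_club()
# labels = Louvain().fit_transform(adjacency)
# print(modularity(adjacency, labels))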
|
tests/test_botvars.py | KennethBlaney/rivescript-python | 154 | 11102089
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from .config import RiveScriptTestCase
class BotvarTests(RiveScriptTestCase):
"""Test bot variables."""
def test_bot_variables(self):
self.new("""
! var name = Aiden
! var age = 5
+ what is your name
- My name is <bot name>.
+ how old are you
- I am <bot age>.
+ what are you
- I'm <bot gender>.
+ happy birthday
- <bot age=6>Thanks!
+ who is your master
- My master is <bot master>.
""")
self.rs.set_variable("master", "kirsle")
self.reply("What is your name?", "My name is Aiden.")
self.reply("How old are you?", "I am 5.")
self.reply("What are you?", "I'm undefined.")
self.reply("Happy birthday!", "Thanks!")
self.reply("How old are you?", "I am 6.")
self.reply("Who is your master?", "My master is kirsle.")
self.assertEqual(self.rs.get_variable("age"), "6")
self.assertEqual(self.rs.get_variable("master"), "kirsle")
self.assertEqual(self.rs.get_variable("fake"), "undefined")
def test_global_variables(self):
self.new("""
! global debug = false
+ debug mode
- Debug mode is: <env debug>
+ set debug mode *
- <env debug=<star>>Switched to <star>.
+ are you testing
- Testing: <env testing>
""")
self.rs.set_global("testing", "true")
self.reply("Debug mode.", "Debug mode is: false")
self.reply("Set debug mode true", "Switched to true.")
self.reply("Debug mode?", "Debug mode is: true")
self.reply("Are you testing?", "Testing: true")
self.assertEqual(self.rs.get_global("debug"), "true")
self.assertEqual(self.rs.get_global("testing"), "true")
self.assertEqual(self.rs.get_global("fake"), "undefined")
|
tools/pot/openvino/tools/pot/engines/simplified_engine.py | pazamelin/openvino | 2,406 | 11102091
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from .ie_engine import IEEngine
from .utils import append_stats
class SimplifiedEngine(IEEngine):
@staticmethod
def _process_batch(batch):
""" Processes batch data and returns lists of annotations, images and batch meta data
:param batch: a list with batch data [image]
:returns None as annotations
a list with input data [image]
None as meta_data
"""
return None, batch, None
def _process_infer_output(self, stats_layout, predictions,
batch_annotations, batch_meta, need_metrics_per_sample):
# Collect statistics
if stats_layout:
append_stats(self._accumulated_layer_stats, stats_layout, predictions, 0)
|
apps/interface/models/interfacecase.py | rainydaygit/testtcloudserver | 349 | 11102111 | from library.api.db import EntityWithNameModel, db
class InterfaceCase(EntityWithNameModel):
ACTIVE = 0
DISABLE = 1
num = db.Column(db.Integer(), nullable=True, comment='用例序号')
name = db.Column(db.String(128), nullable=True, comment='用例名称')
desc = db.Column(db.String(256), comment='用例描述')
func_address = db.Column(db.String(256), comment='用例需要引用的函数')
variable = db.Column(db.Text(), comment='用例公共参数')
times = db.Column(db.Integer(), nullable=True, comment='执行次数')
project_id = db.Column(db.Integer, comment='所属的项目id')
case_set_id = db.Column(db.Integer, comment='所属的用例集id')
status = db.Column(db.Integer, default=ACTIVE) # 状态
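# Hypothetical query sketch (not part of the original model): assumes EntityWithNameModel
# exposes the usual Flask-SQLAlchemy query interface.
# active_cases = InterfaceCase.query.filter_by(status=InterfaceCase.ACTIVE).all()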
|
Tools/sqfvalidator/sqf/expressions.py | Rowantrek/A3-Antistasi | 161 | 11102134 | from sqf.types import Keyword, Nothing, Anything, Type
from sqf.interpreter_types import InterpreterType
class Expression:
"""
A generic class to represent an expression. The expression matches according to the
types of their elements, listed in `types`.
"""
def __init__(self, types_or_values, return_type):
self.types_or_values = tuple(types_or_values)
self.return_type = return_type
for t_or_v in self.types_or_values:
assert (isinstance(t_or_v, (Type, Keyword)) or issubclass(t_or_v, Type))
assert(return_type is None or issubclass(return_type, Type))
def is_match(self, values, exact=True):
"""
        Given a list of values, returns whether every value matches the
        corresponding type or value of this expression
"""
if len(values) != len(self.types_or_values):
return False
for i, (t_or_v, value) in enumerate(zip(self.types_or_values, values)):
if isinstance(t_or_v, (Type, Keyword)): # it is a value
if value != t_or_v:
return False
else: # it is a type
if not (isinstance(value, t_or_v) or
(not exact and type(value) == Anything and
not issubclass(t_or_v, InterpreterType))):
return False
return True
def is_signature_match(self, values):
return self.is_match(values, exact=False)
def execute(self, values, interpreter):
raise NotImplementedError
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.types_or_values)
def __eq__(self, other):
if issubclass(other.__class__, Expression):
return self.types_or_values == other.types_or_values
else:
return False
@property
def keyword(self):
raise NotImplementedError
def _result_to_typed_result(self, value):
if self.return_type is None:
return value
elif self.return_type in (Anything, Nothing):
return self.return_type()
else:
if isinstance(value, tuple):
return self.return_type(*value)
else:
return self.return_type(value)
class UnaryExpression(Expression):
def __init__(self, op, rhs_type, return_type, action=None):
assert (isinstance(op, Keyword))
super().__init__([op, rhs_type], return_type)
if action is None and return_type is None:
action = lambda rhs, i: i.private_default_class()
elif action is None:
action = lambda rhs, i: None
self.action = action
def execute(self, values, interpreter):
result = self.action(values[1], interpreter)
return self._result_to_typed_result(result)
@property
def keyword(self):
return self.types_or_values[0]
class BinaryExpression(Expression):
def __init__(self, lhs_type, op, rhs_type, return_type, action=None):
assert(isinstance(op, Keyword))
super().__init__([lhs_type, op, rhs_type], return_type)
if action is None and return_type is None:
action = lambda lhs, rhs, i: i.private_default_class()
elif action is None:
action = lambda lhs, rhs, i: None
self.action = action
def execute(self, values, interpreter):
result = self.action(values[0], values[2], interpreter)
return self._result_to_typed_result(result)
@property
def keyword(self):
return self.types_or_values[1]
class NullExpression(Expression):
def __init__(self, op, return_type, action=None):
assert(isinstance(op, Keyword))
assert(return_type is not None)
super().__init__([op], return_type)
if action is None:
action = lambda i: None
self.action = action
def execute(self, values, interpreter):
result = self.action(interpreter)
return self._result_to_typed_result(result)
@property
def keyword(self):
return self.types_or_values[0]
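# Hypothetical construction sketch (not from the original code base): assumes sqf.types
# also exposes Array and Number, and that Keyword wraps the operator token as a string.
# count_op = UnaryExpression(Keyword('count'), Array, Number,
#                            lambda rhs, interpreter: len(rhs.value))
# count_op.is_match([Keyword('count'), some_array])  # True when the types line up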
|
clpy/_version.py | fixstars/clpy | 142 | 11102138 | __version__ = '2.1.0rc1'
|
lib/astc-encoder/Test/astc_test_python.py | atteneder/KTX-Software | 619 | 11102156
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# -----------------------------------------------------------------------------
# Copyright 2020 Arm Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# -----------------------------------------------------------------------------
"""
The python test runner is designed to run some basic tests against the Python
test code base.
"""
import re
import sys
import unittest
import pycodestyle
import pylint.epylint as lint
class PythonTests(unittest.TestCase):
"""
Some basic Python static analysis and style checks.
"""
def test_pylint(self):
"""
Run pylint over the codebase.
"""
pylintOut, _ = lint.py_run("./Test", True)
pattern = re.compile(r"Your code has been rated at (.*?)/10")
match = pattern.search(pylintOut.getvalue())
self.assertIsNotNone(match)
score = float(match.group(1))
self.assertGreaterEqual(score, 9.8)
with open("pylint.log", "w") as fileHandle:
fileHandle.write(pylintOut.getvalue())
def test_pycodestyle(self):
"""
Test that we conform to PEP-8.
"""
style = pycodestyle.StyleGuide()
result = style.check_files(["./Test"])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
def main():
"""
The main function.
Returns:
int: The process return code.
"""
results = unittest.main(exit=False)
return 0 if results.result.wasSuccessful() else 1
if __name__ == "__main__":
sys.exit(main())
|
observations/r/accident.py | hajime9652/observations | 199 | 11102176 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def accident(path):
"""Ship Accidents
a cross-section
*number of observations* : 40
A dataframe containing :
type
ship type, a factor with levels (A,B,C,D,E)
constr
year constructed, a factor with levels (C6064,C6569,C7074,C7579)
operate
year operated, a factor with levels (O6074,O7579)
months
measure of service amount
acc
accidents
<NAME>. and <NAME> (1983) *Generalized linear methods*, New
York:Chapman and Hall.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `accident.csv`.
Returns:
Tuple of np.ndarray `x_train` with 40 rows and 5 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'accident.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/Ecdat/Accident.csv'
maybe_download_and_extract(path, url,
save_file_name='accident.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
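# Example call (the path below is a placeholder directory; the CSV is downloaded on
# first use). Per the docstring, x_train has 40 rows and 5 columns.
# x_train, metadata = accident('~/data')
# print(x_train.shape)         # (40, 5)
# print(metadata['columns'])   # the feature names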
|
pygithub3/requests/repos/watchers.py | teamorchard/python-github3 | 107 | 11102206 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from . import Request
from pygithub3.resources.users import User
from pygithub3.resources.repos import Repo
class List(Request):
uri = 'repos/{user}/{repo}/watchers'
resource = User
class List_repos(Request):
uri = 'users/{user}/watched'
resource = Repo
def clean_uri(self):
if not self.user:
return 'user/watched'
class Is_watching(Request):
uri = 'user/watched/{user}/{repo}'
class Watch(Request):
uri = 'user/watched/{user}/{repo}'
class Unwatch(Request):
uri = 'user/watched/{user}/{repo}'
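# Hypothetical resolution sketch (not part of the original module): the service layer
# would typically expand these uri templates roughly as
#   List(user='octocat', repo='hello-world') -> GET repos/octocat/hello-world/watchers
#   List_repos()                             -> GET user/watched  (fallback via clean_uri)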
|
external/android/xorpt.gyp | gordonjohnpatrick/XobotOS | 263 | 11102207 |
{
'includes': [
'../skia/gyp/common.gypi',
],
'targets': [
{
'target_name': 'xorpt',
'type': 'executable',
'mac_bundle' : 1,
'include_dirs' : [
'include'
],
'conditions': [
[ 'skia_os == "linux"', {
'cflags': [
'-fPIC', '-Wall'
],
'sources': [
],
}],
],
'sources' : [
'aapt/AaptAssets.cpp',
'aapt/Command.cpp',
'aapt/CrunchCache.cpp',
'aapt/FileFinder.cpp',
'aapt/Images.cpp',
'aapt/Main.cpp',
'aapt/Package.cpp',
'aapt/Resource.cpp',
'aapt/ResourceFilter.cpp',
'aapt/ResourceTable.cpp',
'aapt/SourcePos.cpp',
'aapt/StringPool.cpp',
'aapt/XMLNode.cpp',
'aapt/ZipEntry.cpp',
'aapt/ZipFile.cpp',
'aapt/AaptAssets.h',
'aapt/Bundle.h',
'aapt/CacheUpdater.h',
'aapt/CrunchCache.h',
'aapt/DirectoryWalker.h',
'aapt/FileFinder.h',
'aapt/Images.h',
'aapt/Main.h',
'aapt/ResourceFilter.h',
'aapt/ResourceTable.h',
'aapt/SourcePos.h',
'aapt/StringPool.h',
'aapt/XMLNode.h',
'aapt/ZipEntry.h',
'aapt/ZipFile.h'
],
'link_settings': {
'libraries': [
'-lpng'
],
},
'dependencies': [
'android-libs.gyp:android_libs',
'../expat/expat.gyp:expat',
'../jpeg/libjpeg.gyp:android_libjpeg'
],
}
]
}
|
src/python/grpcio_tests/tests_aio/unit/channel_ready_test.py | warlock135/grpc | 36,552 | 11102212
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing the channel_ready function."""
import asyncio
import gc
import logging
import socket
import time
import unittest
import grpc
from grpc.experimental import aio
from tests.unit.framework.common import get_socket
from tests.unit.framework.common import test_constants
from tests_aio.unit import _common
from tests_aio.unit._test_base import AioTestBase
from tests_aio.unit._test_server import start_test_server
class TestChannelReady(AioTestBase):
async def setUp(self):
address, self._port, self._socket = get_socket(
listen=False, sock_options=(socket.SO_REUSEADDR,))
self._channel = aio.insecure_channel(f"{address}:{self._port}")
self._socket.close()
async def tearDown(self):
await self._channel.close()
async def test_channel_ready_success(self):
# Start `channel_ready` as another Task
channel_ready_task = self.loop.create_task(
self._channel.channel_ready())
# Wait for TRANSIENT_FAILURE
await _common.block_until_certain_state(
self._channel, grpc.ChannelConnectivity.TRANSIENT_FAILURE)
try:
# Start the server
_, server = await start_test_server(port=self._port)
# The RPC should recover itself
await channel_ready_task
finally:
await server.stop(None)
async def test_channel_ready_blocked(self):
with self.assertRaises(asyncio.TimeoutError):
await asyncio.wait_for(self._channel.channel_ready(),
test_constants.SHORT_TIMEOUT)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)
|
local/speaker-id-from-server.py | slckl/kaldi-offline-transcriber | 199 | 11102224 | #! /usr/bin/env python3
import argparse
import requests
import json
import sys
from urllib3.filepost import encode_multipart_formdata, choose_boundary
from urllib3.fields import RequestField
import subprocess
def encode_multipart_related(fields, boundary=None):
if boundary is None:
boundary = choose_boundary()
body, _ = encode_multipart_formdata(fields, boundary)
content_type = str('multipart/related; boundary=%s' % boundary)
return body, content_type
def encode_media_related(audio_files):
rfs = []
for f in audio_files:
if f.endswith("|"):
p = subprocess.Popen(f[:-1], shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=False)
data = p.stdout.read()
rf = RequestField(
name='placeholder2',
data=data,
headers={'Content-Type': "audio/wav"},
)
else:
rf = RequestField(
name='placeholder2',
data=open(f, 'rb').read(),
headers={'Content-Type': "audio/wav"},
)
rfs.append(rf)
return encode_multipart_related(rfs)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Perform speaker ID using a dedicated server')
parser.add_argument('--url', default="http://localhost:8888")
parser.add_argument('spk2utt')
parser.add_argument('wav_scp')
parser.add_argument('output_json')
args = parser.parse_args()
wavs = {}
for l in open(args.wav_scp):
ss = l.split()
wavs[ss[0]] = " ".join(ss[1:])
spk2utt = {}
for l in open(args.spk2utt):
ss = l.split()
spk2utt[ss[0]] = [wavs[utt] for utt in ss[1:]]
output = {}
for speaker, wavs in spk2utt.items():
body, content_type = encode_media_related(wavs)
full_url = args.url + "/v1/identify?uploadType=multipart"
try:
print("Doing speaker ID for speaker %s using URL %s" % (speaker, full_url), file=sys.stderr)
r = requests.post(full_url, data=body, headers={"Content-Type": content_type})
if r.status_code == 200:
speaker_info = json.loads(r.content.decode("utf-8"))
output[speaker] = speaker_info
print("Speaker ID successful, speaker info: " + str(speaker_info), file=sys.stderr)
else:
print("Speaker ID not successful, status %d " % r.status_code, file=sys.stderr)
output[speaker] = {}
except Exception as ex:
print("Failed to do speaker ID using server URL %s" % full_url, file=sys.stderr)
print(ex, file=sys.stderr)
output[speaker] = {}
json.dump(output, open(args.output_json, "w"), sort_keys=False, indent=4)
|
build/plugins/credits.py | mjjohns1/catboost | 6,989 | 11102276 | from _common import rootrel_arc_src
def oncredits_disclaimer(unit, *args):
if unit.get('WITH_CREDITS'):
unit.message(["warn", "CREDITS WARNING: {}".format(' '.join(args))])
def oncheck_contrib_credits(unit, *args):
module_path = rootrel_arc_src(unit.path(), unit)
for arg in args:
if module_path.startswith(arg) and not unit.get('CREDITS_TEXTS_FILE') and not unit.get('NO_CREDITS_TEXTS_FILE'):
unit.message(["error", "License texts not found. See https://st.yandex-team.ru/DTCC-324"])
|
torcms/script/autocrud/html_tpl.py | bukun/TorCMS | 243 | 11102293 | # -*- coding:utf-8 -*-
'''
Tempaltes for CRUD.
'''
TPL_ADD = '''
{% extends "../../tmpl_kkkk/tpl_add.html" %}
{% block header %}
<h1>{{ header_text }}</h1>
{% end %}
{% block extrainfo %}
<div id="iga_add_rec_box">
xxxxxx
</div>
{% end %}
{% block footer %}
<p>{{ footer_text }}</p>
{% end %}'''
TPL_EDIT = '''
{% extends "../../tmpl_kkkk/tpl_edit.html" %}
{% block header %}
<h1>{{ header_text }}</h1>
{% end %}
{% block extrainfo %}
<div id="iga_edit_rec_box">
xxxxxx
</div>
{% end %}
{% block footer %}
<p>{{ footer_text }}</p>
{% end %}'''
TPL_LIST = '''
{% extends "../../tmpl_kkkk/tpl_list.html" %}
{% block header %}
{{ header_text }}
{% end %}
{% block infoselect %}
<div class="infoselect"> xxxxxx </div>
{% end %}
{% block infonav %}
{% end %}
{% block infolist %}
<div class="list_house">
<ul class="list-group">
<span id="resultSpan"></span>
</ul>
</div>
{% end %}
{% block footer %}
<p>{{ footer_text }}</p>
{% end %}'''
TPL_LISTINFO = '''{% extends "../../tmpl_kkkk/tpl_listinfo.html" %}'''
TPL_VIEW = '''{% extends "../../tmpl_kkkk/tpl_viewssss.html" %}
{% block header %}
<h1>{{ header_text }}</h1>
{% end %}
{% block extrainfo %}
<div id="iga_view_rec_box">
xxxxxx
</div>
{% end %}
{% block footer %}
<p>{{ footer_text }}</p>
{% end %}'''
HTML_INPUT_EDIT_DOWNLOAD = '''
<div class="form-group">
<label class="col-sm-2 control-label" for="{sig_en}">
<span><a class="glyphicon glyphicon-star" style="color: red;font-size: xx-small;">
</a>{sig_zh}</span>
</label>
<div class="col-sm-8">
<input id='{sig_en}' name="{sig_en}"
value="{{{{ postinfo.extinfo.get('{sig_en}','') }}}}"
type="{sig_type}" class="form-control"> </div>
<div class="col-sm-2"><a href="/entry/add" target="_blank" class="btn btn-primary" role="button">Upload</a></div>
</div>
'''
HTML_INPUT_EDIT = '''
<div class="form-group">
<label class="col-sm-2 control-label" for="{sig_en}">
<span><a class="glyphicon glyphicon-star" style="color: red;font-size: xx-small;">
</a>{sig_zh}</span>
</label>
<div class="col-sm-9">
<input id='{sig_en}' name="{sig_en}"
value="{{{{ postinfo.extinfo.get('{sig_en}','') }}}}"
type="{sig_type}" class="form-control"> </div>
<div class="col-sm-1">{sig_dic}</div>
</div>
'''
HTML_INPUT_ADD_DOWNLOAD = '''<div class="form-group">
<label class="col-sm-2 control-label" for="{sig_en}">
<span><a class="glyphicon glyphicon-star" style="color: red;font-size: xx-small;">
</a>{sig_zh}</span>
</label>
<div class="col-sm-8">
<input id='{sig_en}' name="{sig_en}" value="" type="{sig_type}"
class="form-control">
</div>
<div class="col-sm-2">
<a href="/entry/add" target="_blank" class="btn btn-primary" role="button">Upload</a>
</div></div>
'''
HTML_INPUT_ADD = '''
<div class="form-group">
<label class="col-sm-2 control-label" for="{sig_en}">
<span><a class="glyphicon glyphicon-star" style="color: red;font-size: xx-small;">
</a>{sig_zh}</span>
</label>
<div class="col-sm-9">
<input id='{sig_en}' name="{sig_en}" value="" type="{sig_type}"
class="form-control">
</div>
<div class="col-sm-1">
{sig_dic}
</div></div>
'''
HTML_INPUT_VIEW_DONWLOAD = '''<div class="row">
<div class="col-sm-4"><span class="des"><strong>{sig_zh}</strong></span></div>
<div class="col-sm-8">
{{% if userinfo %}}
{{% if postinfo.extinfo.get('tag_file_download') or postinfo.extinfo.get('tag__file_download') %}}
<a class="val btn-xs btn btn-warning" onclick="entity_down('{{{{postinfo.uid}}}}')"
id="file_download" style="cursor: pointer; color:#fff">
<span class="glyphicon glyphicon-download-alt"> Download</span>
{sig_unit}</a>
{{% else %}}
<span class="glyphicon glyphicon-ban-circle" style="color:red"> Unavailable</span>
{{% end %}}
{{% else %}}
<a href="/user/login">Please download after login, click to <span class="btn btn-primary btn-xs"> login in</span>. </a>
{{% end %}}
</div></div>
'''
HTML_INPUT_VIEW_LINK = '''<div class="row">
<div class="col-sm-4"><span class="des"><strong>{1}</strong></span></div>
<div class="col-sm-8">
<a class="val" target="_blank" href="{{{{ postinfo.extinfo.get('{0}','') }}}}
{2}" style="cursor: pointer; color:#069">
{{{{ postinfo.extinfo.get('{0}','') }}}}
{2} </a></div></div>
'''
HTML_INPUT_VIEW = '''<div class="row">
<div class="col-sm-4"><span class="des"><strong>{1}</strong></span></div>
<div class="col-sm-8">
<span class="val">{{{{ postinfo.extinfo.get('{0}','') }}}}
{2}</span></div></div>
'''
HTML_TPL_DICT = {
'input_add': HTML_INPUT_ADD,
'input_add_download': HTML_INPUT_ADD_DOWNLOAD,
'input_edit_download': HTML_INPUT_EDIT_DOWNLOAD,
'input_edit': HTML_INPUT_EDIT,
'input_view_download': HTML_INPUT_VIEW_DONWLOAD,
'input_view_link': HTML_INPUT_VIEW_LINK,
'input_view': HTML_INPUT_VIEW,
}
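# Illustrative rendering sketch (field values below are assumptions): the CRUD
# generator is expected to fill the per-field placeholders via str.format, e.g.
#   snippet = HTML_TPL_DICT['input_add'].format(
#       sig_en='tag_price', sig_zh='Price', sig_type='text', sig_dic='')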
|
src/prod/tools/linux/lldb/fabdbg.py | gridgentoo/ServiceFabricAzure | 2,542 | 11102298 | # ------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
# ------------------------------------------------------------
"""
service fabric lldb extension
usage:
(lldb) script import fabdbg
(lldb) script help(fabdbg)
(lldb) script fabdbg.function(...)
"""
import lldb
import re
import collections
import operator
import time
import multiprocessing
import sys
from joblib import Parallel, delayed
PointerByteSize = 8
def addr_to_ptr(addr) :
    # Per lldb documentation, addr.load_addr should be used instead of addr.file_addr.
    # However, our executable is loaded twice in lldb somehow, probably a bug in lldb.
    # load_addr does not match what is stored in the actual C++ object, so we have to
    # use file_addr, which means this only works for types defined in the executable,
    # not for types defined in .so files. The suggested workaround is to use
    # target.ResolveLoadAddress(ptrValue) and compare the resulting SBAddress with the
    # vtable symbol's SBAddress, but that slows things down significantly; we should
    # investigate whether it can be sped up.
return addr.file_addr
def vtable_addr (vtableSymbol):
addr = addr_to_ptr(vtableSymbol.addr)
if addr == lldb.LLDB_INVALID_ADDRESS :
return lldb.LLDB_INVALID_ADDRESS
return addr + 0x10
def get_search_regions() :
memoryRegions = lldb.process.GetMemoryRegions()
print 'total memory regions: ', memoryRegions.GetSize()
region = lldb.SBMemoryRegionInfo()
searchRegions = []
for i in range(memoryRegions.GetSize()):
if memoryRegions.GetMemoryRegionAtIndex(i, region):
if region.IsWritable() and region.IsReadable() and not region.IsExecutable():
#print '[{0:x},{1:x}]'.format(region.GetRegionBase(), region.GetRegionEnd())
searchRegions.append((region.GetRegionBase(), region.GetRegionEnd()))
#sort regions in descending order, so that large regions get processed early
searchRegions.sort(key=lambda tup: tup[1] - tup[0], reverse=True)
# for region in searchRegions :
# print '{0:x} [{1:x}, {2:x})'.format(region[1]-region[0], region[0], region[1])
searchRegionCount = len(searchRegions)
print 'target memory regions: ', searchRegionCount
return searchRegions
def findtype_in_region(region, vtableAddr) :
sys.stdout.write('+')
sys.stdout.flush()
startAddr = region[0]
endAddr = region[1]
#print '[{0:x},{1:x})'.format(startAddr, endAddr)
matches = set()
error = lldb.SBError()
for addr in range(startAddr, endAddr, PointerByteSize):
ptr = lldb.process.ReadPointerFromMemory(addr, error)
if error.success and ptr == vtableAddr :
matches.add(addr)
sys.stdout.write('.')
sys.stdout.flush()
return matches
def findtype (typename):
"""
find objects of the "virtual" type with given typename
for example, typename='Transport::TcpDatagramTransport'
wll search for all TcpDatagramTransport instances
"""
startTime = time.time()
matchCount = 0
vtblSymbol = 'vtable for ' + typename
symbols = lldb.target.FindSymbols(vtblSymbol)
if len(symbols) == 0 :
print '%s is not a virtual type' %typename
return
searchRegions = get_search_regions();
searchRegionCount = len(searchRegions)
processorCount = multiprocessing.cpu_count()
vtableAddr = vtable_addr(symbols[0].symbol)
if vtableAddr == lldb.LLDB_INVALID_ADDRESS :
print 'vtable address is LLDB_INVALID_ADDRESS'
return
print 'searching vtable address 0x{0:x} in target regions on {1} cores'.format(vtableAddr, processorCount)
#print '%x' % symbols[0].symbol.addr.load_addr
taskResults = Parallel(n_jobs=processorCount)(delayed(findtype_in_region)(searchRegions[i], vtableAddr) for i in range(searchRegionCount))
print
print
print '<<<matches>>>'
matchCount = 0
for taskResult in taskResults :
matchCount += len(taskResult)
for ptr in taskResult :
print '0x{0:x}'.format(ptr)
print
print 'total match found: ', matchCount
print 'time elapsed: ', time.time() - startTime
print
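# Example invocation from inside lldb, using the type named in the docstring:
#   (lldb) script fabdbg.findtype('Transport::TcpDatagramTransport')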
def findtypes_in_region(region, names) :
sys.stdout.write('+')
sys.stdout.flush()
startAddr = region[0]
endAddr = region[1]
#print '[{0:x},{1:x})'.format(startAddr, endAddr)
matches = dict()
error = lldb.SBError()
for addr in range(startAddr, endAddr, PointerByteSize):
ptr = lldb.process.ReadPointerFromMemory(addr, error)
if error.success and ptr in names:
if ptr in matches :
matches[ptr] += 1
else :
matches[ptr] = 1
sys.stdout.write('.')
sys.stdout.flush()
return matches
def type_regex_to_vtable_regex(typename) :
if len(typename) == 0 :
return '^vtable for'
if typename[0] == '^' :
return '^vtable for ' + typename[1:]
return '^vtable for .*' + typename
def get_all_pure_virtual_funcs() :
result = dict()
symbolCtxList = lldb.target.FindSymbols('__cxa_pure_virtual')
for ctx in symbolCtxList :
symbol = ctx.symbol
pvfAddr = addr_to_ptr(symbol.addr)
result[pvfAddr]=ctx.module.platform_file.basename
"""
print 'found %d pure virtual functions:' %len(result)
for pvf, n in result.iteritems() :
print '%0.16x : %s' % (pvf,n)
"""
return result
def has_pure_virtual(vtableAddr, pureVirtualFuncs) :
error = lldb.SBError()
vtableEndAddr = lldb.process.ReadPointerFromMemory(vtableAddr-PointerByteSize, error)
if not error.success :
return False
#print "vtable: [%0.16x, %0.16x)" % (vtableAddr, vtableEndAddr)
for addr in range(vtableAddr, vtableEndAddr, PointerByteSize) :
#print "read from address %.016x" % addr
funcAddr = lldb.process.ReadPointerFromMemory(addr, error)
if not error.success :
continue
if funcAddr in pureVirtualFuncs :
return True
return False
def findtypes (pattern, ignorePureVirtualType=True):
"""
count objects of "virtual" types that match pattern string and rank them based on object count
pattern: regular expression string for target types
for example:
pattern='' or pattern='.*' will match all virtual types
        pattern='^(?!std)' will match all non-std virtual types
pattern='^Transport::' will match all Transport virtual types
pattern='Transport$' will match all virtual types ending with Transport
"""
startTime = time.time()
moduleCount = lldb.target.GetNumModules()
print 'search for matching virtual types in {0} modules ...'.format(moduleCount),
# find all virtual types first
symbolPattern = type_regex_to_vtable_regex(pattern)
symbolRegex = re.compile(symbolPattern)
names = dict()
matches = dict()
pureVirtualFuncs = set()
if ignorePureVirtualType :
pureVirtualFuncs = get_all_pure_virtual_funcs()
for i in range(moduleCount) :
module = lldb.target.GetModuleAtIndex(i)
symbolCount = module.GetNumSymbols()
for j in range(symbolCount) :
symbol = module.GetSymbolAtIndex(j)
symbolName = symbol.name
if symbolName and symbolRegex.match(symbolName) :
vtableAddr = vtable_addr(symbol)
if vtableAddr == lldb.LLDB_INVALID_ADDRESS :
continue
if ignorePureVirtualType and has_pure_virtual(vtableAddr, pureVirtualFuncs) :
continue
if vtableAddr in names and not names[vtableAddr] == symbol.GetName()[11:]:
print 'file_addr {0:x} conflicts: {1}, {2}'.format(vtableAddr, names[vtableAddr], symbol.GetName()[11:])
names[vtableAddr] = symbol.GetName()[11:]
matches[vtableAddr] = 0
"""
for vtableAddr, symbolName in names.items() :
print '0x{0:x} {1}'.format(vtableAddr, symbolName)
"""
print 'found {0}'.format(len(names))
if len(names) == 0 :
return
# search for matches of virtual types
searchRegions = get_search_regions();
searchRegionCount = len(searchRegions)
processorCount = multiprocessing.cpu_count()
print 'searching target regions on {0} cores'.format(processorCount)
taskResults = Parallel(n_jobs=processorCount)(delayed(findtypes_in_region)(searchRegions[i], names) for i in range(searchRegionCount))
for taskResult in taskResults :
for ptr, count in taskResult.iteritems() :
matches[ptr] += count
# output result
print
print
print 'object count {'
matchRanking = sorted(matches.items(), key=operator.itemgetter(1))
for vtableAddr, objCount in matchRanking :
if objCount > 0 :
print '{0:9} {1}'.format(objCount, names[vtableAddr])
print '} object count'
print
print 'time elapsed: ', time.time() - startTime
print
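# Example invocations from inside lldb, mirroring the patterns listed in the
# docstring above:
#   (lldb) script fabdbg.findtypes('^Transport::')
#   (lldb) script fabdbg.findtypes('', ignorePureVirtualType=False)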
def findall(pattern, ignorePureVirtualType=True):
"""
findall is an alias of findtypes
"""
findtypes(pattern,ignorePureVirtualType)
def findptr_in_region(region, ptrValue) :
sys.stdout.write('+')
sys.stdout.flush()
startAddr = region[0]
endAddr = region[1]
#print '[{0:x},{1:x})'.format(startAddr, endAddr)
matches = set()
error = lldb.SBError()
for addr in range(startAddr, endAddr, PointerByteSize):
ptr = lldb.process.ReadPointerFromMemory(addr, error)
if error.success and ptr == ptrValue:
matches.add(addr)
sys.stdout.write('.')
sys.stdout.flush()
return matches
def findptr(ptrValue) :
"""
find pointer value or pointer size integer value
"""
startTime = time.time()
searchRegions = get_search_regions()
searchRegionCount = len(searchRegions)
processorCount = multiprocessing.cpu_count()
print 'searching target regions on {0} cores'.format(processorCount)
taskResults = Parallel(n_jobs=processorCount)(delayed(findptr_in_region)(searchRegions[i], ptrValue) for i in range(searchRegionCount))
print
print
print '<<<matches>>>'
matchCount = 0
for taskResult in taskResults :
for match in taskResult :
print '0x{0:x}'.format(match)
matchCount += 1
print
print 'total: ', matchCount
print 'time elapsed: ', time.time() - startTime
print
def findsptr_in_region(region, objPtr, refCountTypeVTable) :
sys.stdout.write('+')
sys.stdout.flush()
startAddr = region[0]
endAddr = region[1]
#print '[{0:x},{1:x})'.format(startAddr, endAddr)
matches = set()
error = lldb.SBError()
for addr in range(startAddr, endAddr-PointerByteSize, PointerByteSize):
ptr = lldb.process.ReadPointerFromMemory(addr, error)
if error.fail :
continue
if ptr != objPtr :
            continue
ptr2 = lldb.process.ReadPointerFromMemory(addr + PointerByteSize, error)
if error.fail :
continue
ptr3 = lldb.process.ReadPointerFromMemory(ptr2, error)
if error.fail :
continue
if ptr3 == refCountTypeVTable :
matches.add(addr)
sys.stdout.write('.')
sys.stdout.flush()
return matches
def findsptr(sptrAddr) :
"""
    find shared_ptr or weak_ptr instances of a given object by matching
    both the pointer to the shared object and the vtable of the shared_ptr/weak_ptr ref count type
    sptrAddr: address of a shared_ptr/weak_ptr instance of the given object
"""
startTime = time.time()
error = lldb.SBError()
objPtr = lldb.process.ReadPointerFromMemory(sptrAddr, error)
if error.fail :
print 'failed to read from ', sptrAddr, ' : ', error
return
print 'address of shared object: 0x{0:x}'.format(objPtr)
refCountObjPtr = lldb.process.ReadPointerFromMemory(sptrAddr + PointerByteSize, error)
if error.fail :
print 'failed to read from {0}: {1}'.format(sptrAddr+PointerByteSize, error)
return
print 'address of shared_ptr ref count object: 0x{0:x}'.format(refCountObjPtr)
refCountTypeVTable = lldb.process.ReadPointerFromMemory(refCountObjPtr, error)
if error.fail :
print 'failed to read vtable address of shared_ptr ref count type : ', error
return
print 'vtable address of shared_ptr ref count type: 0x{0:x}'.format(refCountTypeVTable)
searchRegions = get_search_regions()
searchRegionCount = len(searchRegions)
processorCount = multiprocessing.cpu_count()
print 'searching target regions on {0} cores'.format(processorCount)
taskResults = Parallel(n_jobs=processorCount)(delayed(findsptr_in_region)(searchRegions[i], objPtr, refCountTypeVTable) for i in range(searchRegionCount))
print
print
print '<<<matches>>>'
matchCount = 0
for taskResult in taskResults :
for match in taskResult :
print '0x{0:x}'.format(match)
matchCount += 1
print
print 'total: ', matchCount
print 'time elapsed: ', time.time() - startTime
print
|
pyroomacoustics/adaptive/data_structures.py | Womac/pyroomacoustics | 915 | 11102330 | <reponame>Womac/pyroomacoustics
from __future__ import division, print_function
import numpy as np
class Buffer:
"""
A simple buffer class with amortized cost
Parameters
----------
length: int
buffer length
dtype: numpy.type
data type
"""
def __init__(self, length=20, dtype=np.float64):
self.buf = np.zeros(length, dtype=dtype)
self.len = length
self.head = self.len
def push(self, val):
"""Add one element at the front of the buffer"""
# Increase size if the buffer is too small
if self.head == 0:
self.buf = np.concatenate(
(np.zeros(self.len, dtype=self.buf.dtype), self.buf)
)
self.head += self.len
self.len *= 2
# store value at head
self.buf[self.head - 1] = val
# move head to next free spot
self.head -= 1
def top(self, n):
"""Returns the n elements at the front of the buffer from newest to oldest"""
return self.buf[self.head : self.head + n]
def flush(self, n):
"""Removes the n oldest elements in the buffer"""
if n > self.len - self.head:
n = self.len - self.head
new_head = self.head + n
# copy the remaining items to the right
self.buf[new_head:] = self.buf[self.head : -n]
# move head
self.head = new_head
def size(self):
"""Returns the number of elements in the buffer"""
return self.len - self.head
def __getitem__(self, r):
"""Allows to retrieve element at a specific position"""
# create a view that starts at head
ptr = self.buf[self.head :]
# returned desired range
return ptr[r]
def __repr__(self):
if self.head == self.len:
return "[]"
else:
return str(self.buf[self.head :])
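# Illustrative usage sketch for Buffer (values are arbitrary); the storage
# doubles automatically once push() runs out of room:
#   >>> buf = Buffer(length=4)
#   >>> for v in [1.0, 2.0, 3.0]:
#   ...     buf.push(v)
#   >>> buf.top(2)    # newest to oldest
#   array([3., 2.])
#   >>> buf.flush(1)  # drop the oldest element
#   >>> buf.size()
#   2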
class Powers:
"""
This class allows to store all powers of a small number
and get them 'a la numpy' with the bracket operator.
There is automatic increase when new values are requested
Parameters
----------
a: float
the number
length: int
the number of integer powers
dtype: numpy.type, optional
the data type (typically np.float32 or np.float64)
Example
-------
>>> an = Powers(0.5)
>>> print(an[4])
0.0625
"""
def __init__(self, a, length=20, dtype=np.float64):
self.a = dtype(a)
self.pwr = self.a ** np.arange(length)
def __getitem__(self, r):
# find maximum power requested
if isinstance(r, int):
high = r + 1
elif isinstance(r, slice):
high = r.stop
elif isinstance(r, list):
high = max(r) + 1
else:
high = int(r + 1)
# Compute it if needed
if high > self.pwr.shape[0]:
self.pwr = np.concatenate(
(self.pwr, self.a ** np.arange(self.pwr.shape[0], high))
)
return self.pwr[r]
def __repr__(self):
return str(self.pwr)
class CoinFlipper:
"""
This class efficiently generates large number of coin flips.
Because each call to ``numpy.random.rand`` is a little bit costly,
it is more efficient to generate many values at once.
This class does this and stores them in advance. It generates
new fresh numbers when needed.
Parameters
----------
p: float, 0 < p < 1
probability to output a 1
length: int
the number of flips to precompute
"""
def __init__(self, p, length=10000):
self.p = p
self.length = length
self.buffer = np.random.random(length) < p
self.dirty_coins = 0
def fresh_flips(self, n):
"""Generates n binary random values now"""
return np.random.random(n) < self.p
def flip_all(self):
"""Regenerates all the used up values"""
remaining = self.length - self.dirty_coins
self.buffer[: self.dirty_coins] = self.fresh_flips(self.dirty_coins)
self.dirty_coins = 0
def flip(self, n):
"""Get n random binary values from the buffer"""
# If more flips than computed are requested
# increase buffer size and flip again
if n > self.length:
self.buffer = np.pad(self.buffer, (0, 2 * n - self.length), mode="constant")
self.buffer[self.length :] = self.fresh_flips(2 * n - self.length)
self.length = 2 * n
remaining = self.length - self.dirty_coins
if remaining < n:
self.flip_all()
flips = self.buffer[self.dirty_coins : self.dirty_coins + n]
self.dirty_coins += n
return flips
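# Illustrative usage sketch for CoinFlipper (probability and counts are
# arbitrary); flips come from a precomputed pool that is refreshed only when
# exhausted:
#   >>> flipper = CoinFlipper(0.5, length=1000)
#   >>> heads = flipper.flip(100)        # boolean array of 100 draws
#   >>> extra = flipper.fresh_flips(10)  # bypasses the pool entirely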
|
applications/smart-distancing/libs/loggers/csv_logger.py | mhsekhavat/neuralet | 228 | 11102339 | <gh_stars>100-1000
import csv
import os
from datetime import date
from tools.objects_post_process import extract_violating_objects
import numpy as np
def prepare_object(detected_object, frame_number):
"""Construct a dictionary that is appropriate for csv writer.
    This function transforms a dictionary with list values into a dictionary
    with scalar values. This transformation is necessary for the csv writer to avoid
    writing lists into the csv.
    Args:
        detected_object: A dictionary that contains a detected object's information after postprocessing.
        frame_number: current frame number
    Returns:
        detected_object transformed into a dictionary with only scalar values. It also contains an item
        for the frame number.
"""
object_dict = {}
object_dict.update({"frame_number": frame_number})
for key, value in detected_object.items():
if isinstance(value, (list, tuple)):
for i, item in enumerate(value):
# TODO: Inspect why some items are float and some are np.float32
if isinstance(item, (float, np.float32)):
item = round(float(item), 4)
object_dict.update({str(key) + "_" + str(i): item})
else:
# TODO: Inspect why some items are float and some are np.float32
if isinstance(value, (float, np.float32)):
value = round(float(value), 4)
object_dict.update({key: value})
return object_dict
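# Illustrative input/output sketch for prepare_object (keys and values are
# assumptions):
#   prepare_object({"id": 7, "bbox": [0.1, 0.2, 0.3, 0.4]}, frame_number=42)
#   -> {"frame_number": 42, "id": 7,
#       "bbox_0": 0.1, "bbox_1": 0.2, "bbox_2": 0.3, "bbox_3": 0.4}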
class Logger:
"""A CSV logger class that store objects information and violated distances information into csv files.
This logger creates two csv file every day in two different directory, one for logging detected objects
and one for logging violated social distancing incidents. The file names are the same as recording date.
:param config: A ConfigEngine object which store all of the config parameters. Access to any parameter
is possible by calling get_section_dict method.
"""
def __init__(self, config):
self.config = config
# The parent directory that stores all log file.
self.log_directory = config.get_section_dict("Logger")["LogDirectory"]
# A directory inside the log_directory that stores object log files.
self.objects_log_directory = os.path.join(self.log_directory, "objects_log")
self.distances_log_directory = os.path.join(self.log_directory, "distances_log")
self.dist_threshold = config.get_section_dict("PostProcessor")["DistThreshold"]
if not os.path.exists(self.log_directory):
os.mkdir(self.log_directory)
if not os.path.exists(self.objects_log_directory):
os.mkdir(self.objects_log_directory)
if not os.path.exists(self.distances_log_directory):
os.mkdir(self.distances_log_directory)
def update(self, frame_number, objects_list, distances):
"""Write the object and violated distances information of a frame into log files.
        Args:
            frame_number: current frame number
            objects_list: A list of dictionaries where each dictionary stores
                information of an object (person) in a frame.
            distances: A 2-d numpy array that stores the distance between each
                pair of objects.
"""
file_name = str(date.today())
objects_log_file_path = os.path.join(self.objects_log_directory, file_name + ".csv")
distances_log_file_path = os.path.join(self.distances_log_directory, file_name + ".csv")
self.log_objects(objects_list, frame_number, objects_log_file_path)
self.log_distances(distances, frame_number, distances_log_file_path)
@staticmethod
def log_objects(objects_list, frame_number, file_path):
"""Write objects information of a frame into the object log file.
        Each row of the object log file consists of a detected object's (person's) information such as
        object (person) ids, bounding box coordinates and the frame number.
        Args:
            objects_list: A list of dictionaries where each dictionary stores information of an object (person)
                in a frame.
            frame_number: current frame number
            file_path: log file path
"""
if len(objects_list) != 0:
object_dict = list(map(lambda x: prepare_object(x, frame_number), objects_list))
if not os.path.exists(file_path):
with open(file_path, "w", newline="") as csvfile:
field_names = list(object_dict[0].keys())
writer = csv.DictWriter(csvfile, fieldnames=field_names)
writer.writeheader()
with open(file_path, "a", newline="") as csvfile:
field_names = list(object_dict[0].keys())
writer = csv.DictWriter(csvfile, fieldnames=field_names)
writer.writerows(object_dict)
def log_distances(self, distances, frame_number, file_path):
"""Write violated incident's information of a frame into the object log file.
Each row of the distances log file consist of a violation information such as object (person) ids,
distance between these two object and frame number.
Args:
distances: A 2-d numpy array that stores distance between each pair of objects.
frame_number: current frame number
file_path: The path for storing log files
"""
violating_objects = extract_violating_objects(distances, self.dist_threshold)
if not os.path.exists(file_path):
with open(file_path, "w", newline="") as csvfile:
field_names = ["frame_number", "object_0", "object_1", "distance"]
writer = csv.DictWriter(csvfile, fieldnames=field_names)
writer.writeheader()
with open(file_path, "a", newline="") as csvfile:
field_names = ["frame_number", "object_0", "object_1", "distance"]
writer = csv.DictWriter(csvfile, fieldnames=field_names)
writer.writerows([{"frame_number": frame_number,
"object_0": indices[0],
"object_1": indices[1],
"distance": distances[indices[0], indices[1]]} for indices in violating_objects])
|
exp_ssl.py | noskill/nips14-ssl | 496 | 11102343 | import learn_yz_x_ss
import sys
learn_yz_x_ss.main(n_passes=3000, n_labeled=int(sys.argv[1]), dataset='mnist_2layer', n_z=50, n_hidden=tuple([int(sys.argv[2])]*int(sys.argv[3])), seed=int(sys.argv[4]), alpha=0.1, comment='')
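# Illustrative invocation (argument values are assumptions): the positional
# arguments are n_labeled, hidden-layer width, hidden-layer count, and seed,
# e.g.
#   python exp_ssl.py 100 500 2 1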
|
alg/compartmental_gp/pyro_model/exponential_break.py | loramf/mlforhealthlabpub | 171 | 11102347 | import torch
import torch.nn as nn
import logging
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.infer import MCMC, NUTS, SVI, TraceEnum_ELBO
from pyro.infer.autoguide import AutoNormal, init_to_sample
from pyro.infer.predictive import _guess_max_plate_nesting
from pyro.nn.module import PyroModule
from pyro.optim import DCTAdam
from pyro.contrib.forecast.util import (MarkDCTParamMessenger, PrefixConditionMessenger, PrefixReplayMessenger, PrefixWarmStartMessenger,
reshape_batch)
logger = logging.getLogger(__name__)
class EnumForecaster(nn.Module):
"""
Forecaster for a :class:`ForecastingModel` using variational inference.
On initialization, this fits a distribution using variational inference
over latent variables and exact inference over the noise distribution,
typically a :class:`~pyro.distributions.GaussianHMM` or variant.
After construction this can be called to generate sample forecasts.
:ivar list losses: A list of losses recorded during training, typically
used to debug convergence. Defined by ``loss = -elbo / data.numel()``.
:param ForecastingModel model: A forecasting model subclass instance.
:param data: A tensor dataset with time dimension -2.
:type data: ~torch.Tensor
:param covariates: A tensor of covariates with time dimension -2.
For models not using covariates, pass a shaped empty tensor
``torch.empty(duration, 0)``.
:type covariates: ~torch.Tensor
:param guide: Optional guide instance. Defaults to a
:class:`~pyro.infer.autoguide.AutoNormal`.
:type guide: ~pyro.nn.module.PyroModule
:param callable init_loc_fn: A per-site initialization function for the
:class:`~pyro.infer.autoguide.AutoNormal` guide. Defaults to
:func:`~pyro.infer.autoguide.initialization.init_to_sample`. See
:ref:`autoguide-initialization` section for available functions.
:param float init_scale: Initial uncertainty scale of the
:class:`~pyro.infer.autoguide.AutoNormal` guide.
:param callable create_plates: An optional function to create plates for
subsampling with the :class:`~pyro.infer.autoguide.AutoNormal` guide.
:param optim: An optional Pyro optimizer. Defaults to a freshly constructed
:class:`~pyro.optim.optim.DCTAdam`.
:type optim: ~pyro.optim.optim.PyroOptim
:param float learning_rate: Learning rate used by
:class:`~pyro.optim.optim.DCTAdam`.
:param tuple betas: Coefficients for running averages used by
:class:`~pyro.optim.optim.DCTAdam`.
:param float learning_rate_decay: Learning rate decay used by
:class:`~pyro.optim.optim.DCTAdam`. Note this is the total decay
over all ``num_steps``, not the per-step decay factor.
:param float clip_norm: Norm used for gradient clipping during
optimization. Defaults to 10.0.
:param bool dct_gradients: Whether to discrete cosine transform gradients
in :class:`~pyro.optim.optim.DCTAdam`. Defaults to False.
:param bool subsample_aware: whether to update gradient statistics only
for those elements that appear in a subsample. This is used
by :class:`~pyro.optim.optim.DCTAdam`.
:param int num_steps: Number of :class:`~pyro.infer.svi.SVI` steps.
:param int num_particles: Number of particles used to compute the
:class:`~pyro.infer.elbo.ELBO`.
:param bool vectorize_particles: If ``num_particles > 1``, determines
whether to vectorize computation of the :class:`~pyro.infer.elbo.ELBO`.
Defaults to True. Set to False for models with dynamic control flow.
:param bool warm_start: Whether to warm start parameters from a smaller
time window. Note this may introduce statistical leakage; usage is
recommended for model exploration purposes only and should be disabled
when publishing metrics.
:param int log_every: Number of training steps between logging messages.
"""
def __init__(self, model, data, covariates, *,
guide=None,
init_loc_fn=init_to_sample,
init_scale=0.1,
create_plates=None,
optim=None,
learning_rate=0.01,
betas=(0.9, 0.99),
learning_rate_decay=0.1,
clip_norm=10.0,
dct_gradients=False,
subsample_aware=False,
num_steps=1001,
num_particles=1,
vectorize_particles=True,
warm_start=False,
log_every=100):
assert data.size(-2) == covariates.size(-2)
super().__init__()
self.model = model
if guide is None:
guide = AutoNormal(self.model, init_loc_fn=init_loc_fn, init_scale=init_scale,
create_plates=create_plates)
self.guide = guide
# Initialize.
if warm_start:
model = PrefixWarmStartMessenger()(model)
guide = PrefixWarmStartMessenger()(guide)
if dct_gradients:
model = MarkDCTParamMessenger("time")(model)
guide = MarkDCTParamMessenger("time")(guide)
elbo = TraceEnum_ELBO(num_particles=num_particles,
vectorize_particles=vectorize_particles)
elbo._guess_max_plate_nesting(model, guide, (data, covariates), {})
elbo.max_plate_nesting = max(elbo.max_plate_nesting, 1) # force a time plate
losses = []
if num_steps:
if optim is None:
optim = DCTAdam({"lr": learning_rate, "betas": betas,
"lrd": learning_rate_decay ** (1 / num_steps),
"clip_norm": clip_norm,
"subsample_aware": subsample_aware})
svi = SVI(self.model, self.guide, optim, elbo)
for step in range(num_steps):
loss = svi.step(data, covariates) / data.numel()
if log_every and step % log_every == 0:
logger.info("step {: >4d} loss = {:0.6g}".format(step, loss))
losses.append(loss)
self.guide.create_plates = None # Disable subsampling after training.
self.max_plate_nesting = elbo.max_plate_nesting
self.losses = losses
def __call__(self, data, covariates, num_samples, batch_size=None):
"""
Samples forecasted values of data for time steps in ``[t1,t2)``, where
``t1 = data.size(-2)`` is the duration of observed data and ``t2 =
covariates.size(-2)`` is the extended duration of covariates. For
example to forecast 7 days forward conditioned on 30 days of
observations, set ``t1=30`` and ``t2=37``.
:param data: A tensor dataset with time dimension -2.
:type data: ~torch.Tensor
:param covariates: A tensor of covariates with time dimension -2.
For models not using covariates, pass a shaped empty tensor
``torch.empty(duration, 0)``.
:type covariates: ~torch.Tensor
:param int num_samples: The number of samples to generate.
:param int batch_size: Optional batch size for sampling. This is useful
for generating many samples from models with large memory
footprint. Defaults to ``num_samples``.
:returns: A batch of joint posterior samples of shape
``(num_samples,1,...,1) + data.shape[:-2] + (t2-t1,data.size(-1))``,
where the ``1``'s are inserted to avoid conflict with model plates.
:rtype: ~torch.Tensor
"""
return super().__call__(data, covariates, num_samples, batch_size)
def forward(self, data, covariates, num_samples, batch_size=None):
assert data.size(-2) < covariates.size(-2)
assert isinstance(num_samples, int) and num_samples > 0
if batch_size is not None:
batches = []
while num_samples > 0:
batch = self.forward(data, covariates, min(num_samples, batch_size))
batches.append(batch)
num_samples -= batch_size
return torch.cat(batches)
assert self.max_plate_nesting >= 1
dim = -1 - self.max_plate_nesting
with torch.no_grad():
with poutine.trace() as tr:
with pyro.plate("particles", num_samples, dim=dim):
self.guide(data, covariates)
with PrefixReplayMessenger(tr.trace):
with PrefixConditionMessenger(self.model._prefix_condition_data):
with pyro.plate("particles", num_samples, dim=dim):
return self.model(data, covariates)
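# Illustrative usage sketch (data shapes and the forecast horizon are
# assumptions), following the t1/t2 convention described in the class
# docstring:
#   forecaster = EnumForecaster(model, data, covariates, num_steps=1001)
#   samples = forecaster(data, extended_covariates, num_samples=100)
# where data/covariates cover the observed t1 steps and extended_covariates
# covers t2 > t1 steps along dimension -2.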
|
backend/config/settings/development.py | stungkit/doccano | 2,082 | 11102440 | from .base import * # noqa: F403
MIDDLEWARE.append("api.middleware.RangesMiddleware") # noqa: F405
CORS_ORIGIN_WHITELIST = ("http://127.0.0.1:3000", "http://0.0.0.0:3000", "http://localhost:3000")
CSRF_TRUSTED_ORIGINS = CORS_ORIGIN_WHITELIST
# LOGGING = {
# 'version': 1,
# 'handlers': {
# 'console': {
# 'level': 'DEBUG',
# 'class': 'logging.StreamHandler',
# }
# },
# 'loggers': {
# 'django.db.backends': {
# 'level': 'DEBUG',
# 'handlers': ['console'],
# },
# }
# }
|
tools/mac/rewrite_modern_objc.py | zipated/src | 2,151 | 11102456 | #!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs clang's "modern objective-c" rewriter on chrome code.
Does the same as Xcode's Edit->Convert->To Modern Objective-C Syntax.
Note that this just runs compile commands and doesn't look at build
dependencies, i.e. it doesn't make sure generated headers exist. It also
requires goma to be disabled. Suggested workflow: Build the target you want
to convert locally with goma to create generated headers, then disable goma,
re-run gn, and then run this script.
"""
import argparse
import glob
import json
import math
import os
import shlex
import subprocess
import sys
def main():
# As far as I can tell, clang's ObjC rewriter can't do in-place rewriting
# (the ARC rewriter can). libclang exposes functions for parsing the remap
# file, but doing that manually in python seems a lot easier.
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('builddir', help='build directory, e.g. out/gn')
parser.add_argument('substr', default='', nargs='?',
help='source dir part, eg chrome/browser/ui/cocoa')
args = parser.parse_args()
rewrite_dir = os.path.abspath(
os.path.join(args.builddir, 'rewrite_modern_objc'))
try:
os.mkdir(rewrite_dir)
except OSError:
pass
remap_file = os.path.join(rewrite_dir, 'remap')
try:
# Remove remap files from prior runs.
os.remove(remap_file)
except OSError:
pass
# The basic idea is to call clang's objcmt rewriter for each source file.
# The rewriter writes a "remap" file containing N times 3 lines:
# Name of an original source file, the original file's timestamp
# at rewriting time, and the name of a temp file containing the rewritten
# contents.
# The rewriter gets confused if several instances run in parallel. We could
# be fancy and have num_cpus rewrite dirs and combine their contents in the
# end, but for now just run the rewrites serially.
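  # As a sketch, one remap entry therefore looks like (paths and timestamp are
  # made up):
  #   /src/chrome/browser/ui/cocoa/foo.mm
  #   1514764800
  #   /out/gn/rewrite_modern_objc/foo.mm-abc123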
# First, ask ninja for the compile commands of all .m and .mm files.
compdb = subprocess.check_output(
['ninja', '-C', args.builddir, '-t', 'compdb', 'objc', 'objcxx'])
for cmd in json.loads(compdb):
objc_file = cmd['file']
if args.substr not in objc_file:
continue
clang_cmd = cmd['command']
had_error = False
if 'gomacc' in clang_cmd:
print >>sys.stderr, 'need builddir with use_goma not set'
had_error = True
if 'jumbo' in clang_cmd:
print >>sys.stderr, 'need builddir with use_jumbo_build not set'
had_error = True
if 'precompile.h-m' in clang_cmd:
print >>sys.stderr, 'need builddir with enable_precompiled_headers=false'
had_error = True
if had_error:
sys.exit(1)
# Ninja creates the directory containing the build output, but we
# don't run ninja, so we need to do that ourselves.
split_cmd = shlex.split(clang_cmd)
o_index = split_cmd.index('-o')
assert o_index != -1
try:
os.makedirs(os.path.dirname(split_cmd[o_index + 1]))
except OSError:
pass
# Add flags to tell clang to do the rewriting.
# Passing "-ccc-objcmt-migrate dir" doesn't give us control over each
# individual setting, so use the Xclang flags. The individual flags are at
# http://llvm-cs.pcc.me.uk/tools/clang/include/clang/Driver/Options.td#291
# Note that -objcmt-migrate-all maps to ObjCMT_MigrateDecls in
# http://llvm-cs.pcc.me.uk/tools/clang/lib/Frontend/CompilerInvocation.cpp#1479
# which is not quite all the options:
# http://llvm-cs.pcc.me.uk/tools/clang/include/clang/Frontend/FrontendOptions.h#248
flags = ['-Xclang', '-mt-migrate-directory', '-Xclang', rewrite_dir]
flags += ['-Xclang', '-objcmt-migrate-subscripting' ]
flags += ['-Xclang', '-objcmt-migrate-literals' ]
#flags += ['-Xclang', '-objcmt-returns-innerpointer-property'] # buggy
#flags += ['-Xclang', '-objcmt-migrate-property-dot-syntax'] # do not want
# objcmt-migrate-all is the same as the flags following it here (it does
# not include the flags listed above it).
# Probably don't want ns-nonatomic-iosonly (or atomic-property), so we
    # can't use migrate-all which includes that, and have to manually set the
# bits of migrate-all we do want.
#flags += ['-Xclang', '-objcmt-migrate-all']
#flags += ['-Xclang', '-objcmt-migrate-property'] # not sure if want
flags += ['-Xclang', '-objcmt-migrate-annotation']
flags += ['-Xclang', '-objcmt-migrate-instancetype']
flags += ['-Xclang', '-objcmt-migrate-ns-macros']
#flags += ['-Xclang', '-objcmt-migrate-protocol-conformance'] # buggy
#flags += ['-Xclang', '-objcmt-atomic-property'] # not sure if want
#flags += ['-Xclang', '-objcmt-ns-nonatomic-iosonly'] # not sure if want
# Want, but needs careful manual review, and doesn't find everything:
#flags += ['-Xclang', '-objcmt-migrate-designated-init']
clang_cmd += ' ' + ' '.join(flags)
print objc_file
subprocess.check_call(clang_cmd, shell=True, cwd=cmd['directory'])
if not os.path.exists(remap_file):
print 'no changes'
return
# Done with rewriting. Now the read the above-described 'remap' file and
# copy modified files over the originals.
remap = open(remap_file).readlines()
for i in range(0, len(remap), 3):
infile, mtime, outfile = map(str.strip, remap[i:i+3])
if args.substr not in infile:
# Ignore rewritten header files not containing args.substr too.
continue
if math.trunc(os.path.getmtime(infile)) != int(mtime):
print '%s was modified since rewriting; exiting' % infile
sys.exit(1)
os.rename(outfile, infile) # Copy rewritten file over.
print 'all done. commit, run `git cl format`, commit again, and upload!'
if __name__ == '__main__':
main()
|
problems/euler/13/largesum.py | vidyadeepa/the-coding-interview | 1,571 | 11102466 | <reponame>vidyadeepa/the-coding-interview
numbers = [37107287533902102798797998220837590246510135740250,
46376937677490009712648124896970078050417018260538,
74324986199524741059474233309513058123726617309629,
91942213363574161572522430563301811072406154908250,
23067588207539346171171980310421047513778063246676,
89261670696623633820136378418383684178734361726757,
28112879812849979408065481931592621691275889832738,
44274228917432520321923589422876796487670272189318,
47451445736001306439091167216856844588711603153276,
70386486105843025439939619828917593665686757934951,
62176457141856560629502157223196586755079324193331,
64906352462741904929101432445813822663347944758178,
92575867718337217661963751590579239728245598838407,
58203565325359399008402633568948830189458628227828,
80181199384826282014278194139940567587151170094390,
35398664372827112653829987240784473053190104293586,
86515506006295864861532075273371959191420517255829,
71693888707715466499115593487603532921714970056938,
54370070576826684624621495650076471787294438377604,
53282654108756828443191190634694037855217779295145,
36123272525000296071075082563815656710885258350721,
45876576172410976447339110607218265236877223636045,
17423706905851860660448207621209813287860733969412,
81142660418086830619328460811191061556940512689692,
51934325451728388641918047049293215058642563049483,
62467221648435076201727918039944693004732956340691,
15732444386908125794514089057706229429197107928209,
55037687525678773091862540744969844508330393682126,
18336384825330154686196124348767681297534375946515,
80386287592878490201521685554828717201219257766954,
78182833757993103614740356856449095527097864797581,
16726320100436897842553539920931837441497806860984,
48403098129077791799088218795327364475675590848030,
87086987551392711854517078544161852424320693150332,
59959406895756536782107074926966537676326235447210,
69793950679652694742597709739166693763042633987085,
41052684708299085211399427365734116182760315001271,
65378607361501080857009149939512557028198746004375,
35829035317434717326932123578154982629742552737307,
94953759765105305946966067683156574377167401875275,
88902802571733229619176668713819931811048770190271,
25267680276078003013678680992525463401061632866526,
36270218540497705585629946580636237993140746255962,
24074486908231174977792365466257246923322810917141,
91430288197103288597806669760892938638285025333403,
34413065578016127815921815005561868836468420090470,
23053081172816430487623791969842487255036638784583,
11487696932154902810424020138335124462181441773470,
63783299490636259666498587618221225225512486764533,
67720186971698544312419572409913959008952310058822,
95548255300263520781532296796249481641953868218774,
76085327132285723110424803456124867697064507995236,
37774242535411291684276865538926205024910326572967,
23701913275725675285653248258265463092207058596522,
29798860272258331913126375147341994889534765745501,
18495701454879288984856827726077713721403798879715,
38298203783031473527721580348144513491373226651381,
34829543829199918180278916522431027392251122869539,
40957953066405232632538044100059654939159879593635,
29746152185502371307642255121183693803580388584903,
41698116222072977186158236678424689157993532961922,
62467957194401269043877107275048102390895523597457,
23189706772547915061505504953922979530901129967519,
86188088225875314529584099251203829009407770775672,
11306739708304724483816533873502340845647058077308,
82959174767140363198008187129011875491310547126581,
97623331044818386269515456334926366572897563400500,
42846280183517070527831839425882145521227251250327,
55121603546981200581762165212827652751691296897789,
32238195734329339946437501907836945765883352399886,
75506164965184775180738168837861091527357929701337,
62177842752192623401942399639168044983993173312731,
32924185707147349566916674687634660915035914677504,
99518671430235219628894890102423325116913619626622,
73267460800591547471830798392868535206946944540724,
76841822524674417161514036427982273348055556214818,
97142617910342598647204516893989422179826088076852,
87783646182799346313767754307809363333018982642090,
10848802521674670883215120185883543223812876952786,
71329612474782464538636993009049310363619763878039,
62184073572399794223406235393808339651327408011116,
66627891981488087797941876876144230030984490851411,
60661826293682836764744779239180335110989069790714,
85786944089552990653640447425576083659976645795096,
66024396409905389607120198219976047599490197230297,
64913982680032973156037120041377903785566085089252,
16730939319872750275468906903707539413042652315011,
94809377245048795150954100921645863754710598436791,
78639167021187492431995700641917969777599028300699,
15368713711936614952811305876380278410754449733078,
40789923115535562561142322423255033685442488917353,
44889911501440648020369068063960672322193204149535,
41503128880339536053299340368006977710650566631954,
81234880673210146739058568557934581403627822703280,
82616570773948327592232845941706525094512325230608,
22918802058777319719839450180888072429661980811197,
77158542502016545090413245809786882778948721859617,
72107838435069186155435662884062257473692284509516,
20849603980134001723930671666823555245252804609722,
53503534226472524250874054075591789781264330331690]
numbers_length = len(str(numbers[0]))
reverted = [str(n)[::-1] for n in numbers]
result = []
remainder = 0
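# Column-wise addition, least-significant digit first. For example with
# numbers = [95, 95]: column 0 gives 5+5 = 10 -> digit 0, carry 1; column 1
# gives 9+9+1 = 19 -> digit 9, carry 1; the collected digits "0", "9" plus the
# reversed final carry "1" join and reverse to "190".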
for pos in range(numbers_length):
digit_sum = sum(int(d[pos]) for d in reverted) + remainder
print digit_sum
digit = digit_sum % 10
print digit
result.append(str(digit))
remainder = digit_sum / 10
print remainder
result.append(str(remainder)[::-1])  # reverse the carry's digits so the final string reversal restores their order
result = "".join(r for r in result)[::-1]
print result[0:10]
|
src/lib/weakref.py | DTenore/skulpt | 2,671 | 11102505 | import _sk_fail; _sk_fail._("weakref")
|
tests/utils/common.py | niobeus/onnx2torch | 144 | 11102511 | <filename>tests/utils/common.py
import io
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Type
from typing import Union
import torch
import numpy as np
import onnx
import onnxruntime as ort
from onnx import defs
from onnx import numpy_helper
from onnx.helper import make_graph
from onnx.helper import make_model
from onnx.helper import make_operatorsetid
from onnx.helper import make_tensor_value_info
from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE
from onnx.onnx_ml_pb2 import ModelProto
from onnx.onnx_ml_pb2 import NodeProto
from onnx.onnx_ml_pb2 import ValueInfoProto
from onnx.shape_inference import infer_shapes
from onnx2torch.converter import convert
def make_model_from_nodes(
nodes: Union[NodeProto, Sequence[NodeProto]],
initializers: Dict[str, np.ndarray],
inputs_example: Optional[Dict[str, np.ndarray]] = None,
inputs_info: Optional[Sequence[ValueInfoProto]] = None,
outputs_info: Optional[Sequence[ValueInfoProto]] = None,
opset_version: Optional[int] = 11,
) -> ModelProto:
if inputs_info is None and inputs_example is None:
raise ValueError('inputs_example or inputs_info must be set')
if inputs_info is None:
inputs_info = []
for name, data in inputs_example.items():
elem_type = NP_TYPE_TO_TENSOR_TYPE[data.dtype]
inputs_info.append(make_tensor_value_info(name=name, elem_type=elem_type, shape=data.shape))
if outputs_info is None:
outputs_info = []
elem_type = inputs_info[0].type.tensor_type.elem_type
for name in tuple(nodes.output):
output_proto = make_tensor_value_info(name=name, elem_type=elem_type, shape=None)
outputs_info.append(output_proto)
graph_proto = make_graph(
nodes=(nodes,),
name='test_graph',
inputs=inputs_info,
outputs=outputs_info,
initializer=[
numpy_helper.from_array(data, name=name)
for name, data in initializers.items()
],
)
opset_imports = None
if opset_version is not None:
opset_imports = [
make_operatorsetid(
domain=defs.ONNX_DOMAIN,
version=opset_version,
),
]
model = make_model(graph_proto, opset_imports=opset_imports)
model = infer_shapes(model, check_type=False)
onnx.checker.check_model(model, False)
return model
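# Illustrative usage sketch (op type and tensor names are assumptions):
#   node = onnx.helper.make_node('Relu', inputs=['x'], outputs=['y'])
#   model = make_model_from_nodes(
#       node, initializers={},
#       inputs_example={'x': np.zeros((1, 3), dtype=np.float32)},
#   )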
def _convert_data(data: Any, from_type: Type, convert_function: Callable) -> Any:
if isinstance(data, Dict):
return {
k: _convert_data(v, from_type, convert_function)
for k, v in data.items()
}
if isinstance(data, (Tuple, List)):
return type(data)(
_convert_data(v, from_type, convert_function)
for v in data
)
if isinstance(data, from_type):
return convert_function(data)
return data
def convert_data_onnx2torch(data: Any, device: str = 'cpu') -> Any:
def convert_function(t):
return torch.from_numpy(t).to(device=device)
return _convert_data(data, from_type=np.ndarray, convert_function=convert_function)
def convert_data_torch2onnx(data: Any) -> Any:
def convert_function(t):
return t.detach().cpu().numpy()
return _convert_data(data, from_type=torch.Tensor, convert_function=convert_function)
def convert_onnx_inputs_to_torch_inputs(
onnx_model: ModelProto,
onnx_inputs: Dict[str, Any],
device: str = 'cpu',
) -> List[Any]:
return [
convert_data_onnx2torch(onnx_inputs[graph_input.name], device=device)
for graph_input in onnx_model.graph.input
if graph_input.name in onnx_inputs
]
def calc_ort_outputs(model: ModelProto, inputs: Dict[str, Any], skip_unused_inputs: bool = False) -> List[Any]:
ort_session = ort.InferenceSession(
model.SerializeToString(),
providers=['CPUExecutionProvider'],
)
if skip_unused_inputs:
graph_inputs = [i.name for i in model.graph.input]
inputs = {
k: v for k, v in inputs.items()
if k in graph_inputs
}
outputs = ort_session.run(
output_names=None,
input_feed=inputs,
)
return outputs
def calc_torch_outputs(model: ModelProto, inputs: Dict[str, Any], device: str = 'cpu') -> Any:
inputs = convert_onnx_inputs_to_torch_inputs(onnx_model=model, onnx_inputs=inputs, device=device)
model = convert(model).to(device=device)
outputs = model(*inputs)
return convert_data_torch2onnx(outputs)
def calc_torch_and_ort_outputs(
model: ModelProto,
test_inputs: Dict[str, np.ndarray],
):
torch_outputs = calc_torch_outputs(model=model, inputs=test_inputs)
ort_outputs = calc_ort_outputs(model=model, inputs=test_inputs)
return torch_outputs, ort_outputs
def convert_onnx2torch2onnx(
model: ModelProto,
inputs: Dict[str, np.ndarray],
opset_version: int = 13,
**export_kwargs,
) -> ModelProto:
torch_model = convert(model)
input_names = list(inputs.keys())
args = list(inputs.values())
args = tuple(torch.tensor(arg) for arg in args)
with io.BytesIO() as tmp_file:
torch.onnx.export(
model=torch_model,
args=args,
f=tmp_file,
input_names=input_names,
opset_version=opset_version,
**export_kwargs,
)
return onnx.load_from_string(tmp_file.getvalue())
def _check_onnx_model(
onnx_model: ModelProto,
onnx_inputs: Dict[str, Any],
onnx_torch_check_function: Callable,
torch_cpu_cuda_check_function: Optional[Callable] = None,
onnx_torch2onnx_check_function: Optional[Callable] = None,
opset_version: int = 13,
) -> None:
ort_outputs = calc_ort_outputs(onnx_model, onnx_inputs)
torch_outputs = calc_torch_outputs(onnx_model, onnx_inputs, device='cpu')
onnx_torch_check_function(ort_outputs, torch_outputs)
if torch_cpu_cuda_check_function is not None:
torch_cuda_outputs = calc_torch_outputs(onnx_model, onnx_inputs, device='cuda')
torch_cpu_cuda_check_function(torch_outputs, torch_cuda_outputs)
if onnx_torch2onnx_check_function is not None:
torch2onnx_model = convert_onnx2torch2onnx(onnx_model, inputs=onnx_inputs, opset_version=opset_version)
ort_torch2onnx_outputs = calc_ort_outputs(torch2onnx_model, onnx_inputs, skip_unused_inputs=True)
onnx_torch2onnx_check_function(ort_outputs, ort_torch2onnx_outputs)
def check_onnx_model(
onnx_model: ModelProto,
onnx_inputs: Dict[str, Any],
atol_onnx_torch: float = 0.0,
atol_torch_cpu_cuda: float = 0.0,
atol_onnx_torch2onnx: float = 0.0,
opset_version: int = 13,
) -> None:
def onnx_torch_check_function(onnx_output, torch_output):
if len(onnx_output) == 1:
torch_output = [torch_output]
for a, b in zip(onnx_output, torch_output):
assert np.all(np.isclose(a, b, atol=atol_onnx_torch)), 'ort and torch outputs have significant difference'
def torch_cpu_cuda_check_function(torch_cpu_output, torch_cuda_output):
if not isinstance(torch_cpu_output, (List, Tuple)):
torch_cpu_output = [torch_cpu_output]
torch_cuda_output = [torch_cuda_output]
for a, b in zip(torch_cpu_output, torch_cuda_output):
assert np.all(np.isclose(a, b, atol=atol_torch_cpu_cuda)), \
'torch cpu and torch cuda outputs have significant difference'
return True
def onnx_torch2onnx_check_function(onnx_output, torch2onnx_output):
for a, b in zip(onnx_output, torch2onnx_output):
assert np.all(np.isclose(a, b, atol=atol_onnx_torch2onnx)), \
'ort and ort+torch2onnx outputs have significant difference'
return True
_check_onnx_model(
onnx_model=onnx_model,
onnx_inputs=onnx_inputs,
onnx_torch_check_function=onnx_torch_check_function,
torch_cpu_cuda_check_function=torch_cpu_cuda_check_function,
onnx_torch2onnx_check_function=onnx_torch2onnx_check_function,
opset_version=opset_version,
)
def check_torch_model(
torch_model: torch.nn.Module,
onnx_inputs: Dict[str, Any],
atol_onnx_torch: float = 0.0,
atol_torch_cpu_cuda: float = 0.0,
atol_onnx_torch2onnx: float = 0.0,
opset_version: int = 13,
) -> None:
arguments = locals()
input_names = list(onnx_inputs.keys())
args = tuple(torch.tensor(arg) for arg in onnx_inputs.values())
with io.BytesIO() as tmp_file:
torch.onnx.export(
model=torch_model,
args=args,
f=tmp_file,
input_names=input_names,
opset_version=opset_version,
)
arguments.pop('torch_model')
arguments['onnx_model'] = onnx.load_from_string(tmp_file.getvalue())
check_onnx_model(**arguments)
|
RecoVertex/BeamSpotProducer/test/BeamFit_LumiBased_Workflow.py | ckamtsikis/cmssw | 852 | 11102545 | import FWCore.ParameterSet.Config as cms
process = cms.Process("BSworkflow")
# initialize MessageLogger
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.load("RecoVertex.BeamSpotProducer.d0_phi_analyzer_cff")
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/express/Run2010B/StreamExpress/ALCARECO/TkAlMinBias-v2/000/147/984/00B7AE46-58D8-DF11-9A23-001D09F292D1.root'
)
)
process.MessageLogger.cerr.FwkReport = cms.untracked.PSet(
reportEvery = cms.untracked.int32(1000000),
)
#process.source = cms.Source('PoolSource',
# debugVerbosity = cms.untracked.uint32(0),
# debugFlag = cms.untracked.bool(False)
# )
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1) #1500
)
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True)
)
# this is for filtering on L1 technical trigger bit
process.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskTechTrigConfig_cff')
process.load('HLTrigger/HLTfilters/hltLevel1GTSeed_cfi')
process.hltLevel1GTSeed.L1TechTriggerSeeding = cms.bool(True)
process.hltLevel1GTSeed.L1SeedsLogicalExpression = cms.string('0 AND ( 40 OR 41 ) AND NOT (36 OR 37 OR 38 OR 39)')
## reco PV
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = 'GR_R_38X_V11::All'
process.load("Configuration.StandardSequences.Reconstruction_cff")
process.load("RecoVertex.BeamSpotProducer.BeamSpot_cfi")
process.load("RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi")
process.offlinePrimaryVertices.TrackLabel = cms.InputTag("ALCARECOTkAlMinBias")
#### remove beam scraping events
process.noScraping= cms.EDFilter("FilterOutScraping",
applyfilter = cms.untracked.bool(True),
debugOn = cms.untracked.bool(False), ## Or 'True' to get some per-event info
numtrack = cms.untracked.uint32(10),
thresh = cms.untracked.double(0.20)
)
process.p = cms.Path(
# process.hltLevel1GTSeed +
# process.offlineBeamSpot +
# process.offlinePrimaryVertices+
# process.noScraping +
process.d0_phi_analyzer)
process.MessageLogger.debugModules = ['BeamSpotAnalyzer']
################### Primary Vertex
process.offlinePrimaryVertices.PVSelParameters.maxDistanceToBeam = 2
process.offlinePrimaryVertices.TkFilterParameters.maxNormalizedChi2 = 20
process.offlinePrimaryVertices.TkFilterParameters.minSiliconLayersWithHits = 5
process.offlinePrimaryVertices.TkFilterParameters.maxD0Significance = 100
process.offlinePrimaryVertices.TkFilterParameters.minPixelLayersWithHits = 1
process.offlinePrimaryVertices.TkClusParameters.TkGapClusParameters.zSeparation = 1
#######################
process.d0_phi_analyzer.BeamFitter.TrackCollection = 'ALCARECOTkAlMinBias'
process.d0_phi_analyzer.BeamFitter.MinimumTotalLayers = 6
process.d0_phi_analyzer.BeamFitter.MinimumPixelLayers = -1
process.d0_phi_analyzer.BeamFitter.MaximumNormChi2 = 10
process.d0_phi_analyzer.BeamFitter.MinimumInputTracks = 50
process.d0_phi_analyzer.BeamFitter.MinimumPt = 1.0
process.d0_phi_analyzer.BeamFitter.MaximumImpactParameter = 1.0
process.d0_phi_analyzer.BeamFitter.TrackAlgorithm = cms.untracked.vstring()
#process.d0_phi_analyzer.BeamFitter.Debug = True
process.d0_phi_analyzer.PVFitter.Apply3DFit = True
process.d0_phi_analyzer.PVFitter.minNrVerticesForFit = 10
#########################
process.d0_phi_analyzer.BeamFitter.AsciiFileName = 'BeamFit_LumiBased_Workflow.txt'
process.d0_phi_analyzer.BeamFitter.AppendRunToFileName = False
process.d0_phi_analyzer.BeamFitter.OutputFileName = 'BeamFit_LumiBased_Workflow.root'
#process.d0_phi_analyzer.BeamFitter.SaveNtuple = True
process.d0_phi_analyzer.BeamFitter.SavePVVertices = True
# fit as function of lumi sections
process.d0_phi_analyzer.BSAnalyzerParameters.fitEveryNLumi = 1
process.d0_phi_analyzer.BSAnalyzerParameters.resetEveryNLumi = 1
|
orgsrc/plantuml.py | Sinamore/orgextended | 120 | 11102546 | <gh_stars>100-1000
import sublime
import sublime_plugin
import sys
import io
import re
import logging
import subprocess, os
import threading, time, signal
from shutil import copyfile
import OrgExtended.asettings as sets
# Python Babel Mode
def Extension(cmd):
return ".pu"
def WrapStart(cmd):
return "@startuml"
def WrapEnd(cmd):
return "@enduml"
# Actually do the work, return an array of output.
def Execute(cmd,sets):
jarfile = sets.Get("plantuml",None)
if(jarfile == None):
print("ERROR: cannot find plantuml jar file. Please setup the plantuml key in your settings file")
return ["ERROR - missing plantuml.jar file"]
cmd.output = cmd.params.Get('file',"diagram.png")
outpath = os.path.dirname(cmd.filename)
sourcepath = os.path.dirname(cmd.sourcefile)
commandLine = [r"java", "-jar", jarfile, cmd.filename]
try:
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
except:
startupinfo = None
cwd = os.path.join(sublime.packages_path(),"User")
popen = subprocess.Popen(commandLine, universal_newlines=True, cwd=cwd, startupinfo=startupinfo, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(o,e) = popen.communicate()
convertFile = os.path.join(outpath,os.path.splitext(os.path.basename(cmd.filename))[0] + ".png")
destFile = os.path.normpath(os.path.join(sourcepath,cmd.output))
os.makedirs(os.path.dirname(destFile), exist_ok=True)
copyfile(convertFile, destFile)
return o.split('\n') + e.split('\n')
# Run after results are in the buffer. We can do whatever
# Is needed to the buffer post execute here.
def PostExecute(cmd):
pass
# Create one of these and return true if we should show images after execution.
def GeneratesImages(cmd):
return True |
auctioning_platform/auctions/auctions/tests/test_bidding.py | nhdinh/smp-modulith | 299 | 11102560 |
from datetime import datetime, timedelta
from typing import Optional
from unittest.mock import Mock, call
from freezegun import freeze_time
import pytest
import pytz
from foundation.events import EventBus
from foundation.value_objects.factories import get_dollars
from auctions import BeginningAuction, BidderHasBeenOverbid, PlacingBid, WinningBidPlaced
from auctions.application.repositories import AuctionsRepository
from auctions.application.use_cases.beginning_auction import BeginningAuctionInputDto
from auctions.application.use_cases.placing_bid import PlacingBidInputDto, PlacingBidOutputBoundary, PlacingBidOutputDto
from auctions.domain.entities import Auction
from auctions.domain.exceptions import BidOnEndedAuction
from auctions.domain.value_objects import AuctionId
from auctions.tests.factories import AuctionFactory
from auctions.tests.in_memory_repo import InMemoryAuctionsRepo
class PlacingBidOutputBoundaryFake(PlacingBidOutputBoundary):
def __init__(self) -> None:
self.dto: Optional[PlacingBidOutputDto] = None
def present(self, output_dto: PlacingBidOutputDto) -> None:
self.dto = output_dto
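# Added note: the fake presenter simply records the DTO passed to present(), so
# the tests below can assert on output_boundary.dto instead of mocking a presenter.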
@pytest.fixture()
def output_boundary() -> PlacingBidOutputBoundary:
return PlacingBidOutputBoundaryFake()
@pytest.fixture()
def auction() -> Auction:
return AuctionFactory()
@pytest.fixture()
def auction_id(auction: Auction) -> AuctionId:
return auction.id
@pytest.fixture()
def auction_title(auction: Auction) -> str:
return auction.title
@pytest.fixture()
def event_bus() -> Mock:
return Mock(spec_set=EventBus)
@pytest.fixture()
def auctions_repo(event_bus: Mock) -> AuctionsRepository:
return InMemoryAuctionsRepo(event_bus)
@pytest.fixture()
def place_bid_uc(
output_boundary: PlacingBidOutputBoundaryFake, auction: Auction, auctions_repo: AuctionsRepository
) -> PlacingBid:
auctions_repo.save(auction)
return PlacingBid(output_boundary, auctions_repo)
@pytest.fixture()
def beginning_auction_uc(auctions_repo: AuctionsRepository) -> BeginningAuction:
return BeginningAuction(auctions_repo)
def test_Auction_FirstBidHigherThanIntialPrice_IsWinning(
place_bid_uc: PlacingBid, output_boundary: PlacingBidOutputBoundaryFake, auction_id: AuctionId
) -> None:
place_bid_uc.execute(PlacingBidInputDto(1, auction_id, get_dollars("100")))
expected_dto = PlacingBidOutputDto(is_winner=True, current_price=get_dollars("100"))
assert output_boundary.dto == expected_dto
def test_Auction_BidLowerThanCurrentPrice_IsLosing(
place_bid_uc: PlacingBid, output_boundary: PlacingBidOutputBoundaryFake, auction_id: AuctionId
) -> None:
place_bid_uc.execute(PlacingBidInputDto(1, auction_id, get_dollars("5")))
assert output_boundary.dto == PlacingBidOutputDto(is_winner=False, current_price=get_dollars("10"))
def test_Auction_Overbid_IsWinning(
place_bid_uc: PlacingBid, output_boundary: PlacingBidOutputBoundaryFake, auction_id: AuctionId
) -> None:
place_bid_uc.execute(PlacingBidInputDto(1, auction_id, get_dollars("100")))
place_bid_uc.execute(PlacingBidInputDto(2, auction_id, get_dollars("120")))
assert output_boundary.dto == PlacingBidOutputDto(is_winner=True, current_price=get_dollars("120"))
def test_Auction_OverbidByWinner_IsWinning(
place_bid_uc: PlacingBid, output_boundary: PlacingBidOutputBoundaryFake, auction_id: AuctionId
) -> None:
place_bid_uc.execute(PlacingBidInputDto(1, auction_id, get_dollars("100")))
place_bid_uc.execute(PlacingBidInputDto(1, auction_id, get_dollars("120")))
assert output_boundary.dto == PlacingBidOutputDto(is_winner=True, current_price=get_dollars("120"))
def test_Auction_FirstBid_EmitsEvent(
place_bid_uc: PlacingBid, event_bus: Mock, auction_id: AuctionId, auction_title: str
) -> None:
place_bid_uc.execute(PlacingBidInputDto(1, auction_id, get_dollars("100")))
event_bus.post.assert_called_once_with(WinningBidPlaced(auction_id, 1, get_dollars("100"), auction_title))
# Used in the example for initializing the module
def test_Auction_OverbidFromOtherBidder_EmitsEvents(
beginning_auction_uc: BeginningAuction, place_bid_uc: PlacingBid, event_bus: Mock
) -> None:
auction_id = 1
tomorrow = datetime.now(tz=pytz.UTC) + timedelta(days=1)
beginning_auction_uc.execute(BeginningAuctionInputDto(auction_id, "Foo", get_dollars("1.00"), tomorrow))
place_bid_uc.execute(PlacingBidInputDto(1, auction_id, get_dollars("2.0")))
event_bus.post.reset_mock()
place_bid_uc.execute(PlacingBidInputDto(2, auction_id, get_dollars("3.0")))
event_bus.post.assert_has_calls(
[
call(WinningBidPlaced(auction_id, 2, get_dollars("3.0"), "Foo")),
call(BidderHasBeenOverbid(auction_id, 1, get_dollars("3.0"), "Foo")),
],
any_order=True,
)
assert event_bus.post.call_count == 2
def test_Auction_OverbidFromOtherBidder_EmitsEvent(
place_bid_uc: PlacingBid, event_bus: Mock, auction_id: AuctionId, auction_title: str
) -> None:
place_bid_uc.execute(PlacingBidInputDto(1, auction_id, get_dollars("100")))
event_bus.post.reset_mock()
place_bid_uc.execute(PlacingBidInputDto(2, auction_id, get_dollars("120")))
event_bus.post.assert_has_calls(
[
call(WinningBidPlaced(auction_id, 2, get_dollars("120"), auction_title)),
call(BidderHasBeenOverbid(auction_id, 1, get_dollars("120"), auction_title)),
],
any_order=True,
)
assert event_bus.post.call_count == 2
def test_Auction_OverbidFromWinner_EmitsWinningBidEventOnly(
place_bid_uc: PlacingBid, event_bus: Mock, auction_id: AuctionId, auction_title: str
) -> None:
place_bid_uc.execute(PlacingBidInputDto(3, auction_id, get_dollars("100")))
event_bus.post.reset_mock()
place_bid_uc.execute(PlacingBidInputDto(3, auction_id, get_dollars("120")))
event_bus.post.assert_called_once_with(WinningBidPlaced(auction_id, 3, get_dollars("120"), auction_title))
def test_PlacingBid_BiddingOnEndedAuction_RaisesException(
beginning_auction_uc: BeginningAuction, place_bid_uc: PlacingBid
) -> None:
yesterday = datetime.now(tz=pytz.UTC) - timedelta(days=1)
with freeze_time(yesterday):
beginning_auction_uc.execute(
BeginningAuctionInputDto(1, "Bar", get_dollars("1.00"), yesterday + timedelta(hours=1))
)
with pytest.raises(BidOnEndedAuction):
place_bid_uc.execute(PlacingBidInputDto(1, 1, get_dollars("2.00")))
|
utils/data_util.py | taha-a/image | 161 | 11102568 | import numpy as np
import pandas as pd
from collections import Counter
from nltk.tokenize import word_tokenize
import pickle
import json
import os
max_len = 20
word_threshold = 2
counter = None
def prepare_coco_captions(filename="Dataset/captions_val2014.json"):
'''
Prepare COCO Captions in the Flickr annotation file format
'''
with open(filename, 'r') as f:
data = json.load(f)
images = data['images']
captions = data['annotations']
prefix = "COCO_train2014_"
for cap in captions:
image_id = str(cap['image_id'])
len_id = len(image_id)
zeros = '0'*(12-len_id)
image_id = prefix+zeros+image_id
cap['image_id'] = image_id
cap['caption'] = cap['caption'].replace('\n','')\
.replace(',', ' ,').replace('.', '')\
.replace('"','" ').replace("'s"," 's")\
.replace("'t"," 't")+ " ."
captions = sorted(captions, key=lambda k: k['image_id'])
cap_path="Dataset/COCOcaptions.txt"
with open(cap_path,'w') as f:
for i, cap in enumerate(captions):
f.write(cap['image_id']+'#'+str(i%5)+'\t'+cap['caption']+'\n')
return cap_path
def preprocess_coco_captions(filenames, captions):
df = pd.DataFrame()
df['FileNames'] = filenames
df['caption'] = captions
df.caption = df.caption.str.decode('utf')
    df['caption'] = df['caption'].apply(word_tokenize).apply(lambda x: x[:max_len]).apply(" ".join).str.lower()
anomalies = df.FileNames.value_counts()[(df.FileNames.value_counts() > 5)].index.tolist()
for name in anomalies:
indexes = df[df.FileNames==name].index[5:]
df = df.drop(indexes)
df = df.reset_index(drop=True)
with open("Dataset/COCOcaptions.txt",'w') as f:
for i, row in df.iterrows():
f.write(row['FileNames']+'#'+str(i%5)+'\t'+row['caption']+'\n')
return df
def preprocess_flickr_captions(filenames, captions):
global max_len
print "Preprocessing Captions"
df = pd.DataFrame()
df['FileNames'] = filenames
df['caption'] = captions
df.caption = df.caption.str.decode('utf')
df['caption'] = df['caption'].apply(word_tokenize).apply(
lambda x: x[:max_len]).apply(" ".join).str.lower()
#df = df[:158900] #uncomment if flickr
return df
def generate_vocab(df):
global max_len, word_threshold, counter
print "Generating Vocabulary"
vocab = dict([w for w in counter.items() if w[1] >= word_threshold])
vocab["<UNK>"] = len(counter) - len(vocab)
vocab["<PAD>"] = df.caption.str.count("<PAD>").sum()
vocab["<S>"] = df.caption.str.count("<S>").sum()
vocab["</S>"] = df.caption.str.count("</S>").sum()
wtoidx = {}
wtoidx["<S>"] = 1
wtoidx["</S>"] = 2
wtoidx["<PAD>"] = 0
wtoidx["<UNK>"] = 3
print "Generating Word to Index and Index to Word"
i = 4
for word in vocab.keys():
if word not in ["<S>", "</S>", "<PAD>", "<UNK>"]:
wtoidx[word] = i
i += 1
print "Size of Vocabulary", len(vocab)
return vocab, wtoidx
def pad_captions(df):
global max_len
print "Padding Caption <PAD> to Max Length", max_len, "+ 2 for <S> and </S>"
dfPadded = df.copy()
dfPadded['caption'] = "<S> " + dfPadded['caption'] + " </S>"
max_len = max_len + 2
for i, row in dfPadded.iterrows():
cap = row['caption']
cap_len = len(cap.split())
if(cap_len < max_len):
pad_len = max_len - cap_len
pad_buf = "<PAD> " * pad_len
pad_buf = pad_buf.strip()
dfPadded.set_value(i, 'caption', cap + " " + pad_buf)
return dfPadded
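# Illustrative example (added comment): with max_len = 20, a caption such as
# "a dog runs" becomes
#   "<S> a dog runs </S> <PAD> <PAD> ... <PAD>"
# i.e. it is wrapped in <S>/</S> and right-padded with <PAD> up to 22 tokens.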
def load_features(feature_path):
features = np.load(feature_path)
features = np.repeat(features, 5, axis=0)
print "Features Loaded", feature_path
return features
def split_dataset(df, features, ratio=0.8):
split_idx = int(df.shape[0] * ratio)
print "Data Statistics:"
print "# Records Total Data: ", df.shape[0]
print "# Records Training Data: ", split_idx
print "# Records Training Data: ", df.shape[0] - split_idx
print "Ration of Training: Validation = ", ratio * 100, ":", 100 - (ratio * 100)
val_features = features[split_idx:]
val_captions = np.array(df.caption)[split_idx:]
np.save("Dataset/Validation_Data", zip(val_features, val_captions))
return df[:split_idx], features[:split_idx]
def get_data(required_files):
ret = []
for fil in required_files:
ret.append(np.load("Dataset/" + fil + ".npy"))
return ret
def generate_captions(
wt=2,
ml=20,
cap_path='Dataset/results_20130124.token',#default set to flickr30k captions
feat_path='Dataset/features.npy',
data_is_coco=False):
required_files = ["vocab", "wordmap", "Training_Data"]
generate = False
for fil in required_files:
if not os.path.isfile('Dataset/' + fil + ".npy"):
generate = True
print "Required Files not present. Regenerating Data."
break
if not generate:
print "Dataset Present; Skipping Generation."
return get_data(required_files)
global max_len, word_threshold, counter
max_len = ml
word_threshold = wt
print "Loading Caption Data", cap_path
if data_is_coco:
# Prepare COCO captions in Flickr format
cap_path = prepare_coco_captions(cap_path)
# Load the COCO captions data
with open(cap_path, 'r') as f:
data = f.readlines()
filenames = [caps.split('\t')[0].split('#')[0] for caps in data]
captions = [caps.split('\t')[1] for caps in data]
df = preprocess_coco_captions(filenames, captions)
else:
with open(cap_path, 'r') as f:
data = f.readlines()
filenames = [caps.split('\t')[0].split('#')[0] for caps in data]
captions = [caps.replace('\n', '').split('\t')[1] for caps in data]
df = preprocess_flickr_captions(filenames, captions)
features = load_features(feat_path)
print features.shape, df.shape
idx = np.random.permutation(features.shape[0])
df = df.iloc[idx]
features = features[idx]
# df, features = split_dataset(df, features) #use flickr8k for validation
counter = Counter()
for i, row in df.iterrows():
counter.update(row["caption"].lower().split())
df = pad_captions(df)
vocab, wtoidx = generate_vocab(df)
captions = np.array(df.caption)
np.save("Dataset/Training_Data", zip(features, captions))
np.save("Dataset/wordmap", wtoidx)
np.save("Dataset/vocab", vocab)
print "Preprocessing Complete"
return get_data(required_files)
|
hl7apy/factories.py | ryoung29/hl7apy | 163 | 11102629 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2018, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
This module contains factory functions for hl7apy base data types.
The functions get the value of the data type as string and return the correct object
"""
from __future__ import absolute_import
from decimal import Decimal, InvalidOperation
from types import FunctionType
from hl7apy import load_library, get_default_validation_level, get_default_version
from hl7apy.exceptions import InvalidDataType
from hl7apy.utils import get_date_info, get_datetime_info, get_timestamp_info
def datatype_factory(datatype, value, version=None, validation_level=None):
"""
Factory function for both base and complex datatypes. It generates the correct object according
to the datatype in input.
It should be noted that if you use the factory it is not possible to specify
some parameters for the datatype (e.g. the format for datetime base datatypes)
    If the value is not valid for the specified datatype and the ``validation_level`` is
    :attr:`hl7apy.consts.VALIDATION_LEVEL.TOLERANT`, it generates an :class:`hl7apy.base_datatypes.ST` object
:type datatype: ``str``
:param datatype: The datatype to be generated
:param value: The value of the datatype
:type version: ``str``
:param version: A valid HL7 version. It must be one of
        :attr:`SUPPORTED_LIBRARIES <hl7apy.SUPPORTED_LIBRARIES>`
:type validation_level: ``int``
:param validation_level: It must be a value from class :attr:`validation_level`
:class:`VALIDATION_LEVEL hl7apy.consts.VALIDATION_LEVEL` or ``None`` to use the default value
:rtype: The type specified in datatype
:raises :exc:`ValueError`: If the ``validation_level`` is
:attr:`VALIDATION_LEVEL.STRICT <hl7apy.consts.VALIDATION_LEVEL.STRICT>`
and the value is not valid for the specified datatype
    :raises :exc:`InvalidDataType <hl7apy.exceptions.InvalidDataType>`: If the ``datatype`` specified is not
valid for the given ``version``
"""
from hl7apy.validation import Validator
if validation_level is None:
validation_level = get_default_validation_level()
if version is None:
version = get_default_version()
lib = load_library(version)
base_datatypes = lib.get_base_datatypes()
factories = base_datatypes.copy()
if 'DT' in factories:
factories['DT'] = date_factory
if 'TM' in factories:
factories['TM'] = timestamp_factory
if 'DTM' in factories:
factories['DTM'] = datetime_factory
if 'NM' in factories:
factories['NM'] = numeric_factory
if 'SI' in factories:
factories['SI'] = sequence_id_factory
try:
factory = factories[datatype]
if isinstance(factory, FunctionType):
return factory(value, base_datatypes[datatype], validation_level=validation_level)
return factory(value, validation_level=validation_level)
except KeyError:
raise InvalidDataType(datatype)
except ValueError as e:
print(e)
if Validator.is_strict(validation_level):
raise e
# TODO: Do we really want this? In that case the parent's datatype must be changed accordingly
return factories['ST'](value)
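# Illustrative calls (added comment, not part of the original module): the factory
# dispatches on the datatype name, e.g.
#   datatype_factory('DT', '19880312')  # -> a DT instance (see date_factory below)
#   datatype_factory('NM', '11.5')      # -> an NM instance wrapping Decimal('11.5')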
def date_factory(value, datatype_cls, validation_level=None):
"""
Creates a :class:`DT <hl7apy.base_datatypes.DT>` object
The value in input must be a string parsable with :meth:`datetime.strptime`.
The date format is chosen according to the length of the value as stated in this table:
+-------+-----------+
|Length |Format |
+=======+===========+
|4 |``%Y`` |
| | |
+-------+-----------+
|6 |``%Y%m`` |
| | |
+-------+-----------+
|8 |``%Y%m%d`` |
| | |
+-------+-----------+
Some examples that work are:
>>> from hl7apy.base_datatypes import DT
>>> date_factory("1974", DT) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.DT object at 0x...>
>>> date_factory("198302", DT) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.DT object at 0x...>
>>> date_factory("19880312", DT) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.DT object at 0x...>
    If the value does not match one of the valid formats it raises :exc:`ValueError`
:type value: ``str``
:param value: the value to assign the date object
:type datatype_cls: `class`
:param value: the :class:`DT <hl7apy.base_datatypes.DT>` class to use. It has to be one implementation of
the different version modules
:type validation_level: ``int``
:param validation_level: It must be a value from class :attr:`validation_level`
:class:`VALIDATION_LEVEL hl7apy.consts.VALIDATION_LEVEL` or ``None`` to use the default value
:rtype: :class:`hl7apy.base_datatypes.DT`
"""
dt_value, fmt = get_date_info(value)
return datatype_cls(dt_value, fmt)
def timestamp_factory(value, datatype_cls, validation_level=None):
"""
Creates a :class:`TM <hl7apy.base_datatypes.TM>` object
The value in input must be a string parsable with :meth:`datetime.strptime`.
It can also have an offset part specified with the format +/-HHMM.
    The offset can be added with all the allowed formats
The date format is chosen according to the length of the value as stated in this table:
+-------+-----------------+
|Length |Format |
+=======+=================+
|2 |``%H`` |
+-------+-----------------+
|4 |``%H%M`` |
+-------+-----------------+
|6 |``%H%M%S`` |
+-------+-----------------+
|10-13 |``%H%M%S.%f`` |
+-------+-----------------+
Some examples that work are:
>>> from hl7apy.base_datatypes import TM
>>> timestamp_factory("12", TM) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.TM object at 0x...>
>>> timestamp_factory("12+0300", TM) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.TM object at 0x...>
>>> timestamp_factory("1204", TM) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.TM object at 0x...>
>>> timestamp_factory("120434", TM) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.TM object at 0x...>
>>> timestamp_factory("120434-0400", TM) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.TM object at 0x...>
    If the value does not match one of the valid formats it raises :exc:`ValueError`
:type value: ``str``
:param value: the value to assign the date object
:type datatype_cls: `class`
:param value: the :class:`TM <hl7apy.base_datatypes.TM>` class to use. It has to be one implementation
of the different version modules
:type validation_level: ``int``
:param validation_level: It must be a value from class :attr:`validation_level`
:class:`VALIDATION_LEVEL hl7apy.consts.VALIDATION_LEVEL` or ``None`` to use the default value
:rtype: :class:`TM <hl7apy.base_datatypes.TM>`
"""
dt_value, fmt, offset, microsec = get_timestamp_info(value)
return datatype_cls(dt_value, fmt, offset, microsec)
def datetime_factory(value, datatype_cls, validation_level=None):
"""
Creates a :class:`hl7apy.base_datatypes.DTM` object
The value in input must be a string parsable with :meth:`datetime.strptime`.
It can also have an offset part specified with the format +HHMM -HHMM.
    The offset can be added with all the allowed formats.
The date format is chosen according to the length of the value as stated in this table:
+-------+-----------------------+
|Length |Format |
+=======+=======================+
|4 |``%Y`` |
+-------+-----------------------+
|6 |``%Y%m`` |
+-------+-----------------------+
|8 |``%Y%m%d`` |
+-------+-----------------------+
|10 |``%Y%m%d%H`` |
+-------+-----------------------+
|12 |``%Y%m%d%H%M`` |
+-------+-----------------------+
|14 |``%Y%m%d%H%M%S`` |
+-------+-----------------------+
|18-21 |``%Y%m%d%H%M%S.%f`` |
+-------+-----------------------+
Some examples that work are:
>>> from hl7apy.base_datatypes import DTM
>>> datetime_factory("1924", DTM) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.DTM object at 0x...>
>>> datetime_factory("1924+0300", DTM) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.DTM object at 0x...>
>>> datetime_factory("19220430", DTM) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.DTM object at 0x...>
>>> datetime_factory("19220430-0400", DTM) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.DTM object at 0x...>
    If the value does not match one of the valid formats it raises :exc:`ValueError`
:type value: ``str``
:param value: the value to assign the date object
:type datatype_cls: `class`
:param value: the :class:`DTM <hl7apy.base_datatypes.DTM>` class to use. It has to be one implementation
of the different version modules
:type validation_level: ``int``
:param validation_level: It must be a value from class :attr:`validation_level`
:class:`VALIDATION_LEVEL hl7apy.consts.VALIDATION_LEVEL` or ``None`` to use the default value
:rtype: :class:`DTM <hl7apy.base_datatypes.DTM>`
"""
dt_value, fmt, offset, microsec = get_datetime_info(value)
return datatype_cls(dt_value, fmt, offset, microsec)
def numeric_factory(value, datatype_cls, validation_level=None):
"""
Creates a :class:`NM <hl7apy.base_datatypes.NM>` object
The value in input can be a string representing a decimal number or a ``float``.
(i.e. a string valid for :class:`decimal.Decimal()`).
If it's not, a :exc:`ValueError` is raised
Also an empty string or ``None`` are allowed
:type value: ``str`` or ``None``
:param value: the value to assign the numeric object
:type datatype_cls: :class:`class`
:param value: the :class:`NM <hl7apy.base_datatypes.NM>` class to use. It has to be one implementation
of the different version modules
:type validation_level: ``int``
:param validation_level: It must be a value from class
:class:`VALIDATION_LEVEL hl7apy.consts.VALIDATION_LEVEL` or ``None`` to use the default value
:rtype: :class:`NM <hl7apy.base_datatypes.NM>`
"""
if not value:
return datatype_cls(validation_level=validation_level)
try:
return datatype_cls(Decimal(value), validation_level=validation_level)
except InvalidOperation:
raise ValueError('{0} is not an HL7 valid NM value'.format(value))
def sequence_id_factory(value, datatype_cls, validation_level=None):
"""
Creates a :class:`SI <hl7apy.base_datatypes.SI>` object
The value in input can be a string representing an integer number or an ``int``.
(i.e. a string valid for ``int()`` ).
If it's not, a :exc:`ValueError` is raised
Also an empty string or ``None`` are allowed
:type value: ``str`` or ``None``
:param value: the value to assign the date object
:type datatype_cls: `class`
:param value: the SI class to use. It has to be loaded from one implementation of the different version
modules
:type validation_level: ``int``
:param validation_level: It must be a value from class
:class:`VALIDATION_LEVEL hl7apy.consts.VALIDATION_LEVEL` or ``None`` to use the default value
:rtype: :class:`SI <hl7apy.base_datatypes.SI>`
"""
if not value:
return datatype_cls(validation_level=validation_level)
try:
return datatype_cls(int(value), validation_level=validation_level)
except ValueError:
raise ValueError('{0} is not an HL7 valid SI value'.format(value))
if __name__ == '__main__':
import doctest
doctest.testmod() |
am4/rom/tools/disa29.py | yshestakov/cpu11 | 118 | 11102650 |
#!/usr/bin/python3
#
# Disassembler for M4 processor Am2900 microcode
# Copyright 2015 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Command line example:
#   disa29.py m4.mif -l m4.lst
#
import argparse
import sys
import re
import os
# concatenation patterns
CON = '#'
CONT = '\n' + CON + '\t'
CONS = ' ' + CON + ' '
MCU = (20, 4)
NAF = (44, 12)
CC = (25, 4)
# mnemo, addr, cond
mcu_t = (
('jz ', False, False),
('cjs ', True, True),
('jmap', False, False),
('cjp ', True, True),
('push', False, True),
('jsrp', True, True),
('cjv ', True, True),
('jrp ', True, True),
('rfct', False, True),
('rpct', True, True),
('crtn', False, True),
('cjpp', True, True),
('ldct', False, False),
('loop', False, True),
('cont', False, False),
('jp ', True, False),
)
# PDP-11 instruction predecoder map entries (0x11 xor correction is done)
map_t = {
0x10: 'undef',
0x11: 'halt',
0x12: 'wait',
0x13: 'rti/rtt',
0x15: 'bpt',
0x16: 'iot',
0x17: 'reset',
0x18: 'mark',
0x19: 'sxt',
0x1A: 'xor',
0x1B: 'sob',
0x1C: 'adc',
0x1D: 'mfps',
0x1E: 'fis',
0x1F: 'jmp',
0x01: 'bis',
0x02: 'cmp',
0x03: 'clr',
0x04: 'ror',
0x05: 'com',
0x06: 'rol',
0x07: 'inc',
0x08: 'sub',
0x09: 'dec',
0x0A: 'asr',
0x0B: 'neg',
0x0C: 'asl',
0x0D: 'bit',
0x0E: 'br',
0x0F: 'bic',
0x30: 'bicb',
0x31: 'bis Rs, Rd',
0x32: 'bisb',
0x33: 'clr Rd',
0x34: 'clrb',
0x35: 'com Rd',
0x36: 'comb',
0x37: 'inc Rd',
0x38: 'incb',
0x39: 'dec Rd',
0x3A: 'decb',
0x3B: 'neg Rd',
0x3C: 'negb',
0x3D: 'tst',
0x3E: 'tstb',
0x3F: 'mtps',
0x20: 'mov',
0x21: 'mov Rs, Rd',
0x22: 'movb',
0x23: 'movb Rd, Rs',
0x24: 'add',
0x25: 'add Rs, xx',
0x26: 'jsr',
0x27: 'rts',
0x28: 'emt',
0x29: 'trap',
0x2A: 'sub Rd, Rs',
0x2B: 'cmp Rs, Rd',
0x2C: 'cmpb',
0x2D: 'bit Rs, Rd',
0x2E: 'bitb',
0x2F: 'bic Rs, Rd',
0x50: 'swab',
0x51: 'clx',
0x52: 'sex',
0x53: 'ash',
0x54: 'ashc',
0x55: 'swab Rd',
0x56: 'mul',
0x59: 'div',
0x5B: 'sbc Rd',
0x5C: 'adc Rd',
0x40: 'adcb',
0x42: 'sbc',
0x43: 'sbcb',
0x44: 'ror Rd',
0x45: 'rorb',
0x47: 'rol Rd',
0x48: 'rolb',
0x4A: 'asr Rd',
0x4B: 'asrb',
0x4D: 'asl Rd',
0x4E: 'aslb',
0x65: 'add Rs, Rd',
0x7D: 'tst Rd'
}
OR_nEN = (24, 1)
OMX = (41, 3)
omx_t = (
'OR_MD', # destination mode
'OR_MS', # source mode
'OR_RR', # register mode
'OR_IV', # interrupt vector
'OR_LD', # bootloader mode
'OR_BT', # byte exchange
'OR_TC', # timer/counter
'OR_R67' # not SP/PC
)
ALU_S = (0, 3) # ALU source control
ALU_F = (3, 3) # ALU function control
ALU_Q = (6, 3) # ALU destination control
ALU_M = (0, 10) # integrated for tst and none
ALU_TST = 0b0001011100 # or 0, NOP, ZA
ALU_TSTD = 0b0001011111 # or 0, NOP, DZ
ALU_NOPA = 0b0001100111 # and 0, NOP, DZ
ALU_N = (9, 11)
alus_t = ('AQ', 'AB', 'ZQ', 'ZB', 'ZA', 'DA', 'DQ', 'DZ')
aluf_t = ('add', 'subr', 'subs', 'or', 'and', 'nand', 'xor', 'xnor')
aluq_t = ('QREG', 'NOP', 'RAMA', 'RAMF', 'RAMQD', 'RAMD', 'RAMQU', 'RAMU')
ALU_CI = (9, 1)
ALU_AS = (10, 1)
ALU_BS = (11, 1)
ALU_A = (12, 4)
ALU_B = (16, 4)
porta_t = ('Ad0', 'Ad1', 'As0', 'As1')
portb_t = ('Bs0', 'Bs1', 'Bd0', 'Bd1')
cc_t = (
'CCC', 'CCV', 'CCN', 'CCZ', 'CCT', 'CCIN', 'CCE', 'CCA',
'CCCN', 'CCVN', 'CCNN', 'CCZN', 'CCTN', 'CCI', 'CCEN', 'CCAN'
)
D_MUX = (33, 2)
D_IMM = (40, 16)
S_MUX = (44, 12)
dmux_t = ('dpsw', 'dbus', 'dimm', 'dswp')
REG_C = (29, 4)
pswc_t = (
'SPSW', 'WPSW', 'BPSW', 'LPSW',
'SPSW' + CONS + 'CSAV', 'WPSW' + CONS + 'CSAV',
'BPSW' + CONS + 'CSAV', 'LPSW' + CONS + 'CSAV'
)
TTL_M = (50, 3)
ttl_t = (
'NONE0', 'INITC', 'REFC', 'REFS',
'INITS', 'NONE5', 'ACLOC', 'EVENTC'
)
QIO = (35, 9)
qio_t = (
'WAIT', 'IOEN', 'DOUT', 'SYNC',
'WFIN', 'IAKO', 'DIN', 'RDIN', 'WTBT'
)
#
# Table scanned from documentation
#
# ASHCR 1111 0101 0110 -> F56, RAMQD
# RORB 0101 X1X1 11XX -> *55C, RAMD
# ASRB 0111 X1X1 0X00 -> *750, RAMD
# ROR 1001 X101 11XX -> *95C, RAMD
# ASR 1011 X101 0110 -> *B56, RAMD
# ASHR 1011 X101 0110 -> *B56, RAMD - dup
# ASHCL 0011 1010 1010 -> 3AA, RAMQU
# ASLB 0010 1X11 1001 -> *2B9, RAMU
# ROLB 0010 1X11 1011 -> *2BB, RAMU
# ROL 0011 1X10 1011 -> *3AB, RAMU
# ASL 0011 1X10 1001 -> *3A9, RAMU
# ASHL 0011 1X10 1001 -> *3A9, RAMU - dup
#
ash_t = { # actually used in microcode
0x2B9: ('ASLB', 'U'), # 0010 1X11 1001 RAMU
0x2BB: ('ROLB', 'U'), # 0010 1X11 1011 RAMU
0x3A9: ('ASL', 'U'), # 0011 1X10 1001 RAMU
0x3AA: ('ASHCL', 'U'), # 0011 1010 1010 RAMQU
0x3AB: ('ROL', 'U'), # 0011 1X10 1011 RAMU
0x55C: ('RORB', 'D'), # 0101 X1X1 11XX RAMD
0x756: ('ASRB', 'D'), # 0111 X1X1 0X?0 RAMD
0x95C: ('ROR', 'D'), # 1001 X101 11XX RAMD
0xB56: ('ASR', 'D'), # 1011 X101 0110 RAMD
0xF55: ('ASHXR', 'D'), # 1111 0101 0101 RAMQD
0xF56: ('ASHCR', 'D'), # 1111 0101 0110 RAMQD
}
def zhex(value, width):
s = hex(value)[2:].upper()
if width == 0:
return s
return s.rjust((width + 3) // 4, '0')
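# Added example: zhex(0x2B, 8) -> '2B' and zhex(5, 12) -> '005'; width is the field
# width in bits, so the result is zero-padded to ceil(width / 4) hex digits.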
class Bf(object):
''' Arbitrary records data buffer '''
def __init__(self, size=1024):
self.ecnt = 0 # error counter
self.wcnt = 0 # warning counter
self.width = 8 # record width
self.size = size # buffer size
self.aradx = 16 # address radix
self.dradx = 16 # data radix
self.data = [None] * size
self.npas = 0
self.flst = None
self.label = []
self.word = 0
def close_file(self, file):
if file is not None:
file.close()
return
def load_mif(self, name):
#
# Add the default file name extension if needed
#
if not os.path.splitext(name)[1]:
name += '.mif'
try:
f = open(name, 'r', -1, None, None)
except OSError as err:
raise RuntimeError(err)
#
# Compiled regular expressions
#
re_comment = re.compile(r'(?:--)')
re_depth = re.compile(r'DEPTH\s*=\s*([0-9]+)\s*;\s*$')
re_width = re.compile(r'WIDTH\s*=\s*([0-9]+)\s*;\s*$')
re_aradx = re.compile(r'ADDRESS_RADIX\s*=\s*(HEX|DEC|OCT|BIN)\s*;\s*$')
re_dradx = re.compile(r'DATA_RADIX\s*=\s*(HEX|DEC|OCT|BIN)\s*;\s*$')
re_skip = re.compile(r'(BEGIN$|^END|^CONTENT)')
re_single = re.compile(r'([A-Z0-9]+)\s*:\s*([A-Z0-9]+)\s*;\s*$')
re_range = re.compile(
r'\[([A-Z0-9]+)..([A-Z0-9]+)\]\s*:\s*([A-Z0-9]+)\s*;\s*$')
lnum = 0
for text in f:
lnum += 1
line = text.strip('\r\n \t')
if not line:
continue
line = line.upper()
match = re_comment.match(line)
if match is not None:
line = line[0:match.start()]
line = line.strip('\r\n \t')
if not line:
continue
match = re_single.match(line)
if match is not None:
addr = int(match.group(1), self.aradx)
data = int(match.group(2), self.dradx)
if addr >= self.size:
raise SyntaxError('line %d addr out of range: %s' %
(lnum, text))
if data >= 1 << self.width:
raise SyntaxError('line %d data out of range: %s' %
(lnum, text))
                self.data[addr] = data
continue
match = re_range.match(line)
if match is not None:
beg = int(match.group(1), self.aradx)
end = int(match.group(2), self.aradx) + 1
data = int(match.group(3), self.dradx)
for addr in range(beg, end):
if addr >= self.size:
raise SyntaxError('line %d addr out of range: %s' %
(lnum, text))
if data >= 1 << self.width:
raise SyntaxError('line %d data out of range: %s' %
(lnum, text))
                    self.data[addr] = data
continue
match = re_skip.match(line)
if match is not None:
continue
match = re_depth.match(line)
if match is not None:
self.size = int(match.group(1), 10)
self.data = [None] * self.size
continue
match = re_width.match(line)
if match is not None:
self.width = int(match.group(1), 10)
continue
match = re_aradx.match(line)
if match is not None:
radix = match.group(1)
if radix == 'HEX':
self.aradx = 16
continue
if radix == 'DEC':
self.aradx = 10
continue
if radix == 'OCT':
self.aradx = 8
continue
if radix == 'BIN':
self.aradx = 2
continue
raise SyntaxError('line %d invalid radix: %s' % (lnum, text))
match = re_dradx.match(line)
if match is not None:
radix = match.group(1)
if radix == 'HEX':
self.dradx = 16
continue
if radix == 'DEC':
self.dradx = 10
continue
if radix == 'OCT':
self.dradx = 8
continue
if radix == 'BIN':
self.dradx = 2
continue
raise SyntaxError('line %d invalid radix: %s' % (lnum, text))
raise SyntaxError('line %d syntax error: %s' % (lnum, text))
self.close_file(f)
return
def set_pass(self, npas):
self.npas = npas
if npas == 1:
self.label = []
return
def set_list(self, flst):
self.flst = flst
return
def fiw(self, field):
start = field[0]
width = field[1]
assert(start >= 0)
assert(width >= 0)
assert(start < self.width)
assert(start + width <= self.width)
return (self.word >> start) & ((1 << width) - 1)
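    # Added example: for the MCU field (start=20, width=4) and word=0x00F00000,
    # fiw() returns (0x00F00000 >> 20) & 0xF == 0xF, i.e. the 4-bit
    # microsequencer opcode packed into bits 20..23 of the microword.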
def get_raw(self, addr):
bmask = bin(self.word)[2:]
bmask = bmask.rjust(self.width, '0')
line = ''
if addr & 0x7 == 0 and map_t.get(addr >> 3) is not None:
line += '; "%s" opcode\n' % map_t.get(addr >> 3)
line += '; %04X\t%s.%s.%s.%s\n' % (addr, bmask[0:16], bmask[16:32],
bmask[32:47], bmask[47:])
return line
def get_mcu(self, addr, mcu):
line = ''
if mcu_t[mcu][1]:
naf = self.fiw(NAF)
if naf >= self.size:
line = '; Warning: next address is out of range\n'
self.wcnt += 1
if addr in self.label:
line += 'L%03X:' % addr
line += '\t%s' % mcu_t[mcu][0]
if mcu_t[mcu][1]:
line += '\tL%03X' % naf
if mcu == 0:
return line
if self.fiw(OR_nEN) == 0:
if mcu_t[mcu][1]:
line += ', '
else:
line += '\t'
line += omx_t[self.fiw(OMX)]
if self.fiw(CC) or mcu_t[mcu][2]:
line += CONS + cc_t[self.fiw(CC)]
elif self.fiw(CC) or mcu_t[mcu][2]:
if mcu_t[mcu][1]:
line += ', '
else:
line += '\t'
line += cc_t[self.fiw(CC)]
return line
def get_alu(self):
bshown = 0
line = CONT
alum = self.fiw(ALU_M)
alus = self.fiw(ALU_S)
aluq = self.fiw(ALU_Q)
if alum == ALU_TST:
line += 'tst\t'
elif alum == ALU_TSTD:
line += 'tstd'
if self.fiw(ALU_N) == 0:
return line
line += '\t'
elif alum == ALU_NOPA:
line += 'nopa'
if self.fiw(ALU_N) == 0:
if self.fiw(D_MUX) == 3 and (self.fiw(REG_C) & 1) == 0:
line = ''
return line
line += '\t'
else:
line += aluf_t[self.fiw(ALU_F)] + '\t'
if (alum != ALU_TST and alum != ALU_TSTD) or \
self.fiw(ALU_B) or self.fiw(ALU_BS) or \
alus == 1 or alus == 3 or aluq >= 2:
if self.fiw(ALU_BS):
line += portb_t[self.fiw(ALU_B) & 3]
else:
line += 'B%d' % self.fiw(ALU_B)
bshown = 1
if self.fiw(ALU_AS) or (alus & 3) <= 1 or aluq == 2:
if bshown:
line += CONS
if self.fiw(ALU_AS):
line += porta_t[self.fiw(ALU_A) & 3]
else:
line += 'A%d' % self.fiw(ALU_A)
if alum == ALU_TST or alum == ALU_TSTD or alum == ALU_NOPA:
return line
line += ', C%d' % self.fiw(ALU_CI)
line += ', ' + aluq_t[aluq]
line += ', ' + alus_t[alus]
return line
def get_dmux(self):
line = ''
dmux = self.fiw(D_MUX)
alum = self.fiw(ALU_M)
if alum == ALU_NOPA and dmux == 3:
return line
if alum == ALU_TST and dmux == 3:
return line
if self.fiw(ALU_S) >= 5:
line = CONT + dmux_t[dmux]
if dmux_t[dmux] == 'dimm':
line += '\t0x%X' % self.fiw(D_IMM)
if self.fiw(ALU_Q) != 2 and dmux == 3:
line += '\n; Warning: dswp combinatorial loop'
self.wcnt += 1
else:
if dmux == 3:
line = ''
else:
line = CONT + dmux_t[dmux]
if dmux_t[dmux] == 'dimm':
line += '\t0x%X' % self.fiw(D_IMM)
return line
def get_shift(self):
line = ''
q = self.fiw(ALU_Q)
s = self.fiw(S_MUX)
if q < 4:
return line
line = CONT + 'shift\t'
sh = ash_t.get(s)
if sh is None:
line += 'B#' + bin(s)[2:]
line += '\n; Warning: unrecognized shift configuration'
self.wcnt += 1
return line
line += sh[0]
if sh[1] == 'U' and q & 2 != 2:
line += '\n; Warning: shift configuration requires RAMU/RAMQU'
self.wcnt += 1
return line
if sh[1] == 'D' and q & 2 != 0:
line += '\n; Warning: shift configuration requires RAMD/RAMQD'
self.wcnt += 1
return line
return line
def get_rc(self):
line = ''
rc = self.fiw(REG_C)
if rc == 0:
return line
if rc & 1:
line = CONT + 'cpsw\t' + pswc_t[rc >> 1]
else:
if rc & 2:
line += CONT + 'pl\t' + '0x%X' % self.fiw(NAF)
if rc & 4:
line += CONT + 'ir'
if rc & 8:
line += CONT + 'ttl'
if self.fiw(TTL_M):
line += '\t' + ttl_t[self.fiw(TTL_M)]
return line
def get_io(self):
shown = 0
line = ''
rc = self.fiw(QIO)
if rc & 0x18 == 0x18:
line = CONT + 'dreq'
if rc & 3 == 2 and rc & 0x8:
if self.fiw(NAF) == 0x2C and self.fiw(MCU) == 0xF:
line += CONT + 'nqio' # workaround for inactive silly RDIN
return line
line = CONT + 'qio'
rc = rc ^ 0x8A
for i in range(9):
if rc & (1 << i):
if shown:
line += CONS
else:
shown = 1
line += '\t'
line += qio_t[i]
if rc & 0x80 == 0:
if shown:
line += CONS
else:
line += '\t'
line += 'NORD'
return line
def get_hint(self):
d = dmux_t[self.fiw(D_MUX)]
d = d[1:].upper()
if d == 'IMM':
d = '0x' + zhex(self.fiw(D_IMM), 16)
s = alus_t[self.fiw(ALU_S)]
r = s[0]
s = s[1]
if self.fiw(ALU_AS):
if self.fiw(ALU_A) & 2:
a = 'Rs'
else:
a = 'Rd'
else:
a = 'R%d' % self.fiw(ALU_A)
if self.fiw(ALU_BS):
if self.fiw(ALU_B) & 2:
b = 'Rd'
else:
b = 'Rs'
else:
b = 'R%d' % self.fiw(ALU_B)
if r == 'A':
r = a
elif r == 'D':
r = d
else:
assert(r == 'Z')
if s == 'A':
s = a
elif s == 'B':
s = b
elif s == 'Q':
s = 'Q'
else:
assert(s == 'Z')
f = self.fiw(ALU_F)
c = self.fiw(ALU_CI)
if f == 0: # add R + S
if c:
c = ' + 1'
else:
c = ''
if r == 'Z':
f = s + c
elif s == 'Z':
f = r + c
else:
f = r + ' + ' + s + c
elif f == 1: # subr S - R
if c:
c = ''
else:
c = ' - 1'
if r == 'Z':
f = s + c
elif s == 'Z':
f = '-' + r + c
else:
f = s + ' - ' + r + c
elif f == 2: # subs R - S
if c:
c = ''
else:
c = ' - 1'
if s == 'Z':
f = r + c
elif r == 'Z':
f = '-' + s + c
else:
f = r + ' - ' + s + c
elif f == 3: # or R | S
if r == 'Z':
f = s
elif s == 'Z':
f = r
else:
f = r + ' | ' + s
elif f == 4: # and R & S
if r == 'Z' or s == 'Z':
f = 'Z'
else:
f = r + ' & ' + s
elif f == 5: # nand ~R & S
f = '~' + r + ' & ' + s
elif f == 6: # xor R ^ S
f = r + ' ^ ' + s
else: # nxor ~R ^ S
f = '~' + r + ' ^ ' + s
q = self.fiw(ALU_Q)
if q == 2:
y = a
else:
y = f
if q == 2 or q == 3:
ram = '='
elif q == 4 or q == 5:
ram = '>>='
elif q == 6 or q == 7:
ram = '<<='
else:
ram = ''
if q == 0:
q = '='
elif q == 4:
q = '>>='
elif q == 6:
q = '<<='
else:
q = ''
if f == 'SWP':
f += '(%s)' % y
items = []
if ram != '':
items.append('%s %s %s' % (b, ram, f))
if q != '':
if q == '=':
items.append('Q %s %s' % (q, f))
else:
items.append('Q %s Q' % q)
rc = self.fiw(REG_C)
if rc & 7 == 7:
items.append('PSW = %s' % y)
if self.fiw(ALU_CI) and self.fiw(ALU_F) & 0x4:
items.append('SXT')
if not items:
return ''
line = '; ' + items[0]
for s in items[1:]:
line += ', %s' % s
return line + '\n'
def get_loc(self, addr):
return '\t.loc\t0x%03X\n' % addr
def do_disasm(self):
for addr in range(self.size):
data = self.data[addr]
if data is None:
continue
self.word = data
#
# Gather target addresses for the label database
#
line = ''
mcu = self.fiw(MCU)
if mcu == 0b0000 and data != 0:
line += '\n; Warning: jump zero with not zero word at %X' % addr
if self.npas == 1:
if mcu_t[mcu][1]:
target = self.fiw(NAF)
if target not in self.label:
self.label.append(target)
continue
if self.npas != 2:
continue
#
# Build the listing
#
line += self.get_raw(addr)
#
# Check page boundary crossing
#
# if addr & 7 == 7 and \
# mcu != 15 and mcu != 7 and mcu != 2 and \
# not (mcu == 10 and self.fiw(CC) == 14):
# line += '; Warning: not jp/jrp/jmap/crtn at page boundary\n'
# self.wcnt += 1
#
# Provide the hint comment
#
line += self.get_hint()
#
# Provide the location counter directive
#
line += self.get_loc(addr)
if self.word:
#
# Analyze microsequencer instruction
#
line += self.get_mcu(addr, mcu)
#
# Analyze ALU opcode and operands
#
line += self.get_alu()
#
# Analyze data mux
#
line += self.get_dmux()
#
# Analyze shift mux field
#
line += self.get_shift()
#
# Analyze PSW and register control
#
line += self.get_rc()
#
# Analyze IO transaction
#
line += self.get_io()
else:
line += '\tresv\n'
#
# Output result to listing file
#
print('%s\n' % line, file=self.flst)
if self.npas == 2:
#
# Final .end directive
#
print('\t.end', file=self.flst)
#
# Show final statistics
#
line = '\r\nErrors: %d\r\nWarnings: %d\r\n' % (self.ecnt, self.wcnt)
if self.ecnt or self.wcnt:
print(line, file=sys.stderr)
return
def createParser():
p = argparse.ArgumentParser(
description='Am2900 M4 Microcode Disassembler, '
'Version 20.06a, (c) 1801BM1')
p.add_argument('mif', nargs=1,
help='input microcode file', metavar='file')
p.add_argument('-l', '--lst', help='output listing file',
type=argparse.FileType('w'), nargs='?',
default = sys.stdout, metavar='file')
return p
def main():
parser = createParser()
params = parser.parse_args()
try:
code = Bf()
#
# Load the microcode from source file
#
code.load_mif(params.mif[0])
code.set_list(params.lst)
code.set_pass(1)
code.do_disasm()
code.set_pass(2)
code.do_disasm()
except RuntimeError as err:
print('\r\nerror: %s' % err, file=sys.stderr)
sys.exit(1)
except SyntaxError as err:
print('\r\nerror: %s' % err, file=sys.stderr)
sys.exit(1)
if __name__ == '__main__':
main()
|
examples/hf_transformers/custom/loss.py | AhmedHussKhalifa/torchdistill | 576 | 11102701 | import torch
from torch import nn
from torch.nn import functional
from torchdistill.losses.single import register_org_loss
from torchdistill.losses.util import register_func2extract_org_output
@register_func2extract_org_output
def extract_transformers_loss(org_criterion, student_outputs, teacher_outputs, targets, uses_teacher_output, **kwargs):
org_loss_dict = dict()
org_loss_dict['loss'] = student_outputs.loss
return org_loss_dict
@register_org_loss
class KDLoss4Transformer(nn.KLDivLoss):
"""
"Distilling the Knowledge in a Neural Network"
"""
def __init__(self, temperature, alpha=None, reduction='batchmean', **kwargs):
super().__init__(reduction=reduction)
self.temperature = temperature
self.alpha = alpha
        self.beta = 1 - alpha if alpha is not None else None
def compute_soft_loss(self, student_logits, teacher_logits):
return super().forward(torch.log_softmax(student_logits / self.temperature, dim=1),
torch.softmax(teacher_logits / self.temperature, dim=1))
def compute_hard_loss(self, logits, positions, ignored_index):
return functional.cross_entropy(logits, positions, reduction=self.cel_reduction, ignore_index=ignored_index)
def forward(self, student_output, teacher_output, targets=None, *args, **kwargs):
soft_loss = self.compute_soft_loss(student_output.logits, teacher_output.logits)
if self.alpha is None or self.alpha == 0 or targets is None:
return soft_loss
hard_loss = student_output.loss
return self.alpha * hard_loss + self.beta * (self.temperature ** 2) * soft_loss
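# Added note: the value returned above is the standard Hinton-style distillation
# objective,
#   L = alpha * hard + (1 - alpha) * T^2 * KL(softmax(z_t / T) || softmax(z_s / T)),
# where "hard" is the student's own (typically cross-entropy) loss and z_t / z_s are
# teacher / student logits; the T^2 factor keeps the soft-target gradient scale
# comparable to the hard-label term as the temperature T varies.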
|
falkon/hopt/optimization/gd_train.py | mohamad-amin/falkon | 130 | 11102705 | import time
from functools import reduce
from typing import Dict, List, Any, Optional
import numpy as np
import torch
from falkon.hopt.objectives.objectives import HyperoptObjective, FakeTorchModelMixin
from falkon.hopt.optimization.reporting import pred_reporting, EarlyStop, epoch_bookkeeping
__all__ = [
"train_complexity_reg",
"train_complexity_reg_mb",
]
def hp_grad(model: FakeTorchModelMixin, *loss_terms, accumulate_grads=True, verbose=True, losses_are_grads=False):
grads = []
hparams = model.parameters()
if not losses_are_grads:
if verbose:
for loss in loss_terms:
grads.append(torch.autograd.grad(loss, hparams, retain_graph=True, allow_unused=True))
else:
loss = reduce(torch.add, loss_terms)
grads.append(torch.autograd.grad(loss, hparams, retain_graph=False))
else:
grads = loss_terms
if accumulate_grads:
for g in grads:
for i in range(len(hparams)):
hp = hparams[i]
if hp.grad is None:
hp.grad = torch.zeros_like(hp)
if g[i] is not None:
hp.grad += g[i]
return grads
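# Added note: with accumulate_grads=True the per-term gradients are summed into each
# hyperparameter's .grad field, so a subsequent optimizer.step() sees the gradient of
# the total objective; with losses_are_grads=True the "loss terms" are already
# gradients and are accumulated as-is.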
def create_optimizer(opt_type: str, model: HyperoptObjective, learning_rate: float):
center_lr_div = 1
schedule = None
named_params = model.named_parameters()
print("Creating optimizer with the following parameters:")
for k, v in named_params.items():
print(f"\t{k} : {v.shape}")
if opt_type == "adam":
if 'penalty' not in named_params:
opt_modules = [
{"params": named_params.values(), 'lr': learning_rate}
]
else:
opt_modules = []
if 'sigma' in named_params:
opt_modules.append({"params": named_params['sigma'], 'lr': learning_rate})
if 'penalty' in named_params:
opt_modules.append({"params": named_params['penalty'], 'lr': learning_rate})
if 'centers' in named_params:
opt_modules.append({
"params": named_params['centers'], 'lr': learning_rate / center_lr_div})
opt_hp = torch.optim.Adam(opt_modules)
# schedule = torch.optim.lr_scheduler.ReduceLROnPlateau(opt_hp, factor=0.5, patience=1)
# schedule = torch.optim.lr_scheduler.MultiStepLR(opt_hp, [2, 10, 40], gamma=0.5)
schedule = torch.optim.lr_scheduler.StepLR(opt_hp, 200, gamma=0.1)
elif opt_type == "sgd":
opt_hp = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
elif opt_type == "lbfgs":
if model.losses_are_grads:
raise ValueError("L-BFGS not valid for model %s" % (model))
opt_hp = torch.optim.LBFGS(model.parameters(), lr=learning_rate,
history_size=100, )
elif opt_type == "rmsprop":
opt_hp = torch.optim.RMSprop(model.parameters(), lr=learning_rate)
else:
raise ValueError("Optimizer type %s not recognized" % (opt_type))
return opt_hp, schedule
def train_complexity_reg(
Xtr: torch.Tensor,
Ytr: torch.Tensor,
Xts: torch.Tensor,
Yts: torch.Tensor,
model: HyperoptObjective,
err_fn,
learning_rate: float,
num_epochs: int,
cuda: bool,
verbose: bool,
loss_every: int,
early_stop_epochs: int,
cgtol_decrease_epochs: Optional[int],
optimizer: str,
retrain_nkrr: bool = False,
) -> List[Dict[str, float]]:
if cuda:
Xtr, Ytr, Xts, Yts = Xtr.cuda(), Ytr.cuda(), Xts.cuda(), Yts.cuda()
opt_hp, schedule = create_optimizer(optimizer, model, learning_rate)
print(f"Starting hyperparameter optimization on model {model}.")
print(f"Will run for {num_epochs} epochs with {opt_hp} optimizer.")
logs = []
cum_time = 0
with torch.autograd.profiler.profile(enabled=False) as prof:
for epoch in range(num_epochs):
t_start = time.time()
grads: Any = None
losses: Any = None
def closure():
opt_hp.zero_grad()
nonlocal grads, losses
losses = model.hp_loss(Xtr, Ytr)
grads = hp_grad(model, *losses, accumulate_grads=True,
losses_are_grads=model.losses_are_grads, verbose=False)
loss = reduce(torch.add, losses)
return float(loss)
try:
opt_hp.step(closure)
except RuntimeError as e:
if "Cholesky" not in str(e):
raise e
print(f"Cholesky failure at epoch {epoch}. Exiting optimization!")
break
cum_time += time.time() - t_start
try:
epoch_bookkeeping(epoch=epoch, model=model, data={'Xtr': Xtr, 'Ytr': Ytr, 'Xts': Xts, 'Yts': Yts},
err_fn=err_fn, grads=grads, losses=losses, loss_every=loss_every,
early_stop_patience=early_stop_epochs, schedule=schedule, minibatch=None,
logs=logs, cum_time=cum_time, verbose=verbose,
accuracy_increase_patience=cgtol_decrease_epochs)
except EarlyStop as e:
print(e)
break
finally:
del grads, losses
torch.cuda.empty_cache()
if prof is not None:
print(prof.key_averages().table())
if retrain_nkrr:
print(f"Final retrain after {num_epochs} epochs:")
pred_dict = pred_reporting(
model=model, Xtr=Xtr, Ytr=Ytr, Xts=Xts, Yts=Yts,
err_fn=err_fn, epoch=num_epochs, cum_time=cum_time,
resolve_model=True)
logs.append(pred_dict)
return logs
def train_complexity_reg_mb(
Xtr: torch.Tensor,
Ytr: torch.Tensor,
Xts: torch.Tensor,
Yts: torch.Tensor,
model: HyperoptObjective,
err_fn,
learning_rate: float,
num_epochs: int,
cuda: bool,
verbose: bool,
loss_every: int,
early_stop_epochs: int,
cgtol_decrease_epochs: Optional[int],
optimizer: str,
minibatch: int,
retrain_nkrr: bool = False,
) -> List[Dict[str, float]]:
Xtrc, Ytrc, Xtsc, Ytsc = Xtr, Ytr, Xts, Yts
if cuda:
Xtrc, Ytrc, Xtsc, Ytsc = Xtr.cuda(), Ytr.cuda(), Xts.cuda(), Yts.cuda()
opt_hp, schedule = create_optimizer(optimizer, model, learning_rate)
print(f"Starting hyperparameter optimization on model {model}.")
print(f"Will run for {num_epochs} epochs with {opt_hp} optimizer, "
f"mini-batch size {minibatch}.")
logs = []
cum_time = 0
mb_indices = np.arange(Xtr.shape[0])
for epoch in range(num_epochs):
t_start = time.time()
np.random.shuffle(mb_indices)
for mb_start in range(0, Xtr.shape[0], minibatch):
Xtr_batch = (Xtr[mb_indices[mb_start: mb_start + minibatch], :]).contiguous()
Ytr_batch = (Ytr[mb_indices[mb_start: mb_start + minibatch], :]).contiguous()
if cuda:
Xtr_batch, Ytr_batch = Xtr_batch.cuda(), Ytr_batch.cuda()
opt_hp.zero_grad()
loss = model.hp_loss(Xtr_batch, Ytr_batch)[0] # There is only one loss!
loss.backward()
opt_hp.step()
cum_time += time.time() - t_start
try:
epoch_bookkeeping(epoch=epoch, model=model, data={'Xtr': Xtrc, 'Ytr': Ytrc, 'Xts': Xtsc, 'Yts': Ytsc},
err_fn=err_fn, grads=None, losses=None, loss_every=loss_every,
early_stop_patience=early_stop_epochs, schedule=schedule, minibatch=minibatch,
logs=logs, cum_time=cum_time, verbose=verbose,
accuracy_increase_patience=cgtol_decrease_epochs)
except EarlyStop as e:
print(e)
break
if retrain_nkrr:
print(f"Final retrain after {num_epochs} epochs:")
pred_dict = pred_reporting(
model=model, Xtr=Xtrc, Ytr=Ytrc, Xts=Xtsc, Yts=Ytsc,
err_fn=err_fn, epoch=num_epochs, cum_time=cum_time,
resolve_model=True)
logs.append(pred_dict)
return logs
|
libsortvis/algos/mergesort.py | tknuth/sortvis | 117 | 11102767 |
def mergesort(lst, left=0, right=None):
if right is None:
right = len(lst) - 1
if left >= right:
return
middle = (left + right) // 2
mergesort(lst, left, middle)
mergesort(lst, middle + 1, right)
i, end_i, j = left, middle, middle + 1
while i <= end_i and j <= right:
if lst[i] < lst[j]:
i += 1
continue
lst[i], lst[i+1:j+1] = lst[j], lst[i:j]
lst.log()
i, end_i, j = i + 1, end_i + 1, j + 1
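# Added walk-through (not part of the original algorithm file): merging is done in
# place by rotation. For lst = [3, 4, 1, 2] the two sorted halves are [3, 4] and
# [1, 2]; whenever the right element is smaller, the slice assignment moves it in
# front of the remaining left run: [3, 4, 1, 2] -> [1, 3, 4, 2] -> [1, 2, 3, 4],
# with lst.log() recording a snapshot after each rotation.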
|
tests/__init__.py | peddamat/home-assistant-supervisor-test | 597 | 11102774 |
"""Supervisor Testframework."""
|
etl/parsers/etw/Microsoft_Windows_OneX.py | IMULMUL/etl-parser | 104 | 11102831 | # -*- coding: utf-8 -*-
"""
Microsoft-Windows-OneX
GUID : ab0d8ef9-866d-4d39-b83f-453f3b8f6325
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
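# Added note: each class below binds one (provider GUID, event id, version) triple to
# a construct Struct describing that event's user-data layout, so the ETL parser can
# decode Microsoft-Windows-OneX event payloads field by field.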
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=1, version=0)
class Microsoft_Windows_OneX_1_0(Etw):
pattern = Struct(
"PortId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=2, version=0)
class Microsoft_Windows_OneX_2_0(Etw):
pattern = Struct(
"PortId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=3, version=0)
class Microsoft_Windows_OneX_3_0(Etw):
pattern = Struct(
"PortId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=4, version=0)
class Microsoft_Windows_OneX_4_0(Etw):
pattern = Struct(
"PortId" / Int32ul,
"WinError" / Int32ul,
"ReasonCode" / Int32ul,
"EAPMethodType" / Int8ul,
"RootCauseString" / WString
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=5, version=0)
class Microsoft_Windows_OneX_5_0(Etw):
pattern = Struct(
"PortId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=6, version=0)
class Microsoft_Windows_OneX_6_0(Etw):
pattern = Struct(
"PortId" / Int32ul,
"WinError" / Int32ul,
"ReasonCode" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=7, version=0)
class Microsoft_Windows_OneX_7_0(Etw):
pattern = Struct(
"PortId" / Int32ul,
"UserDataSize" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=8, version=0)
class Microsoft_Windows_OneX_8_0(Etw):
pattern = Struct(
"PortId" / Int32ul,
"UserDataSize" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=9, version=0)
class Microsoft_Windows_OneX_9_0(Etw):
pattern = Struct(
"PortId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=10, version=0)
class Microsoft_Windows_OneX_10_0(Etw):
pattern = Struct(
"PortId" / Int32ul,
"Response" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=11, version=0)
class Microsoft_Windows_OneX_11_0(Etw):
pattern = Struct(
"PortId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=12, version=0)
class Microsoft_Windows_OneX_12_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"Location" / Int32ul,
"Context" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=13, version=0)
class Microsoft_Windows_OneX_13_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"Location" / Int32ul,
"Context" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=14, version=0)
class Microsoft_Windows_OneX_14_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"Location" / Int32ul,
"Context" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=15, version=0)
class Microsoft_Windows_OneX_15_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"Location" / Int32ul,
"Context" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=16, version=0)
class Microsoft_Windows_OneX_16_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"Location" / Int32ul,
"Context" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=17, version=0)
class Microsoft_Windows_OneX_17_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"Location" / Int32ul,
"Context" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=18, version=0)
class Microsoft_Windows_OneX_18_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"Location" / Int32ul,
"Context" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=19, version=0)
class Microsoft_Windows_OneX_19_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"Location" / Int32ul,
"Context" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=20, version=0)
class Microsoft_Windows_OneX_20_0(Etw):
pattern = Struct(
"PortId" / Int32ul,
"UIRequestCode" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=21, version=0)
class Microsoft_Windows_OneX_21_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"Location" / Int32ul,
"Context" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=22, version=0)
class Microsoft_Windows_OneX_22_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"Location" / Int32ul,
"Context" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=23, version=0)
class Microsoft_Windows_OneX_23_0(Etw):
pattern = Struct(
"PortId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=24, version=0)
class Microsoft_Windows_OneX_24_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"Location" / Int32ul,
"Context" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=25, version=0)
class Microsoft_Windows_OneX_25_0(Etw):
pattern = Struct(
"WarningCode" / Int32ul,
"Location" / Int32ul,
"Context" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=26, version=0)
class Microsoft_Windows_OneX_26_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"Location" / Int32ul,
"Context" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=27, version=0)
class Microsoft_Windows_OneX_27_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"Location" / Int32ul,
"Context" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=28, version=0)
class Microsoft_Windows_OneX_28_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"Location" / Int32ul,
"Context" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=29, version=0)
class Microsoft_Windows_OneX_29_0(Etw):
pattern = Struct(
"EAPMethodType" / Int8ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=30, version=0)
class Microsoft_Windows_OneX_30_0(Etw):
pattern = Struct(
"EAPMethodType" / Int8ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=31, version=0)
class Microsoft_Windows_OneX_31_0(Etw):
pattern = Struct(
"ProfilesCount" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=32, version=0)
class Microsoft_Windows_OneX_32_0(Etw):
pattern = Struct(
"EAPMethodType" / Int8ul,
"AuthMode" / WString
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=33, version=0)
class Microsoft_Windows_OneX_33_0(Etw):
pattern = Struct(
"EAPMethodType" / Int8ul,
"MediaType" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=34, version=0)
class Microsoft_Windows_OneX_34_0(Etw):
pattern = Struct(
"PortId" / Int32ul,
"UIRequestCode" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=35, version=0)
class Microsoft_Windows_OneX_35_0(Etw):
pattern = Struct(
"ChangeType" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=36, version=0)
class Microsoft_Windows_OneX_36_0(Etw):
pattern = Struct(
"PortId" / Int32ul,
"FriendlyName" / WString
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=37, version=0)
class Microsoft_Windows_OneX_37_0(Etw):
pattern = Struct(
"PortId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=38, version=0)
class Microsoft_Windows_OneX_38_0(Etw):
pattern = Struct(
"PortId" / Int32ul,
"UIRequestCode" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=39, version=0)
class Microsoft_Windows_OneX_39_0(Etw):
pattern = Struct(
"PortId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=40, version=0)
class Microsoft_Windows_OneX_40_0(Etw):
pattern = Struct(
"PortId" / Int32ul,
"PacketLength" / Int16ul,
"PacketType" / Int32ul,
"Identifier" / Int8ul,
"EapMethodType" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=41, version=0)
class Microsoft_Windows_OneX_41_0(Etw):
pattern = Struct(
"PortId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=42, version=0)
class Microsoft_Windows_OneX_42_0(Etw):
pattern = Struct(
"PortId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=43, version=0)
class Microsoft_Windows_OneX_43_0(Etw):
pattern = Struct(
"PortId" / Int32ul,
"Reason" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=44, version=0)
class Microsoft_Windows_OneX_44_0(Etw):
pattern = Struct(
"PortId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=45, version=0)
class Microsoft_Windows_OneX_45_0(Etw):
pattern = Struct(
"PortId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=46, version=0)
class Microsoft_Windows_OneX_46_0(Etw):
pattern = Struct(
"PortId" / Int32ul,
"TimeTaken" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=47, version=0)
class Microsoft_Windows_OneX_47_0(Etw):
pattern = Struct(
"PortId" / Int32ul,
"AuthIdentity" / WString,
"SessionId" / Int32ul,
"Username" / WString,
"Domain" / WString
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=48, version=0)
class Microsoft_Windows_OneX_48_0(Etw):
pattern = Struct(
"PortId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=49, version=0)
class Microsoft_Windows_OneX_49_0(Etw):
pattern = Struct(
"PortId" / Int32ul,
"Reason" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=50, version=0)
class Microsoft_Windows_OneX_50_0(Etw):
pattern = Struct(
"PortId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=51, version=0)
class Microsoft_Windows_OneX_51_0(Etw):
pattern = Struct(
"PortId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=52, version=0)
class Microsoft_Windows_OneX_52_0(Etw):
pattern = Struct(
"PortId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=53, version=0)
class Microsoft_Windows_OneX_53_0(Etw):
pattern = Struct(
"PortId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=54, version=0)
class Microsoft_Windows_OneX_54_0(Etw):
pattern = Struct(
"PortId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=55, version=0)
class Microsoft_Windows_OneX_55_0(Etw):
pattern = Struct(
"PortId" / Int32ul,
"SessionId" / Int32ul,
"UIRequestSessionId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=56, version=0)
class Microsoft_Windows_OneX_56_0(Etw):
pattern = Struct(
"PortId" / Int32ul,
"Size" / Int32ul,
"SessionId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=57, version=0)
class Microsoft_Windows_OneX_57_0(Etw):
pattern = Struct(
"PortId" / Int32ul,
"Reason" / Int32ul,
"SessionId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=58, version=0)
class Microsoft_Windows_OneX_58_0(Etw):
pattern = Struct(
"PortId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=59, version=0)
class Microsoft_Windows_OneX_59_0(Etw):
pattern = Struct(
"PortId" / Int32ul,
"WinError" / Int32ul,
"ReasonCode" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=60, version=0)
class Microsoft_Windows_OneX_60_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"Location" / Int32ul,
"Context" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=61, version=0)
class Microsoft_Windows_OneX_61_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"Location" / Int32ul,
"Context" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=62, version=0)
class Microsoft_Windows_OneX_62_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"Location" / Int32ul,
"Context" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=63, version=0)
class Microsoft_Windows_OneX_63_0(Etw):
pattern = Struct(
"Result" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=64, version=0)
class Microsoft_Windows_OneX_64_0(Etw):
pattern = Struct(
"PortId" / Int32ul,
"PacketLength" / Int16ul,
"PacketType" / Int32ul,
"Identifier" / Int8ul,
"EapMethodType" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=65, version=0)
class Microsoft_Windows_OneX_65_0(Etw):
pattern = Struct(
"PortId" / Int32ul,
"Identity" / CString
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=66, version=0)
class Microsoft_Windows_OneX_66_0(Etw):
pattern = Struct(
"PortId" / Int32ul,
"ExplicitCredentials" / Int8ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=68, version=0)
class Microsoft_Windows_OneX_68_0(Etw):
pattern = Struct(
"PortId" / Int32ul,
"ExplicitCredentials" / Int8ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=70, version=0)
class Microsoft_Windows_OneX_70_0(Etw):
pattern = Struct(
"PortId" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=60001, version=0)
class Microsoft_Windows_OneX_60001_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"Location" / Int32ul,
"Context" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=60002, version=0)
class Microsoft_Windows_OneX_60002_0(Etw):
pattern = Struct(
"WarningCode" / Int32ul,
"Location" / Int32ul,
"Context" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=60003, version=0)
class Microsoft_Windows_OneX_60003_0(Etw):
pattern = Struct(
"NextState" / Int8ul,
"Context" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=60004, version=0)
class Microsoft_Windows_OneX_60004_0(Etw):
pattern = Struct(
"Context" / Int32ul,
"UpdateReasonCode" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=60101, version=0)
class Microsoft_Windows_OneX_60101_0(Etw):
pattern = Struct(
"SourceAddress" / Int32ul,
"SourcePort" / Int32ul,
"DestinationAddress" / Int32ul,
"DestinationPort" / Int32ul,
"Protocol" / Int32ul,
"ReferenceContext" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=60102, version=0)
class Microsoft_Windows_OneX_60102_0(Etw):
pattern = Struct(
"SourcePort" / Int32ul,
"DestinationPort" / Int32ul,
"Protocol" / Int32ul,
"ReferenceContext" / Int32ul
)
@declare(guid=guid("ab0d8ef9-866d-4d39-b83f-453f3b8f6325"), event_id=60103, version=0)
class Microsoft_Windows_OneX_60103_0(Etw):
pattern = Struct(
"IfGuid" / Guid,
"IfIndex" / Int32ul,
"IfLuid" / Int64ul,
"ReferenceContext" / Int32ul
)
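# Illustrative usage sketch (added example, not part of the generated
# definitions above): each class exposes a construct `Struct` in `pattern`,
# so an event payload can be built or parsed directly. The field values
# below are made up purely for demonstration.
if __name__ == '__main__':
    sample = Microsoft_Windows_OneX_40_0.pattern.build(
        dict(PortId=1, PacketLength=64, PacketType=2, Identifier=7, EapMethodType=25)
    )
    parsed = Microsoft_Windows_OneX_40_0.pattern.parse(sample)
    print(parsed.PortId, parsed.EapMethodType)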
|
05-record-like/cards.py | hdcpereira/example-code-2e | 990 | 11102840 |
from dataclasses import dataclass
@dataclass(order=True)
class Card:
rank: str
suit: str
ranks = [str(n) for n in range(2, 11)] + list('JQKA')  # 2-10 plus the court cards and ace
suits = 'spades diamonds clubs hearts'.split()
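# Illustrative usage sketch (added example, not part of the original module):
# with order=True the generated comparison methods compare (rank, suit) field
# by field, and since both fields are strings the ordering is lexicographic
# (e.g. '10' sorts before '2'), not by card value.
if __name__ == '__main__':
    hand = [Card('Q', 'hearts'), Card('2', 'spades'), Card('A', 'clubs')]
    print(sorted(hand))  # Card(rank='2', ...), Card(rank='A', ...), Card(rank='Q', ...)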
|
aif360/metrics/mdss/generator.py | sumacm/fairattr | 982 | 11102875 |
import pandas as pd
import numpy as np
def get_entire_subset():
"""
Returns the entire subset, which is an empty dictionary
:return: empty dictionary
"""
return {}
def get_random_subset(coordinates: pd.DataFrame, prob: float, min_elements: int = 0):
"""
Returns a random subset
    :param coordinates: data frame whose columns are the features
:param prob: probability to select a value of a feature
:param min_elements: minimum number of elements to be included in the randomly generated sub-population
:return: dictionary representing a random sub-population
"""
subset_random_values = {}
shuffled_column_names = np.random.permutation(coordinates.columns.values)
# consider each column once, in random order
for column_name in shuffled_column_names:
# get unique values of the current column
temp = coordinates[column_name].unique()
# include each attribute value with probability = prob
mask_values = np.random.rand(len(temp)) < prob
if mask_values.sum() < len(temp):
# set values for the current column
subset_random_values[column_name] = temp[mask_values].tolist()
# compute the remaining records
mask_subset = coordinates[subset_random_values.keys()].isin(subset_random_values).all(axis=1)
remaining_records = len(coordinates.loc[mask_subset])
# only filter on this attribute if at least min_elements records would be kept
if remaining_records < min_elements:
del subset_random_values[column_name]
    return subset_random_values
 |
recipes/Python/552739_Mixing_features_tree_md5sum__treemd5_/recipe-552739.py | tdiprima/code | 2,023 | 11102882 |
#!/usr/bin/python
import os
import os.path
import sys
import md5
from stat import *
from optparse import OptionParser
class Stats:
def __init__(self):
self.filenb = 0
self.dirnb = 0
self.othernb = 0
self.unstatablenb = 0
def scan_tree(lst, maxlen, dirname, dirpath, prefix, nxt_prefix, options, stats):
"""params:
lst: I/O list of (tree_ascii_art_repr_line, path_if_regular_file_else_None)
where both are strings and the second one can also be None
maxlen: integer that contains the rightmost column number of ascii repr of
the tree known by the caller
dirname: name of the directory from which a tree repr is wanted
dirpath: path to the directory from which a tree repr is wanted
prefix: string to prepend to the dirname to form the first line of the ascii
repr of the subtree
nxt_prefix: string to prepend to every lines of the repr of the subtree but
the first one (which uses prefix)
options: options as extracted by the optparse module from cmd line options
stats: Stats instance
returns a new value for maxlen
"""
try:
dir_content = os.listdir(dirpath)
dir_content.sort()
except OSError:
dir_content = None
ascii_art_tree_repr = prefix + dirname
maxlen = max(maxlen, len(ascii_art_tree_repr))
if dir_content is None:
lst.append((ascii_art_tree_repr + ' [error reading dir]', None))
return maxlen
if not options.all:
dir_content = [child for child in dir_content if child[0] != '.']
lst.append((ascii_art_tree_repr, None))
sub_prefix = nxt_prefix + '|-- '
sub_nxt_prefix = nxt_prefix + '| '
for num, child in enumerate(dir_content):
if num == len(dir_content) - 1:
sub_prefix = nxt_prefix + '`-- '
sub_nxt_prefix = nxt_prefix + ' '
joined_path = os.path.join(dirpath, child)
try:
lmode = os.lstat(joined_path)[ST_MODE]
except:
lmode = None
ascii_art_tree_repr = sub_prefix + child
maxlen = max(maxlen, len(ascii_art_tree_repr))
if lmode is None:
stats.unstatablenb += 1
lst.append((ascii_art_tree_repr + ' [error stating child]', None))
elif S_ISREG(lmode):
stats.filenb += 1
lst.append((ascii_art_tree_repr, joined_path))
elif S_ISDIR(lmode):
stats.dirnb += 1
maxlen = scan_tree(lst, maxlen, child, joined_path, sub_prefix, sub_nxt_prefix, options, stats)
elif S_ISLNK(lmode):
stats.filenb += 1
try:
lst.append((ascii_art_tree_repr + ' -> ' + os.readlink(joined_path), None))
except OSError:
lst.append((ascii_art_tree_repr + ' [cannot read symlink]', None))
elif S_ISCHR(lmode):
stats.othernb += 1
lst.append((ascii_art_tree_repr + ' [char device]', None))
elif S_ISBLK(lmode):
stats.othernb += 1
lst.append((ascii_art_tree_repr + ' [block device]', None))
elif S_ISFIFO(lmode):
stats.othernb += 1
lst.append((ascii_art_tree_repr + ' [fifo]', None))
elif S_ISSOCK(lmode):
stats.othernb += 1
lst.append((ascii_art_tree_repr + ' [socket]', None))
else:
stats.othernb += 1
lst.append((ascii_art_tree_repr + ' [unknown]', None))
return maxlen
def md5_from_path(path):
"""Returns an hex repr of the md5sum of the file content path points to.
On IOError returns '<unable to read file>'.
"""
try:
f = open(path)
m = md5.new()
while True:
b = f.read(262144)
if not b:
break
m.update(b)
f.close()
return m.hexdigest()
except IOError:
return '<unable to read file>'
def main():
parser = OptionParser(usage="usage: %prog [options] [dir1 [dir2 [...]]]")
parser.add_option("-a", "--all", action='store_true', dest='all', default=False, help="All files are listed.")
options, roots = parser.parse_args()
stats = Stats()
if not roots:
roots = ['.']
for root in roots:
lst = []
maxlen = scan_tree(lst, 0, root, root, "", "", options, stats)
for line, path in lst:
if path is not None:
m = md5_from_path(path)
print line + ' ' * (maxlen+1-len(line)) + m
else:
print line
print
print ', '.join((
('%d directory', '%d directories')[stats.dirnb > 1] % stats.dirnb,
('%d file', '%d files')[stats.filenb > 1] % stats.filenb,
('%d other', '%d others')[stats.othernb > 1] % stats.othernb,
('%d unstatable', '%d unstatables')[stats.unstatablenb > 1] % stats.unstatablenb))
if __name__ == "__main__":
main()
|
fugue/extensions/_builtins/processors.py | kvnkho/fugue | 547 | 11102922 |
from typing import Any, List, Type, no_type_check
from fugue.collections.partition import PartitionCursor
from fugue.dataframe import (
ArrayDataFrame,
DataFrame,
DataFrames,
LocalDataFrame,
to_local_bounded_df,
)
from fugue.column import ColumnExpr, SelectColumns as ColumnsSelect
from fugue.exceptions import FugueWorkflowError
from fugue.execution import make_sql_engine
from fugue.execution.execution_engine import _generate_comap_empty_dfs
from fugue.extensions.processor import Processor
from fugue.extensions.transformer import CoTransformer, Transformer, _to_transformer
from fugue.rpc import EmptyRPCHandler, to_rpc_handler
from triad.collections import ParamDict
from triad.collections.schema import Schema
from triad.utils.assertion import assert_or_throw
from triad.utils.convert import to_type
class RunTransformer(Processor):
@no_type_check
def process(self, dfs: DataFrames) -> DataFrame:
df = dfs[0]
tf = _to_transformer(
self.params.get_or_none("transformer", object),
self.params.get_or_none("schema", object),
)
tf._workflow_conf = self.execution_engine.conf
tf._params = self.params.get("params", ParamDict()) # type: ignore
tf._partition_spec = self.partition_spec
rpc_handler = to_rpc_handler(self.params.get_or_throw("rpc_handler", object))
if not isinstance(rpc_handler, EmptyRPCHandler):
tf._rpc_client = self.execution_engine.rpc_server.make_client(rpc_handler)
ie = self.params.get("ignore_errors", [])
self._ignore_errors = [to_type(x, Exception) for x in ie]
tf.validate_on_runtime(df)
if isinstance(tf, Transformer):
return self.transform(df, tf)
else:
return self.cotransform(df, tf)
def transform(self, df: DataFrame, tf: Transformer) -> DataFrame:
tf._key_schema = self.partition_spec.get_key_schema(df.schema) # type: ignore
tf._output_schema = Schema(tf.get_output_schema(df)) # type: ignore
tr = _TransformerRunner(df, tf, self._ignore_errors) # type: ignore
return self.execution_engine.map(
df=df,
map_func=tr.run,
output_schema=tf.output_schema, # type: ignore
partition_spec=tf.partition_spec,
on_init=tr.on_init,
)
@no_type_check
def cotransform(self, df: DataFrame, tf: CoTransformer) -> DataFrame:
assert_or_throw(
df.metadata.get("serialized", False), "must use serialized dataframe"
)
tf._key_schema = df.schema - list(df.metadata["serialized_cols"].values())
# TODO: currently, get_output_schema only gets empty dataframes
empty_dfs = _generate_comap_empty_dfs(
df.metadata["schemas"], df.metadata.get("serialized_has_name", False)
)
tf._output_schema = Schema(tf.get_output_schema(empty_dfs))
tr = _CoTransformerRunner(df, tf, self._ignore_errors)
return self.execution_engine.comap(
df=df,
map_func=tr.run,
output_schema=tf.output_schema,
partition_spec=tf.partition_spec,
on_init=tr.on_init,
)
class RunJoin(Processor):
def process(self, dfs: DataFrames) -> DataFrame:
if len(dfs) == 1:
return dfs[0]
how = self.params.get_or_throw("how", str)
on = self.params.get("on", [])
df = dfs[0]
for i in range(1, len(dfs)):
df = self.execution_engine.join(df, dfs[i], how=how, on=on)
return df
class RunSetOperation(Processor):
def process(self, dfs: DataFrames) -> DataFrame:
if len(dfs) == 1:
return dfs[0]
how = self.params.get_or_throw("how", str)
func: Any = {
"union": self.execution_engine.union,
"subtract": self.execution_engine.subtract,
"intersect": self.execution_engine.intersect,
}[how]
distinct = self.params.get("distinct", True)
df = dfs[0]
for i in range(1, len(dfs)):
df = func(df, dfs[i], distinct=distinct)
return df
class Distinct(Processor):
def process(self, dfs: DataFrames) -> DataFrame:
assert_or_throw(len(dfs) == 1, FugueWorkflowError("not single input"))
return self.execution_engine.distinct(dfs[0])
class Dropna(Processor):
def process(self, dfs: DataFrames) -> DataFrame:
assert_or_throw(len(dfs) == 1, FugueWorkflowError("not single input"))
how = self.params.get("how", "any")
assert_or_throw(
how in ["any", "all"],
FugueWorkflowError("how' needs to be either 'any' or 'all'"),
)
thresh = self.params.get_or_none("thresh", int)
subset = self.params.get_or_none("subset", list)
return self.execution_engine.dropna(
dfs[0], how=how, thresh=thresh, subset=subset
)
class Fillna(Processor):
def process(self, dfs: DataFrames) -> DataFrame:
assert_or_throw(len(dfs) == 1, FugueWorkflowError("not single input"))
value = self.params.get_or_none("value", object)
assert_or_throw(
(not isinstance(value, list)) and (value is not None),
FugueWorkflowError("fillna value cannot be None or list"),
)
if isinstance(value, dict):
assert_or_throw(
(None not in value.values()) and (any(value.values())),
FugueWorkflowError(
"fillna dict can't contain None and must have len > 1"
),
)
subset = self.params.get_or_none("subset", list)
return self.execution_engine.fillna(dfs[0], value=value, subset=subset)
class RunSQLSelect(Processor):
def process(self, dfs: DataFrames) -> DataFrame:
statement = self.params.get_or_throw("statement", str)
engine = self.params.get_or_none("sql_engine", object)
engine_params = self.params.get("sql_engine_params", ParamDict())
sql_engine = make_sql_engine(engine, self.execution_engine, **engine_params)
return sql_engine.select(dfs, statement)
class Zip(Processor):
def process(self, dfs: DataFrames) -> DataFrame:
how = self.params.get("how", "inner")
partition_spec = self.partition_spec
# TODO: this should also search on workflow conf
temp_path = self.params.get_or_none("temp_path", str)
to_file_threshold = self.params.get_or_none("to_file_threshold", object)
return self.execution_engine.zip_all(
dfs,
how=how,
partition_spec=partition_spec,
temp_path=temp_path,
to_file_threshold=to_file_threshold,
)
class Select(Processor):
def validate_on_compile(self):
sc = self.params.get_or_throw("columns", ColumnsSelect)
sc.assert_all_with_names()
def process(self, dfs: DataFrames) -> DataFrame:
assert_or_throw(len(dfs) == 1, FugueWorkflowError("not single input"))
columns = self.params.get_or_throw("columns", ColumnsSelect)
where = None if "where" not in self.params else self.params["where"]
having = None if "having" not in self.params else self.params["having"]
return self.execution_engine.select(
df=dfs[0], cols=columns, where=where, having=having
)
class Filter(Processor):
def validate_on_compile(self):
self.params.get_or_throw("condition", ColumnExpr)
def process(self, dfs: DataFrames) -> DataFrame:
assert_or_throw(len(dfs) == 1, FugueWorkflowError("not single input"))
condition = self.params.get_or_throw("condition", ColumnExpr)
return self.execution_engine.filter(df=dfs[0], condition=condition)
class Assign(Processor):
def validate_on_compile(self):
self.params.get_or_throw("columns", list)
def process(self, dfs: DataFrames) -> DataFrame:
assert_or_throw(len(dfs) == 1, FugueWorkflowError("not single input"))
columns = self.params.get_or_throw("columns", list)
return self.execution_engine.assign(df=dfs[0], columns=columns)
class Aggregate(Processor):
def validate_on_compile(self):
self.params.get_or_throw("columns", list)
def process(self, dfs: DataFrames) -> DataFrame:
assert_or_throw(len(dfs) == 1, FugueWorkflowError("not single input"))
columns = self.params.get_or_throw("columns", list)
return self.execution_engine.aggregate(
df=dfs[0], partition_spec=self.partition_spec, agg_cols=columns
)
class Rename(Processor):
def validate_on_compile(self):
self.params.get_or_throw("columns", dict)
def process(self, dfs: DataFrames) -> DataFrame:
assert_or_throw(len(dfs) == 1, FugueWorkflowError("not single input"))
columns = self.params.get_or_throw("columns", dict)
return dfs[0].rename(columns)
class AlterColumns(Processor):
def validate_on_compile(self):
Schema(self.params.get_or_throw("columns", object))
def process(self, dfs: DataFrames) -> DataFrame:
assert_or_throw(len(dfs) == 1, FugueWorkflowError("not single input"))
columns = self.params.get_or_throw("columns", object)
return dfs[0].alter_columns(columns)
class DropColumns(Processor):
def validate_on_compile(self):
self.params.get_or_throw("columns", list)
def process(self, dfs: DataFrames) -> DataFrame:
assert_or_throw(len(dfs) == 1, FugueWorkflowError("not single input"))
if_exists = self.params.get("if_exists", False)
columns = self.params.get_or_throw("columns", list)
if if_exists:
columns = set(columns).intersection(dfs[0].schema.keys())
return dfs[0].drop(list(columns))
class SelectColumns(Processor):
def validate_on_compile(self):
self.params.get_or_throw("columns", list)
def process(self, dfs: DataFrames) -> DataFrame:
assert_or_throw(len(dfs) == 1, FugueWorkflowError("not single input"))
columns = self.params.get_or_throw("columns", list)
return dfs[0][columns]
class Sample(Processor):
def validate_on_compile(self):
n = self.params.get_or_none("n", int)
frac = self.params.get_or_none("frac", float)
assert_or_throw(
(n is None and frac is not None) or (n is not None and frac is None),
ValueError("one and only one of n and frac should be set"),
)
def process(self, dfs: DataFrames) -> DataFrame:
assert_or_throw(len(dfs) == 1, FugueWorkflowError("not single input"))
n = self.params.get_or_none("n", int)
frac = self.params.get_or_none("frac", float)
replace = self.params.get("replace", False)
seed = self.params.get_or_none("seed", int)
return self.execution_engine.sample(
dfs[0], n=n, frac=frac, replace=replace, seed=seed
)
class Take(Processor):
def process(self, dfs: DataFrames) -> DataFrame:
assert_or_throw(len(dfs) == 1, FugueWorkflowError("not single input"))
# All _get_or operations convert float to int
n = self.params.get_or_none("n", int)
presort = self.params.get_or_none("presort", str)
na_position = self.params.get("na_position", "last")
partition_spec = self.partition_spec
return self.execution_engine.take(
dfs[0],
n,
presort=presort,
na_position=na_position,
partition_spec=partition_spec,
)
class SaveAndUse(Processor):
def process(self, dfs: DataFrames) -> DataFrame:
assert_or_throw(len(dfs) == 1, FugueWorkflowError("not single input"))
kwargs = self.params.get("params", dict())
path = self.params.get_or_throw("path", str)
format_hint = self.params.get("fmt", "")
mode = self.params.get("mode", "overwrite")
partition_spec = self.partition_spec
force_single = self.params.get("single", False)
self.execution_engine.save_df(
df=dfs[0],
path=path,
format_hint=format_hint,
mode=mode,
partition_spec=partition_spec,
force_single=force_single,
**kwargs
)
return self.execution_engine.load_df(path=path, format_hint=format_hint)
class _TransformerRunner(object):
def __init__(
self, df: DataFrame, transformer: Transformer, ignore_errors: List[type]
):
self.schema = df.schema
self.metadata = df.metadata
self.transformer = transformer
self.ignore_errors = tuple(ignore_errors)
def run(self, cursor: PartitionCursor, df: LocalDataFrame) -> LocalDataFrame:
self.transformer._cursor = cursor # type: ignore
df._metadata = self.metadata
if len(self.ignore_errors) == 0:
return self.transformer.transform(df)
else:
try:
return to_local_bounded_df(self.transformer.transform(df))
except self.ignore_errors: # type: ignore # pylint: disable=E0712
return ArrayDataFrame([], self.transformer.output_schema)
def on_init(self, partition_no: int, df: DataFrame) -> None:
s = self.transformer.partition_spec
self.transformer._cursor = s.get_cursor( # type: ignore
self.schema, partition_no
)
df._metadata = self.metadata
self.transformer.on_init(df)
class _CoTransformerRunner(object):
def __init__(
self,
df: DataFrame,
transformer: CoTransformer,
ignore_errors: List[Type[Exception]],
):
self.schema = df.schema
self.metadata = df.metadata
self.transformer = transformer
self.ignore_errors = tuple(ignore_errors)
def run(self, cursor: PartitionCursor, dfs: DataFrames) -> LocalDataFrame:
self.transformer._cursor = cursor # type: ignore
if len(self.ignore_errors) == 0:
return self.transformer.transform(dfs)
else:
try:
return to_local_bounded_df(self.transformer.transform(dfs))
except self.ignore_errors: # type: ignore # pylint: disable=E0712
return ArrayDataFrame([], self.transformer.output_schema)
def on_init(self, partition_no: int, dfs: DataFrames) -> None:
s = self.transformer.partition_spec
self.transformer._cursor = s.get_cursor( # type: ignore
self.schema, partition_no
)
self.transformer.on_init(dfs)
|
chapter5_语音降噪/C5_4_y.py | busyyang/python_sound_open | 165 | 11102960 | from chapter2_基础.soundBase import *
from chapter5_语音降噪.Wavelet import *
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
def awgn(x, snr):
snr = 10 ** (snr / 10.0)
xpower = np.sum(x ** 2) / len(x)
npower = xpower / snr
return x + np.random.randn(len(x)) * np.sqrt(npower)
data, fs = soundBase('C5_4_y.wav').audioread()
data -= np.mean(data)
data /= np.max(np.abs(data))
SNR = 5
N = len(data)
s = awgn(data, SNR)
time = [i / fs for i in range(N)]  # time axis in seconds
wname = 'db7'
jN = 6
res_s = Wavelet_Soft(s, jN, wname)
res_d = Wavelet_Hard(s, jN, wname)
res_hs = Wavelet_hardSoft(s, jN, wname)
res_a = Wavelet_average(s, jN, wname)
plt.figure(figsize=(14, 10))
plt.subplot(3, 2, 1)
plt.plot(time, data)
plt.ylabel('Original signal')
plt.subplot(3, 2, 2)
plt.plot(time, s)
plt.ylabel('Noisy signal')
plt.subplot(3, 2, 3)
plt.ylabel('Wavelet soft-threshold denoising')
plt.plot(time, res_s)
plt.subplot(3, 2, 4)
plt.ylabel('Wavelet hard-threshold denoising')
plt.plot(time, res_d)
plt.subplot(3, 2, 5)
plt.ylabel('Wavelet compromise-threshold denoising')
plt.plot(time, res_hs)
plt.subplot(3, 2, 6)
plt.ylabel('Wavelet weighted-average denoising')
plt.plot(time, res_a)
plt.savefig('images/wavelet.png')
plt.close()
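# Small illustrative check (added for clarity, not in the original script):
# the empirical SNR of the noisy signal should come out close to the 5 dB
# requested above, which is what awgn() is meant to guarantee.
noise = s - data
measured_snr = 10 * np.log10(np.sum(data ** 2) / np.sum(noise ** 2))
print('requested SNR: %d dB, measured SNR: %.2f dB' % (SNR, measured_snr))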
|
jsonrpcserver/main.py | bcb/jsonrpcserver | 144 | 11102982 |
"""The public functions.
These three public functions all perform the same function of dispatching a JSON-RPC
request, but they each give a different return value.
- dispatch_to_responses: Returns Response(s) (or None for notifications).
- dispatch_to_serializable: Returns a Python dict or list of dicts (or None for
notifications).
- dispatch_to_json/dispatch: Returns a JSON-RPC response string (or an empty string for
notifications).
"""
from typing import Any, Callable, Dict, List, Optional, Union, cast
import json
from jsonschema.validators import validator_for # type: ignore
from pkg_resources import resource_string
from .dispatcher import dispatch_to_response_pure, Deserialized
from .methods import Methods, global_methods
from .response import Response, to_serializable_one
from .sentinels import NOCONTEXT
from .utils import identity
default_deserializer = json.loads
# Prepare the jsonschema validator. This is global so it loads only once, not every
# time dispatch is called.
schema = json.loads(resource_string(__name__, "request-schema.json"))
klass = validator_for(schema)
klass.check_schema(schema)
default_validator = klass(schema).validate
def dispatch_to_response(
request: str,
methods: Optional[Methods] = None,
*,
context: Any = NOCONTEXT,
deserializer: Callable[[str], Deserialized] = json.loads,
validator: Callable[[Deserialized], Deserialized] = default_validator,
post_process: Callable[[Response], Any] = identity,
) -> Union[Response, List[Response], None]:
"""Takes a JSON-RPC request string and dispatches it to method(s), giving Response
namedtuple(s) or None.
This is a public wrapper around dispatch_to_response_pure, adding globals and
default values to be nicer for end users.
Args:
request: The JSON-RPC request string.
methods: Dictionary of methods that can be called - mapping of function names to
functions. If not passed, uses the internal global_methods dict which is
populated with the @method decorator.
context: If given, will be passed as the first argument to methods.
deserializer: Function that deserializes the request string.
validator: Function that validates the JSON-RPC request. The function should
raise an exception if the request is invalid. To disable validation, pass
lambda _: None.
post_process: Function that will be applied to Responses.
Returns:
A Response, list of Responses or None.
Examples:
>>> dispatch('{"jsonrpc": "2.0", "method": "ping", "id": 1}')
'{"jsonrpc": "2.0", "result": "pong", "id": 1}'
"""
return dispatch_to_response_pure(
deserializer=deserializer,
validator=validator,
post_process=post_process,
context=context,
methods=global_methods if methods is None else methods,
request=request,
)
def dispatch_to_serializable(
*args: Any, **kwargs: Any
) -> Union[Dict[str, Any], List[Dict[str, Any]], None]:
"""Takes a JSON-RPC request string and dispatches it to method(s), giving responses
as dicts (or None).
"""
return cast(
Union[Dict[str, Any], List[Dict[str, Any]], None],
dispatch_to_response(*args, post_process=to_serializable_one, **kwargs),
)
def dispatch_to_json(
*args: Any,
serializer: Callable[
[Union[Dict[str, Any], List[Dict[str, Any]], str]], str
] = json.dumps,
**kwargs: Any,
) -> str:
"""Takes a JSON-RPC request string and dispatches it to method(s), giving a JSON-RPC
response string.
This is the main public method, it goes through the entire JSON-RPC process - it's a
function from JSON-RPC request string to JSON-RPC response string.
Args:
serializer: A function to serialize a Python object to json.
The rest: Passed through to dispatch_to_serializable.
"""
response = dispatch_to_serializable(*args, **kwargs)
# Better to respond with the empty string instead of json "null", because "null" is
# an invalid JSON-RPC response.
return "" if response is None else serializer(response)
# "dispatch" aliases dispatch_to_json.
dispatch = dispatch_to_json
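# Minimal usage sketch (illustrative only; it assumes the package-level
# `method` decorator and `Success` result type that this library exposes for
# registering functions in `global_methods`):
#
#     from jsonrpcserver import method, Success, dispatch
#
#     @method
#     def ping():
#         return Success("pong")
#
#     dispatch('{"jsonrpc": "2.0", "method": "ping", "id": 1}')
#     # -> '{"jsonrpc": "2.0", "result": "pong", "id": 1}'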
|
packages/core/minos-microservice-common/minos/common/testing/database/__init__.py | minos-framework/minos-python | 247 | 11103000 |
from .clients import (
MockedDatabaseClient,
)
from .factories import (
MockedLockDatabaseOperationFactory,
MockedManagementDatabaseOperationFactory,
)
from .operations import (
MockedDatabaseOperation,
)
|
backend/conduit/settings/docker.py | fivehoho75/aws-workshop | 135 | 11103012 |
import json
import os
from conduit.settings.defaults import *
DEBUG = os.environ.get('DJANGO_DEBUG', 'False') == 'True'
STATIC_ROOT = '/data/static/'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ['DATABASE_NAME'],
'USER': os.environ['DATABASE_USER'],
'PASSWORD': os.environ['DATABASE_PASSWORD'],
'HOST': os.environ['DATABASE_HOST'],
'PORT': os.environ.get('DATABASE_PORT', '5432'),
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'django.file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': '/data/django.log',
},
'django.security.file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': '/data/django.security.log',
},
},
'loggers': {
'django.request': {
'handlers': ['django.file'],
'level': 'DEBUG',
'propagate': True,
},
'django.security': {
'handlers': ['django.security.file'],
'level': 'DEBUG',
'propagate': True,
},
'django.db.backends': {
'handlers': [],
'level': 'DEBUG',
'propagate': True,
},
},
}
CORS_ORIGIN_WHITELIST = tuple(json.loads(os.environ.get(
'DJANGO_CORS_ORIGIN_WHITELIST',
'[]'
)))
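# Note (added for clarity): DJANGO_CORS_ORIGIN_WHITELIST is expected to hold a
# JSON-encoded list of origins, e.g. DJANGO_CORS_ORIGIN_WHITELIST='["example.com"]'.
# The exact origin format accepted depends on the installed django-cors-headers version.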
|
backend/category/ssh/ssh_connection.py | zerlee/open-cmdb | 126 | 11103027 | import paramiko
from rest_framework.exceptions import ParseError
'''
https://blog.csdn.net/qq_24674131/article/details/95618304
User for passwordless login:
1. Set up passwordless SSH from the local machine to the remote host.
2. Add the remote user to sudoers and configure passwordless sudo.
'''
class SSHConnection:
    # Initialize the connection and create the Transport channel
def __init__(self, host='xxx.xxx.xxx.xxx', port=22, user='xxx', pwd='<PASSWORD>', key_file=''):
self.host = host
self.port = port
self.user = user
self.pwd = <PASSWORD>
self.key_file = key_file
transport = paramiko.Transport((self.host, self.port))
if self.key_file:
try:
private_key = paramiko.RSAKey.from_private_key_file(self.key_file)
transport.connect(username=self.user, pkey=private_key)
except Exception as e:
                raise ParseError(f'Passwordless connection to {self.user}@{self.host}:{self.port} with key file {self.key_file} failed')
else:
transport.connect(username=self.user, password=<PASSWORD>)
self.__transport = transport
self.sftp = paramiko.SFTPClient.from_transport(self.__transport)
    # Close the channel
def close(self):
self.sftp.close()
self.__transport.close()
    # Upload a file to the remote host
def upload(self, local_path, remote_path):
self.sftp.put(local_path, remote_path)
    # Download a file from the remote host to the local machine
def download(self, local_path, remote_path):
self.sftp.get(remote_path, local_path)
    # Create a directory on the remote host
    def mkdir(self, target_path, mode=0o777):
self.sftp.mkdir(target_path, mode)
    # Remove a directory on the remote host
def rmdir(self, target_path):
self.sftp.rmdir(target_path)
    # List files and subdirectories under a path (use listdir_attr for more fine-grained file information)
def listdir(self, target_path):
return self.sftp.listdir(target_path)
    # Delete a file
def remove(self, target_path):
self.sftp.remove(target_path)
    # List detailed information for files and subdirectories (like os.stat; returns SFTPAttributes objects whose attributes can be inspected via __dict__)
def listdir_attr(self, target_path):
try:
files = self.sftp.listdir_attr(target_path)
except BaseException as e:
print(e)
return files
    # Get file details
def stat(self, remote_path):
return self.sftp.stat(remote_path)
    # Run a command on the remote host via SSHClient
def cmd(self, command):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy)
ssh._transport = self.__transport
stdin, stdout, stderr = ssh.exec_command(command)
result = stdout.read()
return result.decode('utf8')
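if __name__ == '__main__':
    # Illustrative usage sketch only (added example): the host, credentials
    # and paths below are placeholders, not values from the original project.
    conn = SSHConnection(host='192.168.0.10', port=22, user='deploy', pwd='changeme')
    print(conn.cmd('uname -a'))                        # run a remote command
    conn.upload('/tmp/local.txt', '/tmp/remote.txt')   # copy a file up
    conn.close()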
|
src/pretix/helpers/escapejson.py | fabm3n/pretix | 1,248 | 11103065 | #
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 <NAME> and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
from django.utils.encoding import force_str
from django.utils.functional import keep_lazy
from django.utils.safestring import SafeText, mark_safe
_json_escapes = {
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
}
_json_escapes_attr = {
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
ord('"'): '"',
ord("'"): ''',
ord("="): '=',
}
@keep_lazy(str, SafeText)
def escapejson(value):
"""Hex encodes characters for use in a application/json type script."""
return mark_safe(force_str(value).translate(_json_escapes))
@keep_lazy(str, SafeText)
def escapejson_attr(value):
"""Hex encodes characters for use in a html attributw script."""
return mark_safe(force_str(value).translate(_json_escapes_attr))
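if __name__ == "__main__":
    # Illustrative check of the two helpers above (added example, not part of
    # the original module); for plain strings this should not require
    # configured Django settings.
    print(escapejson('{"name": "<script>"}'))   # {"name": "\u003Cscript\u003E"}
    print(escapejson_attr('a="1"'))             # a=&#61;&#34;1&#34;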
|
test/specs/openapi/parameters/test_simple_payloads.py | gluhar2006/schemathesis | 659 | 11103068 |
"""Tests for behavior not specific to forms."""
import pytest
from schemathesis.parameters import PayloadAlternatives
from schemathesis.specs.openapi.parameters import OpenAPI20Body, OpenAPI30Body
@pytest.mark.parametrize(
"consumes",
(
["application/json"],
# Multiple values in "consumes" implies multiple payload variants
["application/json", "application/xml"],
),
)
def test_payload_open_api_2(
consumes,
assert_parameters,
make_openapi_2_schema,
open_api_2_user_form_with_file_parameters,
open_api_2_user_in_body,
user_jsonschema,
):
# A single "body" parameter is used for all payload variants
schema = make_openapi_2_schema(consumes, [open_api_2_user_in_body])
assert_parameters(
schema,
PayloadAlternatives(
[OpenAPI20Body(definition=open_api_2_user_in_body, media_type=value) for value in consumes]
),
# For each one the schema is extracted from the parameter definition and transformed to the proper JSON Schema
[user_jsonschema] * len(consumes),
)
@pytest.mark.parametrize(
"media_types",
(
["application/json"],
# Each media type corresponds to a payload variant
["application/json", "application/xml"],
# Forms can be also combined
["application/x-www-form-urlencoded", "multipart/form-data"],
),
)
def test_payload_open_api_3(media_types, assert_parameters, make_openapi_3_schema, open_api_3_user, user_jsonschema):
schema = make_openapi_3_schema(
{
"required": True,
"content": {media_type: {"schema": open_api_3_user} for media_type in media_types},
}
)
assert_parameters(
schema,
PayloadAlternatives(
[
OpenAPI30Body(definition={"schema": open_api_3_user}, media_type=media_type, required=True)
for media_type in media_types
]
),
# The converted schema should correspond the schema in the relevant "requestBody" part
# In this case they are the same
[user_jsonschema] * len(media_types),
)
|
tests/nlu_hc_tests/training_tests/chunk_resolution/chunk_resolver_tests.py | milyiyo/nlu | 480 | 11103078 |
import unittest
import pandas as pd
import nlu
from sparknlp.annotator import BertSentenceEmbeddings
import tests.nlu_hc_tests.secrets as sct
class ChunkResolverTrainingTests(unittest.TestCase):
def test_chunk_resolver_training(self):
"""When training a chunk resolver, word_embedding are required.
If none specifeid, the default `glove` word_embeddings will be used
Alternatively, if a Word Embedding is specified in the load command before the train.chunk_resolver,
it will be used instead of the default glove
"""
cols = ["y","_y","text"]
p='/home/ckl/Documents/freelance/jsl/nlu/nlu4realgit2/tests/datasets/AskAPatient.fold-0.train.txt'
dataset = pd.read_csv(p,sep="\t",encoding="ISO-8859-1",header=None)
dataset.columns = cols
SPARK_NLP_LICENSE = sct.SPARK_NLP_LICENSE
AWS_ACCESS_KEY_ID = sct.AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY = sct.AWS_SECRET_ACCESS_KEY
JSL_SECRET = sct.JSL_SECRET
nlu.auth(SPARK_NLP_LICENSE,AWS_ACCESS_KEY_ID,AWS_SECRET_ACCESS_KEY,JSL_SECRET)
trainable_pipe = nlu.load('train.resolve_chunks', verbose=True)
trainable_pipe.print_info()
fitted_pipe = trainable_pipe.fit(dataset)
res = fitted_pipe.predict(dataset, multithread=False)
for c in res :
print(c)
print(res[c])
def test_chunk_resolver_training_custom_embeds(self):
pass
"""When training a chunk resolver, word_embedding are required.
If none specifeid, the default `glove` word_embeddings will be used
Alternatively, if a Word Embedding is specified in the load command before the train.chunk_resolver,
it will be used instead of the default glove
"""
dataset = pd.DataFrame({
'text': ['The Tesla company is good to invest is', 'TSLA is good to invest','TESLA INC. we should buy','PUT ALL MONEY IN TSLA inc!!'],
'y': ['23','23','23','23'],
'_y': ['TESLA','TESLA','TESLA','TESLA'],
})
cols = ["y","_y","text"]
p='/home/ckl/Documents/freelance/jsl/nlu/nlu4realgit2/tests/datasets/AskAPatient.fold-0.train.txt'
dataset = pd.read_csv(p,sep="\t",encoding="ISO-8859-1",header=None)
dataset.columns = cols
SPARK_NLP_LICENSE = sct.SPARK_NLP_LICENSE
AWS_ACCESS_KEY_ID = sct.AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY = sct.AWS_SECRET_ACCESS_KEY
JSL_SECRET = sct.JSL_SECRET
nlu.auth(SPARK_NLP_LICENSE,AWS_ACCESS_KEY_ID,AWS_SECRET_ACCESS_KEY,JSL_SECRET)
# trainable_pipe = nlu.load('glove train.resolve_chunks', verbose=True)
# trainable_pipe = nlu.load('bert train.resolve_chunks', verbose=True)
# trainable_pipe = nlu.load('bert train.resolve_chunks', verbose=True)
trainable_pipe = nlu.load('en.embed.glove.healthcare_100d train.resolve_chunks')
trainable_pipe['chunk_resolver'].setNeighbours(350)
        # TODO bert/elmo give weird storage ref errors...
# TODO WRITE ISSUE IN HEALTHCARE LIB ABOUT THIS!!!
# ONLY GLOVE WORKS!!
# trainable_pipe = nlu.load('bert train.resolve_chunks', verbose=True)
trainable_pipe.print_info()
fitted_pipe = trainable_pipe.fit(dataset)
res = fitted_pipe.predict(dataset, multithread=False)
for c in res :
print(c)
print(res[c])
# print(res)
if __name__ == '__main__':
    ChunkResolverTrainingTests().test_chunk_resolver_training()  # run the test defined above
|
2017-07_Seminar/Session 3 - Relation CNN/code/cnn.py | dineshsonachalam/deeplearning4nlp-tutorial | 593 | 11103110 |
"""
This is a CNN for relation classification within a sentence. The architecture is based on:
<NAME>, <NAME>, <NAME>, <NAME> and <NAME>, 2014, Relation Classification via Convolutional Deep Neural Network
Performance (without hyperparameter optimization):
Accuracy: 0.7943
Macro-Averaged F1 (without Other relation): 0.7612
Performance Zeng et al.
Macro-Averaged F1 (without Other relation): 0.789
Code was tested with:
- Python 2.7 & Python 3.6
- Theano 0.9.0 & TensorFlow 1.2.1
- Keras 2.0.5
"""
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
import gzip
import sys
if (sys.version_info > (3, 0)):
import pickle as pkl
else: #Python 2.7 imports
import cPickle as pkl
import keras
from keras.models import Model
from keras.layers import Input, Dense, Dropout, Activation, Flatten, concatenate
from keras.layers import Embedding
from keras.layers import Convolution1D, MaxPooling1D, GlobalMaxPooling1D
from keras.regularizers import Regularizer
from keras.preprocessing import sequence
batch_size = 64
nb_filter = 100
filter_length = 3
hidden_dims = 100
nb_epoch = 100
position_dims = 50
print("Load dataset")
f = gzip.open('pkl/sem-relations.pkl.gz', 'rb')
data = pkl.load(f)
f.close()
embeddings = data['wordEmbeddings']
yTrain, sentenceTrain, positionTrain1, positionTrain2 = data['train_set']
yTest, sentenceTest, positionTest1, positionTest2 = data['test_set']
max_position = max(np.max(positionTrain1), np.max(positionTrain2))+1
n_out = max(yTrain)+1
#train_y_cat = np_utils.to_categorical(yTrain, n_out)
max_sentence_len = sentenceTrain.shape[1]
print("sentenceTrain: ", sentenceTrain.shape)
print("positionTrain1: ", positionTrain1.shape)
print("yTrain: ", yTrain.shape)
print("sentenceTest: ", sentenceTest.shape)
print("positionTest1: ", positionTest1.shape)
print("yTest: ", yTest.shape)
print("Embeddings: ",embeddings.shape)
words_input = Input(shape=(max_sentence_len,), dtype='int32', name='words_input')
words = Embedding(embeddings.shape[0], embeddings.shape[1], weights=[embeddings], trainable=False)(words_input)
distance1_input = Input(shape=(max_sentence_len,), dtype='int32', name='distance1_input')
distance1 = Embedding(max_position, position_dims)(distance1_input)
distance2_input = Input(shape=(max_sentence_len,), dtype='int32', name='distance2_input')
distance2 = Embedding(max_position, position_dims)(distance2_input)
output = concatenate([words, distance1, distance2])
output = Convolution1D(filters=nb_filter,
kernel_size=filter_length,
padding='same',
activation='tanh',
strides=1)(output)
# we use standard max over time pooling
output = GlobalMaxPooling1D()(output)
output = Dropout(0.25)(output)
output = Dense(n_out, activation='softmax')(output)
model = Model(inputs=[words_input, distance1_input, distance2_input], outputs=[output])
model.compile(loss='sparse_categorical_crossentropy',optimizer='Adam', metrics=['accuracy'])
model.summary()
print("Start training")
max_prec, max_rec, max_acc, max_f1 = 0,0,0,0
def getPrecision(pred_test, yTest, targetLabel):
#Precision for non-vague
targetLabelCount = 0
correctTargetLabelCount = 0
for idx in range(len(pred_test)):
if pred_test[idx] == targetLabel:
targetLabelCount += 1
if pred_test[idx] == yTest[idx]:
correctTargetLabelCount += 1
if correctTargetLabelCount == 0:
return 0
return float(correctTargetLabelCount) / targetLabelCount
def predict_classes(prediction):
return prediction.argmax(axis=-1)
for epoch in range(nb_epoch):
model.fit([sentenceTrain, positionTrain1, positionTrain2], yTrain, batch_size=batch_size, verbose=True,epochs=1)
pred_test = predict_classes(model.predict([sentenceTest, positionTest1, positionTest2], verbose=False))
dctLabels = np.sum(pred_test)
totalDCTLabels = np.sum(yTest)
acc = np.sum(pred_test == yTest) / float(len(yTest))
max_acc = max(max_acc, acc)
print("Accuracy: %.4f (max: %.4f)" % (acc, max_acc))
f1Sum = 0
f1Count = 0
for targetLabel in range(1, max(yTest)):
prec = getPrecision(pred_test, yTest, targetLabel)
recall = getPrecision(yTest, pred_test, targetLabel)
f1 = 0 if (prec+recall) == 0 else 2*prec*recall/(prec+recall)
f1Sum += f1
f1Count +=1
macroF1 = f1Sum / float(f1Count)
max_f1 = max(max_f1, macroF1)
print("Non-other Macro-Averaged F1: %.4f (max: %.4f)\n" % (macroF1, max_f1)) |
keanu-python/tests/test_proposal_distributions.py | rs992214/keanu | 153 | 11103117 |
import numpy as np
import pytest
from keanu import BayesNet, Model
from keanu.algorithm._proposal_distribution import ProposalDistribution
from keanu.vertex import Gamma, Gaussian
from keanu.vartypes import tensor_arg_types
@pytest.fixture
def net() -> BayesNet:
with Model() as m:
m.gamma = Gamma(1., 1.)
m.gaussian = Gaussian(0., m.gamma)
return m.to_bayes_net()
def test_you_can_create_a_prior_proposal_distribution(net) -> None:
ProposalDistribution("prior", latents=list(net.iter_latent_vertices()))
@pytest.mark.parametrize("sigma", [1., np.array(1.), [1., 2.], [np.array(1.), np.array(2.)]])
def test_you_can_create_a_gaussian_proposal_distribution(sigma: tensor_arg_types, net: BayesNet) -> None:
ProposalDistribution("gaussian", latents=list(net.iter_latent_vertices()), sigma=sigma)
def test_it_throws_if_you_specify_gaussian_without_a_value_for_sigma(net: BayesNet) -> None:
with pytest.raises(
TypeError, match=r"Gaussian Proposal Distribution requires a sigma or a list of sigmas for each latent"):
ProposalDistribution("gaussian", latents=list(net.iter_latent_vertices()))
def test_it_throws_if_you_specify_gaussian_with_not_enough_sigmas_for_each_latent(net: BayesNet) -> None:
with pytest.raises(
TypeError, match=r"Gaussian Proposal Distribution requires a sigma or a list of sigmas for each latent"):
ProposalDistribution("gaussian", latents=list(net.iter_latent_vertices()), sigma=[1.])
def test_it_throws_if_you_specify_gaussian_without_values_for_latents() -> None:
with pytest.raises(TypeError, match=r"Gaussian Proposal Distribution requires values for latents"):
ProposalDistribution("gaussian", sigma=1.)
def test_it_throws_if_you_specify_gaussian_with_empty_list_of_latents(net: BayesNet) -> None:
with pytest.raises(TypeError, match=r"Gaussian Proposal Distribution requires values for latents"):
ProposalDistribution("gaussian", latents=[], sigma=[])
def test_it_throws_if_you_specify_sigma_but_the_type_isnt_gaussian() -> None:
with pytest.raises(TypeError, match=r'Parameter sigma is not valid unless type is "gaussian"'):
ProposalDistribution("prior", sigma=1.)
def test_it_throws_if_it_doesnt_recognise_the_type() -> None:
with pytest.raises(KeyError, match=r"'foo'"):
ProposalDistribution("foo")
|
pliers/utils/__init__.py | nickduran/pliers | 229 | 11103181 |
""" Utilities """
from .base import (listify, flatten, batch_iterable, classproperty, isiterable,
isgenerator, progress_bar_wrapper, attempt_to_import,
EnvironmentKeyMixin, verify_dependencies, set_iterable_type,
APIDependent, flatten_dict, resample)
__all__ = [
'listify',
'flatten',
'flatten_dict',
'batch_iterable',
'classproperty',
'isiterable',
'isgenerator',
'progress_bar_wrapper',
'attempt_to_import',
'EnvironmentKeyMixin',
'verify_dependencies',
'set_iterable_type',
'APIDependent',
'resample'
]
|
src/ralph/dashboards/migrations/0003_graph_push_to_statsd.py | DoNnMyTh/ralph | 1,668 | 11103200 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboards', '0002_auto_20170509_1404'),
]
operations = [
migrations.AddField(
model_name='graph',
name='push_to_statsd',
field=models.BooleanField(default=False, help_text="Push graph's data to statsd."),
),
]
|
template_filters.py | simon987/od-database | 133 | 11103248 |
import datetime
import time
import od_util
def setup_template_filters(app):
app.jinja_env.globals.update(truncate_path=od_util.truncate_path)
app.jinja_env.globals.update(get_color=od_util.get_color)
app.jinja_env.globals.update(get_mime=od_util.get_category)
@app.template_filter("date_format")
def date_format(value, format='%Y-%m-%d'):
return time.strftime(format, time.gmtime(value))
@app.template_filter("datetime_format")
def datetime_format(value, format='%Y-%m-%d %H:%M:%S'):
return time.strftime(format, time.gmtime(value))
@app.template_filter("duration_format")
def duration_format(value):
delay = datetime.timedelta(seconds=value)
if delay.days > 0:
out = str(delay).replace(" days, ", ":")
else:
out = str(delay)
out_ar = out.split(':')
out_ar = ["%02d" % (int(float(x))) for x in out_ar]
out = ":".join(out_ar)
return out
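    # Illustrative examples (added; assumes the " day(s), " handling above):
    #   duration_format(3784)  -> "01:03:04"
    #   duration_format(93784) -> "01:02:03:04"  (days:hours:minutes:seconds)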
@app.template_filter("from_timestamp")
def from_timestamp(value):
return datetime.datetime.fromtimestamp(value)
|
2021/visuals/day_02/submarine/commands.py | salt-die/Advent-of-Code | 105 | 11103256 | import yaml
import re
from pathlib import Path
_THIS_DIR = Path(__file__).parent
_INPUTS = _THIS_DIR.parent.parent.parent / "aoc_helper" / "inputs.yaml"
_RAW = yaml.full_load(_INPUTS.read_text())["2"]
COMMANDS = [
(command, int(amount))
for command, amount in re.findall(r"(\w+) (\d+)", _RAW)
]
|
tests/integration/exception_test.py | markowanga/stweet | 101 | 11103276 | import pytest
import stweet as st
from stweet import WebClient
from stweet.auth import SimpleAuthTokenProvider
from stweet.exceptions import RefreshTokenException, ScrapBatchBadResponse
from stweet.exceptions.too_many_requests_exception import TooManyRequestsException
from stweet.http_request import RequestResponse
from stweet.twitter_api.twitter_auth_web_client_interceptor import TwitterAuthWebClientInterceptor
from tests.mock_web_client import MockWebClient
def get_client_with_default_response(response: RequestResponse = RequestResponse(None, None)) -> WebClient:
return MockWebClient(
default_response=response,
interceptors=[TwitterAuthWebClientInterceptor()]
)
def test_get_simple_auth_token_with_incorrect_response_1():
with pytest.raises(RefreshTokenException):
SimpleAuthTokenProvider().get_new_token(get_client_with_default_response(RequestResponse(400, None)))
def test_get_auth_token_with_incorrect_response_2():
with pytest.raises(TooManyRequestsException):
SimpleAuthTokenProvider(50, 150).get_new_token(get_client_with_default_response(RequestResponse(429, None)))
def test_get_auth_token_with_incorrect_response_3():
with pytest.raises(RefreshTokenException):
SimpleAuthTokenProvider().get_new_token(get_client_with_default_response(RequestResponse(200, '{}')))
def test_get_auth_token_with_incorrect_response_4():
with pytest.raises(RefreshTokenException):
SimpleAuthTokenProvider().get_new_token(get_client_with_default_response(RequestResponse(200, 'LALA')))
def test_runner_exceptions():
class TokenExpiryExceptionWebClient(st.WebClient):
count_dict = dict({
'https://api.twitter.com/2/search/adaptive.json': 0,
'https://api.twitter.com/1.1/guest/activate.json': 0
})
def run_clear_request(self, params: st.http_request.RequestDetails) -> st.http_request.RequestResponse:
self.count_dict[params.url] = self.count_dict[params.url] + 1
if params.url == 'https://api.twitter.com/2/search/adaptive.json':
if self.count_dict[params.url] == 1:
return st.http_request.RequestResponse(429, None)
else:
return st.http_request.RequestResponse(400, '')
else:
return st.http_request.RequestResponse(200, '{"guest_token":"<PASSWORD>"}')
with pytest.raises(ScrapBatchBadResponse):
search_tweets_task = st.SearchTweetsTask(
all_words='#koronawirus'
)
st.TweetSearchRunner(
search_tweets_task=search_tweets_task,
tweet_outputs=[],
web_client=TokenExpiryExceptionWebClient(interceptors=[TwitterAuthWebClientInterceptor()]),
).run()
def test_get_not_existing_user():
task = st.GetUsersTask(['fcbewkjdsncvjwkfs'])
result = st.GetUsersRunner(task, []).run()
assert result.users_count == 0
|
InvenTree/stock/migrations/0066_stockitem_scheduled_for_deletion.py | carlos-riquelme/InvenTree | 656 | 11103349 |
# Generated by Django 3.2.4 on 2021-09-07 06:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stock', '0065_auto_20210701_0509'),
]
operations = [
migrations.AddField(
model_name='stockitem',
name='scheduled_for_deletion',
field=models.BooleanField(default=False, help_text='This StockItem will be deleted by the background worker', verbose_name='Scheduled for deletion'),
),
]
|
sdk/python/pulumi_gcp/compute/get_health_check.py | sisisin/pulumi-gcp | 121 | 11103356 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetHealthCheckResult',
'AwaitableGetHealthCheckResult',
'get_health_check',
'get_health_check_output',
]
@pulumi.output_type
class GetHealthCheckResult:
"""
A collection of values returned by getHealthCheck.
"""
def __init__(__self__, check_interval_sec=None, creation_timestamp=None, description=None, grpc_health_checks=None, healthy_threshold=None, http2_health_checks=None, http_health_checks=None, https_health_checks=None, id=None, log_configs=None, name=None, project=None, self_link=None, ssl_health_checks=None, tcp_health_checks=None, timeout_sec=None, type=None, unhealthy_threshold=None):
if check_interval_sec and not isinstance(check_interval_sec, int):
raise TypeError("Expected argument 'check_interval_sec' to be a int")
pulumi.set(__self__, "check_interval_sec", check_interval_sec)
if creation_timestamp and not isinstance(creation_timestamp, str):
raise TypeError("Expected argument 'creation_timestamp' to be a str")
pulumi.set(__self__, "creation_timestamp", creation_timestamp)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if grpc_health_checks and not isinstance(grpc_health_checks, list):
raise TypeError("Expected argument 'grpc_health_checks' to be a list")
pulumi.set(__self__, "grpc_health_checks", grpc_health_checks)
if healthy_threshold and not isinstance(healthy_threshold, int):
raise TypeError("Expected argument 'healthy_threshold' to be a int")
pulumi.set(__self__, "healthy_threshold", healthy_threshold)
if http2_health_checks and not isinstance(http2_health_checks, list):
raise TypeError("Expected argument 'http2_health_checks' to be a list")
pulumi.set(__self__, "http2_health_checks", http2_health_checks)
if http_health_checks and not isinstance(http_health_checks, list):
raise TypeError("Expected argument 'http_health_checks' to be a list")
pulumi.set(__self__, "http_health_checks", http_health_checks)
if https_health_checks and not isinstance(https_health_checks, list):
raise TypeError("Expected argument 'https_health_checks' to be a list")
pulumi.set(__self__, "https_health_checks", https_health_checks)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if log_configs and not isinstance(log_configs, list):
raise TypeError("Expected argument 'log_configs' to be a list")
pulumi.set(__self__, "log_configs", log_configs)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if project and not isinstance(project, str):
raise TypeError("Expected argument 'project' to be a str")
pulumi.set(__self__, "project", project)
if self_link and not isinstance(self_link, str):
raise TypeError("Expected argument 'self_link' to be a str")
pulumi.set(__self__, "self_link", self_link)
if ssl_health_checks and not isinstance(ssl_health_checks, list):
raise TypeError("Expected argument 'ssl_health_checks' to be a list")
pulumi.set(__self__, "ssl_health_checks", ssl_health_checks)
if tcp_health_checks and not isinstance(tcp_health_checks, list):
raise TypeError("Expected argument 'tcp_health_checks' to be a list")
pulumi.set(__self__, "tcp_health_checks", tcp_health_checks)
if timeout_sec and not isinstance(timeout_sec, int):
raise TypeError("Expected argument 'timeout_sec' to be a int")
pulumi.set(__self__, "timeout_sec", timeout_sec)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if unhealthy_threshold and not isinstance(unhealthy_threshold, int):
raise TypeError("Expected argument 'unhealthy_threshold' to be a int")
pulumi.set(__self__, "unhealthy_threshold", unhealthy_threshold)
@property
@pulumi.getter(name="checkIntervalSec")
def check_interval_sec(self) -> int:
return pulumi.get(self, "check_interval_sec")
@property
@pulumi.getter(name="creationTimestamp")
def creation_timestamp(self) -> str:
return pulumi.get(self, "creation_timestamp")
@property
@pulumi.getter
def description(self) -> str:
return pulumi.get(self, "description")
@property
@pulumi.getter(name="grpcHealthChecks")
def grpc_health_checks(self) -> Sequence['outputs.GetHealthCheckGrpcHealthCheckResult']:
return pulumi.get(self, "grpc_health_checks")
@property
@pulumi.getter(name="healthyThreshold")
def healthy_threshold(self) -> int:
return pulumi.get(self, "healthy_threshold")
@property
@pulumi.getter(name="http2HealthChecks")
def http2_health_checks(self) -> Sequence['outputs.GetHealthCheckHttp2HealthCheckResult']:
return pulumi.get(self, "http2_health_checks")
@property
@pulumi.getter(name="httpHealthChecks")
def http_health_checks(self) -> Sequence['outputs.GetHealthCheckHttpHealthCheckResult']:
return pulumi.get(self, "http_health_checks")
@property
@pulumi.getter(name="httpsHealthChecks")
def https_health_checks(self) -> Sequence['outputs.GetHealthCheckHttpsHealthCheckResult']:
return pulumi.get(self, "https_health_checks")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="logConfigs")
def log_configs(self) -> Sequence['outputs.GetHealthCheckLogConfigResult']:
return pulumi.get(self, "log_configs")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def project(self) -> Optional[str]:
return pulumi.get(self, "project")
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> str:
return pulumi.get(self, "self_link")
@property
@pulumi.getter(name="sslHealthChecks")
def ssl_health_checks(self) -> Sequence['outputs.GetHealthCheckSslHealthCheckResult']:
return pulumi.get(self, "ssl_health_checks")
@property
@pulumi.getter(name="tcpHealthChecks")
def tcp_health_checks(self) -> Sequence['outputs.GetHealthCheckTcpHealthCheckResult']:
return pulumi.get(self, "tcp_health_checks")
@property
@pulumi.getter(name="timeoutSec")
def timeout_sec(self) -> int:
return pulumi.get(self, "timeout_sec")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
@property
@pulumi.getter(name="unhealthyThreshold")
def unhealthy_threshold(self) -> int:
return pulumi.get(self, "unhealthy_threshold")
class AwaitableGetHealthCheckResult(GetHealthCheckResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetHealthCheckResult(
check_interval_sec=self.check_interval_sec,
creation_timestamp=self.creation_timestamp,
description=self.description,
grpc_health_checks=self.grpc_health_checks,
healthy_threshold=self.healthy_threshold,
http2_health_checks=self.http2_health_checks,
http_health_checks=self.http_health_checks,
https_health_checks=self.https_health_checks,
id=self.id,
log_configs=self.log_configs,
name=self.name,
project=self.project,
self_link=self.self_link,
ssl_health_checks=self.ssl_health_checks,
tcp_health_checks=self.tcp_health_checks,
timeout_sec=self.timeout_sec,
type=self.type,
unhealthy_threshold=self.unhealthy_threshold)
def get_health_check(name: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetHealthCheckResult:
"""
Get information about a HealthCheck.
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
health_check = gcp.compute.get_health_check(name="my-hc")
```
:param str name: Name of the resource.
:param str project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
"""
__args__ = dict()
__args__['name'] = name
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:compute/getHealthCheck:getHealthCheck', __args__, opts=opts, typ=GetHealthCheckResult).value
return AwaitableGetHealthCheckResult(
check_interval_sec=__ret__.check_interval_sec,
creation_timestamp=__ret__.creation_timestamp,
description=__ret__.description,
grpc_health_checks=__ret__.grpc_health_checks,
healthy_threshold=__ret__.healthy_threshold,
http2_health_checks=__ret__.http2_health_checks,
http_health_checks=__ret__.http_health_checks,
https_health_checks=__ret__.https_health_checks,
id=__ret__.id,
log_configs=__ret__.log_configs,
name=__ret__.name,
project=__ret__.project,
self_link=__ret__.self_link,
ssl_health_checks=__ret__.ssl_health_checks,
tcp_health_checks=__ret__.tcp_health_checks,
timeout_sec=__ret__.timeout_sec,
type=__ret__.type,
unhealthy_threshold=__ret__.unhealthy_threshold)
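# A brief usage sketch for the plain (non-Output) form above; the health check name
# is a placeholder and the surrounding Pulumi program/stack is assumed:
#
#   import pulumi
#   import pulumi_gcp as gcp
#
#   hc = gcp.compute.get_health_check(name="my-hc")
#   # Every field documented on GetHealthCheckResult is available as a plain Python
#   # value at program construction time, e.g. for exports or conditional logic:
#   pulumi.export("health_check_self_link", hc.self_link)
#   pulumi.export("health_check_type", hc.type)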
@_utilities.lift_output_func(get_health_check)
def get_health_check_output(name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetHealthCheckResult]:
"""
Get information about a HealthCheck.
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
health_check = gcp.compute.get_health_check(name="my-hc")
```
:param str name: Name of the resource.
:param str project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
"""
...
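# A brief sketch of the Output-lifted variant above, useful when the health check
# name is only known as a pulumi.Output (for example, derived from another resource);
# `some_resource` is a placeholder and the surrounding program is assumed:
#
#   import pulumi
#   import pulumi_gcp as gcp
#
#   hc = gcp.compute.get_health_check_output(name=some_resource.name)
#   pulumi.export("health_check_id", hc.apply(lambda r: r.id))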
|