max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
python/spec/fixtures/setup_files/imports_version.py | lioramilbaum/dependabot-core | 2,670 | 11068753 | <gh_stars>1000+
from setuptools import setup, find_packages
from split_settings import __version__
setup(name='python-package',
version=__version__,
description='Example setup.py',
url='https://github.com/example/python-package',
author='Dependabot',
scripts=[],
packages=find_packages(),
setup_requires=[
'numpy==1.11.0',
'pytest-runner',
],
install_requires=[
'boto3==1.3.1',
'flake8 > 2.5.4, < 3.0.0',
'gocardless_pro',
'numpy>=1.11.0',
'pandas==0.19.2',
'pep8==1.7.0',
'psycopg2==2.6.1',
'raven == 5.32.0',
'requests==2.12.*',
'scipy==0.18.1',
'scikit-learn==0.18.1',
],
tests_require=[
'pytest==2.9.1',
'responses==0.5.1',
]
)
|
parlai/chat_service/utils/config.py | zl930216/ParlAI | 9,228 | 11068774 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Config Utils.
"""
import yaml
from collections import namedtuple
WorldConfig = namedtuple(
"WorldConfig",
[
"world_name",
"onboarding_name",
"task_name",
"max_time_in_pool",
"agents_required",
"backup_task",
],
)
def parse_configuration_file(config_path):
"""
Read the config file for an experiment to get ParlAI settings.
:param config_path:
path to config
:return:
parsed configuration dictionary
"""
result = {}
result["configs"] = {}
with open(config_path) as f:
cfg = yaml.load(f.read(), Loader=yaml.SafeLoader)
# get world path
result["world_path"] = cfg.get("world_module")
if not result["world_path"]:
raise ValueError("Did not specify world module")
result["overworld"] = cfg.get("overworld")
if not result["overworld"]:
raise ValueError("Did not specify overworld")
result["max_workers"] = cfg.get("max_workers")
if not result["max_workers"]:
raise ValueError("Did not specify max_workers")
result["task_name"] = cfg.get("task_name")
if not result["task_name"]:
raise ValueError("Did not specify task name")
task_world = cfg.get("tasks")
if task_world is None or len(task_world) == 0:
raise ValueError("task not in config file")
# get task file
for task_name, configuration in task_world.items():
if "task_world" not in configuration:
raise ValueError("{} does not specify a task".format(task_name))
result["configs"][task_name] = WorldConfig(
world_name=task_name,
onboarding_name=configuration.get("onboard_world"),
task_name=configuration.get("task_world"),
max_time_in_pool=configuration.get("timeout") or 300,
agents_required=configuration.get("agents_required") or 1,
backup_task=configuration.get("backup_task"),
)
# get world options, additional args
result["world_opt"] = cfg.get("opt", {})
result["additional_args"] = cfg.get("additional_args", {})
return result
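# A minimal sketch of a YAML config this parser accepts. The key names come from
# the parsing code above; the module path and concrete values are illustrative
# assumptions, not taken from ParlAI itself:
#
#   world_module: parlai.chat_service.tasks.chatbot.worlds
#   overworld: MessengerOverworld
#   max_workers: 8
#   task_name: chatbot
#   tasks:
#     default:
#       task_world: MessengerBotChatTaskWorld
#       onboard_world: MessengerBotChatOnboardWorld
#       timeout: 180
#       agents_required: 1
#       backup_task: null
#   opt:
#     debug: true
#   additional_args:
#     page_id: 1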
|
tests/test_visitors/test_ast/test_complexity/test_classes/test_bases_classes_counts.py | cdhiraj40/wemake-python-styleguide | 1,931 | 11068778 | import pytest
from wemake_python_styleguide.violations.complexity import (
TooManyBaseClassesViolation,
)
from wemake_python_styleguide.visitors.ast.complexity.classes import (
ClassComplexityVisitor,
)
correct_count = """
class CorrectClassName(
FirstParentClass,
SecondParentClass,
ThirdParentClass,
): ...
"""
correct_count_with_keywords = """
class CorrectClassName(
FirstParentClass,
SecondParentClass,
ThirdParentClass,
first=1,
second=2,
third=3,
fourth=4,
fifth=5,
): ...
"""
too_many_count = """
class SomeClassName(
FirstParentClass,
SecondParentClass,
ThirdParentClass,
CustomClass,
AddedClass,
): ...
"""
@pytest.mark.parametrize('code', [
correct_count,
correct_count_with_keywords,
])
def test_correct_count(
assert_errors,
parse_ast_tree,
code,
default_options,
):
"""Testing of correct base classes number."""
tree = parse_ast_tree(code)
visitor = ClassComplexityVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('code', [
too_many_count,
])
def test_bad_number_default_option(
assert_errors,
assert_error_text,
parse_ast_tree,
code,
default_options,
):
"""Testing of base classes number with default options."""
tree = parse_ast_tree(code)
visitor = ClassComplexityVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [TooManyBaseClassesViolation])
assert_error_text(visitor, '5', default_options.max_base_classes)
@pytest.mark.parametrize('code', [
too_many_count,
correct_count,
correct_count_with_keywords,
])
def test_bad_number_custom_option(
assert_errors,
parse_ast_tree,
code,
options,
):
"""Testing of base classes number with custom options."""
tree = parse_ast_tree(code)
options = options(max_base_classes=5)
visitor = ClassComplexityVisitor(options, tree=tree)
visitor.run()
assert_errors(visitor, [])
|
tests/test_plugin.py | Vlczech/spectree | 183 | 11068818 | <gh_stars>100-1000
import pytest
from spectree.utils import get_model_key, get_model_path_key, get_model_schema
from .common import JSON, SECURITY_SCHEMAS, Cookies, Headers, Query, Resp, get_paths
from .test_plugin_falcon import api as falcon_api
from .test_plugin_flask import api as flask_api
from .test_plugin_flask import api_global_secure as flask_api_global_secure
from .test_plugin_flask import api_secure as flask_api_secure
from .test_plugin_flask_blueprint import api as flask_bp_api
from .test_plugin_flask_view import api as flask_view_api
from .test_plugin_starlette import api as starlette_api
@pytest.mark.parametrize(
"api",
[
flask_api,
flask_bp_api,
flask_view_api,
falcon_api,
starlette_api,
],
)
def test_plugin_spec(api):
models = {
get_model_key(model=m): get_model_schema(model=m)
for m in (Query, JSON, Resp, Cookies, Headers)
}
for name, schema in models.items():
schema.pop("definitions", None)
assert api.spec["components"]["schemas"][name] == schema
assert api.spec["tags"] == [
{"name": "test"},
{"name": "health"},
{
"description": "🐱",
"externalDocs": {
"description": "",
"url": "https://pypi.org",
},
"name": "API",
},
]
assert get_paths(api.spec) == [
"/api/user/{name}",
"/api/user_annotated/{name}",
"/ping",
]
ping = api.spec["paths"]["/ping"]["get"]
assert ping["tags"] == ["test", "health"]
assert ping["parameters"][0]["in"] == "header"
assert ping["summary"] == "summary"
assert ping["description"] == "description"
assert ping["operationId"] == "get_/ping"
user = api.spec["paths"]["/api/user/{name}"]["post"]
assert user["tags"] == ["API", "test"]
assert (
user["requestBody"]["content"]["application/json"]["schema"]["$ref"]
== f"#/components/schemas/{get_model_path_key('tests.common.JSON')}"
)
assert len(user["responses"]) == 3
params = user["parameters"]
for param in params:
if param["in"] == "path":
assert param["name"] == "name"
elif param["in"] == "query":
assert param["name"] == "order"
def test_secure_spec():
assert [*flask_api_secure.spec["components"]["securitySchemes"].keys()] == [
scheme.name for scheme in SECURITY_SCHEMAS
]
paths = flask_api_secure.spec["paths"]
# iter paths
for path, path_data in paths.items():
security = path_data["get"].get("security")
# check empty-secure path
if path == "/no-secure-ping":
assert security is None
else:
# iter secure names and params
for secure_key, secure_value in security[0].items():
# check secure names valid
assert secure_key in [scheme.name for scheme in SECURITY_SCHEMAS]
# check if flow exist
if secure_value:
scopes = [
scheme.data.flows["authorizationCode"]["scopes"]
for scheme in SECURITY_SCHEMAS
if scheme.name == secure_key
]
assert set(secure_value).issubset(*scopes)
def test_secure_global_spec():
assert [*flask_api_global_secure.spec["components"]["securitySchemes"].keys()] == [
scheme.name for scheme in SECURITY_SCHEMAS
]
paths = flask_api_global_secure.spec["paths"]
global_security = flask_api_global_secure.spec["security"]
assert global_security == [{"auth_apiKey": []}]
# iter paths
for path, path_data in paths.items():
security = path_data["get"].get("security")
# check empty-secure path
if path == "/no-secure-override-ping":
# check if it is defined overridden no auth specification
assert security == []
elif path == "/oauth2-flows-override-ping":
# check if it is defined overridden security specification
assert security == [{"auth_oauth2": ["admin", "read"]}]
elif path == "/global-secure-ping":
# check if local security specification is missing,
# when was not specified explicitly
assert security is None
elif path == "/security_and":
# check if AND operation is supported
assert security == [{"auth_apiKey": [], "auth_apiKey_backup": []}]
elif path == "/security_or":
# check if OR operation is supported
assert security == [{"auth_apiKey": []}, {"auth_apiKey_backup": []}]
|
core/migrations/0008_auto__add_field_attachment_filename.py | nuwainfo/treeio | 242 | 11068890 | <gh_stars>100-1000
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Attachment.filename'
db.add_column('core_attachment', 'filename', self.gf(
'django.db.models.fields.CharField')(default='', max_length=64), keep_default=False)
def backwards(self, orm):
# Deleting field 'Attachment.filename'
db.delete_column('core_attachment', 'filename')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.accessentity': {
'Meta': {'object_name': 'AccessEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'core.attachment': {
'Meta': {'object_name': 'Attachment'},
'attached_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'attached_object': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Object']", 'null': 'True', 'blank': 'True'}),
'attached_record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.UpdateRecord']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mimetype': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'uploaded_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']"})
},
'core.comment': {
'Meta': {'object_name': 'Comment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']", 'null': 'True', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dislikes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments_disliked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments_liked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"})
},
'core.configsetting': {
'Meta': {'object_name': 'ConfigSetting'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'core.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group', '_ormbases': ['core.AccessEntity']},
'accessentity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.AccessEntity']", 'unique': 'True', 'primary_key': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['core.Group']"})
},
'core.invitation': {
'Meta': {'object_name': 'Invitation'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']", 'null': 'True', 'blank': 'True'})
},
'core.location': {
'Meta': {'object_name': 'Location', '_ormbases': ['core.Object']},
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['core.Location']"})
},
'core.module': {
'Meta': {'ordering': "['name']", 'object_name': 'Module', '_ormbases': ['core.Object']},
'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'display': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'system': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'core.modulesetting': {
'Meta': {'object_name': 'ModuleSetting'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'module': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Module']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'perspective': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Perspective']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']", 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'core.object': {
'Meta': {'object_name': 'Object'},
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Comment']"}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'objects_created'", 'null': 'True', 'to': "orm['core.User']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dislikes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_disliked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'full_access': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_full_access'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_liked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'links': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'links_rel_+'", 'null': 'True', 'to': "orm['core.Object']"}),
'nuvius_resource': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'object_name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'object_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'read_access': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_read_access'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"}),
'subscribers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Tag']", 'null': 'True', 'blank': 'True'}),
'trash': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.page': {
'Meta': {'ordering': "['name']", 'object_name': 'Page', '_ormbases': ['core.Object']},
'body': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.PageFolder']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'core.pagefolder': {
'Meta': {'object_name': 'PageFolder', '_ormbases': ['core.Object']},
'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'})
},
'core.perspective': {
'Meta': {'object_name': 'Perspective', '_ormbases': ['core.Object']},
'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'modules': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Module']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'})
},
'core.revision': {
'Meta': {'object_name': 'Revision'},
'change_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Object']"}),
'previous': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'next'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Revision']"})
},
'core.revisionfield': {
'Meta': {'object_name': 'RevisionField'},
'field': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'field_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Revision']"}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_key': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisionfield_key'", 'null': 'True', 'to': "orm['core.Object']"}),
'value_key_acc': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisionfield_key_acc'", 'null': 'True', 'to': "orm['core.AccessEntity']"}),
'value_m2m': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'revisionfield_m2m'", 'symmetrical': 'False', 'to': "orm['core.Object']"}),
'value_m2m_acc': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'revisionfield_m2m_acc'", 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"})
},
'core.tag': {
'Meta': {'ordering': "['name']", 'object_name': 'Tag'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'core.updaterecord': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'UpdateRecord'},
'about': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'updates'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Object']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sent_updates'", 'null': 'True', 'to': "orm['core.User']"}),
'body': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments_on_updates'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Comment']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dislikes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'updates_disliked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'format_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'format_strings': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'updates_liked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'received_updates'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"}),
'record_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sent_updates'", 'null': 'True', 'to': "orm['core.Object']"}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'})
},
'core.user': {
'Meta': {'ordering': "['name']", 'object_name': 'User', '_ormbases': ['core.AccessEntity']},
'accessentity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.AccessEntity']", 'unique': 'True', 'primary_key': 'True'}),
'default_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'default_user_set'", 'null': 'True', 'to': "orm['core.Group']"}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_access': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'other_groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.widget': {
'Meta': {'ordering': "['weight']", 'object_name': 'Widget'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'module_name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'perspective': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Perspective']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'widget_name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
}
}
complete_apps = ['core']
|
OpenDataCatalog/comments/widgets.py | runonthespot/Open-Data-Catalog | 105 | 11068923 | from django.forms.util import flatatt
from django.utils.encoding import StrAndUnicode, force_unicode
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
class StarsRadioInput(StrAndUnicode):
"""
An object used by RadioFieldRenderer that represents a single
<input type='radio'>.
"""
def __init__(self, name, value, attrs, choice, index):
self.name, self.value = name, value
self.attrs = attrs
self.choice_value = force_unicode(choice[0])
self.choice_label = force_unicode(choice[1])
self.index = index
def __unicode__(self):
return mark_safe(u'%s' % self.tag())
def is_checked(self):
return self.value == self.choice_value
def tag(self):
if 'id' in self.attrs:
self.attrs['id'] = '%s_%s' % (self.attrs['id'], self.index)
final_attrs = dict(self.attrs, type='radio', name=self.name, value=self.choice_value)
if self.is_checked():
final_attrs['checked'] = 'checked'
return mark_safe(u'<input%s />' % flatatt(final_attrs))
class StarsRadioFieldRenderer(StrAndUnicode):
def __init__(self, name, value, attrs, choices):
self.name, self.value, self.attrs = name, value, attrs
self.choices = choices
def __iter__(self):
for i, choice in enumerate(self.choices):
yield StarsRadioInput(self.name, self.value, self.attrs.copy(), choice, i)
def __getitem__(self, idx):
choice = self.choices[idx] # Let the IndexError propagate
return StarsRadioInput(self.name, self.value, self.attrs.copy(), choice, idx)
def __unicode__(self):
return self.render()
def render(self):
"""Outputs a <ul> for this set of radio fields."""
return mark_safe(u'\n%s\n' % u'\n'.join([u'%s'
% force_unicode(w) for w in self]))
|
tools/mo/openvino/tools/mo/front/tf/roll_ext.py | ryanloney/openvino-1 | 1,127 | 11068947 | <filename>tools/mo/openvino/tools/mo/front/tf/roll_ext.py
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.ops.roll import Roll
from openvino.tools.mo.front.extractor import FrontExtractorOp
class RollExtractor(FrontExtractorOp):
op = 'Roll'
enabled = True
@classmethod
def extract(cls, node):
Roll.update_node_stat(node, {})
return cls.enabled
|
jiant/tasks/lib/udpos.py | yzpang/jiant | 1,108 | 11068951 | <reponame>yzpang/jiant<filename>jiant/tasks/lib/udpos.py
import numpy as np
import torch
from dataclasses import dataclass
from typing import List, Union
from jiant.tasks.core import (
BaseExample,
BaseTokenizedExample,
BaseDataRow,
BatchMixin,
Task,
TaskTypes,
)
from jiant.tasks.lib.templates.shared import (
labels_to_bimap,
create_input_set_from_tokens_and_segments,
construct_single_input_tokens_and_segment_ids,
pad_single_with_feat_spec,
)
from jiant.utils.python.datastructures import zip_equal
from jiant.utils.python.io import read_file_lines
ARBITRARY_OVERLY_LONG_WORD_CONSTRAINT = 100
# In a rare number of cases, a single word (usually something like a mis-processed URL)
# is overly long, and should not be treated as a real multi-subword-token word.
# In these cases, we simply replace it with an UNK token.
@dataclass
class Example(BaseExample):
guid: str
tokens: List[str]
pos_list: List[str]
def tokenize(self, tokenizer):
all_tokenized_tokens = []
labels = []
label_mask = []
for token, pos in zip_equal(self.tokens, self.pos_list):
# Tokenize each "token" separately, assign label only to first token
tokenized = tokenizer.tokenize(token)
# If the token can't be tokenized, or is too long, replace with a single <unk>
if len(tokenized) == 0 or len(tokenized) > ARBITRARY_OVERLY_LONG_WORD_CONSTRAINT:
tokenized = [tokenizer.unk_token]
all_tokenized_tokens += tokenized
padding_length = len(tokenized) - 1
labels += [UdposTask.LABEL_TO_ID.get(pos, None)] + [None] * padding_length
label_mask += [1] + [0] * padding_length
return TokenizedExample(
guid=self.guid, tokens=all_tokenized_tokens, labels=labels, label_mask=label_mask,
)
@dataclass
class TokenizedExample(BaseTokenizedExample):
guid: str
tokens: List
labels: List[Union[int, None]]
label_mask: List[int]
def featurize(self, tokenizer, feat_spec):
unpadded_inputs = construct_single_input_tokens_and_segment_ids(
input_tokens=self.tokens, tokenizer=tokenizer, feat_spec=feat_spec,
)
input_set = create_input_set_from_tokens_and_segments(
unpadded_tokens=unpadded_inputs.unpadded_tokens,
unpadded_segment_ids=unpadded_inputs.unpadded_segment_ids,
tokenizer=tokenizer,
feat_spec=feat_spec,
)
# Replicate padding / additional tokens for the label ids and mask
if feat_spec.sep_token_extra:
label_suffix = [None, None]
mask_suffix = [0, 0]
special_tokens_count = 3 # CLS, SEP-SEP
else:
label_suffix = [None]
mask_suffix = [0]
special_tokens_count = 2 # CLS, SEP
unpadded_labels = (
[None] + self.labels[: feat_spec.max_seq_length - special_tokens_count] + label_suffix
)
unpadded_labels = [i if i is not None else -1 for i in unpadded_labels]
unpadded_label_mask = (
[0] + self.label_mask[: feat_spec.max_seq_length - special_tokens_count] + mask_suffix
)
padded_labels = pad_single_with_feat_spec(
ls=unpadded_labels, feat_spec=feat_spec, pad_idx=-1,
)
padded_label_mask = pad_single_with_feat_spec(
ls=unpadded_label_mask, feat_spec=feat_spec, pad_idx=0,
)
return DataRow(
guid=self.guid,
input_ids=np.array(input_set.input_ids),
input_mask=np.array(input_set.input_mask),
segment_ids=np.array(input_set.segment_ids),
label_ids=np.array(padded_labels),
label_mask=np.array(padded_label_mask),
tokens=unpadded_inputs.unpadded_tokens,
)
@dataclass
class DataRow(BaseDataRow):
guid: str
input_ids: np.ndarray
input_mask: np.ndarray
segment_ids: np.ndarray
label_ids: np.ndarray
label_mask: np.ndarray
tokens: list
@dataclass
class Batch(BatchMixin):
input_ids: torch.LongTensor
input_mask: torch.LongTensor
segment_ids: torch.LongTensor
label_ids: torch.LongTensor
label_mask: torch.LongTensor
tokens: list
class UdposTask(Task):
Example = Example
TokenizedExample = Example
DataRow = DataRow
Batch = Batch
TASK_TYPE = TaskTypes.TAGGING
LABELS = [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
LABEL_TO_ID, ID_TO_LABEL = labels_to_bimap(LABELS)
def __init__(self, name, path_dict, language):
super().__init__(name=name, path_dict=path_dict)
self.language = language
@property
def num_labels(self):
return len(self.LABELS)
def get_train_examples(self):
return self._create_examples(data_path=self.path_dict["train"], set_type="train")
def get_val_examples(self):
return self._create_examples(data_path=self.path_dict["val"], set_type="val")
def get_test_examples(self):
return self._create_examples(data_path=self.path_dict["test"], set_type="test")
@classmethod
def _create_examples(cls, data_path, set_type):
curr_token_list, curr_pos_list = [], []
data_lines = read_file_lines(data_path, "r", encoding="utf-8")
examples = []
idx = 0
for data_line in data_lines:
data_line = data_line.strip()
if data_line:
if set_type == "test":
line_tokens = data_line.split("\t")
if len(line_tokens) == 2:
token, pos = line_tokens
else:
token, pos = data_line, None
else:
token, pos = data_line.split("\t")
curr_token_list.append(token)
curr_pos_list.append(pos)
else:
examples.append(
Example(
guid=f"{set_type}-{idx}", tokens=curr_token_list, pos_list=curr_pos_list,
)
)
idx += 1
curr_token_list, curr_pos_list = [], []
if curr_token_list:
examples.append(
Example(guid=f"{set_type}-{idx}", tokens=curr_token_list, pos_list=curr_pos_list)
)
return examples
|
datathon/cbis_ddsm/scripts/cbis_ddsm_ml/trainer/tpu_model.py | kourtneyshort/healthcare | 310 | 11068953 | #!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple CNN model for classifying CBIS-DDSM images based on breast density (https://breast-cancer.ca/densitbi-rads/) categories.
This model is built on top of the "2018 NUS-MIT Datathon Tutorial: Machine
Learning on CBIS-DDSM" tutorial (you can find it at https://git.io/vhgOu).
The architecture of the core model itself remains the same (i.e. 6 layers CNN).
However, substantial changes around data loading and metrics collection have
been made to feed data to TPUs for training. For detailed explanation on how
the model works, please go to https://www.tensorflow.org/tutorials/layers.
This model assumes that the data needed for training and evaluation has
already been generated and stored on GCS in TFRecords (more can be found:
https://www.tensorflow.org/programmers_guide/datasets). You can find a script
at https://git.io/vhg3K which helps you transform existing images to the
desired data format.
Please check out the tutorials folder (https://git.io/vhgaw) for instructions
on training this model with TPU.
"""
import tensorflow as tf
# Cloud TPU Cluster Resolver flags.
tf.flags.DEFINE_string(
"tpu",
default=None,
help="The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone",
default="us-central1-b",
help="[Optional] GCE zone where the Cloud TPU is located in. At this "
"moment, only us-central provides TPU access.")
tf.flags.DEFINE_string(
"gcp_project",
default=None,
help="[Optional] Project name for the Cloud TPU-enabled project.")
# Input data specific flags.
tf.flags.DEFINE_string(
"training_data",
default=None,
help="Path to training data. This should be a GCS path, "
"e.g. gs://datathon-cbis-ddsm-colab/cache/ddsm_train.tfrecords")
tf.flags.DEFINE_string(
"eval_data",
default=None,
help="Path to evaluation data. This should be a GCS path, "
"e.g. gs://datathon-cbis-ddsm-colab/cache/ddsm_eval.tfrecords")
tf.flags.DEFINE_integer(
"image_width",
default=0,
help="Wdith of input images. All images are expected to share the same "
"size.")
tf.flags.DEFINE_integer(
"image_height",
default=0,
help="Height of input images. All images are expected to share the same "
"size.")
tf.flags.DEFINE_integer(
"image_channel",
default=1,
help="[Optional] Number of channels in input images.")
# Model specific flags.
tf.flags.DEFINE_string("model_dir", default=None, help="Estimator model_dir.")
tf.flags.DEFINE_integer(
"batch_size",
default=96,
help="Mini-batch size for the training. Note that this is the global batch "
"size and not the per-shard batch.")
tf.flags.DEFINE_integer(
"training_steps", default=1000, help="Total number of training steps.")
tf.flags.DEFINE_integer(
"eval_steps", default=100, help="Total number of evaluation steps.")
tf.flags.DEFINE_float("learning_rate", default=0.05, help="Learning rate.")
tf.flags.DEFINE_integer(
"iterations",
default=50,
help="Number of iterations per TPU training loop.")
tf.flags.DEFINE_integer(
"num_shards", default=8, help="Number of shards (TPU chips).")
tf.flags.DEFINE_integer(
"category_count", default=0, help="Number of categories.")
FLAGS = tf.flags.FLAGS
def metric_fn(labels, logits):
"""Record metrics for evaluation."""
predictions = tf.argmax(logits, 1)
return {
"accuracy": tf.metrics.precision(labels=labels, predictions=predictions)
}
def cnn_model_fn(features, labels, mode, params):
"""CNN core model.
Please read the tensorflow doc for how to customize this function:
https://www.tensorflow.org/get_started/custom_estimators
"""
del params # Not needed.
# Input Layer.
# Reshape to 4-D tensor: [batch_size, height, width, channels]
input_layer = tf.reshape(
features,
[-1, FLAGS.image_height, FLAGS.image_width, FLAGS.image_channel])
# Convolutional Layer #1.
conv1 = tf.layers.conv2d(
inputs=input_layer,
filters=32,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
# Pooling Layer #1.
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
# Convolutional Layer #2.
conv2 = tf.layers.conv2d(
inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
# Pooling Layer #2.
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
# Flatten tensor into a batch of vectors
filtered_width = FLAGS.image_width // 4
filtered_height = FLAGS.image_height // 4
pool2_flat = tf.reshape(pool2, [-1, filtered_width * filtered_height * 64])
# Dense Layer.
dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
# Dropout operation.
dropout = tf.layers.dropout(
inputs=dense, rate=0.4, training=(mode == tf.estimator.ModeKeys.TRAIN))
# Logits Layer.
logits = tf.layers.dense(inputs=dropout, units=FLAGS.category_count)
# Loss Calculation.
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
learning_rate = tf.train.exponential_decay(FLAGS.learning_rate,
tf.train.get_global_step(),
100000, 0.96)
optimizer = tf.contrib.tpu.CrossShardOptimizer(
tf.train.GradientDescentOptimizer(learning_rate))
train_op = optimizer.minimize(
loss=loss, global_step=tf.train.get_global_step())
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode, loss=loss, train_op=train_op)
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode, loss=loss, eval_metrics=(metric_fn, [labels, logits]))
def get_input_fn(filename):
"""Returns an `input_fn` for training and evaluation."""
def input_fn(params):
# Retrieves the batch size for the current shard. The number of shards is
# computed according to the input pipeline deployment. See
# https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/RunConfig
# for details.
batch_size = params["batch_size"]
def parse(serialized_example):
"""Parses a single tf.Example into image and label tensors."""
features = tf.parse_single_example(
serialized_example,
features={
"label": tf.FixedLenFeature([], tf.int64),
"image": tf.FixedLenFeature([], tf.string),
})
image = tf.decode_raw(features["image"], tf.float32)
image = tf.reshape(image, [FLAGS.image_width, FLAGS.image_height])
label = tf.cast(features["label"], tf.int32)
return image, label
dataset = tf.data.TFRecordDataset(filename, buffer_size=500000)
dataset = dataset.map(parse).cache().repeat()
dataset = dataset.batch(batch_size, drop_remainder=True)
images, labels = dataset.make_one_shot_iterator().get_next()
return images, labels
return input_fn
def main(_):
"""Set up training and evaluation steps."""
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=FLAGS.model_dir,
session_config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=True),
tpu_config=tf.contrib.tpu.TPUConfig(FLAGS.iterations, FLAGS.num_shards),
)
classifier = tf.contrib.tpu.TPUEstimator(
model_fn=cnn_model_fn,
use_tpu=True,
train_batch_size=FLAGS.batch_size,
eval_batch_size=FLAGS.batch_size,
config=run_config)
# Set up logging for predictions.
# Log the values in the "Softmax" tensor with label "probabilities".
tensors_to_log = {"probabilities": "softmax_tensor"}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=50)
# Training.
classifier.train(
input_fn=get_input_fn(FLAGS.training_data), steps=FLAGS.training_steps)
# Evaluation.
classifier.evaluate(
input_fn=get_input_fn(FLAGS.eval_data),
steps=FLAGS.eval_steps,
hooks=[logging_hook])
if __name__ == "__main__":
# Set logging level to INFO.
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
|
tests/api/conftest.py | Gorlym/WEB | 187 | 11068968 | import pytest
from alembic.command import upgrade
from sqlalchemy import create_engine
from analyzer.api.__main__ import parser
from analyzer.api.app import create_app
@pytest.fixture
async def migrated_postgres(alembic_config, postgres):
"""
Returns the URL of the database with all migrations applied.
"""
upgrade(alembic_config, 'head')
return postgres
@pytest.fixture
def arguments(aiomisc_unused_port, migrated_postgres):
"""
Arguments for launching the application.
"""
return parser.parse_args(
[
'--log-level=debug',
'--api-address=127.0.0.1',
f'--api-port={aiomisc_unused_port}',
f'--pg-url={migrated_postgres}'
]
)
@pytest.fixture
async def api_client(aiohttp_client, arguments):
app = create_app(arguments)
client = await aiohttp_client(app, server_kwargs={
'port': arguments.api_port
})
try:
yield client
finally:
await client.close()
@pytest.fixture
def migrated_postgres_connection(migrated_postgres):
"""
Synchronous connection to the migrated database.
"""
engine = create_engine(migrated_postgres)
conn = engine.connect()
try:
yield conn
finally:
conn.close()
engine.dispose()
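# A minimal sketch of how a test could consume these fixtures; the endpoint
# path '/imports' is an illustrative assumption, not defined in this file:
#
#   async def test_imports_endpoint(api_client):
#       response = await api_client.get('/imports')
#       assert response.status < 500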
|
metrics/log_collectors/training_data_service_client/connect.py | adrian555/FfDL | 680 | 11068994 | #
# Copyright 2017-2018 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
import grpc
import time
from log_collectors.training_data_service_client import training_data_pb2_grpc as td
def get_connection()->td.TrainingDataStub:
with open('log_collectors/training_data_service_client/certs/server.crt') as f:
certificate = f.read()
credentials = grpc.ssl_channel_credentials(root_certificates=certificate)
# TODO: Change these to be configurable when/if we get the viper issue straightened out.
isTLSEnabled = True
isLocal = False
if isLocal:
host_url = '127.0.0.1'
port = '30015'
else:
training_data_namespace = os.environ["TRAINING_DATA_NAMESPACE"]
host_url = "ffdl-trainingdata.%s.svc.cluster.local" % training_data_namespace
port = '80'
host_url = '{}:{}'.format(host_url, port)
print("host_url: "+host_url)
sys.stdout.flush()
channel = None
for retryCount in range(0, 10):
try:
if isTLSEnabled:
channel = grpc.secure_channel(host_url, credentials,
options=(('grpc.ssl_target_name_override', 'dlaas.ibm.com',),))
else:
channel = grpc.insecure_channel(host_url)
if channel is not None:
break
except Exception as inst:
print("Exception trying to connect:",
sys.exc_info()[0])
print(inst)
sys.stdout.flush()
time.sleep(.5)
if channel is not None:
tdClient = td.TrainingDataStub(channel)
else:
tdClient = None
return tdClient
|
apache/flask/webapp/validators.py | jamitupya/SweetSecurity3alpha | 792 | 11068996 | import re
def convertMac(macAddress):
if re.match(r"^[A-Za-z0-9]{2}-[A-Za-z0-9]{2}-[A-Za-z0-9]{2}-[A-Za-z0-9]{2}-[A-Za-z0-9]{2}-[A-Za-z0-9]{2}$",macAddress):
macAddress=macAddress.replace('-','')
elif re.match(r"^[A-Za-z0-9]{2}:[A-Za-z0-9]{2}:[A-Za-z0-9]{2}:[A-Za-z0-9]{2}:[A-Za-z0-9]{2}:[A-Za-z0-9]{2}$",macAddress):
macAddress=macAddress.replace(':','')
return macAddress.upper()
def macAddress(macAddress):
dashMatch = re.match(r"^[A-Fa-f0-9]{2}-[A-Fa-f0-9]{2}-[A-Fa-f0-9]{2}-[A-Fa-f0-9]{2}-[A-Fa-f0-9]{2}-[A-Fa-f0-9]{2}$",macAddress)
colonMatch = re.match(r"^[A-Fa-f0-9]{2}:[A-Fa-f0-9]{2}:[A-Fa-f0-9]{2}:[A-Fa-f0-9]{2}:[A-Fa-f0-9]{2}:[A-Fa-f0-9]{2}$",macAddress)
alphaMatch = re.match(r"^[A-Fa-f0-9]{12}$",macAddress)
if dashMatch or colonMatch or alphaMatch:
return True
else:
return False
def url(url):
urlMatch= re.match(r"^([a-zA-Z0-9][a-zA-Z0-9\-\_]+[a-zA-Z0-9]\.)+([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-\_])+[A-Za-z0-9]$",url)
ipMatch= re.match(r"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$",url)
if urlMatch or ipMatch:
return True
else:
return False
def hostname(hostname):
hostnameMatch= re.match(r"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-\_]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-\_]*[A-Za-z0-9])$",hostname)
noHostnameMatch = re.match(r"^(\d+\.\d+\.\d+\.\d+\s\(\w{12}\))",hostname)
if hostnameMatch or noHostnameMatch:
return True
else:
return False
def ipAddress(ipAddress):
ipMatch= re.match(r"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$",ipAddress)
if ipMatch:
return True
else:
return False
def ignoreStatus(ignored):
ignoreMatch = re.match(r"^[0-1]$",ignored)
if ignoreMatch:
return True
else:
return False
|
examples/multioutput-example/predict.py | Anenizer/igel | 3,110 | 11069011 | <filename>examples/multioutput-example/predict.py
from igel import Igel
"""
The goal of igel is to use ML without writing code. Therefore, the simplest way to use igel is from the terminal:
you can run `igel predict -dp path_to_dataset`.
Alternatively, you can use igel from code, as the example below demonstrates.
However, I suggest you try the igel CLI first. Type `igel -h` in your terminal to learn more.
"""
mock_pred_params = {'data_path': './test-linnerud.csv',
'cmd': 'predict'}
Igel(**mock_pred_params)
|
examples/sharepoint/files/upload_file.py | juguerre/Office365-REST-Python-Client | 544 | 11069018 | <reponame>juguerre/Office365-REST-Python-Client<gh_stars>100-1000
import os
from office365.sharepoint.client_context import ClientContext
from tests import test_user_credentials, test_team_site_url
test_team_site_url = test_team_site_url
ctx = ClientContext(test_team_site_url).with_credentials(test_user_credentials)
path = "../../data/report #123.csv"
with open(path, 'rb') as content_file:
file_content = content_file.read()
list_title = "Documents"
target_folder = ctx.web.lists.get_by_title(list_title).root_folder
name = os.path.basename(path)
target_file = target_folder.upload_file(name, file_content).execute_query()
print("File has been uploaded to url: {0}".format(target_file.serverRelativeUrl))
|
src/eventgrid/azext_eventgrid/vendored_sdks/eventgrid/operations/__init__.py | Mannan2812/azure-cli-extensions | 207 | 11069030 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from ._domains_operations import DomainsOperations
from ._domain_topics_operations import DomainTopicsOperations
from ._event_channels_operations import EventChannelsOperations
from ._event_subscriptions_operations import EventSubscriptionsOperations
from ._system_topic_event_subscriptions_operations import SystemTopicEventSubscriptionsOperations
from ._partner_topic_event_subscriptions_operations import PartnerTopicEventSubscriptionsOperations
from ._operations import Operations
from ._partner_namespaces_operations import PartnerNamespacesOperations
from ._partner_registrations_operations import PartnerRegistrationsOperations
from ._partner_topics_operations import PartnerTopicsOperations
from ._private_endpoint_connections_operations import PrivateEndpointConnectionsOperations
from ._private_link_resources_operations import PrivateLinkResourcesOperations
from ._system_topics_operations import SystemTopicsOperations
from ._topics_operations import TopicsOperations
from ._extension_topics_operations import ExtensionTopicsOperations
from ._topic_types_operations import TopicTypesOperations
__all__ = [
'DomainsOperations',
'DomainTopicsOperations',
'EventChannelsOperations',
'EventSubscriptionsOperations',
'SystemTopicEventSubscriptionsOperations',
'PartnerTopicEventSubscriptionsOperations',
'Operations',
'PartnerNamespacesOperations',
'PartnerRegistrationsOperations',
'PartnerTopicsOperations',
'PrivateEndpointConnectionsOperations',
'PrivateLinkResourcesOperations',
'SystemTopicsOperations',
'TopicsOperations',
'ExtensionTopicsOperations',
'TopicTypesOperations',
]
|
pyinfra/operations/windows.py | yggdr/pyinfra | 1,532 | 11069040 | <gh_stars>1000+
'''
The windows module handles misc windows operations.
'''
from __future__ import unicode_literals
from pyinfra.api import operation
# Tip: Use 'Get-Command -Noun Service' to search for what commands are available or
# simply 'Get-Command' to see what you can do...)
# Tip: To see the windows help page about a command, use 'Get-Help'.
# Might have to run 'Update-Help' if you want to use arguments like '-Examples'.
# ex: 'Get-Help Stop-Service'
# ex: 'Get-Help Stop-Service -Examples'
# ex: 'Get-Help Stop-Service -Detailed'
# ex: 'Get-Help Stop-Service -Full'
# FUTURE: add ability to stop processes (ex: "Stop-Process <id>")
@operation
def service(service, running=True, restart=False, suspend=False, state=None, host=None):
'''
Stop/Start a Windows service.
+ service: name of the service to manage
+ running: whether the service should be running or stopped
+ restart: whether the service should be restarted
+ suspend: whether the service should be suspended
Example:
.. code:: python
windows.service(
{'Stop the spooler service'},
'service',
running=False,
)
'''
if suspend or not running:
if suspend:
yield 'Suspend-Service -Name {0}'.format(service)
else:
yield 'Stop-Service -Name {0}'.format(service)
else:
if restart:
yield 'Restart-Service -Name {0}'.format(service)
else:
if running:
yield 'Start-Service -Name {0}'.format(service)
@operation
def reboot(state=None, host=None):
'''
Restart the server.
'''
yield 'Restart-Computer -Force'
|
flexneuart/retrieval/cand_provider.py | gitter-badger/FlexNeuART | 101 | 11069048 | <filename>flexneuart/retrieval/cand_provider.py
#
# Copyright 2014+ Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Access to FlexNeuART candidate providers (i.e., basic querying)
"""
from collections import namedtuple
from jnius import autoclass
from flexneuart.config import TEXT_FIELD_NAME
from flexneuart.retrieval.utils import query_dict_to_dataentry_fields, DataEntryFields
CandidateEntry = namedtuple('CandidateEntry', ['doc_id', 'score'])
JCandidateEntry = autoclass('edu.cmu.lti.oaqa.flexneuart.cand_providers.CandidateEntry')
JCandidateProvider = autoclass('edu.cmu.lti.oaqa.flexneuart.cand_providers.CandidateProvider')
PROVIDER_TYPE_LUCENE = JCandidateProvider.CAND_TYPE_LUCENE
PROVIDER_TYPE_NMSLIB = JCandidateProvider.CAND_TYPE_NMSLIB
PROVIDER_TYPE_TREC_RUNS = JCandidateProvider.CAND_TYPE_TREC_RUNS
PROVIDER_TYPE_LIST = [PROVIDER_TYPE_LUCENE, PROVIDER_TYPE_NMSLIB, PROVIDER_TYPE_TREC_RUNS]
FAKE_QUERY_ID='fake_query_id'
def create_cand_provider(resource_manager, provider_type, provider_uri, add_config_file=None):
"""Create a candidate provider (for basic querying). Configuration and
index file paths are relative to the collection root (stored in the resource manager)
:param resource_manager: a resource manager object
:param provider_type: a provider type
:param provider_uri: a provider index location (or address, e.g., for NMSLIB)
:param add_config_file: an optional provider configuration file (not needed for Lucene and NMSLIB)
:return: a candidate provider object
"""
if provider_type not in PROVIDER_TYPE_LIST:
raise Exception(f'Unsupported provider type: {provider_type}, supported providers are: ' + ' '.join(PROVIDER_TYPE_LIST))
# FlexNeuART is multi-thread and for each thread we may need a separate provider object
# (if the provider is not thread-safe), but in Python we generate only one provider (as we
# have no real threads anyways)
return resource_manager.createCandProviders(provider_type,
provider_uri,
add_config_file,
1)[0]
def create_text_query_obj(query_text,
query_id=FAKE_QUERY_ID, field_name=TEXT_FIELD_NAME):
"""Create a Java object with text query information.
:param query_text: query text: *WHITE-SPACE* tokenized query tokens
:param query_id: a query ID (can be anything or just stick to default)
:param field_name: a field name (currently it's hardcoded in FlexNeuART anyways, so don't change this default)
:return:
"""
obj = DataEntryFields(str(query_id))
obj.setString(field_name, query_text)
return obj
def run_query_internal(cand_provider, top_qty, query_obj):
"""An auxilliary function not intended to be used directly"""
cand_info = cand_provider.getCandidates(0, query_obj, top_qty)
return cand_info.mNumFound, \
[CandidateEntry(doc_id=e.mDocId, score=e.mScore) for e in cand_info.mEntries]
def run_text_query(cand_provider,
top_qty,
query_text,
query_id=FAKE_QUERY_ID, field_name=TEXT_FIELD_NAME):
"""Run a single-field text query.
:param cand_provider: a candidate provider object
:param top_qty: a number of top-scored entries to return
:param query_text: query text: *WHITE-SPACE* tokenized query tokens
:param query_id: a query ID (can be anything or just stick to default)
    :param field_name: a field name (currently it's hardcoded in FlexNeuART anyway, so don't change this default)
:return: a tuple: # of entries found, an array of candidate entries: (document ID, score) objects
"""
query_obj = create_text_query_obj(query_text, query_id, field_name)
return run_query_internal(cand_provider, top_qty, query_obj)
def run_query(cand_provider,
top_qty,
query_dict,
default_query_id=FAKE_QUERY_ID):
"""Run a generic (not-necessarily single-field text) query.
:param cand_provider: a candidate provider object
:param top_qty: a number of top-scored entries to return
:param query_dict: query key-value dictionary that may or may not have the query/doc ID
:param default_query_id: a default query ID to use if query_dict has none.
:return: a tuple: # of entries found, an array of candidate entries: (document ID, score) objects
"""
if type(query_dict) != dict:
raise Exception('A query object should be a dictionary!')
query_obj = query_dict_to_dataentry_fields(query_dict, default_query_id)
return run_query_internal(cand_provider, top_qty, query_obj)
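# --- Usage sketch (added for illustration; not part of the original module) ---
# It assumes a resource manager has already been created for the collection
# (see the flexneuart resource-manager factory); the index location and the
# query text below are placeholders.
def _example_lucene_query(resource_manager):
    """Illustrative only: retrieve the top-20 candidates from a Lucene index."""
    cand_prov = create_cand_provider(resource_manager, PROVIDER_TYPE_LUCENE, 'lucene_index')
    num_found, cand_entries = run_text_query(cand_prov, 20, 'white space tokenized query text')
    for entry in cand_entries:
        print(entry.doc_id, entry.score)
    return num_found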
|
docs/examples/ex22.py | bhaveshshrimali/scikit-fem | 238 | 11069054 | <gh_stars>100-1000
r"""Adaptive Poisson equation.
This example solves `ex01.py` adaptively in an L-shaped domain.
Using linear elements, the error indicators read
.. math::
\eta_K^2 = h_K^2 \|f\|_{0,K}^2
for each element :math:`K`, and
.. math::
\eta_E^2 = h_E \| [[\nabla u_h \cdot n ]] \|_{0,E}^2
for each edge :math:`E`.
"""
from skfem import *
from skfem.models.poisson import laplace
from skfem.helpers import grad
import numpy as np
m = MeshTri.init_lshaped().refined(2)
e = ElementTriP1()
def load_func(x, y):
return 1.
@LinearForm
def load(v, w):
x, y = w.x
return load_func(x, y) * v
def eval_estimator(m, u):
# interior residual
basis = Basis(m, e)
@Functional
def interior_residual(w):
h = w.h
x, y = w.x
return h ** 2 * load_func(x, y) ** 2
eta_K = interior_residual.elemental(basis, w=basis.interpolate(u))
# facet jump
fbasis = [InteriorFacetBasis(m, e, side=i) for i in [0, 1]]
w = {'u' + str(i + 1): fbasis[i].interpolate(u) for i in [0, 1]}
@Functional
def edge_jump(w):
h = w.h
n = w.n
dw1 = grad(w['u1'])
dw2 = grad(w['u2'])
return h * ((dw1[0] - dw2[0]) * n[0] +
(dw1[1] - dw2[1]) * n[1]) ** 2
eta_E = edge_jump.elemental(fbasis[0], **w)
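    # distribute the per-facet indicators to elements: each interior facet
    # contributes half of its eta_E^2 to both neighbouring triangles (via m.t2f)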
tmp = np.zeros(m.facets.shape[1])
np.add.at(tmp, fbasis[0].find, eta_E)
eta_E = np.sum(.5 * tmp[m.t2f], axis=0)
return eta_K + eta_E
if __name__ == "__main__":
from skfem.visuals.matplotlib import draw, plot, show
draw(m)
for itr in reversed(range(9)):
basis = Basis(m, e)
K = asm(laplace, basis)
f = asm(load, basis)
I = m.interior_nodes()
u = solve(*condense(K, f, I=I))
if itr > 0:
m = m.refined(adaptive_theta(eval_estimator(m, u)))
if __name__ == "__main__":
draw(m)
plot(m, u, shading='gouraud')
show()
|
OcCo_Torch/models/pointnet_jigsaw.py | sun-pyo/OcCo | 158 | 11069063 | # Copyright (c) 2020. <NAME>, <EMAIL>
import torch, torch.nn as nn, torch.nn.functional as F
from pointnet_util import PointNetEncoder, feature_transform_regularizer
class get_model(nn.Module):
def __init__(self, num_class, num_channel=3, **kwargs):
super(get_model, self).__init__()
self.num_class = num_class
self.feat = PointNetEncoder(global_feat=False,
feature_transform=True,
channel=num_channel)
self.conv1 = nn.Conv1d(1088, 512, 1)
self.conv2 = nn.Conv1d(512, 256, 1)
self.conv3 = nn.Conv1d(256, 128, 1)
self.conv4 = nn.Conv1d(128, self.num_class, 1)
self.bn1 = nn.BatchNorm1d(512)
self.bn2 = nn.BatchNorm1d(256)
self.bn3 = nn.BatchNorm1d(128)
def forward(self, x):
batch_size, _, num_points = x.size()
x, trans, trans_feat = self.feat(x)
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = self.conv4(x)
x = x.transpose(2, 1).contiguous()
x = F.log_softmax(x.view(-1, self.num_class), dim=-1)
x = x.view(batch_size, num_points, self.num_class)
return x, trans_feat
class get_loss(nn.Module):
def __init__(self, mat_diff_loss_scale=0.001):
super(get_loss, self).__init__()
self.mat_diff_loss_scale = mat_diff_loss_scale
def forward(self, pred, target, trans_feat):
loss = F.nll_loss(pred, target)
mat_diff_loss = feature_transform_regularizer(trans_feat)
total_loss = loss + mat_diff_loss * self.mat_diff_loss_scale
return total_loss
if __name__ == '__main__':
model = get_model(num_class=13, num_channel=3)
xyz = torch.rand(12, 3, 2048)
model(xyz)
|
tests/st/func/wizard/test_resnet50.py | lvyufeng/mindconverter_standalone | 216 | 11069097 | <gh_stars>100-1000
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Function:
Test the various combinations based on ResNet50.
"""
import os
import pytest
from mindinsight.wizard.base.utility import load_network_maker
NETWORK_NAME = 'resnet50'
class TestResNet50:
"""Test ResNet50 Module."""
@pytest.mark.level0
@pytest.mark.env_single
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.parametrize('params', [{
'config': {'loss': 'SoftmaxCrossEntropyWithLogits',
'optimizer': 'Momentum',
'dataset': 'Cifar10'},
'dataset_loader_name': 'Cifar10Dataset'
}, {
'config': {'loss': 'SoftmaxCrossEntropyWithLogits',
'optimizer': 'Adam',
'dataset': 'Cifar10'},
'dataset_loader_name': 'Cifar10Dataset'
}, {
'config': {'loss': 'SoftmaxCrossEntropyWithLogits',
'optimizer': 'SGD',
'dataset': 'Cifar10'},
'dataset_loader_name': 'Cifar10Dataset'
}, {
'config': {'loss': 'SoftmaxCrossEntropyWithLogits',
'optimizer': 'Momentum',
'dataset': 'ImageNet'},
'dataset_loader_name': 'ImageFolderDataset'
}, {
'config': {'loss': 'SoftmaxCrossEntropyWithLogits',
'optimizer': 'Adam',
'dataset': 'ImageNet'},
'dataset_loader_name': 'ImageFolderDataset'
}, {
'config': {'loss': 'SoftmaxCrossEntropyWithLogits',
'optimizer': 'SGD',
'dataset': 'ImageNet'},
'dataset_loader_name': 'ImageFolderDataset'
}])
def test_combinations(self, params):
"""Do testing."""
network_maker_name = NETWORK_NAME
config = params['config']
dataset_loader_name = params['dataset_loader_name']
network_maker = load_network_maker(network_maker_name)
network_maker.configure(config)
self.source_files = network_maker.generate(**config)
self.check_scripts()
self.check_src(dataset_loader_name, config)
self.check_train_eval_readme(config['dataset'], config['loss'], config['optimizer'])
def check_src(self, dataset_name, config):
"""Check src file."""
dataset_is_right = False
config_dataset_is_right = False
config_optimizer_is_right = False
network_is_right = False
        cross_entropy_smooth_is_right = False
generator_lr_is_right = False
for source_file in self.source_files:
if source_file.file_relative_path == os.path.normpath('src/dataset.py'):
if dataset_name in source_file.content:
dataset_is_right = True
if source_file.file_relative_path == os.path.join('src', NETWORK_NAME.lower() + '.py'):
network_is_right = True
if source_file.file_relative_path == os.path.normpath('src/CrossEntropySmooth.py'):
                cross_entropy_smooth_is_right = True
if source_file.file_relative_path == os.path.normpath('src/lr_generator.py'):
generator_lr_is_right = True
if source_file.file_relative_path == os.path.normpath('src/config.py'):
content = source_file.content
config_dataset_is_right = self._check_config_dataset(config, content)
config_optimizer_is_right = self._check_config_optimizer(config, content)
assert dataset_is_right
assert config_dataset_is_right
assert config_optimizer_is_right
assert network_is_right
        assert cross_entropy_smooth_is_right
assert generator_lr_is_right
@staticmethod
def _check_config_dataset(config, content):
"""Check dataset in config."""
config_dataset_is_right = False
if config['dataset'] == 'Cifar10':
if "'num_classes': 10" in content \
and "'warmup_epochs': 5" in content \
and "'lr_decay_mode': 'poly'" in content:
config_dataset_is_right = True
elif config['dataset'] == 'ImageNet':
if "'num_classes': 1001" in content \
and "'warmup_epochs': 0" in content \
and "'lr_decay_mode': 'cosine'":
config_dataset_is_right = True
return config_dataset_is_right
@staticmethod
def _check_config_optimizer(config, content):
"""Check optimizer in config."""
config_optimizer_is_right = False
if config['optimizer'] == 'Momentum':
if "'lr': 0.01" in content and \
"'momentum': 0.9" in content:
config_optimizer_is_right = True
elif config['optimizer'] == 'SGD':
if "'lr': 0.01" in content:
config_optimizer_is_right = True
else:
if "'lr': 0.001" in content:
config_optimizer_is_right = True
return config_optimizer_is_right
def check_train_eval_readme(self, dataset_name, loss_name, optimizer_name):
"""Check train and eval."""
train_is_right = False
eval_is_right = False
readme_is_right = False
for source_file in self.source_files:
if source_file.file_relative_path == 'train.py':
content = source_file.content
if 'resnet50' in content and optimizer_name in content:
if dataset_name == 'ImageNet' and loss_name == 'SoftmaxCrossEntropyWithLogits' \
and 'loss = CrossEntropySmooth' in content:
train_is_right = True
elif loss_name in content:
train_is_right = True
if source_file.file_relative_path == 'eval.py':
content = source_file.content
if 'resnet50' in content:
if dataset_name == 'ImageNet' and loss_name == 'SoftmaxCrossEntropyWithLogits' \
and 'loss = CrossEntropySmooth' in content:
eval_is_right = True
elif loss_name in content:
eval_is_right = True
if source_file.file_relative_path == 'README.md':
content = source_file.content
if 'ResNet50' in content and dataset_name in content:
readme_is_right = True
assert train_is_right
assert eval_is_right
assert readme_is_right
def check_scripts(self):
"""Check scripts."""
exist_run_distribute_train = False
exist_run_distribute_train_gpu = False
exist_run_eval = False
exist_run_eval_gpu = False
exist_run_standalone_train = False
exist_run_standalone_train_gpu = False
for source_file in self.source_files:
if source_file.file_relative_path == os.path.normpath('scripts/run_distribute_train.sh'):
exist_run_distribute_train = True
if source_file.file_relative_path == os.path.normpath('scripts/run_distribute_train_gpu.sh'):
exist_run_distribute_train_gpu = True
if source_file.file_relative_path == os.path.normpath('scripts/run_eval.sh'):
exist_run_eval = True
if source_file.file_relative_path == os.path.normpath('scripts/run_eval_gpu.sh'):
exist_run_eval_gpu = True
if source_file.file_relative_path == os.path.normpath('scripts/run_standalone_train.sh'):
exist_run_standalone_train = True
if source_file.file_relative_path == os.path.normpath('scripts/run_standalone_train_gpu.sh'):
exist_run_standalone_train_gpu = True
assert exist_run_distribute_train
assert exist_run_distribute_train_gpu
assert exist_run_eval
assert exist_run_eval_gpu
assert exist_run_standalone_train
assert exist_run_standalone_train_gpu
|
mypaas/stats/client_code.py | lilahtovmoon/mypaas | 208 | 11069109 | <reponame>lilahtovmoon/mypaas<filename>mypaas/stats/client_code.py
"""
Python code that will be transpiled to JS to implement the client side.
"""
from pscript.stubs import window, document, undefined, Math, Date # JS
from pscript.stubs import data_per_db, text_color # are made available
panels = []
# %% Button callbacks
def toggle_utc():
info = get_hash_info()
if info.get("utc", False):
info.pop("utc")
else:
info["utc"] = True
return refresh(None, info)
def toggle_columns():
info = get_hash_info()
columns = info.get("columns", 0)
if not columns:
if window.document.body.clientWidth >= 1200:
info["columns"] = 2
else:
info["columns"] = 1
else:
info.pop("columns")
return refresh(None, info)
def update_range(action=""):
ndays = window.ndays
daysago = window.daysago
if action == "zoomout":
if ndays < 4:
ndays += 1
elif ndays < 10:
ndays += 2
elif ndays < 30:
ndays += 5
else:
ndays += 10
elif action == "zoomin":
if ndays <= 4:
ndays -= 1
elif ndays <= 10:
ndays -= 2
elif ndays <= 30:
ndays -= 5
else:
ndays -= 10
ndays = max(1, ndays)
elif action == "older":
daysago += ndays
elif action == "newer":
daysago -= ndays
info = get_query_info()
info["ndays"] = ndays
if daysago > 0:
info["daysago"] = daysago
else:
info.pop("daysago", None)
return refresh(info, None)
def refresh(query_info=None, hash_info=None):
if query_info is None:
query_info = get_query_info()
if hash_info is None:
hash_info = get_hash_info()
url = window.location.origin + window.location.pathname
encode_uri_component = window.encodeURIComponent
if query_info:
url += "?" + "&".join(
[key + "=" + encode_uri_component(val) for key, val in query_info.items()]
)
if True:
url += "#" + "&".join(
[key + "=" + encode_uri_component(val) for key, val in hash_info.items()]
)
if url == window.location.href:
window.location.reload()
else:
window.location.href = url
return undefined
def panel_sort_func(x):
t = x.split("|")[1]
if t:
t = {"num": "anum", "cat": "zcat"}.get(t, t)
return (t + "|" + x).lower()
# %%
def on_init():
for dbname, data in data_per_db.items():
# Create panel container (and a title)
title_el = document.createElement("div")
container_el = document.createElement("div")
title_el.innerText = dbname # .replace("_", " ")
title_el.classList.add("panelcontainertitle")
container_el.classList.add("panelcontainer")
document.body.appendChild(title_el)
document.body.appendChild(container_el)
if dbname == "system" and window.info:
panels.append(InfoPanel(container_el, dbname, "info", "system info"))
# Collect panel types
panel_kinds = {}
for i in range(len(data)):
aggr = data[i]
for key in aggr.keys():
panel_kinds[key] = True
# Sort the panel types - count, dcount, num, cat
panel_kinds = panel_kinds.keys()
panel_kinds.sort(key=panel_sort_func)
# Create panels
for i in range(len(panel_kinds)):
key = panel_kinds[i]
# Select panel class
key_parts = key.split("|")
if len(key_parts) == 2:
name, type = key_parts
unit = ""
elif len(key_parts) == 3:
name, type, unit = key_parts
else:
continue
if type == "time":
continue # skip time info
elif type == "count":
title = "# " + name
Cls = CountPanel # noqa: N806
elif type == "dcount":
title = "# daily " + name
Cls = DailyCountPanel # noqa: N806
elif type == "mcount":
title = "# monthly " + name
Cls = MonthlyCountPanel # noqa: N806
elif type == "cat":
title = name + "'s"
Cls = CategoricalPanel # noqa: N806
elif type == "num":
title = name
Cls = NumericalPanel # noqa: N806
else:
window.console.warn(f"Don't know what to do with {key}")
continue
if unit:
title = title + " " + unit
# Create panel
panel = Cls(container_el, dbname, key, title, unit)
panels.append(panel)
on_hash_change() # calls on_resize()
def on_resize():
window.setTimeout(_on_resize, 1)
def get_query_info():
url = window.location.href
q = ""
if "?" in url:
q = window.location.href.split("?", 1)[-1].split("#")[0]
return get_dict_from_hash_or_query(q)
def get_hash_info():
return get_dict_from_hash_or_query(window.location.hash.lstrip("#"))
def get_dict_from_hash_or_query(s):
info = {}
for s in s.split("&"):
key, _, val = s.partition("=")
if key and val:
val = window.decodeURIComponent(val)
if val.lower() == "true":
val = True
elif val.lower() == "false":
val = False
elif val in "0123456789":
val = int(val)
info[key] = val
elif s:
info[s] = True
return info
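# Example of the parsing above (illustrative): the string "columns=2&utc"
# would yield {"columns": 2, "utc": True}; bare keys become boolean flags.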
def on_hash_change():
info = get_hash_info()
containers = document.getElementsByClassName("panelcontainer")
columns = int(info.get("columns", "")) or 0
if columns > 0:
grid_template_columns = "auto ".repeat(columns)
else:
grid_template_columns = None
height = int(info.get("height", "")) or 0
if height > 0:
grid_auto_rows = height + "px"
else:
grid_auto_rows = None
for i in range(len(containers)):
containers[i].style.gridAutoRows = grid_auto_rows
containers[i].style.gridTemplateColumns = grid_template_columns
on_resize()
def _on_resize():
for panel in panels:
if panel.canvas:
# Get dimensions
w = panel.node.clientWidth - 10
h = panel.node.clientHeight - 35
pixel_ratio = get_pixel_ratio(panel.canvas.getContext("2d"))
# Set dimensions
panel.canvas.style.width = w + "px"
panel.canvas.style.height = h + "px"
panel.canvas.width = w * pixel_ratio
panel.canvas.height = h * pixel_ratio
# Set some info on the object
panel.pixel_ratio = pixel_ratio
panel.width = w
panel.height = h
if panel.draw:
panel.draw()
def get_pixel_ratio(ctx):
"""Get the ratio of logical pixel to screen pixel."""
PSCRIPT_OVERLOAD = False # noqa
dpr = window.devicePixelRatio or 1
bsr = (
ctx.webkitBackingStorePixelRatio
or ctx.mozBackingStorePixelRatio
or ctx.msBackingStorePixelRatio
or ctx.oBackingStorePixelRatio
or ctx.backingStorePixelRatio
or 1
)
return dpr / bsr
def _create_tick_units():
# Create tick units
tick_units = []
for e in range(-14, 14):
for i in [10, 20, 25, 50]:
tick_units.append(i * 10 ** e)
return tick_units
_tick_units = _create_tick_units()
# def split_group(s, sep):
# group, _, sub = s.partition(sep)
# if len(sub) == 0:
# return "", group
# else:
# return group, sub
class BasePanel:
def __init__(self, container, dbname, key, title, unit):
self.dbname = dbname
self.key = key
self.title = title
self.unit = unit
self.node = document.createElement("div")
self.node.classList.add("panel")
container.appendChild(self.node)
self.titlenode = document.createElement("div")
self.titlenode.classList.add("title")
self.titlenode.innerText = title
self.node.appendChild(self.titlenode)
class InfoPanel(BasePanel):
def __init__(self, *args):
super().__init__(*args)
self.content = document.createElement("div")
self.content.classList.add("content")
self.node.appendChild(self.content)
hider = document.createElement("div")
hider.classList.add("scrollhider")
self.node.appendChild(hider)
self._create()
def _create(self):
PSCRIPT_OVERLOAD = False # noqa
if not window.info:
return
lines = []
lines.append("<table>")
for key, value in window.info.items():
lines.append(f"<tr> <td>{key}</td> <td>{value}</td> </tr>")
lines.append("</table>")
self.content.innerHTML = "\n".join(lines)
class CategoricalPanel(InfoPanel):
def _create(self):
PSCRIPT_OVERLOAD = False # noqa
key = self.key
# First aggregate
data = data_per_db[self.dbname]
totalcount = 0
rows = {}
for i in range(len(data)):
aggr = data[i]
meas = aggr.get(key, {})
for k, v in meas.items():
rows[k] = rows.get(k, 0) + v
totalcount += v
# Group so we can sort in a grouped fashion
groups = {}
group_counts = {}
for key, count in rows.items():
group, _, subkey = key.partition(" - ")
groups.setdefault(group, []).append((subkey, count))
group_counts[group] = group_counts.get(group, 0) + count
group_counts = [(k, v) for k, v in group_counts.items()]
# Sort groups and items inside the groupd
group_counts.sort(key=lambda x: -x[1])
for subs in groups.values():
subs.sort(key=lambda x: -x[1])
lines = []
lines.append("<table>")
for group, _ in group_counts:
for sub, count in groups[group]:
key = group + " - " + sub
key = key.strip(" -")
pct = 100 * count / totalcount
lines.append(
f"<tr> <td>{pct:0.0f}%</td> <td>{count}</td> <td>{key}</td> </tr>"
)
lines.append("</table>")
self.content.innerHTML = "\n".join(lines)
class PlotPanel(BasePanel):
_values_are_integer = False
def __init__(self, *args):
super().__init__(*args)
self.canvas = document.createElement("canvas")
self.node.appendChild(self.canvas)
def _draw_text(self, ctx, text, x, y, angle=0):
PSCRIPT_OVERLOAD = False # noqa
ctx.save()
ctx.translate(x, y)
ctx.scale(1, -1)
ctx.rotate(angle)
ctx.fillText(text, 0, 0)
ctx.restore()
def _get_min_max(self):
return 0, 1
def _get_ticks(self, scale, mi, ma, min_tick_dist=40):
PSCRIPT_OVERLOAD = False # noqa
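        # Returns a dict mapping tick position (in data units) to its label string.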
        # Inspired by flexx's PlotWidget, which in turn took inspiration from visvis
# Get tick multipliers and unit modifier
if self.unit == "iB":
if ma >= 2 ** 30:
mult, unit = 1 / 2 ** 30, "G"
elif ma >= 2 ** 20:
mult, unit = 1 / 2 ** 20, "M"
elif ma >= 2 ** 10:
mult, unit = 1 / 2 ** 10, "K"
else:
mult, unit = 1, ""
else:
if ma >= 10_000_000_000:
mult, unit = 1 / 1_000_000_000, "G"
elif ma >= 10_000_000:
mult, unit = 1 / 1_000_000, "M"
elif ma >= 10000:
mult, unit = 1 / 1000, "K"
elif ma < 0.0001:
mult, unit = 1_000_000, "u"
elif ma < 0.1:
mult, unit = 1000, "m"
else:
mult, unit = 1, ""
if self.unit in ("iB", "s"):
title = self.title.replace(" " + self.unit, " " + unit + self.unit)
self.titlenode.innerText = title
unit = ""
# Get tick unit
is_int = self._values_are_integer
for tick_unit in _tick_units:
if is_int and str(tick_unit).indexOf(".") >= 0:
continue
if tick_unit * scale / mult >= min_tick_dist:
break
else:
return []
# Calculate tick values
first_tick = Math.ceil(mi * mult / tick_unit) * tick_unit
last_tick = Math.floor(ma * mult / tick_unit) * tick_unit
ticks = {}
t = first_tick # t does not mean time here!
while t <= last_tick:
ticks[t / mult] = t
t += tick_unit
# Stringify
for realt, t in ticks.items():
if t == 0:
s = "0"
elif mult == 1 and is_int:
s = str(int(t))
else:
s = t.toPrecision(4) # t is already multiplied
if "." in s:
while len(s) > 5 and s.endsWith("0"):
s = s[:-1]
ticks[realt] = s + unit
return ticks
def draw(self):
PSCRIPT_OVERLOAD = False # noqa
ctx = self.canvas.getContext("2d")
# Prepare hidpi mode for canvas (flush state just in case)
for i in range(4):
ctx.restore()
ctx.save()
ctx.scale(self.pixel_ratio, self.pixel_ratio)
# Flip y-axis
ctx.scale(1, -1)
ctx.translate(0, -self.height)
# Clear bg
ctx.clearRect(0, 0, self.width, self.height)
# Determine drawing area
x0 = 45
y0 = 35
width = self.width - x0 - 15
height = self.height - y0 - 5
data = data_per_db[self.dbname]
if len(data) == 0:
return
# Get bounding box
t1 = data[0].time_start
t2 = data[-1].time_stop
mi, ma = self._get_min_max()
if ma <= mi:
return
hscale = width / (t2 - t1)
vscale = height / (ma - mi)
unix_from_utc_tuple = Date.UTC # avoid triggering new
utc = get_hash_info().get("utc", False)
xticks = {}
# Prepare x ticks for hours (one hour is the smallest granularity)
hourly_tick_units = (1, 3600), (2, 7200), (6, 21600)
min_tick_dist = 60
for nhours, tick_unit in hourly_tick_units:
if tick_unit * hscale >= min_tick_dist:
break
else:
tick_unit = 0
#
if tick_unit > 0:
d = Date(t1 * 1000)
if utc:
tup = [
d.getUTCFullYear(),
d.getUTCMonth(),
d.getUTCDate(),
d.getUTCHours(),
]
tup[-1] = nhours * int(tup[-1] / nhours)
t = unix_from_utc_tuple(tup[0], tup[1], tup[2], tup[3]) / 1000
else:
tup = [d.getFullYear(), d.getMonth(), d.getDate(), d.getHours()]
tup[-1] = nhours * int(tup[-1] / nhours)
t = Date(tup[0], tup[1], tup[2], tup[3]).getTime() / 1000
while t <= t2:
if t >= t1:
d = Date(t * 1000)
if utc:
xticks[t] = f"{d.getUTCHours():02i}:{d.getUTCMinutes():02i}"
else:
xticks[t] = f"{d.getHours():02i}:{d.getMinutes():02i}"
t += tick_unit
# Prepare x ticks for days/months
day_tick_units = (2, 1), (2, 2), (2, 5), (1, 1), (1, 2), (1, 3), (0, 365)
min_tick_dist = 60
for dindex, nsomething in day_tick_units:
tick_unit = nsomething * [365 * 86400, 30 * 86400, 86400][dindex]
if tick_unit * hscale >= min_tick_dist:
break
else:
tick_unit = nsomething = 0
#
n_date_ticks = 0
if nsomething > 0:
d = Date(t1 * 1000)
if utc:
tup = [d.getUTCFullYear(), d.getUTCMonth(), d.getUTCDate()]
tup[dindex] = nsomething * int(tup[dindex] / nsomething)
t = unix_from_utc_tuple(tup[0], tup[1], tup[2]) / 1000
else:
tup = [d.getFullYear(), d.getMonth(), d.getDate()]
tup[dindex] = nsomething * int(tup[dindex] / nsomething)
t = Date(tup[0], tup[1], tup[2]).getTime() / 1000
while t <= t2:
if t >= t1:
n_date_ticks += 1
d = Date(t * 1000)
if utc:
dd = f"{d.getUTCDate():02i}"
mm = f"{d.getUTCMonth()+1:02i}"
yy = f"{d.getUTCFullYear()}"
xticks[t] = f"{dd}-{mm}-{yy}"
else:
dd = f"{d.getDate():02i}"
mm = f"{d.getMonth()+1:02i}"
yy = f"{d.getFullYear()}"
xticks[t] = f"{dd}-{mm}-{yy}"
tup[dindex] += nsomething
if utc:
t = unix_from_utc_tuple(tup[0], tup[1], tup[2]) / 1000
else:
t = Date(tup[0], tup[1], tup[2]).getTime() / 1000
#
extra_x_tick = ""
if n_date_ticks < 2:
xtickskeys = xticks.keys()
if len(xtickskeys) > 0 and hscale * (xtickskeys[0] - t1) < 30:
xticks.pop(xtickskeys[0])
d = Date(t1 * 1000)
if utc:
extra_x_tick = (
f"{d.getUTCFullYear()}-{d.getUTCMonth()+1:02i}-{d.getUTCDate():02i}"
)
else:
extra_x_tick = (
f"{d.getFullYear()}-{d.getMonth()+1:02i}-{d.getDate():02i}"
)
# Prepare y ticks
yticks = self._get_ticks(vscale, mi, ma, 25) # text -> value
# Prepare drawing
ctx.lineWidth = 1
# Draw grid lines
ctx.strokeStyle = "rgba(128, 128, 128, 0.3)"
ctx.beginPath()
for v, text in yticks.items():
y = y0 + (float(v) - mi) * vscale
ctx.moveTo(x0, y)
ctx.lineTo(x0 + width, y)
ctx.stroke()
# Draw x ticks
ctx.strokeStyle = text_color
ctx.fillStyle = text_color
ctx.textAlign = "center"
ctx.textBaseline = "top" # middle
ctx.beginPath()
for t, text in xticks.items():
x = x0 + (float(t) - t1) * hscale
ctx.moveTo(x, y0)
ctx.lineTo(x, y0 - 4)
ctx.stroke()
for t, text in xticks.items():
x = x0 + (float(t) - t1) * hscale
angle = 0 # -0.15 * Math.PI
x = min(x, x0 + width - 15)
self._draw_text(ctx, text, x, y0 - 10, angle)
if extra_x_tick:
ctx.textAlign = "left"
ctx.textBaseline = "bottom"
self._draw_text(ctx, extra_x_tick, 0, 0)
# Draw y ticks
ctx.textAlign = "right"
ctx.textBaseline = "middle"
ctx.beginPath()
for v, text in yticks.items():
y = y0 + (float(v) - mi) * vscale
ctx.moveTo(x0 - 4, y)
ctx.lineTo(x0, y)
ctx.stroke()
for v, text in yticks.items():
y = y0 + (float(v) - mi) * vscale
self._draw_text(ctx, text, x0 - 8, y)
# Draw axis
ctx.strokeStyle = text_color
ctx.beginPath()
ctx.moveTo(x0, y0)
ctx.lineTo(x0 + width, y0)
ctx.moveTo(x0, y0)
ctx.lineTo(x0, y0 + height)
ctx.stroke()
# Draw content
self._draw_content(ctx, mi, ma, t1, t2, x0, y0, hscale, vscale)
# Draw local / UTC
ctx.fillStyle = "rgba(128, 128, 128, 0.5)"
ctx.textAlign = "right"
ctx.textBaseline = "bottom"
self._draw_text(ctx, "UTC" if utc else "Local time", self.width, 0)
class CountPanel(PlotPanel):
_values_are_integer = True
clr = 50, 250, 50
def _get_min_max(self):
PSCRIPT_OVERLOAD = False # noqa
key = self.key
mi = 0
ma = -9_999_999
data = data_per_db[self.dbname]
for i in range(len(data)):
aggr = data[i]
v = aggr[key]
if v is undefined:
continue
ma = max(ma, v)
return mi, ma
def _draw_content(self, ctx, mi, ma, t1, t2, x0, y0, hscale, vscale):
PSCRIPT_OVERLOAD = False # noqa
key = self.key
clr = self.clr
ctx.fillStyle = f"rgba({clr[0]}, {clr[1]}, {clr[2]}, 0.8)"
data = data_per_db[self.dbname]
for i in range(len(data)):
aggr = data[i]
v = aggr[key]
if v is undefined:
continue
if aggr.time_start > t2:
continue
x = x0 + (aggr.time_start - t1) * hscale
w = (aggr.time_stop - aggr.time_start) * hscale
w = max(w - 1, 1)
ctx.fillRect(x, y0, w, v * vscale)
class DailyCountPanel(CountPanel):
clr = 220, 250, 0
def _get_min_max(self):
PSCRIPT_OVERLOAD = False # noqa
key = self.key
mi = 0
ma = -9_999_999
self.daily = daily = []
prev_day = ""
data = data_per_db[self.dbname]
for i in range(len(data)):
aggr = data[i]
v = aggr[key]
if v is undefined:
continue
day = aggr.time_key[:10]
if day != prev_day:
if len(daily) > 0:
ma = max(ma, daily[-1][key])
new_aggr = {"time_start": aggr.time_start, "time_stop": aggr.time_stop}
new_aggr[key] = aggr[key]
daily.append(new_aggr)
prev_day = day
else:
daily[-1][key] += v
daily[-1].time_stop = aggr.time_stop
if len(daily) > 0:
ma = max(ma, daily[-1][key])
return mi, ma
def _draw_content(self, ctx, mi, ma, t1, t2, x0, y0, hscale, vscale):
PSCRIPT_OVERLOAD = False # noqa
# Draw daily
key = self.key
clr = self.clr
ctx.fillStyle = f"rgba({clr[0]}, {clr[1]}, {clr[2]}, 0.4)"
for i in range(len(self.daily)):
aggr = self.daily[i]
v = aggr[key]
if aggr.time_start > t2:
continue
x = x0 + (aggr.time_start - t1) * hscale
w = (aggr.time_stop - aggr.time_start) * hscale
w = max(w - 1, 1)
ctx.fillRect(x, y0, w, v * vscale)
# Draw per unit
super()._draw_content(ctx, mi, ma, t1, t2, x0, y0, hscale, vscale)
class MonthlyCountPanel(CountPanel):
clr = 250, 200, 0
def _get_min_max(self):
PSCRIPT_OVERLOAD = False # noqa
key = self.key
mi = 0
ma = -9_999_999
self.monthly = monthly = []
prev_month = ""
data = data_per_db[self.dbname]
for i in range(len(data)):
aggr = data[i]
v = aggr[key]
if v is undefined:
continue
month = aggr.time_key[:7]
if month != prev_month:
if len(monthly) > 0:
ma = max(ma, monthly[-1][key])
new_aggr = {"time_start": aggr.time_start, "time_stop": aggr.time_stop}
new_aggr[key] = aggr[key]
monthly.append(new_aggr)
prev_month = month
else:
monthly[-1][key] += v
monthly[-1].time_stop = aggr.time_stop
if len(monthly) > 0:
ma = max(ma, monthly[-1][key])
return mi, ma
def _draw_content(self, ctx, mi, ma, t1, t2, x0, y0, hscale, vscale):
PSCRIPT_OVERLOAD = False # noqa
# Draw monthly
key = self.key
clr = self.clr
ctx.fillStyle = f"rgba({clr[0]}, {clr[1]}, {clr[2]}, 0.4)"
for i in range(len(self.monthly)):
aggr = self.monthly[i]
v = aggr[key]
if aggr.time_start > t2:
continue
x = x0 + (aggr.time_start - t1) * hscale
w = (aggr.time_stop - aggr.time_start) * hscale
w = max(w - 1, 1)
ctx.fillRect(x, y0, w, v * vscale)
# Draw per unit
super()._draw_content(ctx, mi, ma, t1, t2, x0, y0, hscale, vscale)
class NumericalPanel(PlotPanel):
clr = 0, 220, 250
def __init__(self, *args):
super().__init__(*args)
def _get_min_max(self):
PSCRIPT_OVERLOAD = False # noqa
key = self.key
mi = +1e20
ma = -1e20
data = data_per_db[self.dbname]
for i in range(len(data)):
aggr = data[i]
meas = aggr[key]
if meas is undefined or meas.n == 0:
continue
mi = min(mi, meas.min)
ma = max(ma, meas.max)
if ma >= mi:
mi = min(0.8 * ma, mi) # Select a good min point
mi = 0
if self.unit == "%":
mi = 0
ma = max(ma, 100) # percentages can be larger than 100
return mi, ma
def _draw_content(self, ctx, mi, ma, t1, t2, x0, y0, hscale, vscale):
PSCRIPT_OVERLOAD = False # noqa
key = self.key
clr = self.clr
ctx.fillStyle = f"rgba({clr[0]}, {clr[1]}, {clr[2]}, 0.2)"
ctx.strokeStyle = f"rgba({clr[0]}, {clr[1]}, {clr[2]}, 1.0)"
data = data_per_db[self.dbname]
mean_points = []
for i in range(len(data)):
aggr = data[i]
meas = aggr[key]
if meas is undefined or meas.n == 0:
continue
if aggr.time_start > t2:
continue
x = x0 + (aggr.time_start - t1) * hscale
w = (aggr.time_stop - aggr.time_start) * hscale
w = max(w, 1)
# Draw rectangle for min max
y = y0 + (meas.min - mi) * vscale
h = (meas.max - meas.min) * vscale
ctx.fillRect(x, y, w, h)
# Draw rectangle for std
mean = meas.mean
            std = (meas.magic / meas.n) ** 0.5  # Welford: magic holds the running sum of squared deviations (M2)
st1 = max(meas.min, mean - std)
st2 = min(meas.max, mean + std)
y = y0 + (st1 - mi) * vscale
h = (st2 - st1) * vscale
ctx.fillRect(x, y, w, h)
y = y0 + (mean - mi) * vscale
mean_points.append((x + 0.3333 * w, y))
mean_points.append((x + 0.6666 * w, y))
# Draw mean
if len(mean_points) > 0:
ctx.beginPath()
ctx.moveTo(mean_points[0], mean_points[1])
for x, y in mean_points:
ctx.lineTo(x, y)
ctx.stroke()
window.addEventListener("load", on_init)
window.addEventListener("resize", on_resize)
window.addEventListener("hashchange", on_hash_change)
|
test/unit/test_k8s_job_name_sanitizer.py | Netflix/metaflow | 5,821 | 11069117 | import re
from metaflow.plugins.aws.eks.kubernetes import generate_rfc1123_name
rfc1123 = re.compile(r'^[a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?$')
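# RFC 1123 (DNS label) rule encoded above: start and end with an alphanumeric
# character, '-' allowed in between, at most 63 characters in total.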
def test_job_name_sanitizer():
# Basic name
assert rfc1123.match(generate_rfc1123_name('HelloFlow', '1', 'end', '321', '1'))
# Step name ends with _
assert rfc1123.match(generate_rfc1123_name('HelloFlow', '1', '_end', '321', '1'))
# Step name starts and ends with _
assert rfc1123.match(generate_rfc1123_name('HelloFlow', '1', '_end_', '321', '1'))
# Flow name ends with _
assert rfc1123.match(generate_rfc1123_name('HelloFlow_', '1', 'end', '321', '1'))
# Same flow name, different case must produce different job names
assert generate_rfc1123_name('Helloflow', '1', 'end', '321', '1') != generate_rfc1123_name('HelloFlow', '1', 'end', '321', '1')
# Very long step name should be fine
assert rfc1123.match(generate_rfc1123_name('Helloflow', '1', 'end'*50, '321', '1'))
# Very long run id should be fine too
assert rfc1123.match(generate_rfc1123_name('Helloflow', '1'*100, 'end', '321', '1')) |
sandbox/library/apps.py | caplena/django-hashid-field | 310 | 11069121 | from django.apps import AppConfig
class LibraryConfig(AppConfig):
name = 'library'
|
x-pack/filebeat/tests/system/test_xpack_modules.py | tetianakravchenko/beats | 9,729 | 11069146 | <gh_stars>1000+
import os
import sys
import test_modules
class XPackTest(test_modules.Test):
@classmethod
def setUpClass(self):
self.beat_name = "filebeat"
self.beat_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../../"))
super(test_modules.Test, self).setUpClass()
def setUp(self):
super(test_modules.Test, self).setUp()
|
homeassistant/components/panasonic_viera/__init__.py | MrDelik/core | 30,023 | 11069153 | """The Panasonic Viera integration."""
from functools import partial
import logging
from urllib.error import HTTPError, URLError
from panasonic_viera import EncryptionRequired, Keys, RemoteControl, SOAPError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PORT,
STATE_OFF,
STATE_ON,
Platform,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.script import Script
from homeassistant.helpers.typing import ConfigType
from .const import (
ATTR_DEVICE_INFO,
ATTR_REMOTE,
ATTR_UDN,
CONF_APP_ID,
CONF_ENCRYPTION_KEY,
CONF_ON_ACTION,
DEFAULT_NAME,
DEFAULT_PORT,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_ON_ACTION): cv.SCRIPT_SCHEMA,
}
)
],
)
},
extra=vol.ALLOW_EXTRA,
)
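# Illustrative configuration.yaml snippet matching the schema above (only the
# host is required; the on-action key name is the one defined by CONF_ON_ACTION):
#
#   panasonic_viera:
#     - host: 192.168.0.10
#       port: 55000
#       name: Living room TV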
PLATFORMS = [Platform.MEDIA_PLAYER, Platform.REMOTE]
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up Panasonic Viera from configuration.yaml."""
if DOMAIN not in config:
return True
for conf in config[DOMAIN]:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=conf
)
)
return True
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Set up Panasonic Viera from a config entry."""
panasonic_viera_data = hass.data.setdefault(DOMAIN, {})
config = config_entry.data
host = config[CONF_HOST]
port = config[CONF_PORT]
if (on_action := config[CONF_ON_ACTION]) is not None:
on_action = Script(hass, on_action, config[CONF_NAME], DOMAIN)
params = {}
if CONF_APP_ID in config and CONF_ENCRYPTION_KEY in config:
params["app_id"] = config[CONF_APP_ID]
params["encryption_key"] = config[CONF_ENCRYPTION_KEY]
remote = Remote(hass, host, port, on_action, **params)
await remote.async_create_remote_control(during_setup=True)
panasonic_viera_data[config_entry.entry_id] = {ATTR_REMOTE: remote}
# Add device_info to older config entries
if ATTR_DEVICE_INFO not in config or config[ATTR_DEVICE_INFO] is None:
device_info = await remote.async_get_device_info()
unique_id = config_entry.unique_id
if device_info is None:
_LOGGER.error(
"Couldn't gather device info; Please restart Home Assistant with your TV turned on and connected to your network"
)
else:
unique_id = device_info[ATTR_UDN]
hass.config_entries.async_update_entry(
config_entry,
unique_id=unique_id,
data={**config, ATTR_DEVICE_INFO: device_info},
)
hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(
config_entry, PLATFORMS
)
if unload_ok:
hass.data[DOMAIN].pop(config_entry.entry_id)
return unload_ok
class Remote:
"""The Remote class. It stores the TV properties and the remote control connection itself."""
def __init__(
self,
hass,
host,
port,
on_action=None,
app_id=None,
encryption_key=None,
):
"""Initialize the Remote class."""
self._hass = hass
self._host = host
self._port = port
self._on_action = on_action
self._app_id = app_id
self._encryption_key = encryption_key
self.state = None
self.available = False
self.volume = 0
self.muted = False
self.playing = True
self._control = None
async def async_create_remote_control(self, during_setup=False):
"""Create remote control."""
try:
params = {}
if self._app_id and self._encryption_key:
params["app_id"] = self._app_id
params["encryption_key"] = self._encryption_key
self._control = await self._hass.async_add_executor_job(
partial(RemoteControl, self._host, self._port, **params)
)
if during_setup:
await self.async_update()
except (URLError, SOAPError, OSError) as err:
_LOGGER.debug("Could not establish remote connection: %s", err)
self._control = None
self.state = STATE_OFF
self.available = self._on_action is not None
except Exception as err: # pylint: disable=broad-except
_LOGGER.exception("An unknown error occurred: %s", err)
self._control = None
self.state = STATE_OFF
self.available = self._on_action is not None
async def async_update(self):
"""Update device data."""
if self._control is None:
await self.async_create_remote_control()
return
await self._handle_errors(self._update)
def _update(self):
"""Retrieve the latest data."""
self.muted = self._control.get_mute()
self.volume = self._control.get_volume() / 100
async def async_send_key(self, key):
"""Send a key to the TV and handle exceptions."""
try:
key = getattr(Keys, key)
except (AttributeError, TypeError):
key = getattr(key, "value", key)
await self._handle_errors(self._control.send_key, key)
async def async_turn_on(self, context):
"""Turn on the TV."""
if self._on_action is not None:
await self._on_action.async_run(context=context)
await self.async_update()
elif self.state != STATE_ON:
await self.async_send_key(Keys.power)
await self.async_update()
async def async_turn_off(self):
"""Turn off the TV."""
if self.state != STATE_OFF:
await self.async_send_key(Keys.power)
self.state = STATE_OFF
await self.async_update()
async def async_set_mute(self, enable):
"""Set mute based on 'enable'."""
await self._handle_errors(self._control.set_mute, enable)
async def async_set_volume(self, volume):
"""Set volume level, range 0..1."""
volume = int(volume * 100)
await self._handle_errors(self._control.set_volume, volume)
async def async_play_media(self, media_type, media_id):
"""Play media."""
_LOGGER.debug("Play media: %s (%s)", media_id, media_type)
await self._handle_errors(self._control.open_webpage, media_id)
async def async_get_device_info(self):
"""Return device info."""
if self._control is None:
return None
device_info = await self._handle_errors(self._control.get_device_info)
_LOGGER.debug("Fetched device info: %s", str(device_info))
return device_info
async def _handle_errors(self, func, *args):
"""Handle errors from func, set available and reconnect if needed."""
try:
result = await self._hass.async_add_executor_job(func, *args)
self.state = STATE_ON
self.available = True
return result
except EncryptionRequired:
_LOGGER.error(
"The connection couldn't be encrypted. Please reconfigure your TV"
)
self.available = False
except (SOAPError, HTTPError) as err:
_LOGGER.debug("An error occurred: %s", err)
self.state = STATE_OFF
self.available = True
await self.async_create_remote_control()
except (URLError, OSError) as err:
_LOGGER.debug("An error occurred: %s", err)
self.state = STATE_OFF
self.available = self._on_action is not None
await self.async_create_remote_control()
except Exception as err: # pylint: disable=broad-except
_LOGGER.exception("An unknown error occurred: %s", err)
self.state = STATE_OFF
self.available = self._on_action is not None
|
wechat_sdk/__init__.py | upcwangying/wechat-python-sdk | 1,150 | 11069213 | # -*- coding: utf-8 -*-
from wechat_sdk.core.conf import WechatConf
from wechat_sdk.basic import WechatBasic
from wechat_sdk.ext import WechatExt
__all__ = ['WechatConf', 'WechatBasic', 'WechatExt']
__version__ = "0.6.4"
|
net/data/gencerts/__init__.py | Yannic/chromium | 575 | 11069224 | <reponame>Yannic/chromium
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Set of helpers to generate signed X.509v3 certificates.
This works by shelling out calls to the 'openssl req' and 'openssl ca'
commands, and passing the appropriate command line flags and configuration file
(.cnf).
"""
import base64
import hashlib
import os
import shutil
import subprocess
import sys
import openssl_conf
# Enum for the "type" of certificate that is to be created. This is used to
# select sane defaults for the .cnf file and command line flags, but they can
# all be overridden.
TYPE_CA = 2
TYPE_END_ENTITY = 3
# March 1st, 2015 12:00 UTC
MARCH_1_2015_UTC = '150301120000Z'
# March 2nd, 2015 12:00 UTC
MARCH_2_2015_UTC = '150302120000Z'
# January 1st, 2015 12:00 UTC
JANUARY_1_2015_UTC = '150101120000Z'
# September 1st, 2015 12:00 UTC
SEPTEMBER_1_2015_UTC = '150901120000Z'
# January 1st, 2016 12:00 UTC
JANUARY_1_2016_UTC = '160101120000Z'
# November 3rd, 2020 12:00 UTC
NOVEMBER_3_2020_UTC = '201103120000Z'
# November 3rd, 2021 12:00 UTC
NOVEMBER_3_2021_UTC = '211103120000Z'
KEY_PURPOSE_ANY = 'anyExtendedKeyUsage'
KEY_PURPOSE_SERVER_AUTH = 'serverAuth'
KEY_PURPOSE_CLIENT_AUTH = 'clientAuth'
DEFAULT_KEY_PURPOSE = KEY_PURPOSE_SERVER_AUTH
# Counters used to generate unique (but readable) path names.
g_cur_path_id = {}
# Output paths used:
# - g_tmp_dir: where any temporary files (cert req, signing db etc) are
# saved to.
# See init() for how these are assigned.
g_tmp_dir = None
g_invoking_script_path = None
# The default validity range of generated certificates. Can be modified with
# set_default_validity_range(). Chosen to end on a Wednesday, since these
# will have to be manually re-generated.
g_default_start_date = NOVEMBER_3_2020_UTC
g_default_end_date = NOVEMBER_3_2021_UTC
def set_default_validity_range(start_date, end_date):
"""Sets the validity range that will be used for certificates created with
Certificate"""
global g_default_start_date
global g_default_end_date
g_default_start_date = start_date
g_default_end_date = end_date
def get_unique_path_id(name):
"""Returns a base filename that contains 'name', but is unique to the output
directory"""
# Use case-insensitive matching for counting duplicates, since some
# filesystems are case insensitive, but case preserving.
lowercase_name = name.lower()
path_id = g_cur_path_id.get(lowercase_name, 0)
g_cur_path_id[lowercase_name] = path_id + 1
# Use a short and clean name for the first use of this name.
if path_id == 0:
return name
# Otherwise append the count to make it unique.
return '%s_%d' % (name, path_id)
def get_path_in_tmp_dir(name, suffix):
return os.path.join(g_tmp_dir, '%s%s' % (name, suffix))
class Key(object):
"""Describes a public + private key pair. It is a dumb wrapper around an
on-disk key."""
def __init__(self, path):
self.path = path
def get_path(self):
"""Returns the path to a file that contains the key contents."""
return self.path
def get_or_generate_key(generation_arguments, path):
"""Helper function to either retrieve a key from an existing file |path|, or
generate a new one using the command line |generation_arguments|."""
generation_arguments_str = ' '.join(generation_arguments)
# If the file doesn't already exist, generate a new key using the generation
# parameters.
if not os.path.isfile(path):
key_contents = subprocess.check_output(generation_arguments)
# Prepend the generation parameters to the key file.
write_string_to_file(generation_arguments_str + '\n' + key_contents,
path)
else:
# If the path already exists, confirm that it is for the expected key type.
first_line = read_file_to_string(path).splitlines()[0]
if first_line != generation_arguments_str:
sys.stderr.write(('\nERROR: The existing key file:\n %s\nis not '
'compatible with the requested parameters:\n "%s" vs "%s".\n'
'Delete the file if you want to re-generate it with the new '
'parameters, otherwise pick a new filename\n') % (
path, first_line, generation_arguments_str))
sys.exit(1)
return Key(path)
def get_or_generate_rsa_key(size_bits, path):
"""Retrieves an existing key from a file if the path exists. Otherwise
generates an RSA key with the specified bit size and saves it to the path."""
return get_or_generate_key(['openssl', 'genrsa', str(size_bits)], path)
def get_or_generate_ec_key(named_curve, path):
"""Retrieves an existing key from a file if the path exists. Otherwise
generates an EC key with the specified named curve and saves it to the
path."""
return get_or_generate_key(['openssl', 'ecparam', '-name', named_curve,
'-genkey'], path)
def create_key_path(base_name):
"""Generates a name that contains |base_name| in it, and is relative to the
"keys/" directory. If create_key_path(xxx) is called more than once during
the script run, a suffix will be added."""
# Save keys to CWD/keys/*.key
keys_dir = 'keys'
# Create the keys directory if it doesn't exist
if not os.path.exists(keys_dir):
os.makedirs(keys_dir)
return get_unique_path_id(os.path.join(keys_dir, base_name)) + '.key'
class Certificate(object):
"""Helper for building an X.509 certificate."""
def __init__(self, name, cert_type, issuer):
# The name will be used for the subject's CN, and also as a component of
# the temporary filenames to help with debugging.
self.name = name
self.path_id = get_unique_path_id(name)
# Allow the caller to override the key later. If no key was set will
# auto-generate one.
self.key = None
# The issuer is also a Certificate object. Passing |None| means it is a
# self-signed certificate.
self.issuer = issuer
if issuer is None:
self.issuer = self
# The config contains all the OpenSSL options that will be passed via a
# .cnf file. Set up defaults.
self.config = openssl_conf.Config()
self.init_config()
# Some settings need to be passed as flags rather than in the .cnf file.
# Technically these can be set though a .cnf, however doing so makes it
# sticky to the issuing certificate, rather than selecting it per
# subordinate certificate.
self.validity_flags = []
self.md_flags = []
# By default OpenSSL will use the current time for the start time. Instead
# default to using a fixed timestamp for more predictable results each time
# the certificates are re-generated.
self.set_validity_range(g_default_start_date, g_default_end_date)
# Use SHA-256 when THIS certificate is signed (setting it in the
# configuration would instead set the hash to use when signing other
# certificates with this one).
self.set_signature_hash('sha256')
# Set appropriate key usages and basic constraints. For flexibility in
# testing (since want to generate some flawed certificates) these are set
# on a per-certificate basis rather than automatically when signing.
if cert_type == TYPE_END_ENTITY:
self.get_extensions().set_property('keyUsage',
'critical,digitalSignature,keyEncipherment')
self.get_extensions().set_property('extendedKeyUsage',
'serverAuth,clientAuth')
else:
self.get_extensions().set_property('keyUsage',
'critical,keyCertSign,cRLSign')
self.get_extensions().set_property('basicConstraints', 'critical,CA:true')
# Tracks whether the PEM file for this certificate has been written (since
# generation is done lazily).
self.finalized = False
# Initialize any files that will be needed if this certificate is used to
# sign other certificates. Picks a pseudo-random starting serial number
# based on the file system path, and will increment this for each signed
# certificate.
if not os.path.exists(self.get_serial_path()):
write_string_to_file('%s\n' % self.make_serial_number(),
self.get_serial_path())
if not os.path.exists(self.get_database_path()):
write_string_to_file('', self.get_database_path())
def set_validity_range(self, start_date, end_date):
"""Sets the Validity notBefore and notAfter properties for the
certificate"""
self.validity_flags = ['-startdate', start_date, '-enddate', end_date]
def set_signature_hash(self, md):
"""Sets the hash function that will be used when signing this certificate.
Can be sha1, sha256, sha512, md5, etc."""
self.md_flags = ['-md', md]
def get_extensions(self):
return self.config.get_section('req_ext')
def get_subject(self):
"""Returns the configuration section responsible for the subject of the
certificate. This can be used to alter the subject to be more complex."""
return self.config.get_section('req_dn')
def get_path(self, suffix):
"""Forms a path to an output file for this certificate, containing the
indicated suffix. The certificate's name will be used as its basis."""
return os.path.join(g_tmp_dir, '%s%s' % (self.path_id, suffix))
def get_name_path(self, suffix):
"""Forms a path to an output file for this CA, containing the indicated
suffix. If multiple certificates have the same name, they will use the same
path."""
return get_path_in_tmp_dir(self.name, suffix)
def set_key(self, key):
assert self.finalized is False
self.set_key_internal(key)
def set_key_internal(self, key):
self.key = key
# Associate the private key with the certificate.
section = self.config.get_section('root_ca')
section.set_property('private_key', self.key.get_path())
def get_key(self):
if self.key is None:
self.set_key_internal(
get_or_generate_rsa_key(2048, create_key_path(self.name)))
return self.key
def get_cert_path(self):
return self.get_path('.pem')
def get_serial_path(self):
return self.get_name_path('.serial')
def make_serial_number(self):
"""Returns a hex number that is generated based on the certificate file
path. This serial number will likely be globally unique, which makes it
easier to use the certificates with NSS (which assumes certificate
equivalence based on issuer and serial number)."""
# Hash some predictable values together to get the serial number. The
# predictability is so that re-generating certificate chains is
    # a no-op, while each certificate still ends up with a unique serial number.
m = hashlib.sha1()
# Mix in up to the last 3 components of the path for the generating script.
# For example,
# "verify_certificate_chain_unittest/my_test/generate_chains.py"
script_path = os.path.realpath(g_invoking_script_path)
script_path = "/".join(script_path.split(os.sep)[-3:])
m.update(script_path)
# Mix in the path_id, which corresponds to a unique path for the
# certificate under out/ (and accounts for non-unique certificate names).
m.update(self.path_id)
serial_bytes = m.digest()
# SHA1 digest is 20 bytes long, which is appropriate for a serial number.
# However, need to also make sure the most significant bit is 0 so it is
# not a "negative" number.
serial_bytes = chr(ord(serial_bytes[0]) & 0x7F) + serial_bytes[1:]
return serial_bytes.encode("hex")
def get_csr_path(self):
return self.get_path('.csr')
def get_database_path(self):
return self.get_name_path('.db')
def get_config_path(self):
return self.get_path('.cnf')
def get_cert_pem(self):
# Finish generating a .pem file for the certificate.
self.finalize()
# Read the certificate data.
return read_file_to_string(self.get_cert_path())
def finalize(self):
"""Finishes the certificate creation process. This generates any needed
key, creates and signs the CSR. On completion the resulting PEM file can be
found at self.get_cert_path()"""
if self.finalized:
return # Already finalized, no work needed.
self.finalized = True
# Ensure that the issuer has been "finalized", since its outputs need to be
# accessible. Note that self.issuer could be the same as self.
self.issuer.finalize()
# Ensure the certificate has a key (gets lazily created by this call if
# missing).
self.get_key()
# Serialize the config to a file.
self.config.write_to_file(self.get_config_path())
# Create a CSR.
subprocess.check_call(
['openssl', 'req', '-new',
'-key', self.key.get_path(),
'-out', self.get_csr_path(),
'-config', self.get_config_path()])
cmd = ['openssl', 'ca', '-batch', '-in',
self.get_csr_path(), '-out', self.get_cert_path(), '-config',
self.issuer.get_config_path()]
if self.issuer == self:
cmd.append('-selfsign')
# Add in any extra flags.
cmd.extend(self.validity_flags)
cmd.extend(self.md_flags)
# Run the 'openssl ca' command.
subprocess.check_call(cmd)
def init_config(self):
"""Initializes default properties in the certificate .cnf file that are
generic enough to work for all certificates (but can be overridden later).
"""
# --------------------------------------
# 'req' section
# --------------------------------------
section = self.config.get_section('req')
section.set_property('encrypt_key', 'no')
section.set_property('utf8', 'yes')
section.set_property('string_mask', 'utf8only')
section.set_property('prompt', 'no')
section.set_property('distinguished_name', 'req_dn')
section.set_property('req_extensions', 'req_ext')
# --------------------------------------
# 'req_dn' section
# --------------------------------------
# This section describes the certificate subject's distinguished name.
section = self.config.get_section('req_dn')
section.set_property('commonName', '"%s"' % (self.name))
# --------------------------------------
# 'req_ext' section
# --------------------------------------
# This section describes the certificate's extensions.
section = self.config.get_section('req_ext')
section.set_property('subjectKeyIdentifier', 'hash')
# --------------------------------------
# SECTIONS FOR CAs
# --------------------------------------
# The following sections are used by the 'openssl ca' and relate to the
# signing operation. They are not needed for end-entity certificate
    # configurations, but only if this certificate will be used to sign other
# certificates.
# --------------------------------------
# 'ca' section
# --------------------------------------
section = self.config.get_section('ca')
section.set_property('default_ca', 'root_ca')
section = self.config.get_section('root_ca')
section.set_property('certificate', self.get_cert_path())
section.set_property('new_certs_dir', g_tmp_dir)
section.set_property('serial', self.get_serial_path())
section.set_property('database', self.get_database_path())
section.set_property('unique_subject', 'no')
# These will get overridden via command line flags.
section.set_property('default_days', '365')
section.set_property('default_md', 'sha256')
section.set_property('policy', 'policy_anything')
section.set_property('email_in_dn', 'no')
section.set_property('preserve', 'yes')
section.set_property('name_opt', 'multiline,-esc_msb,utf8')
section.set_property('cert_opt', 'ca_default')
section.set_property('copy_extensions', 'copy')
section.set_property('x509_extensions', 'signing_ca_ext')
section.set_property('default_crl_days', '30')
section.set_property('crl_extensions', 'crl_ext')
section = self.config.get_section('policy_anything')
section.set_property('domainComponent', 'optional')
section.set_property('countryName', 'optional')
section.set_property('stateOrProvinceName', 'optional')
section.set_property('localityName', 'optional')
section.set_property('organizationName', 'optional')
section.set_property('organizationalUnitName', 'optional')
section.set_property('commonName', 'optional')
section.set_property('emailAddress', 'optional')
section = self.config.get_section('signing_ca_ext')
section.set_property('subjectKeyIdentifier', 'hash')
section.set_property('authorityKeyIdentifier', 'keyid:always')
section.set_property('authorityInfoAccess', '@issuer_info')
section.set_property('crlDistributionPoints', '@crl_info')
section = self.config.get_section('issuer_info')
section.set_property('caIssuers;URI.0',
'http://url-for-aia/%s.cer' % (self.name))
section = self.config.get_section('crl_info')
section.set_property('URI.0', 'http://url-for-crl/%s.crl' % (self.name))
section = self.config.get_section('crl_ext')
section.set_property('authorityKeyIdentifier', 'keyid:always')
section.set_property('authorityInfoAccess', '@issuer_info')
def text_data_to_pem(block_header, text_data):
return '%s\n-----BEGIN %s-----\n%s\n-----END %s-----\n' % (text_data,
block_header, base64.b64encode(text_data), block_header)
def write_chain(description, chain, out_pem):
"""Writes the chain to a .pem file as a series of CERTIFICATE blocks"""
# Prepend the script name that generated the file to the description.
test_data = '[Created by: %s]\n\n%s\n' % (sys.argv[0], description)
# Write the certificate chain to the output file.
for cert in chain:
test_data += '\n' + cert.get_cert_pem()
write_string_to_file(test_data, out_pem)
def write_string_to_file(data, path):
with open(path, 'w') as f:
f.write(data)
def read_file_to_string(path):
with open(path, 'r') as f:
return f.read()
def init(invoking_script_path):
"""Creates an output directory to contain all the temporary files that may be
created, as well as determining the path for the final output. These paths
are all based off of the name of the calling script.
"""
global g_tmp_dir
global g_invoking_script_path
g_invoking_script_path = invoking_script_path
# The scripts assume to be run from within their containing directory (paths
# to things like "keys/" are written relative).
expected_cwd = os.path.realpath(os.path.dirname(invoking_script_path))
actual_cwd = os.path.realpath(os.getcwd())
if actual_cwd != expected_cwd:
sys.stderr.write(
('Your current working directory must be that containing the python '
'scripts:\n%s\nas the script may reference paths relative to this\n')
% (expected_cwd))
sys.exit(1)
# Use an output directory that is a sibling of the invoking script.
g_tmp_dir = 'out'
# Ensure the output directory exists and is empty.
sys.stdout.write('Creating output directory: %s\n' % (g_tmp_dir))
shutil.rmtree(g_tmp_dir, True)
os.makedirs(g_tmp_dir)
def create_self_signed_root_certificate(name):
return Certificate(name, TYPE_CA, None)
def create_intermediate_certificate(name, issuer):
return Certificate(name, TYPE_CA, issuer)
def create_end_entity_certificate(name, issuer):
return Certificate(name, TYPE_END_ENTITY, issuer)
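# Illustrative sketch (commented out, not part of the original script) of how a
# test script might combine the helpers above; the certificate names and the
# 'chain.pem' output path are placeholders:
#
#   root = create_self_signed_root_certificate('Root')
#   intermediate = create_intermediate_certificate('Intermediate', root)
#   leaf = create_end_entity_certificate('Target', intermediate)
#   write_chain('Target is signed by Intermediate, which is signed by Root.',
#               [leaf, intermediate, root], 'chain.pem')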
init(sys.argv[0])
|
bigflow_python/python/bigflow/transform_impls/select_elements_processor.py | tushushu/bigflow | 1,236 | 11069290 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
########################################################################
#
# Copyright (c) 2015 Baidu, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
########################################################################
"""
File: select_elements_processor.py
Author: panyunhong(<EMAIL>)
Date: 2015/05/08 11:49:39
"""
import heapq
from bigflow.transform_impls import processor
def default_select_min_comparator(first_key, second_key):
return first_key > second_key
class SelectElementsProcessor(processor.AbstractProcessor):
class __wrapper(object):
def __init__(self, key, record, comparer):
self._key = key
self._record = record
self._comparer = comparer
def __lt__(self, wrapper_obj):
return self._comparer(self._key(self._record), \
wrapper_obj._key(wrapper_obj._record))
def __init__(self, _n, _key, comparer=default_select_min_comparator):
super(SelectElementsProcessor, self).__init__(_key)
self.n = _n
self.heap = []
self.key = (lambda x: x) if _key is None else _key
self._comparer = comparer
def process(self, index, record):
wrapper_obj = SelectElementsProcessor.__wrapper(self.key, record, self._comparer)
if len(self.heap) < self.n:
heapq.heappush(self.heap, wrapper_obj)
else:
if self.heap[0] < wrapper_obj:
heapq.heapreplace(self.heap, wrapper_obj)
def end(self):
while len(self.heap) > 0:
wrapper_obj = heapq.heappop(self.heap)
self._emitter.emit(wrapper_obj._record)
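if __name__ == "__main__":
    # Illustrative sketch only (not part of the Bigflow runtime): the processor
    # above keeps the n records with the smallest keys by maintaining a bounded
    # heap. The same idea is shown here with plain heapq, storing negated keys
    # so that the heap root is always the largest key currently kept.
    def select_n_smallest(records, n, key=lambda x: x):
        heap = []
        for record in records:
            item = (-key(record), record)
            if len(heap) < n:
                heapq.heappush(heap, item)
            elif item > heap[0]:
                # -key(record) > -largest_kept_key means key(record) is smaller.
                heapq.heapreplace(heap, item)
        return [rec for _, rec in sorted(heap, reverse=True)]

    print(select_n_smallest([5, 1, 9, 3, 7], 3))  # prints [1, 3, 5]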
|
VMEncryption/main/CommandExecutor.py | shridpant/azure-linux-extensions | 266 | 11069292 | <reponame>shridpant/azure-linux-extensions
#!/usr/bin/env python
#
# VMEncryption extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
import os
import os.path
import shlex
import sys
from subprocess import *
from threading import Timer
class ProcessCommunicator(object):
def __init__(self):
self.stdout = None
self.stderr = None
class CommandExecutor(object):
"""description of class"""
def __init__(self, logger):
self.logger = logger
def Execute(self, command_to_execute, raise_exception_on_failure=False, communicator=None, input=None, suppress_logging=False, timeout=0):
if type(command_to_execute) == unicode:
command_to_execute = command_to_execute.encode('ascii', 'ignore')
if not suppress_logging:
self.logger.log("Executing: {0}".format(command_to_execute))
args = shlex.split(command_to_execute)
proc = None
timer = None
return_code = None
try:
proc = Popen(args, stdout=PIPE, stderr=PIPE, stdin=PIPE, close_fds=True)
except Exception as e:
if raise_exception_on_failure:
raise
else:
if not suppress_logging:
self.logger.log("Process creation failed: " + str(e))
return -1
def timeout_process():
proc.kill()
self.logger.log("Command {0} didn't finish in {1} seconds. Timing it out".format(command_to_execute, timeout))
try:
if timeout>0:
timer = Timer(timeout, timeout_process)
timer.start()
stdout, stderr = proc.communicate(input=input)
finally:
if timer is not None:
timer.cancel()
return_code = proc.returncode
if isinstance(communicator, ProcessCommunicator):
communicator.stdout, communicator.stderr = stdout, stderr
if int(return_code) != 0:
msg = "Command {0} failed with return code {1}".format(command_to_execute, return_code)
msg += "\nstdout:\n" + stdout
msg += "\nstderr:\n" + stderr
if not suppress_logging:
self.logger.log(msg)
if raise_exception_on_failure:
raise Exception(msg)
return return_code
def ExecuteInBash(self, command_to_execute, raise_exception_on_failure=False, communicator=None, input=None, suppress_logging=False):
command_to_execute = 'bash -c "{0}{1}"'.format('set -e; ' if raise_exception_on_failure else '',
command_to_execute)
return self.Execute(command_to_execute, raise_exception_on_failure, communicator, input, suppress_logging)
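if __name__ == "__main__":
    # Illustrative usage sketch, not part of the extension handler itself. It
    # assumes only what CommandExecutor needs from its logger: an object with a
    # log(msg) method. The echoed command is a POSIX placeholder.
    class _PrintLogger(object):
        def log(self, msg):
            print(msg)

    executor = CommandExecutor(_PrintLogger())
    communicator = ProcessCommunicator()
    return_code = executor.Execute('echo hello', communicator=communicator)
    print("return code: {0}, stdout: {1}".format(return_code, communicator.stdout))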
|
tools/mb/mb.py | chromium/chromium | 14,668 | 11069298 | <reponame>chromium/chromium
#!/usr/bin/env python
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""MB - the Meta-Build wrapper around GN.
MB is a wrapper script for GN that can be used to generate build files
for sets of canned configurations and analyze them.
"""
from __future__ import absolute_import
from __future__ import print_function
import argparse
import ast
import collections
import errno
import json
import os
import pipes
import platform
import re
import shutil
import sys
import subprocess
import tempfile
import traceback
import zipfile
if sys.version_info.major == 2:
from urllib2 import urlopen
else:
from urllib.request import urlopen
CHROMIUM_SRC_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
sys.path = [os.path.join(CHROMIUM_SRC_DIR, 'build')] + sys.path
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..'))
import gn_helpers
from mb.lib import validation
def DefaultVals():
"""Default mixin values"""
return {
'args_file': '',
'gn_args': '',
}
def PruneVirtualEnv():
# Set by VirtualEnv, no need to keep it.
os.environ.pop('VIRTUAL_ENV', None)
# Set by VPython, if scripts want it back they have to set it explicitly.
os.environ.pop('PYTHONNOUSERSITE', None)
# Look for "activate_this.py" in this path, which is installed by VirtualEnv.
# This mechanism is used by vpython as well to sanitize VirtualEnvs from
# $PATH.
os.environ['PATH'] = os.pathsep.join([
p for p in os.environ.get('PATH', '').split(os.pathsep)
if not os.path.isfile(os.path.join(p, 'activate_this.py'))
])
def main(args):
# Prune all evidence of VPython/VirtualEnv out of the environment. This means
# that we 'unwrap' vpython VirtualEnv path/env manipulation. Invocations of
# `python` from GN should never inherit the gn.py's own VirtualEnv. This also
# helps to ensure that generated ninja files do not reference python.exe from
# the VirtualEnv generated from depot_tools' own .vpython file (or lack
# thereof), but instead reference the default python from the PATH.
PruneVirtualEnv()
mbw = MetaBuildWrapper()
return mbw.Main(args)
class MetaBuildWrapper(object):
def __init__(self):
self.chromium_src_dir = CHROMIUM_SRC_DIR
self.default_config = os.path.join(self.chromium_src_dir, 'tools', 'mb',
'mb_config.pyl')
self.default_isolate_map = os.path.join(self.chromium_src_dir, 'testing',
'buildbot', 'gn_isolate_map.pyl')
self.executable = sys.executable
self.platform = sys.platform
self.sep = os.sep
self.args = argparse.Namespace()
self.configs = {}
self.public_artifact_builders = None
self.builder_groups = {}
self.mixins = {}
self.isolate_exe = 'isolate.exe' if self.platform.startswith(
'win') else 'isolate'
self.use_luci_auth = False
self.rts_out_dir = self.PathJoin('gen', 'rts')
self.banned_from_rts = set()
def PostArgsInit(self):
self.use_luci_auth = getattr(self.args, 'luci_auth', False)
if 'config_file' in self.args and self.args.config_file is None:
self.args.config_file = self.default_config
if 'expectations_dir' in self.args and self.args.expectations_dir is None:
self.args.expectations_dir = os.path.join(
os.path.dirname(self.args.config_file), 'mb_config_expectations')
banned_from_rts_map = json.loads(
self.ReadFile(
self.PathJoin(self.chromium_src_dir, 'tools', 'mb',
'rts_banned_suites.json')))
self.banned_from_rts.update(banned_from_rts_map.get('*', set()))
if getattr(self.args, 'builder', None):
self.banned_from_rts.update(
banned_from_rts_map.get(self.args.builder, set()))
def Main(self, args):
self.ParseArgs(args)
self.PostArgsInit()
try:
ret = self.args.func()
if ret != 0:
self.DumpInputFiles()
return ret
except KeyboardInterrupt:
self.Print('interrupted, exiting')
return 130
except Exception:
self.DumpInputFiles()
s = traceback.format_exc()
for l in s.splitlines():
self.Print(l)
return 1
def ParseArgs(self, argv):
def AddCommonOptions(subp):
group = subp.add_mutually_exclusive_group()
group.add_argument(
'-m', '--builder-group',
help='builder group name to look up config from')
subp.add_argument('-b', '--builder',
help='builder name to look up config from')
subp.add_argument('-c', '--config',
help='configuration to analyze')
subp.add_argument('--phase',
help='optional phase name (used when builders '
'do multiple compiles with different '
'arguments in a single build)')
subp.add_argument('-i', '--isolate-map-file', metavar='PATH',
help='path to isolate map file '
'(default is %(default)s)',
default=[],
action='append',
dest='isolate_map_files')
subp.add_argument('-n', '--dryrun', action='store_true',
help='Do a dry run (i.e., do nothing, just print '
'the commands that will run)')
subp.add_argument('-v', '--verbose', action='store_true',
help='verbose logging')
subp.add_argument('--root', help='Path to GN source root')
subp.add_argument('--dotfile', help='Path to GN dotfile')
AddExpansionOptions(subp)
def AddExpansionOptions(subp):
# These are the args needed to expand a config file into the full
# parsed dicts of GN args.
subp.add_argument('-f',
'--config-file',
metavar='PATH',
help=('path to config file '
                              '(default is mb_config.pyl)'))
subp.add_argument('-g', '--goma-dir', help='path to goma directory')
subp.add_argument('--android-version-code',
help='Sets GN arg android_default_version_code')
subp.add_argument('--android-version-name',
help='Sets GN arg android_default_version_name')
subp.add_argument('--use-rts',
action='store_true',
default=False,
help='whether or not to use regression test selection'
' For more info about RTS, please see'
' //docs/testing/regression-test-selection.md')
subp.add_argument('--use-st',
action='store_true',
default=False,
help='whether or not to add filter stable tests during'
' RTS selection')
# TODO(crbug.com/1060857): Remove this once swarming task templates
# support command prefixes.
luci_auth_group = subp.add_mutually_exclusive_group()
luci_auth_group.add_argument(
'--luci-auth',
action='store_true',
help='Run isolated commands under `luci-auth context`.')
luci_auth_group.add_argument(
'--no-luci-auth',
action='store_false',
dest='luci_auth',
help='Do not run isolated commands under `luci-auth context`.')
parser = argparse.ArgumentParser(
prog='mb', description='mb (meta-build) is a python wrapper around GN. '
'See the user guide in '
'//tools/mb/docs/user_guide.md for detailed usage '
'instructions.')
subps = parser.add_subparsers()
subp = subps.add_parser('analyze',
description='Analyze whether changes to a set of '
'files will cause a set of binaries to '
'be rebuilt.')
AddCommonOptions(subp)
subp.add_argument('path',
help='path build was generated into.')
subp.add_argument('input_path',
help='path to a file containing the input arguments '
'as a JSON object.')
subp.add_argument('output_path',
help='path to a file containing the output arguments '
'as a JSON object.')
subp.add_argument('--json-output',
help='Write errors to json.output')
subp.set_defaults(func=self.CmdAnalyze)
subp = subps.add_parser('export',
description='Print out the expanded configuration '
'for each builder as a JSON object.')
AddExpansionOptions(subp)
subp.set_defaults(func=self.CmdExport)
subp = subps.add_parser('get-swarming-command',
description='Get the command needed to run the '
'binary under swarming')
AddCommonOptions(subp)
subp.add_argument('--no-build',
dest='build',
default=True,
action='store_false',
help='Do not build, just isolate')
subp.add_argument('--as-list',
action='store_true',
help='return the command line as a JSON-formatted '
'list of strings instead of single string')
subp.add_argument('path',
help=('path to generate build into (or use).'
' This can be either a regular path or a '
'GN-style source-relative path like '
'//out/Default.'))
subp.add_argument('target', help='ninja target to build and run')
subp.set_defaults(func=self.CmdGetSwarmingCommand)
subp = subps.add_parser('train',
description='Writes the expanded configuration '
'for each builder as JSON files to a configured '
'directory.')
subp.add_argument('-f',
'--config-file',
metavar='PATH',
                      help='path to config file (default is mb_config.pyl)')
subp.add_argument('--expectations-dir',
metavar='PATH',
help='path to dir containing expectation files')
subp.add_argument('-n',
'--dryrun',
action='store_true',
help='Do a dry run (i.e., do nothing, just print '
'the commands that will run)')
subp.add_argument('-v',
'--verbose',
action='store_true',
help='verbose logging')
subp.set_defaults(func=self.CmdTrain)
subp = subps.add_parser('gen',
description='Generate a new set of build files.')
AddCommonOptions(subp)
subp.add_argument('--swarming-targets-file',
help='generates runtime dependencies for targets listed '
'in file as .isolate and .isolated.gen.json files. '
'Targets should be listed by name, separated by '
'newline.')
subp.add_argument('--json-output',
help='Write errors to json.output')
subp.add_argument('--rts-target-change-recall',
type=float,
help='how much safety is needed when selecting tests. '
'0.0 is the lowest and 1.0 is the highest')
subp.add_argument('path',
help='path to generate build into')
subp.set_defaults(func=self.CmdGen)
subp = subps.add_parser('isolate-everything',
description='Generates a .isolate for all targets. '
'Requires that mb.py gen has already '
'been run.')
AddCommonOptions(subp)
subp.set_defaults(func=self.CmdIsolateEverything)
subp.add_argument('path',
help='path build was generated into')
subp = subps.add_parser('isolate',
description='Generate the .isolate files for a '
'given binary.')
AddCommonOptions(subp)
subp.add_argument('--no-build', dest='build', default=True,
action='store_false',
help='Do not build, just isolate')
subp.add_argument('-j', '--jobs', type=int,
help='Number of jobs to pass to ninja')
subp.add_argument('path',
help='path build was generated into')
subp.add_argument('target',
help='ninja target to generate the isolate for')
subp.set_defaults(func=self.CmdIsolate)
subp = subps.add_parser('lookup',
description='Look up the command for a given '
'config or builder.')
AddCommonOptions(subp)
subp.add_argument('--quiet', default=False, action='store_true',
help='Print out just the arguments, '
'do not emulate the output of the gen subcommand.')
subp.add_argument('--recursive', default=False, action='store_true',
                      help='Look up arguments from imported files, '
'implies --quiet')
subp.set_defaults(func=self.CmdLookup)
subp = subps.add_parser('try',
description='Try your change on a remote builder')
AddCommonOptions(subp)
subp.add_argument('target',
help='ninja target to build and run')
subp.add_argument('--force', default=False, action='store_true',
help='Force the job to run. Ignores local checkout state;'
' by default, the tool doesn\'t trigger jobs if there are'
' local changes which are not present on Gerrit.')
subp.set_defaults(func=self.CmdTry)
subp = subps.add_parser(
'run', formatter_class=argparse.RawDescriptionHelpFormatter)
subp.description = (
'Build, isolate, and run the given binary with the command line\n'
'listed in the isolate. You may pass extra arguments after the\n'
'target; use "--" if the extra arguments need to include switches.\n'
'\n'
'Examples:\n'
'\n'
' % tools/mb/mb.py run -m chromium.linux -b "Linux Builder" \\\n'
' //out/Default content_browsertests\n'
'\n'
' % tools/mb/mb.py run out/Default content_browsertests\n'
'\n'
' % tools/mb/mb.py run out/Default content_browsertests -- \\\n'
' --test-launcher-retry-limit=0'
'\n'
)
AddCommonOptions(subp)
subp.add_argument('-j', '--jobs', type=int,
help='Number of jobs to pass to ninja')
subp.add_argument('--no-build', dest='build', default=True,
action='store_false',
help='Do not build, just isolate and run')
subp.add_argument('path',
help=('path to generate build into (or use).'
' This can be either a regular path or a '
'GN-style source-relative path like '
'//out/Default.'))
subp.add_argument('-s', '--swarmed', action='store_true',
help='Run under swarming with the default dimensions')
subp.add_argument('-d', '--dimension', default=[], action='append', nargs=2,
dest='dimensions', metavar='FOO bar',
help='dimension to filter on')
subp.add_argument('--tags', default=[], action='append', metavar='FOO:BAR',
help='Tags to assign to the swarming task')
subp.add_argument('--no-default-dimensions', action='store_false',
dest='default_dimensions', default=True,
help='Do not automatically add dimensions to the task')
subp.add_argument('target',
help='ninja target to build and run')
subp.add_argument('extra_args', nargs='*',
help=('extra args to pass to the isolate to run. Use '
'"--" as the first arg if you need to pass '
'switches'))
subp.set_defaults(func=self.CmdRun)
subp = subps.add_parser('validate',
description='Validate the config file.')
AddExpansionOptions(subp)
subp.add_argument('--expectations-dir',
metavar='PATH',
help='path to dir containing expectation files')
subp.add_argument('--skip-dcheck-check',
help='Skip check for dcheck_always_on.',
action='store_true')
subp.set_defaults(func=self.CmdValidate)
subp = subps.add_parser('zip',
description='Generate a .zip containing the files '
'needed for a given binary.')
AddCommonOptions(subp)
subp.add_argument('--no-build', dest='build', default=True,
action='store_false',
help='Do not build, just isolate')
subp.add_argument('-j', '--jobs', type=int,
help='Number of jobs to pass to ninja')
subp.add_argument('path',
help='path build was generated into')
subp.add_argument('target',
help='ninja target to generate the isolate for')
subp.add_argument('zip_path',
help='path to zip file to create')
subp.set_defaults(func=self.CmdZip)
subp = subps.add_parser('help',
help='Get help on a subcommand.')
subp.add_argument(nargs='?', action='store', dest='subcommand',
help='The command to get help for.')
subp.set_defaults(func=self.CmdHelp)
self.args = parser.parse_args(argv)
def DumpInputFiles(self):
def DumpContentsOfFilePassedTo(arg_name, path):
if path and self.Exists(path):
self.Print("\n# To recreate the file passed to %s:" % arg_name)
self.Print("%% cat > %s <<EOF" % path)
contents = self.ReadFile(path)
self.Print(contents)
self.Print("EOF\n%\n")
if getattr(self.args, 'input_path', None):
DumpContentsOfFilePassedTo(
'argv[0] (input_path)', self.args.input_path)
if getattr(self.args, 'swarming_targets_file', None):
DumpContentsOfFilePassedTo(
'--swarming-targets-file', self.args.swarming_targets_file)
def CmdAnalyze(self):
vals = self.Lookup()
return self.RunGNAnalyze(vals)
def CmdExport(self):
obj = self._ToJsonish()
s = json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '))
self.Print(s)
return 0
def CmdTrain(self):
expectations_dir = self.args.expectations_dir
if not self.Exists(expectations_dir):
self.Print('Expectations dir (%s) does not exist.' % expectations_dir)
return 1
# Removing every expectation file then immediately re-generating them will
# clear out deleted groups.
for f in self.ListDir(expectations_dir):
self.RemoveFile(os.path.join(expectations_dir, f))
obj = self._ToJsonish()
for builder_group, builder in sorted(obj.items()):
expectation_file = os.path.join(expectations_dir, builder_group + '.json')
json_s = json.dumps(builder,
indent=2,
sort_keys=True,
separators=(',', ': '))
self.WriteFile(expectation_file, json_s)
return 0
def RtsSelect(self):
model_dir = self.PathJoin(
self.chromium_src_dir, 'testing', 'rts', self._CipdPlatform())
exe = self.PathJoin(model_dir, 'rts-chromium')
if self.platform == 'win32':
exe += '.exe'
args = [
exe, 'select',
        '-model-dir', model_dir,
'-out', self.PathJoin(self.ToAbsPath(self.args.path), self.rts_out_dir),
'-checkout', self.chromium_src_dir,
]
if self.args.rts_target_change_recall:
if (self.args.rts_target_change_recall < 0
or self.args.rts_target_change_recall > 1):
self.WriteFailureAndRaise(
            'rts-target-change-recall must be between 0 and 1 (inclusive)', None)
args += ['-target-change-recall', str(self.args.rts_target_change_recall)]
    ret, _, err = self.Run(args, force_verbose=True)
    if ret != 0:
      self.WriteFailureAndRaise(err, None)
def CmdGen(self):
if self.args.use_rts:
self.RtsSelect()
vals = self.Lookup()
return self.RunGNGen(vals)
def CmdGetSwarmingCommand(self):
vals = self.GetConfig()
command, _ = self.GetSwarmingCommand(self.args.target, vals)
if self.args.as_list:
self.Print(json.dumps(command))
else:
self.Print(' '.join(command))
return 0
def CmdIsolateEverything(self):
vals = self.Lookup()
return self.RunGNGenAllIsolates(vals)
def CmdHelp(self):
if self.args.subcommand:
self.ParseArgs([self.args.subcommand, '--help'])
else:
self.ParseArgs(['--help'])
def CmdIsolate(self):
vals = self.GetConfig()
if not vals:
return 1
if self.args.build:
ret = self.Build(self.args.target)
if ret != 0:
return ret
return self.RunGNIsolate(vals)
def CmdLookup(self):
vals = self.Lookup()
_, gn_args = self.GNArgs(vals, expand_imports=self.args.recursive)
if self.args.quiet or self.args.recursive:
self.Print(gn_args, end='')
else:
cmd = self.GNCmd('gen', '_path_')
self.Print('\nWriting """\\\n%s""" to _path_/args.gn.\n' % gn_args)
self.PrintCmd(cmd)
return 0
def CmdTry(self):
ninja_target = self.args.target
if ninja_target.startswith('//'):
self.Print("Expected a ninja target like base_unittests, got %s" % (
ninja_target))
return 1
_, out, _ = self.Run(['git', 'cl', 'diff', '--stat'], force_verbose=False)
if out:
self.Print("Your checkout appears to local changes which are not uploaded"
" to Gerrit. Changes must be committed and uploaded to Gerrit"
" to be tested using this tool.")
if not self.args.force:
return 1
json_path = self.PathJoin(self.chromium_src_dir, 'out.json')
try:
ret, out, err = self.Run(
['git', 'cl', 'issue', '--json=out.json'], force_verbose=False)
if ret != 0:
self.Print(
"Unable to fetch current issue. Output and error:\n%s\n%s" % (
out, err
))
return ret
with open(json_path) as f:
issue_data = json.load(f)
finally:
if self.Exists(json_path):
os.unlink(json_path)
if not issue_data['issue']:
self.Print("Missing issue data. Upload your CL to Gerrit and try again.")
return 1
class LedException(Exception):
pass
def run_cmd(previous_res, cmd):
if self.args.verbose:
self.Print(('| ' if previous_res else '') + ' '.join(cmd))
res, out, err = self.Call(cmd, stdin=previous_res)
if res != 0:
self.Print("Err while running '%s'. Output:\n%s\nstderr:\n%s" % (
' '.join(cmd), out, err))
raise LedException()
return out
try:
result = LedResult(None, run_cmd).then(
# TODO(martiniss): maybe don't always assume the bucket?
'led', 'get-builder', 'luci.chromium.try:%s' % self.args.builder).then(
'led', 'edit', '-r', 'chromium_trybot_experimental',
'-p', 'tests=["%s"]' % ninja_target).then(
'led', 'edit-system', '--tag=purpose:user-debug-mb-try').then(
'led', 'edit-cr-cl', issue_data['issue_url']).then(
'led', 'launch').result
except LedException:
self.Print("If this is an unexpected error message, please file a bug"
" with https://goto.google.com/mb-try-bug")
raise
swarming_data = json.loads(result)['swarming']
self.Print("Launched task at https://%s/task?id=%s" % (
swarming_data['host_name'], swarming_data['task_id']))
def CmdRun(self):
vals = self.GetConfig()
if not vals:
return 1
if self.args.build:
self.Print('')
ret = self.Build(self.args.target)
if ret:
return ret
self.Print('')
ret = self.RunGNIsolate(vals)
if ret:
return ret
self.Print('')
if self.args.swarmed:
cmd, _ = self.GetSwarmingCommand(self.args.target, vals)
return self._RunUnderSwarming(self.args.path, self.args.target, cmd)
return self._RunLocallyIsolated(self.args.path, self.args.target)
def CmdZip(self):
ret = self.CmdIsolate()
if ret:
return ret
zip_dir = None
try:
zip_dir = self.TempDir()
remap_cmd = [
self.PathJoin(self.chromium_src_dir, 'tools', 'luci-go',
self.isolate_exe), 'remap', '-i',
self.PathJoin(self.args.path, self.args.target + '.isolate'),
'-outdir', zip_dir
]
ret, _, _ = self.Run(remap_cmd)
if ret:
return ret
zip_path = self.args.zip_path
with zipfile.ZipFile(
zip_path, 'w', zipfile.ZIP_DEFLATED, allowZip64=True) as fp:
for root, _, files in os.walk(zip_dir):
for filename in files:
path = self.PathJoin(root, filename)
fp.write(path, self.RelPath(path, zip_dir))
return 0
finally:
if zip_dir:
self.RemoveDirectory(zip_dir)
def _RunUnderSwarming(self, build_dir, target, isolate_cmd):
cas_instance = 'chromium-swarm'
swarming_server = 'chromium-swarm.appspot.com'
# TODO(dpranke): Look up the information for the target in
# the //testing/buildbot.json file, if possible, so that we
# can determine the isolate target, command line, and additional
# swarming parameters, if possible.
#
# TODO(dpranke): Also, add support for sharding and merging results.
dimensions = []
for k, v in self._DefaultDimensions() + self.args.dimensions:
dimensions += ['-d', '%s=%s' % (k, v)]
archive_json_path = self.ToSrcRelPath(
'%s/%s.archive.json' % (build_dir, target))
cmd = [
self.PathJoin(self.chromium_src_dir, 'tools', 'luci-go',
self.isolate_exe),
'archive',
'-i',
self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
'-cas-instance',
cas_instance,
'-dump-json',
archive_json_path,
]
# Talking to the isolateserver may fail because we're not logged in.
# We trap the command explicitly and rewrite the error output so that
# the error message is actually correct for a Chromium check out.
self.PrintCmd(cmd)
ret, out, _ = self.Run(cmd, force_verbose=False)
if ret:
self.Print(' -> returned %d' % ret)
if out:
self.Print(out, end='')
return ret
try:
archive_hashes = json.loads(self.ReadFile(archive_json_path))
except Exception:
self.Print(
'Failed to read JSON file "%s"' % archive_json_path, file=sys.stderr)
return 1
try:
cas_digest = archive_hashes[target]
except Exception:
self.Print(
'Cannot find hash for "%s" in "%s", file content: %s' %
(target, archive_json_path, archive_hashes),
file=sys.stderr)
return 1
tags = ['-tag=%s' % tag for tag in self.args.tags]
try:
json_dir = self.TempDir()
json_file = self.PathJoin(json_dir, 'task.json')
cmd = [
self.PathJoin('tools', 'luci-go', 'swarming'),
'trigger',
'-digest',
cas_digest,
'-server',
swarming_server,
'-tag=purpose:user-debug-mb',
'-relative-cwd',
self.ToSrcRelPath(build_dir),
'-dump-json',
json_file,
] + tags + dimensions + ['--'] + list(isolate_cmd)
if self.args.extra_args:
cmd += self.args.extra_args
self.Print('')
ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False)
if ret:
return ret
task_json = self.ReadFile(json_file)
task_id = json.loads(task_json)["tasks"][0]['task_id']
collect_output = self.PathJoin(json_dir, 'collect_output.json')
cmd = [
self.PathJoin('tools', 'luci-go', 'swarming'),
'collect',
'-server',
swarming_server,
'-task-output-stdout=console',
'-task-summary-json',
collect_output,
task_id,
]
ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False)
if ret != 0:
return ret
collect_json = json.loads(self.ReadFile(collect_output))
# The exit_code field is not included if the task was successful.
ret = collect_json.get(task_id, {}).get('results', {}).get('exit_code', 0)
finally:
if json_dir:
self.RemoveDirectory(json_dir)
return ret
def _RunLocallyIsolated(self, build_dir, target):
cmd = [
self.PathJoin(self.chromium_src_dir, 'tools', 'luci-go',
self.isolate_exe),
'run',
'-i',
self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
]
if self.args.extra_args:
cmd += ['--'] + self.args.extra_args
ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False)
return ret
def _DefaultDimensions(self):
if not self.args.default_dimensions:
return []
# This code is naive and just picks reasonable defaults per platform.
if self.platform == 'darwin':
os_dim = ('os', 'Mac-10.13')
elif self.platform.startswith('linux'):
os_dim = ('os', 'Ubuntu-16.04')
elif self.platform == 'win32':
os_dim = ('os', 'Windows-10')
else:
raise MBErr('unrecognized platform string "%s"' % self.platform)
return [('pool', 'chromium.tests'),
('cpu', 'x86-64'),
os_dim]
def _ToJsonish(self):
"""Dumps the config file into a json-friendly expanded dict.
Returns:
A dict with builder group -> builder -> all GN args mapping.
"""
self.ReadConfigFile(self.args.config_file)
obj = {}
for builder_group, builders in self.builder_groups.items():
obj[builder_group] = {}
for builder in builders:
config = self.builder_groups[builder_group][builder]
if not config:
continue
if isinstance(config, dict):
# This is a 'phased' builder. Each key in the config is a different
# phase of the builder.
args = {}
for k, v in config.items():
args[k] = gn_helpers.FromGNArgs(
FlattenConfig(self.configs, self.mixins, v)['gn_args'])
elif config.startswith('//'):
args = config
else:
flattened_config = FlattenConfig(self.configs, self.mixins, config)
if flattened_config['gn_args'] == 'error':
continue
args = {'gn_args': gn_helpers.FromGNArgs(flattened_config['gn_args'])}
if flattened_config.get('args_file'):
args['args_file'] = flattened_config['args_file']
obj[builder_group][builder] = args
return obj
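  # The dict built above has roughly this shape (group, builder, and arg names
  # are placeholders, not real configurations):
  #
  #   {
  #     'some.builder.group': {
  #       'Some Builder': {'gn_args': {'is_debug': False}},
  #       'Some Phased Builder': {'phase_1': {'is_debug': True}},
  #     },
  #   }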
def CmdValidate(self, print_ok=True):
errs = []
self.ReadConfigFile(self.args.config_file)
# Build a list of all of the configs referenced by builders.
all_configs = validation.GetAllConfigs(self.builder_groups)
# Check that every referenced args file or config actually exists.
for config, loc in all_configs.items():
if config.startswith('//'):
if not self.Exists(self.ToAbsPath(config)):
errs.append('Unknown args file "%s" referenced from "%s".' %
(config, loc))
elif not config in self.configs:
errs.append('Unknown config "%s" referenced from "%s".' %
(config, loc))
# Check that every config and mixin is referenced.
validation.CheckAllConfigsAndMixinsReferenced(errs, all_configs,
self.configs, self.mixins)
if self.args.config_file == self.default_config:
validation.EnsureNoProprietaryMixins(errs, self.builder_groups,
self.configs, self.mixins)
validation.CheckDuplicateConfigs(errs, self.configs, self.mixins,
self.builder_groups, FlattenConfig)
if not self.args.skip_dcheck_check:
self._ValidateEach(errs, validation.CheckDebugDCheckOrOfficial)
if errs:
raise MBErr(('mb config file %s has problems:\n ' %
self.args.config_file) + '\n '.join(errs))
expectations_dir = self.args.expectations_dir
# TODO(crbug.com/1117577): Force all versions of mb_config.pyl to have
# expectations. For now, just ignore those that don't have them.
if self.Exists(expectations_dir):
jsonish_blob = self._ToJsonish()
if not validation.CheckExpectations(self, jsonish_blob, expectations_dir):
raise MBErr("Expectations out of date. Please run 'mb.py train'.")
if print_ok:
self.Print('mb config file %s looks ok.' % self.args.config_file)
return 0
def _ValidateEach(self, errs, validate):
"""Checks a validate function against every builder config.
This loops over all the builders in the config file, invoking the
validate function against the full set of GN args. Any errors found
should be appended to the errs list passed in; the validation
function signature is
validate(errs:list, gn_args:dict, builder_group:str, builder:str,
phase:(str|None))"""
for builder_group, builders in self.builder_groups.items():
for builder, config in builders.items():
if isinstance(config, dict):
for phase, phase_config in config.items():
vals = FlattenConfig(self.configs, self.mixins, phase_config)
if vals['gn_args'] == 'error':
continue
try:
parsed_gn_args, _ = self.GNArgs(vals, expand_imports=True)
except IOError:
# The builder must use an args file that was not checked out or
# generated, so we should just ignore it.
parsed_gn_args, _ = self.GNArgs(vals, expand_imports=False)
validate(errs, parsed_gn_args, builder_group, builder, phase)
else:
vals = FlattenConfig(self.configs, self.mixins, config)
if vals['gn_args'] == 'error':
continue
try:
parsed_gn_args, _ = self.GNArgs(vals, expand_imports=True)
except IOError:
# The builder must use an args file that was not checked out or
# generated, so we should just ignore it.
parsed_gn_args, _ = self.GNArgs(vals, expand_imports=False)
validate(errs, parsed_gn_args, builder_group, builder, phase=None)
def GetConfig(self):
build_dir = self.args.path
vals = DefaultVals()
if self.args.builder or self.args.builder_group or self.args.config:
vals = self.Lookup()
# Re-run gn gen in order to ensure the config is consistent with the
# build dir.
self.RunGNGen(vals)
return vals
toolchain_path = self.PathJoin(self.ToAbsPath(build_dir),
'toolchain.ninja')
if not self.Exists(toolchain_path):
self.Print('Must either specify a path to an existing GN build dir '
'or pass in a -m/-b pair or a -c flag to specify the '
'configuration')
return {}
vals['gn_args'] = self.GNArgsFromDir(build_dir)
return vals
def GNArgsFromDir(self, build_dir):
args_contents = ""
gn_args_path = self.PathJoin(self.ToAbsPath(build_dir), 'args.gn')
if self.Exists(gn_args_path):
args_contents = self.ReadFile(gn_args_path)
# Handle any .gni file imports, e.g. the ones used by CrOS. This should
# be automatically handled by gn_helpers.FromGNArgs (via its call to
# gn_helpers.GNValueParser.ReplaceImports), but that currently breaks
# mb_unittest since it mocks out file reads itself instead of using
# pyfakefs. This results in gn_helpers trying to read a non-existent file.
# The implementation of ReplaceImports here can be removed once the
# unittests use pyfakefs.
def ReplaceImports(input_contents):
output_contents = ''
for l in input_contents.splitlines(True):
if not l.strip().startswith('#') and 'import(' in l:
import_file = l.split('"', 2)[1]
import_file = self.ToAbsPath(import_file)
imported_contents = self.ReadFile(import_file)
output_contents += ReplaceImports(imported_contents) + '\n'
else:
output_contents += l
return output_contents
args_contents = ReplaceImports(args_contents)
args_dict = gn_helpers.FromGNArgs(args_contents)
# Re-add the quotes around strings so they show up as they would in the
# args.gn file.
for k, v in args_dict.items():
if isinstance(v, str):
args_dict[k] = '"%s"' % v
return ' '.join(['%s=%s' % (k, v) for (k, v) in args_dict.items()])
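  # For example (illustrative values only), an args.gn containing
  #     symbol_level = 1
  #     target_os = "android"
  # is flattened by the method above into a single string along the lines of
  #     'symbol_level=1 target_os="android"'
  # (key ordering is not guaranteed).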
def Lookup(self):
self.ReadConfigFile(self.args.config_file)
try:
config = self.ConfigFromArgs()
except MBErr as e:
# TODO(crbug.com/912681) While iOS bots are migrated to use the
# Chromium recipe, we want to ensure that we're checking MB's
# configurations first before going to iOS.
# This is to be removed once the migration is complete.
vals = self.ReadIOSBotConfig()
if not vals:
raise e
return vals
# TODO(crbug.com/912681) Some iOS bots have a definition, with ios_error
# as an indicator that it's incorrect. We utilize this to check the
# iOS JSON instead, and error out if there exists no definition at all.
# This is to be removed once the migration is complete.
if config == 'ios_error':
vals = self.ReadIOSBotConfig()
if not vals:
raise MBErr('No iOS definition was found. Please ensure there is a '
'definition for the given iOS bot under '
'mb_config.pyl or a JSON file definition under '
'//ios/build/bots.')
return vals
if config.startswith('//'):
if not self.Exists(self.ToAbsPath(config)):
raise MBErr('args file "%s" not found' % config)
vals = DefaultVals()
vals['args_file'] = config
else:
if not config in self.configs:
raise MBErr(
'Config "%s" not found in %s' % (config, self.args.config_file))
vals = FlattenConfig(self.configs, self.mixins, config)
return vals
def ReadIOSBotConfig(self):
if not self.args.builder_group or not self.args.builder:
return {}
path = self.PathJoin(self.chromium_src_dir, 'ios', 'build', 'bots',
self.args.builder_group, self.args.builder + '.json')
if not self.Exists(path):
return {}
contents = json.loads(self.ReadFile(path))
gn_args = ' '.join(contents.get('gn_args', []))
vals = DefaultVals()
vals['gn_args'] = gn_args
return vals
def ReadConfigFile(self, config_file):
if not self.Exists(config_file):
raise MBErr('config file not found at %s' % config_file)
try:
contents = ast.literal_eval(self.ReadFile(config_file))
except SyntaxError as e:
raise MBErr('Failed to parse config file "%s": %s' % (config_file, e))
self.configs = contents['configs']
self.mixins = contents['mixins']
self.builder_groups = contents.get('builder_groups')
self.public_artifact_builders = contents.get('public_artifact_builders')
def ReadIsolateMap(self):
if not self.args.isolate_map_files:
self.args.isolate_map_files = [self.default_isolate_map]
for f in self.args.isolate_map_files:
if not self.Exists(f):
raise MBErr('isolate map file not found at %s' % f)
isolate_maps = {}
for isolate_map in self.args.isolate_map_files:
try:
isolate_map = ast.literal_eval(self.ReadFile(isolate_map))
duplicates = set(isolate_map).intersection(isolate_maps)
if duplicates:
raise MBErr(
'Duplicate targets in isolate map files: %s.' %
', '.join(duplicates))
isolate_maps.update(isolate_map)
except SyntaxError as e:
raise MBErr(
'Failed to parse isolate map file "%s": %s' % (isolate_map, e))
return isolate_maps
def ConfigFromArgs(self):
if self.args.config:
if self.args.builder_group or self.args.builder:
        raise MBErr('Cannot specify both -c/--config and --builder-group '
'or -b/--builder')
return self.args.config
if not self.args.builder_group or not self.args.builder:
raise MBErr('Must specify either -c/--config or '
'(--builder-group and -b/--builder)')
if not self.args.builder_group in self.builder_groups:
raise MBErr('Builder group name "%s" not found in "%s"' %
(self.args.builder_group, self.args.config_file))
if not self.args.builder in self.builder_groups[self.args.builder_group]:
raise MBErr('Builder name "%s" not found under groups[%s] in "%s"' %
(self.args.builder, self.args.builder_group,
self.args.config_file))
config = self.builder_groups[self.args.builder_group][self.args.builder]
if isinstance(config, dict):
if self.args.phase is None:
raise MBErr('Must specify a build --phase for %s on %s' %
(self.args.builder, self.args.builder_group))
phase = str(self.args.phase)
if phase not in config:
raise MBErr('Phase %s doesn\'t exist for %s on %s' %
(phase, self.args.builder, self.args.builder_group))
return config[phase]
if self.args.phase is not None:
raise MBErr('Must not specify a build --phase for %s on %s' %
(self.args.builder, self.args.builder_group))
return config
def RunGNGen(self, vals, compute_inputs_for_analyze=False, check=True):
build_dir = self.args.path
if check:
cmd = self.GNCmd('gen', build_dir, '--check')
else:
cmd = self.GNCmd('gen', build_dir)
_, gn_args = self.GNArgs(vals)
if compute_inputs_for_analyze:
gn_args += ' compute_inputs_for_analyze=true'
# Since GN hasn't run yet, the build directory may not even exist.
self.MaybeMakeDirectory(self.ToAbsPath(build_dir))
gn_args_path = self.ToAbsPath(build_dir, 'args.gn')
self.WriteFile(gn_args_path, gn_args, force_verbose=True)
if getattr(self.args, 'swarming_targets_file', None):
# We need GN to generate the list of runtime dependencies for
# the compile targets listed (one per line) in the file so
# we can run them via swarming. We use gn_isolate_map.pyl to convert
# the compile targets to the matching GN labels.
path = self.args.swarming_targets_file
if not self.Exists(path):
self.WriteFailureAndRaise('"%s" does not exist' % path,
output_path=None)
contents = self.ReadFile(path)
isolate_targets = set(contents.splitlines())
isolate_map = self.ReadIsolateMap()
self.RemovePossiblyStaleRuntimeDepsFiles(vals, isolate_targets,
isolate_map, build_dir)
err, labels = self.MapTargetsToLabels(isolate_map, isolate_targets)
if err:
raise MBErr(err)
gn_runtime_deps_path = self.ToAbsPath(build_dir, 'runtime_deps')
self.WriteFile(gn_runtime_deps_path, '\n'.join(labels) + '\n')
cmd.append('--runtime-deps-list-file=%s' % gn_runtime_deps_path)
ret, output, _ = self.Run(cmd)
if ret != 0:
if self.args.json_output:
# write errors to json.output
self.WriteJSON({'output': output}, self.args.json_output)
# If `gn gen` failed, we should exit early rather than trying to
# generate isolates. Run() will have already logged any error output.
self.Print('GN gen failed: %d' % ret)
return ret
if getattr(self.args, 'swarming_targets_file', None):
ret = self.GenerateIsolates(vals, isolate_targets, isolate_map, build_dir)
return ret
def RunGNGenAllIsolates(self, vals):
"""
This command generates all .isolate files.
This command assumes that "mb.py gen" has already been run, as it relies on
"gn ls" to fetch all gn targets. If uses that output, combined with the
isolate_map, to determine all isolates that can be generated for the current
gn configuration.
"""
build_dir = self.args.path
ret, output, _ = self.Run(self.GNCmd('ls', build_dir),
force_verbose=False)
if ret != 0:
# If `gn ls` failed, we should exit early rather than trying to
# generate isolates.
self.Print('GN ls failed: %d' % ret)
return ret
# Create a reverse map from isolate label to isolate dict.
isolate_map = self.ReadIsolateMap()
isolate_dict_map = {}
for key, isolate_dict in isolate_map.items():
isolate_dict_map[isolate_dict['label']] = isolate_dict
isolate_dict_map[isolate_dict['label']]['isolate_key'] = key
runtime_deps = []
isolate_targets = []
# For every GN target, look up the isolate dict.
for line in output.splitlines():
target = line.strip()
if target in isolate_dict_map:
if isolate_dict_map[target]['type'] == 'additional_compile_target':
# By definition, additional_compile_targets are not tests, so we
# shouldn't generate isolates for them.
continue
isolate_targets.append(isolate_dict_map[target]['isolate_key'])
runtime_deps.append(target)
self.RemovePossiblyStaleRuntimeDepsFiles(vals, isolate_targets,
isolate_map, build_dir)
gn_runtime_deps_path = self.ToAbsPath(build_dir, 'runtime_deps')
self.WriteFile(gn_runtime_deps_path, '\n'.join(runtime_deps) + '\n')
cmd = self.GNCmd('gen', build_dir)
cmd.append('--runtime-deps-list-file=%s' % gn_runtime_deps_path)
self.Run(cmd)
return self.GenerateIsolates(vals, isolate_targets, isolate_map, build_dir)
def RemovePossiblyStaleRuntimeDepsFiles(self, vals, targets, isolate_map,
build_dir):
# TODO(crbug.com/932700): Because `gn gen --runtime-deps-list-file`
# puts the runtime_deps file in different locations based on the actual
# type of a target, we may end up with multiple possible runtime_deps
# files in a given build directory, where some of the entries might be
# stale (since we might be reusing an existing build directory).
#
# We need to be able to get the right one reliably; you might think
# we can just pick the newest file, but because GN won't update timestamps
# if the contents of the files change, an older runtime_deps
# file might actually be the one we should use over a newer one (see
# crbug.com/932387 for a more complete explanation and example).
#
# In order to avoid this, we need to delete any possible runtime_deps
# files *prior* to running GN. As long as the files aren't actually
# needed during the build, this hopefully will not cause unnecessary
# build work, and so it should be safe.
#
# Ultimately, we should just make sure we get the runtime_deps files
# in predictable locations so we don't have this issue at all, and
# that's what crbug.com/932700 is for.
possible_rpaths = self.PossibleRuntimeDepsPaths(vals, targets, isolate_map)
for rpaths in possible_rpaths.values():
for rpath in rpaths:
path = self.ToAbsPath(build_dir, rpath)
if self.Exists(path):
self.RemoveFile(path)
def _FilterOutUnneededSkylabDeps(self, deps):
"""Filter out the runtime dependencies not used by Skylab.
Skylab is CrOS infra facilities for us to run hardware tests. These files
may appear in the test target's runtime_deps but unnecessary for our tests
to execute in a CrOS device.
"""
file_ignore_list = [
re.compile(r'.*build/android.*'),
re.compile(r'.*build/chromeos.*'),
re.compile(r'.*build/cros_cache.*'),
# The following matches anything under //testing/ that isn't under
# //testing/buildbot/filters/.
re.compile(r'.*testing/(?!buildbot/filters).*'),
re.compile(r'.*third_party/chromite.*'),
# No test target should rely on files in [output_dir]/gen.
re.compile(r'^gen/.*'),
]
return [f for f in deps if not any(r.match(f) for r in file_ignore_list)]
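  # For example (illustrative paths only), given deps such as
  #     ['testing/test_env.py', 'testing/buildbot/filters/foo.filter',
  #      'third_party/chromite/bin/', 'gen/foo/bar.json']
  # only 'testing/buildbot/filters/foo.filter' survives the filter above; the
  # other entries match the ignore patterns.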
def _DedupDependencies(self, deps):
"""Remove the deps already contained by other paths."""
def _add(root, path):
cur = path.popleft()
# Only continue the recursion if the path has child nodes
# AND the current node is not ended by other existing paths.
if path and root.get(cur) != {}:
return _add(root.setdefault(cur, {}), path)
# Cut this path, because child nodes are already included.
root[cur] = {}
return root
def _list(root, prefix, res):
for k, v in root.items():
if v == {}:
res.append('%s/%s' % (prefix, k))
continue
_list(v, '%s/%s' % (prefix, k), res)
return res
root = {}
for d in deps:
q = collections.deque(d.rstrip('/').split('/'))
_add(root, q)
return [p.lstrip('/') for p in _list(root, '', [])]
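  # For example (illustrative paths only), given
  #     ['base/', 'base/test/data/', 'tools/metrics/']
  # the nested 'base/test/data/' entry is dropped, because 'base/' already
  # covers it.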
def GenerateIsolates(self, vals, ninja_targets, isolate_map, build_dir):
"""
Generates isolates for a list of ninja targets.
Ninja targets are transformed to GN targets via isolate_map.
This function assumes that a previous invocation of "mb.py gen" has
generated runtime deps for all targets.
"""
possible_rpaths = self.PossibleRuntimeDepsPaths(vals, ninja_targets,
isolate_map)
for target, rpaths in possible_rpaths.items():
# TODO(crbug.com/932700): We don't know where each .runtime_deps
# file might be, but assuming we called
# RemovePossiblyStaleRuntimeDepsFiles prior to calling `gn gen`,
# there should only be one file.
found_one = False
path_to_use = None
for r in rpaths:
path = self.ToAbsPath(build_dir, r)
if self.Exists(path):
if found_one:
raise MBErr('Found more than one of %s' % ', '.join(rpaths))
path_to_use = path
found_one = True
if not found_one:
raise MBErr('Did not find any of %s' % ', '.join(rpaths))
command, extra_files = self.GetSwarmingCommand(target, vals)
runtime_deps = self.ReadFile(path_to_use).splitlines()
runtime_deps = self._DedupDependencies(runtime_deps)
if 'is_skylab=true' in vals['gn_args']:
runtime_deps = self._FilterOutUnneededSkylabDeps(runtime_deps)
# For more info about RTS, please see
# //docs/testing/regression-test-selection.md
if self.args.use_rts or self.args.use_st:
self.AddFilterFileArg(target, build_dir, command)
canonical_target = target.replace(':','_').replace('/','_')
ret = self.WriteIsolateFiles(build_dir, command, canonical_target,
runtime_deps, vals, extra_files)
if ret != 0:
return ret
return 0
def AddFilterFileArg(self, target, build_dir, command):
if target in self.banned_from_rts:
self.Print('%s is banned for RTS on this builder' % target)
else:
filter_file = target + '.filter'
filter_file_path = self.PathJoin(self.rts_out_dir, filter_file)
abs_filter_file_path = self.ToAbsPath(build_dir, filter_file_path)
self.CreateOrAppendStableTestFilter(abs_filter_file_path, build_dir,
target)
if self.Exists(abs_filter_file_path):
command.append('--test-launcher-filter-file=%s' % filter_file_path)
self.Print('added RTS filter file to command: %s' % filter_file)
def CreateOrAppendStableTestFilter(self, abs_filter_file_path, build_dir,
target):
if self.args.use_st:
stable_filter_file = self.PathJoin(
self.chromium_src_dir, 'testing',
'buildbot', 'filters', 'stable_test_filters',
getattr(self.args, 'builder', None), target) + '.filter'
# The path to the filter file to append
abs_stable_filter_file = self.ToAbsPath(build_dir, stable_filter_file)
if self.Exists(abs_stable_filter_file):
# A stable filter exists
if not self.args.use_rts:
self.Print('RTS disabled, using stable filter')
dest_dir = os.path.dirname(abs_filter_file_path)
if not self.Exists(dest_dir):
os.makedirs(dest_dir)
shutil.copy(abs_stable_filter_file, abs_filter_file_path)
else:
          # RTS is enabled and will delete ALL .filter files; only the RTS
          # filters generated during this run should remain.
if not self.Exists(abs_filter_file_path):
self.Print('No RTS filter found, using stable filter')
shutil.copy(abs_stable_filter_file, abs_filter_file_path)
else:
self.Print('Adding stable tests filter to RTS filter')
with open(abs_filter_file_path, 'a+') as select_filter_file, open(
abs_stable_filter_file, 'r') as stable_filter_file:
select_filter_file.write('\n')
select_filter_file.write(stable_filter_file.read())
else:
self.Print('No stable filter found at %s' % abs_stable_filter_file)
else:
self.Print('No stable filter')
return 0
def PossibleRuntimeDepsPaths(self, vals, ninja_targets, isolate_map):
"""Returns a map of targets to possible .runtime_deps paths.
Each ninja target maps on to a GN label, but depending on the type
of the GN target, `gn gen --runtime-deps-list-file` will write
the .runtime_deps files into different locations. Unfortunately, in
some cases we don't actually know which of multiple locations will
actually be used, so we return all plausible candidates.
The paths that are returned are relative to the build directory.
"""
android = 'target_os="android"' in vals['gn_args']
ios = 'target_os="ios"' in vals['gn_args']
fuchsia = 'target_os="fuchsia"' in vals['gn_args']
win = self.platform == 'win32' or 'target_os="win"' in vals['gn_args']
possible_runtime_deps_rpaths = {}
for target in ninja_targets:
target_type = isolate_map[target]['type']
label = isolate_map[target]['label']
stamp_runtime_deps = 'obj/%s.stamp.runtime_deps' % label.replace(':', '/')
# TODO(https://crbug.com/876065): 'official_tests' use
# type='additional_compile_target' to isolate tests. This is not the
# intended use for 'additional_compile_target'.
if (target_type == 'additional_compile_target' and
target != 'official_tests'):
# By definition, additional_compile_targets are not tests, so we
# shouldn't generate isolates for them.
raise MBErr('Cannot generate isolate for %s since it is an '
'additional_compile_target.' % target)
if fuchsia or ios or target_type == 'generated_script':
# iOS and Fuchsia targets end up as groups.
# generated_script targets are always actions.
rpaths = [stamp_runtime_deps]
elif android:
# Android targets may be either android_apk or executable. The former
# will result in runtime_deps associated with the stamp file, while the
# latter will result in runtime_deps associated with the executable.
label = isolate_map[target]['label']
rpaths = [
target + '.runtime_deps',
stamp_runtime_deps]
elif (target_type == 'script'
or isolate_map[target].get('label_type') == 'group'):
# For script targets, the build target is usually a group,
# for which gn generates the runtime_deps next to the stamp file
# for the label, which lives under the obj/ directory, but it may
# also be an executable.
label = isolate_map[target]['label']
rpaths = [stamp_runtime_deps]
if win:
rpaths += [ target + '.exe.runtime_deps' ]
else:
rpaths += [ target + '.runtime_deps' ]
elif win:
rpaths = [target + '.exe.runtime_deps']
else:
rpaths = [target + '.runtime_deps']
possible_runtime_deps_rpaths[target] = rpaths
return possible_runtime_deps_rpaths
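  # For example (illustrative target names only), a plain executable test
  # target 'foo_unittests' maps to ['foo_unittests.runtime_deps'] on Linux and
  # to ['foo_unittests.exe.runtime_deps'] on Windows, while group-like targets
  # map to a stamp-file path under obj/.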
def RunGNIsolate(self, vals):
target = self.args.target
isolate_map = self.ReadIsolateMap()
err, labels = self.MapTargetsToLabels(isolate_map, [target])
if err:
raise MBErr(err)
label = labels[0]
build_dir = self.args.path
command, extra_files = self.GetSwarmingCommand(target, vals)
# Any warning for an unused arg will get interleaved into the cmd's
# stdout. When that happens, the isolate step below will fail with an
# obscure error when it tries processing the lines of the warning. Fail
# quickly in that case to avoid confusion
cmd = self.GNCmd('desc', build_dir, label, 'runtime_deps',
'--fail-on-unused-args')
ret, out, _ = self.Call(cmd)
if ret != 0:
if out:
self.Print(out)
return ret
runtime_deps = out.splitlines()
ret = self.WriteIsolateFiles(build_dir, command, target, runtime_deps, vals,
extra_files)
if ret != 0:
return ret
ret, _, _ = self.Run([
self.PathJoin(self.chromium_src_dir, 'tools', 'luci-go',
self.isolate_exe),
'check',
'-i',
self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
],
buffer_output=False)
return ret
def WriteIsolateFiles(self, build_dir, command, target, runtime_deps, vals,
extra_files):
isolate_path = self.ToAbsPath(build_dir, target + '.isolate')
files = sorted(set(runtime_deps + extra_files))
# Complain if any file is a directory that's inside the build directory,
# since that makes incremental builds incorrect. See
# https://crbug.com/912946
is_android = 'target_os="android"' in vals['gn_args']
is_cros = ('target_os="chromeos"' in vals['gn_args']
or 'is_chromeos_device=true' in vals['gn_args'])
is_mac = self.platform == 'darwin'
is_msan = 'is_msan=true' in vals['gn_args']
is_ios = 'target_os="ios"' in vals['gn_args']
err = ''
for f in files:
# Skip a few configs that need extra cleanup for now.
# TODO(https://crbug.com/912946): Fix everything on all platforms and
# enable check everywhere.
if is_android:
break
# iOS has generated directories in gn data items.
# Skipping for iOS instead of listing all apps.
if is_ios:
break
# Skip a few existing violations that need to be cleaned up. Each of
# these will lead to incorrect incremental builds if their directory
# contents change. Do not add to this list, except for mac bundles until
# crbug.com/1000667 is fixed.
# TODO(https://crbug.com/912946): Remove this if statement.
if ((is_msan and f == 'instrumented_libraries_prebuilt/')
or f == 'mr_extension/' or # https://crbug.com/997947
f.startswith('nacl_test_data/') or
f.startswith('ppapi_nacl_tests_libs/') or
(is_cros and f in ( # https://crbug.com/1002509
'chromevox_test_data/',
'gen/ui/file_manager/file_manager/',
'resources/chromeos/',
'resources/chromeos/accessibility/accessibility_common/',
'resources/chromeos/accessibility/chromevox/',
'resources/chromeos/accessibility/select_to_speak/',
'test_data/chrome/browser/resources/chromeos/accessibility/'
'accessibility_common/',
'test_data/chrome/browser/resources/chromeos/accessibility/'
'chromevox/',
'test_data/chrome/browser/resources/chromeos/accessibility/'
'select_to_speak/',
)) or (is_mac and f in ( # https://crbug.com/1000667
'Chromium Framework.framework/',
'Chromium Helper.app/',
'Chromium.app/',
'ChromiumUpdater.app/',
'Content Shell.app/',
'Google Chrome Framework.framework/',
'Google Chrome Helper (Alerts).app/',
'Google Chrome Helper (GPU).app/',
'Google Chrome Helper (Plugin).app/',
'Google Chrome Helper (Renderer).app/',
'Google Chrome Helper.app/',
'Google Chrome.app/',
'GoogleUpdater.app/',
'UpdaterTestApp Framework.framework/',
'UpdaterTestApp.app/',
'blink_deprecated_test_plugin.plugin/',
'blink_test_plugin.plugin/',
'corb_test_plugin.plugin/',
'obj/tools/grit/brotli_mac_asan_workaround/',
'ppapi_tests.plugin/',
'ui_unittests Framework.framework/',
))):
continue
# This runs before the build, so we can't use isdir(f). But
# isolate.py luckily requires data directories to end with '/', so we
# can check for that.
if not f.startswith('../../') and f.endswith('/'):
# Don't use self.PathJoin() -- all involved paths consistently use
# forward slashes, so don't add one single backslash on Windows.
err += '\n' + build_dir + '/' + f
if err:
self.Print('error: gn `data` items may not list generated directories; '
'list files in directory instead for:' + err)
return 1
self.WriteFile(isolate_path,
json.dumps({
'variables': {
'command': command,
'files': files,
}
}, sort_keys=True) + '\n')
self.WriteJSON(
{
'args': [
'--isolate',
self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
],
'dir': self.chromium_src_dir,
'version': 1,
},
isolate_path + 'd.gen.json',
)
return 0
def MapTargetsToLabels(self, isolate_map, targets):
labels = []
err = ''
for target in targets:
if target == 'all':
labels.append(target)
elif target.startswith('//'):
labels.append(target)
else:
if target in isolate_map:
if isolate_map[target]['type'] == 'unknown':
err += ('test target "%s" type is unknown\n' % target)
else:
labels.append(isolate_map[target]['label'])
else:
err += ('target "%s" not found in '
'//testing/buildbot/gn_isolate_map.pyl\n' % target)
return err, labels
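  # Illustrative sketch of the mapping above (the isolate_map entry is an
  # invented example in the assumed format of //testing/buildbot/gn_isolate_map.pyl):
  # with isolate_map == {'base_unittests': {'label': '//base:base_unittests',
  # 'type': 'console_test_launcher'}}, calling
  # MapTargetsToLabels(isolate_map, ['base_unittests', 'all']) returns
  # ('', ['//base:base_unittests', 'all']); unknown or 'unknown'-typed targets
  # are reported through the err string instead.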
def GNCmd(self, subcommand, path, *args):
if self.platform.startswith('linux'):
subdir, exe = 'linux64', 'gn'
elif self.platform == 'darwin':
subdir, exe = 'mac', 'gn'
elif self.platform == 'aix6':
subdir, exe = 'aix', 'gn'
else:
subdir, exe = 'win', 'gn.exe'
gn_path = self.PathJoin(self.chromium_src_dir, 'buildtools', subdir, exe)
cmd = [gn_path, subcommand]
if self.args.root:
cmd += ['--root=' + self.args.root]
if self.args.dotfile:
cmd += ['--dotfile=' + self.args.dotfile]
return cmd + [path] + list(args)
def GNArgs(self, vals, expand_imports=False):
"""Returns the gn args from vals as a Python dict and a text string.
    If expand_imports is true, any import() lines will be read in and the
    values in them will be included."""
gn_args = vals['gn_args']
if self.args.goma_dir:
gn_args += ' goma_dir="%s"' % self.args.goma_dir
android_version_code = self.args.android_version_code
if android_version_code:
gn_args += ' android_default_version_code="%s"' % android_version_code
android_version_name = self.args.android_version_name
if android_version_name:
gn_args += ' android_default_version_name="%s"' % android_version_name
if self.args.use_rts or self.args.use_st:
gn_args += ' use_rts=true'
args_gn_lines = []
parsed_gn_args = {}
args_file = vals.get('args_file', None)
if args_file:
if expand_imports:
content = self.ReadFile(self.ToAbsPath(args_file))
parsed_gn_args = gn_helpers.FromGNArgs(content)
else:
args_gn_lines.append('import("%s")' % args_file)
# Canonicalize the arg string into a sorted, newline-separated list
# of key-value pairs, and de-dup the keys if need be so that only
# the last instance of each arg is listed.
parsed_gn_args.update(gn_helpers.FromGNArgs(gn_args))
args_gn_lines.append(gn_helpers.ToGNString(parsed_gn_args))
return parsed_gn_args, '\n'.join(args_gn_lines)
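  # Illustrative sketch of the canonicalization above (the argument values are
  # invented, not taken from any real config): if gn_args ends up as
  # 'is_debug=true target_cpu="x64" is_debug=false', FromGNArgs() keeps only
  # the last is_debug, so parsed_gn_args is {'is_debug': False,
  # 'target_cpu': 'x64'} and the returned text is the newline-separated GN
  # string for those two keys.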
def GetSwarmingCommand(self, target, vals):
isolate_map = self.ReadIsolateMap()
is_android = 'target_os="android"' in vals['gn_args']
is_fuchsia = 'target_os="fuchsia"' in vals['gn_args']
is_cros = ('target_os="chromeos"' in vals['gn_args']
or 'is_chromeos_device=true' in vals['gn_args'])
is_cros_device = 'is_chromeos_device=true' in vals['gn_args']
is_ios = 'target_os="ios"' in vals['gn_args']
is_mac = self.platform == 'darwin' and not is_ios
is_win = self.platform == 'win32' or 'target_os="win"' in vals['gn_args']
is_lacros = 'chromeos_is_browser_only=true' in vals['gn_args']
test_type = isolate_map[target]['type']
if self.use_luci_auth:
cmdline = ['luci-auth.exe' if is_win else 'luci-auth', 'context', '--']
else:
cmdline = []
if test_type == 'generated_script' or is_ios or is_lacros:
assert 'script' not in isolate_map[target], (
'generated_scripts can no longer customize the script path')
if is_win:
default_script = 'bin\\run_{}.bat'.format(target)
else:
default_script = 'bin/run_{}'.format(target)
script = isolate_map[target].get('script', default_script)
# TODO(crbug.com/816629): remove any use of 'args' from
# generated_scripts.
cmdline += [script] + isolate_map[target].get('args', [])
return cmdline, []
# TODO(crbug.com/816629): Convert all targets to generated_scripts
# and delete the rest of this function.
# This should be true if tests with type='windowed_test_launcher' are
# expected to run using xvfb. For example, Linux Desktop, X11 CrOS and
# Ozone CrOS builds on Linux (xvfb is not used on CrOS HW or VMs). Note
# that one Ozone build can be used to run different backends. Currently,
# tests are executed for the headless and X11 backends and both can run
# under Xvfb on Linux.
# TODO(tonikitoo,msisov,fwang): Find a way to run tests for the Wayland
# backend.
use_xvfb = (self.platform.startswith('linux') and not is_android
and not is_fuchsia and not is_cros_device)
asan = 'is_asan=true' in vals['gn_args']
msan = 'is_msan=true' in vals['gn_args']
tsan = 'is_tsan=true' in vals['gn_args']
cfi_diag = 'use_cfi_diag=true' in vals['gn_args']
clang_coverage = 'use_clang_coverage=true' in vals['gn_args']
java_coverage = 'use_jacoco_coverage=true' in vals['gn_args']
javascript_coverage = 'use_javascript_coverage=true' in vals['gn_args']
executable = isolate_map[target].get('executable', target)
executable_suffix = isolate_map[target].get(
'executable_suffix', '.exe' if is_win else '')
if isolate_map[target].get('python3', True):
extra_files = ['../../.vpython3']
vpython_exe = 'vpython3'
else:
extra_files = ['../../.vpython']
vpython_exe = 'vpython'
extra_files += [
'../../testing/test_env.py',
]
if is_android and test_type != 'script':
if asan:
cmdline += [os.path.join('bin', 'run_with_asan'), '--']
cmdline += [
vpython_exe, '../../build/android/test_wrapper/logdog_wrapper.py',
'--target', target, '--logdog-bin-cmd',
'../../.task_template_packages/logdog_butler', '--store-tombstones'
]
if clang_coverage or java_coverage:
cmdline += ['--coverage-dir', '${ISOLATED_OUTDIR}']
elif is_fuchsia and test_type != 'script':
cmdline += [
os.path.join('bin', 'run_%s' % target),
'--test-launcher-bot-mode',
'--logs-dir=${ISOLATED_OUTDIR}',
]
elif is_cros_device and test_type != 'script':
cmdline += [
os.path.join('bin', 'run_%s' % target),
'--logs-dir=${ISOLATED_OUTDIR}',
]
elif use_xvfb and test_type == 'windowed_test_launcher':
extra_files.append('../../testing/xvfb.py')
cmdline += [
vpython_exe,
'../../testing/xvfb.py',
'./' + str(executable) + executable_suffix,
'--test-launcher-bot-mode',
'--asan=%d' % asan,
# Enable lsan when asan is enabled except on Windows where LSAN isn't
# supported.
# TODO(https://crbug.com/948939): Enable on Mac once things pass.
# TODO(https://crbug.com/974478): Enable on ChromeOS once things pass.
'--lsan=%d' % (asan and not is_mac and not is_win and not is_cros),
'--msan=%d' % msan,
'--tsan=%d' % tsan,
'--cfi-diag=%d' % cfi_diag,
]
if javascript_coverage:
cmdline += ['--devtools-code-coverage=${ISOLATED_OUTDIR}']
elif test_type in ('windowed_test_launcher', 'console_test_launcher'):
cmdline += [
vpython_exe,
'../../testing/test_env.py',
'./' + str(executable) + executable_suffix,
'--test-launcher-bot-mode',
'--asan=%d' % asan,
# Enable lsan when asan is enabled except on Windows where LSAN isn't
# supported.
# TODO(https://crbug.com/948939): Enable on Mac once things pass.
# TODO(https://crbug.com/974478): Enable on ChromeOS once things pass.
'--lsan=%d' % (asan and not is_mac and not is_win and not is_cros),
'--msan=%d' % msan,
'--tsan=%d' % tsan,
'--cfi-diag=%d' % cfi_diag,
]
elif test_type == 'script':
# If we're testing a CrOS simplechrome build, assume we need to prepare a
# DUT for testing. So prepend the command to run with the test wrapper.
if is_cros_device:
cmdline += [
os.path.join('bin', 'cros_test_wrapper'),
'--logs-dir=${ISOLATED_OUTDIR}',
'--',
]
if is_android:
extra_files.append('../../build/android/test_wrapper/logdog_wrapper.py')
cmdline += [
vpython_exe,
'../../testing/test_env.py',
'../../build/android/test_wrapper/logdog_wrapper.py',
'--script',
'../../' + self.ToSrcRelPath(isolate_map[target]['script']),
'--logdog-bin-cmd',
'../../.task_template_packages/logdog_butler',
]
else:
cmdline += [
vpython_exe, '../../testing/test_env.py',
'../../' + self.ToSrcRelPath(isolate_map[target]['script'])
]
elif test_type == 'additional_compile_target':
cmdline = [
'./' + str(target) + executable_suffix,
]
else:
self.WriteFailureAndRaise('No command line for %s found (test type %s).'
% (target, test_type), output_path=None)
cmdline += isolate_map[target].get('args', [])
return cmdline, extra_files
def ToAbsPath(self, build_path, *comps):
return self.PathJoin(self.chromium_src_dir,
self.ToSrcRelPath(build_path),
*comps)
def ToSrcRelPath(self, path):
"""Returns a relative path from the top of the repo."""
if path.startswith('//'):
return path[2:].replace('/', self.sep)
return self.RelPath(path, self.chromium_src_dir)
def RunGNAnalyze(self, vals):
# Analyze runs before 'gn gen' now, so we need to run gn gen
# in order to ensure that we have a build directory.
ret = self.RunGNGen(vals, compute_inputs_for_analyze=True, check=False)
if ret != 0:
return ret
build_path = self.args.path
input_path = self.args.input_path
gn_input_path = input_path + '.gn'
output_path = self.args.output_path
gn_output_path = output_path + '.gn'
inp = self.ReadInputJSON(['files', 'test_targets',
'additional_compile_targets'])
if self.args.verbose:
self.Print()
self.Print('analyze input:')
self.PrintJSON(inp)
self.Print()
# This shouldn't normally happen, but could due to unusual race conditions,
# like a try job that gets scheduled before a patch lands but runs after
# the patch has landed.
if not inp['files']:
self.Print('Warning: No files modified in patch, bailing out early.')
self.WriteJSON({
'status': 'No dependency',
'compile_targets': [],
'test_targets': [],
}, output_path)
return 0
gn_inp = {}
gn_inp['files'] = ['//' + f for f in inp['files'] if not f.startswith('//')]
isolate_map = self.ReadIsolateMap()
err, gn_inp['additional_compile_targets'] = self.MapTargetsToLabels(
isolate_map, inp['additional_compile_targets'])
if err:
raise MBErr(err)
err, gn_inp['test_targets'] = self.MapTargetsToLabels(
isolate_map, inp['test_targets'])
if err:
raise MBErr(err)
labels_to_targets = {}
for i, label in enumerate(gn_inp['test_targets']):
labels_to_targets[label] = inp['test_targets'][i]
try:
self.WriteJSON(gn_inp, gn_input_path)
cmd = self.GNCmd('analyze', build_path, gn_input_path, gn_output_path)
ret, output, _ = self.Run(cmd, force_verbose=True)
if ret != 0:
if self.args.json_output:
# write errors to json.output
self.WriteJSON({'output': output}, self.args.json_output)
return ret
gn_outp_str = self.ReadFile(gn_output_path)
try:
gn_outp = json.loads(gn_outp_str)
except Exception as e:
self.Print("Failed to parse the JSON string GN returned: %s\n%s"
% (repr(gn_outp_str), str(e)))
raise
outp = {}
if 'status' in gn_outp:
outp['status'] = gn_outp['status']
if 'error' in gn_outp:
outp['error'] = gn_outp['error']
if 'invalid_targets' in gn_outp:
outp['invalid_targets'] = gn_outp['invalid_targets']
if 'compile_targets' in gn_outp:
all_input_compile_targets = sorted(
set(inp['test_targets'] + inp['additional_compile_targets']))
# If we're building 'all', we can throw away the rest of the targets
# since they're redundant.
if 'all' in gn_outp['compile_targets']:
outp['compile_targets'] = ['all']
else:
outp['compile_targets'] = gn_outp['compile_targets']
# crbug.com/736215: When GN returns targets back, for targets in
# the default toolchain, GN will have generated a phony ninja
# target matching the label, and so we can safely (and easily)
# transform any GN label into the matching ninja target. For
# targets in other toolchains, though, GN doesn't generate the
# phony targets, and we don't know how to turn the labels into
# compile targets. In this case, we also conservatively give up
# and build everything. Probably the right thing to do here is
# to have GN return the compile targets directly.
if any("(" in target for target in outp['compile_targets']):
self.Print('WARNING: targets with non-default toolchains were '
'found, building everything instead.')
outp['compile_targets'] = all_input_compile_targets
else:
outp['compile_targets'] = [
label.replace('//', '') for label in outp['compile_targets']]
# Windows has a maximum command line length of 8k; even Linux
# maxes out at 128k; if analyze returns a *really long* list of
# targets, we just give up and conservatively build everything instead.
# Probably the right thing here is for ninja to support response
# files as input on the command line
# (see https://github.com/ninja-build/ninja/issues/1355).
# Android targets use a lot of templates and often exceed 7kb.
# https://crbug.com/946266
max_cmd_length_kb = 64 if platform.system() == 'Linux' else 7
if len(' '.join(outp['compile_targets'])) > max_cmd_length_kb * 1024:
self.Print('WARNING: Too many compile targets were affected.')
self.Print('WARNING: Building everything instead to avoid '
'command-line length issues.')
outp['compile_targets'] = all_input_compile_targets
if 'test_targets' in gn_outp:
outp['test_targets'] = [
labels_to_targets[label] for label in gn_outp['test_targets']]
if self.args.verbose:
self.Print()
self.Print('analyze output:')
self.PrintJSON(outp)
self.Print()
self.WriteJSON(outp, output_path)
finally:
if self.Exists(gn_input_path):
self.RemoveFile(gn_input_path)
if self.Exists(gn_output_path):
self.RemoveFile(gn_output_path)
return 0
def ReadInputJSON(self, required_keys):
path = self.args.input_path
output_path = self.args.output_path
if not self.Exists(path):
self.WriteFailureAndRaise('"%s" does not exist' % path, output_path)
try:
inp = json.loads(self.ReadFile(path))
except Exception as e:
self.WriteFailureAndRaise('Failed to read JSON input from "%s": %s' %
(path, e), output_path)
for k in required_keys:
if not k in inp:
self.WriteFailureAndRaise('input file is missing a "%s" key' % k,
output_path)
return inp
def WriteFailureAndRaise(self, msg, output_path):
if output_path:
self.WriteJSON({'error': msg}, output_path, force_verbose=True)
raise MBErr(msg)
def WriteJSON(self, obj, path, force_verbose=False):
try:
self.WriteFile(path, json.dumps(obj, indent=2, sort_keys=True) + '\n',
force_verbose=force_verbose)
except Exception as e:
raise MBErr('Error %s writing to the output path "%s"' %
(e, path))
def PrintCmd(self, cmd):
if self.platform == 'win32':
shell_quoter = QuoteForCmd
else:
shell_quoter = pipes.quote
if cmd[0] == self.executable:
cmd = ['python'] + cmd[1:]
self.Print(*[shell_quoter(arg) for arg in cmd])
def PrintJSON(self, obj):
self.Print(json.dumps(obj, indent=2, sort_keys=True))
def Build(self, target):
build_dir = self.ToSrcRelPath(self.args.path)
if self.platform == 'win32':
# On Windows use the batch script since there is no exe
ninja_cmd = ['autoninja.bat', '-C', build_dir]
else:
ninja_cmd = ['autoninja', '-C', build_dir]
if self.args.jobs:
ninja_cmd.extend(['-j', '%d' % self.args.jobs])
ninja_cmd.append(target)
ret, _, _ = self.Run(ninja_cmd, buffer_output=False)
return ret
def Run(self, cmd, env=None, force_verbose=True, buffer_output=True):
# This function largely exists so it can be overridden for testing.
if self.args.dryrun or self.args.verbose or force_verbose:
self.PrintCmd(cmd)
if self.args.dryrun:
return 0, '', ''
ret, out, err = self.Call(cmd, env=env, buffer_output=buffer_output)
if self.args.verbose or force_verbose:
if ret != 0:
self.Print(' -> returned %d' % ret)
if out:
        # This is the error seen in the logs
self.Print(out, end='')
if err:
self.Print(err, end='', file=sys.stderr)
return ret, out, err
def Call(self, cmd, env=None, buffer_output=True, stdin=None):
if buffer_output:
p = subprocess.Popen(cmd, shell=False, cwd=self.chromium_src_dir,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env, stdin=subprocess.PIPE)
out, err = p.communicate(input=stdin)
out = out.decode('utf-8')
err = err.decode('utf-8')
else:
p = subprocess.Popen(cmd, shell=False, cwd=self.chromium_src_dir,
env=env)
p.wait()
out = err = ''
return p.returncode, out, err
def _CipdPlatform(self):
"""Returns current CIPD platform, e.g. linux-amd64.
Assumes AMD64.
"""
if self.platform == 'win32':
return 'windows-amd64'
if self.platform == 'darwin':
return 'mac-amd64'
return 'linux-amd64'
def ExpandUser(self, path):
# This function largely exists so it can be overridden for testing.
return os.path.expanduser(path)
def Exists(self, path):
# This function largely exists so it can be overridden for testing.
return os.path.exists(path)
def Fetch(self, url):
# This function largely exists so it can be overridden for testing.
f = urlopen(url)
contents = f.read()
f.close()
return contents
def ListDir(self, path):
# This function largely exists so it can be overridden for testing.
return os.listdir(path)
def MaybeMakeDirectory(self, path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def PathJoin(self, *comps):
    # This function largely exists so it can be overridden for testing.
return os.path.join(*comps)
def Print(self, *args, **kwargs):
# This function largely exists so it can be overridden for testing.
print(*args, **kwargs)
if kwargs.get('stream', sys.stdout) == sys.stdout:
sys.stdout.flush()
def ReadFile(self, path):
    # This function largely exists so it can be overridden for testing.
with open(path) as fp:
return fp.read()
def RelPath(self, path, start='.'):
    # This function largely exists so it can be overridden for testing.
return os.path.relpath(path, start)
def RemoveFile(self, path):
    # This function largely exists so it can be overridden for testing.
os.remove(path)
def RemoveDirectory(self, abs_path):
if self.platform == 'win32':
# In other places in chromium, we often have to retry this command
# because we're worried about other processes still holding on to
# file handles, but when MB is invoked, it will be early enough in the
      # build that there should be no other processes to interfere. We
# can change this if need be.
self.Run(['cmd.exe', '/c', 'rmdir', '/q', '/s', abs_path])
else:
shutil.rmtree(abs_path, ignore_errors=True)
def TempDir(self):
    # This function largely exists so it can be overridden for testing.
return tempfile.mkdtemp(prefix='mb_')
def TempFile(self, mode='w'):
    # This function largely exists so it can be overridden for testing.
return tempfile.NamedTemporaryFile(mode=mode, delete=False)
def WriteFile(self, path, contents, force_verbose=False):
    # This function largely exists so it can be overridden for testing.
if self.args.dryrun or self.args.verbose or force_verbose:
self.Print('\nWriting """\\\n%s""" to %s.\n' % (contents, path))
with open(path, 'w') as fp:
return fp.write(contents)
class LedResult(object):
"""Holds the result of a led operation. Can be chained using |then|."""
def __init__(self, result, run_cmd):
self._result = result
self._run_cmd = run_cmd
@property
def result(self):
"""The mutable result data of the previous led call as decoded JSON."""
return self._result
def then(self, *cmd):
"""Invoke led, passing it the current `result` data as input.
Returns another LedResult object with the output of the command.
"""
return self.__class__(
self._run_cmd(self._result, cmd), self._run_cmd)
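# Illustrative chaining sketch for LedResult (the led subcommands below are
# assumptions for illustration, not taken from this file): starting from an
# empty result, each .then(...) pipes the previous JSON result into the next
# led invocation via run_cmd, e.g.
#   LedResult(None, run_cmd).then('get-builder', 'bucket:builder') \
#       .then('edit', '-p', 'k="v"').then('launch').result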
def FlattenConfig(config_pool, mixin_pool, config):
mixins = config_pool[config]
vals = DefaultVals()
visited = []
FlattenMixins(mixin_pool, mixins, vals, visited)
return vals
def FlattenMixins(mixin_pool, mixins_to_flatten, vals, visited):
for m in mixins_to_flatten:
if m not in mixin_pool:
raise MBErr('Unknown mixin "%s"' % m)
visited.append(m)
mixin_vals = mixin_pool[m]
if 'args_file' in mixin_vals:
if vals['args_file']:
raise MBErr('args_file specified multiple times in mixins '
'for mixin %s' % m)
vals['args_file'] = mixin_vals['args_file']
if 'gn_args' in mixin_vals:
if vals['gn_args']:
vals['gn_args'] += ' ' + mixin_vals['gn_args']
else:
vals['gn_args'] = mixin_vals['gn_args']
if 'mixins' in mixin_vals:
FlattenMixins(mixin_pool, mixin_vals['mixins'], vals, visited)
return vals
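# Illustrative sketch of mixin flattening (mixin names are invented, and it is
# assumed that vals = DefaultVals() starts with an empty 'gn_args'): with
#   mixin_pool = {'x64': {'gn_args': 'target_cpu="x64"', 'mixins': ['debug']},
#                 'debug': {'gn_args': 'is_debug=true'}}
# FlattenMixins(mixin_pool, ['x64'], vals, []) leaves
#   vals['gn_args'] == 'target_cpu="x64" is_debug=true'.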
class MBErr(Exception):
pass
# See http://goo.gl/l5NPDW and http://goo.gl/4Diozm for the painful
# details of this next section, which handles escaping command lines
# so that they can be copied and pasted into a cmd window.
UNSAFE_FOR_SET = set('^<>&|')
UNSAFE_FOR_CMD = UNSAFE_FOR_SET.union(set('()%'))
ALL_META_CHARS = UNSAFE_FOR_CMD.union(set('"'))
def QuoteForSet(arg):
if any(a in UNSAFE_FOR_SET for a in arg):
arg = ''.join('^' + a if a in UNSAFE_FOR_SET else a for a in arg)
return arg
def QuoteForCmd(arg):
# First, escape the arg so that CommandLineToArgvW will parse it properly.
if arg == '' or ' ' in arg or '"' in arg:
quote_re = re.compile(r'(\\*)"')
arg = '"%s"' % (quote_re.sub(lambda mo: 2 * mo.group(1) + '\\"', arg))
# Then check to see if the arg contains any metacharacters other than
# double quotes; if it does, quote everything (including the double
# quotes) for safety.
if any(a in UNSAFE_FOR_CMD for a in arg):
arg = ''.join('^' + a if a in ALL_META_CHARS else a for a in arg)
return arg
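# Hand-traced examples of the helpers above (illustrative, not exhaustive):
#   QuoteForSet('a&b')         -> 'a^&b'           (metachars caret-escaped)
#   QuoteForCmd('hello world') -> '"hello world"'  (quoted for CommandLineToArgvW)
#   QuoteForCmd('a&b')         -> 'a^&b'           ('&' is unsafe for cmd, so caret-escaped)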
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
release/stubs.min/System/Diagnostics/__init___parts/DebuggerBrowsableAttribute.py | htlcnn/ironpython-stubs | 182 | 11069304 | class DebuggerBrowsableAttribute(Attribute,_Attribute):
"""
Determines if and how a member is displayed in the debugger variable windows. This class cannot be inherited.
DebuggerBrowsableAttribute(state: DebuggerBrowsableState)
"""
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,state):
""" __new__(cls: type,state: DebuggerBrowsableState) """
pass
State=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the display state for the attribute.
Get: State(self: DebuggerBrowsableAttribute) -> DebuggerBrowsableState
"""
|
packages/plugins/minos-database-aiopg/minos/plugins/aiopg/factories/aggregate/snapshots/__init__.py | minos-framework/minos-python | 247 | 11069306 | from .impl import (
AiopgSnapshotDatabaseOperationFactory,
)
from .queries import (
AiopgSnapshotQueryDatabaseOperationBuilder,
)
|
base_model/mobilenetv1.py | Abhishekvats1997/ResRep | 176 | 11069307 | import torch.nn as nn
from builder import ConvBuilder
import torch.nn.functional as F
from constants import MI1_ORIGIN_DEPS
class MobileV1Block(nn.Module):
'''Depthwise conv + Pointwise conv'''
def __init__(self, builder:ConvBuilder, in_planes, out_planes, stride=1):
super(MobileV1Block, self).__init__()
self.depthwise = builder.Conv2dBNReLU(in_channels=in_planes, out_channels=in_planes, kernel_size=3,
stride=stride, padding=1, groups=in_planes)
self.pointwise = builder.Conv2dBNReLU(in_channels=in_planes, out_channels=out_planes, kernel_size=1,
stride=1, padding=0)
def forward(self, x):
out = self.depthwise(x)
out = self.pointwise(out)
return out
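# Rough parameter arithmetic for one block above (ignoring BN and bias), as a
# sanity check of why depthwise separable convolutions are cheap: a plain 3x3
# conv from C_in to C_out channels needs 9*C_in*C_out weights, while the
# depthwise (9*C_in) + pointwise (C_in*C_out) pair needs 9*C_in + C_in*C_out,
# e.g. for C_in = C_out = 256 that is 589,824 vs. 67,840 weights.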
imagenet_cfg = [32, 64, (128,2), 128, (256,2), 256, (512,2), 512, 512, 512, 512, 512, (1024,2), 1024]
# cifar_cfg = [16, (32,2), 32, (64,2), 64, (128,2), 128, 128, 128, 128, 128, (256,2), 256] # 86%
# cifar_cfg = [16, 32, 32, (64,2), 64, (128,2), 128, 128, 128, 128, 128, (256,2), 256]
cifar_cfg = [16, 32, 32, 64, 64, (128,2), 128, 128, 128, 128, 128, (256,2), 256] # 93
class MobileV1CifarNet(nn.Module):
def __init__(self, builder:ConvBuilder, num_classes):
super(MobileV1CifarNet, self).__init__()
self.conv1 = builder.Conv2dBNReLU(in_channels=3, out_channels=16, kernel_size=3, stride=1, padding=1)
blocks = []
in_planes = cifar_cfg[0]
for x in cifar_cfg:
out_planes = x if isinstance(x, int) else x[0]
stride = 1 if isinstance(x, int) else x[1]
blocks.append(MobileV1Block(builder=builder, in_planes=in_planes, out_planes=out_planes, stride=stride))
in_planes = out_planes
self.stem = builder.Sequential(*blocks)
self.gap = builder.GAP(kernel_size=8)
self.linear = builder.Linear(cifar_cfg[-1], num_classes)
def forward(self, x):
out = self.conv1(x)
out = self.stem(out)
out = self.gap(out)
out = self.linear(out)
return out
class MobileV1ImagenetNet(nn.Module):
def __init__(self, builder:ConvBuilder, num_classes, deps=None):
super(MobileV1ImagenetNet, self).__init__()
if deps is None:
deps = MI1_ORIGIN_DEPS
assert len(deps) == 27
self.conv1 = builder.Conv2dBNReLU(in_channels=3, out_channels=deps[0], kernel_size=3, stride=2, padding=1)
blocks = []
for block_idx in range(13):
depthwise_channels = int(deps[block_idx * 2 + 1])
pointwise_channels = int(deps[block_idx * 2 + 2])
stride = 2 if block_idx in [1, 3, 5, 11] else 1
blocks.append(MobileV1Block(builder=builder, in_planes=depthwise_channels, out_planes=pointwise_channels, stride=stride))
self.stem = builder.Sequential(*blocks)
self.gap = builder.GAP(kernel_size=7)
self.linear = builder.Linear(imagenet_cfg[-1], num_classes)
def forward(self, x):
out = self.conv1(x)
out = self.stem(out)
out = self.gap(out)
out = self.linear(out)
return out
#
# class MobileV1ImagenetNetLarge(nn.Module):
#
# def __init__(self, builder:ConvBuilder, num_classes):
# super(MobileV1ImagenetNetLarge, self).__init__()
# self.conv1 = builder.Conv2dBNReLU(in_channels=3, out_channels=imagenet_cfg[0], kernel_size=3, stride=2, padding=1)
# blocks = []
# in_planes = imagenet_cfg[0]
# for block_idx in range(13):
# out_planes = x if isinstance(x, int) else x[0]
# stride = 2 if block_idx in []
# blocks.append(MobileV1Block(builder=builder, in_planes=in_planes, out_planes=out_planes, stride=stride))
# in_planes = out_planes
# self.stem = builder.Sequential(*blocks)
# self.gap = builder.GAP(kernel_size=28)
# self.linear = builder.Linear(imagenet_cfg[-1], num_classes)
#
# def forward(self, x):
# out = F.upsample_bilinear(x, scale_factor=4)
# out = self.conv1(out)
# out = self.stem(out)
# out = self.gap(out)
# out = self.linear(out)
# return out
def create_MobileV1Cifar(cfg, builder):
return MobileV1CifarNet(builder=builder, num_classes=10)
def create_MobileV1CH(cfg, builder):
return MobileV1CifarNet(builder=builder, num_classes=100)
def create_MobileV1Imagenet(cfg, builder):
return MobileV1ImagenetNet(builder=builder, num_classes=1000, deps=cfg.deps)
def create_MobileV1ImagenetLarge(cfg, builder):
return MobileV1ImagenetNetLarge(builder=builder, num_classes=1000) |
examples/locality-load-balancing/client.py | giantcroc/envoy | 17,703 | 11069332 | import sys
import urllib.request
from collections import Counter
url, n_requests = sys.argv[1], int(sys.argv[2])
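# Example invocation (hypothetical URL and request count):
#   python3 client.py http://localhost:8000/service 100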
count = Counter()
count_fail = 0
for i in range(n_requests):
try:
with urllib.request.urlopen(url) as resp:
content = resp.read().decode("utf-8").strip()
count[content] += 1
    except Exception:  # count any failure (connection error, HTTP error, decode error)
count_fail += 1
for k in count:
print(f"{k}: actual weight {count[k] / n_requests * 100}%")
print(f"Failed: {count_fail}")
|
src/amuse/plot/__init__.py | sibonyves/amuse | 131 | 11069342 |
from ._plot import *
|
openspeech/models/rnn_transducer/configurations.py | CanYouImagine/openspeech | 207 | 11069354 |
# MIT License
#
# Copyright (c) 2021 <NAME> and <NAME> and <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from dataclasses import dataclass, field
from openspeech.dataclass.configurations import OpenspeechDataclass
@dataclass
class RNNTransducerConfigs(OpenspeechDataclass):
r"""
This is the configuration class to store the configuration of
a :class:`~openspeech.models.RNNTransducer`.
    It is used to initiate an `RNNTransducer` model.
    Configuration objects inherit from :class:`~openspeech.dataclass.configs.OpenspeechDataclass`.
    Args:
        model_name (str): Model name (default: rnn_transducer)
        encoder_hidden_state_dim (int): Hidden state dimension of encoder (default: 320)
decoder_hidden_state_dim (int): Hidden state dimension of decoder (default: 512)
num_encoder_layers (int): The number of encoder layers. (default: 4)
num_decoder_layers (int): The number of decoder layers. (default: 1)
encoder_dropout_p (float): The dropout probability of encoder. (default: 0.2)
decoder_dropout_p (float): The dropout probability of decoder. (default: 0.2)
bidirectional (bool): If True, becomes a bidirectional encoders (default: True)
rnn_type (str): Type of rnn cell (rnn, lstm, gru) (default: lstm)
output_dim (int): dimension of model output. (default: 512)
optimizer (str): Optimizer for training. (default: adam)
"""
model_name: str = field(
default="rnn_transducer", metadata={"help": "Model name"}
)
encoder_hidden_state_dim: int = field(
default=320, metadata={"help": "Dimension of encoder."}
)
decoder_hidden_state_dim: int = field(
default=512, metadata={"help": "Dimension of decoder."}
)
num_encoder_layers: int = field(
default=4, metadata={"help": "The number of encoder layers."}
)
num_decoder_layers: int = field(
default=1, metadata={"help": "The number of decoder layers."}
)
encoder_dropout_p: float = field(
default=0.2, metadata={"help": "The dropout probability of encoder."}
)
decoder_dropout_p: float = field(
default=0.2, metadata={"help": "The dropout probability of decoder."}
)
bidirectional: bool = field(
default=True, metadata={"help": "If True, becomes a bidirectional encoders"}
)
rnn_type: str = field(
default="lstm", metadata={"help": "Type of rnn cell (rnn, lstm, gru)"}
)
output_dim: int = field(
default=512, metadata={"help": "Dimension of outputs"}
)
optimizer: str = field(
default="adam", metadata={"help": "Optimizer for training."}
)
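# Minimal usage sketch (field values are illustrative; in practice the training
# entry point builds this config and hands it to the RNN transducer model):
#   configs = RNNTransducerConfigs(encoder_hidden_state_dim=320, rnn_type="lstm")
#   assert configs.model_name == "rnn_transducer"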
|
smsBomb/__main__.py | YINZHI-keji/smsBomb | 461 | 11069363 |
# coding=utf-8
from smsBomb import cli
if __name__ == '__main__':
cli.main()
|
elliot/recommender/neural/NeuMF/neural_matrix_factorization.py | gategill/elliot | 175 | 11069391 |
"""
Module description:
"""
__version__ = '0.3.1'
__author__ = '<NAME>, <NAME>'
__email__ = '<EMAIL>, <EMAIL>'
import time
import numpy as np
from tqdm import tqdm
from elliot.recommender.neural.NeuMF import custom_sampler as cs
from elliot.recommender.base_recommender_model import BaseRecommenderModel
from elliot.recommender.base_recommender_model import init_charger
from elliot.recommender.neural.NeuMF.neural_matrix_factorization_model import NeuralMatrixFactorizationModel
from elliot.recommender.recommender_utils_mixin import RecMixin
class NeuMF(RecMixin, BaseRecommenderModel):
r"""
Neural Collaborative Filtering
For further details, please refer to the `paper <https://arxiv.org/abs/1708.05031>`_
Args:
mf_factors: Number of MF latent factors
mlp_factors: Number of MLP latent factors
mlp_hidden_size: List of units for each layer
lr: Learning rate
dropout: Dropout rate
is_mf_train: Whether to train the MF embeddings
is_mlp_train: Whether to train the MLP layers
To include the recommendation model, add it to the config file adopting the following pattern:
.. code:: yaml
models:
NeuMF:
meta:
save_recs: True
epochs: 10
batch_size: 512
mf_factors: 10
mlp_factors: 10
mlp_hidden_size: (64,32)
lr: 0.001
dropout: 0.0
is_mf_train: True
is_mlp_train: True
"""
@init_charger
def __init__(self, data, config, params, *args, **kwargs):
self._params_list = [
("_learning_rate", "lr", "lr", 0.001, None, None),
("_mf_factors", "mf_factors", "mffactors", 10, int, None),
            # If the user prefers a generalized model (WARNING: not coherent with the paper), they can uncomment the following options
#("_mlp_factors", "mlp_factors", "mlpfactors", 10, int, None),
#("_mlp_hidden_size", "mlp_hidden_size", "mlpunits", "(64,32)", lambda x: list(make_tuple(str(x))), lambda x: self._batch_remove(str(x), " []").replace(",", "-")),
("_dropout", "dropout", "drop", 0, None, None),
("_is_mf_train", "is_mf_train", "mftrain", True, None, None),
("_is_mlp_train", "is_mlp_train", "mlptrain", True, None, None),
("_m", "m", "m", 0, int, None)
]
self.autoset_params()
self._mlp_hidden_size = (self._mf_factors*4, self._mf_factors*2, self._mf_factors)
self._mlp_factors = self._mf_factors
if self._batch_size < 1:
self._batch_size = self._data.transactions
self._sampler = cs.Sampler(self._data.i_train_dict, self._m)
self._ratings = self._data.train_dict
self._sp_i_train = self._data.sp_i_train
self._i_items_set = list(range(self._num_items))
self._model = NeuralMatrixFactorizationModel(self._num_users, self._num_items, self._mf_factors,
self._mlp_factors, self._mlp_hidden_size,
self._dropout, self._is_mf_train, self._is_mlp_train,
self._learning_rate, self._seed)
@property
def name(self):
return "NeuMF"\
+ f"_{self.get_base_params_shortcut()}" \
+ f"_{self.get_params_shortcut()}"
def train(self):
if self._restore:
return self.restore_weights()
for it in self.iterate(self._epochs):
loss = 0
steps = 0
with tqdm(total=int(self._data.transactions * (self._m + 1) // self._batch_size), disable=not self._verbose) as t:
for batch in self._sampler.step(self._batch_size):
steps += 1
loss += self._model.train_step(batch).numpy()
t.set_postfix({'loss': f'{loss / steps:.5f}'})
t.update()
self.evaluate(it, loss/(it + 1))
def get_recommendations(self, k: int = 100):
predictions_top_k_test = {}
predictions_top_k_val = {}
for index, offset in enumerate(range(0, self._num_users, self._batch_size)):
offset_stop = min(offset + self._batch_size, self._num_users)
predictions = self._model.get_recs(
(
np.repeat(np.array(list(range(offset, offset_stop)))[:, None], repeats=self._num_items, axis=1),
np.array([self._i_items_set for _ in range(offset, offset_stop)])
)
)
recs_val, recs_test = self.process_protocol(k, predictions, offset, offset_stop)
predictions_top_k_val.update(recs_val)
predictions_top_k_test.update(recs_test)
return predictions_top_k_val, predictions_top_k_test
|
examples/airline_demo/airline_demo_tests/unit_tests/test_ingest_csv_file_handle_to_spark.py | dbatten5/dagster | 4,606 | 11069402 |
import tempfile
from airline_demo.pipelines import local_parquet_io_manager
from airline_demo.solids import ingest_csv_file_handle_to_spark
from dagster import (
LocalFileHandle,
ModeDefinition,
execute_pipeline,
fs_io_manager,
local_file_manager,
pipeline,
solid,
)
from dagster.core.definitions.no_step_launcher import no_step_launcher
from dagster.utils import file_relative_path
from dagster_pyspark import pyspark_resource
from pyspark.sql import Row
@solid
def collect_df(df):
"""The pyspark Spark context will be stopped on pipeline termination, so we need to collect
the pyspark DataFrame before pipeline completion.
"""
return df.collect()
def test_ingest_csv_file_handle_to_spark(spark_config):
@solid
def emit_num_csv_local_file():
return LocalFileHandle(file_relative_path(__file__, "../num.csv"))
@pipeline(
mode_defs=[
ModeDefinition(
resource_defs={
"pyspark": pyspark_resource,
"pyspark_step_launcher": no_step_launcher,
"pyspark_io_manager": local_parquet_io_manager,
"file_manager": local_file_manager,
"io_manager": fs_io_manager,
}
)
]
)
def ingest_csv_file_test():
return collect_df(ingest_csv_file_handle_to_spark(emit_num_csv_local_file()))
with tempfile.TemporaryDirectory() as temp_dir:
result = execute_pipeline(
ingest_csv_file_test,
run_config={
"resources": {
"pyspark": {"config": {"spark_conf": spark_config}},
"pyspark_io_manager": {"config": {"base_dir": temp_dir}},
"io_manager": {"config": {"base_dir": temp_dir}},
}
},
)
assert result.success
df = result.result_for_solid("collect_df").output_value()
assert df == [Row(num1="1", num2="2")]
def test_ingest_csv_file_with_special_handle_to_spark(spark_config):
@solid
def emit_num_special_csv_local_file():
return LocalFileHandle(file_relative_path(__file__, "../num_with_special_chars.csv"))
@pipeline(
mode_defs=[
ModeDefinition(
resource_defs={
"pyspark": pyspark_resource,
"pyspark_step_launcher": no_step_launcher,
"file_manager": local_file_manager,
"pyspark_io_manager": local_parquet_io_manager,
"io_manager": fs_io_manager,
}
)
]
)
def ingest_csv_file_test():
return collect_df(ingest_csv_file_handle_to_spark(emit_num_special_csv_local_file()))
with tempfile.TemporaryDirectory() as temp_dir:
result = execute_pipeline(
ingest_csv_file_test,
run_config={
"resources": {
"pyspark": {"config": {"spark_conf": spark_config}},
"pyspark_io_manager": {"config": {"base_dir": temp_dir}},
"io_manager": {"config": {"base_dir": temp_dir}},
}
},
)
assert result.success
df = result.result_for_solid("collect_df").output_value()
assert df == [Row(num1="1", num2="2")]
|
openstackclient/tests/functional/compute/v2/test_flavor.py | mydevice/python-openstackclient | 262 | 11069417 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
from openstackclient.tests.functional import base
class FlavorTests(base.TestCase):
"""Functional tests for flavor."""
PROJECT_NAME = uuid.uuid4().hex
@classmethod
def setUpClass(cls):
super(FlavorTests, cls).setUpClass()
# Make a project
cmd_output = json.loads(cls.openstack(
"project create -f json --enable " + cls.PROJECT_NAME
))
cls.project_id = cmd_output["id"]
@classmethod
def tearDownClass(cls):
try:
raw_output = cls.openstack("project delete " + cls.PROJECT_NAME)
cls.assertOutput('', raw_output)
finally:
super(FlavorTests, cls).tearDownClass()
def test_flavor_delete(self):
"""Test create w/project, delete multiple"""
name1 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
"flavor create -f json " +
"--project " + self.PROJECT_NAME + " " +
"--private " +
name1
))
self.assertIsNotNone(cmd_output["id"])
name2 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
"flavor create -f json " +
"--id qaz " +
"--project " + self.PROJECT_NAME + " " +
"--private " +
name2
))
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
"qaz",
cmd_output["id"],
)
raw_output = self.openstack(
"flavor delete " + name1 + " " + name2,
)
self.assertOutput('', raw_output)
def test_flavor_list(self):
"""Test create defaults, list filters, delete"""
name1 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
"flavor create -f json " +
"--property a=b " +
"--property c=d " +
name1
))
self.addCleanup(self.openstack, "flavor delete " + name1)
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
name1,
cmd_output["name"],
)
name2 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
"flavor create -f json " +
"--id qaz " +
"--ram 123 " +
"--private " +
"--property a=b2 " +
"--property b=d2 " +
name2
))
self.addCleanup(self.openstack, "flavor delete " + name2)
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
"qaz",
cmd_output["id"],
)
self.assertEqual(
name2,
cmd_output["name"],
)
self.assertEqual(
123,
cmd_output["ram"],
)
self.assertEqual(
0,
cmd_output["disk"],
)
self.assertFalse(
cmd_output["os-flavor-access:is_public"],
)
self.assertDictEqual(
{"a": "b2", "b": "d2"},
cmd_output["properties"],
)
# Test list
cmd_output = json.loads(self.openstack(
"flavor list -f json"
))
col_name = [x["Name"] for x in cmd_output]
self.assertIn(name1, col_name)
self.assertNotIn(name2, col_name)
# Test list --long
cmd_output = json.loads(self.openstack(
"flavor list -f json " +
"--long"
))
        # We have a list of complex json objects
# Iterate through the list setting flags
found_expected = False
for rec in cmd_output:
if rec['Name'] == name1:
found_expected = True
self.assertEqual('b', rec['Properties']['a'])
self.assertEqual('d', rec['Properties']['c'])
elif rec['Name'] == name2:
                # We should not have seen the private flavor
self.assertFalse(True)
self.assertTrue(found_expected)
# Test list --public
cmd_output = json.loads(self.openstack(
"flavor list -f json " +
"--public"
))
col_name = [x["Name"] for x in cmd_output]
self.assertIn(name1, col_name)
self.assertNotIn(name2, col_name)
# Test list --private
cmd_output = json.loads(self.openstack(
"flavor list -f json " +
"--private"
))
col_name = [x["Name"] for x in cmd_output]
self.assertNotIn(name1, col_name)
self.assertIn(name2, col_name)
# Test list --all
cmd_output = json.loads(self.openstack(
"flavor list -f json " +
"--all"
))
col_name = [x["Name"] for x in cmd_output]
self.assertIn(name1, col_name)
self.assertIn(name2, col_name)
def test_flavor_properties(self):
"""Test create defaults, list filters, delete"""
name1 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
"flavor create -f json " +
"--id qaz " +
"--ram 123 " +
"--disk 20 " +
"--private " +
"--property a=first " +
"--property b=second " +
name1
))
self.addCleanup(self.openstack, "flavor delete " + name1)
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
"qaz",
cmd_output["id"],
)
self.assertEqual(
name1,
cmd_output["name"],
)
self.assertEqual(
123,
cmd_output["ram"],
)
self.assertEqual(
20,
cmd_output["disk"],
)
self.assertFalse(
cmd_output["os-flavor-access:is_public"],
)
self.assertDictEqual(
{"a": "first", "b": "second"},
cmd_output["properties"],
)
raw_output = self.openstack(
"flavor set " +
"--property a='third and 10' " +
"--property g=fourth " +
name1
)
self.assertEqual('', raw_output)
cmd_output = json.loads(self.openstack(
"flavor show -f json " +
name1
))
self.assertEqual(
"qaz",
cmd_output["id"],
)
self.assertEqual(
'third and 10',
cmd_output['properties']['a'])
self.assertEqual(
'second',
cmd_output['properties']['b'])
self.assertEqual(
'fourth',
cmd_output['properties']['g'])
raw_output = self.openstack(
"flavor unset " +
"--property b " +
name1
)
self.assertEqual('', raw_output)
cmd_output = json.loads(self.openstack(
"flavor show -f json " +
name1
))
self.assertNotIn('b', cmd_output['properties'])
|
model/model_cd.py | MingSun-Tse/Collaborative-Distillation | 172 | 11069433 | import numpy as np
import os
import torch.nn as nn
import torch
from torch.utils.serialization import load_lua
from utils import load_param_from_t7 as load_param
from model.model_kd2sd import SmallDecoder1_16x_aux, SmallDecoder2_16x_aux, SmallDecoder3_16x_aux, SmallDecoder4_16x_aux, SmallDecoder5_16x_aux
import pickle
pjoin = os.path.join
# calculate style distances in CVPR paper
# since 5-stage style distances are shown separately, there is no need to normalize it by num_channel.
# ref https://pytorch.org/tutorials/advanced/neural_style_tutorial.html
def gram_matrix(input):
a, b, c, d = input.size() # [N, C, H, W]
batch_feat = input.view(a, b, c*d) # [N, C, HW]
batch_gram = torch.stack([torch.mm(feat, feat.t()) for feat in batch_feat])
batch_gram = batch_gram.div(a*b*c*d)
return batch_gram # shape: [N, C, C]
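# e.g. (illustrative shapes): for a feature map of size [8, 64, 32, 32],
# gram_matrix returns a batch of Gram matrices of shape [8, 64, 64].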
# ref: AdaIN impel. (https://github.com/naoto0804/pytorch-AdaIN/blob/master/function.py)
def calc_mean_std(feat, eps=1e-5):
# eps is a small value added to the variance to avoid divide-by-zero.
size = feat.size()
assert (len(size) == 4)
N, C = size[:2]
feat_var = feat.view(N, C, -1).var(dim=2) + eps
feat_std = feat_var.sqrt().view(N, C, 1, 1)
feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)
return feat_mean, feat_std
def adaptive_instance_normalization(content_feat, style_feat):
assert (content_feat.size()[:2] == style_feat.size()[:2])
size = content_feat.size()
style_mean, style_std = calc_mean_std(style_feat)
content_mean, content_std = calc_mean_std(content_feat)
normalized_feat = (content_feat - content_mean.expand(
size)) / content_std.expand(size)
return normalized_feat * style_std.expand(size) + style_mean.expand(size)
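# Minimal AdaIN usage sketch (shapes are illustrative assumptions):
#   content_feat = torch.randn(1, 512, 32, 32)
#   style_feat = torch.randn(1, 512, 32, 32)
#   stylized = adaptive_instance_normalization(content_feat, style_feat)
# The output keeps the content layout but carries the per-channel mean/std of
# the style features.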
# calculate average style distance, which needs normalization by num_channel.
def gram_matrix_ave(input):
a, b, c, d = input.size()
batch_feat = input.view(a, b, c*d)
batch_gram = torch.stack([torch.mm(feat, feat.t()).div(b*c*d) for feat in batch_feat])
return batch_gram # shape: [batch_size, channel, channel]
# Load param from model1 to model2
# For each layer of model2, if model1 has the same layer, then copy the params.
def load_param2(model1_path, model2):
dict_param1 = torch.load(model1_path) # model1_path: .pth model path
dict_param2 = model2.state_dict()
for name2 in dict_param2:
if name2 in dict_param1:
# print("tensor '%s' found in both models, so copy it from model 1 to model 2" % name2)
dict_param2[name2].data.copy_(dict_param1[name2].data)
model2.load_state_dict(dict_param2)
return model2
# -----------------------------------------------
class SmallDecoder1_16x(nn.Module):
def __init__(self, model=None, fixed=False):
super(SmallDecoder1_16x, self).__init__()
self.fixed = fixed
self.conv11 = nn.Conv2d(24,3,3,1,0, dilation=1)
self.relu = nn.ReLU(inplace=True)
self.pad = nn.ReflectionPad2d((1,1,1,1))
if model:
weights = torch.load(model, map_location=lambda storage, location: storage)
if "model" in weights:
self.load_state_dict(weights["model"])
else:
self.load_state_dict(weights)
print("load model '%s' successfully" % model)
if fixed:
for param in self.parameters():
param.requires_grad = False
def forward(self, y):
y = self.relu(self.conv11(self.pad(y)))
return y
def forward_pwct(self, input):
out11 = self.conv11(self.pad(input))
return out11
class SmallDecoder2_16x(nn.Module):
def __init__(self, model=None, fixed=False):
super(SmallDecoder2_16x, self).__init__()
self.fixed = fixed
self.conv21 = nn.Conv2d( 32, 16,3,1,0)
self.conv12 = nn.Conv2d( 16, 16,3,1,0, dilation=1)
self.conv11 = nn.Conv2d( 16, 3,3,1,0, dilation=1)
self.relu = nn.ReLU(inplace=True)
self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
self.unpool_pwct = nn.MaxUnpool2d(kernel_size=2, stride=2)
self.pad = nn.ReflectionPad2d((1,1,1,1))
if model:
weights = torch.load(model, map_location=lambda storage, location: storage)
if "model" in weights:
self.load_state_dict(weights["model"])
else:
self.load_state_dict(weights)
print("load model '%s' successfully" % model)
if fixed:
for param in self.parameters():
param.requires_grad = False
def forward(self, y):
y = self.relu(self.conv21(self.pad(y)))
y = self.unpool(y)
y = self.relu(self.conv12(self.pad(y)))
y = self.relu(self.conv11(self.pad(y)))
return y
def forward_pwct(self, x, pool1_idx=None, pool1_size=None, pool2_idx=None, pool2_size=None, pool3_idx=None, pool3_size=None):
out21 = self.relu(self.conv21(self.pad(x)))
out21 = self.unpool_pwct(out21, pool1_idx, output_size=pool1_size)
out12 = self.relu(self.conv12(self.pad(out21)))
out11 = self.conv11(self.pad(out12))
return out11
class SmallDecoder3_16x(nn.Module):
def __init__(self, model=None, fixed=False):
super(SmallDecoder3_16x, self).__init__()
self.fixed = fixed
self.conv31 = nn.Conv2d( 64, 32,3,1,0)
self.conv22 = nn.Conv2d( 32, 32,3,1,0)
self.conv21 = nn.Conv2d( 32, 16,3,1,0)
self.conv12 = nn.Conv2d( 16, 16,3,1,0, dilation=1)
self.conv11 = nn.Conv2d( 16, 3,3,1,0, dilation=1)
self.relu = nn.ReLU(inplace=True)
self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
self.unpool_pwct = nn.MaxUnpool2d(kernel_size=2, stride=2)
self.pad = nn.ReflectionPad2d((1,1,1,1))
if model:
weights = torch.load(model, map_location=lambda storage, location: storage)
if "model" in weights:
self.load_state_dict(weights["model"])
else:
self.load_state_dict(weights)
print("load model '%s' successfully" % model)
if fixed:
for param in self.parameters():
param.requires_grad = False
def forward(self, y):
y = self.relu(self.conv31(self.pad(y)))
y = self.unpool(y)
y = self.relu(self.conv22(self.pad(y)))
y = self.relu(self.conv21(self.pad(y)))
y = self.unpool(y)
y = self.relu(self.conv12(self.pad(y)))
y = self.relu(self.conv11(self.pad(y)))
return y
def forward_pwct(self, x, pool1_idx=None, pool1_size=None, pool2_idx=None, pool2_size=None, pool3_idx=None, pool3_size=None):
out31 = self.relu(self.conv31(self.pad(x)))
out31 = self.unpool_pwct(out31, pool2_idx, output_size=pool2_size)
out22 = self.relu(self.conv22(self.pad(out31)))
out21 = self.relu(self.conv21(self.pad(out22)))
out21 = self.unpool_pwct(out21, pool1_idx, output_size=pool1_size)
out12 = self.relu(self.conv12(self.pad(out21)))
out11 = self.conv11(self.pad(out12))
return out11
class SmallDecoder4_16x(nn.Module):
def __init__(self, model=None, fixed=False):
super(SmallDecoder4_16x, self).__init__()
self.fixed = fixed
self.conv41 = nn.Conv2d(128, 64,3,1,0)
self.conv34 = nn.Conv2d( 64, 64,3,1,0)
self.conv33 = nn.Conv2d( 64, 64,3,1,0)
self.conv32 = nn.Conv2d( 64, 64,3,1,0)
self.conv31 = nn.Conv2d( 64, 32,3,1,0)
self.conv22 = nn.Conv2d( 32, 32,3,1,0, dilation=1)
self.conv21 = nn.Conv2d( 32, 16,3,1,0, dilation=1)
self.conv12 = nn.Conv2d( 16, 16,3,1,0, dilation=1)
self.conv11 = nn.Conv2d( 16, 3,3,1,0, dilation=1)
self.relu = nn.ReLU(inplace=True)
self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
self.unpool_pwct = nn.MaxUnpool2d(kernel_size=2, stride=2)
self.pad = nn.ReflectionPad2d((1,1,1,1))
if model:
weights = torch.load(model, map_location=lambda storage, location: storage)
if "model" in weights:
self.load_state_dict(weights["model"])
else:
self.load_state_dict(weights)
print("load model '%s' successfully" % model)
if fixed:
for param in self.parameters():
param.requires_grad = False
def forward(self, y):
y = self.relu(self.conv41(self.pad(y)))
y = self.unpool(y)
y = self.relu(self.conv34(self.pad(y)))
y = self.relu(self.conv33(self.pad(y)))
y = self.relu(self.conv32(self.pad(y)))
y = self.relu(self.conv31(self.pad(y)))
y = self.unpool(y)
y = self.relu(self.conv22(self.pad(y)))
y = self.relu(self.conv21(self.pad(y)))
y = self.unpool(y)
y = self.relu(self.conv12(self.pad(y)))
y = self.relu(self.conv11(self.pad(y)))
return y
def forward_pwct(self, x, pool1_idx=None, pool1_size=None, pool2_idx=None, pool2_size=None, pool3_idx=None, pool3_size=None):
out41 = self.relu(self.conv41(self.pad(x)))
out41 = self.unpool_pwct(out41, pool3_idx, output_size=pool3_size)
out34 = self.relu(self.conv34(self.pad(out41)))
out33 = self.relu(self.conv33(self.pad(out34)))
out32 = self.relu(self.conv32(self.pad(out33)))
out31 = self.relu(self.conv31(self.pad(out32)))
out31 = self.unpool_pwct(out31, pool2_idx, output_size=pool2_size)
out22 = self.relu(self.conv22(self.pad(out31)))
out21 = self.relu(self.conv21(self.pad(out22)))
out21 = self.unpool_pwct(out21, pool1_idx, output_size=pool1_size)
out12 = self.relu(self.conv12(self.pad(out21)))
out11 = self.conv11(self.pad(out12))
return out11
class SmallDecoder5_16x(nn.Module):
def __init__(self, model=None, fixed=False):
super(SmallDecoder5_16x, self).__init__()
self.fixed = fixed
self.conv51 = nn.Conv2d(128,128,3,1,0)
self.conv44 = nn.Conv2d(128,128,3,1,0)
self.conv43 = nn.Conv2d(128,128,3,1,0)
self.conv42 = nn.Conv2d(128,128,3,1,0)
self.conv41 = nn.Conv2d(128, 64,3,1,0)
self.conv34 = nn.Conv2d( 64, 64,3,1,0, dilation=1)
self.conv33 = nn.Conv2d( 64, 64,3,1,0, dilation=1)
self.conv32 = nn.Conv2d( 64, 64,3,1,0, dilation=1)
self.conv31 = nn.Conv2d( 64, 32,3,1,0, dilation=1)
self.conv22 = nn.Conv2d( 32, 32,3,1,0, dilation=1)
self.conv21 = nn.Conv2d( 32, 16,3,1,0, dilation=1)
self.conv12 = nn.Conv2d( 16, 16,3,1,0, dilation=1)
self.conv11 = nn.Conv2d( 16, 3,3,1,0, dilation=1)
self.relu = nn.ReLU(inplace=True)
self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
self.pad = nn.ReflectionPad2d((1,1,1,1))
if model:
weights = torch.load(model, map_location=lambda storage, location: storage)
if "model" in weights:
self.load_state_dict(weights["model"])
else:
self.load_state_dict(weights)
print("load model '%s' successfully" % model)
if fixed:
for param in self.parameters():
param.requires_grad = False
def forward(self, y):
y = self.relu(self.conv51(self.pad(y)))
y = self.unpool(y)
y = self.relu(self.conv44(self.pad(y)))
y = self.relu(self.conv43(self.pad(y)))
y = self.relu(self.conv42(self.pad(y)))
y = self.relu(self.conv41(self.pad(y)))
y = self.unpool(y)
y = self.relu(self.conv34(self.pad(y)))
y = self.relu(self.conv33(self.pad(y)))
y = self.relu(self.conv32(self.pad(y)))
y = self.relu(self.conv31(self.pad(y)))
y = self.unpool(y)
y = self.relu(self.conv22(self.pad(y)))
y = self.relu(self.conv21(self.pad(y)))
y = self.unpool(y)
y = self.relu(self.conv12(self.pad(y)))
y = self.relu(self.conv11(self.pad(y))) # self.conv11(self.pad(y))
return y
def forward_branch(self, input):
out51 = self.relu(self.conv51(self.pad(input)))
out51 = self.unpool(out51)
out44 = self.relu(self.conv44(self.pad(out51)))
out43 = self.relu(self.conv43(self.pad(out44)))
out42 = self.relu(self.conv42(self.pad(out43)))
out41 = self.relu(self.conv41(self.pad(out42)))
out41 = self.unpool(out41)
out34 = self.relu(self.conv34(self.pad(out41)))
out33 = self.relu(self.conv33(self.pad(out34)))
out32 = self.relu(self.conv32(self.pad(out33)))
out31 = self.relu(self.conv31(self.pad(out32)))
out31 = self.unpool(out31)
out22 = self.relu(self.conv22(self.pad(out31)))
out21 = self.relu(self.conv21(self.pad(out22)))
out21 = self.unpool(out21)
out12 = self.relu(self.conv12(self.pad(out21)))
out11 = self.relu(self.conv11(self.pad(out12)))
return out11
# bridge the dimension mismatch using a 1x1 linear layer
class SmallEncoder1_16x_aux(nn.Module):
def __init__(self, model=None, fixed=False):
super(SmallEncoder1_16x_aux, self).__init__()
self.fixed = fixed
self.conv0 = nn.Conv2d(3,3,1,1,0)
self.conv0.requires_grad = False
self.conv11 = nn.Conv2d( 3, 24, 3, 1, 0, dilation=1)
self.conv11_aux = nn.Conv2d( 24, 64, 1, 1, 0)
self.relu = nn.ReLU(inplace=True)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=False)
self.pad = nn.ReflectionPad2d((1,1,1,1))
if model:
weights = torch.load(model, map_location=lambda storage, location: storage)
if "model" in weights:
self.load_state_dict(weights["model"])
else:
self.load_state_dict(weights)
print("load model '%s' successfully" % model)
if fixed:
for param in self.parameters():
param.requires_grad = False
# "forward" only outputs the final output
# "forward_branch" outputs all the middle branch ouputs
# "forward_aux" outputs all the middle auxiliary mapping layers
def forward(self, y):
y = self.conv0(y)
y = self.relu(self.conv11(self.pad(y)))
return y
def forward_branch(self, input):
out0 = self.conv0(input)
out11 = self.relu(self.conv11(self.pad(out0)))
return out11,
def forward_aux(self, input, relu=True):
out0 = self.conv0(input)
out11 = self.relu(self.conv11(self.pad(out0)))
if relu:
out11_aux = self.relu(self.conv11_aux(out11))
else:
out11_aux = self.conv11_aux(out11)
return out11_aux,
def forward_aux2(self, input):
out0 = self.conv0(input)
out11 = self.relu(self.conv11(self.pad(out0)))
out11_aux = self.relu(self.conv11_aux(out11))
return out11_aux, out11 # used for feature loss and style loss
class SmallEncoder2_16x_aux(nn.Module):
def __init__(self, model=None, fixed=False):
super(SmallEncoder2_16x_aux, self).__init__()
self.fixed = fixed
self.conv0 = nn.Conv2d(3,3,1,1,0)
self.conv0.requires_grad = False
self.conv11 = nn.Conv2d( 3, 16,3,1,0, dilation=1)
self.conv12 = nn.Conv2d( 16, 16,3,1,0, dilation=1)
self.conv21 = nn.Conv2d( 16, 32,3,1,0)
self.conv11_aux = nn.Conv2d( 16, 64,1,1,0)
self.conv21_aux = nn.Conv2d( 32,128,1,1,0)
self.relu = nn.ReLU(inplace=True)
self.pool = nn.MaxPool2d(kernel_size=2,stride=2,return_indices=False)
self.pool2 = nn.MaxPool2d(kernel_size=2,stride=2,return_indices=True)
self.pad = nn.ReflectionPad2d((1,1,1,1))
if model:
weights = torch.load(model, map_location=lambda storage, location: storage)
if "model" in weights:
self.load_state_dict(weights["model"])
else:
self.load_state_dict(weights)
print("load model '%s' successfully" % model)
if fixed:
for param in self.parameters():
param.requires_grad = False
def forward(self, y):
y = self.conv0(y)
y = self.relu(self.conv11(self.pad(y)))
y = self.relu(self.conv12(self.pad(y)))
y = self.pool(y)
y = self.relu(self.conv21(self.pad(y)))
return y
def forward_branch(self, input):
out0 = self.conv0(input)
out11 = self.relu(self.conv11(self.pad(out0)))
out12 = self.relu(self.conv12(self.pad(out11)))
out12 = self.pool(out12)
out21 = self.relu(self.conv21(self.pad(out12)))
return out11, out21
def forward_aux(self, input, relu=True):
out0 = self.conv0(input)
out11 = self.relu(self.conv11(self.pad(out0)))
out12 = self.relu(self.conv12(self.pad(out11)))
out12 = self.pool(out12)
out21 = self.relu(self.conv21(self.pad(out12)))
if relu:
out11_aux = self.relu(self.conv11_aux(out11))
out21_aux = self.relu(self.conv21_aux(out21))
else:
out11_aux = self.conv11_aux(out11)
out21_aux = self.conv21_aux(out21)
return out11_aux, out21_aux
def forward_aux2(self, input):
out0 = self.conv0(input)
out11 = self.relu(self.conv11(self.pad(out0)))
out12 = self.relu(self.conv12(self.pad(out11)))
out12 = self.pool(out12)
out21 = self.relu(self.conv21(self.pad(out12)))
out11_aux = self.relu(self.conv11_aux(out11))
out21_aux = self.relu(self.conv21_aux(out21))
return out11_aux, out21_aux, out21 # used for feature loss and style loss
def forward_pwct(self, input): # for function in photo WCT
out0 = self.conv0(input)
out11 = self.relu(self.conv11(self.pad(out0)))
out12 = self.relu(self.conv12(self.pad(out11)))
pool12, out12_ix = self.pool2(out12)
out21 = self.relu(self.conv21(self.pad(pool12)))
return out21, out12_ix, out12.size()
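# --- Illustrative sketch (added, not part of the original file) ---
# forward_pwct above returns max-pooling indices plus the pre-pool size so that
# a photo-WCT style decoder can unpool losslessly. A minimal (assumed) use:
# reduce the 32-channel feature back to 16 channels, then invert the recorded
# pooling; the decoder conv here is hypothetical.
def _example_unpool_stage2(encoder, image):
    out21, out12_ix, out12_size = encoder.forward_pwct(image)
    to16 = nn.Conv2d(32, 16, 3, 1, 1)                 # hypothetical decoder conv
    unpool = nn.MaxUnpool2d(kernel_size=2, stride=2)
    return unpool(to16(out21), out12_ix, output_size=out12_size)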
class SmallEncoder3_16x_aux(nn.Module):
def __init__(self, model=None, fixed=False):
super(SmallEncoder3_16x_aux, self).__init__()
self.fixed = fixed
self.conv0 = nn.Conv2d(3,3,1,1,0)
self.conv0.requires_grad = False
self.conv11 = nn.Conv2d( 3, 16,3,1,0, dilation=1)
self.conv12 = nn.Conv2d( 16, 16,3,1,0, dilation=1)
self.conv21 = nn.Conv2d( 16, 32,3,1,0)
self.conv22 = nn.Conv2d( 32, 32,3,1,0)
self.conv31 = nn.Conv2d( 32, 64,3,1,0)
self.conv11_aux = nn.Conv2d( 16, 64,1,1,0)
self.conv21_aux = nn.Conv2d( 32,128,1,1,0)
self.conv31_aux = nn.Conv2d( 64,256,1,1,0)
self.relu = nn.ReLU(inplace=True)
self.pool = nn.MaxPool2d(kernel_size=2,stride=2,return_indices=False)
self.pool2 = nn.MaxPool2d(kernel_size=2,stride=2,return_indices=True)
self.pad = nn.ReflectionPad2d((1,1,1,1))
if model:
weights = torch.load(model, map_location=lambda storage, location: storage)
if "model" in weights:
self.load_state_dict(weights["model"])
else:
self.load_state_dict(weights)
print("load model '%s' successfully" % model)
if fixed:
for param in self.parameters():
param.requires_grad = False
def forward(self, y):
y = self.conv0(y)
y = self.relu(self.conv11(self.pad(y)))
y = self.relu(self.conv12(self.pad(y)))
y = self.pool(y)
y = self.relu(self.conv21(self.pad(y)))
y = self.relu(self.conv22(self.pad(y)))
y = self.pool(y)
y = self.relu(self.conv31(self.pad(y)))
return y
def forward_branch(self, input):
out0 = self.conv0(input)
out11 = self.relu(self.conv11(self.pad(out0)))
out12 = self.relu(self.conv12(self.pad(out11)))
out12 = self.pool(out12)
out21 = self.relu(self.conv21(self.pad(out12)))
out22 = self.relu(self.conv22(self.pad(out21)))
out22 = self.pool(out22)
out31 = self.relu(self.conv31(self.pad(out22)))
return out11, out21, out31
def forward_aux(self, input, relu=True):
out0 = self.conv0(input)
out11 = self.relu(self.conv11(self.pad(out0)))
out12 = self.relu(self.conv12(self.pad(out11)))
out12 = self.pool(out12)
out21 = self.relu(self.conv21(self.pad(out12)))
out22 = self.relu(self.conv22(self.pad(out21)))
out22 = self.pool(out22)
out31 = self.relu(self.conv31(self.pad(out22)))
if relu:
out11_aux = self.relu(self.conv11_aux(out11))
out21_aux = self.relu(self.conv21_aux(out21))
out31_aux = self.relu(self.conv31_aux(out31))
else:
out11_aux = self.conv11_aux(out11)
out21_aux = self.conv21_aux(out21)
out31_aux = self.conv31_aux(out31)
return out11_aux, out21_aux, out31_aux
def forward_aux2(self, input):
out0 = self.conv0(input)
out11 = self.relu(self.conv11(self.pad(out0)))
out12 = self.relu(self.conv12(self.pad(out11)))
out12 = self.pool(out12)
out21 = self.relu(self.conv21(self.pad(out12)))
out22 = self.relu(self.conv22(self.pad(out21)))
out22 = self.pool(out22)
out31 = self.relu(self.conv31(self.pad(out22)))
out11_aux = self.relu(self.conv11_aux(out11))
out21_aux = self.relu(self.conv21_aux(out21))
out31_aux = self.relu(self.conv31_aux(out31))
return out11_aux, out21_aux, out31_aux, out31 # used for feature loss and style loss
def forward_pwct(self, input): # for function in photo WCT
out0 = self.conv0(input)
out11 = self.relu(self.conv11(self.pad(out0)))
out12 = self.relu(self.conv12(self.pad(out11)))
pool12, out12_ix = self.pool2(out12)
out21 = self.relu(self.conv21(self.pad(pool12)))
out22 = self.relu(self.conv22(self.pad(out21)))
pool22, out22_ix = self.pool2(out22)
out31 = self.relu(self.conv31(self.pad(pool22)))
return out31, out12_ix, out12.size(), out22_ix, out22.size()
class SmallEncoder4_16x_aux(nn.Module):
def __init__(self, model=None, fixed=False):
super(SmallEncoder4_16x_aux, self).__init__()
self.fixed = fixed
self.conv0 = nn.Conv2d(3,3,1,1,0)
self.conv0.requires_grad = False
self.conv11 = nn.Conv2d( 3, 16,3,1,0, dilation=1)
self.conv12 = nn.Conv2d( 16, 16,3,1,0, dilation=1)
self.conv21 = nn.Conv2d( 16, 32,3,1,0, dilation=1)
self.conv22 = nn.Conv2d( 32, 32,3,1,0, dilation=1)
self.conv31 = nn.Conv2d( 32, 64,3,1,0)
self.conv32 = nn.Conv2d( 64, 64,3,1,0)
self.conv33 = nn.Conv2d( 64, 64,3,1,0)
self.conv34 = nn.Conv2d( 64, 64,3,1,0)
self.conv41 = nn.Conv2d( 64,128,3,1,0)
self.conv11_aux = nn.Conv2d( 16, 64,1,1,0)
self.conv21_aux = nn.Conv2d( 32,128,1,1,0)
self.conv31_aux = nn.Conv2d( 64,256,1,1,0)
self.conv41_aux = nn.Conv2d(128,512,1,1,0)
self.relu = nn.ReLU(inplace=True)
self.pool = nn.MaxPool2d(kernel_size=2,stride=2,return_indices=False)
self.pool2 = nn.MaxPool2d(kernel_size=2,stride=2,return_indices=True)
self.pad = nn.ReflectionPad2d((1,1,1,1))
if model:
weights = torch.load(model, map_location=lambda storage, location: storage)
if "model" in weights:
self.load_state_dict(weights["model"])
else:
self.load_state_dict(weights)
print("load model '%s' successfully" % model)
if fixed:
for param in self.parameters():
param.requires_grad = False
def forward(self, y):
y = self.conv0(y)
y = self.relu(self.conv11(self.pad(y)))
y = self.relu(self.conv12(self.pad(y)))
y = self.pool(y)
y = self.relu(self.conv21(self.pad(y)))
y = self.relu(self.conv22(self.pad(y)))
y = self.pool(y)
y = self.relu(self.conv31(self.pad(y)))
y = self.relu(self.conv32(self.pad(y)))
y = self.relu(self.conv33(self.pad(y)))
y = self.relu(self.conv34(self.pad(y)))
y = self.pool(y)
y = self.relu(self.conv41(self.pad(y)))
return y
def forward_branch(self, input):
out0 = self.conv0(input)
out11 = self.relu(self.conv11(self.pad(out0)))
out12 = self.relu(self.conv12(self.pad(out11)))
out12 = self.pool(out12)
out21 = self.relu(self.conv21(self.pad(out12)))
out22 = self.relu(self.conv22(self.pad(out21)))
out22 = self.pool(out22)
out31 = self.relu(self.conv31(self.pad(out22)))
out32 = self.relu(self.conv32(self.pad(out31)))
out33 = self.relu(self.conv33(self.pad(out32)))
out34 = self.relu(self.conv34(self.pad(out33)))
out34 = self.pool(out34)
out41 = self.relu(self.conv41(self.pad(out34)))
return out11, out21, out31, out41
def forward_pwct(self, input): # for function in photo WCT
out0 = self.conv0(input)
out11 = self.relu(self.conv11(self.pad(out0)))
out12 = self.relu(self.conv12(self.pad(out11)))
pool12, out12_ix = self.pool2(out12)
out21 = self.relu(self.conv21(self.pad(pool12)))
out22 = self.relu(self.conv22(self.pad(out21)))
pool22, out22_ix = self.pool2(out22)
out31 = self.relu(self.conv31(self.pad(pool22)))
out32 = self.relu(self.conv32(self.pad(out31)))
out33 = self.relu(self.conv33(self.pad(out32)))
out34 = self.relu(self.conv34(self.pad(out33)))
pool34, out34_ix = self.pool2(out34)
out41 = self.relu(self.conv41(self.pad(pool34)))
return out41, out12_ix, out12.size(), out22_ix, out22.size(), out34_ix, out34.size()
def forward_aux(self, input, relu=True):
out0 = self.conv0(input)
out11 = self.relu(self.conv11(self.pad(out0)))
out12 = self.relu(self.conv12(self.pad(out11)))
out12 = self.pool(out12)
out21 = self.relu(self.conv21(self.pad(out12)))
out22 = self.relu(self.conv22(self.pad(out21)))
out22 = self.pool(out22)
out31 = self.relu(self.conv31(self.pad(out22)))
out32 = self.relu(self.conv32(self.pad(out31)))
out33 = self.relu(self.conv33(self.pad(out32)))
out34 = self.relu(self.conv34(self.pad(out33)))
out34 = self.pool(out34)
out41 = self.relu(self.conv41(self.pad(out34)))
if relu:
out11_aux = self.relu(self.conv11_aux(out11))
out21_aux = self.relu(self.conv21_aux(out21))
out31_aux = self.relu(self.conv31_aux(out31))
out41_aux = self.relu(self.conv41_aux(out41))
else:
out11_aux = self.conv11_aux(out11)
out21_aux = self.conv21_aux(out21)
out31_aux = self.conv31_aux(out31)
out41_aux = self.conv41_aux(out41)
return out11_aux, out21_aux, out31_aux, out41_aux
def forward_aux2(self, input):
out0 = self.conv0(input)
out11 = self.relu(self.conv11(self.pad(out0)))
out12 = self.relu(self.conv12(self.pad(out11)))
out12 = self.pool(out12)
out21 = self.relu(self.conv21(self.pad(out12)))
out22 = self.relu(self.conv22(self.pad(out21)))
out22 = self.pool(out22)
out31 = self.relu(self.conv31(self.pad(out22)))
out32 = self.relu(self.conv32(self.pad(out31)))
out33 = self.relu(self.conv33(self.pad(out32)))
out34 = self.relu(self.conv34(self.pad(out33)))
out34 = self.pool(out34)
out41 = self.relu(self.conv41(self.pad(out34)))
out11_aux = self.relu(self.conv11_aux(out11))
out21_aux = self.relu(self.conv21_aux(out21))
out31_aux = self.relu(self.conv31_aux(out31))
out41_aux = self.relu(self.conv41_aux(out41))
return out11_aux, out21_aux, out31_aux, out41_aux, out41 # used for feature loss and style loss
class SmallEncoder5_16x_aux(nn.Module):
def __init__(self, model=None, fixed=False):
super(SmallEncoder5_16x_aux, self).__init__()
self.fixed = fixed
self.conv0 = nn.Conv2d(3,3,1,1,0)
self.conv0.requires_grad = False
self.conv11 = nn.Conv2d( 3, 16,3,1,0, dilation=1)
self.conv12 = nn.Conv2d( 16, 16,3,1,0, dilation=1)
self.conv21 = nn.Conv2d( 16, 32,3,1,0, dilation=1)
self.conv22 = nn.Conv2d( 32, 32,3,1,0, dilation=1)
self.conv31 = nn.Conv2d( 32, 64,3,1,0, dilation=1)
self.conv32 = nn.Conv2d( 64, 64,3,1,0, dilation=1)
self.conv33 = nn.Conv2d( 64, 64,3,1,0, dilation=1)
self.conv34 = nn.Conv2d( 64, 64,3,1,0, dilation=1)
self.conv41 = nn.Conv2d( 64,128,3,1,0)
self.conv42 = nn.Conv2d(128,128,3,1,0)
self.conv43 = nn.Conv2d(128,128,3,1,0)
self.conv44 = nn.Conv2d(128,128,3,1,0)
self.conv51 = nn.Conv2d(128,128,3,1,0)
self.conv11_aux = nn.Conv2d( 16, 64,1,1,0)
self.conv21_aux = nn.Conv2d( 32,128,1,1,0)
self.conv31_aux = nn.Conv2d( 64,256,1,1,0)
self.conv41_aux = nn.Conv2d(128,512,1,1,0)
self.conv51_aux = nn.Conv2d(128,512,1,1,0)
self.relu = nn.ReLU(inplace=True)
self.pool = nn.MaxPool2d(kernel_size=2,stride=2,return_indices=False)
self.pad = nn.ReflectionPad2d((1,1,1,1))
if model:
weights = torch.load(model, map_location=lambda storage, location: storage)
if "model" in weights:
self.load_state_dict(weights["model"])
else:
self.load_state_dict(weights)
print("load model '%s' successfully" % model)
if fixed:
for param in self.parameters():
param.requires_grad = False
def forward(self, y):
y = self.conv0(y)
y = self.relu(self.conv11(self.pad(y)))
y = self.relu(self.conv12(self.pad(y)))
y = self.pool(y)
y = self.relu(self.conv21(self.pad(y)))
y = self.relu(self.conv22(self.pad(y)))
y = self.pool(y)
y = self.relu(self.conv31(self.pad(y)))
y = self.relu(self.conv32(self.pad(y)))
y = self.relu(self.conv33(self.pad(y)))
y = self.relu(self.conv34(self.pad(y)))
y = self.pool(y)
y = self.relu(self.conv41(self.pad(y)))
y = self.relu(self.conv42(self.pad(y)))
y = self.relu(self.conv43(self.pad(y)))
y = self.relu(self.conv44(self.pad(y)))
y = self.pool(y)
y = self.relu(self.conv51(self.pad(y)))
return y
def forward_branch(self, input):
out0 = self.conv0(input)
out11 = self.relu(self.conv11(self.pad(out0)))
out12 = self.relu(self.conv12(self.pad(out11)))
out12 = self.pool(out12)
out21 = self.relu(self.conv21(self.pad(out12)))
out22 = self.relu(self.conv22(self.pad(out21)))
out22 = self.pool(out22)
out31 = self.relu(self.conv31(self.pad(out22)))
out32 = self.relu(self.conv32(self.pad(out31)))
out33 = self.relu(self.conv33(self.pad(out32)))
out34 = self.relu(self.conv34(self.pad(out33)))
out34 = self.pool(out34)
out41 = self.relu(self.conv41(self.pad(out34)))
out42 = self.relu(self.conv42(self.pad(out41)))
out43 = self.relu(self.conv43(self.pad(out42)))
out44 = self.relu(self.conv44(self.pad(out43)))
out44 = self.pool(out44)
out51 = self.relu(self.conv51(self.pad(out44)))
return out11, out21, out31, out41, out51
def forward_aux(self, input, relu=True):
out0 = self.conv0(input)
out11 = self.relu(self.conv11(self.pad(out0)))
out12 = self.relu(self.conv12(self.pad(out11)))
out12 = self.pool(out12)
out21 = self.relu(self.conv21(self.pad(out12)))
out22 = self.relu(self.conv22(self.pad(out21)))
out22 = self.pool(out22)
out31 = self.relu(self.conv31(self.pad(out22)))
out32 = self.relu(self.conv32(self.pad(out31)))
out33 = self.relu(self.conv33(self.pad(out32)))
out34 = self.relu(self.conv34(self.pad(out33)))
out34 = self.pool(out34)
out41 = self.relu(self.conv41(self.pad(out34)))
out42 = self.relu(self.conv42(self.pad(out41)))
out43 = self.relu(self.conv43(self.pad(out42)))
out44 = self.relu(self.conv44(self.pad(out43)))
out44 = self.pool(out44)
out51 = self.relu(self.conv51(self.pad(out44)))
if relu:
out11_aux = self.relu(self.conv11_aux(out11))
out21_aux = self.relu(self.conv21_aux(out21))
out31_aux = self.relu(self.conv31_aux(out31))
out41_aux = self.relu(self.conv41_aux(out41))
out51_aux = self.relu(self.conv51_aux(out51))
else:
out11_aux = self.conv11_aux(out11)
out21_aux = self.conv21_aux(out21)
out31_aux = self.conv31_aux(out31)
out41_aux = self.conv41_aux(out41)
out51_aux = self.conv51_aux(out51)
return out11_aux, out21_aux, out31_aux, out41_aux, out51_aux
def forward_aux2(self, input):
out0 = self.conv0(input)
out11 = self.relu(self.conv11(self.pad(out0)))
out12 = self.relu(self.conv12(self.pad(out11)))
out12 = self.pool(out12)
out21 = self.relu(self.conv21(self.pad(out12)))
out22 = self.relu(self.conv22(self.pad(out21)))
out22 = self.pool(out22)
out31 = self.relu(self.conv31(self.pad(out22)))
out32 = self.relu(self.conv32(self.pad(out31)))
out33 = self.relu(self.conv33(self.pad(out32)))
out34 = self.relu(self.conv34(self.pad(out33)))
out34 = self.pool(out34)
out41 = self.relu(self.conv41(self.pad(out34)))
out42 = self.relu(self.conv42(self.pad(out41)))
out43 = self.relu(self.conv43(self.pad(out42)))
out44 = self.relu(self.conv44(self.pad(out43)))
out44 = self.pool(out44)
out51 = self.relu(self.conv51(self.pad(out44)))
out11_aux = self.relu(self.conv11_aux(out11))
out21_aux = self.relu(self.conv21_aux(out21))
out31_aux = self.relu(self.conv31_aux(out31))
out41_aux = self.relu(self.conv41_aux(out41))
out51_aux = self.relu(self.conv51_aux(out51))
return out11_aux, out21_aux, out31_aux, out41_aux, out51_aux, out51 # output out51
def forward_aux3(self, input, relu=False):
out0 = self.conv0(input)
out11 = self.relu(self.conv11(self.pad(out0)))
out12 = self.relu(self.conv12(self.pad(out11)))
out12 = self.pool(out12)
out21 = self.relu(self.conv21(self.pad(out12)))
out22 = self.relu(self.conv22(self.pad(out21)))
out22 = self.pool(out22)
out31 = self.relu(self.conv31(self.pad(out22)))
out32 = self.relu(self.conv32(self.pad(out31)))
out33 = self.relu(self.conv33(self.pad(out32)))
out34 = self.relu(self.conv34(self.pad(out33)))
out34 = self.pool(out34)
out41 = self.relu(self.conv41(self.pad(out34)))
out42 = self.relu(self.conv42(self.pad(out41)))
out43 = self.relu(self.conv43(self.pad(out42)))
out44 = self.relu(self.conv44(self.pad(out43)))
out44 = self.pool(out44)
out51 = self.relu(self.conv51(self.pad(out44)))
if relu:
out51_aux = self.relu(self.conv51_aux(out51))
else:
out51_aux = self.conv51_aux(out51)
return out11, out21, out31, out41, out51, out51_aux |
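# --- Illustrative sketch (added, not part of the original file) ---
# The "used for feature loss and style loss" comments above suggest the aux
# outputs feed perceptual objectives. One common (assumed) formulation compares
# Gram matrices of corresponding feature maps, e.g.
#   style_loss = nn.functional.mse_loss(_gram_matrix(out51_aux), _gram_matrix(teacher_relu5_1))
def _gram_matrix(feat):
    n, c, h, w = feat.size()
    f = feat.view(n, c, h * w)
    return torch.bmm(f, f.transpose(1, 2)) / (c * h * w)  # (N, C, C) second-order statistics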
snippets/04b - Advanced groupby operations30.py | joshuagottardogalvani/pandas-tutorial | 183 | 11069436 | hamlets = titles[titles['title'].str.match('Hamlet')]
hamlets['title'].value_counts() |
Virtual_makeover/Virtual Makeup/Makeup.py | swapnilgarg7/Face-X | 175 | 11069445 | import cv2
import dlib
import numpy as np
def empty(a):
pass
cv2.namedWindow("BGR")
cv2.resizeWindow("BGR",400,240)
cv2.createTrackbar("Blue","BGR",0,255,empty)
cv2.createTrackbar("Green","BGR",0,255,empty)
cv2.createTrackbar("Red","BGR",0,255,empty)
def create(img, points,masked = False, cropped = True):
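# Note (added): with masked=True the polygon region is kept via bitwise_and;
# with cropped=True a 5x zoomed crop of that region is returned, otherwise the
# filled white mask itself is returned (the lip-colouring loop below relies on this).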
if masked:
mask = np.zeros_like(img)
mask = cv2.fillPoly(mask,[points],(255,255,255))
# cv2.imshow("mask",mask)
img = cv2.bitwise_and(img,mask)
if cropped:
b = cv2.boundingRect(points)
x,y,w,h = b
imgCrop = img[y:y+h,x:x+w]
imgCrop = cv2.resize(imgCrop,(0,0),None,5,5)
return imgCrop
else:
return mask
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
while True:
img = cv2.imread("./img.png")
img = cv2.resize(img,(0,0), None,2,2)
imgOriginal = img.copy()
imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
faces = detector(imgGray)
for face in faces:
x1,y1 = face.left(),face.top()
x2,y2 = face.right(),face.bottom()
# imgOri = cv2.rectangle(imgOriginal,(x1,y1),(x2,y2),(0,255,0),1)
landmarks = predictor(imgGray,face)
mypoints = []
for n in range(0,68):
x = landmarks.part(n).x
y = landmarks.part(n).y
mypoints.append([x,y])
# cv2.circle(imgOriginal,(x,y),2,(0,0,255),3)
# cv2.putText(imgOriginal,str(n),(x,y-10),cv2.FONT_HERSHEY_COMPLEX,0.5,(0,255,0),1)
mypoints = np.array(mypoints)
lips = create(img,mypoints[48:61],masked=True,cropped=False)
# cv2.imshow("Lip",lips)
imgColor = np.zeros_like(lips)
a = cv2.getTrackbarPos("Blue","BGR")
q = cv2.getTrackbarPos("Green","BGR")
w = cv2.getTrackbarPos("Red","BGR")
imgColor[:] = a,q,w
# cv2.imshow("Color",imgColor)
imgColor = cv2.bitwise_and(lips,imgColor)
imgColor = cv2.GaussianBlur(imgColor,(9,9),20)
imgOriginal_Image = cv2.cvtColor(imgOriginal,cv2.COLOR_BGR2GRAY)
imgOriginal_Image = cv2.cvtColor(imgOriginal_Image,cv2.COLOR_GRAY2BGR)
imgColor = cv2.addWeighted(imgOriginal_Image,1,imgColor,0.8,0)
cv2.imshow("BGR",imgColor)
cv2.imshow("Original_Image",imgOriginal)
key = cv2.waitKey(1)
if key == ord("q"):
break
cv2.destroyAllWindows()
|
twitch/helix/models/video.py | sotif/Twitch-Python | 177 | 11069446 | from typing import Dict, Any
import twitch.helix as helix
import twitch.v5 as v5
from twitch.api import API
from .model import Model
class Video(Model):
def __init__(self, api: API, data: Dict[str, Any]):
super().__init__(api, data)
self.id: str = data.get('id')
self.user_id: str = data.get('user_id')
self.user_name: str = data.get('user_name')
self.title: str = data.get('title')
self.description: str = data.get('description')
self.created_at: str = data.get('created_at')
self.published_at: str = data.get('published_at')
self.url: str = data.get('url')
self.thumbnail_url: str = data.get('thumbnail_url')
self.viewable: str = data.get('viewable')
self.view_count: int = data.get('view_count')
self.language: str = data.get('language')
self.type: str = data.get('type')
self.duration: str = data.get('duration')
def __str__(self):
return self.title
@property
def comments(self) -> 'v5.Comments':
return v5.V5(client_id=self._api.client_id,
use_cache=self._api.use_cache,
cache_duration=self._api.cache_duration).comments(self.id)
@property
def user(self) -> 'helix.User':
return helix.Users(self._api, int(self.user_id))[0]
|
plex_mpv_shim/player.py | romosborne/plex-mpv-shim | 231 | 11069471 | import logging
import os
import sys
import requests
import urllib.parse
from threading import RLock, Lock
from queue import Queue
from . import conffile
from .utils import synchronous, Timer
from .conf import settings
from .menu import OSDMenu
from .media import MediaType
log = logging.getLogger('player')
mpv_log = logging.getLogger('mpv')
python_mpv_available=True
is_using_ext_mpv=False
if not settings.mpv_ext:
try:
import mpv
log.info("Using libmpv1 playback backend.")
except OSError:
log.warning("Could not find libmpv1.")
python_mpv_available=False
if settings.mpv_ext or not python_mpv_available:
import python_mpv_jsonipc as mpv
log.info("Using external mpv playback backend.")
is_using_ext_mpv=True
APP_NAME = 'plex-mpv-shim'
SUBTITLE_POS = {
"top": 0,
"bottom": 100,
"middle": 80,
}
mpv_log_levels = {
"fatal": mpv_log.error,
"error": mpv_log.error,
"warn": mpv_log.warning,
"info": mpv_log.info
}
def mpv_log_handler(level, prefix, text):
if level in mpv_log_levels:
mpv_log_levels[level]("{0}: {1}".format(prefix, text))
else:
mpv_log.debug("{0}: {1}".format(prefix, text))
win_utils = None
if sys.platform.startswith("win32") or sys.platform.startswith("cygwin"):
try:
from . import win_utils
except ModuleNotFoundError:
log.warning("win_utils is not available.")
# Q: What is with the put_task call?
# A: Some calls to python-mpv require event processing.
# put_task is used to deal with the events originating from
# the event thread, which would cause deadlock if they run there.
class PlayerManager(object):
"""
Manages the relationship between a ``Player`` instance and a ``Media``
item. This is designed to be used as a singleton via the ``playerManager``
instance in this module. All communication between a caller and either the
current ``player`` or ``media`` instance should be done through this class
for thread safety reasons as all methods that access the ``player`` or
``media`` are thread safe.
"""
def __init__(self):
mpv_config = conffile.get(APP_NAME,"mpv.conf", True)
input_config = conffile.get(APP_NAME,"input.conf", True)
extra_options = {}
self._media_item = None
self._lock = RLock()
self._finished_lock = Lock()
self.last_update = Timer()
self.__part = 1
self.timeline_trigger = None
self.action_trigger = None
self.external_subtitles = {}
self.external_subtitles_rev = {}
self.url = None
self.evt_queue = Queue()
self.is_in_intro = False
self.intro_has_triggered = False
if is_using_ext_mpv:
extra_options = {
"start_mpv": settings.mpv_ext_start,
"ipc_socket": settings.mpv_ext_ipc,
"mpv_location": settings.mpv_ext_path,
"player-operation-mode": "cplayer"
}
self._player = mpv.MPV(input_default_bindings=True, input_vo_keyboard=True,
input_media_keys=True, include=mpv_config, input_conf=input_config,
log_handler=mpv_log_handler, loglevel=settings.mpv_log_level,
**extra_options)
self.menu = OSDMenu(self)
if hasattr(self._player, 'osc'):
self._player.osc = settings.enable_osc
else:
log.warning("This mpv version doesn't support on-screen controller.")
# Wrapper for on_key_press that ignores None.
def keypress(key):
def wrapper(func):
if key is not None:
self._player.on_key_press(key)(func)
return func
return wrapper
@self._player.on_key_press('CLOSE_WIN')
@self._player.on_key_press('STOP')
@keypress(settings.kb_stop)
def handle_stop():
self.stop()
self.timeline_handle()
@keypress(settings.kb_prev)
def handle_prev():
self.put_task(self.play_prev)
@keypress(settings.kb_next)
def handle_next():
self.put_task(self.play_next)
@self._player.on_key_press('PREV')
@self._player.on_key_press('XF86_PREV')
def handle_media_prev():
if settings.media_key_seek:
self._player.command("seek", -15)
else:
self.put_task(self.play_prev)
@self._player.on_key_press('NEXT')
@self._player.on_key_press('XF86_NEXT')
def handle_media_next():
if settings.media_key_seek:
if self.is_in_intro:
self.skip_intro()
else:
self._player.command("seek", 30)
else:
self.put_task(self.play_next)
@keypress(settings.kb_watched)
def handle_watched():
self.put_task(self.watched_skip)
@keypress(settings.kb_unwatched)
def handle_unwatched():
self.put_task(self.unwatched_quit)
@keypress(settings.kb_menu)
def menu_open():
if not self.menu.is_menu_shown:
self.menu.show_menu()
else:
self.menu.hide_menu()
@keypress(settings.kb_menu_esc)
def menu_back():
if self.menu.is_menu_shown:
self.menu.menu_action('back')
else:
self._player.command('set', 'fullscreen', 'no')
@keypress(settings.kb_menu_ok)
def menu_ok():
self.menu.menu_action('ok')
@keypress(settings.kb_menu_left)
def menu_left():
if self.menu.is_menu_shown:
self.menu.menu_action('left')
else:
self._player.command("seek", settings.seek_left)
@keypress(settings.kb_menu_right)
def menu_right():
if self.menu.is_menu_shown:
self.menu.menu_action('right')
else:
if self.is_in_intro:
self.skip_intro()
else:
self._player.command("seek", settings.seek_right)
@keypress(settings.kb_menu_up)
def menu_up():
if self.menu.is_menu_shown:
self.menu.menu_action('up')
else:
if self.is_in_intro:
self.skip_intro()
else:
self._player.command("seek", settings.seek_up)
@keypress(settings.kb_menu_down)
def menu_down():
if self.menu.is_menu_shown:
self.menu.menu_action('down')
else:
self._player.command("seek", settings.seek_down)
@keypress(settings.kb_pause)
def handle_pause():
if self.menu.is_menu_shown:
self.menu.menu_action('ok')
else:
self.toggle_pause()
# This gives you an interactive python debugger prompt.
@keypress(settings.kb_debug)
def handle_debug():
import pdb
pdb.set_trace()
# Fires between episodes.
@self._player.property_observer('eof-reached')
def handle_end(_name, reached_end):
if self._media_item and reached_end:
has_lock = self._finished_lock.acquire(False)
self.put_task(self.finished_callback, has_lock)
# Fires at the end.
@self._player.event_callback('idle')
def handle_end_idle(event):
if self._media_item:
has_lock = self._finished_lock.acquire(False)
self.put_task(self.finished_callback, has_lock)
# Put a task to the event queue.
# This ensures the task executes outside
# of an event handler, which causes a crash.
def put_task(self, func, *args):
self.evt_queue.put([func, args])
if self.action_trigger:
self.action_trigger.set()
# Trigger the timeline to update all
# clients immediately.
def timeline_handle(self):
if self.timeline_trigger:
self.timeline_trigger.set()
def skip_intro(self):
if self._media_item.media_type == MediaType.VIDEO:
self._player.playback_time = self._media_item.intro_end
self.timeline_handle()
self.is_in_intro = False
@synchronous('_lock')
def update(self):
if ((settings.skip_intro_always or settings.skip_intro_prompt)
and self._media_item is not None and self._media_item.media_type == MediaType.VIDEO and self._media_item.intro_start is not None
and self._player.playback_time is not None
and self._player.playback_time > self._media_item.intro_start
and self._player.playback_time < self._media_item.intro_end):
if not self.is_in_intro:
if settings.skip_intro_always and not self.intro_has_triggered:
self.intro_has_triggered = True
self.skip_intro()
self._player.show_text("Skipped Intro", 3000, 1)
elif settings.skip_intro_prompt:
self._player.show_text("Seek to Skip Intro", 3000, 1)
self.is_in_intro = True
else:
self.is_in_intro = False
while not self.evt_queue.empty():
func, args = self.evt_queue.get()
func(*args)
if self._media_item and not self._player.playback_abort:
if not self.is_paused():
self.last_update.restart()
def play(self, media_item, offset=0):
url = media_item.get_playback_url()
if not url:
log.error("PlayerManager::play no URL found")
return
self._play_media(media_item, url, offset)
@synchronous('_lock')
def _play_media(self, media_item, url, offset=0):
self.url = url
self.menu.hide_menu()
if settings.log_decisions:
log.debug("Playing: {0}".format(url))
self._player.play(self.url)
self._player.wait_for_property("duration")
if settings.fullscreen:
self._player.fs = True
self._player.force_media_title = media_item.get_proper_title()
self._media_item = media_item
self.is_in_intro = False
self.intro_has_triggered = False
self.update_subtitle_visuals(False)
self.upd_player_hide()
self.external_subtitles = {}
self.external_subtitles_rev = {}
if win_utils:
win_utils.raise_mpv()
if offset > 0:
self._player.playback_time = offset
if media_item.media_type == MediaType.VIDEO and not media_item.is_transcode:
audio_idx = media_item.get_audio_idx()
if audio_idx is not None:
log.debug("PlayerManager::play selecting audio stream index=%s" % audio_idx)
self._player.audio = audio_idx
sub_idx = media_item.get_subtitle_idx()
xsub_id = media_item.get_external_sub_id()
if sub_idx is not None:
log.debug("PlayerManager::play selecting subtitle index=%s" % sub_idx)
self._player.sub = sub_idx
elif xsub_id is not None:
log.debug("PlayerManager::play selecting external subtitle id=%s" % xsub_id)
self.load_external_sub(xsub_id)
else:
self._player.sub = 'no'
self._player.pause = False
self.timeline_handle()
if self._finished_lock.locked():
self._finished_lock.release()
def exec_stop_cmd(self):
if settings.stop_cmd:
os.system(settings.stop_cmd)
@synchronous('_lock')
def stop(self, playend=False):
if not playend and (not self._media_item or self._player.playback_abort):
self.exec_stop_cmd()
return
if not playend:
log.debug("PlayerManager::stop stopping playback of %s" % self._media_item)
if self._media_item.media_type == MediaType.VIDEO:
self._media_item.terminate_transcode()
self._media_item = None
self._player.command("stop")
self._player.pause = False
self.timeline_handle()
if not playend:
self.exec_stop_cmd()
@synchronous('_lock')
def get_volume(self, percent=False):
if self._player:
if not percent:
return self._player.volume / 100
return self._player.volume
@synchronous('_lock')
def toggle_pause(self):
if not self._player.playback_abort:
self._player.pause = not self._player.pause
self.timeline_handle()
@synchronous('_lock')
def seek(self, offset):
"""
Seek to ``offset`` seconds
"""
if not self._player.playback_abort:
if self.is_in_intro and offset > self._player.playback_time:
self.skip_intro()
else:
self._player.playback_time = offset
self.timeline_handle()
@synchronous('_lock')
def set_volume(self, pct):
if not self._player.playback_abort:
self._player.volume = pct
self.timeline_handle()
@synchronous('_lock')
def get_state(self):
if self._player.playback_abort:
return "stopped"
if self._player.pause:
return "paused"
return "playing"
@synchronous('_lock')
def is_paused(self):
if not self._player.playback_abort:
return self._player.pause
return False
@synchronous('_lock')
def finished_callback(self, has_lock):
if not self._media_item:
return
self._media_item.set_played()
if self._media_item.is_multipart():
if has_lock:
log.debug("PlayerManager::finished_callback media is multi-part, checking for next part")
# Try to select the next part
next_part = self.__part+1
if self._media_item.select_part(next_part):
self.__part = next_part
log.debug("PlayerManager::finished_callback starting next part")
self.play(self._media_item)
else:
log.debug("PlayerManager::finished_callback No lock, skipping...")
elif self._media_item.parent.has_next and settings.auto_play:
if has_lock:
log.debug("PlayerManager::finished_callback starting next episode")
self.play(self._media_item.parent.get_next().get_media_item(0))
else:
log.debug("PlayerManager::finished_callback No lock, skipping...")
else:
if settings.media_ended_cmd:
os.system(settings.media_ended_cmd)
log.debug("PlayerManager::finished_callback reached end")
self.stop(playend=True)
@synchronous('_lock')
def watched_skip(self):
if not self._media_item:
return
self._media_item.set_played()
self.play_next()
@synchronous('_lock')
def unwatched_quit(self):
if not self._media_item:
return
self._media_item.set_played(False)
self.stop()
@synchronous('_lock')
def play_next(self):
if self._media_item.parent.has_next:
self.play(self._media_item.parent.get_next().get_media_item(0))
return True
return False
@synchronous('_lock')
def skip_to(self, key):
media = self._media_item.parent.get_from_key(key)
if media:
self.play(media.get_media_item(0))
return True
return False
@synchronous('_lock')
def play_prev(self):
if self._media_item.parent.has_prev:
self.play(self._media_item.parent.get_prev().get_media_item(0))
return True
return False
@synchronous('_lock')
def restart_playback(self):
current_time = self._player.playback_time
self.play(self._media_item, current_time)
return True
@synchronous('_lock')
def get_media_item_attr(self, attr, default=None):
if self._media_item:
return self._media_item.get_media_item_attr(attr, default)
return default
@synchronous('_lock')
def set_streams(self, audio_uid, sub_uid):
if not self._media_item.is_transcode:
if audio_uid is not None:
log.debug("PlayerManager::play selecting audio stream index=%s" % audio_uid)
self._player.audio = self._media_item.audio_seq[audio_uid]
if sub_uid == '0':
log.debug("PlayerManager::play selecting subtitle stream (none)")
self._player.sub = 'no'
elif sub_uid is not None:
log.debug("PlayerManager::play selecting subtitle stream index=%s" % sub_uid)
if sub_uid in self._media_item.subtitle_seq:
self._player.sub = self._media_item.subtitle_seq[sub_uid]
else:
log.debug("PlayerManager::play selecting external subtitle id=%s" % sub_uid)
self.load_external_sub(sub_uid)
self._media_item.set_streams(audio_uid, sub_uid)
if self._media_item.is_transcode:
self.restart_playback()
self.timeline_handle()
@synchronous('_lock')
def load_external_sub(self, sub_id):
if sub_id in self.external_subtitles:
self._player.sub = self.external_subtitles[sub_id]
else:
try:
sub_url = self._media_item.get_external_sub(sub_id)
if settings.log_decisions:
log.debug("Load External Subtitle: {0}".format(sub_url))
self._player.sub_add(sub_url)
self.external_subtitles[sub_id] = self._player.sub
self.external_subtitles_rev[self._player.sub] = sub_id
except SystemError:
log.debug("PlayerManager::could not load external subtitle")
def get_track_ids(self):
if self._media_item.is_transcode:
return self._media_item.get_transcode_streams()
else:
aid, sid = None, None
if self._player.sub and self._player.sub != 'no':
if self._player.sub in self.external_subtitles_rev:
sid = self.external_subtitles_rev.get(self._player.sub, '')
else:
sid = self._media_item.subtitle_uid.get(self._player.sub, '')
if self._player.audio != 'no':
aid = self._media_item.audio_uid.get(self._player.audio, '')
return aid, sid
def update_subtitle_visuals(self, restart_transcode=True):
if self._media_item.is_transcode:
if restart_transcode:
self.restart_playback()
else:
self._player.sub_pos = SUBTITLE_POS[settings.subtitle_position]
self._player.sub_scale = settings.subtitle_size / 100
self._player.sub_color = settings.subtitle_color
self.timeline_handle()
def upd_player_hide(self):
self._player.keep_open = self._media_item.parent.has_next
def terminate(self):
self.stop()
if is_using_ext_mpv:
self._player.terminate()
playerManager = PlayerManager()
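# --- Illustrative sketch (added, not part of the original module) ---
# The PlayerManager docstring explains that all player/media access should go
# through this singleton, with the @synchronous('_lock') decorators serializing
# calls. A hypothetical remote-control handler would therefore look like:
def _example_remote_handler(command, seconds=0):
    if command == "seek":
        playerManager.seek(seconds)
    elif command == "pause":
        playerManager.toggle_pause()
    elif command == "stop":
        playerManager.stop()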
|
pyquil/gates.py | stjordanis/pyquil | 677 | 11069477 | <reponame>stjordanis/pyquil<gh_stars>100-1000
##############################################################################
# Copyright 2016-2018 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from numbers import Real
from typing import Callable, Mapping, Optional, Tuple, Union, Iterable, no_type_check
import numpy as np
from pyquil.quilatom import (
Expression,
FormalArgument,
Frame,
MemoryReference,
MemoryReferenceDesignator,
ParameterDesignator,
QubitDesignator,
Qubit,
unpack_classical_reg,
unpack_qubit,
Waveform,
)
from pyquil.quilbase import (
AbstractInstruction,
Declare,
Gate,
Halt,
Reset,
ResetQubit,
Measurement,
Nop,
Wait,
ClassicalNeg,
ClassicalNot,
ClassicalAnd,
ClassicalInclusiveOr,
ClassicalExclusiveOr,
ClassicalEqual,
ClassicalGreaterEqual,
ClassicalGreaterThan,
ClassicalLessEqual,
ClassicalLessThan,
ClassicalMove,
ClassicalExchange,
ClassicalConvert,
ClassicalLoad,
ClassicalStore,
ClassicalAdd,
ClassicalSub,
ClassicalMul,
ClassicalDiv,
Pulse,
SetFrequency,
ShiftFrequency,
SetPhase,
ShiftPhase,
SwapPhase,
SetScale,
Capture,
RawCapture,
DelayFrames,
DelayQubits,
FenceAll,
Fence,
)
def unpack_reg_val_pair(
classical_reg1: MemoryReferenceDesignator,
classical_reg2: Union[MemoryReferenceDesignator, int, float],
) -> Tuple[MemoryReference, Union[MemoryReference, int, float]]:
"""
Helper function for typechecking / type-coercing arguments to constructors for binary classical
operators.
:param classical_reg1: Specifier for the classical memory address to be modified.
:param classical_reg2: Specifier for the second argument: a classical memory address or an
immediate value.
:return: A pair of pyQuil objects suitable for use as operands.
"""
left = unpack_classical_reg(classical_reg1)
if isinstance(classical_reg2, (float, int)):
return left, classical_reg2
return left, unpack_classical_reg(classical_reg2)
def prepare_ternary_operands(
classical_reg1: MemoryReferenceDesignator,
classical_reg2: MemoryReferenceDesignator,
classical_reg3: Union[MemoryReferenceDesignator, int, float],
) -> Tuple[MemoryReference, MemoryReference, Union[MemoryReference, int, float]]:
"""
Helper function for typechecking / type-coercing arguments to constructors for ternary
classical operators.
:param classical_reg1: Specifier for the classical memory address to be modified.
:param classical_reg2: Specifier for the left operand: a classical memory address.
:param classical_reg3: Specifier for the right operand: a classical memory address or an
immediate value.
:return: A triple of pyQuil objects suitable for use as operands.
"""
if isinstance(classical_reg1, int):
raise TypeError("Target operand of comparison must be a memory address")
classical_reg1 = unpack_classical_reg(classical_reg1)
if isinstance(classical_reg2, int):
raise TypeError("Left operand of comparison must be a memory address")
classical_reg2 = unpack_classical_reg(classical_reg2)
if not isinstance(classical_reg3, (float, int)):
classical_reg3 = unpack_classical_reg(classical_reg3)
return classical_reg1, classical_reg2, classical_reg3
def I(qubit: QubitDesignator) -> Gate:
"""Produces the I identity gate::
I = [[1, 0],
[0, 1]]
This gate is a single qubit identity gate.
Note that this gate is different from the NOP instruction as noise channels
are typically still applied during the duration of identity gates. Identities will
also block parallelization like any other gate.
:param qubit: The qubit to apply the gate to.
:returns: A Gate object.
"""
return Gate(name="I", params=[], qubits=[unpack_qubit(qubit)])
def X(qubit: QubitDesignator) -> Gate:
"""Produces the X ("NOT") gate::
X = [[0, 1],
[1, 0]]
This gate is a single qubit X-gate.
:param qubit: The qubit to apply the gate to.
:returns: A Gate object.
"""
return Gate(name="X", params=[], qubits=[unpack_qubit(qubit)])
def Y(qubit: QubitDesignator) -> Gate:
"""Produces the Y gate::
Y = [[0, 0 - 1j],
[0 + 1j, 0]]
This gate is a single qubit Y-gate.
:param qubit: The qubit to apply the gate to.
:returns: A Gate object.
"""
return Gate(name="Y", params=[], qubits=[unpack_qubit(qubit)])
def Z(qubit: QubitDesignator) -> Gate:
"""Produces the Z gate::
Z = [[1, 0],
[0, -1]]
This gate is a single qubit Z-gate.
:param qubit: The qubit to apply the gate to.
:returns: A Gate object.
"""
return Gate(name="Z", params=[], qubits=[unpack_qubit(qubit)])
def H(qubit: QubitDesignator) -> Gate:
"""Produces the Hadamard gate::
H = (1 / sqrt(2)) * [[1, 1],
[1, -1]]
Produces the H instruction. This gate is a single qubit Hadamard gate.
:param qubit: The qubit to apply the gate to.
:returns: A Gate object.
"""
return Gate(name="H", params=[], qubits=[unpack_qubit(qubit)])
def S(qubit: QubitDesignator) -> Gate:
"""Produces the S gate::
S = [[1, 0],
[0, 1j]]
This gate is a single qubit S-gate.
:param qubit: The qubit to apply the gate to.
:returns: A Gate object.
"""
return Gate(name="S", params=[], qubits=[unpack_qubit(qubit)])
def T(qubit: QubitDesignator) -> Gate:
"""Produces the T gate::
T = [[1, 0],
[0, exp(1j * pi / 4)]]
This gate is a single qubit T-gate. It is equivalent to RZ(pi/4) up to a global phase.
:param qubit: The qubit to apply the gate to.
:returns: A Gate object.
"""
return Gate(name="T", params=[], qubits=[unpack_qubit(qubit)])
def RX(angle: ParameterDesignator, qubit: QubitDesignator) -> Gate:
"""Produces the RX gate::
RX(phi) = [[cos(phi / 2), -1j * sin(phi / 2)],
[-1j * sin(phi / 2), cos(phi / 2)]]
This gate is a single qubit X-rotation.
:param angle: The angle to rotate around the x-axis on the bloch sphere.
:param qubit: The qubit to apply the gate to.
:returns: A Gate object.
"""
return Gate(name="RX", params=[angle], qubits=[unpack_qubit(qubit)])
def RY(angle: ParameterDesignator, qubit: QubitDesignator) -> Gate:
"""Produces the RY gate::
RY(phi) = [[cos(phi / 2), -sin(phi / 2)],
[sin(phi / 2), cos(phi / 2)]]
This gate is a single qubit Y-rotation.
:param angle: The angle to rotate around the y-axis on the bloch sphere.
:param qubit: The qubit to apply the gate to.
:returns: A Gate object.
"""
return Gate(name="RY", params=[angle], qubits=[unpack_qubit(qubit)])
def RZ(angle: ParameterDesignator, qubit: QubitDesignator) -> Gate:
"""Produces the RZ gate::
RZ(phi) = [[cos(phi / 2) - 1j * sin(phi / 2), 0]
[0, cos(phi / 2) + 1j * sin(phi / 2)]]
This gate is a single qubit Z-rotation.
:param angle: The angle to rotate around the z-axis on the bloch sphere.
:param qubit: The qubit to apply the gate to.
:returns: A Gate object.
"""
return Gate(name="RZ", params=[angle], qubits=[unpack_qubit(qubit)])
def PHASE(angle: ParameterDesignator, qubit: QubitDesignator) -> Gate:
"""Produces the PHASE gate::
PHASE(phi) = [[1, 0],
[0, exp(1j * phi)]]
This is equivalent to the RZ gate up to a global phase.
:param angle: The angle to rotate around the z-axis on the bloch sphere.
:param qubit: The qubit to apply the gate to.
:returns: A Gate object.
"""
return Gate(name="PHASE", params=[angle], qubits=[unpack_qubit(qubit)])
def CZ(control: QubitDesignator, target: QubitDesignator) -> Gate:
"""Produces a controlled-Z gate::
CZ = [[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, -1]]
This gate applies to two qubit arguments to produce the controlled-Z gate instruction.
:param control: The control qubit.
:param target: The target qubit. The target qubit has a Z-gate applied to it if the control
qubit is in the excited state.
:returns: A Gate object.
"""
return Gate(name="CZ", params=[], qubits=[unpack_qubit(q) for q in (control, target)])
def CNOT(control: QubitDesignator, target: QubitDesignator) -> Gate:
"""Produces a controlled-NOT (controlled-X) gate::
CNOT = [[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0]]
This gate applies to two qubit arguments to produce the controlled-not gate instruction.
:param control: The control qubit.
:param target: The target qubit. The target qubit has an X-gate applied to it if the control
qubit is in the ``|1>`` state.
:returns: A Gate object.
"""
return Gate(name="CNOT", params=[], qubits=[unpack_qubit(q) for q in (control, target)])
def CCNOT(control1: QubitDesignator, control2: QubitDesignator, target: QubitDesignator) -> Gate:
"""Produces a doubly-controlled NOT gate::
CCNOT = [[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 0]]
This gate applies to three qubit arguments to produce the controlled-controlled-not gate
instruction.
:param control1: The first control qubit.
:param control2: The second control qubit.
:param target: The target qubit. The target qubit has an X-gate applied to it if both control
qubits are in the excited state.
:returns: A Gate object.
"""
qubits = [unpack_qubit(q) for q in (control1, control2, target)]
return Gate(name="CCNOT", params=[], qubits=qubits)
def CPHASE00(angle: ParameterDesignator, control: QubitDesignator, target: QubitDesignator) -> Gate:
"""Produces a controlled-phase gate that phases the ``|00>`` state::
CPHASE00(phi) = diag([exp(1j * phi), 1, 1, 1])
This gate applies to two qubit arguments to produce the variant of the controlled phase
instruction that affects the state 00.
:param angle: The input phase angle to apply when both qubits are in the ``|0>`` state.
:param control: Qubit 1.
:param target: Qubit 2.
:returns: A Gate object.
"""
qubits = [unpack_qubit(q) for q in (control, target)]
return Gate(name="CPHASE00", params=[angle], qubits=qubits)
def CPHASE01(angle: ParameterDesignator, control: QubitDesignator, target: QubitDesignator) -> Gate:
"""Produces a controlled-phase gate that phases the ``|01>`` state::
CPHASE01(phi) = diag([1.0, exp(1j * phi), 1.0, 1.0])
This gate applies to two qubit arguments to produce the variant of the controlled phase
instruction that affects the state 01.
:param angle: The input phase angle to apply when q1 is in the ``|1>`` state and q2 is in
the ``|0>`` state.
:param control: Qubit 1.
:param target: Qubit 2.
:returns: A Gate object.
"""
qubits = [unpack_qubit(q) for q in (control, target)]
return Gate(name="CPHASE01", params=[angle], qubits=qubits)
def CPHASE10(angle: ParameterDesignator, control: QubitDesignator, target: QubitDesignator) -> Gate:
"""Produces a controlled-phase gate that phases the ``|10>`` state::
CPHASE10(phi) = diag([1, 1, exp(1j * phi), 1])
This gate applies to two qubit arguments to produce the variant of the controlled phase
instruction that affects the state 10.
:param angle: The input phase angle to apply when q2 is in the ``|1>`` state and q1 is in
the ``|0>`` state.
:param control: Qubit 1.
:param target: Qubit 2.
:returns: A Gate object.
"""
qubits = [unpack_qubit(q) for q in (control, target)]
return Gate(name="CPHASE10", params=[angle], qubits=qubits)
# NOTE: We don't use ParameterDesignator here because of the following Sphinx error. This error
# can be resolved by importing Expression, but then flake8 complains about an unused import:
# Cannot resolve forward reference in type annotations of "pyquil.gates.CPHASE":
# name 'Expression' is not defined
def CPHASE(
angle: Union[Expression, MemoryReference, np.int_, int, float, complex],
control: QubitDesignator,
target: QubitDesignator,
) -> Gate:
"""Produces a controlled-phase instruction::
CPHASE(phi) = diag([1, 1, 1, exp(1j * phi)])
This gate applies to two qubit arguments to produce the variant of the controlled phase
instruction that affects the state 11.
Compare with the ``CPHASExx`` variants. This variant is the most common and does
not have a suffix, although you can think of it as ``CPHASE11``.
:param angle: The input phase angle to apply when both qubits are in the ``|1>`` state.
:param control: Qubit 1.
:param target: Qubit 2.
:returns: A Gate object.
"""
qubits = [unpack_qubit(q) for q in (control, target)]
return Gate(name="CPHASE", params=[angle], qubits=qubits)
def SWAP(q1: QubitDesignator, q2: QubitDesignator) -> Gate:
"""Produces a SWAP gate which swaps the state of two qubits::
SWAP = [[1, 0, 0, 0],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 0, 1]]
:param q1: Qubit 1.
:param q2: Qubit 2.
:returns: A Gate object.
"""
return Gate(name="SWAP", params=[], qubits=[unpack_qubit(q) for q in (q1, q2)])
def CSWAP(control: QubitDesignator, target_1: QubitDesignator, target_2: QubitDesignator) -> Gate:
"""Produces a controlled-SWAP gate. This gate conditionally swaps the state of two qubits::
CSWAP = [[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1]]
:param control: The control qubit.
:param target_1: The first target qubit.
:param target_2: The second target qubit. The two target states are swapped if the control is
in the ``|1>`` state.
"""
qubits = [unpack_qubit(q) for q in (control, target_1, target_2)]
return Gate(name="CSWAP", params=[], qubits=qubits)
def ISWAP(q1: QubitDesignator, q2: QubitDesignator) -> Gate:
"""Produces an ISWAP gate::
ISWAP = [[1, 0, 0, 0],
[0, 0, 1j, 0],
[0, 1j, 0, 0],
[0, 0, 0, 1]]
This gate swaps the state of two qubits, applying an i phase (per the 1j
entries above) to q1 when it is in the 1 state and to q2 when it is in the 0 state.
:param q1: Qubit 1.
:param q2: Qubit 2.
:returns: A Gate object.
"""
return Gate(name="ISWAP", params=[], qubits=[unpack_qubit(q) for q in (q1, q2)])
def PSWAP(angle: ParameterDesignator, q1: QubitDesignator, q2: QubitDesignator) -> Gate:
"""Produces a parameterized SWAP gate::
PSWAP(phi) = [[1, 0, 0, 0],
[0, 0, exp(1j * phi), 0],
[0, exp(1j * phi), 0, 0],
[0, 0, 0, 1]]
:param angle: The angle of the phase to apply to the swapped states. This phase is applied to
q1 when it is in the 1 state and to q2 when it is in the 0 state.
:param q1: Qubit 1.
:param q2: Qubit 2.
:returns: A Gate object.
"""
return Gate(name="PSWAP", params=[angle], qubits=[unpack_qubit(q) for q in (q1, q2)])
def XY(angle: ParameterDesignator, q1: QubitDesignator, q2: QubitDesignator) -> Gate:
"""Produces a parameterized ISWAP gate::
XY(phi) = [[1, 0, 0, 0],
[0, cos(phi/2), 1j * sin(phi/2), 0],
[0, 1j * sin(phi/2), cos(phi/2), 0],
[0, 0, 0, 1]]
:param angle: The angle of the rotation to apply to the population 1 subspace.
:param q1: Qubit 1.
:param q2: Qubit 2.
:returns: A Gate object.
"""
return Gate(name="XY", params=[angle], qubits=[unpack_qubit(q) for q in (q1, q2)])
WAIT = Wait()
"""
This instruction tells the quantum computation to wait. Typically this is used while classical
memory is being manipulated by a CPU in a hybrid classical/quantum algorithm.
:returns: A Wait object.
"""
def RESET(qubit_index: Optional[QubitDesignator] = None) -> Union[Reset, ResetQubit]:
"""
Reset all qubits or just one specific qubit.
:param qubit_index: The qubit to reset.
This can be a qubit's index, a Qubit, or a QubitPlaceholder.
If None, reset all qubits.
:returns: A Reset or ResetQubit Quil AST expression corresponding to a global or targeted
reset, respectively.
"""
if qubit_index is not None:
return ResetQubit(unpack_qubit(qubit_index))
else:
return Reset()
NOP = Nop()
"""
This instruction applies no operation at that timestep. Typically it is ignored in error models.
:returns: A Nop object.
"""
HALT = Halt()
"""
This instruction ends the program.
:returns: A Halt object.
"""
def DECLARE(
name: str,
memory_type: str = "BIT",
memory_size: int = 1,
shared_region: Optional[str] = None,
offsets: Optional[Iterable[Tuple[int, str]]] = None,
) -> Declare:
return Declare(
name=name,
memory_type=memory_type,
memory_size=memory_size,
shared_region=shared_region,
offsets=offsets,
)
def MEASURE(qubit: QubitDesignator, classical_reg: Optional[MemoryReferenceDesignator]) -> Measurement:
"""
Produce a MEASURE instruction.
:param qubit: The qubit to measure.
:param classical_reg: The classical register to measure into, or None.
:return: A Measurement instance.
"""
qubit = unpack_qubit(qubit)
if classical_reg is None:
address = None
else:
address = unpack_classical_reg(classical_reg)
return Measurement(qubit, address)
def NEG(classical_reg: MemoryReferenceDesignator) -> ClassicalNeg:
"""
Produce a NEG instruction.
:param classical_reg: A classical memory address to modify.
:return: A ClassicalNeg instance.
"""
return ClassicalNeg(unpack_classical_reg(classical_reg))
def NOT(classical_reg: MemoryReferenceDesignator) -> ClassicalNot:
"""
Produce a NOT instruction.
:param classical_reg: A classical register to modify.
:return: A ClassicalNot instance.
"""
return ClassicalNot(unpack_classical_reg(classical_reg))
def AND(
classical_reg1: MemoryReferenceDesignator, classical_reg2: Union[MemoryReferenceDesignator, int]
) -> ClassicalAnd:
"""
Produce an AND instruction.
NOTE: The order of operands was reversed in pyQuil <= 1.9.
:param classical_reg1: The first classical register, which gets modified.
:param classical_reg2: The second classical register or immediate value.
:return: A ClassicalAnd instance.
"""
left, right = unpack_reg_val_pair(classical_reg1, classical_reg2)
assert isinstance(right, (MemoryReference, int)) # placate mypy
return ClassicalAnd(left, right)
def IOR(
classical_reg1: MemoryReferenceDesignator, classical_reg2: Union[MemoryReferenceDesignator, int]
) -> ClassicalInclusiveOr:
"""
Produce an inclusive OR instruction.
:param classical_reg1: The first classical register, which gets modified.
:param classical_reg2: The second classical register or immediate value.
:return: A ClassicalInclusiveOr instance.
"""
left, right = unpack_reg_val_pair(classical_reg1, classical_reg2)
assert isinstance(right, (MemoryReference, int)) # placate mypy
return ClassicalInclusiveOr(left, right)
def XOR(
classical_reg1: MemoryReferenceDesignator, classical_reg2: Union[MemoryReferenceDesignator, int]
) -> ClassicalExclusiveOr:
"""
Produce an exclusive OR instruction.
:param classical_reg1: The first classical register, which gets modified.
:param classical_reg2: The second classical register or immediate value.
:return: A ClassicalExclusiveOr instance.
"""
left, right = unpack_reg_val_pair(classical_reg1, classical_reg2)
assert isinstance(right, (MemoryReference, int)) # placate mypy
return ClassicalExclusiveOr(left, right)
def MOVE(
classical_reg1: MemoryReferenceDesignator,
classical_reg2: Union[MemoryReferenceDesignator, int, float],
) -> ClassicalMove:
"""
Produce a MOVE instruction.
:param classical_reg1: The first classical register, which gets modified.
:param classical_reg2: The second classical register or immediate value.
:return: A ClassicalMove instance.
"""
left, right = unpack_reg_val_pair(classical_reg1, classical_reg2)
return ClassicalMove(left, right)
def EXCHANGE(classical_reg1: MemoryReferenceDesignator, classical_reg2: MemoryReferenceDesignator) -> ClassicalExchange:
"""
Produce an EXCHANGE instruction.
:param classical_reg1: The first classical register, which gets modified.
:param classical_reg2: The second classical register, which gets modified.
:return: A ClassicalExchange instance.
"""
left = unpack_classical_reg(classical_reg1)
right = unpack_classical_reg(classical_reg2)
return ClassicalExchange(left, right)
def LOAD(
target_reg: MemoryReferenceDesignator, region_name: str, offset_reg: MemoryReferenceDesignator
) -> ClassicalLoad:
"""
Produce a LOAD instruction.
:param target_reg: LOAD storage target.
:param region_name: Named region of memory to load from.
:param offset_reg: Offset into region of memory to load from. Must be a MemoryReference.
:return: A ClassicalLoad instance.
"""
return ClassicalLoad(unpack_classical_reg(target_reg), region_name, unpack_classical_reg(offset_reg))
def STORE(
region_name: str,
offset_reg: MemoryReferenceDesignator,
source: Union[MemoryReferenceDesignator, int, float],
) -> ClassicalStore:
"""
Produce a STORE instruction.
:param region_name: Named region of memory to store to.
:param offset_reg: Offset into memory region. Must be a MemoryReference.
:param source: Source data. Can be either a MemoryReference or a constant.
:return: A ClassicalStore instance.
"""
if not isinstance(source, int) and not isinstance(source, float):
source = unpack_classical_reg(source)
return ClassicalStore(region_name, unpack_classical_reg(offset_reg), source)
def CONVERT(classical_reg1: MemoryReferenceDesignator, classical_reg2: MemoryReferenceDesignator) -> ClassicalConvert:
"""
Produce a CONVERT instruction.
:param classical_reg1: MemoryReference to store to.
:param classical_reg2: MemoryReference to read from.
:return: A ClassicalConvert instance.
"""
return ClassicalConvert(unpack_classical_reg(classical_reg1), unpack_classical_reg(classical_reg2))
def ADD(classical_reg: MemoryReferenceDesignator, right: Union[MemoryReferenceDesignator, int, float]) -> ClassicalAdd:
"""
Produce an ADD instruction.
:param classical_reg: Left operand for the arithmetic operation. Also serves as the store
target.
:param right: Right operand for the arithmetic operation.
:return: A ClassicalAdd instance.
"""
left, right = unpack_reg_val_pair(classical_reg, right)
return ClassicalAdd(left, right)
def SUB(classical_reg: MemoryReferenceDesignator, right: Union[MemoryReferenceDesignator, int, float]) -> ClassicalSub:
"""
Produce a SUB instruction.
:param classical_reg: Left operand for the arithmetic operation. Also serves as the store
target.
:param right: Right operand for the arithmetic operation.
:return: A ClassicalSub instance.
"""
left, right = unpack_reg_val_pair(classical_reg, right)
return ClassicalSub(left, right)
def MUL(classical_reg: MemoryReferenceDesignator, right: Union[MemoryReferenceDesignator, int, float]) -> ClassicalMul:
"""
Produce a MUL instruction.
:param classical_reg: Left operand for the arithmetic operation. Also serves as the store
target.
:param right: Right operand for the arithmetic operation.
:return: A ClassicalMul instance.
"""
left, right = unpack_reg_val_pair(classical_reg, right)
return ClassicalMul(left, right)
def DIV(classical_reg: MemoryReferenceDesignator, right: Union[MemoryReferenceDesignator, int, float]) -> ClassicalDiv:
"""
    Produce a DIV instruction.
:param classical_reg: Left operand for the arithmetic operation. Also serves as the store
target.
:param right: Right operand for the arithmetic operation.
:return: A ClassicalDiv instance.
"""
left, right = unpack_reg_val_pair(classical_reg, right)
return ClassicalDiv(left, right)
def EQ(
classical_reg1: MemoryReferenceDesignator,
classical_reg2: MemoryReferenceDesignator,
classical_reg3: Union[MemoryReferenceDesignator, int, float],
) -> ClassicalEqual:
"""
Produce an EQ instruction.
:param classical_reg1: Memory address to which to store the comparison result.
:param classical_reg2: Left comparison operand.
:param classical_reg3: Right comparison operand.
:return: A ClassicalEqual instance.
"""
classical_reg1, classical_reg2, classical_reg3 = prepare_ternary_operands(
classical_reg1, classical_reg2, classical_reg3
)
return ClassicalEqual(classical_reg1, classical_reg2, classical_reg3)
def LT(
classical_reg1: MemoryReferenceDesignator,
classical_reg2: MemoryReferenceDesignator,
classical_reg3: Union[MemoryReferenceDesignator, int, float],
) -> ClassicalLessThan:
"""
Produce an LT instruction.
:param classical_reg1: Memory address to which to store the comparison result.
:param classical_reg2: Left comparison operand.
:param classical_reg3: Right comparison operand.
:return: A ClassicalLessThan instance.
"""
classical_reg1, classical_reg2, classical_reg3 = prepare_ternary_operands(
classical_reg1, classical_reg2, classical_reg3
)
return ClassicalLessThan(classical_reg1, classical_reg2, classical_reg3)
def LE(
classical_reg1: MemoryReferenceDesignator,
classical_reg2: MemoryReferenceDesignator,
classical_reg3: Union[MemoryReferenceDesignator, int, float],
) -> ClassicalLessEqual:
"""
Produce an LE instruction.
:param classical_reg1: Memory address to which to store the comparison result.
:param classical_reg2: Left comparison operand.
:param classical_reg3: Right comparison operand.
:return: A ClassicalLessEqual instance.
"""
classical_reg1, classical_reg2, classical_reg3 = prepare_ternary_operands(
classical_reg1, classical_reg2, classical_reg3
)
return ClassicalLessEqual(classical_reg1, classical_reg2, classical_reg3)
def GT(
classical_reg1: MemoryReferenceDesignator,
classical_reg2: MemoryReferenceDesignator,
classical_reg3: Union[MemoryReferenceDesignator, int, float],
) -> ClassicalGreaterThan:
"""
    Produce a GT instruction.
:param classical_reg1: Memory address to which to store the comparison result.
:param classical_reg2: Left comparison operand.
:param classical_reg3: Right comparison operand.
:return: A ClassicalGreaterThan instance.
"""
classical_reg1, classical_reg2, classical_reg3 = prepare_ternary_operands(
classical_reg1, classical_reg2, classical_reg3
)
return ClassicalGreaterThan(classical_reg1, classical_reg2, classical_reg3)
def GE(
classical_reg1: MemoryReferenceDesignator,
classical_reg2: MemoryReferenceDesignator,
classical_reg3: Union[MemoryReferenceDesignator, int, float],
) -> ClassicalGreaterEqual:
"""
    Produce a GE instruction.
:param classical_reg1: Memory address to which to store the comparison result.
:param classical_reg2: Left comparison operand.
:param classical_reg3: Right comparison operand.
:return: A ClassicalGreaterEqual instance.
"""
classical_reg1, classical_reg2, classical_reg3 = prepare_ternary_operands(
classical_reg1, classical_reg2, classical_reg3
)
return ClassicalGreaterEqual(classical_reg1, classical_reg2, classical_reg3)
def PULSE(frame: Frame, waveform: Waveform, nonblocking: bool = False) -> Pulse:
"""
Produce a PULSE instruction.
:param frame: The frame on which to apply the pulse.
:param waveform: The pulse waveform.
:param nonblocking: A flag indicating whether the pulse is NONBLOCKING.
:return: A Pulse instance.
"""
return Pulse(frame, waveform, nonblocking)
def SET_FREQUENCY(frame: Frame, freq: ParameterDesignator) -> SetFrequency:
"""
Produce a SET-FREQUENCY instruction.
:param frame: The frame on which to set the frequency.
:param freq: The frequency value, in Hz.
:returns: A SetFrequency instance.
"""
return SetFrequency(frame, freq)
def SHIFT_FREQUENCY(frame: Frame, freq: ParameterDesignator) -> ShiftFrequency:
"""
Produce a SHIFT-FREQUENCY instruction.
:param frame: The frame on which to shift the frequency.
:param freq: The value, in Hz, to add to the existing frequency.
:returns: A ShiftFrequency instance.
"""
return ShiftFrequency(frame, freq)
def SET_PHASE(frame: Frame, phase: ParameterDesignator) -> SetPhase:
"""
Produce a SET-PHASE instruction.
:param frame: The frame on which to set the phase.
:param phase: The new phase value, in radians.
:returns: A SetPhase instance.
"""
return SetPhase(frame, phase)
def SHIFT_PHASE(frame: Frame, phase: ParameterDesignator) -> ShiftPhase:
"""
Produce a SHIFT-PHASE instruction.
:param frame: The frame on which to shift the phase.
:param phase: The value, in radians, to add to the existing phase.
:returns: A ShiftPhase instance.
"""
return ShiftPhase(frame, phase)
def SWAP_PHASE(frameA: Frame, frameB: Frame) -> SwapPhase:
"""
Produce a SWAP-PHASE instruction.
    :param frameA: The first frame.
    :param frameB: The second frame.
:returns: A SwapPhase instance.
"""
return SwapPhase(frameA, frameB)
def SET_SCALE(frame: Frame, scale: ParameterDesignator) -> SetScale:
"""
Produce a SET-SCALE instruction.
:param frame: The frame on which to set the scale.
:param scale: The scaling factor.
:returns: A SetScale instance.
"""
return SetScale(frame, scale)
def CAPTURE(
frame: Frame,
kernel: Waveform,
memory_region: MemoryReferenceDesignator,
nonblocking: bool = False,
) -> Capture:
"""
Produce a CAPTURE instruction.
:param frame: The frame on which to capture an IQ value.
:param kernel: The integrating kernel for the capture.
:param memory_region: The classical memory region to store the resulting IQ value.
:param nonblocking: A flag indicating whether the capture is NONBLOCKING.
:returns: A Capture instance.
"""
memory_region = unpack_classical_reg(memory_region)
return Capture(frame, kernel, memory_region, nonblocking)
def RAW_CAPTURE(
frame: Frame,
duration: float,
memory_region: MemoryReferenceDesignator,
nonblocking: bool = False,
) -> RawCapture:
"""
Produce a RAW-CAPTURE instruction.
:param frame: The frame on which to capture raw values.
:param duration: The duration of the capture, in seconds.
:param memory_region: The classical memory region to store the resulting raw values.
:param nonblocking: A flag indicating whether the capture is NONBLOCKING.
:returns: A RawCapture instance.
"""
memory_region = unpack_classical_reg(memory_region)
return RawCapture(frame, duration, memory_region, nonblocking)
# Mypy doesn't support a complex type hint here on args. Particularly,
# you can't tell Mypy that args should always begin with an int, end
# with a float, and everything in between should be of a particular
# type T.
@no_type_check
def DELAY(*args) -> Union[DelayFrames, DelayQubits]:
"""
Produce a DELAY instruction.
Note: There are two variants of DELAY. One applies to specific frames on some
    qubit, e.g. `DELAY 0 "rf" "ff" 1.0` delays the `"rf"` and `"ff"` frames on qubit 0.
It is also possible to delay all frames on some qubits, e.g. `DELAY 0 1 2 1.0`.
:param args: A list of delay targets, ending with a duration.
:returns: A DelayFrames or DelayQubits instance.
"""
if len(args) < 2:
raise ValueError(
"Expected DELAY(t1,...,tn, duration). In particular, there "
"must be at least one target, as well as a duration."
)
targets, duration = args[:-1], args[-1]
if not isinstance(duration, (Expression, Real)):
raise TypeError("The last argument of DELAY must be a real or parametric duration.")
if all(isinstance(t, Frame) for t in targets):
return DelayFrames(targets, duration)
elif all(isinstance(t, (int, Qubit, FormalArgument)) for t in targets):
targets = [Qubit(t) if isinstance(t, int) else t for t in targets]
return DelayQubits(targets, duration)
else:
raise TypeError(
"DELAY targets must be either (i) a list of frames, or "
"(ii) a list of qubits / formal arguments. "
f"Got {args}."
)
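# Illustrative sketch (not part of the original module): the two DELAY variants
# described in the docstring above. The Frame(qubits, name) constructor signature is
# an assumption based on pyquil's quilatom module; treat this as an example, not
# canonical API documentation.
def _example_delay_variants():
    rf = Frame([Qubit(0)], "rf")
    ff = Frame([Qubit(0)], "ff")
    frame_delay = DELAY(rf, ff, 1.0e-6)   # delays only the "rf" and "ff" frames on qubit 0
    qubit_delay = DELAY(0, 1, 2, 1.0e-6)  # delays all frames on qubits 0, 1 and 2
    return frame_delay, qubit_delay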
def FENCE(*qubits: Union[int, Qubit, FormalArgument]) -> Union[FenceAll, Fence]:
"""
Produce a FENCE instruction.
Note: If no qubits are specified, then this is interpreted as a global FENCE.
    :param qubits: A list of qubits or formal arguments.
:returns: A Fence or FenceAll instance.
"""
if qubits:
return Fence([Qubit(t) if isinstance(t, int) else t for t in qubits])
else:
return FenceAll()
QUANTUM_GATES: Mapping[str, Callable[..., Gate]] = {
"I": I,
"X": X,
"Y": Y,
"Z": Z,
"H": H,
"S": S,
"T": T,
"PHASE": PHASE,
"RX": RX,
"RY": RY,
"RZ": RZ,
"CZ": CZ,
"CNOT": CNOT,
"CCNOT": CCNOT,
"CPHASE00": CPHASE00,
"CPHASE01": CPHASE01,
"CPHASE10": CPHASE10,
"CPHASE": CPHASE,
"SWAP": SWAP,
"CSWAP": CSWAP,
"ISWAP": ISWAP,
"PSWAP": PSWAP,
"XY": XY,
}
"""
Dictionary of quantum gate functions keyed by gate names.
"""
STANDARD_GATES = QUANTUM_GATES
"""
Alias for the above dictionary of quantum gates.
"""
QUILT_INSTRUCTIONS: Mapping[str, Callable[..., AbstractInstruction]] = {
"PULSE": PULSE,
"SET-FREQUENCY": SET_FREQUENCY,
"SHIFT-FREQUENCY": SHIFT_FREQUENCY,
"SET-PHASE": SET_PHASE,
"SHIFT-PHASE": SHIFT_PHASE,
"SWAP-PHASE": SWAP_PHASE,
"SET-SCALE": SET_SCALE,
"CAPTURE": CAPTURE,
"RAW-CAPTURE": RAW_CAPTURE,
"DELAY": DELAY,
"FENCE": FENCE,
}
"""
Dictionary of Quil-T AST construction functions keyed by instruction name.
"""
STANDARD_INSTRUCTIONS: Mapping[str, Union[AbstractInstruction, Callable[..., AbstractInstruction]]] = {
"WAIT": WAIT,
"RESET": RESET,
"DECLARE": DECLARE,
"NOP": NOP,
"HALT": HALT,
"MEASURE": MEASURE,
"NOT": NOT,
"AND": AND,
"MOVE": MOVE,
"EXCHANGE": EXCHANGE,
"IOR": IOR,
"XOR": XOR,
"NEG": NEG,
"ADD": ADD,
"SUB": SUB,
"MUL": MUL,
"DIV": DIV,
"EQ": EQ,
"GT": GT,
"GE": GE,
"LE": LE,
"LT": LT,
"LOAD": LOAD,
"STORE": STORE,
"CONVERT": CONVERT,
}
"""
Dictionary of standard instruction functions keyed by instruction names.
"""
__all__ = (
list(QUANTUM_GATES.keys())
+ list(fn.__name__ for fn in QUILT_INSTRUCTIONS.values())
+ list(STANDARD_INSTRUCTIONS.keys())
+ ["Gate", "QUANTUM_GATES", "STANDARD_GATES", "QUILT_INSTRUCTIONS", "STANDARD_INSTRUCTIONS"]
)
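# Illustrative usage sketch (not part of the original module): combining the gate and
# instruction constructors above into a program. The Program import path and its
# declare()/+= API are assumptions based on pyquil's public interface.
def _example_bell_state_program():
    from pyquil.quil import Program  # assumed import location; kept local to avoid a circular import
    prog = Program()
    ro = prog.declare("ro", "BIT", 2)
    prog += H(0)
    prog += CNOT(0, 1)
    prog += MEASURE(0, ro[0])
    prog += MEASURE(1, ro[1])
    return prog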
|
tests/terraform/checks/resource/aws/test_AthenaWorkgroupConfiguration.py | antonblr/checkov | 4,013 | 11069525 | import unittest
from checkov.common.models.enums import CheckResult
from checkov.terraform.checks.resource.aws.AthenaWorkgroupConfiguration import check
class TestAthenaWorkgroupConfiguration(unittest.TestCase):
def test_failure(self):
resource_conf = {
"name": "Example",
"configuration": [
{
"enforce_workgroup_configuration": False,
}
],
}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success(self):
resource_conf = {
"name": "Example",
}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
def test_success_full(self):
resource_conf = {
"name": "Example",
"configuration": [
{
"enforce_workgroup_configuration": True,
}
],
}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
unittest.main()
|
custom/m4change/reports/anc_hmis_report.py | dimagilg/commcare-hq | 471 | 11069531 | <reponame>dimagilg/commcare-hq<gh_stars>100-1000
from django.utils.translation import ugettext as _
from corehq.apps.locations.permissions import location_safe
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, NumericColumn
from corehq.apps.reports.filters.select import MonthFilter, YearFilter
from corehq.apps.reports.standard import MonthYearMixin
from corehq.apps.reports.standard.cases.basic import CaseListReport
from custom.common.filters import RestrictedAsyncLocationFilter
from custom.m4change.reports import validate_report_parameters, get_location_hierarchy_by_id
from custom.m4change.reports.reports import M4ChangeReport
from custom.m4change.reports.sql_data import AncHmisCaseSqlData
def _get_row(row_data, form_data, key):
data = form_data.get(key)
rows = dict([(row_key, data.get(row_key, 0)) for row_key in row_data])
for key in rows:
        if rows.get(key) is None:
rows[key] = 0
rows["antenatal_first_visit_total"] = rows.get("attendance_before_20_weeks_total") \
+ rows.get("attendance_after_20_weeks_total")
return rows
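# Illustrative sketch (not part of the original module): what _get_row does for a
# single (domain, location) key. The field names are a small subset chosen for the
# example; None and missing values are coerced to 0 and the derived
# antenatal_first_visit_total is added.
def _example_get_row_usage():
    row_data = {
        "attendance_before_20_weeks_total": {"value": 0},
        "attendance_after_20_weeks_total": {"value": 0},
    }
    form_data = {
        ("some-domain", "loc-1"): {
            "attendance_before_20_weeks_total": 3,
            "attendance_after_20_weeks_total": None,
        }
    }
    # Returns {"attendance_before_20_weeks_total": 3,
    #          "attendance_after_20_weeks_total": 0,
    #          "antenatal_first_visit_total": 3}
    return _get_row(row_data, form_data, ("some-domain", "loc-1"))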
@location_safe
class AncHmisReport(MonthYearMixin, CaseListReport, M4ChangeReport):
ajax_pagination = False
asynchronous = True
exportable = True
emailable = False
name = "Facility ANC HMIS Report"
slug = "facility_anc_hmis_report"
default_rows = 25
base_template = "m4change/report.html"
report_template_path = "m4change/anc_hmis_report_content.html"
fields = [
RestrictedAsyncLocationFilter,
MonthFilter,
YearFilter
]
@classmethod
def get_report_data(cls, config):
validate_report_parameters(["domain", "location_id", "datespan"], config)
domain = config["domain"]
location_id = config["location_id"]
user = config["user"]
sql_data = AncHmisCaseSqlData(domain=domain, datespan=config["datespan"]).data
locations = get_location_hierarchy_by_id(location_id, domain, user)
row_data = AncHmisReport.get_initial_row_data()
for location_id in locations:
key = (domain, location_id)
if key in sql_data:
report_rows = _get_row(row_data, sql_data, key)
for key in report_rows:
row_data.get(key)["value"] += report_rows.get(key)
return sorted([(key, row_data[key]) for key in row_data], key=lambda t: t[1].get("hmis_code"))
@classmethod
def get_initial_row_data(cls):
return {
"attendance_total": {
"hmis_code": 3, "label": _("Antenatal Attendance - Total"), "value": 0
},
"attendance_before_20_weeks_total": {
"hmis_code": 4, "label": _("Antenatal first Visit before 20wks"), "value": 0
},
"attendance_after_20_weeks_total": {
"hmis_code": 5, "label": _("Antenatal first Visit after 20wks"), "value": 0
},
"antenatal_first_visit_total": {
"hmis_code": 6, "label": _("Antenatal first visit - total"), "value": 0
},
"attendance_gte_4_visits_total": {
"hmis_code": 7, "label": _("Pregnant women that attend antenatal clinic for 4th visit during the month"), "value": 0
},
'anc_syphilis_test_done_total': {
"hmis_code": 8, "label": _("ANC syphilis test done"), "value": 0
},
'anc_syphilis_test_positive_total': {
"hmis_code": 9, "label": _("ANC syphilis test positive"), "value": 0
},
'anc_syphilis_case_treated_total': {
"hmis_code": 10, "label": _("ANC syphilis case treated"), "value": 0
},
'pregnant_mothers_receiving_ipt1_total': {
"hmis_code": 11, "label": _("Pregnant women who receive malaria IPT1"), "value": 0
},
'pregnant_mothers_receiving_ipt2_total': {
"hmis_code": 12, "label": _("Pregnant women who receive malaria IPT2"), "value": 0
},
'pregnant_mothers_receiving_llin_total': {
"hmis_code": 13, "label": _("Pregnant women who receive malaria LLIN"), "value": 0
},
'pregnant_mothers_receiving_ifa_total': {
"hmis_code": 14, "label": _("Pregnant women who receive malaria Haematinics"), "value": 0
},
'postnatal_attendance_total': {
"hmis_code": 15, "label": _("Postnatal Attendance - Total"), "value": 0
},
'postnatal_clinic_visit_lte_1_day_total': {
"hmis_code": 16, "label": _("Postnatal clinic visit within 1 day of delivery"), "value": 0
},
'postnatal_clinic_visit_lte_3_days_total': {
"hmis_code": 17, "label": _("Postnatal clinic visit within 3 days of delivery"), "value": 0
},
'postnatal_clinic_visit_gte_7_days_total': {
"hmis_code": 18, "label": _("Postnatal clinic visit >= 7 days of delivery"), "value": 0
}
}
@property
def headers(self):
headers = DataTablesHeader(NumericColumn(_("HMIS code")),
DataTablesColumn(_("Data Point")),
NumericColumn(_("Total")))
return headers
@property
def rows(self):
row_data = AncHmisReport.get_report_data({
"location_id": self.request.GET.get("location_id", None),
"datespan": self.datespan,
"domain": str(self.domain),
"user": self.request.couch_user
})
for row in row_data:
yield [
self.table_cell(row[1].get("hmis_code")),
self.table_cell(row[1].get("label")),
self.table_cell(row[1].get("value"))
]
@property
def rendered_report_title(self):
return self.name
|
endgame/exposure_via_resource_policies/iam.py | vikrum/endgame | 224 | 11069533 | import logging
import json
import boto3
import botocore
from abc import ABC
from botocore.exceptions import ClientError
from endgame.shared import constants
from endgame.exposure_via_resource_policies.common import ResourceType, ResourceTypes
from endgame.shared.policy_document import PolicyDocument
from endgame.shared.list_resources_response import ListResourcesResponse
from endgame.shared.response_message import ResponseMessage
from endgame.shared.response_message import ResponseGetRbp
logger = logging.getLogger(__name__)
class IAMRole(ResourceType, ABC):
def __init__(self, name: str, region: str, client: boto3.Session.client, current_account_id: str):
self.service = "iam"
self.resource_type = "role"
self.region = region
self.current_account_id = current_account_id
self.name = name
# Override parent values due to IAM being special
# Don't include the "Resource" block in the policy, or else the policy update will fail
# Instead of iam:*, we want to give sts:AssumeRole
self.include_resource_block = False
self.override_action = "sts:AssumeRole"
super().__init__(name, self.resource_type, self.service, region, client, current_account_id,
include_resource_block=self.include_resource_block, override_action=self.override_action)
@property
def arn(self) -> str:
return f"arn:aws:{self.service}::{self.current_account_id}:{self.resource_type}/{self.name}"
def _get_rbp(self) -> ResponseGetRbp:
"""Get the resource based policy for this resource and store it"""
logger.debug("Getting resource policy for %s" % self.arn)
try:
response = self.client.get_role(RoleName=self.name)
policy = response.get("Role").get("AssumeRolePolicyDocument")
success = True
except self.client.exceptions.NoSuchEntityException:
logger.critical(f"There is no resource with the name {self.name}")
policy = constants.get_empty_policy()
success = False
except botocore.exceptions.ClientError:
# When there is no policy, let's return an empty policy to avoid breaking things
policy = constants.get_empty_policy()
success = False
policy_document = PolicyDocument(
policy=policy,
service=self.service,
override_action=self.override_action,
include_resource_block=self.include_resource_block,
override_resource_block=self.override_resource_block,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
)
response = ResponseGetRbp(policy_document=policy_document, success=success)
return response
def set_rbp(self, evil_policy: dict) -> ResponseMessage:
logger.debug("Setting resource policy for %s" % self.arn)
new_policy = json.dumps(evil_policy)
try:
self.client.update_assume_role_policy(RoleName=self.name, PolicyDocument=new_policy)
message = "success"
success = True
except botocore.exceptions.ClientError as error:
message = str(error)
logger.critical(error)
success = False
response_message = ResponseMessage(message=message, operation="set_rbp", success=success, evil_principal="",
victim_resource_arn=self.arn, original_policy=self.original_policy,
updated_policy=evil_policy, resource_type=self.resource_type,
resource_name=self.name, service=self.service)
return response_message
class IAMRoles(ResourceTypes):
def __init__(self, client: boto3.Session.client, current_account_id: str, region: str):
super().__init__(client, current_account_id, region)
self.service = "iam"
self.resource_type = "role"
@property
def resources(self) -> [ListResourcesResponse]:
"""Get a list of these resources"""
resources = []
paginator = self.client.get_paginator("list_roles")
page_iterator = paginator.paginate()
for page in page_iterator:
roles = page["Roles"]
for role in roles:
path = role.get("Path")
arn = role.get("Arn")
name = role.get("RoleName")
# Special case: Ignore Service Linked Roles
if path.startswith("/aws-service-role/"):
# if path == "/service-role/" or path.startswith("/aws-service-role/"):
continue
list_resources_response = ListResourcesResponse(
service=self.service, account_id=self.current_account_id, arn=arn, region=self.region,
resource_type=self.resource_type, name=name)
resources.append(list_resources_response)
return resources
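# Illustrative sketch (not part of the original module): the kind of "evil" trust
# policy that IAMRole.set_rbp() applies. The account ids and role name below are
# made-up placeholders; only the sts:AssumeRole action comes from the class above.
_EXAMPLE_EVIL_TRUST_POLICY = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "AllowCurrentAccount",
            "Effect": "Allow",
            "Principal": {"AWS": "arn:aws:iam::111111111111:root"},
            "Action": "sts:AssumeRole",
        },
        {
            "Sid": "Endgame",
            "Effect": "Allow",
            "Principal": {"AWS": "arn:aws:iam::999988887777:root"},  # hypothetical external principal
            "Action": "sts:AssumeRole",
        },
    ],
}
# Rough usage, assuming an existing boto3 IAM client:
#     role = IAMRole(name="victim-role", region="us-east-1", client=iam_client,
#                    current_account_id="111111111111")
#     role.set_rbp(_EXAMPLE_EVIL_TRUST_POLICY)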
|
djangox/lib/python3.8/site-packages/oauthlib/oauth2/rfc6749/endpoints/__init__.py | DemarcusL/django_wiki_lab | 954 | 11069545 | <reponame>DemarcusL/django_wiki_lab
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
from .authorization import AuthorizationEndpoint
from .introspect import IntrospectEndpoint
from .metadata import MetadataEndpoint
from .pre_configured import (
BackendApplicationServer, LegacyApplicationServer, MobileApplicationServer,
Server, WebApplicationServer,
)
from .resource import ResourceEndpoint
from .revocation import RevocationEndpoint
from .token import TokenEndpoint
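# Illustrative sketch (not part of the original module): the pre-configured servers
# re-exported above are normally constructed with a RequestValidator subclass. The
# RequestValidator import path is an assumption about oauthlib's layout, and a real
# validator must override the hooks required by the grant types it serves.
def _example_web_application_server():
    from oauthlib.oauth2.rfc6749.request_validator import RequestValidator
    class _ExampleValidator(RequestValidator):
        pass  # a real validator overrides client, redirect URI and token hooks
    return WebApplicationServer(_ExampleValidator())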
|
setup.py | gmossessian/DAWG-1 | 153 | 11069562 | #! /usr/bin/env python
import glob
from setuptools import setup, Extension
setup(
name="DAWG",
version="0.8.0",
description="Fast and memory efficient DAWG (DAFSA) for Python",
long_description=open('README.rst').read() + '\n\n' + open('CHANGES.rst').read(),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/pytries/DAWG/',
ext_modules=[
Extension(
"dawg",
sources=glob.glob('src/*.cpp') + glob.glob('lib/b64/*.c'),
include_dirs=['lib'],
language="c++",
)
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Cython',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Text Processing :: Linguistic',
],
)
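# Illustrative usage sketch (not part of the original setup script): once the
# extension is built and installed it is importable as `dawg`. The API below follows
# the pytries/DAWG README and should be treated as an example, not a specification.
def _example_dawg_usage():
    import dawg
    d = dawg.DAWG(["foo", "bar", "foobar"])
    return "foo" in d, d.prefixes("foobarbaz")  # expected: (True, ['foo', 'foobar'])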
|
src/benchmarks/gc/src/commonlib/document.py | BruceForstall/performance | 547 | 11069629 | <reponame>BruceForstall/performance
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
from collections.abc import Sequence as ABCSequence
from dataclasses import dataclass
from enum import Enum
from io import StringIO
from math import ceil, floor
from os import get_terminal_size, terminal_size
from pathlib import Path
from typing import Callable, cast, Iterable, Mapping, Optional, List, Sequence, Tuple, Union
from psutil import Process
from termcolor import colored
from xlsxwriter import Workbook
from yattag import Doc, indent as indent_xml
from yattag.simpledoc import SimpleDoc
from .collection_util import (
empty_sequence,
index_of_max,
is_empty,
try_index,
with_is_last,
zip_check,
zip_check_3,
)
from .option import map_option, optional_to_iter, option_or
from .type_utils import argument, check_cast, with_slots
from .util import float_to_str, get_command_line, os_is_windows
Tag = SimpleDoc.Tag
# Parameters: text, bold, color
_ColorType = Callable[[str, bool, Optional[str]], str]
class Align(Enum):
left = 0
right = 1
center = 2
CellValueSingle = Union[str, float, int, None]
CellValue = Union[CellValueSingle, Sequence[CellValueSingle]]
@with_slots
@dataclass(frozen=True)
class Cell:
value: CellValue = None
align: Align = Align.right
color: Optional[str] = None
bold: bool = False
@with_slots
@dataclass(frozen=True)
class HeaderGroup:
text: str
size_cells: int
def __post_init__(self) -> None:
assert self.size_cells > 0
Row = Sequence[Cell]
@with_slots
@dataclass(frozen=True)
class Table:
name: Optional[str] = None
text: Optional[str] = None
headers: Optional[Sequence[str]] = None
rows: Sequence[Row] = ()
header_groups: Optional[Sequence[HeaderGroup]] = None
@property
def n_columns(self) -> int:
if self.headers is not None:
return len(self.headers)
elif not is_empty(self.rows):
return len(self.rows[0])
else:
return 0
def __post_init__(self) -> None:
n_columns = self.n_columns
assert self.headers is None or len(self.headers) == n_columns
assert (
self.header_groups is None or sum(x.size_cells for x in self.header_groups) == n_columns
)
for row in self.rows:
assert (
len(row) == n_columns
), f"Row has {len(row)} entries but table has {n_columns} column headers"
@with_slots
@dataclass(frozen=True)
class Section:
name: Optional[str] = None
text: Optional[str] = None
tables: Sequence[Table] = ()
@property
def n_columns(self) -> int:
return 0 if is_empty(self.tables) else self.tables[0].n_columns
def __post_init__(self) -> None:
for table in self.tables:
assert table.n_columns == self.n_columns
@with_slots
@dataclass(frozen=True)
class Document:
comment: Optional[str] = None
sections: Sequence[Section] = empty_sequence()
def single_table_document(table: Table) -> Document:
return Document(sections=(Section(tables=(table,)),))
def _pad(width: int, s: str, align: Align) -> str:
assert len(s) <= width, (
f"Line '{s}' (len {len(s)}) is bigger than allowed width {width}.\n"
+ "(This is a bug in document.py)"
)
pad = width - len(s)
switch: Mapping[Align, Callable[[], str]] = {
Align.left: lambda: s + " " * pad,
Align.right: lambda: " " * pad + s,
Align.center: lambda: " " * floor(pad / 2) + s + " " * ceil(pad / 2),
}
return switch[align]()
_SUPPORTED_TERMINALS = {"ConEmuC64.exe", "WindowsTerminal.exe"}
def _shell_supports_color() -> bool:
if os_is_windows():
py = _get_py_process()
parent = py.parent()
if parent.name() in ("Code.exe", "jupyter-notebook.exe"):
return True
else:
assert parent.name() in ("powershell.exe", "cmd.exe", "wsl.exe")
shell = parent.parent()
return shell.name() in _SUPPORTED_TERMINALS
else:
return True
def _get_py_process() -> Process:
p = Process()
assert p.name() == "python.exe"
par = p.parent()
# In jupyter notebook in code, "python.exe" has "python.exe" as a parent, then "Code.exe"
return par if par.name() in ("py.exe", "python.exe") else p
class SpecialOutputWidth(Enum):
inf = 0
OutputWidth = Union[int, SpecialOutputWidth]
def print_document(
doc: Document,
max_width: Optional[OutputWidth] = None,
table_indent: Optional[int] = None,
color: Optional[bool] = False,
) -> None:
# Add an additional newline in front
# Because jupyter notebook likes removing leading spaces (messing up indent),
    # but won't remove a leading newline.
text = "\n" + _render_to_text(
doc=doc,
color=yes_color if option_or(color, _shell_supports_color()) else no_color,
# Need a cast due to https://github.com/python/mypy/issues/6751
max_width=cast(OutputWidth, option_or(max_width, _get_terminal_width())),
op_table_indent=option_or(table_indent, 2),
)
# Avoid https://bugs.python.org/issue37871
for line in text.split("\n"):
print(line)
def _get_terminal_width() -> OutputWidth:
try:
term_size: Optional[terminal_size] = get_terminal_size()
except OSError:
term_size = None
if term_size is None:
return SpecialOutputWidth.inf
else:
# PowerShell wraps to a blank line if you fill in exactly the terminal width.
# So reduce by 1.
res = term_size.columns - 1
assert 20 < res < 2000
return res
def yes_color(text: str, bold: bool, color: Optional[str]) -> str:
return colored(text, color, attrs=("bold",) if bold else ())
def no_color(text: str, _bold: bool, _color: Optional[str]) -> str:
return text
def _render_to_plaintext(
doc: Document, max_width: Optional[OutputWidth], table_indent: Optional[int]
) -> str:
return _render_to_text(
doc,
no_color,
# Need a cast due to https://github.com/python/mypy/issues/6751
max_width=cast(OutputWidth, option_or(max_width, SpecialOutputWidth.inf)),
op_table_indent=table_indent,
)
def _render_to_excel(doc: Document, file_name: Path) -> None:
"""WARN: This is untested."""
workbook = Workbook(str(file_name))
worksheet = workbook.add_worksheet()
row_index = 0
def next_row() -> int:
nonlocal row_index
res = row_index
row_index += 1
return res
if doc.comment is not None:
raise Exception("TODO: render doc comment to excel")
for section in doc.sections:
next_row()
if section.name is not None:
worksheet.write(next_row(), 0, section.name)
if section.text is not None:
raise Exception("TODO: render section text to excel")
next_row()
for table in section.tables:
if table.name is not None:
worksheet.write(next_row(), 0, table.name)
if table.text is not None:
raise Exception("TODO: render table text to excel")
assert table.header_groups is None
if table.headers is not None:
for i, header in enumerate(table.headers):
worksheet.write(row_index, i, header)
next_row()
for row in table.rows:
for i, value in enumerate(row):
                    if value.color is not None or value.bold:
raise Exception("TODO: render to excel with bold or colored cells")
worksheet.write(row_index, i, "" if value.value is None else value.value)
workbook.close()
def _render_to_text(
doc: Document, color: _ColorType, max_width: OutputWidth, op_table_indent: Optional[int]
) -> str:
table_indent = option_or(op_table_indent, 2)
out = StringIO()
def write(s: str) -> None:
out.write(s)
def nl(n: int = 1) -> None:
write("\n" * n)
if doc.comment is not None:
write(doc.comment)
nl(2)
for section in doc.sections:
cell_sizes = _get_cell_sizes_for_section(
section,
max_table_width=max_width
if isinstance(max_width, SpecialOutputWidth)
else max_width - table_indent,
)
if section.name is not None:
_write_in_box(write, section.name, total_width=_sum_cell_sizes(cell_sizes))
nl(2)
if section.text is not None:
write(section.text)
nl(2)
for table in section.tables:
_render_table_to_text(table, write, color, cell_sizes, table_indent)
nl(2)
nl()
return out.getvalue()
Write = Callable[[str], None]
_VERTICAL_BAR = "│"
_HORIZONTAL_BAR = "─"
_HORIZONTAL_AND_VERTICAL_BAR = "┼"
_TOP_LEFT_CORNER = "┌"
_TOP_RIGHT_CORNER = "┐"
_BOTTOM_LEFT_CORNER = "└"
_BOTTOM_RIGHT_CORNER = "┘"
def _write_in_box(write: Write, s: str, total_width: int) -> None:
width = _max_line_len(s) + 4
pad = " " * ((total_width - width) // 2) if width < total_width else ""
write(f"{pad}{_TOP_LEFT_CORNER}{_HORIZONTAL_BAR * (width - 2)}{_TOP_RIGHT_CORNER}\n")
for line in _lines(s):
write(f"{pad}{_VERTICAL_BAR} {_pad(width - 4, line, Align.left)} {_VERTICAL_BAR}\n")
write(f"{pad}{_BOTTOM_LEFT_CORNER}{_HORIZONTAL_BAR * (width - 2)}{_BOTTOM_RIGHT_CORNER}\n")
_MIN_CELL_SIZE = 3
_SPACE_BETWEEN_COLUMNS = 3
assert _SPACE_BETWEEN_COLUMNS % 2 == 1
_HALF_SPACE_BETWEEN_COLUMNS = 1
assert _HALF_SPACE_BETWEEN_COLUMNS * 2 + 1 == _SPACE_BETWEEN_COLUMNS
_HALF_SPACE_BETWEEN_COLUMNS_STR = " " * _HALF_SPACE_BETWEEN_COLUMNS
_BETWEEN_COLUMNS_STR = (
f"{_HALF_SPACE_BETWEEN_COLUMNS_STR}{_VERTICAL_BAR}{_HALF_SPACE_BETWEEN_COLUMNS_STR}"
)
_HALF_HORIZ = _HORIZONTAL_BAR * _HALF_SPACE_BETWEEN_COLUMNS
_HORIZ_BETWEEN_COLUMNS_STR = f"{_HALF_HORIZ}{_HORIZONTAL_AND_VERTICAL_BAR}{_HALF_HORIZ}"
# WARN: Cell size is the *interior* size of each cell, does not include _SPACE_BETWEEN_COLUMNS
# Each table in a section must have the same number of columns, so this is for the whole section.
def _get_cell_sizes_for_section(section: Section, max_table_width: OutputWidth) -> Sequence[int]:
assert isinstance(max_table_width, SpecialOutputWidth) or max_table_width > _sum_cell_sizes(
_MIN_CELL_SIZE for _ in range(section.n_columns)
), f"Can't squeeze a {section.n_columns}-column table with a width of only {max_table_width}"
cell_sizes = _get_cell_sizes_ignoring_max_width(section)
while (
not isinstance(max_table_width, SpecialOutputWidth)
and _sum_cell_sizes(cell_sizes) > max_table_width
):
# Find the largest cell size and reduce it
# TODO: We should actually be trying to minimize the number of line breaks
# we'll have to insert to fit text into the smaller width.
i = index_of_max(cell_sizes)
assert cell_sizes[i] > _SPACE_BETWEEN_COLUMNS
cell_sizes[i] -= 1
return cell_sizes
def _get_cell_sizes_ignoring_max_width(section: Section) -> List[int]:
cell_sizes = [0 for _ in range(section.n_columns)]
for table in section.tables:
for i in range(table.n_columns):
cell_sizes[i] = max(
cell_sizes[i],
0 if table.headers is None else len(table.headers[i]),
*(_max_line_len(_to_str(r[i].value)) for r in table.rows),
)
# header_groups will expand the last column so the header will fit
if table.header_groups is not None:
i = 0
for hg in table.header_groups:
end = i + hg.size_cells
cur = _sum_cell_sizes([cell_sizes[j] for j in range(i, end)])
diff = _max_line_len(hg.text) - cur
if diff > 0:
cell_sizes[end - 1] += diff
i = end
return cell_sizes
def _sum_cell_sizes(cell_sizes: Iterable[int]) -> int:
total, count = _sum_and_count(cell_sizes)
return total + (_SPACE_BETWEEN_COLUMNS * (count - 1))
def _sum_and_count(i: Iterable[int]) -> Tuple[int, int]:
total = 0
count = 0
for x in i:
total += x
count += 1
return total, count
def _render_table_to_text(
table: Table, write: Write, color: _ColorType, cell_sizes: Sequence[int], indent: int
) -> None:
if table.name is not None:
write(table.name + "\n\n")
if table.text is not None:
write(table.text + "\n\n")
if table.header_groups is not None:
_write_header_groups(table.header_groups, write, cell_sizes, indent)
if table.headers is not None:
_write_cells([Cell(h) for h in table.headers], write, no_color, cell_sizes, indent)
_write_between_rows(write, cell_sizes, indent)
for is_last, row in with_is_last(table.rows):
_write_cells(row, write, color, cell_sizes, indent)
if not is_last:
_write_between_rows(write, cell_sizes, indent)
def _write_header_groups(
header_groups: Sequence[HeaderGroup], write: Write, cell_sizes: Sequence[int], indent: int
) -> None:
group_sizes = _get_header_group_sizes(cell_sizes, [group.size_cells for group in header_groups])
_write_cells(
[Cell(group.text) for group in header_groups], write, no_color, group_sizes, indent
)
def _get_header_group_sizes(
cell_sizes: Sequence[int], group_sizes_in_cells: Sequence[int]
) -> Sequence[int]:
cell_i = 0
def group_size_columns(group_size_cells: int) -> int:
nonlocal cell_i
old_cell_i = cell_i
cell_i = cell_i + group_size_cells
return _sum_cell_sizes(cell_sizes[old_cell_i:cell_i])
group_cell_sizes = [
group_size_columns(group_size_cells) for group_size_cells in group_sizes_in_cells
]
assert cell_i == len(cell_sizes)
assert _sum_cell_sizes(cell_sizes) == _sum_cell_sizes(group_cell_sizes)
return group_cell_sizes
def _write_cells(
cells: Sequence[Cell], write: Write, color: _ColorType, cell_sizes: Sequence[int], indent: int
) -> None:
cell_lines = [
_split_text_to_lines(_to_str(cell.value), cell_size)
for cell_size, cell in zip_check(cell_sizes, cells)
]
n_lines = max(len(lines) for lines in cell_lines)
for line_index in range(n_lines):
_write_indent(write, indent)
for is_last, (cell_size, cell, lines) in with_is_last(
zip_check_3(cell_sizes, cells, cell_lines)
):
line_text = lines[line_index] if line_index < len(lines) else ""
assert len(line_text) <= cell_size
write(color(_pad(cell_size, line_text, cell.align), cell.bold, cell.color))
if not is_last:
write(_BETWEEN_COLUMNS_STR)
write("\n")
def _write_between_rows(write: Write, cell_sizes: Sequence[int], indent: int) -> None:
_write_indent(write, indent)
for is_last, i in with_is_last(range(len(cell_sizes))):
write(_HORIZONTAL_BAR * cell_sizes[i])
if not is_last:
write(_HORIZ_BETWEEN_COLUMNS_STR)
write("\n")
def _write_indent(write: Write, indent: int) -> None:
write(" " * indent)
def _render_to_html(document: Document) -> str:
doc = Doc()
tag = doc.tag
text = doc.text
line = doc.line
with tag("html"):
with tag("head"):
with tag("style"):
text(
"""
table {
border-collapse: collapse;
}
tr, td, th {
border: solid;
}
tr {
border-width: 1px 0;
}
td, th {
border-width: 0 1px;
}
div {
margin: 1em;
}
"""
)
with tag("body"):
if document.comment is not None:
text(document.comment)
for section in document.sections:
with tag("div"):
if section.name is not None:
line("h1", section.name)
if section.text is not None:
text(section.text)
for table in section.tables:
_render_table_to_html(doc, table)
return indent_xml(doc.getvalue(), indent_text=True)
def _render_table_to_html(doc: Doc, table: Table) -> None:
tag = doc.tag
line = doc.line
text = doc.text
if table.name is not None:
line("h2", table.name)
if table.text is not None:
raise Exception("TODO")
with tag("table", style="width: 100%"): # TODO: use css
if table.header_groups is not None:
with tag("tr"):
for hg in table.header_groups:
line("th", hg.text, colspan=hg.size_cells)
if table.headers is not None:
with tag("tr"):
for header in table.headers:
line("th", header)
for row in table.rows:
with tag("tr"):
for cell in row:
cell_text = _to_str(cell.value)
with tag("td"):
style = ";".join(
(
*optional_to_iter(map_option(cell.color, lambda c: f"color:{c}")),
*optional_to_iter("bold" if cell.bold else None),
)
)
if is_empty(style):
text(cell_text)
else:
with tag("span", style=style):
text(cell_text)
def _to_str(v: CellValue) -> str:
if isinstance(v, ABCSequence) and not isinstance(v, str):
return "\n".join(_to_str_single(x) for x in v)
else:
return _to_str_single(v)
def _to_str_single(u: CellValueSingle) -> str:
if u is None:
return ""
elif isinstance(u, str):
return u
elif isinstance(u, int):
return str(u)
else:
return float_to_str(u)
def _split_text_to_lines(s: str, width: int) -> Sequence[str]:
return tuple(_iter_lines_of_split_text(s, width))
def _iter_lines_of_split_text(s: str, width: int) -> Iterable[str]:
assert width > 0
while not is_empty(s):
first_line = s[:width]
nl = try_index(first_line, "\n")
if nl is None:
yield first_line
s = s[width:]
else:
yield first_line[:nl]
s = s[nl + 1 :]
def _max_line_len(s: str) -> int:
return max(len(line) for line in _lines(s))
def _lines(s: str) -> Sequence[str]:
return s.split("\n")
@with_slots
@dataclass(frozen=True)
class OutputOptions:
width: Optional[OutputWidth] = None # Applies only to text output
table_indent: Optional[int] = None
html: Optional[Path] = None
txt: Optional[Path] = None
excel: Optional[Path] = None
def __post_init__(self) -> None:
check_cast(Optional[Path], self.html)
check_cast(Optional[Path], self.txt)
check_cast(Optional[Path], self.excel)
def any_file_output(self) -> bool:
return self.html is not None or self.txt is not None or self.excel is not None
EMPTY_OUTPUT_OPTIONS = OutputOptions()
@with_slots
@dataclass(frozen=True)
class DocOutputArgs:
output_width: Optional[OutputWidth] = argument(
default=None,
doc="""
Maximum width (in columns) of console or text file output.
Default is the current terminal size.
""",
)
table_indent: Optional[int] = argument(default=None, doc="Indent tables by this many spaces.")
txt: Optional[Path] = argument(default=None, doc="Output to a '.txt' file")
html: Optional[Path] = argument(default=None, doc="Output to a '.html' file")
# Hidden because render_to_excel is incomplete
xlsx: Optional[Path] = argument(default=None, hidden=True, doc="Output to a '.xlsx' file")
def output_options_from_args(args: DocOutputArgs) -> OutputOptions:
return OutputOptions(
width=args.output_width,
table_indent=args.table_indent,
html=args.html,
txt=args.txt,
excel=args.xlsx,
)
def handle_doc(doc: Document, output: OutputOptions = EMPTY_OUTPUT_OPTIONS) -> None:
if output.html:
output.html.write_text(_render_to_html(doc))
if output.txt:
doc_txt = _render_to_plaintext(
doc, max_width=output.width, table_indent=output.table_indent
)
txt = f"{get_command_line()}\n\n{doc_txt}"
output.txt.write_text(txt, encoding="utf-8")
if output.excel:
_render_to_excel(doc, output.excel)
if not output.any_file_output():
print_document(doc, max_width=output.width, table_indent=output.table_indent, color=False)
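# Illustrative sketch (not part of the original module): building a small Document
# from the primitives above and rendering it. Every name comes from this module; the
# metric names, colors and widths are arbitrary example choices.
def _example_document() -> None:
    table = Table(
        name="GC pause summary",
        headers=("metric", "value"),
        rows=(
            (Cell("PctTimePausedInGC", align=Align.left), Cell(1.23)),
            (Cell("FirstToLastGCSeconds", align=Align.left), Cell(42, color="green")),
        ),
    )
    doc = Document(sections=(Section(name="example", tables=(table,)),))
    # With no file outputs configured this falls through to print_document.
    handle_doc(doc, OutputOptions(width=80, table_indent=2))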
|
tests/constants.py | nhutnamhcmus/pykeen | 750 | 11069682 | <filename>tests/constants.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""Constants for PyKEEN testing."""
import pathlib
__all__ = [
"HERE",
"RESOURCES",
"EPSILON",
]
HERE = pathlib.Path(__file__).resolve().parent
RESOURCES = HERE.joinpath("resources")
EPSILON = 1.0e-07
|
python/cuml/test/dask/test_label_encoder.py | siddheshmhatre/cuml | 2,743 | 11069699 | # Copyright (c) 2020-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cuml
from cuml.dask.preprocessing.LabelEncoder import LabelEncoder
import cudf
import numpy as np
import dask_cudf
import pytest
from cuml.common.exceptions import NotFittedError
import cupy as cp
def _arr_to_similarity_mat(arr):
arr = arr.reshape(1, -1)
return np.pad(arr, [(arr.shape[1] - 1, 0), (0, 0)], "edge")
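# Illustrative note (not part of the original tests): _arr_to_similarity_mat tiles a
# label vector into a square matrix so that `mat == mat.T` yields pairwise label
# equality. Comparing those boolean matrices for the raw and encoded columns checks
# that the encoding preserves the grouping without pinning down the integer codes.
def _example_similarity_check():
    raw = np.array([3, 7, 3])
    enc = np.array([0, 1, 0])  # any consistent relabelling would also pass
    raw_m = _arr_to_similarity_mat(raw)
    enc_m = _arr_to_similarity_mat(enc)
    return ((raw_m == raw_m.T) == (enc_m == enc_m.T)).all()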
@pytest.mark.parametrize("length", [10, 1000])
@pytest.mark.parametrize("cardinality", [5, 10, 50])
def test_labelencoder_fit_transform(length, cardinality, client):
""" Try encoding the entire df
"""
tmp = cudf.Series(np.random.choice(cardinality, (length,)))
df = dask_cudf.from_cudf(tmp, npartitions=len(client.has_what()))
encoded = cuml.dask.preprocessing.LabelEncoder().fit_transform(df)
df_arr = df.compute().to_numpy()
df_arr = _arr_to_similarity_mat(df_arr)
encoder_arr = cp.asnumpy(encoded.compute().to_numpy())
encoded_arr = _arr_to_similarity_mat(encoder_arr)
assert ((encoded_arr == encoded_arr.T) == (df_arr == df_arr.T)).all()
@pytest.mark.parametrize("length", [10, 100, 1000])
@pytest.mark.parametrize("cardinality", [5, 10, 50])
def test_labelencoder_transform(length, cardinality, client):
""" Try fitting and then encoding a small subset of the df
"""
tmp = cudf.Series(np.random.choice(cardinality, (length,)))
df = dask_cudf.from_cudf(tmp, npartitions=len(client.has_what()))
le = LabelEncoder().fit(df)
assert le._fitted
encoded = le.transform(df)
df_arr = df.compute().to_numpy()
df_arr = _arr_to_similarity_mat(df_arr)
encoder_arr = cp.asnumpy(encoded.compute().to_numpy())
encoded_arr = _arr_to_similarity_mat(encoder_arr)
assert (
(encoded_arr == encoded_arr.T) == (df_arr == df_arr.T)
).all()
def test_labelencoder_unseen(client):
""" Try encoding a value that was not present during fitting
"""
df = dask_cudf.from_cudf(cudf.Series(np.random.choice(10, (10,))),
npartitions=len(client.has_what()))
le = LabelEncoder().fit(df)
assert le._fitted
with pytest.raises(KeyError):
tmp = dask_cudf.from_cudf(cudf.Series([-100, -120]),
npartitions=len(client.has_what()))
le.transform(tmp).compute()
def test_labelencoder_unfitted(client):
""" Try calling `.transform()` without fitting first
"""
df = dask_cudf.from_cudf(cudf.Series(np.random.choice(10, (10,))),
npartitions=len(client.has_what()))
le = LabelEncoder()
with pytest.raises(NotFittedError):
le.transform(df).compute()
@pytest.mark.parametrize("use_fit_transform", [False, True])
@pytest.mark.parametrize(
"orig_label, ord_label, expected_reverted, bad_ord_label",
[(cudf.Series(['a', 'b', 'c']),
cudf.Series([2, 1, 2, 0]),
cudf.Series(['c', 'b', 'c', 'a']),
cudf.Series([-1, 1, 2, 0])),
(cudf.Series(['Tokyo', 'Paris', 'Austin']),
cudf.Series([0, 2, 0]),
cudf.Series(['Austin', 'Tokyo', 'Austin']),
cudf.Series([0, 1, 2, 3])),
(cudf.Series(['a', 'b', 'c1']),
cudf.Series([2, 1]),
cudf.Series(['c1', 'b']),
cudf.Series([0, 1, 2, 3])),
(cudf.Series(['1.09', '0.09', '.09', '09']),
cudf.Series([0, 1, 2, 3]),
cudf.Series(['.09', '0.09', '09', '1.09']),
cudf.Series([0, 1, 2, 3, 4]))])
def test_inverse_transform(orig_label, ord_label,
expected_reverted, bad_ord_label,
use_fit_transform, client):
n_workers = len(client.has_what())
orig_label = dask_cudf.from_cudf(orig_label, npartitions=n_workers)
ord_label = dask_cudf.from_cudf(ord_label,
npartitions=n_workers)
expected_reverted = dask_cudf.from_cudf(expected_reverted,
npartitions=n_workers)
bad_ord_label = dask_cudf.from_cudf(bad_ord_label, npartitions=n_workers)
# prepare LabelEncoder
le = LabelEncoder()
if use_fit_transform:
le.fit_transform(orig_label)
else:
le.fit(orig_label)
assert(le._fitted is True)
# test if inverse_transform is correct
reverted = le.inverse_transform(ord_label)
reverted = reverted.compute().reset_index(drop=True)
expected_reverted = expected_reverted.compute()
assert(len(reverted) == len(expected_reverted))
assert(len(reverted)
== len(reverted[reverted == expected_reverted]))
    # test that it correctly raises ValueError
with pytest.raises(ValueError, match='y contains previously unseen label'):
le.inverse_transform(bad_ord_label).compute()
def test_unfitted_inverse_transform(client):
""" Try calling `.inverse_transform()` without fitting first
"""
tmp = cudf.Series(np.random.choice(10, (10,)))
df = dask_cudf.from_cudf(tmp, npartitions=len(client.has_what()))
le = LabelEncoder()
with pytest.raises(NotFittedError):
le.transform(df)
@pytest.mark.parametrize("empty, ord_label",
[(cudf.Series([]), cudf.Series([2, 1]))])
def test_empty_input(empty, ord_label, client):
# prepare LabelEncoder
n_workers = len(client.has_what())
empty = dask_cudf.from_cudf(empty, npartitions=n_workers)
ord_label = dask_cudf.from_cudf(ord_label, npartitions=n_workers)
le = LabelEncoder()
le.fit(empty)
assert(le._fitted is True)
    # test that it correctly raises ValueError
with pytest.raises(ValueError, match='y contains previously unseen label'):
le.inverse_transform(ord_label).compute()
# check fit_transform()
le = LabelEncoder()
transformed = le.fit_transform(empty).compute()
assert(le._fitted is True)
assert(len(transformed) == 0)
def test_masked_encode(client):
n_workers = len(client.has_what())
df = cudf.DataFrame({"filter_col": [1, 1, 2, 3, 1, 1, 1, 1, 6, 5],
"cat_col": ['a', 'b', 'c', 'd', 'a',
'a', 'a', 'c', 'b', 'c']})
ddf = dask_cudf.from_cudf(df, npartitions=n_workers)
ddf_filter = ddf[ddf["filter_col"] == 1]
filter_encoded = LabelEncoder().fit_transform(ddf_filter["cat_col"])
ddf_filter = ddf_filter.assign(filter_encoded=filter_encoded.values)
encoded_filter = LabelEncoder().fit_transform(ddf["cat_col"])
ddf = ddf.assign(encoded_filter=encoded_filter.values)
ddf = ddf[ddf.filter_col == 1]
assert(ddf.encoded_filter ==
ddf_filter.filter_encoded).compute().all()
|
third_party/bazel/cuda_supplement/cuda_supplement_configure.bzl | alibaba/BladeDISC | 328 | 11069711 | load("//bazel:common.bzl", "files_exist")
_TF_CUDA_HOME = "TF_CUDA_HOME"
def _create_dummy_repository(repo_ctx):
repo_ctx.symlink(Label("//bazel/cuda_supplement:dummy.BUILD.tpl"), "BUILD")
repo_ctx.template("build_defs.bzl", Label("//bazel/cuda_supplement:build_defs.bzl.tpl"), {
"%{IF_HAS_CUBLASLT}": "False",
"%{IF_HAS_CUDNN_STATIC}": "False",
})
def _impl(repo_ctx):
cuda_path = repo_ctx.os.environ.get(_TF_CUDA_HOME, None)
if cuda_path != None:
if_has_cublaslt, if_has_cudnn_static = files_exist(repo_ctx, [
cuda_path + "/lib64/libcublasLt_static.a",
cuda_path + "/lib64/libcudnn_static.a",
])
repo_ctx.template("BUILD", Label("//bazel/cuda_supplement:cuda_supplement.BUILD.tpl"), {})
repo_ctx.symlink(cuda_path + "/lib64", "lib64")
repo_ctx.template("build_defs.bzl", Label("//bazel/cuda_supplement:build_defs.bzl.tpl"), {
"%{IF_HAS_CUBLASLT}": "True" if if_has_cublaslt else "False",
"%{IF_HAS_CUDNN_STATIC}": "True" if if_has_cudnn_static else "False",
})
else:
_create_dummy_repository(repo_ctx)
cuda_supplement_configure = repository_rule(
implementation = _impl,
local = True,
environ = [_TF_CUDA_HOME],
)
|
mypy_drf_plugin/lib/fullnames.py | danielroseman/djangorestframework-stubs | 224 | 11069727 | FIELD_FULLNAME = "rest_framework.fields.Field"
BASE_SERIALIZER_FULLNAME = "rest_framework.serializers.BaseSerializer"
SERIALIZER_FULLNAME = "rest_framework.serializers.Serializer"
LIST_SERIALIZER_FULLNAME = "rest_framework.serializers.ListSerializer"
MODEL_SERIALIZER_FULLNAME = "rest_framework.serializers.ModelSerializer"
SERIALIZER_FIELD_MAPPING = {
"django.db.models.fields.AutoField": "rest_framework.serializers.IntegerField",
"django.db.models.fields.BigIntegerField": "rest_framework.serializers.IntegerField",
"django.db.models.fields.BooleanField": "rest_framework.serializers.BooleanField",
"django.db.models.fields.CharField": "rest_framework.serializers.CharField",
"django.db.models.fields.CommaSeparatedIntegerField": "rest_framework.serializers.CharField",
"django.db.models.fields.DateField": "rest_framework.serializers.DateField",
"django.db.models.fields.DateTimeField": "rest_framework.serializers.DateTimeField",
"django.db.models.fields.DecimalField": "rest_framework.serializers.DecimalField",
"django.db.models.fields.DurationField": "rest_framework.serializers.DurationField",
"django.db.models.fields.EmailField": "rest_framework.serializers.EmailField",
"django.db.models.fields.Field": "rest_framework.serializers.ModelField",
"django.db.models.fields.FileField": "rest_framework.serializers.FileField",
"django.db.models.fields.FilePathField": "rest_framework.serializers.FilePathField",
"django.db.models.fields.FloatField": "rest_framework.serializers.FloatField",
"django.db.models.fields.GenericIPAddressField": "rest_framework.serializers.IPAddressField",
"django.db.models.fields.ImageField": "rest_framework.serializers.ImageField",
"django.db.models.fields.IntegerField": "rest_framework.serializers.IntegerField",
"django.db.models.fields.NullBooleanField": "rest_framework.serializers.BooleanField",
"django.db.models.fields.PositiveIntegerField": "rest_framework.serializers.IntegerField",
"django.db.models.fields.PositiveSmallIntegerField": "rest_framework.serializers.IntegerField",
"django.db.models.fields.SlugField": "rest_framework.serializers.SlugField",
"django.db.models.fields.SmallIntegerField": "rest_framework.serializers.IntegerField",
"django.db.models.fields.TextField": "rest_framework.serializers.CharField",
"django.db.models.fields.TimeField": "rest_framework.serializers.TimeField",
"django.db.models.fields.URLField": "rest_framework.serializers.URLField",
"django.db.models.fields.UUIDField": "rest_framework.serializers.UUIDField",
"django.db.models.fields.JSONField": "rest_framework.serializers.JSONField",
}
ID_TYPE = "builtins.object"
|
pandas/_config/localization.py | CJL89/pandas | 28,899 | 11069739 | <gh_stars>1000+
"""
Helpers for configuring locale settings.
Name `localization` is chosen to avoid overlap with builtin `locale` module.
"""
from contextlib import contextmanager
import locale
import re
import subprocess
from pandas._config.config import options
@contextmanager
def set_locale(new_locale, lc_var: int = locale.LC_ALL):
"""
Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.getlocale()
try:
locale.setlocale(lc_var, new_locale)
normalized_locale = locale.getlocale()
if all(x is not None for x in normalized_locale):
yield ".".join(normalized_locale)
else:
yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
def can_set_locale(lc: str, lc_var: int = locale.LC_ALL) -> bool:
"""
Check to see if we can set a locale, and subsequently get the locale,
without raising an Exception.
Parameters
----------
lc : str
The locale to attempt to set.
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Returns
-------
bool
Whether the passed locale can be set
"""
try:
with set_locale(lc, lc_var=lc_var):
pass
except (ValueError, locale.Error):
        # horrible name for an Exception subclass
return False
else:
return True
def _valid_locales(locales, normalize):
"""
Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : str
A string where each locale is separated by a newline.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
return [
loc
for loc in (
locale.normalize(loc.strip()) if normalize else loc.strip()
for loc in locales
)
if can_set_locale(loc)
]
def _default_locale_getter():
return subprocess.check_output(["locale -a"], shell=True)
def get_locales(prefix=None, normalize=True, locale_getter=_default_locale_getter):
"""
Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
    On error, returns None (no locales available, e.g. on Windows).
"""
try:
raw_locales = locale_getter()
except subprocess.CalledProcessError:
        # Raised on (some? all?) Windows platforms, where "locale -a"
        # is not defined
return None
try:
# raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b"\n")
out_locales = []
for x in raw_locales:
try:
out_locales.append(str(x, encoding=options.display.encoding))
except UnicodeError:
# 'locale -a' is used to populated 'raw_locales' and on
# Redhat 7 Linux (and maybe others) prints locale names
# using windows-1252 encoding. Bug only triggered by
# a few special characters and when there is an
# extensive list of installed locales.
out_locales.append(str(x, encoding="windows-1252"))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
pattern = re.compile(f"{prefix}.*")
found = pattern.findall("\n".join(out_locales))
return _valid_locales(found, normalize)
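# Illustrative sketch (not part of the original module): typical use of the helpers
# above. The locale string is only an example; whether it can be set depends on the
# locales installed on the machine.
def _example_usage():
    english_locales = get_locales(prefix="en")
    if can_set_locale("de_DE.UTF-8"):
        with set_locale("de_DE.UTF-8", locale.LC_NUMERIC):
            return locale.format_string("%.2f", 1234.5), english_locales  # e.g. "1234,50"
    return None, english_locales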
|
tests/basics/frozenset_difference.py | learnforpractice/micropython-cpp | 13,648 | 11069768 | <filename>tests/basics/frozenset_difference.py
try:
frozenset
except NameError:
print("SKIP")
raise SystemExit
l = [1, 2, 3, 4]
s = frozenset(l)
outs = [s.difference(),
s.difference(frozenset({1})),
s.difference(frozenset({1}), [1, 2]),
s.difference(frozenset({1}), {1, 2}, {2, 3})]
for out in outs:
print(type(out), sorted(out))
s = frozenset(l)
try:
print(s.difference_update({1}))
except AttributeError:
print("AttributeError")
|
examples/pybullet/gym/pybullet_envs/minitaur/robots/mini_cheetah.py | felipeek/bullet3 | 9,136 | 11069773 | <reponame>felipeek/bullet3
"""Pybullet simulation of a vision60 robot."""
import math
import os
import gin
import numpy as np
from pybullet_envs.minitaur.robots import laikago_motor
from pybullet_envs.minitaur.robots import minitaur
from pybullet_envs.minitaur.robots import robot_config
NUM_MOTORS = 12
NUM_LEGS = 4
MOTOR_NAMES = [
"torso_to_abduct_fl_j", # Left front abduction (hip0).
"abduct_fl_to_thigh_fl_j", # Left front hip (upper0).
"thigh_fl_to_knee_fl_j", # Left front knee (lower0).
"torso_to_abduct_hl_j", # Left rear abduction (hip1).
"abduct_hl_to_thigh_hl_j", # Left rear hip (upper1).
"thigh_hl_to_knee_hl_j", # Left rear knee (lower1).
"torso_to_abduct_fr_j", # Right front abduction (hip2).
"abduct_fr_to_thigh_fr_j", # Right front hip (upper2).
"thigh_fr_to_knee_fr_j", # Right front knee (lower2).
"torso_to_abduct_hr_j", # Right rear abduction (hip3).
"abduct_hr_to_thigh_hr_j", # Right rear hip (upper3).
"thigh_hr_to_knee_hr_j", # Right rear knee (lower3).
]
_DEFAULT_TORQUE_LIMITS = [12, 18, 12] * 4
INIT_RACK_POSITION = [0, 0, 1.4]
INIT_POSITION = [0, 0, 0.4]
JOINT_DIRECTIONS = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
HIP_JOINT_OFFSET = 0.0
UPPER_LEG_JOINT_OFFSET = 0.0
KNEE_JOINT_OFFSET = 0.0
DOFS_PER_LEG = 3
JOINT_OFFSETS = np.array(
[HIP_JOINT_OFFSET, UPPER_LEG_JOINT_OFFSET, KNEE_JOINT_OFFSET] * 4)
PI = math.pi
DEFAULT_ABDUCTION_ANGLE = 0.0
DEFAULT_HIP_ANGLE = -1.1
DEFAULT_KNEE_ANGLE = 2.3
# Based on the readings from the robot's default pose.
INIT_MOTOR_ANGLES = [
DEFAULT_ABDUCTION_ANGLE, DEFAULT_HIP_ANGLE, DEFAULT_KNEE_ANGLE
] * NUM_LEGS
DEFAULT_LOCAL_TOE_POSITIONS = [[0.17, -0.11, -0.16], [0.17, 0.11, -0.16],
[-0.20, -0.11, -0.16], [-0.20, 0.11, -0.16]]
@gin.configurable
class MiniCheetah(minitaur.Minitaur):
"""A simulation for the mini cheetah robot."""
def __init__(self, **kwargs):
if "motor_kp" not in kwargs:
kwargs["motor_kp"] = 100.0
if "motor_kd" not in kwargs:
kwargs["motor_kd"] = 2.0
if "motor_torque_limits" not in kwargs:
kwargs["motor_torque_limits"] = _DEFAULT_TORQUE_LIMITS
    # The following parameters are fixed for the mini cheetah robot.
kwargs["num_motors"] = NUM_MOTORS
kwargs["dofs_per_leg"] = DOFS_PER_LEG
kwargs["motor_direction"] = JOINT_DIRECTIONS
kwargs["motor_offset"] = JOINT_OFFSETS
kwargs["motor_overheat_protection"] = False
kwargs["motor_model_class"] = laikago_motor.LaikagoMotorModel
super(MiniCheetah, self).__init__(**kwargs)
def _LoadRobotURDF(self):
mini_cheetah_urdf_path = "mini_cheetah/mini_cheetah.urdf"
if self._self_collision_enabled:
self.quadruped = self._pybullet_client.loadURDF(
mini_cheetah_urdf_path,
self._GetDefaultInitPosition(),
self._GetDefaultInitOrientation(),
flags=self._pybullet_client.URDF_USE_SELF_COLLISION)
else:
self.quadruped = self._pybullet_client.loadURDF(
mini_cheetah_urdf_path, self._GetDefaultInitPosition(),
self._GetDefaultInitOrientation())
def _SettleDownForReset(self, default_motor_angles, reset_time):
self.ReceiveObservation()
for _ in range(500):
self.ApplyAction(
INIT_MOTOR_ANGLES,
motor_control_mode=robot_config.MotorControlMode.POSITION)
self._pybullet_client.stepSimulation()
self.ReceiveObservation()
if default_motor_angles is not None:
num_steps_to_reset = int(reset_time / self.time_step)
for _ in range(num_steps_to_reset):
self.ApplyAction(
default_motor_angles,
motor_control_mode=robot_config.MotorControlMode.POSITION)
self._pybullet_client.stepSimulation()
self.ReceiveObservation()
def GetURDFFile(self):
return os.path.join(self._urdf_root, "mini_cheetah/mini_cheetah.urdf")
def ResetPose(self, add_constraint):
del add_constraint
for name in self._joint_name_to_id:
joint_id = self._joint_name_to_id[name]
self._pybullet_client.setJointMotorControl2(
bodyIndex=self.quadruped,
jointIndex=(joint_id),
controlMode=self._pybullet_client.VELOCITY_CONTROL,
targetVelocity=0,
force=0)
for name, i in zip(MOTOR_NAMES, range(len(MOTOR_NAMES))):
angle = INIT_MOTOR_ANGLES[i]
self._pybullet_client.resetJointState(
self.quadruped, self._joint_name_to_id[name], angle, targetVelocity=0)
def _BuildUrdfIds(self):
pass
def _GetMotorNames(self):
return MOTOR_NAMES
def _GetDefaultInitPosition(self):
if self._on_rack:
return INIT_RACK_POSITION
else:
return INIT_POSITION
def _GetDefaultInitOrientation(self):
init_orientation = [0, 0, 0, 1.0]
return init_orientation
|
portia_server/portia_server/views.py | hackrush01/portia | 6,390 | 11069777 | from django.conf import settings
from portia_api.jsonapi import JSONResponse
def capabilities(request):
capabilities = {
'custom': settings.CUSTOM,
'username': request.user.username,
'capabilities': settings.CAPABILITIES,
}
return JSONResponse(capabilities)
|
devil/devil/utils/markdown_test.py | Martijnve23/catapult | 1,894 | 11069785 | #! /usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import textwrap
import unittest
if __name__ == '__main__':
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
from devil.utils import markdown
class MarkdownTest(unittest.TestCase):
def testBold(self):
raw = 'foo'
self.assertEqual('**foo**', markdown.md_bold(raw))
def testBoldContainsStars(self):
raw = '*foo*'
self.assertEqual('**\\*foo\\***', markdown.md_bold(raw))
def testCode(self):
raw = textwrap.dedent("""\
class MarkdownTest(unittest.TestCase):
def testCode(self):
pass""")
expected = textwrap.dedent("""\
```python
class MarkdownTest(unittest.TestCase):
def testCode(self):
pass
```
""")
actual = markdown.md_code(raw, language='python')
self.assertEqual(expected, actual)
def testCodeContainsTicks(self):
raw = textwrap.dedent("""\
This is sample markdown.
```c
// This is a sample code block.
int main(int argc, char** argv) {
return 0;
}
```""")
expected = textwrap.dedent("""\
```
This is sample markdown.
\\`\\`\\`c
// This is a sample code block.
int main(int argc, char** argv) {
return 0;
}
\\`\\`\\`
```
""")
actual = markdown.md_code(raw, language=None)
self.assertEqual(expected, actual)
def testEscape(self):
raw = 'text_with_underscores *and stars*'
expected = 'text\\_with\\_underscores \\*and stars\\*'
actual = markdown.md_escape(raw)
self.assertEqual(expected, actual)
def testHeading1(self):
raw = 'Heading 1'
self.assertEqual('# Heading 1', markdown.md_heading(raw, level=1))
def testHeading5(self):
raw = 'Heading 5'
self.assertEqual('##### Heading 5', markdown.md_heading(raw, level=5))
def testHeading10(self):
raw = 'Heading 10'
self.assertEqual('###### Heading 10', markdown.md_heading(raw, level=10))
def testInlineCode(self):
raw = 'devil.utils.markdown_test'
self.assertEqual('`devil.utils.markdown_test`',
markdown.md_inline_code(raw))
def testInlineCodeContainsTicks(self):
raw = 'this contains `backticks`'
self.assertEqual('`this contains \\`backticks\\``',
markdown.md_inline_code(raw))
def testItalic(self):
raw = 'bar'
self.assertEqual('*bar*', markdown.md_italic(raw))
def testItalicContainsStars(self):
raw = '*bar*'
self.assertEqual('*\\*bar\\**', markdown.md_italic(raw))
def testLink(self):
link_text = 'Devil home'
link_target = (
'https://chromium.googlesource.com/catapult.git/+/HEAD/devil')
expected = ('[Devil home]'
'(https://chromium.googlesource.com/catapult.git/+/HEAD/devil)')
self.assertEqual(expected, markdown.md_link(link_text, link_target))
def testLinkTextContainsBracket(self):
link_text = 'foo [] bar'
link_target = 'https://www.google.com'
expected = '[foo [\\] bar](https://www.google.com)'
self.assertEqual(expected, markdown.md_link(link_text, link_target))
if __name__ == '__main__':
unittest.main(verbosity=2)
|
stackoverflow/venv/lib/python3.6/site-packages/hamcrest/library/collection/is_empty.py | zhi-xianwei/learn_python3_spider | 9,953 | 11069788 | from hamcrest.core.base_matcher import BaseMatcher
__author__ = "<NAME>"
__copyright__ = "Copyright 2012 hamcrest.org"
__license__ = "BSD, see License.txt"
class IsEmpty(BaseMatcher):
def matches(self, item, mismatch_description=None):
try:
if len(item) == 0:
return True
if mismatch_description:
mismatch_description \
.append_text('has %d item(s)' % len(item))
except TypeError:
if mismatch_description:
mismatch_description \
.append_text('does not support length')
return False
def describe_to(self, description):
description.append_text('an empty collection')
def empty():
"""
This matcher matches any collection-like object that responds to the
__len__ method, and has a length of 0.
"""
return IsEmpty()
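# Illustrative usage sketch (not from the original module): in a test this
# matcher is normally combined with hamcrest's assert_that; the import below is
# assumed to come from the installed PyHamcrest distribution.
#
#     from hamcrest import assert_that
#     assert_that([], empty())         # passes: len([]) == 0
#     assert_that({'a': 1}, empty())   # fails, reporting "has 1 item(s)"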
|
mythril/analysis/module/modules/multiple_sends.py | kalloc/mythril | 1,887 | 11069796 | <reponame>kalloc/mythril
"""This module contains the detection code to find multiple sends occurring in
a single transaction."""
from copy import copy
from typing import cast, List
from mythril.analysis.report import Issue
from mythril.analysis.solver import get_transaction_sequence, UnsatError
from mythril.analysis.swc_data import MULTIPLE_SENDS
from mythril.analysis.module.base import DetectionModule, EntryPoint
from mythril.laser.ethereum.state.annotation import StateAnnotation
from mythril.laser.ethereum.state.global_state import GlobalState
import logging
log = logging.getLogger(__name__)
class MultipleSendsAnnotation(StateAnnotation):
def __init__(self) -> None:
self.call_offsets = [] # type: List[int]
def __copy__(self):
result = MultipleSendsAnnotation()
result.call_offsets = copy(self.call_offsets)
return result
class MultipleSends(DetectionModule):
"""This module checks for multiple sends in a single transaction."""
name = "Multiple external calls in the same transaction"
swc_id = MULTIPLE_SENDS
description = "Check for multiple sends in a single transaction"
entry_point = EntryPoint.CALLBACK
pre_hooks = ["CALL", "DELEGATECALL", "STATICCALL", "CALLCODE", "RETURN", "STOP"]
def _execute(self, state: GlobalState) -> None:
if state.get_current_instruction()["address"] in self.cache:
return
issues = self._analyze_state(state)
for issue in issues:
self.cache.add(issue.address)
self.issues.extend(issues)
@staticmethod
def _analyze_state(state: GlobalState):
"""
:param state: the current state
:return: returns the issues for that corresponding state
"""
instruction = state.get_current_instruction()
annotations = cast(
List[MultipleSendsAnnotation],
list(state.get_annotations(MultipleSendsAnnotation)),
)
if len(annotations) == 0:
state.annotate(MultipleSendsAnnotation())
annotations = cast(
List[MultipleSendsAnnotation],
list(state.get_annotations(MultipleSendsAnnotation)),
)
call_offsets = annotations[0].call_offsets
if instruction["opcode"] in ["CALL", "DELEGATECALL", "STATICCALL", "CALLCODE"]:
call_offsets.append(state.get_current_instruction()["address"])
else: # RETURN or STOP
for offset in call_offsets[1:]:
try:
transaction_sequence = get_transaction_sequence(
state, state.world_state.constraints
)
except UnsatError:
continue
description_tail = (
"This call is executed following another call within the same transaction. It is possible "
"that the call never gets executed if a prior call fails permanently. This might be caused "
"intentionally by a malicious callee. If possible, refactor the code such that each transaction "
"only executes one external call or "
"make sure that all callees can be trusted (i.e. they’re part of your own codebase)."
)
issue = Issue(
contract=state.environment.active_account.contract_name,
function_name=state.environment.active_function_name,
address=offset,
swc_id=MULTIPLE_SENDS,
bytecode=state.environment.code.bytecode,
title="Multiple Calls in a Single Transaction",
severity="Low",
description_head="Multiple calls are executed in the same transaction.",
description_tail=description_tail,
gas_used=(state.mstate.min_gas_used, state.mstate.max_gas_used),
transaction_sequence=transaction_sequence,
)
return [issue]
return []
detector = MultipleSends()
|
electroncash/winconsole.py | christroutner/Electron-Cash | 208 | 11069820 | # Electron Cash - lightweight Bitcoin client
# Copyright (C) 2019 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module is for to handling console attaching and / or creation in Windows
binaries that are built for the Windows subsystem and therefore do not
automatically allocate a console.
"""
import sys
import os
import ctypes
import atexit
STD_OUTPUT_HANDLE = -11
FILE_TYPE_DISK = 1
def parent_process_pids() -> int:
"""
Returns all parent process PIDs, starting with the closest parent
"""
try:
import psutil
pid = os.getpid()
while pid > 0:
pid = psutil.Process(pid).ppid()
yield pid
except psutil.NoSuchProcess:
# Parent process not found, likely terminated, nothing we can do
pass
def get_console_title() -> str:
''' Return the current console title as a string. May return None on error. '''
b = bytes(1024)
b_ptr = ctypes.c_char_p(b)
title = None
title_len = ctypes.windll.kernel32.GetConsoleTitleW(b_ptr, len(b)//2) # GetConsoleTitleW expects size in 2-byte chars
if title_len > 0:
title = b.decode('utf-16')[:title_len]
return title
def create_or_attach_console(*, attach: bool = True, create: bool = False,
title: str = None) -> bool:
"""
Workaround to the fact that cmd.exe based execution of this program means
it has no stdout handles and thus is always silent, thereby rendering
    verbose console output or command-line usage problematic.
First, check if we have STD_OUTPUT_HANDLE (a console) and do nothing if
there is one, returning True.
Otherwise, try to attach to the console of any ancestor process, and return
True.
If not successful, optionally (create=True) create a new console.
NB: Creating a new console results in a 'cmd.exe' console window to be
created on the Windows desktop, so only pass create=True if that's
acceptable.
If a console was found or created, we redirect current output handles
(sys.stdout, sys.stderr) to this found and/or created console.
Always return True on success or if there was a console already,
False or None on failure (a None return indicates a missing lib or some
other unspecified exception was raised when attempting to create a console).
"""
std_out_handle = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
has_console = std_out_handle > 0
if has_console:
# Output is being redirected to a file, or we have an msys console.
# do nothing
return True
try:
if attach:
# Try to attach to a parent console
for pid in parent_process_pids():
if ctypes.windll.kernel32.AttachConsole(pid):
has_console = True
break
except ImportError:
# User's system lacks psutil
        return  # Return None in case caller wants to differentiate exceptional failures from regular False return
created = False
if not has_console and create:
# Try to allocate a new console
if ctypes.windll.kernel32.AllocConsole():
has_console = True
created = True
if not has_console:
# Indicate to caller no console is to be had.
return False
try:
# Reopen Pythons console input and output handles
conout = open('CONOUT$', 'w')
sys.stdout = conout
sys.stderr = conout
sys.stdin = open('CONIN$', 'r')
except OSError:
# If we get here, we likely were in MinGW / MSYS where CONOUT$ / CONIN$
# are not valid files or some other weirdness occurred. Give up.
return # return None to indicate underlying exception
if title:
old_title = get_console_title() if not created else None # save the old title only if not created by us
# Set the console title, if specified
ctypes.windll.kernel32.SetConsoleTitleW(title)
if old_title is not None:
# undo the setting of the console title at app exit
atexit.register(ctypes.windll.kernel32.SetConsoleTitleW, old_title)
return True
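# Illustrative usage sketch (an assumption, not code from this module): a
# Windows-only entry point might call this early so that verbose output is
# visible when the binary is launched from cmd.exe. The title string is a
# placeholder.
#
#     if sys.platform == 'win32':
#         create_or_attach_console(attach=True, create=False, title='My App Console')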
|
azure-py-cosmosdb-logicapp/__main__.py | PaulusTM/examples | 1,628 | 11069823 | # Copyright 2016-2021, Pulumi Corporation. All rights reserved.
import pulumi
import pulumi_azure_native.authorization as authorization
import pulumi_azure_native.documentdb as documentdb
import pulumi_azure_native.logic as logic
import pulumi_azure_native.resources as resources
import pulumi_azure_native.storage as storage
import pulumi_azure_native.web as web
# Create an Azure Resource Group
resource_group = resources.ResourceGroup("resourceGroup")
# Create an Azure resource (Storage Account)
storage_account = storage.StorageAccount(
"logicappdemosa",
resource_group_name=resource_group.name,
sku=storage.SkuArgs(
name=storage.SkuName.STANDARD_LRS,
),
kind=storage.Kind.STORAGE_V2)
# Cosmos DB Account
cosmosdb_account = documentdb.DatabaseAccount(
"logicappdemo-cdb",
resource_group_name=resource_group.name,
database_account_offer_type=documentdb.DatabaseAccountOfferType.STANDARD,
locations=[documentdb.LocationArgs(
location_name=resource_group.location,
failover_priority=0,
)],
consistency_policy=documentdb.ConsistencyPolicyArgs(
default_consistency_level=documentdb.DefaultConsistencyLevel.SESSION,
))
# Cosmos DB Database
db = documentdb.SqlResourceSqlDatabase(
"sqldb",
resource_group_name=resource_group.name,
account_name=cosmosdb_account.name,
resource=documentdb.SqlDatabaseResourceArgs(
id="sqldb",
))
# Cosmos DB SQL Container
db_container = documentdb.SqlResourceSqlContainer(
"container",
resource_group_name=resource_group.name,
account_name=cosmosdb_account.name,
database_name=db.name,
resource=documentdb.SqlContainerResourceArgs(
id="container",
partition_key=documentdb.ContainerPartitionKeyArgs(
paths=["/myPartitionKey"],
kind="Hash",
)
))
account_keys = documentdb.list_database_account_keys_output(
account_name=cosmosdb_account.name,
resource_group_name=resource_group.name)
client_config = pulumi.Output.from_input(authorization.get_client_config())
api_id = pulumi.Output.concat(
"/subscriptions/", client_config.subscription_id,
"/providers/Microsoft.Web/locations/", resource_group.location,
"/managedApis/documentdb")
# API Connection to be used in a Logic App
connection = web.Connection(
"connection",
resource_group_name=resource_group.name,
properties=web.ApiConnectionDefinitionPropertiesArgs(
display_name="cosmosdb_connection",
api=web.ApiReferenceArgs(
id=api_id,
),
parameter_values={
"databaseAccount": cosmosdb_account.name,
"access_key": account_keys.primary_master_key,
},
))
# Logic App with an HTTP trigger and Cosmos DB action
workflow = logic.Workflow(
"workflow",
resource_group_name=resource_group.name,
definition={
"$schema": "https://schema.management.azure.com/providers/Microsoft.Logic/schemas/2016-06-01/workflowdefinition.json#",
"content_version": "1.0.0.0",
"parameters": {
"$connections": {
"default_value": {},
"type": "Object",
},
},
"triggers": {
"Receive_post": {
"type": "Request",
"kind": "Http",
"inputs": {
"method": "POST",
"schema": {
"properties": {},
"type": "object",
},
},
},
},
"actions": {
"write_body": {
"type": "ApiConnection",
"inputs": {
"body": {
"data": "@triggerBody()",
"id": "@utcNow()",
},
"host": {
"connection": {
"name": "@parameters('$connections')['documentdb']['connectionId']",
},
},
"method": "post",
"path": pulumi.Output.all(db.name, db_container.name).apply(
lambda arg: f"/dbs/{arg[0]}/colls/{arg[1]}/docs"),
},
},
},
},
parameters={
"$connections": logic.WorkflowParameterArgs(
value={
"documentdb": {
"connection_id": connection.id,
"connection_name": "logicapp-cosmosdb-connection",
"id": api_id,
},
},
),
})
callback_urls = logic.list_workflow_trigger_callback_url_output(
resource_group_name=resource_group.name,
workflow_name=workflow.name,
trigger_name="Receive_post")
# Export the HTTP endpoint
pulumi.export("endpoint", callback_urls.value)
|
sportsbetting/bookmakers/france_pari.py | MiladC4/Sports-betting | 169 | 11069834 | """
France-pari odds scraper
"""
import datetime
import re
import requests
from bs4 import BeautifulSoup
import sportsbetting as sb
def parse_france_pari(url):
"""
Retourne les cotes disponibles sur france-pari
"""
soup = BeautifulSoup(requests.get(url).content, features="lxml")
match_odds_hash = {}
today = datetime.datetime.today()
today = datetime.datetime(today.year, today.month, today.day)
year = " " + str(today.year)
date = ""
match = ""
date_time = None
id_match = None
for line in soup.find_all():
if "class" in line.attrs and "competition" in line["class"]:
competition = line.text.strip()
if "class" in line.attrs and "date" in line["class"]:
date = line.text + year
elif "class" in line.attrs and "odd-event-block" in line["class"]:
strings = list(line.stripped_strings)
if "snc-odds-date-lib" in line["class"]:
id_match = line.findChild("a", recursive=True).get("href").split("-")[0].split("/")[-1]
hour = strings[0]
try:
i = strings.index("/")
date_time = datetime.datetime.strptime(
date + " " + hour, "%A %d %B %Y %H:%M")
if date_time < today:
date_time = date_time.replace(year=date_time.year + 1)
match = " ".join(strings[1:i]) + \
" - " + " ".join(strings[i + 1:])
reg_exp = (r'\[[0-7]\/[0-7]\s?([0-7]\/[0-7]\s?)*\]'
r'|\[[0-7]\-[0-7]\s?([0-7]\-[0-7]\s?)*\]')
if list(re.finditer(reg_exp, match)): # match tennis live
match = match.split("[")[0].strip()
except ValueError:
pass
else:
odds = []
for i, val in enumerate(strings):
if i % 2:
odds.append(float(val.replace(",", ".")))
try:
if match:
match_odds_hash[match] = {}
match_odds_hash[match]['odds'] = {"france_pari": odds}
match_odds_hash[match]['date'] = date_time
match_odds_hash[match]['id'] = {"france_pari": id_match}
match_odds_hash[match]['competition'] = competition
match = None
except UnboundLocalError:
pass
if not match_odds_hash:
raise sb.UnavailableCompetitionException
return match_odds_hash
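# Illustrative call sketch (the URL below is a placeholder for a france-pari
# competition page, not a verified endpoint):
#
#     odds = parse_france_pari("https://www.france-pari.fr/...")
#     for match, info in odds.items():
#         print(match, info["date"], info["odds"]["france_pari"])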
|
src/pybel/manager/citation_utils.py | rpatil524/pybel | 103 | 11069843 | <filename>src/pybel/manager/citation_utils.py
# -*- coding: utf-8 -*-
"""Citation utilities for the database manager."""
import logging
import re
from datetime import date, datetime
from functools import lru_cache
from typing import Any, Dict, Iterable, List, Mapping, Optional, Set, Tuple, Union
import ratelimit
import requests
from more_itertools import chunked
from sqlalchemy import and_
from tqdm.autonotebook import tqdm
from . import models
from .cache_manager import Manager
from ..constants import CITATION
from ..struct.filters import filter_edges
from ..struct.filters.edge_predicates import CITATION_PREDICATES
from ..struct.graph import BELGraph
from ..struct.summary.provenance import get_citation_identifiers
__all__ = [
'enrich_pubmed_citations',
'enrich_pmc_citations',
]
logger = logging.getLogger(__name__)
EUTILS_URL_FMT = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=pubmed&retmode=json&id={}"
re1 = re.compile(r'^[12][0-9]{3} [a-zA-Z]{3} \d{1,2}$')
re2 = re.compile(r'^[12][0-9]{3} [a-zA-Z]{3}$')
re3 = re.compile(r'^[12][0-9]{3}$')
re4 = re.compile(r'^[12][0-9]{3} [a-zA-Z]{3}-[a-zA-Z]{3}$')
re5 = re.compile(r'^([12][0-9]{3}) (Spring|Fall|Winter|Summer)$')
re6 = re.compile(r'^[12][0-9]{3} [a-zA-Z]{3} \d{1,2}-(\d{1,2})$')
re7 = re.compile(r'^[12][0-9]{3} [a-zA-Z]{3} \d{1,2}-([a-zA-Z]{3} \d{1,2})$')
# TODO "Winter 2016" probably with re.compile(r'^(Spring|Fall|Winter|Summer) ([12][0-9]{3})$')
# TODO "YYYY Oct - Dec" update re4 to allow spaces before and after the dash
season_map = {'Spring': '03', 'Summer': '06', 'Fall': '09', 'Winter': '12'}
def sanitize_date(publication_date: str) -> str:
"""Sanitize lots of different date strings into ISO-8601."""
if re1.search(publication_date):
return datetime.strptime(publication_date, '%Y %b %d').strftime('%Y-%m-%d')
if re2.search(publication_date):
return datetime.strptime(publication_date, '%Y %b').strftime('%Y-%m-01')
if re3.search(publication_date):
return publication_date + "-01-01"
if re4.search(publication_date):
return datetime.strptime(publication_date[:-4], '%Y %b').strftime('%Y-%m-01')
s = re5.search(publication_date)
if s:
year, season = s.groups()
return '{}-{}-01'.format(year, season_map[season])
s = re6.search(publication_date)
if s:
return datetime.strptime(publication_date, '%Y %b %d-{}'.format(s.groups()[0])).strftime('%Y-%m-%d')
s = re7.search(publication_date)
if s:
return datetime.strptime(publication_date, '%Y %b %d-{}'.format(s.groups()[0])).strftime('%Y-%m-%d')
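    # Illustrative behaviour of the patterns above (the dates are made-up examples):
    #   '2016 Nov 3'   -> '2016-11-03'   (re1)
    #   '2016 Nov'     -> '2016-11-01'   (re2)
    #   '2016'         -> '2016-01-01'   (re3)
    #   '2016 Spring'  -> '2016-03-01'   (re5)
    # A string matching none of the patterns falls through and returns None.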
def clean_pubmed_identifiers(identifiers: Iterable[str]) -> List[str]:
"""Clean a list of identifiers with string strips, deduplicates, and sorting."""
_identifiers = (str(identifier).strip() for identifier in identifiers if identifier)
return sorted({i for i in _identifiers if i})
@ratelimit.limits(calls=3, period=1)
def get_pubmed_citation_response(pubmed_identifiers: Iterable[str]):
"""Get the response from PubMed E-Utils for a given list of PubMed identifiers.
Rate limit of 3 requests per second is from:
https://ncbiinsights.ncbi.nlm.nih.gov/2018/08/14/release-plan-for-e-utility-api-keys/
:param pubmed_identifiers:
:rtype: dict
"""
pubmed_identifiers = list(pubmed_identifiers)
url = EUTILS_URL_FMT.format(
','.join(
pubmed_identifier
for pubmed_identifier in pubmed_identifiers
if pubmed_identifier
),
)
response = requests.get(url)
return response.json()
def enrich_citation_model(manager: Manager, citation: models.Citation, p: Mapping[str, Any]) -> bool:
"""Enrich a citation model with the information from PubMed.
:param manager: A database manager
:param citation: A citation model
:param p: The dictionary from PubMed E-Utils corresponding to d["result"][pmid]
"""
if 'error' in p:
logger.warning('Error downloading PubMed')
return False
citation.title = p['title']
citation.journal = p['fulljournalname']
citation.volume = p['volume']
citation.issue = p['issue']
citation.pages = p['pages']
citation.first = manager.get_or_create_author(p['sortfirstauthor'])
citation.last = manager.get_or_create_author(p['lastauthor'])
pubtypes = p['pubtype']
if pubtypes:
citation.article_type = pubtypes[0]
if 'authors' in p:
for author in p['authors']:
author_model = manager.get_or_create_author(author['name'])
if author_model not in citation.authors:
citation.authors.append(author_model)
publication_date = p['pubdate']
try:
sanitized_publication_date = sanitize_date(publication_date)
except ValueError:
logger.warning('could not parse publication date %s for pubmed:%s', publication_date, citation.db_id)
sanitized_publication_date = None
if sanitized_publication_date:
citation.date = datetime.strptime(sanitized_publication_date, '%Y-%m-%d')
else:
logger.info('result had date with strange format: %s', publication_date)
return True
def get_citations_by_pmids(
manager: Manager,
pmids: Iterable[Union[str, int]],
*,
group_size: Optional[int] = None,
offline: bool = False,
) -> Tuple[Dict[str, Dict], Set[str]]:
return _get_citations_by_identifiers(
manager=manager, identifiers=pmids, group_size=group_size, offline=offline, prefix='pubmed',
)
def _get_citations_by_identifiers(
manager: Manager,
identifiers: Iterable[Union[str, int]],
*,
group_size: Optional[int] = None,
offline: bool = False,
prefix: Optional[str] = None,
) -> Tuple[Dict[str, Dict], Set[str]]:
"""Get citation information for the given list of PubMed identifiers using the NCBI's eUtils service.
:type manager: pybel.Manager
:param identifiers: an iterable of PubMed identifiers
:param group_size: The number of PubMed identifiers to query at a time. Defaults to 200 identifiers.
    :return: A pair of a dictionary of {identifier: data dictionary} and a set of erroneous
identifiers.
"""
if prefix is None:
prefix = 'pubmed'
helper = _HELPERS.get(prefix)
if helper is None:
raise ValueError(f'can not work on prefix: {prefix}')
group_size = group_size if group_size is not None else 200
identifiers = clean_pubmed_identifiers(identifiers)
logger.info('ensuring %d %s identifiers', len(identifiers), prefix)
enriched_models = {}
unenriched_models = {}
id_to_model = {
citation_model.db_id: citation_model
for citation_model in _get_citation_models(identifiers, prefix=prefix, manager=manager)
}
logger.info('%d of %d %s identifiers are already cached', len(id_to_model), len(identifiers), prefix)
for identifier in tqdm(identifiers, desc=f'creating {prefix} models'):
model = id_to_model.get(identifier)
if model is None:
model = id_to_model[identifier] = manager.get_or_create_citation(identifier=identifier, namespace=prefix)
if model.is_enriched:
enriched_models[identifier] = model.to_json()
else:
unenriched_models[identifier] = model
    logger.info('%d of %d %s identifiers are already enriched', len(enriched_models), len(identifiers), prefix)
manager.session.commit()
errors = set()
if not unenriched_models or offline:
return enriched_models, errors
it = tqdm(unenriched_models, desc=f'getting {prefix} data in chunks of {group_size}')
for identifier_chunk in chunked(it, n=group_size):
helper(
identifier_chunk,
manager=manager,
enriched_models=enriched_models,
unenriched_models=unenriched_models,
errors=errors,
)
return enriched_models, errors
def _help_enrich_pmids(identifiers: Iterable[str], *, manager, unenriched_models, enriched_models, errors):
response = get_pubmed_citation_response(identifiers)
response_pmids = response['result']['uids']
for pmid in response_pmids:
p = response['result'][pmid]
citation = unenriched_models.get(pmid)
if citation is None:
tqdm.write(f'problem looking up pubmed:{pmid}')
continue
successful_enrichment = enrich_citation_model(manager, citation, p)
if not successful_enrichment:
tqdm.write(f"Error downloading pubmed:{pmid}")
errors.add(pmid)
continue
enriched_models[pmid] = citation.to_json()
manager.session.add(citation)
manager.session.commit() # commit in groups
def _help_enrich_pmc_identifiers(
identifiers: Iterable[str],
*,
manager: Manager,
unenriched_models,
enriched_models,
errors,
):
for pmcid in identifiers:
try:
csl = get_pmc_csl_item(pmcid)
except Exception:
tqdm.write(f"Error downloading pmc:{pmcid}")
errors.add(pmcid)
continue
model = unenriched_models[pmcid]
enrich_citation_model_from_pmc(manager=manager, citation=model, csl=csl)
manager.session.add(model)
enriched_models[pmcid] = model.to_json()
manager.session.commit() # commit in groups
_HELPERS = {
'pubmed': _help_enrich_pmids,
'pmc': _help_enrich_pmc_identifiers,
}
def _get_citation_models(
identifiers: Iterable[str],
*,
prefix: str,
manager: Manager,
chunksize: int = 200,
) -> Iterable[models.Citation]:
for identifiers_chunk in chunked(identifiers, chunksize):
citation_filter = and_(
models.Citation.db == prefix,
models.Citation.db_id.in_(identifiers_chunk),
)
yield from manager.session.query(models.Citation).filter(citation_filter).all()
def enrich_pubmed_citations(
graph: BELGraph,
*,
manager: Optional[Manager] = None,
group_size: Optional[int] = None,
offline: bool = False,
) -> Set[str]:
"""Overwrite all PubMed citations with values from NCBI's eUtils lookup service.
:param graph: A BEL graph
:param manager: A PyBEL database manager
:param group_size: The number of PubMed identifiers to query at a time. Defaults to 200 identifiers.
:param offline: An override for when you don't want to hit the eUtils
:return: A set of PMIDs for which the eUtils service crashed
"""
return _enrich_citations(
manager=manager, graph=graph, group_size=group_size, offline=offline, prefix='pubmed',
)
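# Illustrative call sketch (assumes `graph` is a BELGraph that already carries
# pubmed citations; the variable name is made up for the example):
#
#     errors = enrich_pubmed_citations(graph)  # builds a default Manager() internally
#     if errors:
#         logger.warning('eUtils lookup failed for %d identifiers', len(errors))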
def enrich_pmc_citations(
graph: BELGraph,
*,
manager: Optional[Manager] = None,
group_size: Optional[int] = None,
offline: bool = False,
) -> Set[str]:
"""Overwrite all PubMed citations with values from NCBI's eUtils lookup service.
:param graph: A BEL graph
:param manager: A PyBEL database manager
:param group_size: The number of PubMed identifiers to query at a time. Defaults to 200 identifiers.
:param offline: An override for when you don't want to hit the eUtils
:return: A set of PMIDs for which the eUtils service crashed
"""
return _enrich_citations(
manager=manager, graph=graph, group_size=group_size, offline=offline, prefix='pmc',
)
def _enrich_citations(
graph: BELGraph,
manager: Optional[Manager],
group_size: Optional[int] = None,
offline: bool = False,
prefix: Optional[str] = None,
) -> Set[str]:
"""Overwrite all citations of the given prefix using the predefined lookup functions.
:param graph: A BEL Graph
:param group_size: The number of identifiers to query at a time. Defaults to 200 identifiers.
:return: A set of identifiers for which lookup was not possible
"""
if manager is None:
manager = Manager()
if prefix is None:
prefix = 'pubmed'
identifiers = {identifier for identifier in get_citation_identifiers(graph, prefix) if identifier}
identifier_map, errors = _get_citations_by_identifiers(
manager,
identifiers=identifiers,
group_size=group_size,
offline=offline,
prefix=prefix,
)
for u, v, k in filter_edges(graph, CITATION_PREDICATES[prefix]):
identifier = graph[u][v][k][CITATION].identifier
identifier_data = identifier_map.get(identifier)
if identifier_data is None:
logger.warning('Missing data for %s:%s', prefix, identifier)
errors.add(identifier)
continue
graph[u][v][k][CITATION].update(identifier_data)
return errors
@lru_cache()
def get_pmc_csl_item(pmcid: str) -> Mapping[str, Any]:
"""Get the CSL Item for a PubMed Central record by its PMID, PMCID, or DOI, using the NCBI Citation Exporter API."""
if not pmcid.startswith("PMC"):
        raise ValueError(f'not a valid pmc id: {pmcid}')
from manubot.cite.pubmed import get_pmc_csl_item
csl_item = get_pmc_csl_item(pmcid)
if "URL" not in csl_item:
csl_item["URL"] = f"https://www.ncbi.nlm.nih.gov/pmc/articles/{csl_item.get('PMCID', pmcid)}/"
return csl_item
def enrich_citation_model_from_pmc(manager: Manager, citation: models.Citation, csl: Mapping[str, Any]) -> bool:
"""Enrich a citation model with the information from PubMed Central.
:param manager: A database manager
:param citation: A citation model
:param dict csl: The dictionary from PMC
"""
citation.title = csl.get('title')
citation.journal = csl.get('container-title')
citation.volume = csl.get('volume')
# citation.issue = csl['issue']
citation.pages = csl.get('page')
citation.article_type = csl.get('type')
for author in csl.get('author', []):
try:
author_name = f'{author["given"]} {author["family"]}'
except KeyError:
print(f'problem with author in pmc:{citation.db_id}', author)
continue
author_model = manager.get_or_create_author(author_name)
if author_model not in citation.authors:
citation.authors.append(author_model)
if citation.authors:
citation.first = citation.authors[0]
citation.last = citation.authors[-1]
issued = csl.get('issued')
if issued is not None:
date_parts = issued['date-parts'][0]
if len(date_parts) == 3:
citation.date = date(year=date_parts[0], month=date_parts[1], day=date_parts[2])
elif len(date_parts) == 2:
citation.date = date(year=date_parts[0], month=date_parts[1], day=1)
elif len(date_parts) == 1:
citation.date = date(year=date_parts[0], month=1, day=1)
else:
logger.warning('not sure about date parts: %s', date_parts)
return True
|
packages/python/pyfora/algorithms/logistic/TrustRegionConjugateGradientSolver.py | ufora/ufora | 571 | 11069860 | <reponame>ufora/ufora<gh_stars>100-1000
# Copyright 2016 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy
import math
from Solver import Solver, ReturnValue
class TrustRegionConjugateGradientSolver(Solver):
"""
Implements "Trust Region Newton Methods for Large-Scale Logistic Regression"
of <NAME>, <NAME>, and <NAME>
(http://www.machinelearning.org/proceedings/icml2007/papers/114.pdf)
This is the same algorithm used in the liblinear library.
"""
def __init__(
self, X, y,
classZeroLabel,
C=1.0,
eps=0.001,
maxIters=1000,
splitLimit=1000000):
self.X = X
self.y = y
self.classZeroLabel = classZeroLabel
self.C = float(C)
self.eps = TrustRegionConjugateGradientSolver.computeEps(
eps, y, classZeroLabel)
self.maxIters = maxIters
self.nFeatures = X.shape[1]
self.nSamples = X.shape[0]
self.splitLimit = splitLimit
self.eta0 = 1e-4
self.eta1 = 0.25
self.eta2 = 0.75
self.sigma1 = 0.25
self.sigma2 = 0.5
self.sigma3 = 4.0
self.xi = 0.1
@staticmethod
def computeEps(eps, y, classZeroLabel):
def signFunc(elt):
if elt == classZeroLabel:
return 1.0
return 0.0
numClassZeros = sum(signFunc(elt) for elt in y)
numClassOnes = len(y) - numClassZeros
return eps * max(min(numClassZeros, numClassOnes), 1.0) / float(len(y))
def normalized_y_value(self, ix):
if self.y[ix] == self.classZeroLabel:
return 1.0
return -1.0
def solve(self, weights=None):
if weights is None:
weights = numpy.zeros(self.X.shape[1])
normGradientAtZeroWeights = self.normGradientAtZeroWeights()
objectiveFun = ObjectiveFunctionAtWeights(
self.X,
self.normalized_y_value,
self.C,
weights)
gradient = objectiveFun.gradient()
normGradient = self.norm(gradient)
delta = normGradient
solverState = SolverState(
objectiveFun=objectiveFun,
gradient=gradient,
normGradient=normGradient,
delta=delta)
while solverState.iterationIx < self.maxIters and \
solverState.normGradient > self.eps * normGradientAtZeroWeights:
solverState = self.update(solverState)
return ReturnValue(
weights=solverState.objectiveFun.w,
iterations=solverState.iterationIx - 1
)
def update(self, solverState):
step, r = self.trustRegionConjugateGradientSearch(
solverState.objectiveFun,
solverState.gradient,
solverState.normGradient,
solverState.delta)
candidateObjectiveFun = solverState.objectiveFun.withWeights(
solverState.objectiveFun.w + step)
gradientDotStep = solverState.gradient.dot(step)
estimatedFunctionChange = -0.5 * (gradientDotStep - step.dot(r))
actualFunctionChange = \
solverState.objectiveFun.value() - candidateObjectiveFun.value()
if actualFunctionChange > self.eta0 * estimatedFunctionChange:
newObjectiveFun = candidateObjectiveFun
newGradient = candidateObjectiveFun.gradient()
newNormGradient = self.norm(newGradient)
else:
newObjectiveFun = solverState.objectiveFun
newGradient = solverState.gradient
newNormGradient = solverState.normGradient
newDelta = self.updateDelta(
solverState.delta,
gradientDotStep,
solverState.iterationIx,
step,
actualFunctionChange,
estimatedFunctionChange
)
return SolverState(
objectiveFun=newObjectiveFun,
gradient=newGradient,
normGradient=newNormGradient,
delta=newDelta,
iterationIx=solverState.iterationIx + 1)
def normGradientAtZeroWeights(self):
nSamples = len(self.X)
return math.sqrt(
sum(
(-0.5 * self.C * \
sum(self.normalized_y_value(ix) * column[ix] for \
ix in xrange(nSamples))) ** 2.0 \
for column in self.X.columns()
)
)
def updateDelta(
self,
delta,
gradientDotStep,
iterationIx,
step,
actualFunctionChange,
estimatedFunctionChange):
stepNorm = self.norm(step)
if iterationIx == 1:
delta = min(delta, stepNorm)
if -actualFunctionChange - gradientDotStep <= 0:
alpha = self.sigma3
else:
alpha = max(
self.sigma1,
-0.5 * (gradientDotStep / \
(-actualFunctionChange - gradientDotStep))
)
if actualFunctionChange < self.eta0 * estimatedFunctionChange:
return min(max(alpha, self.sigma1) * stepNorm,
self.sigma2 * delta)
elif actualFunctionChange < self.eta1 * estimatedFunctionChange:
return max(self.sigma1 * delta,
min(alpha * stepNorm, self.sigma2 * delta))
elif actualFunctionChange < self.eta2 * estimatedFunctionChange:
return max(self.sigma1 * delta,
min(alpha * stepNorm, self.sigma3 * delta))
else:
return max(delta,
min(alpha * stepNorm, self.sigma3 * delta))
def trustRegionConjugateGradientSearch(
self,
objectiveFun,
gradient,
normGradient,
delta):
step = numpy.zeros(self.nFeatures)
r = -gradient
r_norm_squared = r.dot(r)
d = r
Hd = objectiveFun.hessian_dot_vec(d)
iters = 1
while iters < self.maxIters and \
math.sqrt(r_norm_squared) > self.xi * normGradient:
alpha = r_norm_squared / d.dot(Hd)
step = step + (d * alpha)
if self.norm(step) >= delta:
return self.touchedTrustRegion(step, d, alpha, delta, Hd, r)
r = r - (Hd * alpha)
old_r_norm_squared = r_norm_squared
r_norm_squared = r.dot(r)
beta = r_norm_squared / old_r_norm_squared
d = r + (d * beta)
Hd = objectiveFun.hessian_dot_vec(d)
return step, r
def touchedTrustRegion(self, step, d, alpha, delta, Hd, r):
step = (d * -alpha) + step
step_dot_d = step.dot(d)
norm_squared_step = step.dot(step)
norm_squared_d = d.dot(d)
deltaSquared = delta * delta
rad = math.sqrt(step_dot_d * step_dot_d + \
norm_squared_d * (deltaSquared - norm_squared_step))
if step_dot_d >= 0.0:
alpha = (deltaSquared - norm_squared_step) / (step_dot_d + rad)
else:
alpha = (rad - step_dot_d) / norm_squared_d
step = (d * alpha) + step
r = (Hd * -alpha) + r
return step, r
def norm(self, vec):
return math.sqrt(vec.dot(vec))
class SolverState(object):
def __init__(
self,
objectiveFun,
gradient,
normGradient,
delta,
iterationIx=1):
self.objectiveFun = objectiveFun
self.gradient = gradient
self.normGradient = normGradient
self.delta = delta
self.iterationIx = iterationIx
class ObjectiveFunctionAtWeights(object):
    def __init__(self, X, normalized_y_value, regularizer, weights):
        self.X = X
        self.normalized_y_value = normalized_y_value
        self.C = regularizer
self.w = numpy.array(weights)
self.Xw = X.dot(weights)
def withWeights(self, newWeights):
return ObjectiveFunctionAtWeights(
self.X,
self.normalized_y_value,
self.C,
newWeights
)
def value(self):
return 0.5 * self.w.dot(self.w) + self.C * sum(
math.log(1.0 + math.exp(-self.normalized_y_value(ix) * self.Xw[ix])) \
for ix in xrange(len(self.Xw))
)
def sigma(self, t):
return 1.0 / (1.0 + math.exp(-t))
def gradient(self):
def rowMultiplier(rowIx):
y_rowIx = self.normalized_y_value(rowIx)
return (self.sigma(y_rowIx * self.Xw[rowIx]) - 1) * y_rowIx
rowMultipliers = [rowMultiplier(ix) for ix in xrange(len(self.X))]
tr = numpy.array(
[self.C * column.dot(rowMultipliers) for column in self.X.columns()]
)
tr = tr + self.w
return tr
def hessian_dot_vec(self, v):
# Hess = I + C * X^t * D * X
Xv = self.X.dot(v)
def D_fun(ix):
sigma = self.sigma(self.normalized_y_value(ix) * self.Xw[ix])
return sigma * (1 - sigma)
DXv = [Xv[ix] * D_fun(ix) for ix in xrange(len(Xv))]
# this part doesn't seem so natural. What if we had a row major self.X?
tr = numpy.array(
[self.C * column.dot(DXv) for column in self.X.columns()]
)
tr = tr + v
return tr
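# Illustrative usage sketch (assumptions: X is a column-major matrix exposing
# .shape, .dot(vec), .columns() and len() as used above, y is a label sequence,
# and the ReturnValue from Solver exposes .weights and .iterations):
#
#     solver = TrustRegionConjugateGradientSolver(X, y, classZeroLabel=1.0, C=1.0)
#     fit = solver.solve()
#     fit.weights, fit.iterations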
|
src/transformers/models/wav2vec2/convert_wav2vec2_original_s3prl_checkpoint_to_pytorch.py | liminghao1630/transformers | 8,028 | 11069861 | <reponame>liminghao1630/transformers
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Hubert checkpoint."""
import argparse
import torch
from transformers import (
Wav2Vec2Config,
Wav2Vec2FeatureExtractor,
Wav2Vec2ForAudioFrameClassification,
Wav2Vec2ForSequenceClassification,
Wav2Vec2ForXVector,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
model.projector.weight.data = downstream_dict["projector.weight"]
model.projector.bias.data = downstream_dict["projector.bias"]
model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
model.classifier.weight.data = downstream_dict["model.linear.weight"]
model.classifier.bias.data = downstream_dict["model.linear.bias"]
return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
model.projector.weight.data = downstream_dict["connector.weight"]
model.projector.bias.data = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel):
model.tdnn[i].kernel.weight.data = downstream_dict[
f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
]
model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
model.objective.weight.data = downstream_dict["objective.W"]
return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
"""
Copy/paste/tweak model's weights to transformers design.
"""
checkpoint = torch.load(checkpoint_path, map_location="cpu")
downstream_dict = checkpoint["Downstream"]
hf_config = Wav2Vec2Config.from_pretrained(config_path)
hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
base_model_name, return_attention_mask=True, do_normalize=False
)
arch = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification"):
hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
elif arch.endswith("ForAudioFrameClassification"):
hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
elif arch.endswith("ForXVector"):
hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
else:
raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
if hf_config.use_weighted_layer_sum:
hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(model_dump_path)
hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
args = parser.parse_args()
convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
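    # Illustrative invocation (the model name and paths below are placeholders,
    # not values shipped with this script):
    #
    #   python convert_wav2vec2_original_s3prl_checkpoint_to_pytorch.py \
    #       --base_model_name facebook/wav2vec2-base \
    #       --config_path ./config.json \
    #       --checkpoint_path ./s3prl_checkpoint.ckpt \
    #       --model_dump_path ./converted_model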
|
tools/agile-machine-learning-api/train.py | ruchirjain86/professional-services | 2,116 | 11069871 | <filename>tools/agile-machine-learning-api/train.py
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
API framework to post a training job
"""
import os
import yaml
from googleapiclient import discovery
def post(
cfg,
train_csv_path,
eval_csv_path,
task_type,
target_var,
data_type,
column_name,
na_values,
condition,
n_classes,
to_drop,
name,
hidden_units,
num_layers,
lin_opt,
deep_opt,
train_steps,
export_dir,
jobid):
"""
Post request to submit the training job
Args:
cfg: dict, Configurations from yaml file
train_csv_path: string, Path of the Train csv
eval_csv_path: string, Path of the Eval csv
task_type: string, Type of the task (eg LinearClassifier etc.)
target_var: string, Target column name in the given data
data_type: dict, A dictionary containing feature names as key and values as the types of the feature
column_name: list of strings, Column names in the given data
na_values: string, Null value character in the data
condition: string, Condition to convert seperate classes in the target column
n_classes: integer, Number of classes in target column
to_drop: list of strings, Specific columns to drop
name: string, Name of the model you want to use
hidden_units: integer, No. of hidden units for deep classifiers and regressors
num_layers: integer, No of layers for deep classifiers and regressors
lin_opt: string, Linear Optimizer
deep_opt: string, Deep Optimizer
job_dir: string, Job directory for CMLE job
train_steps: integer, No. of training steps
export_dir: string, Export directory of trained model
jobid: string, Job ID of the training
Returns:
Response of the Training job
"""
with open('config/train.yaml', 'rb') as config_yml:
        train_cfg = yaml.safe_load(config_yml)
project_id = 'projects/{}'.format(cfg['project_id'])
cloudml = discovery.build('ml', 'v1')
params = [
'--train_csv_path', train_csv_path,
'--eval_csv_path', eval_csv_path,
'--task_type', task_type,
'--target_var', target_var,
'--data_type', data_type,
'--column_name', column_name,
'--na_values', na_values,
'--condition', condition,
'--n_classes', n_classes,
'--to_drop', to_drop,
'--name', name,
'--hidden_units', hidden_units,
'--num_layers', num_layers,
'--lin_opt', lin_opt,
'--deep_opt', deep_opt,
'--train_steps', train_steps,
'--export_dir', export_dir
]
current_models = [
'linearclassifier',
'linearregressor',
'dnnclassifier',
'dnnregressor',
'combinedclassifier',
'combinedregressor'
]
if name not in current_models:
raise AssertionError(
'Please provide a model name from the following : {}'.format(
str(current_models)))
training_inputs = {
'scaleTier': train_cfg['scaleTier'],
'masterType': train_cfg['masterType'],
'workerType': train_cfg['workerType'],
'parameterServerType': train_cfg['parameterServerType'],
'workerCount': train_cfg['workerCount'],
'parameterServerCount': train_cfg['parameterServerCount'],
'packageUris': train_cfg['packageUris'],
'pythonModule': "trainer.launch_demo",
'args': params,
'region': train_cfg['region'],
'jobDir': os.path.join(train_cfg['jobDir'], jobid),
'runtimeVersion': train_cfg['runtimeVersion'],
'pythonVersion': train_cfg['pythonVersion']
}
job_spec = {'jobId': jobid, 'trainingInput': training_inputs}
response = cloudml.projects().jobs().create(body=job_spec,
parent=project_id).execute()
return response
|
pyNastran/bdf/bdf_interface/test/test_dev_utils.py | ACea15/pyNastran | 293 | 11069891 | import os
import unittest
import pyNastran
from pyNastran.bdf.mesh_utils.dev.create_vectorized_numbered import create_vectorized_numbered
PKG_PATH = pyNastran.__path__[0]
MODEL_PATH = os.path.join(PKG_PATH, '../', 'models')
class DevUtils(unittest.TestCase):
"""tests various dev functions"""
def test_convert_bdf(self):
"""tests create_vectorized_numbered"""
bdf_filename_in = os.path.join(MODEL_PATH, 'elements', 'static_elements.bdf')
bdf_filename_out = os.path.join(MODEL_PATH, 'elements', 'static_elements_convert.bdf')
create_vectorized_numbered(bdf_filename_in, bdf_filename_out, debug=False)
os.remove(bdf_filename_out)
if __name__ == '__main__': # pragma: no cover
unittest.main()
|
mmfewshot/detection/models/backbones/resnet_with_meta_conv.py | BIGWangYuDong/mmfewshot | 376 | 11069901 | <filename>mmfewshot/detection/models/backbones/resnet_with_meta_conv.py<gh_stars>100-1000
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
from mmcv.cnn import build_conv_layer
from mmdet.models import ResNet
from mmdet.models.builder import BACKBONES
from torch import Tensor
@BACKBONES.register_module()
class ResNetWithMetaConv(ResNet):
"""ResNet with `meta_conv` to handle different inputs in metarcnn and
fsdetview.
    When the input has shape (N, 3, H, W) (plain images), the network uses
    `conv1` as in a regular ResNet. When the input has shape (N, 4, H, W)
    (image + mask), the network replaces `conv1` with `meta_conv` to handle
    the additional channel.
"""
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
self.meta_conv = build_conv_layer(
self.conv_cfg, # from config of ResNet
4,
64,
kernel_size=7,
stride=2,
padding=3,
bias=False)
def forward(self, x: Tensor, use_meta_conv: bool = False) -> Tuple[Tensor]:
"""Forward function.
        When the input has shape (N, 3, H, W) (plain images), the network uses
        `conv1` as in a regular ResNet. When the input has shape (N, 4, H, W)
        (image + mask), the network replaces `conv1` with `meta_conv` to
        handle the additional channel.
Args:
x (Tensor): Tensor with shape (N, 3, H, W) from images
or (N, 4, H, W) from (images + masks).
use_meta_conv (bool): If set True, forward input tensor with
`meta_conv` which require tensor with shape (N, 4, H, W).
Otherwise, forward input tensor with `conv1` which require
tensor with shape (N, 3, H, W). Default: False.
Returns:
tuple[Tensor]: Tuple of features, each item with
shape (N, C, H, W).
"""
if use_meta_conv:
x = self.meta_conv(x)
else:
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
x = self.maxpool(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
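    # Illustrative forward pass (a sketch, assuming the usual mmdet ResNet kwargs
    # such as depth; tensor shapes follow the docstring above):
    #
    #     backbone = ResNetWithMetaConv(depth=50)
    #     img_feats = backbone(torch.rand(2, 3, 224, 224))                      # uses conv1
    #     msk_feats = backbone(torch.rand(2, 4, 224, 224), use_meta_conv=True)  # uses meta_conv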
|
Lib/objc/_Engram.py | snazari/Pyto | 701 | 11069907 | <filename>Lib/objc/_Engram.py
"""
Classes from the 'Engram' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
ENCypher_AES128 = _Class("ENCypher_AES128")
ENCypher = _Class("ENCypher")
ENParticipantDevice = _Class("ENParticipantDevice")
ENParticipant = _Class("ENParticipant")
ENAsyncReducerState = _Class("ENAsyncReducerState")
ENAsyncReducer = _Class("ENAsyncReducer")
_ENGroupInfo = _Class("_ENGroupInfo")
ENGroup = _Class("ENGroup")
ENLog = _Class("ENLog")
ENAccountIdentity = _Class("ENAccountIdentity")
ENGroupContext = _Class("ENGroupContext")
ENGroupContextNotifyingObserver = _Class("ENGroupContextNotifyingObserver")
ENGroupContextInMemoryCache = _Class("ENGroupContextInMemoryCache")
ENStableGroupID = _Class("ENStableGroupID")
ENGroupID = _Class("ENGroupID")
ENPair = _Class("ENPair")
ENKeyClassRegister = _Class("ENKeyClassRegister")
ENGroupContextCoreDataCache = _Class("ENGroupContextCoreDataCache")
ENKeyedArchiverFromDataTransformer = _Class("ENKeyedArchiverFromDataTransformer")
ENCDGroup = _Class("ENCDGroup")
|
chatbotv5/demo.py | drpreetyrai/ChatBotCourse | 5,087 | 11069919 | <filename>chatbotv5/demo.py
# coding:utf-8
# author: lichuang
# mail: <EMAIL>
import sys
import numpy as np
import tensorflow as tf
from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq
import word_token
import jieba
import random
# Input sequence length
input_seq_len = 5
# Output sequence length
output_seq_len = 5
# Empty slots are padded with 0
PAD_ID = 0
# Start-of-output-sequence marker
GO_ID = 1
# End-of-sequence marker
EOS_ID = 2
# LSTM cell size
size = 8
# Initial learning rate
init_learning_rate = 1
# A word enters the vocabulary only if it appears more than this many times in the samples
min_freq = 10
wordToken = word_token.WordToken()
# Kept at module scope so that num_encoder_symbols and num_decoder_symbols can be computed dynamically
max_token_id = wordToken.load_file_list(['./samples/question', './samples/answer'], min_freq)
num_encoder_symbols = max_token_id + 5
num_decoder_symbols = max_token_id + 5
def get_id_list_from(sentence):
sentence_id_list = []
seg_list = jieba.cut(sentence)
for str in seg_list:
id = wordToken.word2id(str)
if id:
sentence_id_list.append(wordToken.word2id(str))
return sentence_id_list
def get_train_set():
global num_encoder_symbols, num_decoder_symbols
train_set = []
with open('./samples/question', 'r') as question_file:
with open('./samples/answer', 'r') as answer_file:
while True:
question = question_file.readline()
answer = answer_file.readline()
if question and answer:
question = question.strip()
answer = answer.strip()
question_id_list = get_id_list_from(question)
answer_id_list = get_id_list_from(answer)
if len(question_id_list) > 0 and len(answer_id_list) > 0:
answer_id_list.append(EOS_ID)
train_set.append([question_id_list, answer_id_list])
else:
break
return train_set
def get_samples(train_set, batch_num):
"""构造样本数据
:return:
encoder_inputs: [array([0, 0], dtype=int32), array([0, 0], dtype=int32), array([5, 5], dtype=int32),
array([7, 7], dtype=int32), array([9, 9], dtype=int32)]
decoder_inputs: [array([1, 1], dtype=int32), array([11, 11], dtype=int32), array([13, 13], dtype=int32),
array([15, 15], dtype=int32), array([2, 2], dtype=int32)]
"""
# train_set = [[[5, 7, 9], [11, 13, 15, EOS_ID]], [[7, 9, 11], [13, 15, 17, EOS_ID]], [[15, 17, 19], [21, 23, 25, EOS_ID]]]
raw_encoder_input = []
raw_decoder_input = []
if batch_num >= len(train_set):
batch_train_set = train_set
else:
random_start = random.randint(0, len(train_set)-batch_num)
batch_train_set = train_set[random_start:random_start+batch_num]
for sample in batch_train_set:
raw_encoder_input.append([PAD_ID] * (input_seq_len - len(sample[0])) + sample[0])
raw_decoder_input.append([GO_ID] + sample[1] + [PAD_ID] * (output_seq_len - len(sample[1]) - 1))
encoder_inputs = []
decoder_inputs = []
target_weights = []
for length_idx in xrange(input_seq_len):
encoder_inputs.append(np.array([encoder_input[length_idx] for encoder_input in raw_encoder_input], dtype=np.int32))
for length_idx in xrange(output_seq_len):
decoder_inputs.append(np.array([decoder_input[length_idx] for decoder_input in raw_decoder_input], dtype=np.int32))
target_weights.append(np.array([
0.0 if length_idx == output_seq_len - 1 or decoder_input[length_idx] == PAD_ID else 1.0 for decoder_input in raw_decoder_input
], dtype=np.float32))
return encoder_inputs, decoder_inputs, target_weights
def seq_to_encoder(input_seq):
"""从输入空格分隔的数字id串,转成预测用的encoder、decoder、target_weight等
"""
input_seq_array = [int(v) for v in input_seq.split()]
encoder_input = [PAD_ID] * (input_seq_len - len(input_seq_array)) + input_seq_array
decoder_input = [GO_ID] + [PAD_ID] * (output_seq_len - 1)
encoder_inputs = [np.array([v], dtype=np.int32) for v in encoder_input]
decoder_inputs = [np.array([v], dtype=np.int32) for v in decoder_input]
target_weights = [np.array([1.0], dtype=np.float32)] * output_seq_len
return encoder_inputs, decoder_inputs, target_weights
def get_model(feed_previous=False):
"""构造模型
"""
learning_rate = tf.Variable(float(init_learning_rate), trainable=False, dtype=tf.float32)
learning_rate_decay_op = learning_rate.assign(learning_rate * 0.9)
encoder_inputs = []
decoder_inputs = []
target_weights = []
for i in xrange(input_seq_len):
encoder_inputs.append(tf.placeholder(tf.int32, shape=[None], name="encoder{0}".format(i)))
for i in xrange(output_seq_len + 1):
decoder_inputs.append(tf.placeholder(tf.int32, shape=[None], name="decoder{0}".format(i)))
for i in xrange(output_seq_len):
target_weights.append(tf.placeholder(tf.float32, shape=[None], name="weight{0}".format(i)))
    # targets are decoder_inputs shifted left by one time step
targets = [decoder_inputs[i + 1] for i in xrange(output_seq_len)]
cell = tf.contrib.rnn.BasicLSTMCell(size)
    # the returned state is not needed here
outputs, _ = seq2seq.embedding_attention_seq2seq(
encoder_inputs,
decoder_inputs[:output_seq_len],
cell,
num_encoder_symbols=num_encoder_symbols,
num_decoder_symbols=num_decoder_symbols,
embedding_size=size,
output_projection=None,
feed_previous=feed_previous,
dtype=tf.float32)
    # weighted cross-entropy loss
    loss = seq2seq.sequence_loss(outputs, targets, target_weights)
    # gradient descent optimizer
    opt = tf.train.GradientDescentOptimizer(learning_rate)
    # optimization objective: minimize the loss
    update = opt.apply_gradients(opt.compute_gradients(loss))
    # model persistence
    saver = tf.train.Saver(tf.global_variables())
saver = tf.train.Saver(tf.global_variables())
return encoder_inputs, decoder_inputs, target_weights, outputs, loss, update, saver, learning_rate_decay_op, learning_rate
def train():
"""
    Training procedure
"""
# train_set = [[[5, 7, 9], [11, 13, 15, EOS_ID]], [[7, 9, 11], [13, 15, 17, EOS_ID]],
# [[15, 17, 19], [21, 23, 25, EOS_ID]]]
train_set = get_train_set()
with tf.Session() as sess:
encoder_inputs, decoder_inputs, target_weights, outputs, loss, update, saver, learning_rate_decay_op, learning_rate = get_model()
        # initialize all variables
        sess.run(tf.global_variables_initializer())
        # train for many iterations, printing the loss every 10 steps; stop early with ctrl+c if desired
previous_losses = []
for step in xrange(20000):
sample_encoder_inputs, sample_decoder_inputs, sample_target_weights = get_samples(train_set, 1000)
input_feed = {}
for l in xrange(input_seq_len):
input_feed[encoder_inputs[l].name] = sample_encoder_inputs[l]
for l in xrange(output_seq_len):
input_feed[decoder_inputs[l].name] = sample_decoder_inputs[l]
input_feed[target_weights[l].name] = sample_target_weights[l]
input_feed[decoder_inputs[output_seq_len].name] = np.zeros([len(sample_decoder_inputs[0])], dtype=np.int32)
[loss_ret, _] = sess.run([loss, update], input_feed)
if step % 10 == 0:
print 'step=', step, 'loss=', loss_ret, 'learning_rate=', learning_rate.eval()
if len(previous_losses) > 5 and loss_ret > max(previous_losses[-5:]):
sess.run(learning_rate_decay_op)
previous_losses.append(loss_ret)
                # persist the model
saver.save(sess, './model/demo')
def predict():
"""
    Prediction procedure
"""
with tf.Session() as sess:
encoder_inputs, decoder_inputs, target_weights, outputs, loss, update, saver, learning_rate_decay_op, learning_rate = get_model(feed_previous=True)
saver.restore(sess, './model/demo')
sys.stdout.write("> ")
sys.stdout.flush()
input_seq = sys.stdin.readline()
while input_seq:
input_seq = input_seq.strip()
input_id_list = get_id_list_from(input_seq)
if (len(input_id_list)):
sample_encoder_inputs, sample_decoder_inputs, sample_target_weights = seq_to_encoder(' '.join([str(v) for v in input_id_list]))
input_feed = {}
for l in xrange(input_seq_len):
input_feed[encoder_inputs[l].name] = sample_encoder_inputs[l]
for l in xrange(output_seq_len):
input_feed[decoder_inputs[l].name] = sample_decoder_inputs[l]
input_feed[target_weights[l].name] = sample_target_weights[l]
input_feed[decoder_inputs[output_seq_len].name] = np.zeros([2], dtype=np.int32)
                # run the prediction
                outputs_seq = sess.run(outputs, input_feed)
                # each output is num_decoder_symbols-dimensional, so the index with the largest value (argmax) is the predicted id
                outputs_seq = [int(np.argmax(logit[0], axis=0)) for logit in outputs_seq]
                # if the EOS marker appears, drop everything after it
if EOS_ID in outputs_seq:
outputs_seq = outputs_seq[:outputs_seq.index(EOS_ID)]
outputs_seq = [wordToken.id2word(v) for v in outputs_seq]
print " ".join(outputs_seq)
else:
print "WARN:词汇不在服务区"
sys.stdout.write("> ")
sys.stdout.flush()
input_seq = sys.stdin.readline()
if __name__ == "__main__":
if sys.argv[1] == 'train':
train()
else:
predict()
|
vkbottle/dispatch/__init__.py | homus32/vkbottle | 698 | 11069947 | from .abc import ABCRouter
from .bot_router import BotRouter
from .dispenser import ABCStateDispenser, BuiltinStateDispenser
from .handlers import ABCHandler
from .middlewares import BaseMiddleware, MiddlewareResponse
from .return_manager import BaseReturnManager
from .rules import ABCRule, ABCFilter, AndFilter, OrFilter
from .views import ABCView, ABCDispenseView, ABCMessageView, MessageView, RawEventView
|
tests/fixtures/example.py | tony/vcspull | 169 | 11069965 | <reponame>tony/vcspull<filename>tests/fixtures/example.py<gh_stars>100-1000
import os
config_dict = {
"/home/me/myproject/study/": {
"linux": "git+git://git.kernel.org/linux/torvalds/linux.git",
"freebsd": "git+https://github.com/freebsd/freebsd.git",
"sphinx": "hg+https://bitbucket.org/birkenfeld/sphinx",
"docutils": "svn+http://svn.code.sf.net/p/docutils/code/trunk",
},
"/home/me/myproject/github_projects/": {
"kaptan": {
"url": "git+git<EMAIL>:tony/kaptan.git",
"remotes": {
"upstream": "git+https://github.com/emre/kaptan",
"ms": "git+https://github.com/ms/kaptan.git",
},
}
},
"/home/me/myproject": {
".vim": {
"url": "git+git<EMAIL>:tony/vim-config.git",
"shell_command_after": "ln -sf /home/me/.vim/.vimrc /home/me/.vimrc",
},
".tmux": {
"url": "git+git@<EMAIL>.com:tony/tmux-config.git",
"shell_command_after": [
"ln -sf /home/me/.tmux/.tmux.conf /home/me/.tmux.conf"
],
},
},
}
config_dict_expanded = [
{
"name": "linux",
"parent_dir": "/home/me/myproject/study/",
"dir": os.path.join("/home/me/myproject/study/", "linux"),
"url": "git+git://git.kernel.org/linux/torvalds/linux.git",
},
{
"name": "freebsd",
"parent_dir": "/home/me/myproject/study/",
"dir": os.path.join("/home/me/myproject/study/", "freebsd"),
"url": "git+https://github.com/freebsd/freebsd.git",
},
{
"name": "sphinx",
"parent_dir": "/home/me/myproject/study/",
"dir": os.path.join("/home/me/myproject/study/", "sphinx"),
"url": "hg+https://bitbucket.org/birkenfeld/sphinx",
},
{
"name": "docutils",
"parent_dir": "/home/me/myproject/study/",
"dir": os.path.join("/home/me/myproject/study/", "docutils"),
"url": "svn+http://svn.code.sf.net/p/docutils/code/trunk",
},
{
"name": "kaptan",
"url": "<EMAIL>+<EMAIL>:tony/kaptan.git",
"parent_dir": "/home/me/myproject/github_projects/",
"dir": os.path.join("/home/me/myproject/github_projects/", "kaptan"),
"remotes": [
{"remote_name": "upstream", "url": "git+https://github.com/emre/kaptan"},
{"remote_name": "ms", "url": "git+https://github.com/ms/kaptan.git"},
],
},
{
"name": ".vim",
"parent_dir": "/home/me/myproject",
"dir": os.path.join("/home/me/myproject", ".vim"),
"url": "<EMAIL>+<EMAIL>:tony/vim-config.git",
"shell_command_after": ["ln -sf /home/me/.vim/.vimrc /home/me/.vimrc"],
},
{
"name": ".tmux",
"parent_dir": "/home/me/myproject",
"dir": os.path.join("/home/me/myproject", ".tmux"),
"url": "<EMAIL>+<EMAIL>:tony/tmux-config.git",
"shell_command_after": ["ln -sf /home/me/.tmux/.tmux.conf /home/me/.tmux.conf"],
},
]
|
examples/exploding_logo.py | salt-die/nurses_2 | 171 | 11069967 | """
Credit for ascii art logo to <NAME> (https://ascii.matthewbarber.io/art/python/)
Directions:
'esc' to quit
'r' to reset
'click' to poke
"""
import asyncio
import numpy as np
from nurses_2.app import App
from nurses_2.colors import foreground_rainbow
from nurses_2.io import MouseButton
from nurses_2.widgets.particle_field.text_field import (
TextParticleField,
TextParticle,
)
LOGO = """
_.gj8888888lkoz.,_
d888888888888888888888b,
j88P""V8888888888888888888
888 8888888888888888888
888baed8888888888888888888
88888888888888888888888888
8888888888888
,ad8888888888888888888888888888888888 888888be,
d8888888888888888888888888888888888888 888888888b,
d88888888888888888888888888888888888888 8888888888b,
j888888888888888888888888888888888888888 88888888888p,
j888888888888888888888888888888888888888' 8888888888888
8888888888888888888888888888888888888^" ,8888888888888
88888888888888^' .d88888888888888
8888888888888" .a8888888888888888888888888888888888888
8888888888888 ,888888888888888888888888888888888888888^
^888888888888 888888888888888888888888888888888888888^
V88888888888 88888888888888888888888888888888888888Y
V8888888888 8888888888888888888888888888888888888Y
`"^8888888 8888888888888888888888888888888888^"'
8888888888888
88888888888888888888888888
8888888888888888888P""V888
8888888888888888888 888
8888888888888888888baed88V
`^888888888888888888888^
`'"^^V888888888V^^'
"""
HEIGHT, WIDTH = 28, 56
POWER = 2
MAX_PARTICLE_SPEED = 10
FRICTION = .97
NCOLORS = 100
RAINBOW = foreground_rainbow(NCOLORS)
BLUE_INDEX = round(.65 * NCOLORS)
YELLOW_INDEX = round(.1 * NCOLORS)
COLOR_CHANGE_SPEED = 5
PERCENTS = tuple(np.linspace(0, 1, 30))
class PokeParticle(TextParticle):
def __init__(self, color_index, **kwargs):
self.color_index = color_index
super().__init__(color_pair=RAINBOW[color_index], **kwargs)
self.middle_row = self.middle_column = 0
self.original_position = self.pos
self.position = complex(self.top, self.left)
self.velocity = 0j
self._update_task = self._reset_task = asyncio.create_task(asyncio.sleep(0)) # dummy task
def update_geometry(self):
"""
Re-position towards center of parent's canvas.
"""
old_middle_row = self.middle_row
old_middle_column = self.middle_column
parent_middle_row, parent_middle_column = self.parent.center
self.middle_row = parent_middle_row - HEIGHT // 2
self.middle_column = parent_middle_column - WIDTH // 2
move_vertical = self.middle_row - old_middle_row
move_horizontal = self.middle_column - old_middle_column
o_top, o_left = self.original_position
o_top += move_vertical
o_left += move_horizontal
self.original_position = o_top, o_left
self.position += complex(move_vertical, move_horizontal)
self.top += move_vertical
self.left += move_horizontal
def on_click(self, mouse_event):
if mouse_event.button == MouseButton.LEFT:
if dyx := -complex(*self.to_local(mouse_event.position)):
self.velocity += POWER * dyx / (dyx.real**2 + dyx.imag**2)
if self._update_task.done():
self._reset_task.cancel()
self._update_task = asyncio.create_task(self.update())
def on_press(self, key_press_event):
if key_press_event.key == "r" and self._reset_task.done():
self._reset_task = asyncio.create_task(self.reset())
async def update(self):
"""
Coroutine that updates color and position due to velocity.
"""
parent = self.parent
color_index = RAINBOW.index(self.color_pair)
while True:
velocity = self.velocity
speed = abs(velocity)
if speed < .001:
return
color_index = round(color_index + min(speed, MAX_PARTICLE_SPEED) * COLOR_CHANGE_SPEED) % NCOLORS
self.color_pair = RAINBOW[color_index]
if speed > MAX_PARTICLE_SPEED:
velocity *= MAX_PARTICLE_SPEED / speed
self.position += velocity
position = self.position
self.top = top = round(position.real)
self.left = left = round(position.imag)
if (
top < 0 and velocity.real < 0
or top >= parent.height and velocity.real > 0
):
velocity = -velocity.conjugate()
if (
left < 0 and velocity.imag < 0
or left >= parent.width and velocity.imag > 0
):
velocity = velocity.conjugate()
self.velocity = velocity * FRICTION
try:
await asyncio.sleep(0)
except asyncio.CancelledError:
return
async def reset(self):
"""
Coroutine that returns a particle to its starting position with original color.
"""
self._update_task.cancel()
self.velocity = 0j
start_y, start_x = self.pos
end_y, end_x = self.original_position
start_color_index = RAINBOW.index(self.color_pair)
end_color_index = self.color_index
for percent in PERCENTS:
percent_left = 1 - percent
self.top = round(percent_left * start_y + percent * end_y)
self.left = round(percent_left * start_x + percent * end_x)
self.position = complex(self.top, self.left)
color_index = round(percent_left * start_color_index + percent * end_color_index)
self.color_pair = RAINBOW[color_index]
try:
await asyncio.sleep(0)
except asyncio.CancelledError:
return
class MyApp(App):
async def on_start(self):
# Create array of starting colors of particles
colors = np.full((HEIGHT, WIDTH), BLUE_INDEX)
colors[-7:] = colors[-13: -7, -41:] = YELLOW_INDEX
colors[-14, -17:] = colors[-20: -14, -15:] = YELLOW_INDEX
field = TextParticleField(size_hint=(1.0, 1.0))
# Create a Particle for each non-space character in the logo
field.add_widgets(
PokeParticle(pos=(y, x), char=char, color_index=colors[y, x])
for y, row in enumerate(LOGO.splitlines())
for x, char in enumerate(row)
if char != " "
)
self.add_widget(field)
MyApp().run()
|
modules/python/test/test_persistence.py | ghennadii/opencv | 163 | 11069973 | #!/usr/bin/env python
""""Core serializaion tests."""
import tempfile
import os
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests
class persistence_test(NewOpenCVTests):
def test_yml_rw(self):
fd, fname = tempfile.mkstemp(prefix="opencv_python_persistence_", suffix=".yml")
os.close(fd)
# Writing ...
expected = np.array([[[0, 1, 2, 3, 4]]])
expected_str = ("Hello", "World", "!")
fs = cv.FileStorage(fname, cv.FILE_STORAGE_WRITE)
fs.write("test", expected)
fs.write("strings", expected_str)
fs.release()
# Reading ...
fs = cv.FileStorage(fname, cv.FILE_STORAGE_READ)
root = fs.getFirstTopLevelNode()
self.assertEqual(root.name(), "test")
test = fs.getNode("test")
self.assertEqual(test.empty(), False)
self.assertEqual(test.name(), "test")
self.assertEqual(test.type(), cv.FILE_NODE_MAP)
self.assertEqual(test.isMap(), True)
actual = test.mat()
self.assertEqual(actual.shape, expected.shape)
self.assertEqual(np.array_equal(expected, actual), True)
strings = fs.getNode("strings")
self.assertEqual(strings.isSeq(), True)
self.assertEqual(strings.size(), len(expected_str))
self.assertEqual(all(strings.at(i).isString() for i in range(strings.size())), True)
self.assertSequenceEqual([strings.at(i).string() for i in range(strings.size())], expected_str)
fs.release()
os.remove(fname)
|
frameworks/CPLELearning.py | mrengler/semisup-learn | 101 | 11069994 | class Unbuffered(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
import sys
sys.stdout = Unbuffered(sys.stdout)
from sklearn.base import BaseEstimator
import numpy
import sklearn.metrics
from sklearn.linear_model import LogisticRegression as LR
import nlopt
import scipy.stats
class CPLELearningModel(BaseEstimator):
"""
Contrastive Pessimistic Likelihood Estimation framework for semi-supervised
learning, based on (Loog, 2015). This implementation contains two
significant differences to (Loog, 2015):
- the discriminative likelihood p(y|X), instead of the generative
likelihood p(X), is used for optimization
- apart from `pessimism' (the assumption that the true labels of the
unlabeled instances are as adversarial to the likelihood as possible), the
optimization objective also tries to increase the likelihood on the labeled
examples
This class takes a base model (any scikit learn estimator),
trains it on the labeled examples, and then uses global optimization to
find (soft) label hypotheses for the unlabeled examples in a pessimistic
fashion (such that the model log likelihood on the unlabeled data is as
small as possible, but the log likelihood on the labeled data is as high
as possible)
See Loog, Marco. "Contrastive Pessimistic Likelihood Estimation for
Semi-Supervised Classification." arXiv preprint arXiv:1503.00269 (2015).
http://arxiv.org/pdf/1503.00269
Attributes
----------
basemodel : BaseEstimator instance
Base classifier to be trained on the partially supervised data
pessimistic : boolean, optional (default=True)
Whether the label hypotheses for the unlabeled instances should be
pessimistic (i.e. minimize log likelihood) or optimistic (i.e.
maximize log likelihood).
Pessimistic label hypotheses ensure safety (i.e. the semi-supervised
solution will not be worse than a model trained on the purely
supervised instances)
predict_from_probabilities : boolean, optional (default=False)
The prediction is calculated from the probabilities if this is True
(1 if more likely than the mean predicted probability or 0 otherwise).
If it is false, the normal base model predictions are used.
This only affects the predict function. Warning: only set to true if
predict will be called with a substantial number of data points
use_sample_weighting : boolean, optional (default=True)
Whether to use sample weights (soft labels) for the unlabeled instances.
Setting this to False allows the use of base classifiers which do not
support sample weights (but might slow down the optimization)
max_iter : int, optional (default=3000)
Maximum number of iterations
verbose : int, optional (default=1)
Enable verbose output (1 shows progress, 2 shows the detailed log
likelihood at every iteration).
"""
def __init__(self, basemodel, pessimistic=True, predict_from_probabilities = False, use_sample_weighting = True, max_iter=3000, verbose = 1):
self.model = basemodel
self.pessimistic = pessimistic
self.predict_from_probabilities = predict_from_probabilities
self.use_sample_weighting = use_sample_weighting
self.max_iter = max_iter
self.verbose = verbose
self.it = 0 # iteration counter
self.noimprovementsince = 0 # log likelihood hasn't improved since this number of iterations
self.maxnoimprovementsince = 3 # threshold for iterations without improvements (convergence is assumed when this is reached)
self.buffersize = 200
# buffer for the last few discriminative likelihoods (used to check for convergence)
self.lastdls = [0]*self.buffersize
# best discriminative likelihood and corresponding soft labels; updated during training
self.bestdl = numpy.infty
self.bestlbls = []
# unique id
self.id = str(chr(numpy.random.randint(26)+97))+str(chr(numpy.random.randint(26)+97))
def discriminative_likelihood(self, model, labeledData, labeledy = None, unlabeledData = None, unlabeledWeights = None, unlabeledlambda = 1, gradient=[], alpha = 0.01):
unlabeledy = (unlabeledWeights[:, 0]<0.5)*1
uweights = numpy.copy(unlabeledWeights[:, 0]) # large prob. for k=0 instances, small prob. for k=1 instances
uweights[unlabeledy==1] = 1-uweights[unlabeledy==1] # subtract from 1 for k=1 instances to reflect confidence
weights = numpy.hstack((numpy.ones(len(labeledy)), uweights))
labels = numpy.hstack((labeledy, unlabeledy))
# fit model on supervised data
if self.use_sample_weighting:
model.fit(numpy.vstack((labeledData, unlabeledData)), labels, sample_weight=weights)
else:
model.fit(numpy.vstack((labeledData, unlabeledData)), labels)
# probability of labeled data
P = model.predict_proba(labeledData)
try:
# labeled discriminative log likelihood
labeledDL = -sklearn.metrics.log_loss(labeledy, P)
except Exception as e:
print(e)
P = model.predict_proba(labeledData)
# probability of unlabeled data
unlabeledP = model.predict_proba(unlabeledData)
try:
# unlabeled discriminative log likelihood
eps = 1e-15
unlabeledP = numpy.clip(unlabeledP, eps, 1 - eps)
unlabeledDL = numpy.average((unlabeledWeights*numpy.vstack((1-unlabeledy, unlabeledy)).T*numpy.log(unlabeledP)).sum(axis=1))
except Exception as e:
print(e)
unlabeledP = model.predict_proba(unlabeledData)
if self.pessimistic:
# pessimistic: minimize the difference between unlabeled and labeled discriminative likelihood (assume worst case for unknown true labels)
dl = unlabeledlambda * unlabeledDL - labeledDL
else:
# optimistic: minimize negative total discriminative likelihood (i.e. maximize likelihood)
dl = - unlabeledlambda * unlabeledDL - labeledDL
return dl
def discriminative_likelihood_objective(self, model, labeledData, labeledy = None, unlabeledData = None, unlabeledWeights = None, unlabeledlambda = 1, gradient=[], alpha = 0.01):
if self.it == 0:
self.lastdls = [0]*self.buffersize
dl = self.discriminative_likelihood(model, labeledData, labeledy, unlabeledData, unlabeledWeights, unlabeledlambda, gradient, alpha)
self.it += 1
self.lastdls[numpy.mod(self.it, len(self.lastdls))] = dl
if numpy.mod(self.it, self.buffersize) == 0: # or True:
            improvement = numpy.mean((self.lastdls[(len(self.lastdls)//2):])) - numpy.mean((self.lastdls[:(len(self.lastdls)//2)]))
            # ttest - test for hypothesis that the likelihoods have not changed (i.e. there has been no improvement, and we are close to convergence)
            _, prob = scipy.stats.ttest_ind(self.lastdls[(len(self.lastdls)//2):], self.lastdls[:(len(self.lastdls)//2)])
            # if improvement is not certain according to t-test...
            noimprovement = prob > 0.1 and numpy.mean(self.lastdls[(len(self.lastdls)//2):]) < numpy.mean(self.lastdls[:(len(self.lastdls)//2)])
if noimprovement:
self.noimprovementsince += 1
if self.noimprovementsince >= self.maxnoimprovementsince:
# no improvement since a while - converged; exit
self.noimprovementsince = 0
raise Exception(" converged.") # we need to raise an exception to get NLopt to stop before exceeding the iteration budget
else:
self.noimprovementsince = 0
if self.verbose == 2:
print(self.id,self.it, dl, numpy.mean(self.lastdls), improvement, round(prob, 3), (prob < 0.1))
elif self.verbose:
sys.stdout.write(('.' if self.pessimistic else '.') if not noimprovement else 'n')
if dl < self.bestdl:
self.bestdl = dl
self.bestlbls = numpy.copy(unlabeledWeights[:, 0])
return dl
def fit(self, X, y): # -1 for unlabeled
unlabeledX = X[y==-1, :]
labeledX = X[y!=-1, :]
labeledy = y[y!=-1]
M = unlabeledX.shape[0]
# train on labeled data
self.model.fit(labeledX, labeledy)
unlabeledy = self.predict(unlabeledX)
#re-train, labeling unlabeled instances pessimistically
# pessimistic soft labels ('weights') q for unlabelled points, q=P(k=0|Xu)
f = lambda softlabels, grad=[]: self.discriminative_likelihood_objective(self.model, labeledX, labeledy=labeledy, unlabeledData=unlabeledX, unlabeledWeights=numpy.vstack((softlabels, 1-softlabels)).T, gradient=grad) #- supLL
lblinit = numpy.random.random(len(unlabeledy))
try:
self.it = 0
opt = nlopt.opt(nlopt.GN_DIRECT_L_RAND, M)
opt.set_lower_bounds(numpy.zeros(M))
opt.set_upper_bounds(numpy.ones(M))
opt.set_min_objective(f)
opt.set_maxeval(self.max_iter)
self.bestsoftlbl = opt.optimize(lblinit)
print(" max_iter exceeded.")
except Exception as e:
print(e)
self.bestsoftlbl = self.bestlbls
if numpy.any(self.bestsoftlbl != self.bestlbls):
self.bestsoftlbl = self.bestlbls
ll = f(self.bestsoftlbl)
unlabeledy = (self.bestsoftlbl<0.5)*1
uweights = numpy.copy(self.bestsoftlbl) # large prob. for k=0 instances, small prob. for k=1 instances
uweights[unlabeledy==1] = 1-uweights[unlabeledy==1] # subtract from 1 for k=1 instances to reflect confidence
weights = numpy.hstack((numpy.ones(len(labeledy)), uweights))
labels = numpy.hstack((labeledy, unlabeledy))
if self.use_sample_weighting:
self.model.fit(numpy.vstack((labeledX, unlabeledX)), labels, sample_weight=weights)
else:
self.model.fit(numpy.vstack((labeledX, unlabeledX)), labels)
if self.verbose > 1:
print("number of non-one soft labels: ", numpy.sum(self.bestsoftlbl != 1), ", balance:", numpy.sum(self.bestsoftlbl<0.5), " / ", len(self.bestsoftlbl))
print("current likelihood: ", ll)
if not getattr(self.model, "predict_proba", None):
# Platt scaling
self.plattlr = LR()
preds = self.model.predict(labeledX)
self.plattlr.fit( preds.reshape( -1, 1 ), labeledy )
return self
def predict_proba(self, X):
"""Compute probabilities of possible outcomes for samples in X.
The model need to have probability information computed at training
time: fit with attribute `probability` set to True.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
if getattr(self.model, "predict_proba", None):
return self.model.predict_proba(X)
else:
preds = self.model.predict(X)
return self.plattlr.predict_proba(preds.reshape( -1, 1 ))
def predict(self, X):
"""Perform classification on samples in X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
y_pred : array, shape = [n_samples]
Class labels for samples in X.
"""
if self.predict_from_probabilities:
P = self.predict_proba(X)
return (P[:, 0]<numpy.average(P[:, 0]))
else:
return self.model.predict(X)
def score(self, X, y, sample_weight=None):
return sklearn.metrics.accuracy_score(y, self.predict(X), sample_weight=sample_weight)
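
# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration, not part of the original
# module). It assumes scikit-learn and nlopt are installed; the synthetic
# dataset and the choice of LogisticRegression as base model are arbitrary
# examples, not the author's prescribed setup.
if __name__ == "__main__":
    from sklearn.datasets import make_classification

    # build a toy binary problem and hide most of the labels (-1 = unlabeled)
    X, ytrue = make_classification(n_samples=200, n_features=10, random_state=1)
    y = ytrue.copy()
    y[50:] = -1

    # LR is the LogisticRegression alias imported at the top of this module
    ssmodel = CPLELearningModel(LR(), max_iter=200, verbose=1)
    ssmodel.fit(X, y)
    print("accuracy on all data:", ssmodel.score(X, ytrue))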
|
tests/cmdline/params/types/test_data.py | mkrack/aiida-core | 153 | 11070000 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Tests for the `DataParamType`."""
import pytest
from aiida.cmdline.params.types import DataParamType
from aiida.orm import Data
from aiida.orm.utils.loaders import OrmEntityLoader
class TestDataParamType:
"""Tests for the `DataParamType`."""
@pytest.fixture(autouse=True)
def init_profile(self, aiida_profile_clean): # pylint: disable=unused-argument
"""
        Create some Data nodes to test the DataParamType parameter type for the command line infrastructure.
        We create an initial node and then on purpose create two more nodes with a label
        that matches exactly the ID and UUID, respectively, of the first one. This allows us to test
        the rules implemented to solve ambiguities that arise when determining the identifier type
"""
# pylint: disable=attribute-defined-outside-init
self.param = DataParamType()
self.entity_01 = Data().store()
self.entity_02 = Data().store()
self.entity_03 = Data().store()
self.entity_01.label = 'data_01'
self.entity_02.label = str(self.entity_01.pk)
self.entity_03.label = str(self.entity_01.uuid)
def test_get_by_id(self):
"""
Verify that using the ID will retrieve the correct entity
"""
identifier = f'{self.entity_01.pk}'
result = self.param.convert(identifier, None, None)
assert result.uuid == self.entity_01.uuid
def test_get_by_uuid(self):
"""
Verify that using the UUID will retrieve the correct entity
"""
identifier = f'{self.entity_01.uuid}'
result = self.param.convert(identifier, None, None)
assert result.uuid == self.entity_01.uuid
def test_get_by_label(self):
"""
Verify that using the LABEL will retrieve the correct entity
"""
identifier = f'{self.entity_01.label}'
result = self.param.convert(identifier, None, None)
assert result.uuid == self.entity_01.uuid
def test_ambiguous_label_pk(self):
"""
Situation: LABEL of entity_02 is exactly equal to ID of entity_01
Verify that using an ambiguous identifier gives precedence to the ID interpretation
Appending the special ambiguity breaker character will force the identifier to be treated as a LABEL
"""
identifier = f'{self.entity_02.label}'
result = self.param.convert(identifier, None, None)
assert result.uuid == self.entity_01.uuid
identifier = f'{self.entity_02.label}{OrmEntityLoader.label_ambiguity_breaker}'
result = self.param.convert(identifier, None, None)
assert result.uuid == self.entity_02.uuid
def test_ambiguous_label_uuid(self):
"""
Situation: LABEL of entity_03 is exactly equal to UUID of entity_01
Verify that using an ambiguous identifier gives precedence to the UUID interpretation
Appending the special ambiguity breaker character will force the identifier to be treated as a LABEL
"""
identifier = f'{self.entity_03.label}'
result = self.param.convert(identifier, None, None)
assert result.uuid == self.entity_01.uuid
identifier = f'{self.entity_03.label}{OrmEntityLoader.label_ambiguity_breaker}'
result = self.param.convert(identifier, None, None)
assert result.uuid == self.entity_03.uuid
|
kafka/tools/protocol/requests/delete_topics_v0.py | akashvacher/kafka-tools | 578 | 11070006 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from kafka.tools.protocol.requests import BaseRequest, ArgumentError
from kafka.tools.protocol.responses.delete_topics_v0 import DeleteTopicsV0Response
class DeleteTopicsV0Request(BaseRequest):
api_key = 20
api_version = 0
cmd = "DeleteTopics"
response = DeleteTopicsV0Response
supports_cli = True
help_string = ("Request: {0}V{1}\n".format(cmd, api_version) +
"Format: {0}V{1} timeout (topic_name ...)\n".format(cmd, api_version) +
"Description: Delete the specified topics.\n")
schema = [
{'name': 'topics', 'type': 'array', 'item_type': 'string'},
{'name': 'timeout', 'type': 'int32'},
]
@classmethod
def process_arguments(cls, cmd_args):
if (len(cmd_args) < 2) or (not cmd_args[0].isdigit()):
raise ArgumentError("The first argument must be an integer, and at least one topic must be provided")
return {'topics': cmd_args[1:], 'timeout': int(cmd_args[0])}
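
    # Illustrative example (hypothetical topic names): the first CLI argument is
    # the timeout, the remaining arguments are the topics to delete, e.g.
    #   DeleteTopicsV0Request.process_arguments(["30000", "topic-a", "topic-b"])
    #   -> {'topics': ['topic-a', 'topic-b'], 'timeout': 30000}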
|
GeometryReaders/XMLIdealGeometryESSource/python/cmsGeometryDB_cff.py | ckamtsikis/cmssw | 852 | 11070020 | <reponame>ckamtsikis/cmssw<filename>GeometryReaders/XMLIdealGeometryESSource/python/cmsGeometryDB_cff.py
import FWCore.ParameterSet.Config as cms
XMLFromDBSource = cms.ESProducer("XMLIdealGeometryESProducer",
rootDDName = cms.string('cms:OCMS'),
label = cms.string('Extended')
)
|
var/spack/repos/builtin/packages/geode/package.py | kkauder/spack | 2,360 | 11070024 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Geode(Package):
"""
Apache Geode is a data management platform that provides real-time,
consistent access to data-intensive applications throughout widely
distributed cloud architectures.
"""
homepage = "https://geode.apache.org/"
url = "https://archive.apache.org/dist/geode/1.9.2/apache-geode-1.9.2.tgz"
version('1.9.2', sha256='4b8118114ef43166f6bf73af56b93aadbf9108fcab06d1fbbb8e27f7d559d7e0')
version('1.9.0', sha256='8794808ebc89bc855f0b989b32e91e890d446cfd058e123f6ccb9e12597c1c4f')
version('1.8.0', sha256='58edc41edac4eabd899322b73a24727eac41f6253274c2ce7d0a82227121ae3e')
version('1.7.0', sha256='91eec04420f46e949d32104479c4a4b5b34a4e5570dca7b98ca067a30d5a783d')
version('1.6.0', sha256='79e8d81d058b1c4edd5fb414ff30ac530f7913b978f5abc899c353fcb06e5ef3')
depends_on('java', type='run')
def install(self, spec, prefix):
install_tree('.', prefix)
|
Source/driver/parse_castro_params.py | joehellmers/Castro | 178 | 11070025 | #!/usr/bin/env python3
"""
This script parses the list of C++ runtime parameters and writes the
necessary header files and Fortran routines to make them available
in Castro's C++ routines.
parameters have the format:
name type default need-in-fortran? ifdef
the first three (name, type, default) are mandatory:
name: the name of the parameter. This will be the same name as the
variable in C++ unless a pair is specified as (name, cpp_name)
type: the C++ data type (int, bool, Real, string)
default: the default value. If specified as a pair, (a, b), then
the first value is the normal default and the second is for
debug mode (#ifdef AMREX_DEBUG)
the next are optional:
need-in-fortran: no longer used
ifdef: only define this parameter if the name provided is #ifdef-ed
Any line beginning with a "#" is ignored
Commands begin with a "@":
@namespace: sets the namespace that these will be under (see below)
e.g. @namespace castro
Note: categories listed in the input file aren't used for code generation
but are used for the documentation generation
For a namespace, name, we write out:
-- name_params.H (for castro, included in Castro.H):
sets up the namespace and extern parameters
-- name_declares.H (for castro, included in Castro.cpp):
declares the runtime parameters
-- name_queries.H (for castro, included in Castro.cpp):
does the parmparse query to override the default in C++
-- name_job_info_tests.H
this tests the current value against the default and outputs
into a file
"""
import argparse
import re
import sys
import runtime_parameters as rp
CWARNING = """
// This file is automatically created by parse_castro_params.py at build time.
// To update or add runtime parameters, please edit _cpp_parameters and rebuild.\n
"""
def parse_params(infile, out_directory):
params = []
namespace = None
try:
f = open(infile)
except IOError:
sys.exit("error opening the input file")
for line in f:
if line[0] == "#":
continue
if line.strip() == "":
continue
if line[0] == "@":
# this is a command
cmd, value = line.split(":")
if cmd == "@namespace":
fields = value.split()
namespace = fields[0]
else:
sys.exit("invalid command")
continue
# this splits the line into separate fields. A field is a
# single word or a pair in parentheses like "(a, b)"
fields = re.findall(r'[\w\"\+\.-]+|\([\w+\.-]+\s*,\s*[\w\+\.-]+\)', line)
name = fields[0]
if name[0] == "(":
name, cpp_var_name = re.findall(r"\w+", name)
else:
cpp_var_name = name
dtype = fields[1].lower()
default = fields[2]
if default[0] == "(":
default, debug_default = re.findall(r"\w+", default)
else:
debug_default = None
try:
in_fortran_string = fields[3]
except IndexError:
in_fortran = 0
else:
if in_fortran_string.lower().strip() == "y":
in_fortran = 1
else:
in_fortran = 0
try:
ifdef = fields[4]
except IndexError:
ifdef = None
if namespace is None:
sys.exit("namespace not set")
params.append(rp.Param(name, dtype, default,
cpp_var_name=cpp_var_name,
namespace=namespace,
debug_default=debug_default,
in_fortran=in_fortran,
ifdef=ifdef))
# output
# find all the namespaces
namespaces = {q.namespace for q in params}
for nm in namespaces:
params_nm = [q for q in params if q.namespace == nm]
ifdefs = {q.ifdef for q in params_nm}
# write name_declares.H
try:
cd = open(f"{out_directory}/{nm}_declares.H", "w")
except IOError:
sys.exit(f"unable to open {nm}_declares.H for writing")
cd.write(CWARNING)
cd.write(f"#ifndef _{nm.upper()}_DECLARES_H_\n")
cd.write(f"#define _{nm.upper()}_DECLARES_H_\n")
for ifdef in ifdefs:
if ifdef is None:
for p in [q for q in params_nm if q.ifdef is None]:
cd.write(p.get_declare_string())
else:
cd.write(f"#ifdef {ifdef}\n")
for p in [q for q in params_nm if q.ifdef == ifdef]:
cd.write(p.get_declare_string())
cd.write("#endif\n")
cd.write("#endif\n")
cd.close()
# write name_params.H
try:
cp = open(f"{out_directory}/{nm}_params.H", "w")
except IOError:
sys.exit(f"unable to open {nm}_params.H for writing")
cp.write(CWARNING)
cp.write(f"#ifndef _{nm.upper()}_PARAMS_H_\n")
cp.write(f"#define _{nm.upper()}_PARAMS_H_\n")
cp.write("\n")
cp.write(f"namespace {nm} {{\n")
for ifdef in ifdefs:
if ifdef is None:
for p in [q for q in params_nm if q.ifdef is None]:
cp.write(p.get_decl_string())
else:
cp.write(f"#ifdef {ifdef}\n")
for p in [q for q in params_nm if q.ifdef == ifdef]:
cp.write(p.get_decl_string())
cp.write("#endif\n")
cp.write("}\n\n")
cp.write("#endif\n")
cp.close()
        # write name_queries.H
try:
cq = open(f"{out_directory}/{nm}_queries.H", "w")
except IOError:
sys.exit(f"unable to open {nm}_queries.H for writing")
cq.write(CWARNING)
for ifdef in ifdefs:
if ifdef is None:
for p in [q for q in params_nm if q.ifdef is None]:
cq.write(p.get_default_string())
cq.write(p.get_query_string("C++"))
cq.write("\n")
else:
cq.write(f"#ifdef {ifdef}\n")
for p in [q for q in params_nm if q.ifdef == ifdef]:
cq.write(p.get_default_string())
cq.write(p.get_query_string("C++"))
cq.write("\n")
cq.write("#endif\n")
cq.write("\n")
cq.close()
# write the job info tests
try:
jo = open(f"{out_directory}/{nm}_job_info_tests.H", "w")
except IOError:
sys.exit(f"unable to open {nm}_job_info_tests.H")
for ifdef in ifdefs:
if ifdef is None:
for p in [q for q in params_nm if q.ifdef is None]:
jo.write(p.get_job_info_test())
else:
jo.write(f"#ifdef {ifdef}\n")
for p in [q for q in params_nm if q.ifdef == ifdef]:
jo.write(p.get_job_info_test())
jo.write("#endif\n")
jo.close()
def main():
"""the main driver"""
parser = argparse.ArgumentParser()
parser.add_argument("-o", type=str, default=None,
help="output directory for the generated files")
parser.add_argument("input_file", type=str, nargs=1,
help="input file containing the list of parameters we will define")
args = parser.parse_args()
parse_params(args.input_file[0], args.o)
if __name__ == "__main__":
main()
|
scripts/external_libs/scapy-2.3.1/python2/scapy/asn1packet.py | klement/trex-core | 995 | 11070036 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) <NAME> <<EMAIL>>
## This program is published under a GPLv2 license
"""
Packet holding data in Abstract Syntax Notation (ASN.1).
"""
from packet import *
class ASN1_Packet(Packet):
ASN1_root = None
ASN1_codec = None
def init_fields(self):
flist = self.ASN1_root.get_fields_list()
self.do_init_fields(flist)
self.fields_desc = flist
def self_build(self):
if self.raw_packet_cache is not None:
return self.raw_packet_cache
return self.ASN1_root.build(self)
def do_dissect(self, x):
return self.ASN1_root.dissect(self, x)
|
DynamoDB-SDK-Examples/python/WorkingWithQueries/query_begins_with.py | mikeweltejr/aws-dynamodb-examples | 261 | 11070044 | <filename>DynamoDB-SDK-Examples/python/WorkingWithQueries/query_begins_with.py
import boto3, json
from boto3.dynamodb.conditions import Key
# boto3 is the AWS SDK library for Python.
# The "resources" interface allows for a higher-level abstraction than the low-level client interface.
# For more details, go to http://boto3.readthedocs.io/en/latest/guide/resources.html
dynamodb = boto3.resource('dynamodb', region_name='us-west-2')
table = dynamodb.Table('RetailDatabase')
# When making a Query API call, we use the KeyConditionExpression parameter to specify the partition key on which we want to query.
# We're using the Key object from the Boto3 library to specify that we want the attribute name ("pk")
# to equal "<EMAIL>" by using the ".eq()" method. Further we get all of those items that have a sort key using the
# "begins_with()" method to look for "meta::".
resp = table.query(KeyConditionExpression=Key('pk').eq('<EMAIL>') & Key('sk').begins_with('meta::'))
print("The query returned the following items:")
for item in resp['Items']:
print(json.dumps(item, indent=4, sort_keys=True))
|
changes/compat.py | michaeljoseph/changes | 135 | 11070048 | <filename>changes/compat.py
import sys
IS_WINDOWS = 'win32' in str(sys.platform).lower()
|
rupo/util/timeit.py | dagrigorev/rupo | 171 | 11070053 | <reponame>dagrigorev/rupo
import time
import logging
def timeit(method):
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
logging.debug('%s %2.2f sec' % (method.__name__, te-ts))
return result
return timed |
tests/conftest.py | jakul/pytest_httpx | 148 | 11070057 | # see https://docs.pytest.org/en/documentation-restructure/how-to/writing_plugins.html#testing-plugins
pytest_plugins = ["pytester"]
|
salt/modules/boto_cfn.py | markgras/salt | 9,425 | 11070077 | """
Connection module for Amazon Cloud Formation
.. versionadded:: 2015.5.0
:configuration: This module accepts explicit AWS credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More Information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
cfn.keyid: <KEY>
cfn.key: <KEY>
A region may also be specified in the configuration:
.. code-block:: yaml
cfn.region: us-east-1
:depends: boto
"""
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
import logging
import salt.utils.versions
log = logging.getLogger(__name__)
# pylint: disable=import-error
try:
# pylint: disable=unused-import
import boto
import boto.cloudformation
# pylint: enable=unused-import
from boto.exception import BotoServerError
logging.getLogger("boto").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def __virtual__():
"""
Only load if boto libraries exist.
"""
return salt.utils.versions.check_boto_reqs(check_boto3=False)
def __init__(opts):
if HAS_BOTO:
__utils__["boto.assign_funcs"](
__name__, "cfn", module="cloudformation", pack=__salt__
)
def exists(name, region=None, key=None, keyid=None, profile=None):
"""
Check to see if a stack exists.
CLI Example:
.. code-block:: bash
salt myminion boto_cfn.exists mystack region=us-east-1
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
# Returns an object if stack exists else an exception
exists = conn.describe_stacks(name)
log.debug("Stack %s exists.", name)
return True
except BotoServerError as e:
log.debug("boto_cfn.exists raised an exception", exc_info=True)
return False
def describe(name, region=None, key=None, keyid=None, profile=None):
"""
Describe a stack.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion boto_cfn.describe mystack region=us-east-1
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
# Returns an object if stack exists else an exception
r = conn.describe_stacks(name)
if r:
stack = r[0]
log.debug("Found VPC: %s", stack.stack_id)
keys = (
"stack_id",
"description",
"stack_status",
"stack_status_reason",
"tags",
)
ret = {k: getattr(stack, k) for k in keys if hasattr(stack, k)}
o = getattr(stack, "outputs")
p = getattr(stack, "parameters")
outputs = {}
parameters = {}
for i in o:
outputs[i.key] = i.value
ret["outputs"] = outputs
for j in p:
parameters[j.key] = j.value
ret["parameters"] = parameters
return {"stack": ret}
log.debug("Stack %s exists.", name)
return True
except BotoServerError as e:
log.warning("Could not describe stack %s.\n%s", name, e)
return False
def create(
name,
template_body=None,
template_url=None,
parameters=None,
notification_arns=None,
disable_rollback=None,
timeout_in_minutes=None,
capabilities=None,
tags=None,
on_failure=None,
stack_policy_body=None,
stack_policy_url=None,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
Create a CFN stack.
CLI Example:
.. code-block:: bash
salt myminion boto_cfn.create mystack template_url='https://s3.amazonaws.com/bucket/template.cft' \
region=us-east-1
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
return conn.create_stack(
name,
template_body,
template_url,
parameters,
notification_arns,
disable_rollback,
timeout_in_minutes,
capabilities,
tags,
on_failure,
stack_policy_body,
stack_policy_url,
)
except BotoServerError as e:
msg = "Failed to create stack {}.\n{}".format(name, e)
log.error(msg)
log.debug(e)
return False
def update_stack(
name,
template_body=None,
template_url=None,
parameters=None,
notification_arns=None,
disable_rollback=False,
timeout_in_minutes=None,
capabilities=None,
tags=None,
use_previous_template=None,
stack_policy_during_update_body=None,
stack_policy_during_update_url=None,
stack_policy_body=None,
stack_policy_url=None,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
Update a CFN stack.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion boto_cfn.update_stack mystack template_url='https://s3.amazonaws.com/bucket/template.cft' \
region=us-east-1
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
update = conn.update_stack(
name,
template_body,
template_url,
parameters,
notification_arns,
disable_rollback,
timeout_in_minutes,
capabilities,
tags,
use_previous_template,
stack_policy_during_update_body,
stack_policy_during_update_url,
stack_policy_body,
stack_policy_url,
)
log.debug("Updated result is : %s.", update)
return update
except BotoServerError as e:
msg = "Failed to update stack {}.".format(name)
log.debug(e)
log.error(msg)
return str(e)
def delete(name, region=None, key=None, keyid=None, profile=None):
"""
Delete a CFN stack.
CLI Example:
.. code-block:: bash
salt myminion boto_cfn.delete mystack region=us-east-1
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
return conn.delete_stack(name)
except BotoServerError as e:
msg = "Failed to create stack {}.".format(name)
log.error(msg)
log.debug(e)
return str(e)
def get_template(name, region=None, key=None, keyid=None, profile=None):
"""
Check to see if attributes are set on a CFN stack.
CLI Example:
.. code-block:: bash
salt myminion boto_cfn.get_template mystack
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
template = conn.get_template(name)
log.info("Retrieved template for stack %s", name)
return template
except BotoServerError as e:
log.debug(e)
msg = "Template {} does not exist".format(name)
log.error(msg)
return str(e)
def validate_template(
template_body=None,
template_url=None,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
Validate cloudformation template
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion boto_cfn.validate_template mystack-template
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
# Returns an object if json is validated and an exception if its not
return conn.validate_template(template_body, template_url)
except BotoServerError as e:
log.debug(e)
msg = "Error while trying to validate template {}.".format(template_body)
log.error(msg)
return str(e)
|
CIFAR/utils.py | Omid-Nejati/MoEx | 130 | 11070106 | # original code: https://github.com/eladhoffer/convNet.pytorch/blob/master/preprocess.py
import torch
import random
__all__ = ["Compose", "Lighting", "ColorJitter"]
class Compose(object):
"""Composes several transforms together.
Args:
transforms (list of ``Transform`` objects): list of transforms to compose.
Example:
>>> transforms.Compose([
>>> transforms.CenterCrop(10),
>>> transforms.ToTensor(),
>>> ])
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img):
for t in self.transforms:
img = t(img)
return img
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class Lighting(object):
"""Lighting noise(AlexNet - style PCA - based noise)"""
def __init__(self, alphastd, eigval, eigvec):
self.alphastd = alphastd
self.eigval = torch.Tensor(eigval)
self.eigvec = torch.Tensor(eigvec)
def __call__(self, img):
if self.alphastd == 0:
return img
alpha = img.new().resize_(3).normal_(0, self.alphastd)
rgb = self.eigvec.type_as(img).clone() \
.mul(alpha.view(1, 3).expand(3, 3)) \
.mul(self.eigval.view(1, 3).expand(3, 3)) \
.sum(1).squeeze()
return img.add(rgb.view(3, 1, 1).expand_as(img))
class Grayscale(object):
def __call__(self, img):
gs = img.clone()
gs[0].mul_(0.299).add_(0.587, gs[1]).add_(0.114, gs[2])
gs[1].copy_(gs[0])
gs[2].copy_(gs[0])
return gs
class Saturation(object):
def __init__(self, var):
self.var = var
def __call__(self, img):
gs = Grayscale()(img)
alpha = random.uniform(-self.var, self.var)
return img.lerp(gs, alpha)
class Brightness(object):
def __init__(self, var):
self.var = var
def __call__(self, img):
gs = img.new().resize_as_(img).zero_()
alpha = random.uniform(-self.var, self.var)
return img.lerp(gs, alpha)
class Contrast(object):
def __init__(self, var):
self.var = var
def __call__(self, img):
gs = Grayscale()(img)
gs.fill_(gs.mean())
alpha = random.uniform(-self.var, self.var)
return img.lerp(gs, alpha)
class ColorJitter(object):
def __init__(self, brightness=0.4, contrast=0.4, saturation=0.4):
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
def __call__(self, img):
self.transforms = []
if self.brightness != 0:
self.transforms.append(Brightness(self.brightness))
if self.contrast != 0:
self.transforms.append(Contrast(self.contrast))
if self.saturation != 0:
self.transforms.append(Saturation(self.saturation))
random.shuffle(self.transforms)
transform = Compose(self.transforms)
return transform(img)
|
benchmarks/api_hour/benchmarks/benchmarks/services/__init__.py | Ovvovy/API-Hour | 571 | 11070116 | <filename>benchmarks/api_hour/benchmarks/benchmarks/services/__init__.py
from . import agents |
Codeforces/91 Beta Division 2/Problem B/B.py | VastoLorde95/Competitive-Programming | 170 | 11070123 | <reponame>VastoLorde95/Competitive-Programming<filename>Codeforces/91 Beta Division 2/Problem B/B.py
s = raw_input()
r = ""
i = 0
while i < len(s) and s[i] == '0':
i += 1
while i < len(s):
r += s[i]
i += 1
l = []
i = 0
for i in xrange(len(r)):
tmp = ""
for j in xrange(i, len(r)):
flag = 0
for k in xrange(i, j+1):
if r[k] == '4' or r[k] == '7':
tmp += r[i]
else:
flag = 1
break
if flag == 0:
l.append(tmp)
if len(l) == 0:
print -1
else:
d = {}
for i in l:
d[i] = r.count(i)
mx = 0
l = []
for i in d:
mx = max(mx, d[i])
for i in d:
if d[i] == mx:
l.append(i)
l.sort()
print l[0]
|
seoanalyzer/__init__.py | cobolbaby/python-seo-analyzer | 743 | 11070134 | <reponame>cobolbaby/python-seo-analyzer
#!/usr/bin/env python3
from .analyzer import analyze
from .stemmer import stem |
bigsi/graph/__init__.py | Phelimb/bfg | 109 | 11070168 | from bigsi.graph.bigsi import BIGSI
#
|
cumulusci/utils/yaml/cumulusci_yml.py | SFDO-Tooling/CumulusCI | 163 | 11070173 | import enum
from logging import getLogger
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union
from pydantic import Field, root_validator
from pydantic.types import DirectoryPath
from typing_extensions import Literal, TypedDict
from cumulusci.utils.fileutils import DataInput, load_from_source
from cumulusci.utils.yaml.model_parser import CCIDictModel, HashableBaseModel
from cumulusci.utils.yaml.safer_loader import load_yaml_data
default_logger = getLogger(__name__)
# type aliases
PythonClassPath = str
URL = str
# additionalProperties here works around an
# incompatibility with VSCode's Red Hat YAML validator
# Can probably remove it at some future date if the
# bug/incompatibilty is fixed elsewhere in the stack
VSCodeFriendlyDict = Field({}, additionalProperties=True)
class PreflightCheck(CCIDictModel):
when: str = None
action: str = None
message: str = None
class Step(CCIDictModel):
task: str = None
flow: str = None
ignore_failure: bool = False
when: str = None # is this allowed?
options: Dict[str, Any] = VSCodeFriendlyDict
ui_options: Dict[str, Any] = VSCodeFriendlyDict
checks: List[PreflightCheck] = []
description: str = None
@root_validator()
def _check(cls, values):
has_task = values.get("task") and values["task"] != "None"
has_flow = values.get("flow") and values["flow"] != "None"
assert not (
has_task and has_flow
), "Steps must have either task or flow but not both"
return values
class Task(CCIDictModel):
class_path: str = None
description: str = None
group: str = None
# additionalProperties here works around an
# incompatibility with VSCode's Red Hat YAML validator
options: Dict[str, Any] = VSCodeFriendlyDict
ui_options: Dict[str, Any] = VSCodeFriendlyDict
name: str = None # get rid of this???
class Flow(CCIDictModel):
description: str = None
steps: Dict[str, Step] = None
group: str = None
class Package(CCIDictModel):
name: Optional[str] = None
name_managed: Optional[str] = None
namespace: Optional[str] = None
install_class: str = None
uninstall_class: str = None
api_version: str = None
metadata_package_id: str = None
class Test(CCIDictModel):
name_match: str
class ReleaseNotesParser(CCIDictModel):
class_path: PythonClassPath
title: str
class ReleaseNotes(CCIDictModel):
parsers: Dict[int, ReleaseNotesParser]
class Git(CCIDictModel):
repo_url: str = None
default_branch: str = None
prefix_feature: str = None
prefix_beta: str = None
prefix_release: str = None
push_prefix_sandbox: str = None
push_prefix_production: str = None
release_notes: ReleaseNotes = None
two_gp_context: str = Field(None, alias="2gp_context")
class Plan(CCIDictModel): # MetaDeploy plans
title: str = None
description: str = None
tier: Literal["primary", "secondary", "additional"] = None
slug: str = None
is_listed: bool = True
steps: Dict[str, Step] = None
checks: List[PreflightCheck] = []
group: str = None
error_message: str = None
post_install_message: str = None
preflight_message: str = None
class DependencyResolutions(CCIDictModel):
production: str = None
preproduction: str = None
resolution_strategies: Dict[str, List[str]] = None
class Project(CCIDictModel):
name: str = None
package: Package = None
test: Test = None
git: Git = None
dependencies: List[Dict[str, str]] = None # TODO
dependency_resolutions: DependencyResolutions = None
source_format: Literal["sfdx", "mdapi"] = "mdapi"
class ScratchOrg(CCIDictModel):
config_file: Path = None
days: int = None
namespaced: str = None
setup_flow: str = None
noancestors: bool = None
class Orgs(CCIDictModel):
scratch: Dict[str, ScratchOrg] = None
class ServiceAttribute(CCIDictModel):
description: str = None
required: bool = None
default_factory: PythonClassPath = None
class Service(CCIDictModel):
description: str = None
class_path: Optional[str]
attributes: Dict[str, ServiceAttribute] = None
validator: PythonClassPath = None
class CumulusCIConfig(CCIDictModel):
keychain: PythonClassPath
class GitHubSourceRelease(str, enum.Enum):
LATEST = "latest"
PREVIOUS = "previous"
LATEST_BETA = "latest_beta"
class GitHubSourceModel(HashableBaseModel):
github: str
resolution_strategy: Optional[str]
commit: Optional[str]
ref: Optional[str]
branch: Optional[str]
tag: Optional[str]
release: Optional[GitHubSourceRelease]
description: Optional[str]
@root_validator
def validate(cls, values):
exclusive_keys = [
"resolution_strategy",
"commit",
"ref",
"branch",
"tag",
"release",
]
key_count = len([x for x in exclusive_keys if x in values and values[x]])
if key_count > 1:
raise ValueError(
'Sources must use only one of "resolution_strategy", "commit", "ref", "branch", "tag", or "release".'
)
elif key_count == 0:
values["resolution_strategy"] = "production"
return values
class LocalFolderSourceModel(HashableBaseModel):
path: DirectoryPath
class CumulusCLIConfig(CCIDictModel):
show_stacktraces: bool = False
plain_output: bool = None
class CumulusCIRoot(CCIDictModel):
tasks: Dict[str, Task] = {}
flows: Dict[str, Flow] = {}
project: Project = {}
orgs: Orgs = {}
services: Dict[str, Service] = {}
cumulusci: CumulusCIConfig = None
plans: Dict[str, Plan] = {}
minimum_cumulusci_version: str = None
sources: Dict[str, Union[LocalFolderSourceModel, GitHubSourceModel]] = {}
cli: CumulusCLIConfig = None
class CumulusCIFile(CCIDictModel):
__root__: Union[CumulusCIRoot, None]
def parse_from_yaml(source) -> dict:
"Parse from a path, url, path-like or file-like"
return CumulusCIFile.parse_from_yaml(source) or {}
def validate_data(
data: Union[dict, list],
context: str = None,
on_error: callable = None,
):
"""Validate data which has already been loaded into a dictionary or list.
context is a string that will be used to give context to error messages.
on_error will be called for any validation errors with a dictionary in Pydantic error format
https://pydantic-docs.helpmanual.io/usage/models/#error-handling
"""
return CumulusCIFile.validate_data(data, context=context, on_error=on_error)
class ErrorDict(TypedDict):
"The structure of a Pydantic error dictionary. Google TypedDict if its new to you."
loc: Sequence[Union[str, int]]
msg: str
type: str
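# Example of the shape described above (values are illustrative only):
#     {"loc": ("project", "package", "name"),
#      "msg": "str type expected",
#      "type": "type_error.str"}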
has_shown_yaml_error_message = False
def _log_yaml_errors(logger, errors: List[ErrorDict]):
"Format and log a Pydantic-style error dictionary"
global has_shown_yaml_error_message
plural = "" if len(errors) <= 1 else "s"
logger.warning(f"CumulusCI Configuration Warning{plural}:")
for error in errors:
loc = " -> ".join(repr(x) for x in error["loc"] if x != "__root__")
logger.warning(" %s\n %s", loc, error["msg"])
if not has_shown_yaml_error_message:
logger.error(
"NOTE: These warnings may become errors in future versions of CumulusCI."
)
logger.error(
"If you think your YAML has no error, please report the bug to the CumulusCI team."
)
logger.error("https://github.com/SFDO-Tooling/CumulusCI/issues/\n")
has_shown_yaml_error_message = True
def cci_safe_load(
source: DataInput, context: str = None, on_error: callable = None, logger=None
) -> dict:
"""Load a CumulusCI.yml file and issue warnings for unknown structures."""
errors = []
assert not (
on_error and logger
), "Please specify either on_error or logger but not both"
on_error = on_error or errors.append
logger = logger or default_logger
with load_from_source(source) as (data_stream, filename):
data = load_yaml_data(data_stream, filename)
context = context or filename
try:
validate_data(data, context=context, on_error=on_error)
if errors:
_log_yaml_errors(logger, errors)
except Exception as e:
# Defensive: this branch should never be reached, since validate_data reports problems via on_error
print(f"Error validating cumulusci.yml {e}")
if on_error:
on_error(
{
"loc": (context,),
"msg": f"Error validating cumulusci.yml {e}",
"type": "exception",
}
)
pass
return data or {}
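# Illustrative usage sketch (`my_logger` is a hypothetical name, not part of
# this module). Pass either `on_error` or `logger`, never both -- see the
# assert above:
#
#     data = cci_safe_load("cumulusci.yml", logger=my_logger)
#     # or, to collect warnings programmatically:
#     errors = []
#     data = cci_safe_load("cumulusci.yml", on_error=errors.append)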
def _validate_files(globs):
"Validate YML files from Dev CLI for smoke testing"
from glob import glob
errors = []
for g in globs:
print(g)
filenames = glob(g, recursive=True)
for filename in filenames:
print("Validating", filename)
cci_safe_load(filename, filename, on_error=errors.append)
return errors
def _validate_url(url): # pragma: no cover
"Validate YML URL from Dev CLI for smoke testing"
errors = []
cci_safe_load(url, url, on_error=errors.append)
return errors
# validate YML files as a CLI for smoke testing
if __name__ == "__main__": # pragma: no cover
import sys
from pprint import pprint
if sys.argv[1].startswith("http"):
pprint(_validate_url(sys.argv[1]))
else:
pprint(_validate_files(sys.argv[1:]))
|
hoomd/hpmc/external/user.py | YMWani/hoomd-blue | 204 | 11070195 | # Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""User-defined external fields for HPMC simulations."""
import hoomd
from hoomd import _compile
from hoomd.hpmc import integrate
if hoomd.version.llvm_enabled:
from hoomd.hpmc import _jit
from hoomd.operation import _HOOMDBaseObject
from hoomd.data.parameterdicts import ParameterDict
from hoomd.logging import log
class CPPExternalPotential(_HOOMDBaseObject):
"""Define an external potential energy field imposed on all particles in \
the system.
Args:
code (str): C++ function body to compile.
Potentials added using :py:class:`CPPExternalPotential` are added to the
total energy calculation in :py:mod:`hpmc <hoomd.hpmc>` integrators.
:py:class:`CPPExternalPotential` takes C++ code, compiles it at runtime, and
executes the code natively in the MC loop with full performance. It enables
researchers to quickly and easily implement custom energetic field
interactions without the need to modify and recompile HOOMD.
.. rubric:: C++ code
Supply C++ code to the *code* argument and :py:class:`CPPExternalPotential`
will compile the code and call it to evaluate the energy. The text provided
in *code* is the body of a function with the following signature:
.. code::
float eval(const BoxDim& box,
unsigned int type_i,
const vec3<Scalar>& r_i,
const quat<Scalar>& q_i,
Scalar diameter,
Scalar charge
)
* *box* is the system box.
* *type_i* is the (integer) particle type.
* *r_i* is the particle position.
* *q_i* is the quaternion representing the particle orientation.
* *diameter* is the particle diameter.
* *charge* is the particle charge.
Note:
``vec3`` and ``quat`` are defined in the file `VectorMath.h`_ in the \
HOOMD-blue source code, and ``BoxDim`` is defined in the file \
`BoxDim.h`_ in the HOOMD-blue source code.
Note:
Your code *must* return a value.
.. _VectorMath.h: https://github.com/glotzerlab/hoomd-blue/blob/\
v3.0.0-beta.9/hoomd/VectorMath.h
.. _BoxDim.h: https://github.com/glotzerlab/hoomd-blue/blob/\
v3.0.0-beta.9/hoomd/BoxDim.h
Example:
.. code-block:: python
grav_code = "return r_i.z + box.getL().z/2;"
gravity = hoomd.hpmc.external.user.CPPExternalPotential(
code=grav_code)
mc.external_potential = gravity
Note:
`CPPExternalPotential` does not support execution on GPUs.
Warning:
``CPPExternalPotential`` is **experimental** and subject to change in
future minor releases.
Attributes:
code (str): The code of the body of the external field energy function.
After running zero or more steps, this property cannot be modified.
"""
def __init__(self, code):
param_dict = ParameterDict(code=str)
self._param_dict = param_dict
self.code = code
def _getattr_param(self, attr):
if attr == 'code':
return self._param_dict[attr]
return super()._getattr_param(attr)
def _wrap_cpu_code(self, code):
"""Helper function to wrap the provided code into a function \
with the expected signature.
Args:
code (`str`): Body of the C++ function
"""
cpp_function = """
#include "hoomd/HOOMDMath.h"
#include "hoomd/VectorMath.h"
#include "hoomd/BoxDim.h"
extern "C"
{
float eval(const BoxDim& box,
unsigned int type_i,
const vec3<Scalar> r_i,
const quat<Scalar>& q_i,
Scalar diameter,
Scalar charge
)
{
"""
cpp_function += code
cpp_function += """
}
}
"""
return cpp_function
def _attach(self):
integrator_pairs = {
integrate.Sphere:
_jit.ExternalFieldJITSphere,
integrate.ConvexPolygon:
_jit.ExternalFieldJITConvexPolygon,
integrate.SimplePolygon:
_jit.ExternalFieldJITSimplePolygon,
integrate.ConvexPolyhedron:
_jit.ExternalFieldJITConvexPolyhedron,
integrate.ConvexSpheropolyhedron:
_jit.ExternalFieldJITSpheropolyhedron,
integrate.Ellipsoid:
_jit.ExternalFieldJITEllipsoid,
integrate.ConvexSpheropolygon:
_jit.ExternalFieldJITSpheropolygon,
integrate.FacetedEllipsoid:
_jit.ExternalFieldJITFacetedEllipsoid,
integrate.Polyhedron:
_jit.ExternalFieldJITPolyhedron,
integrate.Sphinx:
_jit.ExternalFieldJITSphinx
}
integrator = self._simulation.operations.integrator
if not isinstance(integrator, integrate.HPMCIntegrator):
raise RuntimeError("The integrator must be a HPMC integrator.")
if (isinstance(self._simulation.device, hoomd.device.GPU)):
msg = 'User-defined external fields are not supported on the GPU.'
raise NotImplementedError(msg)
cpp_cls = integrator_pairs.get(
self._simulation.operations.integrator.__class__, None)
if cpp_cls is None:
raise RuntimeError("Unsupported integrator.\n")
cpu_code = self._wrap_cpu_code(self.code)
cpu_include_options = _compile.get_cpu_include_options()
self._cpp_obj = cpp_cls(self._simulation.state._cpp_sys_def,
self._simulation.device._cpp_exec_conf,
cpu_code, cpu_include_options)
super()._attach()
@log(requires_run=True)
def energy(self):
"""float: Total field energy of the system in the current state.
Returns `None` when the patch object and integrator are not attached.
"""
timestep = self._simulation.timestep
return self._cpp_obj.computeEnergy(timestep)
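# Illustrative usage sketch (assumes an existing HPMC integrator `mc` attached
# to a Simulation `sim`; both names are placeholders, not part of this module):
#
#     wall_code = "return (r_i.z > 0) ? 0.0f : 100.0f;"  # penalize z < 0
#     wall = CPPExternalPotential(code=wall_code)
#     mc.external_potential = wall
#     sim.operations.integrator = mc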
|
descarteslabs/workflows/models/tests/test_xyz.py | descarteslabs/descarteslabs-python | 167 | 11070198 | import datetime
import logging
from six.moves import queue
import grpc
import mock
from google.protobuf.timestamp_pb2 import Timestamp
from descarteslabs.common.proto.xyz import xyz_pb2
from descarteslabs.common.proto.logging import logging_pb2
from descarteslabs.workflows import _channel, client
from .. import XYZ, XYZLogListener
from ..published_graft import PublishedGraft
from ..utils import (
pb_datetime_to_milliseconds,
pb_milliseconds_to_datetime,
py_log_level_to_proto_log_level,
)
from ..visualization import VizOption
from . import utils
def mock_CreateXYZ(msg: xyz_pb2.CreateXYZRequest, **kwargs) -> xyz_pb2.XYZ:
assert isinstance(msg, xyz_pb2.CreateXYZRequest)
expires_timestamp = Timestamp()
expires_timestamp.FromJsonString("2003-01-02T04:05:06.789+00:00")
return xyz_pb2.XYZ(
id="mclovin",
name=msg.name,
description=msg.description,
serialized_graft=msg.serialized_graft,
typespec=msg.typespec,
parameters=msg.parameters,
public=msg.public,
viz_options=msg.viz_options,
expires_timestamp=expires_timestamp,
channel=msg.channel,
client_version=msg.client_version,
)
@mock.patch(
"descarteslabs.workflows.models.published_graft.get_global_grpc_client",
new=lambda: utils.MockedClient(),
)
@mock.patch(
"descarteslabs.workflows.models.xyz.get_global_grpc_client",
new=lambda: utils.MockedClient(),
)
@mock.patch("descarteslabs.common.proto.xyz.xyz_pb2_grpc.XYZAPIStub")
class TestXYZ(object):
def test_init(self, stub):
CreateXYZ = stub.return_value.CreateXYZ
CreateXYZ.side_effect = mock_CreateXYZ
obj = utils.Bar(utils.Foo(1))
name = "bar"
desc = "a bar"
public = True
viz_options = [
VizOption(
id="viz1",
bands=["red", "green", "blue"],
scales=[[0, 0.4], [0, 0.4], [0, 0.4]],
),
]
# do some hackery to pull out the `self._message` produced by the superclass's `__init__`
super_message = None
orig_init = PublishedGraft.__init__
def patched_init(self, *args, **kwargs):
"pull out `self._message` at the end of `PublishedGraft.__init__` so we can use it in tests"
orig_init(self, *args, **kwargs)
nonlocal super_message
super_message = self._message
with mock.patch.object(PublishedGraft, "__init__", patched_init):
xyz = XYZ(
obj,
name=name,
description=desc,
viz_options=viz_options,
)
expected_req = xyz_pb2.CreateXYZRequest(
name=name,
description=desc,
serialized_graft=super_message.serialized_graft,
typespec=super_message.typespec,
parameters=super_message.parameters,
public=public,
viz_options=[vp._message for vp in viz_options],
channel=super_message.channel,
client_version=super_message.client_version,
)
CreateXYZ.assert_called_once_with(
expected_req,
timeout=client.Client.DEFAULT_TIMEOUT,
metadata=mock.ANY,
)
assert xyz._message == mock_CreateXYZ(expected_req)
assert xyz.name == name
assert xyz.description == desc
assert xyz.expires_timestamp == datetime.datetime(2003, 1, 2, 4, 5, 6, 789000)
assert xyz.viz_options == viz_options
def test_get(self, stub):
message = "foo"
stub.return_value.GetXYZ.return_value = message
with mock.patch.object(XYZ, "_from_proto") as _from_proto:
XYZ.get("fake_id")
_from_proto.assert_called_once()
assert _from_proto.call_args[0][0] is message
stub.return_value.GetXYZ.assert_called_once_with(
xyz_pb2.GetXYZRequest(xyz_id="fake_id"),
timeout=client.Client.DEFAULT_TIMEOUT,
metadata=mock.ANY,
)
def test_properties(self, stub):
stub.return_value.CreateXYZ.side_effect = mock_CreateXYZ
obj = utils.Bar(utils.Foo(1))
xyz = XYZ(obj, name="bar", description="a bar")
assert xyz.object is obj
assert xyz.type is type(obj)
assert xyz.name == "bar"
assert xyz.description == "a bar"
assert xyz.channel == _channel.__channel__
xyz._message.id = "1234"
xyz._message.created_timestamp = 100
xyz._message.updated_timestamp = 200
assert xyz.id == "1234"
assert xyz.created_timestamp == pb_milliseconds_to_datetime(100)
assert xyz.updated_timestamp == pb_milliseconds_to_datetime(200)
def test_iter_tile_logs(self, stub):
stub.return_value.CreateXYZ.side_effect = mock_CreateXYZ
start_datetime = datetime.datetime.now(datetime.timezone.utc)
log_level = logging.WARNING
session_id = "bar"
logs = [
xyz_pb2.XYZLogRecord(
record=logging_pb2.LogRecord(message="foo"), session_id=session_id
),
xyz_pb2.XYZLogRecord(
record=logging_pb2.LogRecord(message="bar"), session_id=session_id
),
]
xyz = XYZ(utils.Foo(1), name="foo", description="a foo")
stub.return_value.GetXYZSessionLogs.return_value = logs
assert (
list(
xyz.iter_tile_logs(
session_id=session_id,
start_datetime=start_datetime,
level=log_level,
)
)
== logs
)
stub.return_value.GetXYZSessionLogs.assert_called_once_with(
xyz_pb2.GetXYZSessionLogsRequest(
session_id=session_id,
start_timestamp=pb_datetime_to_milliseconds(start_datetime),
xyz_id=xyz.id,
level=py_log_level_to_proto_log_level(log_level),
),
timeout=client.Client.STREAM_TIMEOUT,
metadata=mock.ANY,
)
def test_url(self, stub):
stub.return_value.CreateXYZ.side_effect = mock_CreateXYZ
obj = utils.Foo(1)
xyz = XYZ(obj)
url_template = xyz._message.url_template = "http://base.net"
assert xyz.url() == url_template
def test_wmts_url(self, stub):
stub.return_value.CreateXYZ.side_effect = mock_CreateXYZ
obj = utils.Foo(1)
xyz = XYZ(obj)
wmts_url_template = (
"http://base.net/wmts/xyz/mclovin/1.0.0/WMTSCapabilities.xml"
)
xyz._message.wmts_url_template = wmts_url_template
assert xyz.wmts_url() == wmts_url_template
assert (
xyz.wmts_url(tile_matrix_sets="utm")
== wmts_url_template + "?tile_matrix_sets=utm"
)
assert (
xyz.wmts_url(tile_matrix_sets=["EPSG:4326", "EPSG:3857"])
== wmts_url_template
+ "?tile_matrix_sets=EPSG%3A4326&tile_matrix_sets=EPSG%3A3857"
)
@mock.patch("descarteslabs.workflows.models.xyz._tile_log_stream")
def test_xyz_log_listener(log_stream_mock):
class FakeRendezvous(object):
def __init__(self, q):
self.q = q
def __iter__(self):
while True:
msg = self.q.get()
if msg != "cancel":
yield msg
self.q.task_done()
else:
self.q.task_done()
raise grpc.RpcError
def cancel(self):
self.q.put("cancel")
q = queue.Queue()
rendezvous = FakeRendezvous(q)
log_stream_mock.return_value = rendezvous
listener = XYZLogListener("foobar")
msgs = []
listener.add_callback(lambda msg: msgs.append(msg))
listener.listen("foobar")
log_stream_mock.assert_called_once()
# simulate incoming messages
q.put("first")
q.put("second")
q.join() # avoid possible race condition in test
assert msgs == ["first", "second"]
stopped = listener.stop(timeout=1)
assert stopped
assert not listener.running()
assert len(msgs) == 2
|
libraries/top_menu.py | Rod-O/Deviot | 327 | 11070227 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import sublime_plugin
from os import path
from ..api import deviot
from .I18n import I18n
from .menu_files import MenuFiles
from .tools import get_setting, save_setting
class CheckMenuFilesCommand(sublime_plugin.WindowCommand):
def run(self):
menu_path = deviot.main_menu_path()
compile_lang = get_setting('compile_lang', True)
if(compile_lang or not path.exists(menu_path)):
TopMenu().make_menu_files()
save_setting('compile_lang', False)
class CompileMenuCommand(sublime_plugin.WindowCommand):
def run(self):
TopMenu().make_menu_files()
class TopMenu(MenuFiles):
def __init__(self):
super(TopMenu, self).__init__()
def create_main_menu(self):
"""Main Menu
Generates the main menu of the plugin.
The main menu is built from different sources; here those sources
are queried for their data, the data is processed (e.g. translated),
and the result is stored as a menu file (menu_name.sublime-menu).
"""
menu_preset = self.get_template_menu('main_menu.json')
path = deviot.plugin_path()
for option in menu_preset:
option = self.translate_childrens(option)
for sub in option['children']:
try:
sub = self.translate_childrens(sub)
except KeyError:
pass
self.create_sublime_menu(menu_preset, 'Main', path)
def translate_childrens(self, option_dict):
"""Translate Children Menu
Translate the children entries of a Sublime Text menu
Arguments:
option_dict {dict} -- children to be traslated
Returns:
dict -- children translated
"""
tr = I18n().translate
for children in option_dict['children']:
children['caption'] = tr(children['caption'])
try:
for children_chil in children['children']:
children_chil['caption'] = tr(children_chil['caption'])
except:
pass
return option_dict
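# Illustrative shape of `option_dict` (captions are hypothetical). The
# captions of the entries under 'children' -- and of their nested
# children -- are translated in place:
#     {"caption": "Deviot", "children": [
#         {"caption": "Compile", "children": [{"caption": "Upload"}]}]}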
def make_menu_files(self):
"""Menu Files
Makes each file that needs to be translated, such as
the main menu, quick panel, and context menu.
"""
self.create_main_menu()
self.create_quick_commands()
self.create_context_menu()
|
keras_tuner/tuners/hyperband.py | ageron/keras-tuner | 2,676 | 11070230 | # Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import random
from keras_tuner.engine import oracle as oracle_module
from keras_tuner.engine import tuner as tuner_module
class HyperbandOracle(oracle_module.Oracle):
"""Oracle class for Hyperband.
Note that to use this Oracle with your own subclassed Tuner, your Tuner
class must be able to handle in `Tuner.run_trial` three special hyperparameters
that will be set by this Tuner:
- "tuner/trial_id": String, optionally set. The trial_id of the Trial to load
from when starting this trial.
- "tuner/initial_epoch": Int, always set. The initial epoch the Trial should be
started from.
- "tuner/epochs": Int, always set. The cumulative number of epochs this Trial
should be trained.
These hyperparameters will be set during the "successive halving" portion
of the Hyperband algorithm.
Examples:
```python
def run_trial(self, trial, *args, **kwargs):
hp = trial.hyperparameters
if "tuner/trial_id" in hp:
past_trial = self.oracle.get_trial(hp['tuner/trial_id'])
model = self.load_model(past_trial)
else:
model = self.hypermodel.build(hp)
initial_epoch = hp['tuner/initial_epoch']
last_epoch = hp['tuner/epochs']
for epoch in range(initial_epoch, last_epoch):
self.on_epoch_begin(...)
for step in range(...):
# Run model training step here.
self.on_epoch_end(...)
```
Args:
objective: A string or `keras_tuner.Objective` instance. If a string,
the direction of the optimization (min or max) will be inferred.
It is optional when `Tuner.run_trial()` or `HyperModel.fit()`
returns a single float as the objective to minimize.
max_epochs: Integer, the maximum number of epochs to train one model.
It is recommended to set this to a value slightly higher than the
expected epochs to convergence for your largest Model, and to use
early stopping during training (for example, via
`tf.keras.callbacks.EarlyStopping`). Defaults to 100.
factor: Integer, the reduction factor for the number of epochs
and number of models for each bracket. Defaults to 3.
hyperband_iterations: Integer, at least 1, the number of times to
iterate over the full Hyperband algorithm. One iteration will run
approximately `max_epochs * (math.log(max_epochs, factor) ** 2)`
cumulative epochs across all trials. It is recommended to set this
to as high a value as is within your resource budget. Defaults to
1.
seed: Optional integer, the random seed.
hyperparameters: Optional HyperParameters instance. Can be used to
override (or register in advance) hyperparameters in the search
space.
tune_new_entries: Boolean, whether hyperparameter entries that are
requested by the hypermodel but that were not specified in
`hyperparameters` should be added to the search space, or not. If
not, then the default value for these parameters will be used.
Defaults to True.
allow_new_entries: Boolean, whether the hypermodel is allowed to
request hyperparameter entries not listed in `hyperparameters`.
Defaults to True.
"""
def __init__(
self,
objective=None,
max_epochs=100,
factor=3,
hyperband_iterations=1,
seed=None,
hyperparameters=None,
allow_new_entries=True,
tune_new_entries=True,
):
super(HyperbandOracle, self).__init__(
objective=objective,
hyperparameters=hyperparameters,
allow_new_entries=allow_new_entries,
tune_new_entries=tune_new_entries,
seed=seed,
)
if factor < 2:
raise ValueError("factor needs to be a int larger than 1.")
self.hyperband_iterations = hyperband_iterations or float("inf")
self.max_epochs = max_epochs
# Minimum epochs before successive halving; Hyperband sweeps through varying
# degrees of aggressiveness.
self.min_epochs = 1
self.factor = factor
self.seed = seed or random.randint(1, 10000)
self._max_collisions = 20
self._seed_state = self.seed
self._tried_so_far = set()
self._current_iteration = 0
# Start with most aggressively halving bracket.
self._current_bracket = self._get_num_brackets() - 1
self._brackets = []
self._start_new_bracket()
def populate_space(self, trial_id):
self._remove_completed_brackets()
for bracket in self._brackets:
bracket_num = bracket["bracket_num"]
rounds = bracket["rounds"]
if len(rounds[0]) < self._get_size(bracket_num, round_num=0):
# Populate the initial random trials for this bracket.
return self._random_trial(trial_id, bracket)
else:
# Try to populate incomplete rounds for this bracket.
for round_num in range(1, len(rounds)):
round_info = rounds[round_num]
past_round_info = rounds[round_num - 1]
size = self._get_size(bracket_num, round_num)
past_size = self._get_size(bracket_num, round_num - 1)
# If more trials from the last round are ready than will be
# thrown out, we can select the best to run for the next round.
already_selected = [info["past_id"] for info in round_info]
candidates = [
self.trials[info["id"]]
for info in past_round_info
if info["id"] not in already_selected
]
candidates = [t for t in candidates if t.status == "COMPLETED"]
if len(candidates) > past_size - size:
sorted_candidates = sorted(
candidates,
key=lambda t: t.score,
reverse=self.objective.direction == "max",
)
best_trial = sorted_candidates[0]
values = best_trial.hyperparameters.values.copy()
values["tuner/trial_id"] = best_trial.trial_id
values["tuner/epochs"] = self._get_epochs(
bracket_num, round_num
)
values["tuner/initial_epoch"] = self._get_epochs(
bracket_num, round_num - 1
)
values["tuner/bracket"] = self._current_bracket
values["tuner/round"] = round_num
round_info.append(
{"past_id": best_trial.trial_id, "id": trial_id}
)
return {"status": "RUNNING", "values": values}
# This is reached if no trials from current brackets can be run.
# Max sweeps has been reached, no more brackets should be created.
if (
self._current_bracket == 0
and self._current_iteration + 1 >= self.hyperband_iterations
):
# Stop creating new brackets, but wait to complete other brackets.
if self.ongoing_trials:
return {"status": "IDLE"}
else:
self._increment_bracket_num()
return {"status": "STOPPED"}
# Create a new bracket.
else:
self._increment_bracket_num()
self._start_new_bracket()
return self._random_trial(trial_id, self._brackets[-1])
def _start_new_bracket(self):
rounds = []
for _ in range(self._get_num_rounds(self._current_bracket)):
rounds.append([])
bracket = {"bracket_num": self._current_bracket, "rounds": rounds}
self._brackets.append(bracket)
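# Illustrative shape of one bracket entry (trial ids are hypothetical): round 0
# is filled by _random_trial, later rounds by successive halving in populate_space.
#     {"bracket_num": 2,
#      "rounds": [[{"past_id": None, "id": "t0"}, {"past_id": None, "id": "t1"}],
#                 [{"past_id": "t0", "id": "t5"}],
#                 []]}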
def _increment_bracket_num(self):
self._current_bracket -= 1
if self._current_bracket < 0:
self._current_bracket = self._get_num_brackets() - 1
self._current_iteration += 1
if self._current_iteration > self.hyperband_iterations:
self._current_bracket = 0
def _remove_completed_brackets(self):
# Filter out completed brackets.
def _bracket_is_incomplete(bracket):
bracket_num = bracket["bracket_num"]
rounds = bracket["rounds"]
last_round = len(rounds) - 1
if len(rounds[last_round]) == self._get_size(bracket_num, last_round):
# All trials have been created for the current bracket.
return False
return True
self._brackets = list(filter(_bracket_is_incomplete, self._brackets))
def _random_trial(self, trial_id, bracket):
bracket_num = bracket["bracket_num"]
rounds = bracket["rounds"]
values = self._random_values()
if values:
values["tuner/epochs"] = self._get_epochs(bracket_num, 0)
values["tuner/initial_epoch"] = 0
values["tuner/bracket"] = self._current_bracket
values["tuner/round"] = 0
rounds[0].append({"past_id": None, "id": trial_id})
return {"status": "RUNNING", "values": values}
elif self.ongoing_trials:
# Can't create new random values, but successive halvings may still
# be needed.
return {"status": "IDLE"}
else:
# Collision and no ongoing trials should trigger an exit.
return {"status": "STOPPED"}
def _get_size(self, bracket_num, round_num):
# Set up so that each bracket takes approx. the same amount of resources.
bracket0_end_size = math.ceil(1 + math.log(self.max_epochs, self.factor))
bracket_end_size = bracket0_end_size / (bracket_num + 1)
return math.ceil(bracket_end_size * self.factor ** (bracket_num - round_num))
def _get_epochs(self, bracket_num, round_num):
return math.ceil(self.max_epochs / self.factor ** (bracket_num - round_num))
def _get_num_rounds(self, bracket_num):
# Bracket 0 just runs random search, others do successive halving.
return bracket_num + 1
def _get_num_brackets(self):
epochs = self.max_epochs
brackets = 0
while epochs >= self.min_epochs:
epochs = epochs / self.factor
brackets += 1
return brackets
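# Worked example of the bracket arithmetic above (illustrative; exact counts
# depend on ceil rounding). With max_epochs=27 and factor=3 there are 4
# brackets; the most aggressive one (bracket_num=3) runs roughly:
#     round 0: ~27 trials x 1 epoch
#     round 1:  ~9 trials x 3 epochs
#     round 2:  ~3 trials x 9 epochs
#     round 3:  ~1 trial  x 27 epochs
# Each round keeps roughly the best 1/factor of the previous round's trials.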
def get_state(self):
state = super(HyperbandOracle, self).get_state()
state.update(
{
"hyperband_iterations": self.hyperband_iterations,
"max_epochs": self.max_epochs,
"min_epochs": self.min_epochs,
"factor": self.factor,
"brackets": self._brackets,
"current_bracket": self._current_bracket,
"current_iteration": self._current_iteration,
}
)
return state
def set_state(self, state):
super(HyperbandOracle, self).set_state(state)
self.hyperband_iterations = state["hyperband_iterations"]
self.max_epochs = state["max_epochs"]
self.min_epochs = state["min_epochs"]
self.factor = state["factor"]
self._brackets = state["brackets"]
self._current_bracket = state["current_bracket"]
self._current_iteration = state["current_iteration"]
class Hyperband(tuner_module.Tuner):
"""Variation of HyperBand algorithm.
Reference:
<NAME>, and <NAME>.
["Hyperband: A Novel Bandit-Based
Approach to Hyperparameter Optimization."
Journal of Machine Learning Research 18 (2018): 1-52](
http://jmlr.org/papers/v18/16-558.html).
Args:
hypermodel: Instance of `HyperModel` class (or callable that takes
hyperparameters and returns a `Model` instance). It is optional
when `Tuner.run_trial()` is overridden and does not use
`self.hypermodel`.
objective: A string or `keras_tuner.Objective` instance. If a string,
the direction of the optimization (min or max) will be inferred.
It is optional when `Tuner.run_trial()` or `HyperModel.fit()`
returns a single float as the objective to minimize.
max_epochs: Integer, the maximum number of epochs to train one model.
It is recommended to set this to a value slightly higher than the
expected epochs to convergence for your largest Model, and to use
early stopping during training (for example, via
`tf.keras.callbacks.EarlyStopping`). Defaults to 100.
factor: Integer, the reduction factor for the number of epochs
and number of models for each bracket. Defaults to 3.
hyperband_iterations: Integer, at least 1, the number of times to
iterate over the full Hyperband algorithm. One iteration will run
approximately `max_epochs * (math.log(max_epochs, factor) ** 2)`
cumulative epochs across all trials. It is recommended to set this
to as high a value as is within your resource budget. Defaults to
1.
seed: Optional integer, the random seed.
hyperparameters: Optional HyperParameters instance. Can be used to
override (or register in advance) hyperparameters in the search
space.
tune_new_entries: Boolean, whether hyperparameter entries that are
requested by the hypermodel but that were not specified in
`hyperparameters` should be added to the search space, or not. If
not, then the default value for these parameters will be used.
Defaults to True.
allow_new_entries: Boolean, whether the hypermodel is allowed to
request hyperparameter entries not listed in `hyperparameters`.
Defaults to True.
**kwargs: Keyword arguments relevant to all `Tuner` subclasses.
Please see the docstring for `Tuner`.
"""
def __init__(
self,
hypermodel=None,
objective=None,
max_epochs=100,
factor=3,
hyperband_iterations=1,
seed=None,
hyperparameters=None,
tune_new_entries=True,
allow_new_entries=True,
**kwargs
):
oracle = HyperbandOracle(
objective,
max_epochs=max_epochs,
factor=factor,
hyperband_iterations=hyperband_iterations,
seed=seed,
hyperparameters=hyperparameters,
tune_new_entries=tune_new_entries,
allow_new_entries=allow_new_entries,
)
super(Hyperband, self).__init__(
oracle=oracle, hypermodel=hypermodel, **kwargs
)
def run_trial(self, trial, *fit_args, **fit_kwargs):
hp = trial.hyperparameters
if "tuner/epochs" in hp.values:
fit_kwargs["epochs"] = hp.values["tuner/epochs"]
fit_kwargs["initial_epoch"] = hp.values["tuner/initial_epoch"]
return super(Hyperband, self).run_trial(trial, *fit_args, **fit_kwargs)
def _build_model(self, hp):
model = super(Hyperband, self)._build_model(hp)
if "tuner/trial_id" in hp.values:
trial_id = hp.values["tuner/trial_id"]
history_trial = self.oracle.get_trial(trial_id)
# Load best checkpoint from this trial.
model.load_weights(
self._get_checkpoint_fname(
history_trial.trial_id, history_trial.best_step
)
)
return model
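# Illustrative usage sketch (`build_model`, `x_train`, and `y_train` are
# hypothetical user-supplied objects, not part of this module):
#
#     import keras_tuner as kt
#
#     tuner = kt.Hyperband(
#         hypermodel=build_model,       # hp -> compiled Keras model
#         objective="val_accuracy",
#         max_epochs=30,
#         factor=3,
#         hyperband_iterations=2,
#     )
#     tuner.search(x_train, y_train, validation_split=0.2)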
|