# city-infrastructure-platform/settings.py (from City-of-Helsinki/city-infrastructure-platform)
"""
Django settings for city-infrastructure-platform project.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import sentry_sdk
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import gettext_lazy as _
from helusers.defaults import SOCIAL_AUTH_PIPELINE # noqa: F401
from sentry_sdk.integrations.django import DjangoIntegration
from .utils import git_version
# Set up .env file
checkout_dir = environ.Path(__file__) - 2
assert os.path.exists(checkout_dir("manage.py"))
parent_dir = checkout_dir.path("..")
if parent_dir() != "/" and os.path.isdir(parent_dir("etc")):
env_file = parent_dir("etc/env")
default_var_root = parent_dir("var")
else:
env_file = checkout_dir(".env")
default_var_root = checkout_dir("var")
BASE_DIR = checkout_dir()
env = environ.Env(
DEBUG=(bool, False),
TIER=(str, "dev"), # one of: prod, qa, stage, test, dev
SECRET_KEY=(str, ""),
VAR_ROOT=(str, default_var_root),
ALLOWED_HOSTS=(list, []),
TRUST_X_FORWARDED_HOST=(bool, False),
DATABASE_URL=(
str,
"postgis:///city-infrastructure-platform",
),
CACHE_URL=(str, "locmemcache://"),
EMAIL_URL=(str, "consolemail://"),
SENTRY_DSN=(str, ""),
AZURE_DEPLOYMENT=(bool, False),
AZURE_ACCOUNT_KEY=(str, False),
AZURE_CONTAINER=(str, False),
AZURE_ACCOUNT_NAME=(str, False),
OIDC_AUTHENTICATION_ENABLED=(bool, True),
SOCIAL_AUTH_TUNNISTAMO_KEY=(str, None),
SOCIAL_AUTH_TUNNISTAMO_SECRET=(str, None),
OIDC_API_TOKEN_AUTH_AUDIENCE=(str, None),
OIDC_API_TOKEN_AUTH_ISSUER=(str, None),
TOKEN_AUTH_MAX_TOKEN_AGE=(int, 600),
OIDC_ENDPOINT=(str, None),
HELUSERS_ADGROUPS_CLAIM=(str, "groups"),
LOGGING_AUTH_DEBUG=(bool, False),
OVERLAY_SOURCE_URL=(str, "https://geoserver.hel.fi/geoserver/city-infra/wms"),
BASEMAP_SOURCE_URL=(str, "https://kartta.hel.fi/ws/geoserver/avoindata/wms"),
STATIC_URL=(str, "/static/"),
MEDIA_URL=(str, "/media/"),
)
if os.path.exists(env_file):
env.read_env(env_file)
SOCIAL_AUTH_TUNNISTAMO_KEY = env("SOCIAL_AUTH_TUNNISTAMO_KEY")
SOCIAL_AUTH_TUNNISTAMO_SECRET = env("SOCIAL_AUTH_TUNNISTAMO_SECRET")
HELUSERS_ADGROUPS_CLAIM = env("HELUSERS_ADGROUPS_CLAIM")
SOCIAL_AUTH_ID_TOKEN_IN_END_SESSION = False
if env("OIDC_ENDPOINT"):
SOCIAL_AUTH_TUNNISTAMO_OIDC_ENDPOINT = env("OIDC_ENDPOINT")
OIDC_API_TOKEN_AUTH = {
"AUDIENCE": env("OIDC_API_TOKEN_AUTH_AUDIENCE"),
"ISSUER": env("OIDC_API_TOKEN_AUTH_ISSUER"),
}
# General settings
DEBUG = env("DEBUG")
OIDC_AUTHENTICATION_ENABLED = env("OIDC_AUTHENTICATION_ENABLED")
TIER = env("TIER")
SECRET_KEY = env("SECRET_KEY")
if DEBUG and not SECRET_KEY:
SECRET_KEY = "xxx"
ALLOWED_HOSTS = env("ALLOWED_HOSTS")
if OIDC_AUTHENTICATION_ENABLED and (
not SOCIAL_AUTH_TUNNISTAMO_KEY
or not SOCIAL_AUTH_TUNNISTAMO_SECRET
or not OIDC_API_TOKEN_AUTH["AUDIENCE"]
or not OIDC_API_TOKEN_AUTH["ISSUER"]
):
raise ImproperlyConfigured("Authentication not configured properly")
CACHES = {"default": env.cache()}
vars().update(env.email_url()) # EMAIL_BACKEND etc.
# Logging
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"timestamped_named": {
"format": "%(asctime)s %(name)s %(levelname)s: %(message)s",
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "timestamped_named",
},
# Just for reference, not used
"blackhole": {"class": "logging.NullHandler"},
},
"loggers": {
"django": {"handlers": ["console"], "level": "INFO"},
"helusers": {
"handlers": ["console"],
"level": "DEBUG" if env("LOGGING_AUTH_DEBUG") else "INFO",
"propagate": False,
},
},
}
# Application definition
DJANGO_APPS = [
"helusers",
"social_django",
"helusers.apps.HelusersAdminConfig",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.gis",
]
THIRD_PARTY_APPS = [
"django_extensions",
"rest_framework",
"rest_framework.authtoken",
"corsheaders",
"drf_yasg",
"django_filters",
"auditlog",
]
LOCAL_APPS = [
"users.apps.UsersConfig",
"traffic_control.apps.TrafficControlConfig",
"map.apps.MapConfig",
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
AUTHENTICATION_BACKENDS = (
"helusers.tunnistamo_oidc.TunnistamoOIDCAuth",
"django.contrib.auth.backends.ModelBackend",
)
AUTH_USER_MODEL = "users.User"
LOGIN_REDIRECT_URL = "/admin/"
LOGOUT_REDIRECT_URL = "/admin/login/"
SOCIAL_AUTH_TUNNISTAMO_AUTH_EXTRA_ARGUMENTS = {"ui_locales": "fi"}
WAGTAIL_SITE_NAME = _("City Infrastructure Platform")
SESSION_SERIALIZER = "django.contrib.sessions.serializers.PickleSerializer"
MIDDLEWARE = [
"deployment.middleware.HealthCheckMiddleware",
"azure_client_ip.middleware.AzureClientIPMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.middleware.locale.LocaleMiddleware",
"auditlog.middleware.AuditlogMiddleware",
]
ROOT_URLCONF = "city-infrastructure-platform.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [checkout_dir("templates"), checkout_dir("map-view/build")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "city-infrastructure-platform.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {"default": env.db("DATABASE_URL")}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "fi"
LANGUAGES = [("fi", _("Finnish")), ("en", _("English"))]
TIME_ZONE = "Europe/Helsinki"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
var_root = env.path("VAR_ROOT")
STATIC_ROOT = var_root("static")
MEDIA_ROOT = var_root("media")
STATIC_URL = env("STATIC_URL")
MEDIA_URL = env("MEDIA_URL")
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.ManifestStaticFilesStorage"
STATICFILES_DIRS = [checkout_dir("map-view/build/static")]
# Whether to trust X-Forwarded-Host headers for all purposes
# where Django would need to make use of its own hostname
# e.g. generating absolute URLs pointing to itself
# Most often used in reverse proxy setups
USE_X_FORWARDED_HOST = env("TRUST_X_FORWARDED_HOST")
# Django REST Framework
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": [
"helusers.oidc.ApiTokenAuthentication",
"rest_framework.authentication.TokenAuthentication",
"rest_framework.authentication.BasicAuthentication",
"rest_framework.authentication.SessionAuthentication",
],
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
"DEFAULT_FILTER_BACKENDS": ["django_filters.rest_framework.DjangoFilterBackend"],
"PAGE_SIZE": 20,
"OIDC_LEEWAY": env("TOKEN_AUTH_MAX_TOKEN_AGE"),
"GROUP_CLAIM_NAME": "groups",
}
# django-cors
if DEBUG:
CORS_ORIGIN_ALLOW_ALL = True
# Azure CLIENT_IP middleware
AZURE_DEPLOYMENT = env.bool("AZURE_DEPLOYMENT")
if AZURE_DEPLOYMENT:
AZURE_ACCOUNT_KEY = env.str("AZURE_ACCOUNT_KEY")
AZURE_CONTAINER = env.str("AZURE_CONTAINER")
AZURE_ACCOUNT_NAME = env.str("AZURE_ACCOUNT_NAME")
DEFAULT_FILE_STORAGE = "storages.backends.azure_storage.AzureStorage"
# Sentry-SDK
SENTRY_DSN = env.str("SENTRY_DSN")
VERSION = git_version()
if SENTRY_DSN:
sentry_sdk.init(dsn=SENTRY_DSN, integrations=[DjangoIntegration()], release=VERSION)
# Custom settings
SRID = 3879 # the spatial reference id used for geometries
OVERLAY_SOURCE_URL = env.str("OVERLAY_SOURCE_URL")
BASEMAP_SOURCE_URL = env.str("BASEMAP_SOURCE_URL")
LOCALE_PATHS = [
"./templates/locale",
]
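# Illustrative only (not part of the original settings module): a minimal .env
# file that the environ.Env schema defined above would accept. All values are
# placeholders, not real configuration.
#
#   DEBUG=True
#   SECRET_KEY=dev-only-secret
#   ALLOWED_HOSTS=localhost,127.0.0.1
#   DATABASE_URL=postgis://user:password@localhost:5432/city-infrastructure-platform
#   OIDC_AUTHENTICATION_ENABLED=False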
# test/tc/tet_tc_base_predict_multiclass.py (from dumpmemory/Pytorch-NLU)
# !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2021/7/25 19:30
# @author : Mo
# @function: predict model, multi-class text classification prediction module
# adapted for Linux
import platform
import json
import sys
import os
path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
path_sys = os.path.join(path_root, "pytorch_nlu", "pytorch_textclassification")
print(path_root)
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from tcPredict import TextClassificationPredict
if __name__ == "__main__":
path_config = "../output/text_classification/model_ERNIE/tc.config"
tcp = TextClassificationPredict(path_config)
texts = [{"text": "平乐县,古称昭州,隶属于广西壮族自治区桂林市,位于广西东北部,桂林市东南部,东临钟山县,南接昭平,西北毗邻阳朔,北连恭城,总面积1919.34平方公里。"},
{"text": "平乐县主要旅游景点有榕津千年古榕、冷水石景苑、仙家温泉、桂江风景区、漓江风景区等,平乐县为漓江分界点,平乐以北称漓江,以南称桂江,是著名的大桂林旅游区之一。"},
{"text": "印岭玲珑,昭水晶莹,环绕我平中。青年的乐园,多士受陶熔。生活自觉自治,学习自发自动。五育并重,手脑并用。迎接新潮流,建设新平中"},
{"text": "桂林山水甲天下, 阳朔山水甲桂林"},
]
res = tcp.predict(texts, logits_type="sigmoid")
print(res)
while True:
print("请输入:")
question = input()
res = tcp.predict([{"text": question}], logits_type="sigmoid")
print(res)
# tests/test_create_spreadsheet_values.py (from Tunous/StringSheet)
import unittest
from stringsheet.parser import create_spreadsheet_values
from stringsheet.parser import create_language_sheet_values
from stringsheet.parser import parse_resources
class BaseTestCase(unittest.TestCase):
def setUp(self):
self.resources = parse_resources('test-resources/res')
class CreateSpreadsheetValuesTestCase(BaseTestCase):
def setUp(self):
super(CreateSpreadsheetValuesTestCase, self).setUp()
self.values = create_spreadsheet_values(self.resources)
def test_rows_are_valid(self):
rows = [
['id', 'comment', 'default', 'de', 'pl', 'zh-rCN', 'zh-rTW'],
['a_string', '', 'A string', '', '', '', ''],
['partly_added', '', 'Partly added', 'Partly added (de)', '', '',
''],
['string', 'String with comment', 'String', 'String (de)',
'String (pl)', 'String (zh-rCN)', 'String (zh-rTW)'],
['string_2', '', 'String 2', '', '', '', ''],
['array[0]', 'Item comment', 'First', '', '', '', ''],
['array[1]', '', 'Second', '', '', '', ''],
['array_comment[0]', 'Array comment', 'Some item', '', '', '', ''],
['array_comment[1]', 'Array comment', 'More items', '', '', '', ''],
['array_comment[2]', 'Comment', 'More', '', '', '', ''],
['plural{zero}', 'Parent comment', 'Other', '', '', '', ''],
['plural{one}', 'Parent comment', 'One', '', '', '', ''],
['plural{two}', 'Parent comment', 'Other', '', '', '', ''],
['plural{few}', 'Parent comment', 'Other', '', '', '', ''],
['plural{many}', 'Parent comment', 'Other', '', '', '', ''],
['plural{other}', 'Comment', 'Other', '', '', '', ''],
['plurals{zero}', 'Item comment', 'Zero', '', '', '', ''],
['plurals{one}', '', 'One', '', '', '', ''],
['plurals{two}', '', 'Two', '', '', '', ''],
['plurals{few}', '', 'Few', '', '', '', ''],
['plurals{many}', '', 'Many', '', '', '', ''],
['plurals{other}', '', 'Other', '', '', '', ''],
]
self.assertEqual(len(rows), len(self.values))
for index, row in enumerate(rows):
self.assertEqual(row, self.values[index])
class CreateLanguageSpreadsheetValuesTestCase(BaseTestCase):
def setUp(self):
super(CreateLanguageSpreadsheetValuesTestCase, self).setUp()
self.values = create_language_sheet_values(self.resources, 'de')
def test_rows_are_valid(self):
rows = [
['id', 'comment', 'default', 'de'],
['a_string', '', 'A string', ''],
['partly_added', '', 'Partly added', 'Partly added (de)'],
['string', 'String with comment', 'String', 'String (de)'],
['string_2', '', 'String 2', ''],
['array[0]', 'Item comment', 'First', ''],
['array[1]', '', 'Second', ''],
['array_comment[0]', 'Array comment', 'Some item', ''],
['array_comment[1]', 'Array comment', 'More items', ''],
['array_comment[2]', 'Comment', 'More', ''],
['plural{zero}', 'Parent comment', 'Other', ''],
['plural{one}', 'Parent comment', 'One', ''],
['plural{two}', 'Parent comment', 'Other', ''],
['plural{few}', 'Parent comment', 'Other', ''],
['plural{many}', 'Parent comment', 'Other', ''],
['plural{other}', 'Comment', 'Other', ''],
['plurals{zero}', 'Item comment', 'Zero', ''],
['plurals{one}', '', 'One', ''],
['plurals{two}', '', 'Two', ''],
['plurals{few}', '', 'Few', ''],
['plurals{many}', '', 'Many', ''],
['plurals{other}', '', 'Other', ''],
]
self.assertEqual(len(rows), len(self.values))
for index, row in enumerate(rows):
self.assertEqual(row, self.values[index])
class CreateTemplateSpreadsheetValuesTestCase(BaseTestCase):
def setUp(self):
super(CreateTemplateSpreadsheetValuesTestCase, self).setUp()
self.values = create_language_sheet_values(self.resources, 'Template')
def test_rows_are_valid(self):
rows = [
['id', 'comment', 'default', 'language-id'],
['a_string', '', 'A string', ''],
['partly_added', '', 'Partly added', ''],
['string', 'String with comment', 'String', ''],
['string_2', '', 'String 2', ''],
['array[0]', 'Item comment', 'First', ''],
['array[1]', '', 'Second', ''],
['array_comment[0]', 'Array comment', 'Some item', ''],
['array_comment[1]', 'Array comment', 'More items', ''],
['array_comment[2]', 'Comment', 'More', ''],
['plural{zero}', 'Parent comment', 'Other', ''],
['plural{one}', 'Parent comment', 'One', ''],
['plural{two}', 'Parent comment', 'Other', ''],
['plural{few}', 'Parent comment', 'Other', ''],
['plural{many}', 'Parent comment', 'Other', ''],
['plural{other}', 'Comment', 'Other', ''],
['plurals{zero}', 'Item comment', 'Zero', ''],
['plurals{one}', '', 'One', ''],
['plurals{two}', '', 'Two', ''],
['plurals{few}', '', 'Few', ''],
['plurals{many}', '', 'Many', ''],
['plurals{other}', '', 'Other', ''],
]
self.assertEqual(len(rows), len(self.values))
for index, row in enumerate(rows):
self.assertEqual(row, self.values[index])
if __name__ == '__main__':
unittest.main()
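# Illustrative only: with the test-resources/res fixtures in place, these tests
# can be run with the standard unittest runner, for example something like
#
#   python -m unittest tests.test_create_spreadsheet_values -v
#
# (the exact module path depends on how the repository is laid out).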
# libs/imgutils.py (from EpicKiwi/projet-datascience)
import cv2
def img_path2array(path):
return cv2.cvtColor(cv2.imread(path, 10), cv2.COLOR_BGR2RGB)
def img_array2file(path, array):
cv2.imwrite(path, cv2.cvtColor(array, cv2.COLOR_RGB2BGR))
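# Illustrative usage sketch (file names are hypothetical):
#
#   rgb = img_path2array("input.jpg")     # cv2.imread + BGR -> RGB conversion
#   img_array2file("output.png", rgb)     # RGB -> BGR conversion + cv2.imwrite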
# bin/nsa_fail/nsa_fail.py (from changhoonhahn/SEDflow)
import os, sys
import numpy as np
from sedflow import obs as Obs
from sedflow import train as Train
from provabgs import infer as Infer
from provabgs import models as Models
####################################################
# input
####################################################
sample = sys.argv[1]
itrain = int(sys.argv[2])
nhidden = int(sys.argv[3])
nblocks = int(sys.argv[4])
niter = int(sys.argv[5])
i0 = int(sys.argv[6])
i1 = int(sys.argv[7])
####################################################
# compile NSA failures
####################################################
# u, g, r, i, z, sigma_u, sigma_g, sigma_r, sigma_i, sigma_z, redshift
y_nsa = Obs.load_nsa_data(test_set=False)
igals = np.load('/scratch/network/chhahn/sedflow/nsa_fail/fail.igals.npy')
# convert to flux
y_flux = Train.mag2flux(y_nsa[:,:5])
y_ivar = Train.sigma_mag2flux(y_nsa[:,5:10], y_nsa[:,:5])**-2
y_zred = y_nsa[:,-1]
####################################################
# setup inference
####################################################
# SPS parameter priors
prior_sps = Infer.load_priors([
Infer.UniformPrior(7., 12.5, label='sed'),
Infer.FlatDirichletPrior(4, label='sed'), # flat dirichilet priors
Infer.UniformPrior(0., 1., label='sed'), # burst fraction
Infer.UniformPrior(1e-2, 13.27, label='sed'), # tburst
Infer.LogUniformPrior(4.5e-5, 1.5e-2, label='sed'), # log uniform priors on ZH coeff
Infer.LogUniformPrior(4.5e-5, 1.5e-2, label='sed'), # log uniform priors on ZH coeff
Infer.UniformPrior(0., 3., label='sed'), # uniform priors on dust1
Infer.UniformPrior(0., 3., label='sed'), # uniform priors on dust2
Infer.UniformPrior(-2., 1., label='sed') # uniform priors on dust_index
])
# SPS model
m_sps = Models.NMF(burst=True, emulator=True)
def run_mcmc(i_obs):
# NSA MCMC object
nsa_mcmc = Infer.nsaMCMC(model=m_sps, prior=prior_sps)
fmcmc = os.path.join('/scratch/network/chhahn/sedflow/nsa_fail',
'mcmc.nsa.%i.hdf5' % i_obs)
if not os.path.isfile(fmcmc):
print('%s running' % os.path.basename(fmcmc))
if not np.all(np.isfinite(y_flux[i_obs])):
print('NaN photometry', y_flux[i_obs])
return None
if not np.all(np.isfinite(y_ivar[i_obs])):
print('NaN ivar', y_ivar[i_obs])
return None
# run MCMC
zeus_chain = nsa_mcmc.run(
bands='sdss', # u, g, r, i, z
photo_obs=y_flux[i_obs],
photo_ivar_obs=y_ivar[i_obs],
zred=y_zred[i_obs],
vdisp=0.,
sampler='zeus',
nwalkers=30,
burnin=0,
opt_maxiter=2000,
niter=niter,
progress=True,
writeout=fmcmc)
else:
print('%s already exists' % os.path.basename(fmcmc))
return None
for i in range(i0, i1+1):
run_mcmc(igals[i])
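# Illustrative invocation (argument values are placeholders; the script expects
# sample, itrain, nhidden, nblocks, niter, i0, i1 as positional arguments):
#
#   python bin/nsa_fail/nsa_fail.py toy 0 500 5 10000 0 9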
import time
import re
from .keyvalue_provider import KeyValueProvider
from .gcloud_artifact_store import GCloudArtifactStore
from .util import timeit
class GSProvider(KeyValueProvider):
def __init__(self, config, blocking_auth=True, verbose=10, store=None):
self.config = config
self.bucket = config.get('bucket', 'studioml-meta')
self.meta_store = GCloudArtifactStore(config, verbose)
super(GSProvider, self).__init__(
config,
blocking_auth,
verbose,
store)
@timeit
def _get(self, key, shallow=False):
bucket = self.meta_store._get_bucket_obj()
retval = {}
if shallow:
blob_iterator = bucket.list_blobs(
prefix=key, delimiter='/')
bloblist = list(blob_iterator)
blobnames = {b.name for b in bloblist}
prefixes = blob_iterator.prefixes
suffixes = [re.sub('^' + key, '', p) for p in prefixes | blobnames]
retval = set({})
for s in suffixes:
if s.endswith('/'):
retval.add(s[:-1])
else:
retval.add(s)
return retval
else:
blob_iterator = bucket.list_blobs(prefix=key)
for blob in blob_iterator:
suffix = re.sub('^' + key, '', blob.name)
if suffix == '':
return json.loads(blob.download_as_string())
path = suffix.split('/')
path = [p for p in path if p != '']
current_dict = retval
for subdir in path[:-1]:
if subdir != '':
if subdir not in current_dict.keys():
current_dict[subdir] = {}
current_dict = current_dict[subdir]
try:
current_dict[path[-1]] = json.loads(
blob.download_as_string())
except BaseException:
pass
if not any(retval):
return None
else:
return retval
def _delete(self, key):
self.meta_store._delete_file(key)
def _set(self, key, value):
no_retries = 10
sleep_time = 1
for i in range(no_retries):
try:
self.meta_store._get_bucket_obj().blob(key) \
.upload_from_string(json.dumps(value))
break
except BaseException as e:
self.logger.error('uploading data raised an exception:')
self.logger.exception(e)
time.sleep(sleep_time)
| import json
import time
import re
from .keyvalue_provider import KeyValueProvider
from .gcloud_artifact_store import GCloudArtifactStore
from .util import timeit
class GSProvider(KeyValueProvider):
def __init__(self, config, blocking_auth=True, verbose=10, store=None):
self.config = config
self.bucket = config.get('bucket', 'studioml-meta')
self.meta_store = GCloudArtifactStore(config, verbose)
super(GSProvider, self).__init__(
config,
blocking_auth,
verbose,
store)
@timeit
def _get(self, key, shallow=False):
bucket = self.meta_store._get_bucket_obj()
retval = {}
if shallow:
blob_iterator = bucket.list_blobs(
prefix=key, delimiter='/')
bloblist = list(blob_iterator)
blobnames = {b.name for b in bloblist}
prefixes = blob_iterator.prefixes
suffixes = [re.sub('^' + key, '', p) for p in prefixes | blobnames]
retval = set({})
for s in suffixes:
if s.endswith('/'):
retval.add(s[:-1])
else:
retval.add(s)
return retval
else:
blob_iterator = bucket.list_blobs(prefix=key)
for blob in blob_iterator:
suffix = re.sub('^' + key, '', blob.name)
if suffix == '':
return json.loads(blob.download_as_string())
path = suffix.split('/')
path = [p for p in path if p != '']
current_dict = retval
for subdir in path[:-1]:
if subdir != '':
if subdir not in current_dict.keys():
current_dict[subdir] = {}
current_dict = current_dict[subdir]
try:
current_dict[path[-1]] = json.loads(
blob.download_as_string())
except BaseException:
pass
if not any(retval):
return None
else:
return retval
def _delete(self, key):
self.meta_store._delete_file(key)
def _set(self, key, value):
no_retries = 10
sleep_time = 1
for i in range(no_retries):
try:
self.meta_store._get_bucket_obj().blob(key) \
.upload_from_string(json.dumps(value))
break
except BaseException as e:
self.logger.error('uploading data raised an exception:')
self.logger.exception(e)
time.sleep(sleep_time)
| none | 1 | 2.145688 | 2 |
|
# gslib/tests/test_ls.py (from MikeJeffrey/gsutil)
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ls command."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from datetime import datetime
import os
import posixpath
import re
import stat
import subprocess
import sys
import time
import gslib
from gslib.commands import ls
from gslib.cs_api_map import ApiSelector
from gslib.project_id import PopulateProjectId
import gslib.tests.testcase as testcase
from gslib.tests.testcase.integration_testcase import SkipForGS
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.testcase.integration_testcase import SkipForXML
from gslib.tests.util import CaptureStdout
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import RUN_S3_TESTS
from gslib.tests.util import SetBotoConfigForTest
from gslib.tests.util import TEST_ENCRYPTION_CONTENT1
from gslib.tests.util import TEST_ENCRYPTION_CONTENT1_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT1_MD5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT2
from gslib.tests.util import TEST_ENCRYPTION_CONTENT2_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT2_MD5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT3
from gslib.tests.util import TEST_ENCRYPTION_CONTENT3_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT3_MD5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT4
from gslib.tests.util import TEST_ENCRYPTION_CONTENT4_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT4_MD5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT5_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT5_MD5
from gslib.tests.util import TEST_ENCRYPTION_KEY1
from gslib.tests.util import TEST_ENCRYPTION_KEY1_SHA256_B64
from gslib.tests.util import TEST_ENCRYPTION_KEY2
from gslib.tests.util import TEST_ENCRYPTION_KEY2_SHA256_B64
from gslib.tests.util import TEST_ENCRYPTION_KEY3
from gslib.tests.util import TEST_ENCRYPTION_KEY3_SHA256_B64
from gslib.tests.util import TEST_ENCRYPTION_KEY4
from gslib.tests.util import TEST_ENCRYPTION_KEY4_SHA256_B64
from gslib.tests.util import unittest
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.utils.constants import UTF8
from gslib.utils.ls_helper import PrintFullInfoAboutObject
from gslib.utils.retry_util import Retry
from gslib.utils.system_util import IS_WINDOWS
from six import add_move, MovedModule
add_move(MovedModule('mock', 'mock', 'unittest.mock'))
from six.moves import mock
KMS_XML_SKIP_MSG = ('gsutil does not support KMS operations for S3 buckets, '
'or listing KMS keys with the XML API.')
BUCKET_LOCK_SKIP_MSG = ('gsutil does not support bucket lock operations for '
'S3 buckets or listing retention policy with XML API.')
class TestLsUnit(testcase.GsUtilUnitTestCase):
"""Unit tests for ls command."""
def test_one_object_with_L_storage_class_update(self):
"""Tests the JSON storage class update time field."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'XML API has no concept of storage class update time')
# Case 1: Create an object message where Storage class update time is the
# same as Creation time.
current_time = datetime(2017, 1, 2, 3, 4, 5, 6, tzinfo=None)
obj_metadata = apitools_messages.Object(
name='foo',
bucket='bar',
timeCreated=current_time,
updated=current_time,
timeStorageClassUpdated=current_time,
etag='12345')
# Create mock object to point to obj_metadata.
obj_ref = mock.Mock()
obj_ref.root_object = obj_metadata
obj_ref.url_string = 'foo'
# Print out attributes of object message.
with CaptureStdout() as output:
PrintFullInfoAboutObject(obj_ref)
output = '\n'.join(output)
# Verify that no Storage class update time field displays since it's the
# same as Creation time.
find_stor_update_re = re.compile(
r'^\s*Storage class update time:+(?P<stor_update_time_val>.+)$',
re.MULTILINE)
stor_update_time_match = re.search(find_stor_update_re, output)
self.assertIsNone(stor_update_time_match)
# Case 2: Create an object message where Storage class update time differs
# from Creation time.
new_update_time = datetime(2017, 2, 3, 4, 5, 6, 7, tzinfo=None)
obj_metadata2 = apitools_messages.Object(
name='foo2',
bucket='bar2',
timeCreated=current_time,
updated=current_time,
timeStorageClassUpdated=new_update_time,
etag='12345')
# Create mock object to point to obj_metadata2.
obj_ref2 = mock.Mock()
obj_ref2.root_object = obj_metadata2
obj_ref2.url_string = 'foo2'
# Print out attributes of object message.
with CaptureStdout() as output2:
PrintFullInfoAboutObject(obj_ref2)
output2 = '\n'.join(output2)
# Verify that Creation time and Storage class update time fields display and
# are the same as the times set in the object message.
find_time_created_re = re.compile(
r'^\s*Creation time:\s+(?P<time_created_val>.+)$', re.MULTILINE)
time_created_match = re.search(find_time_created_re, output2)
self.assertIsNotNone(time_created_match)
time_created = time_created_match.group('time_created_val')
self.assertEqual(
time_created,
datetime.strftime(current_time, '%a, %d %b %Y %H:%M:%S GMT'))
find_stor_update_re_2 = re.compile(
r'^\s*Storage class update time:+(?P<stor_update_time_val_2>.+)$',
re.MULTILINE)
stor_update_time_match_2 = re.search(find_stor_update_re_2, output2)
self.assertIsNotNone(stor_update_time_match_2)
stor_update_time = stor_update_time_match_2.group('stor_update_time_val_2')
self.assertEqual(
stor_update_time,
datetime.strftime(new_update_time, '%a, %d %b %Y %H:%M:%S GMT'))
@mock.patch.object(ls.LsCommand, 'WildcardIterator')
def test_satisfies_pzs_is_displayed_if_present(self, mock_wildcard):
bucket_uri = self.CreateBucket(bucket_name='foo')
bucket_metadata = apitools_messages.Bucket(name='foo', satisfiesPZS=True)
bucket_uri.root_object = bucket_metadata
bucket_uri.url_string = 'foo'
bucket_uri.storage_url = mock.Mock()
mock_wildcard.return_value.IterBuckets.return_value = [bucket_uri]
# MockKey doesn't support hash_algs, so the MD5 will not match.
with SetBotoConfigForTest([('GSUtil', 'check_hashes', 'never')]):
stdout = self.RunCommand('ls', ['-Lb', suri(bucket_uri)],
return_stdout=True)
self.assertRegex(stdout, 'Satisfies PZS:\t\t\tTrue')
class TestLs(testcase.GsUtilIntegrationTestCase):
"""Integration tests for ls command."""
def test_blank_ls(self):
if not RUN_S3_TESTS: # Blank `ls` command lists GS buckets.
self.RunGsUtil(['ls'])
def test_empty_bucket(self):
bucket_uri = self.CreateBucket()
self.AssertNObjectsInBucket(bucket_uri, 0)
def test_empty_bucket_with_b(self):
bucket_uri = self.CreateBucket()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-b', suri(bucket_uri)],
return_stdout=True)
self.assertEqual('%s/\n' % suri(bucket_uri), stdout)
_Check1()
def test_bucket_with_Lb(self):
"""Tests ls -Lb."""
bucket_uri = self.CreateBucket()
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
# Check that the bucket URI is displayed.
self.assertIn(suri(bucket_uri), stdout)
# Check that we don't see output corresponding to listing objects rather
# than buckets.
self.assertNotIn('TOTAL:', stdout)
# Toggle versioning on the bucket so that the modification time will be
# greater than the creation time.
self.RunGsUtil(['versioning', 'set', 'on', suri(bucket_uri)])
self.RunGsUtil(['versioning', 'set', 'off', suri(bucket_uri)])
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
find_metageneration_re = re.compile(
r'^\s*Metageneration:\s+(?P<metageneration_val>.+)$', re.MULTILINE)
find_time_created_re = re.compile(
r'^\s*Time created:\s+(?P<time_created_val>.+)$', re.MULTILINE)
find_time_updated_re = re.compile(
r'^\s*Time updated:\s+(?P<time_updated_val>.+)$', re.MULTILINE)
metageneration_match = re.search(find_metageneration_re, stdout)
time_created_match = re.search(find_time_created_re, stdout)
time_updated_match = re.search(find_time_updated_re, stdout)
if self.test_api == ApiSelector.XML:
# Check that lines for JSON-specific fields are not displayed.
self.assertIsNone(metageneration_match)
self.assertIsNone(time_created_match)
self.assertIsNone(time_updated_match)
elif self.test_api == ApiSelector.JSON:
# Check that time created/updated lines are displayed.
self.assertIsNotNone(metageneration_match)
self.assertIsNotNone(time_created_match)
self.assertIsNotNone(time_updated_match)
# Check that updated time > created time.
time_created = time_created_match.group('time_created_val')
time_created = time.strptime(time_created, '%a, %d %b %Y %H:%M:%S %Z')
time_updated = time_updated_match.group('time_updated_val')
time_updated = time.strptime(time_updated, '%a, %d %b %Y %H:%M:%S %Z')
self.assertGreater(time_updated, time_created)
# Check that for bucket policy only fields.
self._AssertBucketPolicyOnly(False, stdout)
def test_bucket_with_Lb_bucket_policy_only(self):
if self.test_api == ApiSelector.JSON:
bucket_uri = self.CreateBucket(bucket_policy_only=True,
prefer_json_api=True)
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)],
return_stdout=True)
self._AssertBucketPolicyOnly(True, stdout)
def _AssertBucketPolicyOnly(self, value, stdout):
bucket_policy_only_re = re.compile(
r'^\s*Bucket Policy Only enabled:\s+(?P<bpo_val>.+)$', re.MULTILINE)
bucket_policy_only_match = re.search(bucket_policy_only_re, stdout)
bucket_policy_only_val = bucket_policy_only_match.group('bpo_val')
self.assertEqual(str(value), bucket_policy_only_val)
def test_bucket_with_lb(self):
"""Tests ls -lb."""
bucket_uri = self.CreateBucket()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-lb', suri(bucket_uri)],
return_stdout=True)
self.assertIn(suri(bucket_uri), stdout)
self.assertNotIn('TOTAL:', stdout)
_Check1()
def test_bucket_list_wildcard(self):
"""Tests listing multiple buckets with a wildcard."""
random_prefix = self.MakeRandomTestString()
bucket1_name = self.MakeTempName('bucket', prefix=random_prefix)
bucket2_name = self.MakeTempName('bucket', prefix=random_prefix)
bucket1_uri = self.CreateBucket(bucket_name=bucket1_name)
bucket2_uri = self.CreateBucket(bucket_name=bucket2_name)
# This just double checks that the common prefix of the two buckets is what
# we think it should be (based on implementation detail of CreateBucket).
# We want to be careful when setting a wildcard on buckets to make sure we
# don't step outside the test buckets to affect other buckets.
common_prefix = posixpath.commonprefix(
[suri(bucket1_uri), suri(bucket2_uri)])
self.assertTrue(
common_prefix.startswith(
'%s://%sgsutil-test-test-bucket-list-wildcard' %
(self.default_provider, random_prefix)))
wildcard = '%s*' % common_prefix
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-b', wildcard], return_stdout=True)
expected = set([suri(bucket1_uri) + '/', suri(bucket2_uri) + '/'])
actual = set(stdout.split())
self.assertEqual(expected, actual)
_Check1()
def test_nonexistent_bucket_with_ls(self):
"""Tests a bucket that is known not to exist."""
stderr = self.RunGsUtil(
['ls', '-lb', 'gs://%s' % self.nonexistent_bucket_name],
return_stderr=True,
expected_status=1)
self.assertIn('404', stderr)
stderr = self.RunGsUtil(
['ls', '-Lb', 'gs://%s' % self.nonexistent_bucket_name],
return_stderr=True,
expected_status=1)
self.assertIn('404', stderr)
stderr = self.RunGsUtil(
['ls', '-b', 'gs://%s' % self.nonexistent_bucket_name],
return_stderr=True,
expected_status=1)
self.assertIn('404', stderr)
def test_list_missing_object(self):
"""Tests listing a non-existent object."""
bucket_uri = self.CreateBucket()
stderr = self.RunGsUtil(['ls', suri(bucket_uri, 'missing')],
return_stderr=True,
expected_status=1)
self.assertIn('matched no objects', stderr)
def test_with_one_object(self):
bucket_uri = self.CreateBucket()
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', suri(bucket_uri)], return_stdout=True)
self.assertEqual('%s\n' % obj_uri, stdout)
_Check1()
def location_redirect_test_helper(self, bucket_region, client_region):
bucket_host = 's3.%s.amazonaws.com' % bucket_region
client_host = 's3.%s.amazonaws.com' % client_region
with SetBotoConfigForTest([('s3', 'host', bucket_host)]):
bucket_uri = self.CreateBucket(location=bucket_region)
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1(uri):
stdout = self.RunGsUtil(['ls', uri], return_stdout=True)
self.assertEqual('%s\n' % obj_uri, stdout)
with SetBotoConfigForTest([('s3', 'host', client_host)]):
# sends a GET request
_Check1(suri(bucket_uri))
# sends a HEAD request, meaning error body is not included.
_Check1(suri(obj_uri))
@SkipForGS('Only s3 V4 signatures error on location mismatches.')
def test_400_location_redirect(self):
# ap-east-1 used here since regions launched before March 20, 2019 do
# some temporary redirecting for new buckets which suppresses 400 errors.
self.location_redirect_test_helper('ap-east-1', 'us-east-2')
@SkipForGS('Only s3 V4 signatures error on location mismatches.')
def test_301_location_redirect(self):
self.location_redirect_test_helper('eu-west-1', 'us-east-2')
@SkipForXML('Credstore file gets created only for json API')
def test_credfile_lock_permissions(self):
tmpdir = self.CreateTempDir()
filepath = os.path.join(tmpdir, 'credstore2')
option = 'GSUtil:state_dir={}'.format(tmpdir)
bucket_uri = self.CreateBucket()
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(
['-o', option, 'ls', suri(bucket_uri)], return_stdout=True)
self.assertEqual('%s\n' % obj_uri, stdout)
if os.name == 'posix':
self.assertTrue(os.path.exists(filepath))
mode = oct(stat.S_IMODE(os.stat(filepath).st_mode))
# Assert that only user has read/write permission
self.assertEqual(oct(0o600), mode)
_Check1()
def test_one_object_with_l(self):
"""Tests listing one object with -l."""
obj_uri = self.CreateObject(contents=b'foo')
stdout = self.RunGsUtil(['ls', '-l', suri(obj_uri)], return_stdout=True)
output_items = stdout.split()
self.assertTrue(output_items[0].isdigit())
# Throws exception if time string is not formatted correctly.
time.strptime(stdout.split()[1], '%Y-%m-%dT%H:%M:%SZ')
self.assertEqual(output_items[2], suri(obj_uri))
def test_one_object_with_L(self):
"""Tests listing one object with -L."""
obj_uri = self.CreateObject(contents=b'foo')
# Ensure that creation and update don't take place in the same second.
time.sleep(2)
# Check that the creation time, rather than the updated time, is displayed.
self.RunGsUtil(['setmeta', '-h', 'x-goog-meta-foo:bar', suri(obj_uri)])
find_time_created_re = re.compile(
r'^\s*Creation time:\s+(?P<time_created_val>.+)$', re.MULTILINE)
find_time_updated_re = re.compile(
r'^\s*Update time:\s+(?P<time_updated_val>.+)$', re.MULTILINE)
stdout = self.RunGsUtil(['ls', '-L', suri(obj_uri)], return_stdout=True)
time_created_match = re.search(find_time_created_re, stdout)
time_updated_match = re.search(find_time_updated_re, stdout)
time_created = time_created_match.group('time_created_val')
self.assertIsNotNone(time_created)
time_created = time.strptime(time_created, '%a, %d %b %Y %H:%M:%S %Z')
if self.test_api == ApiSelector.XML:
# XML API has no concept of updated time.
self.assertIsNone(time_updated_match)
elif self.test_api == ApiSelector.JSON:
time_updated = time_updated_match.group('time_updated_val')
self.assertIsNotNone(time_updated)
time_updated = time.strptime(time_updated, '%a, %d %b %Y %H:%M:%S %Z')
self.assertGreater(time_updated, time_created)
def test_subdir(self):
"""Tests listing a bucket subdirectory."""
bucket_uri = self.CreateBucket(test_objects=1)
k1_uri = bucket_uri.clone_replace_name('foo')
k1_uri.set_contents_from_string('baz')
k2_uri = bucket_uri.clone_replace_name('dir/foo')
k2_uri.set_contents_from_string('bar')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '%s/dir' % suri(bucket_uri)],
return_stdout=True)
self.assertEqual('%s\n' % suri(k2_uri), stdout)
stdout = self.RunGsUtil(['ls', suri(k1_uri)], return_stdout=True)
self.assertEqual('%s\n' % suri(k1_uri), stdout)
_Check1()
def test_subdir_nocontents(self):
"""Tests listing a bucket subdirectory using -d.
Result will display subdirectory names instead of contents. Uses a wildcard
to show multiple matching subdirectories.
"""
bucket_uri = self.CreateBucket(test_objects=1)
k1_uri = bucket_uri.clone_replace_name('foo')
k1_uri.set_contents_from_string('baz')
k2_uri = bucket_uri.clone_replace_name('dir/foo')
k2_uri.set_contents_from_string('bar')
k3_uri = bucket_uri.clone_replace_name('dir/foo2')
k3_uri.set_contents_from_string('foo')
k4_uri = bucket_uri.clone_replace_name('dir2/foo3')
k4_uri.set_contents_from_string('foo2')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(
['ls', '-d', '%s/dir*' % suri(bucket_uri)], return_stdout=True)
self.assertEqual(
'%s/dir/\n%s/dir2/\n' % (suri(bucket_uri), suri(bucket_uri)), stdout)
stdout = self.RunGsUtil(['ls', suri(k1_uri)], return_stdout=True)
self.assertEqual('%s\n' % suri(k1_uri), stdout)
_Check1()
def test_versioning(self):
"""Tests listing a versioned bucket."""
bucket1_uri = self.CreateBucket(test_objects=1)
bucket2_uri = self.CreateVersionedBucket(test_objects=1)
self.AssertNObjectsInBucket(bucket1_uri, 1, versioned=True)
bucket_list = list(bucket1_uri.list_bucket())
objuri = [
bucket1_uri.clone_replace_key(key).versionless_uri
for key in bucket_list
][0]
self.RunGsUtil(['cp', objuri, suri(bucket2_uri)])
self.RunGsUtil(['cp', objuri, suri(bucket2_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-a', suri(bucket2_uri)],
return_stdout=True)
self.assertNumLines(stdout, 3)
stdout = self.RunGsUtil(['ls', '-la', suri(bucket2_uri)],
return_stdout=True)
self.assertIn('%s#' % bucket2_uri.clone_replace_name(bucket_list[0].name),
stdout)
self.assertIn('metageneration=', stdout)
_Check2()
def test_etag(self):
"""Tests that listing an object with an etag."""
bucket_uri = self.CreateBucket()
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
# TODO: When testcase setup can use JSON, match against the exact JSON
# etag.
etag = obj_uri.get_key().etag.strip('"\'')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-l', suri(bucket_uri)],
return_stdout=True)
if self.test_api == ApiSelector.XML:
self.assertNotIn(etag, stdout)
else:
self.assertNotIn('etag=', stdout)
_Check1()
def _Check2():
stdout = self.RunGsUtil(['ls', '-le', suri(bucket_uri)],
return_stdout=True)
if self.test_api == ApiSelector.XML:
self.assertIn(etag, stdout)
else:
self.assertIn('etag=', stdout)
_Check2()
def _Check3():
stdout = self.RunGsUtil(['ls', '-ale', suri(bucket_uri)],
return_stdout=True)
if self.test_api == ApiSelector.XML:
self.assertIn(etag, stdout)
else:
self.assertIn('etag=', stdout)
_Check3()
def test_labels(self):
"""Tests listing on a bucket with a label/tagging configuration."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
# No labels are present by default.
self.assertRegex(stdout, r'Labels:\s+None')
# Add a label and check that it shows up.
self.RunGsUtil(['label', 'ch', '-l', 'labelkey:labelvalue', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
label_regex = re.compile(r'Labels:\s+\{\s+"labelkey":\s+"labelvalue"\s+\}',
re.MULTILINE)
self.assertRegex(stdout, label_regex)
@SkipForS3('S3 bucket configuration values are not supported via ls.')
def test_location_constraint(self):
"""Tests listing a bucket with location constraint."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No location constraint should be shown for `-lb`
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
self.assertNotIn('Location constraint:', stdout)
    # A location constraint should be listed for `-Lb`.
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
# Default location may vary between test environments; test that some
# non-whitespace character is present after the whitespace:
self.assertRegex(stdout, r'Location constraint:\s+\S')
# TODO(b/135700569): Stop skipping this once this field is available to all
# projects.
@unittest.skip('b/135700569')
@SkipForXML('Location type not available when using the GCS XML API.')
@SkipForS3('Location type not printed for S3 buckets.')
def test_location_type(self):
"""Tests listing a bucket with location constraint."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No location type should be shown for `-lb`
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
self.assertNotIn('Location type:', stdout)
# Default location type may vary between test environments; test that some
# non-whitespace character is present after the whitespace:
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertRegex(stdout, r'Location type:\s+\S')
@SkipForS3('S3 bucket configuration values are not supported via ls.')
def test_logging(self):
"""Tests listing a bucket with logging config."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No logging info
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
self.assertNotIn('Logging configuration', stdout)
# Logging configuration is absent by default
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Logging configuration:\t\tNone', stdout)
# Enable and check
self.RunGsUtil(['logging', 'set', 'on', '-b', bucket_suri, bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Logging configuration:\t\tPresent', stdout)
# Disable and check
self.RunGsUtil(['logging', 'set', 'off', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Logging configuration:\t\tNone', stdout)
@SkipForS3('S3 bucket configuration values are not supported via ls.')
def test_web(self):
"""Tests listing a bucket with website config."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No website configuration
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
self.assertNotIn('Website configuration', stdout)
# Website configuration is absent by default
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Website configuration:\t\tNone', stdout)
# Initialize and check
self.RunGsUtil(['web', 'set', '-m', 'google.com', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Website configuration:\t\tPresent', stdout)
# Clear and check
self.RunGsUtil(['web', 'set', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Website configuration:\t\tNone', stdout)
@SkipForS3('S3 bucket configuration values are not supported via ls.')
@SkipForXML('Requester Pays is not supported for the XML API.')
def test_requesterpays(self):
"""Tests listing a bucket with requester pays (billing) config."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No requester pays configuration
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
self.assertNotIn('Requester Pays enabled', stdout)
# Requester Pays configuration is absent by default
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Requester Pays enabled:\t\tNone', stdout)
# Initialize and check
self.RunGsUtil(['requesterpays', 'set', 'on', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Requester Pays enabled:\t\tTrue', stdout)
# Clear and check
self.RunGsUtil(['requesterpays', 'set', 'off', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Requester Pays enabled:\t\tFalse', stdout)
def test_list_sizes(self):
"""Tests various size listing options."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri, contents=b'x' * 2048)
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-l', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2048', stdout)
_Check1()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2048', stdout)
_Check2()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check3():
stdout = self.RunGsUtil(['ls', '-al', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2048', stdout)
_Check3()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check4():
stdout = self.RunGsUtil(['ls', '-lh', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2 KiB', stdout)
_Check4()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check5():
stdout = self.RunGsUtil(['ls', '-alh', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2 KiB', stdout)
_Check5()
@unittest.skipIf(IS_WINDOWS,
'Unicode handling on Windows requires mods to site-packages')
def test_list_unicode_filename(self):
"""Tests listing an object with a unicode filename."""
    # Note: This test fails on Windows (cmd.exe). I was able to get ls to
# output Unicode filenames correctly by hacking the UniStream class code
# shown at
# http://stackoverflow.com/questions/878972/windows-cmd-encoding-change-causes-python-crash/3259271
# into the start of gslib/commands/ls.py, along with no-op flush and
# isastream functions (as an experiment). However, even with that change,
# the current test still fails, since it also needs to run that
# stdout/stderr-replacement code. That UniStream class replacement really
# needs to be added to the site-packages on Windows python.
object_name = u'Аудиоархив'
bucket_uri = self.CreateVersionedBucket()
key_uri = self.CreateObject(bucket_uri=bucket_uri,
contents=b'foo',
object_name=object_name)
self.AssertNObjectsInBucket(bucket_uri, 1, versioned=True)
stdout = self.RunGsUtil(['ls', '-ael', suri(key_uri)], return_stdout=True)
self.assertIn(object_name, stdout)
if self.default_provider == 'gs':
self.assertIn(str(key_uri.generation), stdout)
self.assertIn('metageneration=%s' % key_uri.get_key().metageneration,
stdout)
if self.test_api == ApiSelector.XML:
self.assertIn(key_uri.get_key().etag.strip('"\''), stdout)
else:
# TODO: When testcase setup can use JSON, match against the exact JSON
# etag.
self.assertIn('etag=', stdout)
elif self.default_provider == 's3':
self.assertIn(key_uri.version_id, stdout)
self.assertIn(key_uri.get_key().etag.strip('"\''), stdout)
def test_list_acl(self):
"""Tests that long listing includes an ACL."""
key_uri = self.CreateObject(contents=b'foo')
stdout = self.RunGsUtil(['ls', '-L', suri(key_uri)], return_stdout=True)
self.assertIn('ACL:', stdout)
self.assertNotIn('ACCESS DENIED', stdout)
def test_list_gzip_content_length(self):
"""Tests listing a gzipped object."""
file_size = 10000
file_contents = b'x' * file_size
fpath = self.CreateTempFile(contents=file_contents, file_name='foo.txt')
key_uri = self.CreateObject()
self.RunGsUtil(['cp', '-z', 'txt', suri(fpath), suri(key_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', suri(key_uri)], return_stdout=True)
self.assertRegex(stdout, r'Content-Encoding:\s+gzip')
      find_content_length_re = r'Content-Length:\s+(?P<num>\d+)'
self.assertRegex(stdout, find_content_length_re)
m = re.search(find_content_length_re, stdout)
content_length = int(m.group('num'))
self.assertGreater(content_length, 0)
self.assertLess(content_length, file_size)
_Check1()
def test_output_chopped(self):
"""Tests that gsutil still succeeds with a truncated stdout."""
bucket_uri = self.CreateBucket(test_objects=2)
# Run Python with the -u flag so output is not buffered.
gsutil_cmd = [
sys.executable, '-u', gslib.GSUTIL_PATH, 'ls',
suri(bucket_uri)
]
# Set bufsize to 0 to make sure output is not buffered.
p = subprocess.Popen(gsutil_cmd, stdout=subprocess.PIPE, bufsize=0)
# Immediately close the stdout pipe so that gsutil gets a broken pipe error.
p.stdout.close()
p.wait()
# Make sure it still exited cleanly.
self.assertEqual(p.returncode, 0)
@SkipForS3('Boto lib required for S3 does not handle paths '
'starting with slash.')
def test_recursive_list_slash_only(self):
"""Tests listing an object with a trailing slash."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri, object_name='/', contents=b'foo')
self.AssertNObjectsInBucket(bucket_uri, 1)
stdout = self.RunGsUtil(['ls', '-R', suri(bucket_uri)], return_stdout=True)
# Note: The suri function normalizes the URI, so the double slash gets
# removed.
self.assertIn(suri(bucket_uri) + '/', stdout)
def test_recursive_list_trailing_slash(self):
"""Tests listing an object with a trailing slash."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo/',
contents=b'foo')
self.AssertNObjectsInBucket(bucket_uri, 1)
stdout = self.RunGsUtil(['ls', '-R', suri(bucket_uri)], return_stdout=True)
# Note: The suri function normalizes the URI, so the double slash gets
# removed.
self.assertIn(suri(bucket_uri) + '/foo/', stdout)
@SkipForS3('Boto lib required for S3 does not handle paths '
'starting with slash.')
def test_recursive_list_trailing_two_slash(self):
"""Tests listing an object with two trailing slashes."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri, object_name='//', contents=b'foo')
self.AssertNObjectsInBucket(bucket_uri, 1)
stdout = self.RunGsUtil(['ls', '-R', suri(bucket_uri)], return_stdout=True)
# Note: The suri function normalizes the URI, so the double slash gets
# removed.
self.assertIn(suri(bucket_uri) + '//', stdout)
def test_wildcard_prefix(self):
"""Tests that an object name with a wildcard does not infinite loop."""
bucket_uri = self.CreateBucket()
wildcard_folder_object = 'wildcard*/'
object_matching_folder = 'wildcard10/foo'
self.CreateObject(bucket_uri=bucket_uri,
object_name=wildcard_folder_object,
contents=b'foo')
self.CreateObject(bucket_uri=bucket_uri,
object_name=object_matching_folder,
contents=b'foo')
self.AssertNObjectsInBucket(bucket_uri, 2)
stderr = self.RunGsUtil(['ls', suri(bucket_uri, 'wildcard*')],
return_stderr=True,
expected_status=1)
self.assertIn(
'Cloud folder %s%s contains a wildcard' %
(suri(bucket_uri), '/wildcard*/'), stderr)
# Listing with a flat wildcard should still succeed.
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check():
stdout = self.RunGsUtil(['ls', '-l', suri(bucket_uri, '**')],
return_stdout=True)
self.assertNumLines(stdout, 3) # 2 object lines, one summary line.
_Check()
@SkipForS3('S3 anonymous access is not supported.')
def test_get_object_without_list_bucket_permission(self):
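    """Tests that ls -L on a public object works without bucket list access."""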
# Bucket is not publicly readable by default.
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='permitted',
contents=b'foo')
# Set this object to be publicly readable.
self.RunGsUtil(['acl', 'set', 'public-read', suri(object_uri)])
# Drop credentials.
with self.SetAnonymousBotoCreds():
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)],
return_stdout=True)
self.assertIn(suri(object_uri), stdout)
@SkipForS3('S3 customer-supplied encryption keys are not supported.')
def test_list_encrypted_object(self):
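    """Tests that ls -L shows decrypted hashes only with the matching CSEK."""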
    if self.test_api == ApiSelector.XML:
      self.skipTest('gsutil does not support encryption with the XML API')
object_uri = self.CreateObject(object_name='foo',
contents=TEST_ENCRYPTION_CONTENT1,
encryption_key=TEST_ENCRYPTION_KEY1)
# Listing object with key should return unencrypted hashes.
with SetBotoConfigForTest([('GSUtil', 'encryption_key',
TEST_ENCRYPTION_KEY1)]):
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _ListExpectDecrypted():
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)],
return_stdout=True)
self.assertIn(TEST_ENCRYPTION_CONTENT1_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT1_CRC32C, stdout)
self.assertIn(TEST_ENCRYPTION_KEY1_SHA256_B64.decode('ascii'), stdout)
_ListExpectDecrypted()
# Listing object without a key should return encrypted hashes.
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _ListExpectEncrypted():
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)],
return_stdout=True)
self.assertNotIn(TEST_ENCRYPTION_CONTENT1_MD5, stdout)
self.assertNotIn(TEST_ENCRYPTION_CONTENT1_CRC32C, stdout)
self.assertIn('encrypted', stdout)
self.assertIn(TEST_ENCRYPTION_KEY1_SHA256_B64.decode('ascii'), stdout)
_ListExpectEncrypted()
# Listing object with a non-matching key should return encrypted hashes.
with SetBotoConfigForTest([('GSUtil', 'encryption_key',
TEST_ENCRYPTION_KEY2)]):
_ListExpectEncrypted()
@SkipForS3('S3 customer-supplied encryption keys are not supported.')
def test_list_mixed_encryption(self):
"""Tests listing objects with various encryption interactions."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=TEST_ENCRYPTION_CONTENT1,
encryption_key=TEST_ENCRYPTION_KEY1)
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo2',
contents=TEST_ENCRYPTION_CONTENT2,
encryption_key=TEST_ENCRYPTION_KEY2)
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo3',
contents=TEST_ENCRYPTION_CONTENT3,
encryption_key=TEST_ENCRYPTION_KEY3)
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo4',
contents=TEST_ENCRYPTION_CONTENT4,
encryption_key=TEST_ENCRYPTION_KEY4)
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo5',
contents=TEST_ENCRYPTION_CONTENT5)
# List 5 objects, one encrypted with each of four keys, and one
# unencrypted. Supplying keys [1,3,4] should result in four unencrypted
# listings and one encrypted listing (for key 2).
with SetBotoConfigForTest([
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1),
('GSUtil', 'decryption_key1', TEST_ENCRYPTION_KEY3),
('GSUtil', 'decryption_key2', TEST_ENCRYPTION_KEY4)
]):
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _ListExpectMixed():
"""Validates object listing."""
stdout = self.RunGsUtil(['ls', '-L', suri(bucket_uri)],
return_stdout=True)
self.assertIn(TEST_ENCRYPTION_CONTENT1_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT1_CRC32C, stdout)
self.assertIn(TEST_ENCRYPTION_KEY1_SHA256_B64.decode('ascii'), stdout)
self.assertNotIn(TEST_ENCRYPTION_CONTENT2_MD5, stdout)
self.assertNotIn(TEST_ENCRYPTION_CONTENT2_CRC32C, stdout)
self.assertIn('encrypted', stdout)
self.assertIn(TEST_ENCRYPTION_KEY2_SHA256_B64.decode('ascii'), stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT3_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT3_CRC32C, stdout)
self.assertIn(TEST_ENCRYPTION_KEY3_SHA256_B64.decode('ascii'), stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT4_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT4_CRC32C, stdout)
self.assertIn(TEST_ENCRYPTION_KEY4_SHA256_B64.decode('ascii'), stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT5_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT5_CRC32C, stdout)
_ListExpectMixed()
def test_non_ascii_project_fails(self):
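    """Tests that ls with a non-ASCII project ID fails with a clear error."""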
stderr = self.RunGsUtil(['ls', '-p', 'ã', 'gs://fobarbaz'],
expected_status=1,
return_stderr=True)
self.assertIn('Invalid non-ASCII', stderr)
def set_default_kms_key_on_bucket(self, bucket_uri):
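    """Sets a default KMS key on bucket_uri and returns the key's name."""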
# Make sure our keyRing and cryptoKey exist.
keyring_fqn = self.kms_api.CreateKeyRing(
PopulateProjectId(None),
testcase.KmsTestingResources.KEYRING_NAME,
location=testcase.KmsTestingResources.KEYRING_LOCATION)
key_fqn = self.kms_api.CreateCryptoKey(
keyring_fqn, testcase.KmsTestingResources.CONSTANT_KEY_NAME)
# Make sure that the service account for the desired bucket's parent project
# is authorized to encrypt with the key above.
self.RunGsUtil(['kms', 'encryption', '-k', key_fqn, suri(bucket_uri)])
return key_fqn
@SkipForXML(KMS_XML_SKIP_MSG)
@SkipForS3(KMS_XML_SKIP_MSG)
def test_default_kms_key_listed_for_bucket(self):
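    """Tests that ls -Lb lists the bucket's default KMS key once it is set."""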
bucket_uri = self.CreateBucket()
# Default KMS key is not set by default.
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertRegex(stdout, r'Default KMS key:\s+None')
# Default KMS key's name should be listed after being set on the bucket.
key_fqn = self.set_default_kms_key_on_bucket(bucket_uri)
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertRegex(stdout, r'Default KMS key:\s+%s' % key_fqn)
@SkipForXML(KMS_XML_SKIP_MSG)
@SkipForS3(KMS_XML_SKIP_MSG)
def test_kms_key_listed_for_kms_encrypted_object(self):
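    """Tests that ls -L lists the KMS key used to encrypt an object."""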
bucket_uri = self.CreateBucket()
key_fqn = self.set_default_kms_key_on_bucket(bucket_uri)
# Copy an object into our bucket and encrypt using the key from above.
obj_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'foo',
kms_key_name=key_fqn)
stdout = self.RunGsUtil(['ls', '-L', suri(obj_uri)], return_stdout=True)
self.assertRegex(stdout, r'KMS key:\s+%s' % key_fqn)
@SkipForXML(BUCKET_LOCK_SKIP_MSG)
@SkipForS3(BUCKET_LOCK_SKIP_MSG)
def test_list_retention_policy(self):
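    """Tests that ls -Lb reports whether a bucket has a retention policy."""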
bucket_uri = self.CreateBucketWithRetentionPolicy(
retention_period_in_seconds=1)
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertRegex(stdout, r'Retention Policy\:\t*Present')
# Clearing Retention Policy on the bucket.
self.RunGsUtil(['retention', 'clear', suri(bucket_uri)])
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertNotRegex(stdout, r'Retention Policy:')
@SkipForXML(BUCKET_LOCK_SKIP_MSG)
@SkipForS3(BUCKET_LOCK_SKIP_MSG)
def test_list_default_event_based_hold(self):
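    """Tests that ls -Lb reports the bucket's default event-based hold."""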
bucket_uri = self.CreateBucket()
self.RunGsUtil(['retention', 'event-default', 'set', suri(bucket_uri)])
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertRegex(stdout, r'Default Event-Based Hold:\t* *True')
# Clearing the default Event-Based Hold on the bucket.
self.RunGsUtil(['retention', 'event-default', 'release', suri(bucket_uri)])
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertNotRegex(stdout, r'Default Event-Based Hold')
@SkipForXML(BUCKET_LOCK_SKIP_MSG)
@SkipForS3(BUCKET_LOCK_SKIP_MSG)
def test_list_temporary_hold(self):
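    """Tests that ls -L reports a temporary hold set on an object."""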
object_uri = self.CreateObject(contents=b'content')
self.RunGsUtil(['retention', 'temp', 'set', suri(object_uri)])
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
self.assertRegex(stdout, r'Temporary Hold')
# Clearing the Temporary Hold on the object.
self.RunGsUtil(['retention', 'temp', 'release', suri(object_uri)])
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
self.assertNotRegex(stdout, r'Temporary Hold')
@SkipForXML(BUCKET_LOCK_SKIP_MSG)
@SkipForS3(BUCKET_LOCK_SKIP_MSG)
def test_list_event_based_hold(self):
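    """Tests that ls -L reports an event-based hold set on an object."""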
object_uri = self.CreateObject(contents=b'content')
self.RunGsUtil(['retention', 'event', 'set', suri(object_uri)])
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
self.assertRegex(stdout, r'Event-Based Hold')
# Clearing the Event-Based Hold on the object.
self.RunGsUtil(['retention', 'event', 'release', suri(object_uri)])
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
self.assertNotRegex(stdout, r'Event-Based Hold')
"""Tests listing a non-existent object."""
bucket_uri = self.CreateBucket()
stderr = self.RunGsUtil(['ls', suri(bucket_uri, 'missing')],
return_stderr=True,
expected_status=1)
self.assertIn('matched no objects', stderr)
def test_with_one_object(self):
bucket_uri = self.CreateBucket()
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', suri(bucket_uri)], return_stdout=True)
self.assertEqual('%s\n' % obj_uri, stdout)
_Check1()
def location_redirect_test_helper(self, bucket_region, client_region):
bucket_host = 's3.%s.amazonaws.com' % bucket_region
client_host = 's3.%s.amazonaws.com' % client_region
with SetBotoConfigForTest([('s3', 'host', bucket_host)]):
bucket_uri = self.CreateBucket(location=bucket_region)
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1(uri):
stdout = self.RunGsUtil(['ls', uri], return_stdout=True)
self.assertEqual('%s\n' % obj_uri, stdout)
with SetBotoConfigForTest([('s3', 'host', client_host)]):
# sends a GET request
_Check1(suri(bucket_uri))
# sends a HEAD request, meaning error body is not included.
_Check1(suri(obj_uri))
@SkipForGS('Only s3 V4 signatures error on location mismatches.')
def test_400_location_redirect(self):
# ap-east-1 used here since regions launched before March 20, 2019 do
# some temporary redirecting for new buckets which suppresses 400 errors.
self.location_redirect_test_helper('ap-east-1', 'us-east-2')
@SkipForGS('Only s3 V4 signatures error on location mismatches.')
def test_301_location_redirect(self):
self.location_redirect_test_helper('eu-west-1', 'us-east-2')
@SkipForXML('Credstore file gets created only for json API')
def test_credfile_lock_permissions(self):
tmpdir = self.CreateTempDir()
filepath = os.path.join(tmpdir, 'credstore2')
option = 'GSUtil:state_dir={}'.format(tmpdir)
bucket_uri = self.CreateBucket()
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(
['-o', option, 'ls', suri(bucket_uri)], return_stdout=True)
self.assertEqual('%s\n' % obj_uri, stdout)
if os.name == 'posix':
self.assertTrue(os.path.exists(filepath))
mode = oct(stat.S_IMODE(os.stat(filepath).st_mode))
# Assert that only user has read/write permission
self.assertEqual(oct(0o600), mode)
_Check1()
def test_one_object_with_l(self):
"""Tests listing one object with -l."""
obj_uri = self.CreateObject(contents=b'foo')
stdout = self.RunGsUtil(['ls', '-l', suri(obj_uri)], return_stdout=True)
output_items = stdout.split()
self.assertTrue(output_items[0].isdigit())
# Throws exception if time string is not formatted correctly.
time.strptime(stdout.split()[1], '%Y-%m-%dT%H:%M:%SZ')
self.assertEqual(output_items[2], suri(obj_uri))
def test_one_object_with_L(self):
"""Tests listing one object with -L."""
obj_uri = self.CreateObject(contents=b'foo')
# Ensure that creation and update don't take place in the same second.
time.sleep(2)
# Check that the creation time, rather than the updated time, is displayed.
self.RunGsUtil(['setmeta', '-h', 'x-goog-meta-foo:bar', suri(obj_uri)])
find_time_created_re = re.compile(
r'^\s*Creation time:\s+(?P<time_created_val>.+)$', re.MULTILINE)
find_time_updated_re = re.compile(
r'^\s*Update time:\s+(?P<time_updated_val>.+)$', re.MULTILINE)
stdout = self.RunGsUtil(['ls', '-L', suri(obj_uri)], return_stdout=True)
time_created_match = re.search(find_time_created_re, stdout)
time_updated_match = re.search(find_time_updated_re, stdout)
time_created = time_created_match.group('time_created_val')
self.assertIsNotNone(time_created)
time_created = time.strptime(time_created, '%a, %d %b %Y %H:%M:%S %Z')
if self.test_api == ApiSelector.XML:
# XML API has no concept of updated time.
self.assertIsNone(time_updated_match)
elif self.test_api == ApiSelector.JSON:
time_updated = time_updated_match.group('time_updated_val')
self.assertIsNotNone(time_updated)
time_updated = time.strptime(time_updated, '%a, %d %b %Y %H:%M:%S %Z')
self.assertGreater(time_updated, time_created)
def test_subdir(self):
"""Tests listing a bucket subdirectory."""
bucket_uri = self.CreateBucket(test_objects=1)
k1_uri = bucket_uri.clone_replace_name('foo')
k1_uri.set_contents_from_string('baz')
k2_uri = bucket_uri.clone_replace_name('dir/foo')
k2_uri.set_contents_from_string('bar')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '%s/dir' % suri(bucket_uri)],
return_stdout=True)
self.assertEqual('%s\n' % suri(k2_uri), stdout)
stdout = self.RunGsUtil(['ls', suri(k1_uri)], return_stdout=True)
self.assertEqual('%s\n' % suri(k1_uri), stdout)
_Check1()
def test_subdir_nocontents(self):
"""Tests listing a bucket subdirectory using -d.
Result will display subdirectory names instead of contents. Uses a wildcard
to show multiple matching subdirectories.
"""
bucket_uri = self.CreateBucket(test_objects=1)
k1_uri = bucket_uri.clone_replace_name('foo')
k1_uri.set_contents_from_string('baz')
k2_uri = bucket_uri.clone_replace_name('dir/foo')
k2_uri.set_contents_from_string('bar')
k3_uri = bucket_uri.clone_replace_name('dir/foo2')
k3_uri.set_contents_from_string('foo')
k4_uri = bucket_uri.clone_replace_name('dir2/foo3')
k4_uri.set_contents_from_string('foo2')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(
['ls', '-d', '%s/dir*' % suri(bucket_uri)], return_stdout=True)
self.assertEqual(
'%s/dir/\n%s/dir2/\n' % (suri(bucket_uri), suri(bucket_uri)), stdout)
stdout = self.RunGsUtil(['ls', suri(k1_uri)], return_stdout=True)
self.assertEqual('%s\n' % suri(k1_uri), stdout)
_Check1()
def test_versioning(self):
"""Tests listing a versioned bucket."""
bucket1_uri = self.CreateBucket(test_objects=1)
bucket2_uri = self.CreateVersionedBucket(test_objects=1)
self.AssertNObjectsInBucket(bucket1_uri, 1, versioned=True)
bucket_list = list(bucket1_uri.list_bucket())
objuri = [
bucket1_uri.clone_replace_key(key).versionless_uri
for key in bucket_list
][0]
self.RunGsUtil(['cp', objuri, suri(bucket2_uri)])
self.RunGsUtil(['cp', objuri, suri(bucket2_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-a', suri(bucket2_uri)],
return_stdout=True)
self.assertNumLines(stdout, 3)
stdout = self.RunGsUtil(['ls', '-la', suri(bucket2_uri)],
return_stdout=True)
self.assertIn('%s#' % bucket2_uri.clone_replace_name(bucket_list[0].name),
stdout)
self.assertIn('metageneration=', stdout)
_Check2()
def test_etag(self):
"""Tests that listing an object with an etag."""
bucket_uri = self.CreateBucket()
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
# TODO: When testcase setup can use JSON, match against the exact JSON
# etag.
etag = obj_uri.get_key().etag.strip('"\'')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-l', suri(bucket_uri)],
return_stdout=True)
if self.test_api == ApiSelector.XML:
self.assertNotIn(etag, stdout)
else:
self.assertNotIn('etag=', stdout)
_Check1()
def _Check2():
stdout = self.RunGsUtil(['ls', '-le', suri(bucket_uri)],
return_stdout=True)
if self.test_api == ApiSelector.XML:
self.assertIn(etag, stdout)
else:
self.assertIn('etag=', stdout)
_Check2()
def _Check3():
stdout = self.RunGsUtil(['ls', '-ale', suri(bucket_uri)],
return_stdout=True)
if self.test_api == ApiSelector.XML:
self.assertIn(etag, stdout)
else:
self.assertIn('etag=', stdout)
_Check3()
def test_labels(self):
"""Tests listing on a bucket with a label/tagging configuration."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
# No labels are present by default.
self.assertRegex(stdout, r'Labels:\s+None')
# Add a label and check that it shows up.
self.RunGsUtil(['label', 'ch', '-l', 'labelkey:labelvalue', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
label_regex = re.compile(r'Labels:\s+\{\s+"labelkey":\s+"labelvalue"\s+\}',
re.MULTILINE)
self.assertRegex(stdout, label_regex)
@SkipForS3('S3 bucket configuration values are not supported via ls.')
def test_location_constraint(self):
"""Tests listing a bucket with location constraint."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No location constraint should be shown for `-lb`
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
self.assertNotIn('Location constraint:', stdout)
# Default location constraint is US
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
# Default location may vary between test environments; test that some
# non-whitespace character is present after the whitespace:
self.assertRegex(stdout, r'Location constraint:\s+\S')
# TODO(b/135700569): Stop skipping this once this field is available to all
# projects.
@unittest.skip('b/135700569')
@SkipForXML('Location type not available when using the GCS XML API.')
@SkipForS3('Location type not printed for S3 buckets.')
def test_location_type(self):
"""Tests listing a bucket with location constraint."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No location type should be shown for `-lb`
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
self.assertNotIn('Location type:', stdout)
# Default location type may vary between test environments; test that some
# non-whitespace character is present after the whitespace:
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertRegex(stdout, r'Location type:\s+\S')
@SkipForS3('S3 bucket configuration values are not supported via ls.')
def test_logging(self):
"""Tests listing a bucket with logging config."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No logging info
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
self.assertNotIn('Logging configuration', stdout)
# Logging configuration is absent by default
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Logging configuration:\t\tNone', stdout)
# Enable and check
self.RunGsUtil(['logging', 'set', 'on', '-b', bucket_suri, bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Logging configuration:\t\tPresent', stdout)
# Disable and check
self.RunGsUtil(['logging', 'set', 'off', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Logging configuration:\t\tNone', stdout)
@SkipForS3('S3 bucket configuration values are not supported via ls.')
def test_web(self):
"""Tests listing a bucket with website config."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No website configuration
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
self.assertNotIn('Website configuration', stdout)
# Website configuration is absent by default
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Website configuration:\t\tNone', stdout)
# Initialize and check
self.RunGsUtil(['web', 'set', '-m', 'google.com', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Website configuration:\t\tPresent', stdout)
# Clear and check
self.RunGsUtil(['web', 'set', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Website configuration:\t\tNone', stdout)
@SkipForS3('S3 bucket configuration values are not supported via ls.')
@SkipForXML('Requester Pays is not supported for the XML API.')
def test_requesterpays(self):
"""Tests listing a bucket with requester pays (billing) config."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No requester pays configuration
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
self.assertNotIn('Requester Pays enabled', stdout)
# Requester Pays configuration is absent by default
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Requester Pays enabled:\t\tNone', stdout)
# Initialize and check
self.RunGsUtil(['requesterpays', 'set', 'on', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Requester Pays enabled:\t\tTrue', stdout)
# Clear and check
self.RunGsUtil(['requesterpays', 'set', 'off', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Requester Pays enabled:\t\tFalse', stdout)
def test_list_sizes(self):
"""Tests various size listing options."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri, contents=b'x' * 2048)
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-l', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2048', stdout)
_Check1()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2048', stdout)
_Check2()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check3():
stdout = self.RunGsUtil(['ls', '-al', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2048', stdout)
_Check3()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check4():
stdout = self.RunGsUtil(['ls', '-lh', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2 KiB', stdout)
_Check4()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check5():
stdout = self.RunGsUtil(['ls', '-alh', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2 KiB', stdout)
_Check5()
@unittest.skipIf(IS_WINDOWS,
'Unicode handling on Windows requires mods to site-packages')
def test_list_unicode_filename(self):
"""Tests listing an object with a unicode filename."""
# Note: This test fails on Windows (command.exe). I was able to get ls to
# output Unicode filenames correctly by hacking the UniStream class code
# shown at
# http://stackoverflow.com/questions/878972/windows-cmd-encoding-change-causes-python-crash/3259271
# into the start of gslib/commands/ls.py, along with no-op flush and
# isastream functions (as an experiment). However, even with that change,
# the current test still fails, since it also needs to run that
# stdout/stderr-replacement code. That UniStream class replacement really
# needs to be added to the site-packages on Windows python.
object_name = u'Аудиоархив'
bucket_uri = self.CreateVersionedBucket()
key_uri = self.CreateObject(bucket_uri=bucket_uri,
contents=b'foo',
object_name=object_name)
self.AssertNObjectsInBucket(bucket_uri, 1, versioned=True)
stdout = self.RunGsUtil(['ls', '-ael', suri(key_uri)], return_stdout=True)
self.assertIn(object_name, stdout)
if self.default_provider == 'gs':
self.assertIn(str(key_uri.generation), stdout)
self.assertIn('metageneration=%s' % key_uri.get_key().metageneration,
stdout)
if self.test_api == ApiSelector.XML:
self.assertIn(key_uri.get_key().etag.strip('"\''), stdout)
else:
# TODO: When testcase setup can use JSON, match against the exact JSON
# etag.
self.assertIn('etag=', stdout)
elif self.default_provider == 's3':
self.assertIn(key_uri.version_id, stdout)
self.assertIn(key_uri.get_key().etag.strip('"\''), stdout)
def test_list_acl(self):
"""Tests that long listing includes an ACL."""
key_uri = self.CreateObject(contents=b'foo')
stdout = self.RunGsUtil(['ls', '-L', suri(key_uri)], return_stdout=True)
self.assertIn('ACL:', stdout)
self.assertNotIn('ACCESS DENIED', stdout)
def test_list_gzip_content_length(self):
"""Tests listing a gzipped object."""
file_size = 10000
file_contents = b'x' * file_size
fpath = self.CreateTempFile(contents=file_contents, file_name='foo.txt')
key_uri = self.CreateObject()
self.RunGsUtil(['cp', '-z', 'txt', suri(fpath), suri(key_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', suri(key_uri)], return_stdout=True)
self.assertRegex(stdout, r'Content-Encoding:\s+gzip')
find_content_length_re = r'Content-Length:\s+(?P<num>\d)'
self.assertRegex(stdout, find_content_length_re)
m = re.search(find_content_length_re, stdout)
content_length = int(m.group('num'))
self.assertGreater(content_length, 0)
self.assertLess(content_length, file_size)
_Check1()
def test_output_chopped(self):
"""Tests that gsutil still succeeds with a truncated stdout."""
bucket_uri = self.CreateBucket(test_objects=2)
# Run Python with the -u flag so output is not buffered.
gsutil_cmd = [
sys.executable, '-u', gslib.GSUTIL_PATH, 'ls',
suri(bucket_uri)
]
# Set bufsize to 0 to make sure output is not buffered.
p = subprocess.Popen(gsutil_cmd, stdout=subprocess.PIPE, bufsize=0)
# Immediately close the stdout pipe so that gsutil gets a broken pipe error.
p.stdout.close()
p.wait()
# Make sure it still exited cleanly.
self.assertEqual(p.returncode, 0)
@SkipForS3('Boto lib required for S3 does not handle paths '
'starting with slash.')
def test_recursive_list_slash_only(self):
"""Tests listing an object with a trailing slash."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri, object_name='/', contents=b'foo')
self.AssertNObjectsInBucket(bucket_uri, 1)
stdout = self.RunGsUtil(['ls', '-R', suri(bucket_uri)], return_stdout=True)
# Note: The suri function normalizes the URI, so the double slash gets
# removed.
self.assertIn(suri(bucket_uri) + '/', stdout)
def test_recursive_list_trailing_slash(self):
"""Tests listing an object with a trailing slash."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo/',
contents=b'foo')
self.AssertNObjectsInBucket(bucket_uri, 1)
stdout = self.RunGsUtil(['ls', '-R', suri(bucket_uri)], return_stdout=True)
# Note: The suri function normalizes the URI, so the double slash gets
# removed.
self.assertIn(suri(bucket_uri) + '/foo/', stdout)
@SkipForS3('Boto lib required for S3 does not handle paths '
'starting with slash.')
def test_recursive_list_trailing_two_slash(self):
"""Tests listing an object with two trailing slashes."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri, object_name='//', contents=b'foo')
self.AssertNObjectsInBucket(bucket_uri, 1)
stdout = self.RunGsUtil(['ls', '-R', suri(bucket_uri)], return_stdout=True)
# Note: The suri function normalizes the URI, so the double slash gets
# removed.
self.assertIn(suri(bucket_uri) + '//', stdout)
def test_wildcard_prefix(self):
"""Tests that an object name with a wildcard does not infinite loop."""
bucket_uri = self.CreateBucket()
wildcard_folder_object = 'wildcard*/'
object_matching_folder = 'wildcard10/foo'
self.CreateObject(bucket_uri=bucket_uri,
object_name=wildcard_folder_object,
contents=b'foo')
self.CreateObject(bucket_uri=bucket_uri,
object_name=object_matching_folder,
contents=b'foo')
self.AssertNObjectsInBucket(bucket_uri, 2)
stderr = self.RunGsUtil(['ls', suri(bucket_uri, 'wildcard*')],
return_stderr=True,
expected_status=1)
self.assertIn(
'Cloud folder %s%s contains a wildcard' %
(suri(bucket_uri), '/wildcard*/'), stderr)
# Listing with a flat wildcard should still succeed.
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check():
stdout = self.RunGsUtil(['ls', '-l', suri(bucket_uri, '**')],
return_stdout=True)
self.assertNumLines(stdout, 3) # 2 object lines, one summary line.
_Check()
@SkipForS3('S3 anonymous access is not supported.')
def test_get_object_without_list_bucket_permission(self):
# Bucket is not publicly readable by default.
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='permitted',
contents=b'foo')
# Set this object to be publicly readable.
self.RunGsUtil(['acl', 'set', 'public-read', suri(object_uri)])
# Drop credentials.
with self.SetAnonymousBotoCreds():
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)],
return_stdout=True)
self.assertIn(suri(object_uri), stdout)
@SkipForS3('S3 customer-supplied encryption keys are not supported.')
def test_list_encrypted_object(self):
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
object_uri = self.CreateObject(object_name='foo',
contents=TEST_ENCRYPTION_CONTENT1,
encryption_key=TEST_ENCRYPTION_KEY1)
# Listing object with key should return unencrypted hashes.
with SetBotoConfigForTest([('GSUtil', 'encryption_key',
TEST_ENCRYPTION_KEY1)]):
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _ListExpectDecrypted():
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)],
return_stdout=True)
self.assertIn(TEST_ENCRYPTION_CONTENT1_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT1_CRC32C, stdout)
self.assertIn(TEST_ENCRYPTION_KEY1_SHA256_B64.decode('ascii'), stdout)
_ListExpectDecrypted()
# Listing object without a key should return encrypted hashes.
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _ListExpectEncrypted():
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)],
return_stdout=True)
self.assertNotIn(TEST_ENCRYPTION_CONTENT1_MD5, stdout)
self.assertNotIn(TEST_ENCRYPTION_CONTENT1_CRC32C, stdout)
self.assertIn('encrypted', stdout)
self.assertIn(TEST_ENCRYPTION_KEY1_SHA256_B64.decode('ascii'), stdout)
_ListExpectEncrypted()
# Listing object with a non-matching key should return encrypted hashes.
with SetBotoConfigForTest([('GSUtil', 'encryption_key',
TEST_ENCRYPTION_KEY2)]):
_ListExpectEncrypted()
@SkipForS3('S3 customer-supplied encryption keys are not supported.')
def test_list_mixed_encryption(self):
"""Tests listing objects with various encryption interactions."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=TEST_ENCRYPTION_CONTENT1,
encryption_key=TEST_ENCRYPTION_KEY1)
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo2',
contents=TEST_ENCRYPTION_CONTENT2,
encryption_key=TEST_ENCRYPTION_KEY2)
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo3',
contents=TEST_ENCRYPTION_CONTENT3,
encryption_key=TEST_ENCRYPTION_KEY3)
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo4',
contents=TEST_ENCRYPTION_CONTENT4,
encryption_key=TEST_ENCRYPTION_KEY4)
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo5',
contents=TEST_ENCRYPTION_CONTENT5)
# List 5 objects, one encrypted with each of four keys, and one
# unencrypted. Supplying keys [1,3,4] should result in four unencrypted
# listings and one encrypted listing (for key 2).
with SetBotoConfigForTest([
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1),
('GSUtil', 'decryption_key1', TEST_ENCRYPTION_KEY3),
('GSUtil', 'decryption_key2', TEST_ENCRYPTION_KEY4)
]):
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _ListExpectMixed():
"""Validates object listing."""
stdout = self.RunGsUtil(['ls', '-L', suri(bucket_uri)],
return_stdout=True)
self.assertIn(TEST_ENCRYPTION_CONTENT1_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT1_CRC32C, stdout)
self.assertIn(TEST_ENCRYPTION_KEY1_SHA256_B64.decode('ascii'), stdout)
self.assertNotIn(TEST_ENCRYPTION_CONTENT2_MD5, stdout)
self.assertNotIn(TEST_ENCRYPTION_CONTENT2_CRC32C, stdout)
self.assertIn('encrypted', stdout)
self.assertIn(TEST_ENCRYPTION_KEY2_SHA256_B64.decode('ascii'), stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT3_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT3_CRC32C, stdout)
self.assertIn(TEST_ENCRYPTION_KEY3_SHA256_B64.decode('ascii'), stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT4_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT4_CRC32C, stdout)
self.assertIn(TEST_ENCRYPTION_KEY4_SHA256_B64.decode('ascii'), stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT5_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT5_CRC32C, stdout)
_ListExpectMixed()
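  # Illustrative aside, not part of the original suite: outside of tests the
  # same key material lives in the boto config (values below are placeholders):
  #
  #   [GSUtil]
  #   encryption_key = <base64-encoded AES256 key used for new writes>
  #   decryption_key1 = <additional base64 key tried when reading/listing>
  #   decryption_key2 = <further decryption_keyN entries may follow>
  #
  # With such a config, `gsutil ls -L` prints real MD5/CRC32C values only for
  # objects whose key matches one of the configured entries; the rest are shown
  # as 'encrypted' together with the key's SHA256, exactly as asserted above.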
def test_non_ascii_project_fails(self):
stderr = self.RunGsUtil(['ls', '-p', 'ã', 'gs://fobarbaz'],
expected_status=1,
return_stderr=True)
self.assertIn('Invalid non-ASCII', stderr)
def set_default_kms_key_on_bucket(self, bucket_uri):
# Make sure our keyRing and cryptoKey exist.
keyring_fqn = self.kms_api.CreateKeyRing(
PopulateProjectId(None),
testcase.KmsTestingResources.KEYRING_NAME,
location=testcase.KmsTestingResources.KEYRING_LOCATION)
key_fqn = self.kms_api.CreateCryptoKey(
keyring_fqn, testcase.KmsTestingResources.CONSTANT_KEY_NAME)
# Make sure that the service account for the desired bucket's parent project
# is authorized to encrypt with the key above.
self.RunGsUtil(['kms', 'encryption', '-k', key_fqn, suri(bucket_uri)])
return key_fqn
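  # Illustrative aside, not part of the original suite: the equivalent manual
  # setup is roughly `gsutil kms encryption -k <key_fqn> gs://<bucket>`, which
  # authorizes the bucket's service agent on the key and records it as the
  # bucket default, after which `ls -Lb` reports it on the 'Default KMS key'
  # line (as the tests below expect).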
@SkipForXML(KMS_XML_SKIP_MSG)
@SkipForS3(KMS_XML_SKIP_MSG)
def test_default_kms_key_listed_for_bucket(self):
bucket_uri = self.CreateBucket()
# Default KMS key is not set by default.
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertRegex(stdout, r'Default KMS key:\s+None')
# Default KMS key's name should be listed after being set on the bucket.
key_fqn = self.set_default_kms_key_on_bucket(bucket_uri)
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertRegex(stdout, r'Default KMS key:\s+%s' % key_fqn)
@SkipForXML(KMS_XML_SKIP_MSG)
@SkipForS3(KMS_XML_SKIP_MSG)
def test_kms_key_listed_for_kms_encrypted_object(self):
bucket_uri = self.CreateBucket()
key_fqn = self.set_default_kms_key_on_bucket(bucket_uri)
# Copy an object into our bucket and encrypt using the key from above.
obj_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'foo',
kms_key_name=key_fqn)
stdout = self.RunGsUtil(['ls', '-L', suri(obj_uri)], return_stdout=True)
self.assertRegex(stdout, r'KMS key:\s+%s' % key_fqn)
@SkipForXML(BUCKET_LOCK_SKIP_MSG)
@SkipForS3(BUCKET_LOCK_SKIP_MSG)
def test_list_retention_policy(self):
bucket_uri = self.CreateBucketWithRetentionPolicy(
retention_period_in_seconds=1)
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertRegex(stdout, r'Retention Policy\:\t*Present')
# Clearing Retention Policy on the bucket.
self.RunGsUtil(['retention', 'clear', suri(bucket_uri)])
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertNotRegex(stdout, r'Retention Policy:')
@SkipForXML(BUCKET_LOCK_SKIP_MSG)
@SkipForS3(BUCKET_LOCK_SKIP_MSG)
def test_list_default_event_based_hold(self):
bucket_uri = self.CreateBucket()
self.RunGsUtil(['retention', 'event-default', 'set', suri(bucket_uri)])
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertRegex(stdout, r'Default Event-Based Hold:\t* *True')
# Clearing the default Event-Based Hold on the bucket.
self.RunGsUtil(['retention', 'event-default', 'release', suri(bucket_uri)])
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertNotRegex(stdout, r'Default Event-Based Hold')
@SkipForXML(BUCKET_LOCK_SKIP_MSG)
@SkipForS3(BUCKET_LOCK_SKIP_MSG)
def test_list_temporary_hold(self):
object_uri = self.CreateObject(contents=b'content')
self.RunGsUtil(['retention', 'temp', 'set', suri(object_uri)])
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
self.assertRegex(stdout, r'Temporary Hold')
# Clearing the Temporary Hold on the object.
self.RunGsUtil(['retention', 'temp', 'release', suri(object_uri)])
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
self.assertNotRegex(stdout, r'Temporary Hold')
@SkipForXML(BUCKET_LOCK_SKIP_MSG)
@SkipForS3(BUCKET_LOCK_SKIP_MSG)
def test_list_event_based_hold(self):
object_uri = self.CreateObject(contents=b'content')
self.RunGsUtil(['retention', 'event', 'set', suri(object_uri)])
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
self.assertRegex(stdout, r'Event-Based Hold')
# Clearing the Event-Based Hold on the object.
self.RunGsUtil(['retention', 'event', 'release', suri(object_uri)])
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
self.assertNotRegex(stdout, r'Event-Based Hold')
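  # Illustrative aside, not part of the original suite: these integration tests
  # are normally driven through gsutil's own test runner, e.g.
  #
  #   ./gsutil test ls                          # whole ls test module
  #   ./gsutil test ls.TestLs.test_list_acl     # a single case
  #
  # (the TestLs class name is assumed from the usual naming convention in this
  # module; it is defined earlier in the file).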
| en | 0.894952 | # -*- coding: utf-8 -*- # Copyright 2013 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests for ls command. Unit tests for ls command. Tests the JSON storage class update time field. # Case 1: Create an object message where Storage class update time is the # same as Creation time. # Create mock object to point to obj_metadata. # Print out attributes of object message. # Verify that no Storage class update time field displays since it's the # same as Creation time. # Case 2: Create an object message where Storage class update time differs # from Creation time. # Create mock object to point to obj_metadata2. # Print out attributes of object message. # Verify that Creation time and Storage class update time fields display and # are the same as the times set in the object message. # MockKey doesn't support hash_algs, so the MD5 will not match. Integration tests for ls command. # Blank `ls` command lists GS buckets. # Use @Retry as hedge against bucket listing eventual consistency. Tests ls -Lb. # Check that the bucket URI is displayed. # Check that we don't see output corresponding to listing objects rather # than buckets. # Toggle versioning on the bucket so that the modification time will be # greater than the creation time. # Check that lines for JSON-specific fields are not displayed. # Check that time created/updated lines are displayed. # Check that updated time > created time. # Check that for bucket policy only fields. Tests ls -lb. # Use @Retry as hedge against bucket listing eventual consistency. Tests listing multiple buckets with a wildcard. # This just double checks that the common prefix of the two buckets is what # we think it should be (based on implementation detail of CreateBucket). # We want to be careful when setting a wildcard on buckets to make sure we # don't step outside the test buckets to affect other buckets. # Use @Retry as hedge against bucket listing eventual consistency. Tests a bucket that is known not to exist. Tests listing a non-existent object. # Use @Retry as hedge against bucket listing eventual consistency. # sends a GET request # sends a HEAD request, meaning error body is not included. # ap-east-1 used here since regions launched before March 20, 2019 do # some temporary redirecting for new buckets which suppresses 400 errors. # Use @Retry as hedge against bucket listing eventual consistency. # Assert that only user has read/write permission Tests listing one object with -l. # Throws exception if time string is not formatted correctly. Tests listing one object with -L. # Ensure that creation and update don't take place in the same second. # Check that the creation time, rather than the updated time, is displayed. # XML API has no concept of updated time. Tests listing a bucket subdirectory. # Use @Retry as hedge against bucket listing eventual consistency. Tests listing a bucket subdirectory using -d. Result will display subdirectory names instead of contents. 
Uses a wildcard to show multiple matching subdirectories. # Use @Retry as hedge against bucket listing eventual consistency. Tests listing a versioned bucket. # Use @Retry as hedge against bucket listing eventual consistency. #' % bucket2_uri.clone_replace_name(bucket_list[0].name), Tests that listing an object with an etag. # TODO: When testcase setup can use JSON, match against the exact JSON # etag. # Use @Retry as hedge against bucket listing eventual consistency. Tests listing on a bucket with a label/tagging configuration. # No labels are present by default. # Add a label and check that it shows up. Tests listing a bucket with location constraint. # No location constraint should be shown for `-lb` # Default location constraint is US # Default location may vary between test environments; test that some # non-whitespace character is present after the whitespace: # TODO(b/135700569): Stop skipping this once this field is available to all # projects. Tests listing a bucket with location constraint. # No location type should be shown for `-lb` # Default location type may vary between test environments; test that some # non-whitespace character is present after the whitespace: Tests listing a bucket with logging config. # No logging info # Logging configuration is absent by default # Enable and check # Disable and check Tests listing a bucket with website config. # No website configuration # Website configuration is absent by default # Initialize and check # Clear and check Tests listing a bucket with requester pays (billing) config. # No requester pays configuration # Requester Pays configuration is absent by default # Initialize and check # Clear and check Tests various size listing options. # Use @Retry as hedge against bucket listing eventual consistency. # Use @Retry as hedge against bucket listing eventual consistency. # Use @Retry as hedge against bucket listing eventual consistency. # Use @Retry as hedge against bucket listing eventual consistency. # Use @Retry as hedge against bucket listing eventual consistency. Tests listing an object with a unicode filename. # Note: This test fails on Windows (command.exe). I was able to get ls to # output Unicode filenames correctly by hacking the UniStream class code # shown at # http://stackoverflow.com/questions/878972/windows-cmd-encoding-change-causes-python-crash/3259271 # into the start of gslib/commands/ls.py, along with no-op flush and # isastream functions (as an experiment). However, even with that change, # the current test still fails, since it also needs to run that # stdout/stderr-replacement code. That UniStream class replacement really # needs to be added to the site-packages on Windows python. # TODO: When testcase setup can use JSON, match against the exact JSON # etag. Tests that long listing includes an ACL. Tests listing a gzipped object. # Use @Retry as hedge against bucket listing eventual consistency. Tests that gsutil still succeeds with a truncated stdout. # Run Python with the -u flag so output is not buffered. # Set bufsize to 0 to make sure output is not buffered. # Immediately close the stdout pipe so that gsutil gets a broken pipe error. # Make sure it still exited cleanly. Tests listing an object with a trailing slash. # Note: The suri function normalizes the URI, so the double slash gets # removed. Tests listing an object with a trailing slash. # Note: The suri function normalizes the URI, so the double slash gets # removed. Tests listing an object with two trailing slashes. 
# Note: The suri function normalizes the URI, so the double slash gets # removed. Tests that an object name with a wildcard does not infinite loop. # Listing with a flat wildcard should still succeed. # Use @Retry as hedge against bucket listing eventual consistency. # 2 object lines, one summary line. # Bucket is not publicly readable by default. # Set this object to be publicly readable. # Drop credentials. # Listing object with key should return unencrypted hashes. # Use @Retry as hedge against bucket listing eventual consistency. # Listing object without a key should return encrypted hashes. # Use @Retry as hedge against bucket listing eventual consistency. # Listing object with a non-matching key should return encrypted hashes. Tests listing objects with various encryption interactions. # List 5 objects, one encrypted with each of four keys, and one # unencrypted. Supplying keys [1,3,4] should result in four unencrypted # listings and one encrypted listing (for key 2). # Use @Retry as hedge against bucket listing eventual consistency. Validates object listing. # Make sure our keyRing and cryptoKey exist. # Make sure that the service account for the desired bucket's parent project # is authorized to encrypt with the key above. # Default KMS key is not set by default. # Default KMS key's name should be listed after being set on the bucket. # Copy an object into our bucket and encrypt using the key from above. # Clearing Retention Policy on the bucket. # Clearing the default Event-Based Hold on the bucket. # Clearing the Temporary Hold on the object. # Clearing the Event-Based Hold on the object. | 1.770328 | 2 |
onadata/libs/utils/audit.py | ubpd/kobocat | 0 | 10507 | # coding: utf-8
from __future__ import unicode_literals, print_function, division, absolute_import
HOME_ACCESSED = "home-accessed"
| # coding: utf-8
from __future__ import unicode_literals, print_function, division, absolute_import
HOME_ACCESSED = "home-accessed"
| en | 0.833554 | # coding: utf-8 | 1.033237 | 1 |
places/management/commands/load_places.py | aevtikheev/dvmn-yandex-afisha | 0 | 10508 | import logging
from urllib.parse import unquote, urlparse
from pathlib import PurePosixPath
import requests
from requests.exceptions import ReadTimeout, ConnectionError, HTTPError
from django.core.management.base import BaseCommand
from django.core.files.base import ContentFile
from places.models import Place, Image
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
class Command(BaseCommand):
help = 'Uploads data for a place'
def add_arguments(self, parser):
parser.add_argument('data_urls', nargs='+', type=str)
def handle(self, *args, **options):
for url in options['data_urls']:
response = requests.get(url)
response.raise_for_status()
place_data = response.json()
new_place, created = Place.objects.get_or_create(
title=place_data['title'],
defaults={
'short_description': place_data['description_short'],
'long_description': place_data['description_long'],
'longitude': place_data['coordinates']['lng'],
'latitude': place_data['coordinates']['lat']
}
)
if created:
logging.info(f'Place "{new_place.title}" created')
else:
logging.info(f'Place "{new_place.title}" already exists')
for image_position, image_url in enumerate(place_data['imgs']):
try:
response = requests.get(image_url)
response.raise_for_status()
except (ReadTimeout, ConnectionError, HTTPError) as exception:
logging.exception(exception)
continue
new_image, _ = Image.objects.get_or_create(
place=new_place,
position=image_position
)
image_content = ContentFile(response.content)
image_name = PurePosixPath(unquote(urlparse(image_url).path)).parts[-1]
new_image.image.save(image_name, image_content)
logging.info(f'Image {image_name} for place "{new_place.title}" uploaded')
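# Illustrative sketch, not part of the original command: each URL passed to
# `manage.py load_places` is expected to serve JSON shaped like this
# hypothetical sample (the keys mirror those read in handle() above).
EXAMPLE_PLACE_PAYLOAD = {
    'title': 'Example place',
    'description_short': 'One-line teaser',
    'description_long': '<p>Full description, HTML allowed</p>',
    'coordinates': {'lng': '37.62', 'lat': '55.75'},
    'imgs': ['https://example.com/images/1.jpg'],
}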
| import logging
from urllib.parse import unquote, urlparse
from pathlib import PurePosixPath
import requests
from requests.exceptions import ReadTimeout, ConnectionError, HTTPError
from django.core.management.base import BaseCommand
from django.core.files.base import ContentFile
from places.models import Place, Image
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
class Command(BaseCommand):
help = 'Uploads data for a place'
def add_arguments(self, parser):
parser.add_argument('data_urls', nargs='+', type=str)
def handle(self, *args, **options):
for url in options['data_urls']:
response = requests.get(url)
response.raise_for_status()
place_data = response.json()
new_place, created = Place.objects.get_or_create(
title=place_data['title'],
defaults={
'short_description': place_data['description_short'],
'long_description': place_data['description_long'],
'longitude': place_data['coordinates']['lng'],
'latitude': place_data['coordinates']['lat']
}
)
if created:
logging.info(f'Place "{new_place.title}" created')
else:
logging.info(f'Place "{new_place.title}" already exists')
for image_position, image_url in enumerate(place_data['imgs']):
try:
response = requests.get(image_url)
response.raise_for_status()
except (ReadTimeout, ConnectionError, HTTPError) as exception:
logging.exception(exception)
continue
new_image, _ = Image.objects.get_or_create(
place=new_place,
position=image_position
)
image_content = ContentFile(response.content)
image_name = PurePosixPath(unquote(urlparse(image_url).path)).parts[-1]
new_image.image.save(image_name, image_content)
logging.info(f'Image {image_name} for place "{new_place.title}" uploaded')
| none | 1 | 2.192505 | 2 |
|
gslib/utils/ls_helper.py | dickmao/gsutil | 1 | 10509 | # -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions and class for listing commands such as ls and du."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import fnmatch
import sys
import six
from gslib.cloud_api import EncryptionException
from gslib.exception import CommandException
from gslib.plurality_checkable_iterator import PluralityCheckableIterator
from gslib.storage_url import GenerationFromUrlAndString
from gslib.utils.constants import S3_ACL_MARKER_GUID
from gslib.utils.constants import S3_DELETE_MARKER_GUID
from gslib.utils.constants import S3_MARKER_GUIDS
from gslib.utils.constants import UTF8
from gslib.utils.system_util import IS_WINDOWS
from gslib.utils.translation_helper import AclTranslation
from gslib.utils import text_util
from gslib.wildcard_iterator import StorageUrlFromString
ENCRYPTED_FIELDS = [
'md5Hash',
'crc32c',
]
UNENCRYPTED_FULL_LISTING_FIELDS = [
'acl',
'cacheControl',
'componentCount',
'contentDisposition',
'contentEncoding',
'contentLanguage',
'contentType',
'customTime',
'kmsKeyName',
'customerEncryption',
'etag',
'eventBasedHold',
'generation',
'metadata',
'metageneration',
'retentionExpirationTime',
'size',
'storageClass',
'temporaryHold',
'timeCreated',
'timeDeleted',
'timeStorageClassUpdated',
'updated',
]
def MakeMetadataLine(label, value, indent=1):
"""Returns a string with a vertically aligned label and value.
Labels of the same indentation level will start at the same column. Values
will all start at the same column (unless the combined left-indent and
label length is excessively long). If a value spans multiple lines,
indentation will only be applied to the first line. Example output from
several calls:
Label1: Value (default indent of 1 was used)
Sublabel1: Value (used indent of 2 here)
Label2: Value
Args:
label: The label to print in the first column.
value: The value to print in the second column.
indent: (4 * indent) spaces will be placed before the label.
Returns:
A string with a vertically aligned label and value.
"""
return '{}{}'.format(((' ' * indent * 4) + label + ':').ljust(28), value)
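# Illustrative example, not part of the original module:
#
#   MakeMetadataLine('Content-Length', 1448)
#     -> '    Content-Length:         1448'
#
# i.e. four leading spaces for the default indent of 1, with the label and
# colon left-justified in a 28-character column before the value.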
def PrintBucketHeader(bucket_listing_ref): # pylint: disable=unused-argument
"""Default function for printing headers for buckets.
Header is printed prior to listing the contents of the bucket.
Args:
bucket_listing_ref: BucketListingRef of type BUCKET.
"""
pass
def PrintDir(bucket_listing_ref):
"""Default function for printing buckets or prefixes.
Args:
bucket_listing_ref: BucketListingRef of type BUCKET or PREFIX.
"""
text_util.print_to_fd(bucket_listing_ref.url_string)
# pylint: disable=unused-argument
def PrintDirSummary(num_bytes, bucket_listing_ref):
"""Off-by-default function for printing buckets or prefix size summaries.
Args:
num_bytes: Number of bytes contained in the directory.
bucket_listing_ref: BucketListingRef of type BUCKET or PREFIX.
"""
pass
def PrintDirHeader(bucket_listing_ref):
"""Default function for printing headers for prefixes.
Header is printed prior to listing the contents of the prefix.
Args:
bucket_listing_ref: BucketListingRef of type PREFIX.
"""
text_util.print_to_fd('{}:'.format(bucket_listing_ref.url_string))
def PrintNewLine():
"""Default function for printing new lines between directories."""
text_util.print_to_fd()
# pylint: disable=too-many-statements
def PrintFullInfoAboutObject(bucket_listing_ref, incl_acl=True):
"""Print full info for given object (like what displays for gsutil ls -L).
Args:
bucket_listing_ref: BucketListingRef being listed.
Must have ref_type OBJECT and a populated root_object
with the desired fields.
incl_acl: True if ACL info should be output.
Returns:
Tuple (number of objects, object_length)
Raises:
Exception: if calling bug encountered.
"""
url_str = bucket_listing_ref.url_string
storage_url = StorageUrlFromString(url_str)
obj = bucket_listing_ref.root_object
if (obj.metadata and
S3_DELETE_MARKER_GUID in obj.metadata.additionalProperties):
num_bytes = 0
num_objs = 0
url_str += '<DeleteMarker>'
else:
num_bytes = obj.size
num_objs = 1
text_util.print_to_fd('{}:'.format(url_str))
if obj.timeCreated:
text_util.print_to_fd(
MakeMetadataLine('Creation time',
obj.timeCreated.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if obj.updated:
text_util.print_to_fd(
MakeMetadataLine('Update time',
obj.updated.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if (obj.timeStorageClassUpdated and
obj.timeStorageClassUpdated != obj.timeCreated):
text_util.print_to_fd(
MakeMetadataLine(
'Storage class update time',
obj.timeStorageClassUpdated.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if obj.storageClass:
text_util.print_to_fd(MakeMetadataLine('Storage class', obj.storageClass))
if obj.temporaryHold:
text_util.print_to_fd(MakeMetadataLine('Temporary Hold', 'Enabled'))
if obj.eventBasedHold:
text_util.print_to_fd(MakeMetadataLine('Event-Based Hold', 'Enabled'))
if obj.retentionExpirationTime:
text_util.print_to_fd(
MakeMetadataLine(
'Retention Expiration',
obj.retentionExpirationTime.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if obj.kmsKeyName:
text_util.print_to_fd(MakeMetadataLine('KMS key', obj.kmsKeyName))
if obj.cacheControl:
text_util.print_to_fd(MakeMetadataLine('Cache-Control', obj.cacheControl))
if obj.contentDisposition:
text_util.print_to_fd(
MakeMetadataLine('Content-Disposition', obj.contentDisposition))
if obj.contentEncoding:
text_util.print_to_fd(
MakeMetadataLine('Content-Encoding', obj.contentEncoding))
if obj.contentLanguage:
text_util.print_to_fd(
MakeMetadataLine('Content-Language', obj.contentLanguage))
text_util.print_to_fd(MakeMetadataLine('Content-Length', obj.size))
text_util.print_to_fd(MakeMetadataLine('Content-Type', obj.contentType))
if obj.componentCount:
text_util.print_to_fd(
MakeMetadataLine('Component-Count', obj.componentCount))
if obj.customTime:
text_util.print_to_fd(MakeMetadataLine('Custom-Time', obj.customTime))
if obj.timeDeleted:
text_util.print_to_fd(
MakeMetadataLine('Noncurrent time',
obj.timeDeleted.strftime('%a, %d %b %Y %H:%M:%S GMT')))
marker_props = {}
if obj.metadata and obj.metadata.additionalProperties:
non_marker_props = []
for add_prop in obj.metadata.additionalProperties:
if add_prop.key not in S3_MARKER_GUIDS:
non_marker_props.append(add_prop)
else:
marker_props[add_prop.key] = add_prop.value
if non_marker_props:
text_util.print_to_fd(MakeMetadataLine('Metadata', ''))
for ap in non_marker_props:
ap_key = '{}'.format(ap.key)
ap_value = '{}'.format(ap.value)
meta_data_line = MakeMetadataLine(ap_key, ap_value, indent=2)
text_util.print_to_fd(meta_data_line)
if obj.customerEncryption:
if not obj.crc32c:
text_util.print_to_fd(MakeMetadataLine('Hash (crc32c)', 'encrypted'))
if not obj.md5Hash:
text_util.print_to_fd(MakeMetadataLine('Hash (md5)', 'encrypted'))
text_util.print_to_fd(
MakeMetadataLine('Encryption algorithm',
obj.customerEncryption.encryptionAlgorithm))
text_util.print_to_fd(
MakeMetadataLine('Encryption key SHA256',
obj.customerEncryption.keySha256))
if obj.crc32c:
text_util.print_to_fd(MakeMetadataLine('Hash (crc32c)', obj.crc32c))
if obj.md5Hash:
text_util.print_to_fd(MakeMetadataLine('Hash (md5)', obj.md5Hash))
text_util.print_to_fd(MakeMetadataLine('ETag', obj.etag.strip('"\'')))
if obj.generation:
generation_str = GenerationFromUrlAndString(storage_url, obj.generation)
text_util.print_to_fd(MakeMetadataLine('Generation', generation_str))
if obj.metageneration:
text_util.print_to_fd(MakeMetadataLine('Metageneration',
obj.metageneration))
if incl_acl:
# JSON API won't return acls as part of the response unless we have
# full control scope
if obj.acl:
text_util.print_to_fd(
MakeMetadataLine('ACL', AclTranslation.JsonFromMessage(obj.acl)))
elif S3_ACL_MARKER_GUID in marker_props:
text_util.print_to_fd(
MakeMetadataLine('ACL', marker_props[S3_ACL_MARKER_GUID]))
else:
# Empty ACLs are possible with Bucket Policy Only and no longer imply
# ACCESS DENIED anymore.
text_util.print_to_fd(MakeMetadataLine('ACL', '[]'))
return (num_objs, num_bytes)
def PrintObject(bucket_listing_ref):
"""Default printing function for objects.
Args:
bucket_listing_ref: BucketListingRef of type OBJECT.
Returns:
(num_objects, num_bytes).
"""
try:
text_util.print_to_fd(bucket_listing_ref.url_string)
except IOError as e:
# Windows throws an IOError 0 here for object names containing Unicode
# chars. Ignore it.
if not (IS_WINDOWS and e.errno == 0):
raise
return (1, 0)
class LsHelper(object):
"""Helper class for ls and du."""
def __init__(self,
iterator_func,
logger,
print_object_func=PrintObject,
print_dir_func=PrintDir,
print_dir_header_func=PrintDirHeader,
print_bucket_header_func=PrintBucketHeader,
print_dir_summary_func=PrintDirSummary,
print_newline_func=PrintNewLine,
all_versions=False,
should_recurse=False,
exclude_patterns=None,
fields=('name',),
list_subdir_contents=True):
"""Initializes the helper class to prepare for listing.
Args:
iterator_func: Function for instantiating iterator.
Inputs-
url_string- Url string to iterate on. May include
wildcards.
all_versions=False- If true, iterate over all object
versions.
logger: Logger for outputting warnings / errors.
print_object_func: Function for printing objects.
print_dir_func: Function for printing buckets/prefixes.
print_dir_header_func: Function for printing header line for buckets
or prefixes.
print_bucket_header_func: Function for printing header line for buckets
or prefixes.
print_dir_summary_func: Function for printing size summaries about
buckets/prefixes.
print_newline_func: Function for printing new lines between dirs.
all_versions: If true, list all object versions.
should_recurse: If true, recursively listing buckets/prefixes.
exclude_patterns: Patterns to exclude when listing.
fields: Fields to request from bucket listings; this should
include all fields that need to be populated in
objects so they can be listed. Can be set to None
to retrieve all object fields. Defaults to short
listing fields.
list_subdir_contents: If true, return the directory and any contents,
otherwise return only the directory itself.
"""
self._iterator_func = iterator_func
self.logger = logger
self._print_object_func = print_object_func
self._print_dir_func = print_dir_func
self._print_dir_header_func = print_dir_header_func
self._print_bucket_header_func = print_bucket_header_func
self._print_dir_summary_func = print_dir_summary_func
self._print_newline_func = print_newline_func
self.all_versions = all_versions
self.should_recurse = should_recurse
self.exclude_patterns = exclude_patterns
self.bucket_listing_fields = fields
self.list_subdir_contents = list_subdir_contents
def ExpandUrlAndPrint(self, url):
"""Iterates over the given URL and calls print functions.
Args:
url: StorageUrl to iterate over.
Returns:
      (num_dirs, num_objects, num_bytes) total number of directories, objects,
      and bytes iterated.
"""
num_objects = 0
num_dirs = 0
num_bytes = 0
print_newline = False
if url.IsBucket() or self.should_recurse:
# IsBucket() implies a top-level listing.
if url.IsBucket():
self._print_bucket_header_func(url)
return self._RecurseExpandUrlAndPrint(url.url_string,
print_initial_newline=False)
else:
# User provided a prefix or object URL, but it's impossible to tell
# which until we do a listing and see what matches.
top_level_iterator = PluralityCheckableIterator(
self._iterator_func(
url.CreatePrefixUrl(wildcard_suffix=None),
all_versions=self.all_versions).IterAll(
expand_top_level_buckets=True,
bucket_listing_fields=self.bucket_listing_fields))
plurality = top_level_iterator.HasPlurality()
try:
top_level_iterator.PeekException()
except EncryptionException:
# Detailed listing on a single object can perform a GetObjectMetadata
# call, which raises if a matching encryption key isn't found.
# Re-iterate without requesting encrypted fields.
top_level_iterator = PluralityCheckableIterator(
self._iterator_func(
url.CreatePrefixUrl(wildcard_suffix=None),
all_versions=self.all_versions).IterAll(
expand_top_level_buckets=True,
bucket_listing_fields=UNENCRYPTED_FULL_LISTING_FIELDS))
plurality = top_level_iterator.HasPlurality()
for blr in top_level_iterator:
if self._MatchesExcludedPattern(blr):
continue
if blr.IsObject():
nd = 0
no, nb = self._print_object_func(blr)
print_newline = True
elif blr.IsPrefix():
if print_newline:
self._print_newline_func()
else:
print_newline = True
if plurality and self.list_subdir_contents:
self._print_dir_header_func(blr)
elif plurality and not self.list_subdir_contents:
print_newline = False
expansion_url_str = StorageUrlFromString(
blr.url_string).CreatePrefixUrl(
wildcard_suffix='*' if self.list_subdir_contents else None)
nd, no, nb = self._RecurseExpandUrlAndPrint(expansion_url_str)
self._print_dir_summary_func(nb, blr)
else:
# We handle all buckets at the top level, so this should never happen.
raise CommandException(
'Sub-level iterator returned a CsBucketListingRef of type Bucket')
num_objects += no
num_dirs += nd
num_bytes += nb
return num_dirs, num_objects, num_bytes
def _RecurseExpandUrlAndPrint(self, url_str, print_initial_newline=True):
"""Iterates over the given URL string and calls print functions.
Args:
url_str: String describing StorageUrl to iterate over.
Must be of depth one or higher.
print_initial_newline: If true, print a newline before recursively
expanded prefixes.
Returns:
      (num_dirs, num_objects, num_bytes) total number of directories, objects,
      and bytes iterated.
"""
num_objects = 0
num_dirs = 0
num_bytes = 0
for blr in self._iterator_func(
'%s' % url_str, all_versions=self.all_versions).IterAll(
expand_top_level_buckets=True,
bucket_listing_fields=self.bucket_listing_fields):
if self._MatchesExcludedPattern(blr):
continue
if blr.IsObject():
nd = 0
no, nb = self._print_object_func(blr)
elif blr.IsPrefix():
if self.should_recurse:
if print_initial_newline:
self._print_newline_func()
else:
print_initial_newline = True
self._print_dir_header_func(blr)
expansion_url_str = StorageUrlFromString(
blr.url_string).CreatePrefixUrl(wildcard_suffix='*')
nd, no, nb = self._RecurseExpandUrlAndPrint(expansion_url_str)
self._print_dir_summary_func(nb, blr)
else:
nd, no, nb = 1, 0, 0
self._print_dir_func(blr)
else:
# We handle all buckets at the top level, so this should never happen.
raise CommandException(
'Sub-level iterator returned a bucketListingRef of type Bucket')
num_dirs += nd
num_objects += no
num_bytes += nb
return num_dirs, num_objects, num_bytes
def _MatchesExcludedPattern(self, blr):
"""Checks bucket listing reference against patterns to exclude.
Args:
blr: BucketListingRef to check.
Returns:
True if reference matches a pattern and should be excluded.
"""
if self.exclude_patterns:
tomatch = six.ensure_str(blr.url_string)
for pattern in self.exclude_patterns:
if fnmatch.fnmatch(tomatch, six.ensure_str(pattern)):
return True
return False
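# Illustrative sketch, not part of the original module: exclusion is plain
# fnmatch glob matching against the full URL string (reusing the module-level
# fnmatch import above), so a pattern such as 'gs://bucket/logs/*' silently
# skips every listing entry under that prefix.
def _would_be_excluded(url_string, exclude_patterns):
  """Mirrors LsHelper._MatchesExcludedPattern; for illustration only."""
  return any(fnmatch.fnmatch(url_string, p) for p in exclude_patterns)
# e.g. _would_be_excluded('gs://bucket/logs/2021/x.txt', ['gs://bucket/logs/*'])
# returns True, so that entry would be skipped by ls/du.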
| # -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions and class for listing commands such as ls and du."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import fnmatch
import sys
import six
from gslib.cloud_api import EncryptionException
from gslib.exception import CommandException
from gslib.plurality_checkable_iterator import PluralityCheckableIterator
from gslib.storage_url import GenerationFromUrlAndString
from gslib.utils.constants import S3_ACL_MARKER_GUID
from gslib.utils.constants import S3_DELETE_MARKER_GUID
from gslib.utils.constants import S3_MARKER_GUIDS
from gslib.utils.constants import UTF8
from gslib.utils.system_util import IS_WINDOWS
from gslib.utils.translation_helper import AclTranslation
from gslib.utils import text_util
from gslib.wildcard_iterator import StorageUrlFromString
ENCRYPTED_FIELDS = [
'md5Hash',
'crc32c',
]
UNENCRYPTED_FULL_LISTING_FIELDS = [
'acl',
'cacheControl',
'componentCount',
'contentDisposition',
'contentEncoding',
'contentLanguage',
'contentType',
'customTime',
'kmsKeyName',
'customerEncryption',
'etag',
'eventBasedHold',
'generation',
'metadata',
'metageneration',
'retentionExpirationTime',
'size',
'storageClass',
'temporaryHold',
'timeCreated',
'timeDeleted',
'timeStorageClassUpdated',
'updated',
]
def MakeMetadataLine(label, value, indent=1):
"""Returns a string with a vertically aligned label and value.
Labels of the same indentation level will start at the same column. Values
will all start at the same column (unless the combined left-indent and
label length is excessively long). If a value spans multiple lines,
indentation will only be applied to the first line. Example output from
several calls:
Label1: Value (default indent of 1 was used)
Sublabel1: Value (used indent of 2 here)
Label2: Value
Args:
label: The label to print in the first column.
value: The value to print in the second column.
indent: (4 * indent) spaces will be placed before the label.
Returns:
A string with a vertically aligned label and value.
"""
return '{}{}'.format(((' ' * indent * 4) + label + ':').ljust(28), value)
def PrintBucketHeader(bucket_listing_ref): # pylint: disable=unused-argument
"""Default function for printing headers for buckets.
Header is printed prior to listing the contents of the bucket.
Args:
bucket_listing_ref: BucketListingRef of type BUCKET.
"""
pass
def PrintDir(bucket_listing_ref):
"""Default function for printing buckets or prefixes.
Args:
bucket_listing_ref: BucketListingRef of type BUCKET or PREFIX.
"""
text_util.print_to_fd(bucket_listing_ref.url_string)
# pylint: disable=unused-argument
def PrintDirSummary(num_bytes, bucket_listing_ref):
"""Off-by-default function for printing buckets or prefix size summaries.
Args:
num_bytes: Number of bytes contained in the directory.
bucket_listing_ref: BucketListingRef of type BUCKET or PREFIX.
"""
pass
def PrintDirHeader(bucket_listing_ref):
"""Default function for printing headers for prefixes.
Header is printed prior to listing the contents of the prefix.
Args:
bucket_listing_ref: BucketListingRef of type PREFIX.
"""
text_util.print_to_fd('{}:'.format(bucket_listing_ref.url_string))
def PrintNewLine():
"""Default function for printing new lines between directories."""
text_util.print_to_fd()
# pylint: disable=too-many-statements
def PrintFullInfoAboutObject(bucket_listing_ref, incl_acl=True):
"""Print full info for given object (like what displays for gsutil ls -L).
Args:
bucket_listing_ref: BucketListingRef being listed.
Must have ref_type OBJECT and a populated root_object
with the desired fields.
incl_acl: True if ACL info should be output.
Returns:
Tuple (number of objects, object_length)
Raises:
Exception: if calling bug encountered.
"""
url_str = bucket_listing_ref.url_string
storage_url = StorageUrlFromString(url_str)
obj = bucket_listing_ref.root_object
if (obj.metadata and
S3_DELETE_MARKER_GUID in obj.metadata.additionalProperties):
num_bytes = 0
num_objs = 0
url_str += '<DeleteMarker>'
else:
num_bytes = obj.size
num_objs = 1
text_util.print_to_fd('{}:'.format(url_str))
if obj.timeCreated:
text_util.print_to_fd(
MakeMetadataLine('Creation time',
obj.timeCreated.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if obj.updated:
text_util.print_to_fd(
MakeMetadataLine('Update time',
obj.updated.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if (obj.timeStorageClassUpdated and
obj.timeStorageClassUpdated != obj.timeCreated):
text_util.print_to_fd(
MakeMetadataLine(
'Storage class update time',
obj.timeStorageClassUpdated.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if obj.storageClass:
text_util.print_to_fd(MakeMetadataLine('Storage class', obj.storageClass))
if obj.temporaryHold:
text_util.print_to_fd(MakeMetadataLine('Temporary Hold', 'Enabled'))
if obj.eventBasedHold:
text_util.print_to_fd(MakeMetadataLine('Event-Based Hold', 'Enabled'))
if obj.retentionExpirationTime:
text_util.print_to_fd(
MakeMetadataLine(
'Retention Expiration',
obj.retentionExpirationTime.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if obj.kmsKeyName:
text_util.print_to_fd(MakeMetadataLine('KMS key', obj.kmsKeyName))
if obj.cacheControl:
text_util.print_to_fd(MakeMetadataLine('Cache-Control', obj.cacheControl))
if obj.contentDisposition:
text_util.print_to_fd(
MakeMetadataLine('Content-Disposition', obj.contentDisposition))
if obj.contentEncoding:
text_util.print_to_fd(
MakeMetadataLine('Content-Encoding', obj.contentEncoding))
if obj.contentLanguage:
text_util.print_to_fd(
MakeMetadataLine('Content-Language', obj.contentLanguage))
text_util.print_to_fd(MakeMetadataLine('Content-Length', obj.size))
text_util.print_to_fd(MakeMetadataLine('Content-Type', obj.contentType))
if obj.componentCount:
text_util.print_to_fd(
MakeMetadataLine('Component-Count', obj.componentCount))
if obj.customTime:
text_util.print_to_fd(MakeMetadataLine('Custom-Time', obj.customTime))
if obj.timeDeleted:
text_util.print_to_fd(
MakeMetadataLine('Noncurrent time',
obj.timeDeleted.strftime('%a, %d %b %Y %H:%M:%S GMT')))
marker_props = {}
if obj.metadata and obj.metadata.additionalProperties:
non_marker_props = []
for add_prop in obj.metadata.additionalProperties:
if add_prop.key not in S3_MARKER_GUIDS:
non_marker_props.append(add_prop)
else:
marker_props[add_prop.key] = add_prop.value
if non_marker_props:
text_util.print_to_fd(MakeMetadataLine('Metadata', ''))
for ap in non_marker_props:
ap_key = '{}'.format(ap.key)
ap_value = '{}'.format(ap.value)
meta_data_line = MakeMetadataLine(ap_key, ap_value, indent=2)
text_util.print_to_fd(meta_data_line)
if obj.customerEncryption:
if not obj.crc32c:
text_util.print_to_fd(MakeMetadataLine('Hash (crc32c)', 'encrypted'))
if not obj.md5Hash:
text_util.print_to_fd(MakeMetadataLine('Hash (md5)', 'encrypted'))
text_util.print_to_fd(
MakeMetadataLine('Encryption algorithm',
obj.customerEncryption.encryptionAlgorithm))
text_util.print_to_fd(
MakeMetadataLine('Encryption key SHA256',
obj.customerEncryption.keySha256))
if obj.crc32c:
text_util.print_to_fd(MakeMetadataLine('Hash (crc32c)', obj.crc32c))
if obj.md5Hash:
text_util.print_to_fd(MakeMetadataLine('Hash (md5)', obj.md5Hash))
text_util.print_to_fd(MakeMetadataLine('ETag', obj.etag.strip('"\'')))
if obj.generation:
generation_str = GenerationFromUrlAndString(storage_url, obj.generation)
text_util.print_to_fd(MakeMetadataLine('Generation', generation_str))
if obj.metageneration:
text_util.print_to_fd(MakeMetadataLine('Metageneration',
obj.metageneration))
if incl_acl:
# JSON API won't return acls as part of the response unless we have
# full control scope
if obj.acl:
text_util.print_to_fd(
MakeMetadataLine('ACL', AclTranslation.JsonFromMessage(obj.acl)))
elif S3_ACL_MARKER_GUID in marker_props:
text_util.print_to_fd(
MakeMetadataLine('ACL', marker_props[S3_ACL_MARKER_GUID]))
else:
# Empty ACLs are possible with Bucket Policy Only and no longer imply
# ACCESS DENIED anymore.
text_util.print_to_fd(MakeMetadataLine('ACL', '[]'))
return (num_objs, num_bytes)
def PrintObject(bucket_listing_ref):
"""Default printing function for objects.
Args:
bucket_listing_ref: BucketListingRef of type OBJECT.
Returns:
(num_objects, num_bytes).
"""
try:
text_util.print_to_fd(bucket_listing_ref.url_string)
except IOError as e:
# Windows throws an IOError 0 here for object names containing Unicode
# chars. Ignore it.
if not (IS_WINDOWS and e.errno == 0):
raise
return (1, 0)
class LsHelper(object):
"""Helper class for ls and du."""
def __init__(self,
iterator_func,
logger,
print_object_func=PrintObject,
print_dir_func=PrintDir,
print_dir_header_func=PrintDirHeader,
print_bucket_header_func=PrintBucketHeader,
print_dir_summary_func=PrintDirSummary,
print_newline_func=PrintNewLine,
all_versions=False,
should_recurse=False,
exclude_patterns=None,
fields=('name',),
list_subdir_contents=True):
"""Initializes the helper class to prepare for listing.
Args:
iterator_func: Function for instantiating iterator.
Inputs-
url_string- Url string to iterate on. May include
wildcards.
all_versions=False- If true, iterate over all object
versions.
logger: Logger for outputting warnings / errors.
print_object_func: Function for printing objects.
print_dir_func: Function for printing buckets/prefixes.
print_dir_header_func: Function for printing header line for buckets
or prefixes.
print_bucket_header_func: Function for printing header line for buckets
or prefixes.
print_dir_summary_func: Function for printing size summaries about
buckets/prefixes.
print_newline_func: Function for printing new lines between dirs.
all_versions: If true, list all object versions.
should_recurse: If true, recursively listing buckets/prefixes.
exclude_patterns: Patterns to exclude when listing.
fields: Fields to request from bucket listings; this should
include all fields that need to be populated in
objects so they can be listed. Can be set to None
to retrieve all object fields. Defaults to short
listing fields.
list_subdir_contents: If true, return the directory and any contents,
otherwise return only the directory itself.
"""
self._iterator_func = iterator_func
self.logger = logger
self._print_object_func = print_object_func
self._print_dir_func = print_dir_func
self._print_dir_header_func = print_dir_header_func
self._print_bucket_header_func = print_bucket_header_func
self._print_dir_summary_func = print_dir_summary_func
self._print_newline_func = print_newline_func
self.all_versions = all_versions
self.should_recurse = should_recurse
self.exclude_patterns = exclude_patterns
self.bucket_listing_fields = fields
self.list_subdir_contents = list_subdir_contents
def ExpandUrlAndPrint(self, url):
"""Iterates over the given URL and calls print functions.
Args:
url: StorageUrl to iterate over.
Returns:
      (num_dirs, num_objects, num_bytes) total number of directories, objects,
      and bytes iterated.
"""
num_objects = 0
num_dirs = 0
num_bytes = 0
print_newline = False
if url.IsBucket() or self.should_recurse:
# IsBucket() implies a top-level listing.
if url.IsBucket():
self._print_bucket_header_func(url)
return self._RecurseExpandUrlAndPrint(url.url_string,
print_initial_newline=False)
else:
# User provided a prefix or object URL, but it's impossible to tell
# which until we do a listing and see what matches.
top_level_iterator = PluralityCheckableIterator(
self._iterator_func(
url.CreatePrefixUrl(wildcard_suffix=None),
all_versions=self.all_versions).IterAll(
expand_top_level_buckets=True,
bucket_listing_fields=self.bucket_listing_fields))
plurality = top_level_iterator.HasPlurality()
try:
top_level_iterator.PeekException()
except EncryptionException:
# Detailed listing on a single object can perform a GetObjectMetadata
# call, which raises if a matching encryption key isn't found.
# Re-iterate without requesting encrypted fields.
top_level_iterator = PluralityCheckableIterator(
self._iterator_func(
url.CreatePrefixUrl(wildcard_suffix=None),
all_versions=self.all_versions).IterAll(
expand_top_level_buckets=True,
bucket_listing_fields=UNENCRYPTED_FULL_LISTING_FIELDS))
plurality = top_level_iterator.HasPlurality()
for blr in top_level_iterator:
if self._MatchesExcludedPattern(blr):
continue
if blr.IsObject():
nd = 0
no, nb = self._print_object_func(blr)
print_newline = True
elif blr.IsPrefix():
if print_newline:
self._print_newline_func()
else:
print_newline = True
if plurality and self.list_subdir_contents:
self._print_dir_header_func(blr)
elif plurality and not self.list_subdir_contents:
print_newline = False
expansion_url_str = StorageUrlFromString(
blr.url_string).CreatePrefixUrl(
wildcard_suffix='*' if self.list_subdir_contents else None)
nd, no, nb = self._RecurseExpandUrlAndPrint(expansion_url_str)
self._print_dir_summary_func(nb, blr)
else:
# We handle all buckets at the top level, so this should never happen.
raise CommandException(
'Sub-level iterator returned a CsBucketListingRef of type Bucket')
num_objects += no
num_dirs += nd
num_bytes += nb
return num_dirs, num_objects, num_bytes
def _RecurseExpandUrlAndPrint(self, url_str, print_initial_newline=True):
"""Iterates over the given URL string and calls print functions.
Args:
url_str: String describing StorageUrl to iterate over.
Must be of depth one or higher.
print_initial_newline: If true, print a newline before recursively
expanded prefixes.
Returns:
      (num_dirs, num_objects, num_bytes) total number of directories, objects,
      and bytes iterated.
"""
num_objects = 0
num_dirs = 0
num_bytes = 0
for blr in self._iterator_func(
'%s' % url_str, all_versions=self.all_versions).IterAll(
expand_top_level_buckets=True,
bucket_listing_fields=self.bucket_listing_fields):
if self._MatchesExcludedPattern(blr):
continue
if blr.IsObject():
nd = 0
no, nb = self._print_object_func(blr)
elif blr.IsPrefix():
if self.should_recurse:
if print_initial_newline:
self._print_newline_func()
else:
print_initial_newline = True
self._print_dir_header_func(blr)
expansion_url_str = StorageUrlFromString(
blr.url_string).CreatePrefixUrl(wildcard_suffix='*')
nd, no, nb = self._RecurseExpandUrlAndPrint(expansion_url_str)
self._print_dir_summary_func(nb, blr)
else:
nd, no, nb = 1, 0, 0
self._print_dir_func(blr)
else:
# We handle all buckets at the top level, so this should never happen.
raise CommandException(
'Sub-level iterator returned a bucketListingRef of type Bucket')
num_dirs += nd
num_objects += no
num_bytes += nb
return num_dirs, num_objects, num_bytes
def _MatchesExcludedPattern(self, blr):
"""Checks bucket listing reference against patterns to exclude.
Args:
blr: BucketListingRef to check.
Returns:
True if reference matches a pattern and should be excluded.
"""
if self.exclude_patterns:
tomatch = six.ensure_str(blr.url_string)
for pattern in self.exclude_patterns:
if fnmatch.fnmatch(tomatch, six.ensure_str(pattern)):
return True
return False
| en | 0.76106 | # -*- coding: utf-8 -*- # Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Utility functions and class for listing commands such as ls and du. Returns a string with a vertically aligned label and value. Labels of the same indentation level will start at the same column. Values will all start at the same column (unless the combined left-indent and label length is excessively long). If a value spans multiple lines, indentation will only be applied to the first line. Example output from several calls: Label1: Value (default indent of 1 was used) Sublabel1: Value (used indent of 2 here) Label2: Value Args: label: The label to print in the first column. value: The value to print in the second column. indent: (4 * indent) spaces will be placed before the label. Returns: A string with a vertically aligned label and value. # pylint: disable=unused-argument Default function for printing headers for buckets. Header is printed prior to listing the contents of the bucket. Args: bucket_listing_ref: BucketListingRef of type BUCKET. Default function for printing buckets or prefixes. Args: bucket_listing_ref: BucketListingRef of type BUCKET or PREFIX. # pylint: disable=unused-argument Off-by-default function for printing buckets or prefix size summaries. Args: num_bytes: Number of bytes contained in the directory. bucket_listing_ref: BucketListingRef of type BUCKET or PREFIX. Default function for printing headers for prefixes. Header is printed prior to listing the contents of the prefix. Args: bucket_listing_ref: BucketListingRef of type PREFIX. Default function for printing new lines between directories. # pylint: disable=too-many-statements Print full info for given object (like what displays for gsutil ls -L). Args: bucket_listing_ref: BucketListingRef being listed. Must have ref_type OBJECT and a populated root_object with the desired fields. incl_acl: True if ACL info should be output. Returns: Tuple (number of objects, object_length) Raises: Exception: if calling bug encountered. # JSON API won't return acls as part of the response unless we have # full control scope # Empty ACLs are possible with Bucket Policy Only and no longer imply # ACCESS DENIED anymore. Default printing function for objects. Args: bucket_listing_ref: BucketListingRef of type OBJECT. Returns: (num_objects, num_bytes). # Windows throws an IOError 0 here for object names containing Unicode # chars. Ignore it. Helper class for ls and du. Initializes the helper class to prepare for listing. Args: iterator_func: Function for instantiating iterator. Inputs- url_string- Url string to iterate on. May include wildcards. all_versions=False- If true, iterate over all object versions. logger: Logger for outputting warnings / errors. print_object_func: Function for printing objects. print_dir_func: Function for printing buckets/prefixes. print_dir_header_func: Function for printing header line for buckets or prefixes. 
print_bucket_header_func: Function for printing header line for buckets or prefixes. print_dir_summary_func: Function for printing size summaries about buckets/prefixes. print_newline_func: Function for printing new lines between dirs. all_versions: If true, list all object versions. should_recurse: If true, recursively listing buckets/prefixes. exclude_patterns: Patterns to exclude when listing. fields: Fields to request from bucket listings; this should include all fields that need to be populated in objects so they can be listed. Can be set to None to retrieve all object fields. Defaults to short listing fields. list_subdir_contents: If true, return the directory and any contents, otherwise return only the directory itself. Iterates over the given URL and calls print functions. Args: url: StorageUrl to iterate over. Returns: (num_objects, num_bytes) total number of objects and bytes iterated. # IsBucket() implies a top-level listing. # User provided a prefix or object URL, but it's impossible to tell # which until we do a listing and see what matches. # Detailed listing on a single object can perform a GetObjectMetadata # call, which raises if a matching encryption key isn't found. # Re-iterate without requesting encrypted fields. # We handle all buckets at the top level, so this should never happen. Iterates over the given URL string and calls print functions. Args: url_str: String describing StorageUrl to iterate over. Must be of depth one or higher. print_initial_newline: If true, print a newline before recursively expanded prefixes. Returns: (num_objects, num_bytes) total number of objects and bytes iterated. # We handle all buckets at the top level, so this should never happen. Checks bucket listing reference against patterns to exclude. Args: blr: BucketListingRef to check. Returns: True if reference matches a pattern and should be excluded. | 1.453304 | 1 |
app/config/cnMysql.py | itay-moav/rahl_commander | 1 | 10510 | <filename>app/config/cnMysql.py
'''
Created on Dec 28, 2021
@author: Itay
Abstracting the DB connection piece
'''
import mysql.connector as My
from app import logging as L
from app import exceptions as exceptions
class Connection():
'''
Abstracting the actions on a DB
'''
def __init__(self, connection_config):
self._debug_connection_info = "{}@{}".format(connection_config['username'],connection_config['host'])
L.info("Trying to connect to {}".format(self._debug_connection_info))
self._connection = My.connect(user=connection_config['username'], password=connection_config['password'],host=connection_config['host'],buffered=True)
def get_connection(self):
return self._connection
def change_db(self,new_db):
connection = self.get_connection()
if connection.database != new_db and new_db:
try:
connection.database = new_db
except My.Error as err:
if err.errno == My.errorcode.ER_BAD_DB_ERROR:
return False
else:
msg = "Error occured while changing DB in mysql connection [{}]".format(err)
L.fatal(msg)
raise exceptions.SQLError(msg)
return True
def cursor(self):
return self.get_connection().cursor()
def commit(self):
return self.get_connection().commit()
def debug_connection(self):
return "server: [{}] database: [{}]".format(self.debug_connection_info,self.get_connection().database)
def execute(self,sql,query_params=()):
L.debug("Running sql [{}]".format(sql))
cursor = self.cursor()
cursor.execute(sql,query_params)
return cursor
def execute_fetchall(self,sql):
cursor = self.execute(sql)
return cursor.fetchall()
def insert_rcom_sql_upgrades(self,db,file_values):
sql = "INSERT IGNORE INTO {}.rcom_sql_upgrades VALUES {}".format(db,file_values)
self.execute(sql)
def mark_complete_rcom_sql_upgrades(self,db,file_name):
sql = sql = "UPDATE {}.rcom_sql_upgrades SET execution_status='completed' WHERE file_name = %s LIMIT 1".format(db)
self.execute(sql,(file_name,)) | <filename>app/config/cnMysql.py
'''
Created on Dec 28, 2021
@author: Itay
Abstracting the DB connection piece
'''
import mysql.connector as My
from app import logging as L
from app import exceptions as exceptions
class Connection():
'''
Abstracting the actions on a DB
'''
def __init__(self, connection_config):
self._debug_connection_info = "{}@{}".format(connection_config['username'],connection_config['host'])
L.info("Trying to connect to {}".format(self._debug_connection_info))
self._connection = My.connect(user=connection_config['username'], password=connection_config['password'],host=connection_config['host'],buffered=True)
def get_connection(self):
return self._connection
def change_db(self,new_db):
connection = self.get_connection()
if connection.database != new_db and new_db:
try:
connection.database = new_db
except My.Error as err:
if err.errno == My.errorcode.ER_BAD_DB_ERROR:
return False
else:
msg = "Error occured while changing DB in mysql connection [{}]".format(err)
L.fatal(msg)
raise exceptions.SQLError(msg)
return True
def cursor(self):
return self.get_connection().cursor()
def commit(self):
return self.get_connection().commit()
def debug_connection(self):
return "server: [{}] database: [{}]".format(self.debug_connection_info,self.get_connection().database)
def execute(self,sql,query_params=()):
L.debug("Running sql [{}]".format(sql))
cursor = self.cursor()
cursor.execute(sql,query_params)
return cursor
def execute_fetchall(self,sql):
cursor = self.execute(sql)
return cursor.fetchall()
def insert_rcom_sql_upgrades(self,db,file_values):
sql = "INSERT IGNORE INTO {}.rcom_sql_upgrades VALUES {}".format(db,file_values)
self.execute(sql)
def mark_complete_rcom_sql_upgrades(self,db,file_name):
sql = sql = "UPDATE {}.rcom_sql_upgrades SET execution_status='completed' WHERE file_name = %s LIMIT 1".format(db)
self.execute(sql,(file_name,)) | en | 0.859796 | Created on Dec 28, 2021 @author: Itay Abstracting the DB connection piece Abstracting the actions on a DB | 2.794736 | 3 |
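A hedged usage sketch for the Connection wrapper in the row above; the host and credentials are placeholder values, and the import path simply mirrors the file path app/config/cnMysql.py.

from app.config.cnMysql import Connection

config = {'username': 'rcom', 'password': 'secret', 'host': '127.0.0.1'}  # placeholders
conn = Connection(config)
if conn.change_db('my_schema'):  # returns False if the schema does not exist
    rows = conn.execute_fetchall('SELECT 1')
    conn.commit()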
src/detector/pre_process_test_data.py | DomGonthier/PecheFantome | 0 | 10511 | import os
from tqdm import tqdm
import cv2
import numpy as np
#pre process test data:
path = "raw_test_data/"
list_width = []
list_height = []
list_image = []
def pre_process():
print("analyze images")
for Files in tqdm(os.listdir(path)):
if "jpg" in Files:
#print(Files)
img = cv2.imread(path + Files, 1)
height, width, chan = img.shape
#print(width)
#print(height)
list_width.append(width)
list_height.append(height)
max_width = np.max(list_width)
max_height = np.max(list_height)
if max_height == max_width :
print("max height == max width")
print("format images: ")
for image in tqdm(os.listdir(path)):
if "jpg" in image:
#print(image)
img = cv2.imread(path + image, 1)
height, width, chan = img.shape
new_height = (round(max_height/16)+1)*16 # image dimension needs to be a multiple of 16
new_width = new_height # image needs to be squared
delta_width = new_width - width
delta_height = new_height - height
#print("delta height",delta_height)
#print("delta width",delta_width)
pad_img = cv2.copyMakeBorder(img, 0, delta_height, 0, delta_width, cv2.BORDER_CONSTANT,None, value = 0)
#list_image.append(pad_img)
cv2.imwrite("test_data/"+image, pad_img)
pre_process()
for image in list_image:
print(image.shape)
| import os
from tqdm import tqdm
import cv2
import numpy as np
#pre process test data:
path = "raw_test_data/"
list_width = []
list_height = []
list_image = []
def pre_process():
print("analyze images")
for Files in tqdm(os.listdir(path)):
if "jpg" in Files:
#print(Files)
img = cv2.imread(path + Files, 1)
height, width, chan = img.shape
#print(width)
#print(height)
list_width.append(width)
list_height.append(height)
max_width = np.max(list_width)
max_height = np.max(list_height)
if max_height == max_width :
print("max height == max width")
print("format images: ")
for image in tqdm(os.listdir(path)):
if "jpg" in image:
#print(image)
img = cv2.imread(path + image, 1)
height, width, chan = img.shape
new_height = (round(max_height/16)+1)*16 # image dimension needs to be a multiple of 16
new_width = new_height # image needs to be squared
delta_width = new_width - width
delta_height = new_height - height
#print("delta height",delta_height)
#print("delta width",delta_width)
pad_img = cv2.copyMakeBorder(img, 0, delta_height, 0, delta_width, cv2.BORDER_CONSTANT,None, value = 0)
#list_image.append(pad_img)
cv2.imwrite("test_data/"+image, pad_img)
pre_process()
for image in list_image:
print(image.shape)
| en | 0.481555 | #pre process test data: #print(Files) #print(width) #print(height) #print(image) # image dimension needs to be a multiple of 16 # image needs to be squared #print("delta height",delta_height) #print("delta width",delta_width) #list_image.append(pad_img) | 2.852944 | 3 |
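The script above boils down to one padding rule: every image is grown to a square whose side is the next multiple of 16 above the tallest input. A standalone sketch of just that step (the helper name and demo array are illustrative only):

import cv2
import numpy as np

def pad_to_square_multiple_of_16(img, max_height):
    h, w = img.shape[:2]
    side = (round(max_height / 16) + 1) * 16  # same formula as the script above
    # Pad on the bottom and right with zeros, as cv2.copyMakeBorder does above.
    return cv2.copyMakeBorder(img, 0, side - h, 0, side - w,
                              cv2.BORDER_CONSTANT, value=0)

demo = np.zeros((100, 80, 3), dtype=np.uint8)
print(pad_to_square_multiple_of_16(demo, 100).shape)  # (112, 112, 3)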
tensorflow/python/training/localhost_cluster_performance_test.py | connectthefuture/tensorflow | 101 | 10512 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and benchmarks for creating RPC clusters on localhost."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import portpicker
import tensorflow as tf
def create_local_cluster(num_workers, num_ps, protocol="grpc"):
"""Create local GRPC servers and return their servers."""
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]}
cs = tf.train.ClusterSpec(cluster_dict)
workers = [
tf.train.Server(
cs, job_name="worker", protocol=protocol, task_index=ix, start=True)
for ix in range(num_workers)]
ps_servers = [
tf.train.Server(
cs, job_name="ps", protocol=protocol, task_index=ix, start=True)
for ix in range(num_ps)]
return workers, ps_servers
class CreateLocalClusterTest(tf.test.TestCase):
def testCreateLocalCluster(self):
workers, _ = create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
var0 = tf.Variable(0.0)
with tf.device("/job:ps/task:1"):
var1 = tf.Variable(1.0)
worker_sessions[0].run([var0.initializer, var1.initializer])
with tf.device("/job:ps/task:0"):
var2 = tf.Variable(2.0)
with tf.device("/job:ps/task:1"):
var3 = tf.Variable(3.0)
worker_sessions[1].run([var2.initializer, var3.initializer])
# Read values back in the opposite session
self.assertAllEqual(0.0, var0.eval(session=worker_sessions[1]))
self.assertAllEqual(1.0, var1.eval(session=worker_sessions[1]))
self.assertAllEqual(2.0, var2.eval(session=worker_sessions[0]))
self.assertAllEqual(3.0, var3.eval(session=worker_sessions[0]))
class CreateLocalClusterBenchmark(tf.test.Benchmark):
def benchmarkCreateLocalCluster(self):
deltas = []
iters = 5
for _ in range(iters):
start_time = time.time()
create_local_cluster(num_workers=1, num_ps=10)
end_time = time.time()
deltas.append(end_time - start_time)
median_deltas = np.median(deltas)
print(
"\n\nbenchmark_create_local_cluster_1_worker_10_ps. "
"iterations: %d, median wall time: %g\n\n" % (iters, median_deltas))
self.report_benchmark(
iters=iters,
wall_time=median_deltas,
name="benchmark_create_local_cluster_1_worker_10_ps")
class PartitionedVariablesBenchmark(tf.test.Benchmark):
def benchmark_create_1000_partitions_with_100_parameter_servers(self):
workers, _ = create_local_cluster(num_workers=1, num_ps=100)
worker_sessions = [tf.Session(w.target) for w in workers]
worker = worker_sessions[0]
partition_sizes = (1, 512, 1024*32, 1024*128)
partitioned = []
for partition_size in partition_sizes:
# max_shard_bytes is 4, shape is 1000*partition_size float32s which should
# partition into 1000 shards, each containing partition_size float32s.
print("Building partitioned variable with %d floats per partition"
% partition_size)
with tf.device(tf.train.replica_device_setter(ps_tasks=100)):
partitioned_ix = tf.get_variable(
"partitioned_%d" % partition_size,
shape=[1000 * partition_size],
dtype=tf.float32,
# Each partition to have exactly N float32s
partitioner=tf.variable_axis_size_partitioner(
max_shard_bytes=4 * partition_size))
# Concatenates along axis 0
partitioned.append(tf.convert_to_tensor(partitioned_ix))
tf.global_variables_initializer().run(session=worker)
for ix, partition_size in enumerate(partition_sizes):
print("Running benchmark having partitions with %d floats"
% partition_size)
self.run_op_benchmark(
worker,
partitioned[ix],
name=("read_concat_1000_partitions_from_"
"100_parameter_servers_partsize_%d_floats" % partition_size))
if __name__ == "__main__":
tf.test.main()
| # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and benchmarks for creating RPC clusters on localhost."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import portpicker
import tensorflow as tf
def create_local_cluster(num_workers, num_ps, protocol="grpc"):
"""Create local GRPC servers and return their servers."""
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]}
cs = tf.train.ClusterSpec(cluster_dict)
workers = [
tf.train.Server(
cs, job_name="worker", protocol=protocol, task_index=ix, start=True)
for ix in range(num_workers)]
ps_servers = [
tf.train.Server(
cs, job_name="ps", protocol=protocol, task_index=ix, start=True)
for ix in range(num_ps)]
return workers, ps_servers
class CreateLocalClusterTest(tf.test.TestCase):
def testCreateLocalCluster(self):
workers, _ = create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
var0 = tf.Variable(0.0)
with tf.device("/job:ps/task:1"):
var1 = tf.Variable(1.0)
worker_sessions[0].run([var0.initializer, var1.initializer])
with tf.device("/job:ps/task:0"):
var2 = tf.Variable(2.0)
with tf.device("/job:ps/task:1"):
var3 = tf.Variable(3.0)
worker_sessions[1].run([var2.initializer, var3.initializer])
# Read values back in the opposite session
self.assertAllEqual(0.0, var0.eval(session=worker_sessions[1]))
self.assertAllEqual(1.0, var1.eval(session=worker_sessions[1]))
self.assertAllEqual(2.0, var2.eval(session=worker_sessions[0]))
self.assertAllEqual(3.0, var3.eval(session=worker_sessions[0]))
class CreateLocalClusterBenchmark(tf.test.Benchmark):
def benchmarkCreateLocalCluster(self):
deltas = []
iters = 5
for _ in range(iters):
start_time = time.time()
create_local_cluster(num_workers=1, num_ps=10)
end_time = time.time()
deltas.append(end_time - start_time)
median_deltas = np.median(deltas)
print(
"\n\nbenchmark_create_local_cluster_1_worker_10_ps. "
"iterations: %d, median wall time: %g\n\n" % (iters, median_deltas))
self.report_benchmark(
iters=iters,
wall_time=median_deltas,
name="benchmark_create_local_cluster_1_worker_10_ps")
class PartitionedVariablesBenchmark(tf.test.Benchmark):
def benchmark_create_1000_partitions_with_100_parameter_servers(self):
workers, _ = create_local_cluster(num_workers=1, num_ps=100)
worker_sessions = [tf.Session(w.target) for w in workers]
worker = worker_sessions[0]
partition_sizes = (1, 512, 1024*32, 1024*128)
partitioned = []
for partition_size in partition_sizes:
# max_shard_bytes is 4, shape is 1000*partition_size float32s which should
# partition into 1000 shards, each containing partition_size float32s.
print("Building partitioned variable with %d floats per partition"
% partition_size)
with tf.device(tf.train.replica_device_setter(ps_tasks=100)):
partitioned_ix = tf.get_variable(
"partitioned_%d" % partition_size,
shape=[1000 * partition_size],
dtype=tf.float32,
# Each partition to have exactly N float32s
partitioner=tf.variable_axis_size_partitioner(
max_shard_bytes=4 * partition_size))
# Concatenates along axis 0
partitioned.append(tf.convert_to_tensor(partitioned_ix))
tf.global_variables_initializer().run(session=worker)
for ix, partition_size in enumerate(partition_sizes):
print("Running benchmark having partitions with %d floats"
% partition_size)
self.run_op_benchmark(
worker,
partitioned[ix],
name=("read_concat_1000_partitions_from_"
"100_parameter_servers_partsize_%d_floats" % partition_size))
if __name__ == "__main__":
tf.test.main()
| en | 0.838423 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Tests and benchmarks for creating RPC clusters on localhost. Create local GRPC servers and return their servers. # Read values back in the opposite session # max_shard_bytes is 4, shape is 1000*partition_size float32s which should # partition into 1000 shards, each containing partition_size float32s. # Each partition to have exactly N float32s # Concatenates along axis 0 | 1.793451 | 2 |
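A short sketch of driving create_local_cluster() from the test file above outside of the test classes; it assumes the definitions above are in scope and uses the same TF1-era session API as the file itself.

import tensorflow as tf

workers, ps_servers = create_local_cluster(num_workers=1, num_ps=1)
with tf.device("/job:ps/task:0"):
    counter = tf.Variable(0.0)
with tf.Session(workers[0].target) as sess:
    sess.run(counter.initializer)
    print(sess.run(counter))  # 0.0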
extractor/util.py | bcskda/vk-archive-deepercopy | 1 | 10513 | import functools
import glob
import itertools
import logging
import os
from progressbar import progressbar
import re
import requests
from typing import List
class ValueSingleDispatch:
def __init__(self):
self._handlers = dict()
def register(self, key):
def decorator(fn: callable):
if key in self._handlers:
raise KeyError(key)
self._handlers[key] = fn
return fn
return decorator
def call(self, key, *args, **kwargs):
if key not in self._handlers:
raise KeyError(key)
return self._handlers[key](*args, **kwargs)
def valid_keys(self):
return self._handlers.keys()
def alphanumeric_glob(pattern: str):
"""Glob and sort alpahnumerically. Limitations: exactly one `*', no `?', file names with single extention."""
matches = glob.glob(pattern)
asterisk_pos = pattern.find('*')
matches.sort(key=lambda name: int(name[asterisk_pos:name.rfind('.')]))
return matches
def findall_in_files(pattern: re.Pattern, filenames: List[str], encoding: str) -> re.Match:
"""Generator"""
for filename in filenames:
logging.debug('util.findall_in_files: input file %s', filename)
with open(filename, 'rb') as ifile:
for match in pattern.findall(ifile.read().decode(encoding)):
logging.debug('util.findall_in_files(): match: file = %s, text = %s', filename, match)
yield match
def make_pattern(url_regex: str, extentions: List[str]) -> re.Pattern:
if extentions:
ext_regex = '({})'.format('|'.join(extentions))
else:
ext_regex = '()'
return re.compile(url_regex.format(extentions=ext_regex))
def download_by_pattern(url_regex: str, filenames: List[str], output_dir: str, *, extentions=[], encoding='windows-1251', limit=None):
logging.debug('util.download_by_pattern(): pattern = %s, extentions = %s', url_regex, extentions)
pattern = make_pattern(url_regex, extentions)
matches = findall_in_files(pattern, filenames, encoding)
if limit is not None:
matches = itertools.islice(matches, limit)
matches = list(matches)
logging.info('util.download_by_pattern(): %d matches', len(matches))
os.makedirs(output_dir, exist_ok=True)
downloads = 0
# TODO statistics by extension
for idx, (url, ext) in progressbar(enumerate(matches), max_value=len(matches)):
local_name = '{:07d}'.format(idx) + '_' + os.path.basename(url)
try:
download(url, os.path.join(output_dir, local_name))
downloads += 1
except Exception as e:
logging.warning('util.download_by_pattern(): unhandled exception: url = %s, e = %s', url, e)
logging.info('util.download_by_pattern(): %d successful downloads', downloads)
if downloads < len(matches):
logging.warning('util.download_by_pattern(): %d downloads failed, see log for warnings', len(matches) - downloads)
def download(url: str, local_path: str) -> bool:
logging.debug('util.download(): url = %s, local = %s', url, local_path)
req = requests.get(url)
with open(local_path, 'wb') as ofile:
ofile.write(req.content)
| import functools
import glob
import itertools
import logging
import os
from progressbar import progressbar
import re
import requests
from typing import List
class ValueSingleDispatch:
def __init__(self):
self._handlers = dict()
def register(self, key):
def decorator(fn: callable):
if key in self._handlers:
raise KeyError(key)
self._handlers[key] = fn
return fn
return decorator
def call(self, key, *args, **kwargs):
if key not in self._handlers:
raise KeyError(key)
return self._handlers[key](*args, **kwargs)
def valid_keys(self):
return self._handlers.keys()
def alphanumeric_glob(pattern: str):
"""Glob and sort alpahnumerically. Limitations: exactly one `*', no `?', file names with single extention."""
matches = glob.glob(pattern)
asterisk_pos = pattern.find('*')
matches.sort(key=lambda name: int(name[asterisk_pos:name.rfind('.')]))
return matches
def findall_in_files(pattern: re.Pattern, filenames: List[str], encoding: str) -> re.Match:
"""Generator"""
for filename in filenames:
logging.debug('util.findall_in_files: input file %s', filename)
with open(filename, 'rb') as ifile:
for match in pattern.findall(ifile.read().decode(encoding)):
logging.debug('util.findall_in_files(): match: file = %s, text = %s', filename, match)
yield match
def make_pattern(url_regex: str, extentions: List[str]) -> re.Pattern:
if extentions:
ext_regex = '({})'.format('|'.join(extentions))
else:
ext_regex = '()'
return re.compile(url_regex.format(extentions=ext_regex))
def download_by_pattern(url_regex: str, filenames: List[str], output_dir: str, *, extentions=[], encoding='windows-1251', limit=None):
logging.debug('util.download_by_pattern(): pattern = %s, extentions = %s', url_regex, extentions)
pattern = make_pattern(url_regex, extentions)
matches = findall_in_files(pattern, filenames, encoding)
if limit is not None:
matches = itertools.islice(matches, limit)
matches = list(matches)
logging.info('util.download_by_pattern(): %d matches', len(matches))
os.makedirs(output_dir, exist_ok=True)
downloads = 0
# TODO statistics by extension
for idx, (url, ext) in progressbar(enumerate(matches), max_value=len(matches)):
local_name = '{:07d}'.format(idx) + '_' + os.path.basename(url)
try:
download(url, os.path.join(output_dir, local_name))
downloads += 1
except Exception as e:
logging.warning('util.download_by_pattern(): unhandled exception: url = %s, e = %s', url, e)
logging.info('util.download_by_pattern(): %d successful downloads', downloads)
if downloads < len(matches):
logging.warning('util.download_by_pattern(): %d downloads failed, see log for warnings', len(matches) - downloads)
def download(url: str, local_path: str) -> bool:
logging.debug('util.download(): url = %s, local = %s', url, local_path)
req = requests.get(url)
with open(local_path, 'wb') as ofile:
ofile.write(req.content)
 | en | 0.871351 | Glob and sort alphanumerically. Limitations: exactly one `*', no `?', file names with a single extension. Generator # TODO statistics by extension | 2.470792 | 2 |
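A hypothetical invocation of the helpers in the row above; the regex, input files and output directory are made-up examples rather than values from the project.

from extractor.util import alphanumeric_glob, download_by_pattern

pages = alphanumeric_glob('dump/messages*.html')
# {extentions} is filled in by make_pattern() with e.g. (jpg|png).
download_by_pattern(r'(https?://[^"]+\.{extentions})', pages, 'downloads/',
                    extentions=['jpg', 'png'], limit=50)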
setup.py | gillins/pyshepseg | 5 | 10514 | <reponame>gillins/pyshepseg
#Copyright 2021 <NAME> and <NAME>. All rights reserved.
#
#Permission is hereby granted, free of charge, to any person
#obtaining a copy of this software and associated documentation
#files (the "Software"), to deal in the Software without restriction,
#including without limitation the rights to use, copy, modify,
#merge, publish, distribute, sublicense, and/or sell copies of the
#Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
#OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
#ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
#CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
#WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from numpy.distutils.core import setup
import pyshepseg
setup(name='pyshepseg',
version=pyshepseg.SHEPSEG_VERSION,
description='Python implementation of the image segmentation algorithm described by Shepherd et al',
author='<NAME> and <NAME>',
scripts=['bin/test_pyshepseg.py', 'bin/test_pyshepseg_tiling.py',
'bin/test_pyshepseg_subset.py'],
packages=['pyshepseg'],
license='LICENSE.txt',
url='https://github.com/ubarsc/pyshepseg'
)
| #Copyright 2021 <NAME> and <NAME>. All rights reserved.
#
#Permission is hereby granted, free of charge, to any person
#obtaining a copy of this software and associated documentation
#files (the "Software"), to deal in the Software without restriction,
#including without limitation the rights to use, copy, modify,
#merge, publish, distribute, sublicense, and/or sell copies of the
#Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
#OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
#ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
#CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
#WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from numpy.distutils.core import setup
import pyshepseg
setup(name='pyshepseg',
version=pyshepseg.SHEPSEG_VERSION,
description='Python implementation of the image segmentation algorithm described by Shepherd et al',
author='<NAME> and <NAME>',
scripts=['bin/test_pyshepseg.py', 'bin/test_pyshepseg_tiling.py',
'bin/test_pyshepseg_subset.py'],
packages=['pyshepseg'],
license='LICENSE.txt',
url='https://github.com/ubarsc/pyshepseg'
) | en | 0.738971 | #Copyright 2021 <NAME> and <NAME>. All rights reserved. # #Permission is hereby granted, free of charge, to any person #obtaining a copy of this software and associated documentation #files (the "Software"), to deal in the Software without restriction, #including without limitation the rights to use, copy, modify, #merge, publish, distribute, sublicense, and/or sell copies of the #Software, and to permit persons to whom the Software is furnished #to do so, subject to the following conditions: # #The above copyright notice and this permission notice shall be #included in all copies or substantial portions of the Software. # #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, #EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES #OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. #IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR #ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF #CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION #WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 1.792206 | 2 |
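As a quick sanity check after installing the package defined above, the version constant that setup() reads can be printed directly (assuming pyshepseg is on the import path):

import pyshepseg
print(pyshepseg.SHEPSEG_VERSION)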
src/utils.py | daochenzha/SimTSC | 23 | 10515 | import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
def read_dataset_from_npy(path):
""" Read dataset from .npy file
"""
data = np.load(path, allow_pickle=True)
return data[()]['X'], data[()]['y'], data[()]['train_idx'], data[()]['test_idx']
def read_dataset(ucr_root_dir, dataset_name, shot):
""" Read univariate dataset from UCR
"""
dataset_dir = os.path.join(ucr_root_dir, dataset_name)
df_train = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TRAIN.tsv'), sep='\t', header=None)
df_test = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TEST.tsv'), sep='\t', header=None)
y_train = df_train.values[:, 0].astype(np.int64)
y_test = df_test.values[:, 0].astype(np.int64)
y = np.concatenate((y_train, y_test))
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
X_train = df_train.drop(columns=[0]).astype(np.float32)
X_test = df_test.drop(columns=[0]).astype(np.float32)
X_train.columns = range(X_train.shape[1])
X_test.columns = range(X_test.shape[1])
X_train = X_train.values
X_test = X_test.values
X = np.concatenate((X_train, X_test))
idx = np.array([i for i in range(len(X))])
np.random.shuffle(idx)
train_idx, test_idx = idx[:int(len(idx)*0.8)], idx[int(len(idx)*0.8):]
tmp = [[] for _ in range(len(np.unique(y)))]
for i in train_idx:
tmp[y[i]].append(i)
train_idx = []
for _tmp in tmp:
train_idx.extend(_tmp[:shot])
# znorm
X[np.isnan(X)] = 0
std_ = X.std(axis=1, keepdims=True)
std_[std_ == 0] = 1.0
X = (X - X.mean(axis=1, keepdims=True)) / std_
# add a dimension to make it multivariate with one dimension
X = X.reshape((X.shape[0], 1, X.shape[1]))
return X, y, train_idx, test_idx
def read_multivariate_dataset(root_dir, dataset_name, shot):
""" Read multivariate dataset
"""
X = np.load(os.path.join(root_dir, dataset_name+".npy"), allow_pickle=True)
y = np.loadtxt(os.path.join(root_dir, dataset_name+'_label.txt'))
y = y.astype(np.int64)
dim = X[0].shape[0]
max_length = 0
for _X in X:
if _X.shape[1] > max_length:
max_length = _X.shape[1]
X_list = []
for i in range(len(X)):
_X = np.zeros((dim, max_length))
_X[:, :X[i].shape[1]] = X[i]
X_list.append(_X)
X = np.array(X_list, dtype=np.float32)
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
idx = np.array([i for i in range(len(X))])
np.random.shuffle(idx)
train_idx, test_idx = idx[:int(len(idx)*0.8)], idx[int(len(idx)*0.8):]
tmp = [[] for _ in range(len(np.unique(y)))]
for i in train_idx:
tmp[y[i]].append(i)
train_idx = []
for _tmp in tmp:
train_idx.extend(_tmp[:shot])
# znorm
std_ = X.std(axis=2, keepdims=True)
std_[std_ == 0] = 1.0
X = (X - X.mean(axis=2, keepdims=True)) / std_
return X, y, train_idx, test_idx
def read_X(ucr_root_dir, dataset_name):
""" Read the raw time-series
"""
dataset_dir = os.path.join(ucr_root_dir, dataset_name)
df_train = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TRAIN.tsv'), sep='\t', header=None)
df_test = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TEST.tsv'), sep='\t', header=None)
X_train = df_train.drop(columns=[0]).astype(np.float32)
X_test = df_test.drop(columns=[0]).astype(np.float32)
X_train.columns = range(X_train.shape[1])
X_test.columns = range(X_test.shape[1])
X_train = X_train.values
X_test = X_test.values
X = np.concatenate((X_train, X_test), axis=0)
return X
class Logger:
def __init__(self, f):
self.f = f
def log(self, content):
print(content)
self.f.write(content + '\n')
self.f.flush()
| import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
def read_dataset_from_npy(path):
""" Read dataset from .npy file
"""
data = np.load(path, allow_pickle=True)
return data[()]['X'], data[()]['y'], data[()]['train_idx'], data[()]['test_idx']
def read_dataset(ucr_root_dir, dataset_name, shot):
""" Read univariate dataset from UCR
"""
dataset_dir = os.path.join(ucr_root_dir, dataset_name)
df_train = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TRAIN.tsv'), sep='\t', header=None)
df_test = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TEST.tsv'), sep='\t', header=None)
y_train = df_train.values[:, 0].astype(np.int64)
y_test = df_test.values[:, 0].astype(np.int64)
y = np.concatenate((y_train, y_test))
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
X_train = df_train.drop(columns=[0]).astype(np.float32)
X_test = df_test.drop(columns=[0]).astype(np.float32)
X_train.columns = range(X_train.shape[1])
X_test.columns = range(X_test.shape[1])
X_train = X_train.values
X_test = X_test.values
X = np.concatenate((X_train, X_test))
idx = np.array([i for i in range(len(X))])
np.random.shuffle(idx)
train_idx, test_idx = idx[:int(len(idx)*0.8)], idx[int(len(idx)*0.8):]
tmp = [[] for _ in range(len(np.unique(y)))]
for i in train_idx:
tmp[y[i]].append(i)
train_idx = []
for _tmp in tmp:
train_idx.extend(_tmp[:shot])
# znorm
X[np.isnan(X)] = 0
std_ = X.std(axis=1, keepdims=True)
std_[std_ == 0] = 1.0
X = (X - X.mean(axis=1, keepdims=True)) / std_
# add a dimension to make it multivariate with one dimension
X = X.reshape((X.shape[0], 1, X.shape[1]))
return X, y, train_idx, test_idx
def read_multivariate_dataset(root_dir, dataset_name, shot):
""" Read multivariate dataset
"""
X = np.load(os.path.join(root_dir, dataset_name+".npy"), allow_pickle=True)
y = np.loadtxt(os.path.join(root_dir, dataset_name+'_label.txt'))
y = y.astype(np.int64)
dim = X[0].shape[0]
max_length = 0
for _X in X:
if _X.shape[1] > max_length:
max_length = _X.shape[1]
X_list = []
for i in range(len(X)):
_X = np.zeros((dim, max_length))
_X[:, :X[i].shape[1]] = X[i]
X_list.append(_X)
X = np.array(X_list, dtype=np.float32)
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
idx = np.array([i for i in range(len(X))])
np.random.shuffle(idx)
train_idx, test_idx = idx[:int(len(idx)*0.8)], idx[int(len(idx)*0.8):]
tmp = [[] for _ in range(len(np.unique(y)))]
for i in train_idx:
tmp[y[i]].append(i)
train_idx = []
for _tmp in tmp:
train_idx.extend(_tmp[:shot])
# znorm
std_ = X.std(axis=2, keepdims=True)
std_[std_ == 0] = 1.0
X = (X - X.mean(axis=2, keepdims=True)) / std_
return X, y, train_idx, test_idx
def read_X(ucr_root_dir, dataset_name):
""" Read the raw time-series
"""
dataset_dir = os.path.join(ucr_root_dir, dataset_name)
df_train = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TRAIN.tsv'), sep='\t', header=None)
df_test = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TEST.tsv'), sep='\t', header=None)
X_train = df_train.drop(columns=[0]).astype(np.float32)
X_test = df_test.drop(columns=[0]).astype(np.float32)
X_train.columns = range(X_train.shape[1])
X_test.columns = range(X_test.shape[1])
X_train = X_train.values
X_test = X_test.values
X = np.concatenate((X_train, X_test), axis=0)
return X
class Logger:
def __init__(self, f):
self.f = f
def log(self, content):
print(content)
self.f.write(content + '\n')
self.f.flush()
| en | 0.738589 | Read dataset from .npy file Read univariate dataset from UCR # znorm # add a dimension to make it multivariate with one dimension Read multivariate dataset # znorm Read the raw time-series | 2.8779 | 3 |
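A hypothetical driver for the helpers in the row above; the .npy path and log file name are placeholders.

from src.utils import read_dataset_from_npy, Logger

X, y, train_idx, test_idx = read_dataset_from_npy('tmp/Coffee_5shot.npy')  # placeholder path
with open('run.log', 'w') as f:
    logger = Logger(f)
    logger.log('train size: {}, test size: {}'.format(len(train_idx), len(test_idx)))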
mpinterfaces/mat2d/friction/analysis.py | yw-fang/MPInterfaces | 56 | 10516 | <filename>mpinterfaces/mat2d/friction/analysis.py
from __future__ import print_function, division, unicode_literals
import os
import warnings
import numpy as np
from scipy import interpolate
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.core.structure import Structure
from pymatgen import Element
from pymatgen.analysis.local_env import ValenceIonicRadiusEvaluator as VE
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, Henniggroup"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
__date__ = "March 3, 2017"
def get_corrugation_factor(structure):
"""
Calculate the "corrugation factor" for a 2D material.
The corrugation factor is defined as the sum of the
outer hemispheres of ionic radii of the atoms on the
material's top and bottom surfaces, divided by the
planar area of the whole unit cell's 001 plane. Top
and bottom corrugation factors are returned
separately in the final dictionary. In general,
a larger corrugation factor means a smoother surface.
Args:
structure (Structure): Pymatgen Structure object.
Returns:
corrugation_factors (dict): Dictionary of "top"
and "bottom" corrugation factors, e.g.
{"top": top_corrugation_factor,
"bottom": bottom_corrugation_factor}
"""
sites = structure.sites
valences = VE(structure).valences
formatted_valences = {}
for e in valences:
temp=e[-1]
if "+" in e or "-" in e:
try:
# Some element names have a number followed
# by a plus or minus, e.g. "O2-"
int(e[-2])
element = e[:-2]
except:
# Others are simply a plus or minus, e.g. "Cl-"
element = e[:-1]
else:
element = e
formatted_valences[Element(element)] = valences[e]
all_z_coords = [s.coords[2] for s in sites]
max_z = max(all_z_coords)
min_z = min(all_z_coords)
top_layer = [s for s in sites if abs(s.coords[2] - max_z) < 0.1]
bottom_layer = [s for s in sites if abs(s.coords[2] - min_z) < 0.1]
pi = np.pi
top_sphere_area = 0
bottom_sphere_area = 0
for site in top_layer:
if formatted_valences[site.specie] in site.specie.ionic_radii:
r = site.specie.ionic_radii[formatted_valences[site.specie]]
else:
r = site.specie.atomic_radius
top_sphere_area += 2*pi*r*r
for site in bottom_layer:
if formatted_valences[site.specie] in site.specie.ionic_radii:
r = site.specie.ionic_radii[formatted_valences[site.specie]]
else:
r = site.specie.atomic_radius
bottom_sphere_area += 2*pi*r*r
lattice = structure.lattice
area = abs(np.cross(lattice._matrix[0], lattice._matrix[1])[2])
corrugation = {"top": top_sphere_area / area,
"bottom": bottom_sphere_area / area}
return corrugation
def plot_gamma_surface(fmt='pdf'):
"""
Collect the energies from a grid of static energy
calculations to plot the Gamma surface between two layers of the 2D
material.
Args:
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
os.chdir('friction/lateral')
static_dirs = [d.split('x') for d in os.listdir(os.getcwd())
if 'x' in d and os.path.isdir(d)]
n_divs_x = max([int(d[0]) for d in static_dirs])
n_divs_y = max([int(d[1]) for d in static_dirs])
lattice = Structure.from_file('POSCAR').lattice
area = np.cross(lattice._matrix[0], lattice._matrix[1])[2]
ax = plt.figure(figsize=(n_divs_x * 1.2, n_divs_y * 1.2)).gca()
ax.set_xlim(0, n_divs_x + 1)
ax.set_ylim(0, n_divs_y + 1)
energies = []
x_values = range(n_divs_x + 1)
y_values = range(n_divs_y + 1)
not_converged = []
for x in x_values:
energies.append([])
for y in y_values:
dir = '{}x{}'.format(x, y)
os.chdir(dir)
try:
energy = Vasprun('vasprun.xml').final_energy / area
energies[x].append(energy)
except:
not_converged.append('{}x{}'.format(x, y))
energies[x].append(0)
os.chdir('../')
energies[x].append(energies[x][0])
energies.append([])
# ENERGY_ARRAY[n_divs_x] = ENERGY_ARRAY[0]
if not_converged:
warnings.warn('{} did not converge.'.format(not_converged))
for coords in not_converged:
energies[int(coords.split('x')[0])][int(coords.split('x')[1])] = energy
minima = []
maxima = []
for x in x_values:
minima.append(min(energies[x]))
maxima.append(max(energies[x]))
abs_minimum = min(minima)
abs_maximum = max(maxima)
for x in range(n_divs_x + 1):
for y in range(n_divs_y + 1):
# Plot all energies relative to the global minimum.
scaled_energy = energies[x][y] - abs_minimum
if '{}x{}'.format(x, y) in not_converged:
color_code = 'w'
else:
color_code = plt.cm.jet(
scaled_energy/(abs_maximum - abs_minimum))
ax.add_patch(plt.Rectangle((x, y), width=1, height=1,
facecolor=color_code, linewidth=0))
# Get rid of annoying ticks.
ax.axes.get_yaxis().set_ticks([])
ax.axes.get_xaxis().set_ticks([])
os.chdir('../../')
plt.savefig('gamma_surface.{}'.format(fmt), transparent=True)
plt.close()
def get_number_of_surface_atoms():
"""
Count the number of atoms at a 2D material's surface. This
enables energy and force calculations to be normalized to
the number of surface atoms.
Returns:
int. Number of surface atoms (top + bottom) for both
layers in the bilayer model.
"""
structure = Structure.from_file('friction/lateral/POSCAR')
heights = np.array([site.z for site in structure.sites])
max_height = max(heights)
min_height = min(heights)
n_atoms_top = len([height for height in heights if max_height - height < 0.1])
n_atoms_bottom = len([height for height in heights if height - min_height < 0.1])
return (n_atoms_top + n_atoms_bottom) * 2
def get_basin_and_peak_locations():
"""
Find which directories inside 'friction/lateral' represent
the minimum (basin) and maximum (peak) energy stacking
configurations.
Returns:
tuple. Of the form (basin, peak).
"""
os.chdir('friction/lateral')
static_dirs = [d.split('x') for d in os.listdir(os.getcwd())
if 'x' in d and os.path.isdir(d)]
n_divs_x = max([int(d[0]) for d in static_dirs])
n_divs_y = max([int(d[1]) for d in static_dirs])
x_values = range(n_divs_x + 1)
y_values = range(n_divs_y + 1)
abs_maximum = -np.Infinity
abs_minimum = np.Infinity
for x in x_values:
for y in y_values:
dir = '{}x{}'.format(x, y)
os.chdir(dir)
try:
energy = Vasprun('vasprun.xml').final_energy
if energy < abs_minimum:
basin = dir
abs_minimum = energy
if energy > abs_maximum:
peak = dir
abs_maximum = energy
except:
pass
os.chdir('../')
os.chdir('../../')
return(basin, peak)
def plot_friction_force(fmt='pdf'):
"""
Plot the sinusoidal curve of delta E between basin and saddle
points for each normal spacing dz.
Args:
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
n_surface_atoms = get_number_of_surface_atoms()
os.chdir('friction/normal')
f, (ax1, ax2) = plt.subplots(2, figsize=(16, 16))
spacings = sorted([float(spc) for spc in os.listdir(os.getcwd()) if
os.path.isdir(spc)])
spc_range = spacings[-1] - spacings[0] + 0.1
for spacing in spacings:
os.chdir(str(spacing))
subdirectories = os.listdir(os.getcwd())
amplitude = abs(
Vasprun('{}/vasprun.xml'.format(subdirectories[0])).final_energy
- Vasprun('{}/vasprun.xml'.format(subdirectories[1])).final_energy
) / (2 * n_surface_atoms)
start_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[0])).sites[-1].coords
end_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[1])).sites[-1].coords
dist = np.sqrt(
(start_coords[0] - end_coords[0])**2 +
(start_coords[1] - end_coords[1])**2)
b = (2 * np.pi) / (dist * 2)
x = np.arange(0, 4, 0.01)
sinx = [amplitude * np.sin(b * val) + amplitude for val in x]
cosx = [b * amplitude * np.cos(b * val)
if np.cos(b * val) > 0 else 0 for val in x]
ax1.plot(x, sinx, linewidth=8,
color=plt.cm.jet(-(spacing - 4) / spc_range), label=spacing)
ax1.set_xticklabels(ax1.get_xticks(), family='serif', fontsize=18)
ax1.set_yticklabels(ax1.get_yticks(), family='serif', fontsize=18)
ax1.set_xlabel(r'$\mathrm{\Delta d\/(\AA)}$', family='serif', fontsize=24)
ax1.set_ylabel(r'$\mathrm{E(z)\/(eV)}$', family='serif', fontsize=24)
ax2.plot(x, cosx, linewidth=8,
color=plt.cm.jet(-(spacing - 4) / spc_range), label=spacing)
ax2.set_xticklabels(ax2.get_xticks(), family='serif', fontsize=18)
ax2.set_yticklabels(ax2.get_yticks(), family='serif', fontsize=18)
ax2.set_xlabel(r'$\mathrm{\Delta d\/(\AA)}$', family='serif', fontsize=24)
ax2.set_ylabel(r'$\mathrm{F_f\/(eV/\AA)}$', family='serif', fontsize=24)
os.chdir('../')
ax1.legend(loc='upper right')
ax2.legend(loc='upper right')
os.chdir('../../')
plt.savefig('F_f.{}'.format(fmt))
def plot_normal_force(basin_dir, fmt='pdf'):
"""
Plot the LJ-like curve of the energy at the basin point
as a function of normal spacing dz.
Args:
basin_dir (str): directory corresponding to the minimum
energy on the gamma surface. Generally obtained by the
get_basin_and_peak_locations() function.
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
n_surface_atoms = get_number_of_surface_atoms()
os.chdir('friction/normal')
spacings = [float(dir) for dir in os.listdir(os.getcwd())
if os.path.isdir(dir)]
spacings.sort()
fig = plt.figure(figsize=(16, 10))
ax = fig.gca()
ax2 = ax.twinx()
abs_E = [
Vasprun('{}/{}/vasprun.xml'.format(spacing, basin_dir)).final_energy / n_surface_atoms
for spacing in spacings
]
E = [energy - abs_E[-1] for energy in abs_E]
spline = interpolate.splrep(spacings, E, s=0)
xnew = np.arange(spacings[0], spacings[-1], 0.001)
ynew = interpolate.splev(xnew, spline, der=0)
ynew_slope = interpolate.splev(spacings, spline, der=1)
ax.set_xlim(spacings[0], spacings[-1])
ax.plot([spacings[0], spacings[-1]], [0, 0], '--', color=plt.cm.jet(0))
ax2.plot([spacings[0], spacings[-1]], [0, 0], '--', color=plt.cm.jet(0.9))
E_z = ax.plot(xnew, ynew, color=plt.cm.jet(0),
linewidth=4, label=r'$\mathrm{E(z)}$')
F_N = ax2.plot(spacings, [-y for y in ynew_slope], color=plt.cm.jet(0.9),
linewidth=4, label=r'$\mathrm{F_N}$')
ax.set_ylim(ax.get_ylim())
ax.set_xticklabels(ax.get_xticks(), family='serif', fontsize=18)
ax.set_yticklabels(ax.get_yticks(), family='serif', fontsize=18)
ax2.set_yticklabels(ax2.get_yticks(), family='serif', fontsize=18)
ax.set_xlabel(r'$\mathrm{z\/(\AA)}$', fontsize=24)
ax.set_ylabel(r'$\mathrm{E(z)\/(eV)}$', fontsize=24)
ax2.set_ylabel(r'$\mathrm{F_N\/(eV/\AA)}$', fontsize=24)
data = E_z + F_N
labs = [l.get_label() for l in data]
ax.legend(data, labs, loc='upper right', fontsize=24)
ax.plot(spacings, E, linewidth=0, marker='o', color=plt.cm.jet(0),
markersize=10, markeredgecolor='none')
os.chdir('../../')
plt.savefig('F_N.{}'.format(fmt))
def plot_mu_vs_F_N(basin_dir, fmt='pdf'):
"""
Plot friction coefficient 'mu' vs. F_Normal.
mu = F_friction / F_Normal.
Args:
basin_dir (str): directory corresponding to the minimum
energy on the gamma surface. Generally obtained by the
get_basin_and_peak_locations() function.
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
n_surface_atoms = get_number_of_surface_atoms()
fig = plt.figure(figsize=(16, 10))
# ax = fig.gca()
# ax2 = ax.twinx()
os.chdir('friction/normal')
spacings = [float(dir) for dir in os.listdir(os.getcwd()) if
os.path.isdir(dir)]
spacings.sort()
abs_E = [
Vasprun('{}/{}/vasprun.xml'.format(spacing, basin_dir)).final_energy / n_surface_atoms
for spacing in spacings
]
E = [energy - abs_E[-1] for energy in abs_E]
spline = interpolate.splrep(spacings, E, s=0)
# xnew = np.arange(spacings[0], spacings[-1], 0.001)
# ynew = interpolate.splev(xnew, spline, der=0)
ynew_slope = interpolate.splev(spacings, spline, der=1)
F_N = [-y * 1.602 for y in ynew_slope]
F_f = []
sorted_dirs = sorted([float(spc) for spc in os.listdir(os.getcwd())
if os.path.isdir(spc)])
for spacing in sorted_dirs:
os.chdir(str(spacing))
subdirectories = os.listdir(os.getcwd())
amplitude = abs(
Vasprun('{}/vasprun.xml'.format(subdirectories[0])).final_energy
- Vasprun('{}/vasprun.xml'.format(subdirectories[1])).final_energy
) / (2 * n_surface_atoms)
start_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[0])).sites[-1].coords
end_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[1])).sites[-1].coords
dist = np.sqrt(
(start_coords[0] - end_coords[0])**2
+ (start_coords[1] - end_coords[1])**2)
b = (2 * np.pi) / (dist * 2)
x = np.arange(0, 4, 0.01)
# sinx = [amplitude * np.sin(b * val) + amplitude for val in x]
cosx = [b * amplitude * np.cos(b * val)
if np.cos(b * val) > 0 else 0 for val in x]
F_f.append(max(cosx) * 1.602)
os.chdir('../')
os.chdir('../../')
mu = [f / N for f, N in zip(F_f, F_N)]
ax = plt.figure().gca()
ax.plot(F_N, mu, linewidth=2, marker='o', markeredgecolor='none',
markersize=3, color=plt.cm.jet(0))
plt.savefig('mu_vs_F_N.{}'.format(fmt))
def get_mu_vs_F_N(basin_dir):
"""
Essentially the same function as plotting, but without the plot.
Args:
basin_dir (str): directory corresponding to the minimum
energy on the gamma surface. Generally obtained by the
get_basin_and_peak_locations() function.
Returns:
dic: Of the form {'F_N': F_N, 'mu': mu, 'F_f': F_f}, where
forces are in nN.
"""
n_surface_atoms = get_number_of_surface_atoms()
os.chdir('friction/normal')
spacings = [float(dir) for dir in os.listdir(os.getcwd())
if os.path.isdir(dir)]
spacings.sort()
abs_E = [
Vasprun('{}/{}/vasprun.xml'.format(spacing, basin_dir)).final_energy / n_surface_atoms
for spacing in spacings
]
E = [energy - abs_E[-1] for energy in abs_E]
spline = interpolate.splrep(spacings, E, s=0)
xnew = np.arange(spacings[0], spacings[-1], 0.001)
ynew = interpolate.splev(xnew, spline, der=0)
ynew_slope = interpolate.splev(spacings, spline, der=1)
# Convert eV.A to nN
F_N = [-y * 1.602 for y in ynew_slope]
F_f = []
for spacing in sorted([float(spc) for spc in os.listdir(os.getcwd()) if
os.path.isdir(spc)]):
os.chdir(str(spacing))
subdirectories = os.listdir(os.getcwd())
try:
amplitude = abs(
Vasprun('{}/vasprun.xml'.format(subdirectories[0])).final_energy
-
Vasprun('{}/vasprun.xml'.format(subdirectories[1])).final_energy
) / (2 * n_surface_atoms)
except:
print('One or more jobs in {}/ have not converged.'.format(spacing))
start_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[0])).sites[-1].coords
end_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[1])).sites[-1].coords
dist = np.sqrt(
(start_coords[0] - end_coords[0])**2
+ (start_coords[1] - end_coords[1])**2)
b = (2 * np.pi) / (dist * 2)
x = np.arange(0, 4, 0.01)
# sinx = [amplitude * np.sin(b * val) + amplitude for val in x]
cosx = [b * amplitude * np.cos(b * val)
if np.cos(b * val) > 0 else 0 for val in x]
F_f.append(max(cosx) * 1.602)
os.chdir('../')
os.chdir('../../')
mu = [f / N for f, N in zip(F_f, F_N)]
return {'F_N': F_N, 'mu': mu, 'F_f': F_f}
| <filename>mpinterfaces/mat2d/friction/analysis.py
from __future__ import print_function, division, unicode_literals
import os
import warnings
import numpy as np
from scipy import interpolate
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.core.structure import Structure
from pymatgen import Element
from pymatgen.analysis.local_env import ValenceIonicRadiusEvaluator as VE
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, Henniggroup"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
__date__ = "March 3, 2017"
def get_corrugation_factor(structure):
"""
Calculate the "corrugation factor" for a 2D material.
The corrugation factor is defined as the sum of the
outer hemispheres of ionic radii of the atoms on the
material's top and bottom surfaces, divided by the
planar area of the whole unit cell's 001 plane. Top
and bottom corrugation factors are returned
separately in the final dictionary. In general,
a larger corrugation factor means a smoother surface.
Args:
structure (Structure): Pymatgen Structure object.
Returns:
corrugation_factors (dict): Dictionary of "top"
and "bottom" corrugation factors, e.g.
{"top": top_corrugation_factor,
"bottom": bottom_corrugation_factor}
"""
sites = structure.sites
valences = VE(structure).valences
formatted_valences = {}
for e in valences:
temp=e[-1]
if "+" in e or "-" in e:
try:
# Some element names have a number followed
# by a plus or minus, e.g. "O2-"
int(e[-2])
element = e[:-2]
except:
# Others are simply a plus or minus, e.g. "Cl-"
element = e[:-1]
else:
element = e
formatted_valences[Element(element)] = valences[e]
all_z_coords = [s.coords[2] for s in sites]
max_z = max(all_z_coords)
min_z = min(all_z_coords)
top_layer = [s for s in sites if abs(s.coords[2] - max_z) < 0.1]
bottom_layer = [s for s in sites if abs(s.coords[2] - min_z) < 0.1]
pi = np.pi
top_sphere_area = 0
bottom_sphere_area = 0
for site in top_layer:
if formatted_valences[site.specie] in site.specie.ionic_radii:
r = site.specie.ionic_radii[formatted_valences[site.specie]]
else:
r = site.specie.atomic_radius
top_sphere_area += 2*pi*r*r
for site in bottom_layer:
if formatted_valences[site.specie] in site.specie.ionic_radii:
r = site.specie.ionic_radii[formatted_valences[site.specie]]
else:
r = site.specie.atomic_radius
bottom_sphere_area += 2*pi*r*r
lattice = structure.lattice
area = abs(np.cross(lattice._matrix[0], lattice._matrix[1])[2])
corrugation = {"top": top_sphere_area / area,
"bottom": bottom_sphere_area / area}
return corrugation
def plot_gamma_surface(fmt='pdf'):
"""
Collect the energies from a grid of static energy
calculations to plot the Gamma surface between two layers of the 2D
material.
Args:
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
os.chdir('friction/lateral')
static_dirs = [d.split('x') for d in os.listdir(os.getcwd())
if 'x' in d and os.path.isdir(d)]
n_divs_x = max([int(d[0]) for d in static_dirs])
n_divs_y = max([int(d[1]) for d in static_dirs])
lattice = Structure.from_file('POSCAR').lattice
area = np.cross(lattice._matrix[0], lattice._matrix[1])[2]
ax = plt.figure(figsize=(n_divs_x * 1.2, n_divs_y * 1.2)).gca()
ax.set_xlim(0, n_divs_x + 1)
ax.set_ylim(0, n_divs_y + 1)
energies = []
x_values = range(n_divs_x + 1)
y_values = range(n_divs_y + 1)
not_converged = []
for x in x_values:
energies.append([])
for y in y_values:
dir = '{}x{}'.format(x, y)
os.chdir(dir)
try:
energy = Vasprun('vasprun.xml').final_energy / area
energies[x].append(energy)
except:
not_converged.append('{}x{}'.format(x, y))
energies[x].append(0)
os.chdir('../')
energies[x].append(energies[x][0])
energies.append([])
# ENERGY_ARRAY[n_divs_x] = ENERGY_ARRAY[0]
if not_converged:
warnings.warn('{} did not converge.'.format(not_converged))
for coords in not_converged:
energies[int(coords.split('x')[0])][int(coords.split('x')[1])] = energy
minima = []
maxima = []
for x in x_values:
minima.append(min(energies[x]))
maxima.append(max(energies[x]))
abs_minimum = min(minima)
abs_maximum = max(maxima)
for x in range(n_divs_x + 1):
for y in range(n_divs_y + 1):
# Plot all energies relative to the global minimum.
scaled_energy = energies[x][y] - abs_minimum
if '{}x{}'.format(x, y) in not_converged:
color_code = 'w'
else:
color_code = plt.cm.jet(
scaled_energy/(abs_maximum - abs_minimum))
ax.add_patch(plt.Rectangle((x, y), width=1, height=1,
facecolor=color_code, linewidth=0))
# Get rid of annoying ticks.
ax.axes.get_yaxis().set_ticks([])
ax.axes.get_xaxis().set_ticks([])
os.chdir('../../')
plt.savefig('gamma_surface.{}'.format(fmt), transparent=True)
plt.close()
def get_number_of_surface_atoms():
"""
Count the number of atoms at a 2D material's surface. This
enables energy and force calculations to be normalized to
the number of surface atoms.
Returns:
int. Number of surface atoms (top + bottom) for both
layers in the bilayer model.
"""
structure = Structure.from_file('friction/lateral/POSCAR')
heights = np.array([site.z for site in structure.sites])
max_height = max(heights)
min_height = min(heights)
n_atoms_top = len([height for height in heights if max_height - height < 0.1])
n_atoms_bottom = len([height for height in heights if height - min_height < 0.1])
return (n_atoms_top + n_atoms_bottom) * 2
def get_basin_and_peak_locations():
"""
Find which directories inside 'friction/lateral' represent
the minimum (basin) and maximum (peak) energy stacking
configurations.
Returns:
tuple. Of the form (basin, peak).
"""
os.chdir('friction/lateral')
static_dirs = [d.split('x') for d in os.listdir(os.getcwd())
if 'x' in d and os.path.isdir(d)]
n_divs_x = max([int(d[0]) for d in static_dirs])
n_divs_y = max([int(d[1]) for d in static_dirs])
x_values = range(n_divs_x + 1)
y_values = range(n_divs_y + 1)
abs_maximum = -np.Infinity
abs_minimum = np.Infinity
for x in x_values:
for y in y_values:
dir = '{}x{}'.format(x, y)
os.chdir(dir)
try:
energy = Vasprun('vasprun.xml').final_energy
if energy < abs_minimum:
basin = dir
abs_minimum = energy
if energy > abs_maximum:
peak = dir
abs_maximum = energy
except:
pass
os.chdir('../')
os.chdir('../../')
return(basin, peak)
def plot_friction_force(fmt='pdf'):
"""
Plot the sinusoidal curve of delta E between basin and saddle
points for each normal spacing dz.
Args:
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
n_surface_atoms = get_number_of_surface_atoms()
os.chdir('friction/normal')
f, (ax1, ax2) = plt.subplots(2, figsize=(16, 16))
spacings = sorted([float(spc) for spc in os.listdir(os.getcwd()) if
os.path.isdir(spc)])
spc_range = spacings[-1] - spacings[0] + 0.1
for spacing in spacings:
os.chdir(str(spacing))
subdirectories = os.listdir(os.getcwd())
amplitude = abs(
Vasprun('{}/vasprun.xml'.format(subdirectories[0])).final_energy
- Vasprun('{}/vasprun.xml'.format(subdirectories[1])).final_energy
) / (2 * n_surface_atoms)
start_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[0])).sites[-1].coords
end_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[1])).sites[-1].coords
dist = np.sqrt(
(start_coords[0] - end_coords[0])**2 +
(start_coords[1] - end_coords[1])**2)
b = (2 * np.pi) / (dist * 2)
x = np.arange(0, 4, 0.01)
sinx = [amplitude * np.sin(b * val) + amplitude for val in x]
cosx = [b * amplitude * np.cos(b * val)
if np.cos(b * val) > 0 else 0 for val in x]
ax1.plot(x, sinx, linewidth=8,
color=plt.cm.jet(-(spacing - 4) / spc_range), label=spacing)
ax1.set_xticklabels(ax1.get_xticks(), family='serif', fontsize=18)
ax1.set_yticklabels(ax1.get_yticks(), family='serif', fontsize=18)
ax1.set_xlabel(r'$\mathrm{\Delta d\/(\AA)}$', family='serif', fontsize=24)
ax1.set_ylabel(r'$\mathrm{E(z)\/(eV)}$', family='serif', fontsize=24)
ax2.plot(x, cosx, linewidth=8,
color=plt.cm.jet(-(spacing - 4) / spc_range), label=spacing)
ax2.set_xticklabels(ax2.get_xticks(), family='serif', fontsize=18)
ax2.set_yticklabels(ax2.get_yticks(), family='serif', fontsize=18)
ax2.set_xlabel(r'$\mathrm{\Delta d\/(\AA)}$', family='serif', fontsize=24)
ax2.set_ylabel(r'$\mathrm{F_f\/(eV/\AA)}$', family='serif', fontsize=24)
os.chdir('../')
ax1.legend(loc='upper right')
ax2.legend(loc='upper right')
os.chdir('../../')
plt.savefig('F_f.{}'.format(fmt))
def plot_normal_force(basin_dir, fmt='pdf'):
"""
Plot the LJ-like curve of the energy at the basin point
as a function of normal spacing dz.
Args:
basin_dir (str): directory corresponding to the minimum
energy on the gamma surface. Generally obtained by the
get_basin_and_peak_locations() function.
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
n_surface_atoms = get_number_of_surface_atoms()
os.chdir('friction/normal')
spacings = [float(dir) for dir in os.listdir(os.getcwd())
if os.path.isdir(dir)]
spacings.sort()
fig = plt.figure(figsize=(16, 10))
ax = fig.gca()
ax2 = ax.twinx()
abs_E = [
Vasprun('{}/{}/vasprun.xml'.format(spacing, basin_dir)).final_energy / n_surface_atoms
for spacing in spacings
]
E = [energy - abs_E[-1] for energy in abs_E]
spline = interpolate.splrep(spacings, E, s=0)
xnew = np.arange(spacings[0], spacings[-1], 0.001)
ynew = interpolate.splev(xnew, spline, der=0)
ynew_slope = interpolate.splev(spacings, spline, der=1)
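# ynew is the smooth E(z) curve from the cubic spline; its first derivative (ynew_slope) gives the normal force F_N = -dE/dz plotted below.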
ax.set_xlim(spacings[0], spacings[-1])
ax.plot([spacings[0], spacings[-1]], [0, 0], '--', color=plt.cm.jet(0))
ax2.plot([spacings[0], spacings[-1]], [0, 0], '--', color=plt.cm.jet(0.9))
E_z = ax.plot(xnew, ynew, color=plt.cm.jet(0),
linewidth=4, label=r'$\mathrm{E(z)}$')
F_N = ax2.plot(spacings, [-y for y in ynew_slope], color=plt.cm.jet(0.9),
linewidth=4, label=r'$\mathrm{F_N}$')
ax.set_ylim(ax.get_ylim())
ax.set_xticklabels(ax.get_xticks(), family='serif', fontsize=18)
ax.set_yticklabels(ax.get_yticks(), family='serif', fontsize=18)
ax2.set_yticklabels(ax2.get_yticks(), family='serif', fontsize=18)
ax.set_xlabel(r'$\mathrm{z\/(\AA)}$', fontsize=24)
ax.set_ylabel(r'$\mathrm{E(z)\/(eV)}$', fontsize=24)
ax2.set_ylabel(r'$\mathrm{F_N\/(eV/\AA)}$', fontsize=24)
data = E_z + F_N
labs = [l.get_label() for l in data]
ax.legend(data, labs, loc='upper right', fontsize=24)
ax.plot(spacings, E, linewidth=0, marker='o', color=plt.cm.jet(0),
markersize=10, markeredgecolor='none')
os.chdir('../../')
plt.savefig('F_N.{}'.format(fmt))
def plot_mu_vs_F_N(basin_dir, fmt='pdf'):
"""
Plot friction coefficient 'mu' vs. F_Normal.
mu = F_friction / F_Normal.
Args:
basin_dir (str): directory corresponding to the minimum
energy on the gamma surface. Generally obtained by the
get_basin_and_peak_locations() function.
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
n_surface_atoms = get_number_of_surface_atoms()
fig = plt.figure(figsize=(16, 10))
# ax = fig.gca()
# ax2 = ax.twinx()
os.chdir('friction/normal')
spacings = [float(dir) for dir in os.listdir(os.getcwd()) if
os.path.isdir(dir)]
spacings.sort()
abs_E = [
Vasprun('{}/{}/vasprun.xml'.format(spacing, basin_dir)).final_energy / n_surface_atoms
for spacing in spacings
]
E = [energy - abs_E[-1] for energy in abs_E]
spline = interpolate.splrep(spacings, E, s=0)
# xnew = np.arange(spacings[0], spacings[-1], 0.001)
# ynew = interpolate.splev(xnew, spline, der=0)
ynew_slope = interpolate.splev(spacings, spline, der=1)
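# Convert the spline slope -dE/dz from eV/Å to nN (1 eV/Å ≈ 1.602 nN).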
F_N = [-y * 1.602 for y in ynew_slope]
F_f = []
sorted_dirs = sorted([float(spc) for spc in os.listdir(os.getcwd())
if os.path.isdir(spc)])
for spacing in sorted_dirs:
os.chdir(str(spacing))
subdirectories = os.listdir(os.getcwd())
amplitude = abs(
Vasprun('{}/vasprun.xml'.format(subdirectories[0])).final_energy
- Vasprun('{}/vasprun.xml'.format(subdirectories[1])).final_energy
) / (2 * n_surface_atoms)
start_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[0])).sites[-1].coords
end_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[1])).sites[-1].coords
dist = np.sqrt(
(start_coords[0] - end_coords[0])**2
+ (start_coords[1] - end_coords[1])**2)
b = (2 * np.pi) / (dist * 2)
x = np.arange(0, 4, 0.01)
# sinx = [amplitude * np.sin(b * val) + amplitude for val in x]
cosx = [b * amplitude * np.cos(b * val)
if np.cos(b * val) > 0 else 0 for val in x]
F_f.append(max(cosx) * 1.602)
os.chdir('../')
os.chdir('../../')
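# Friction coefficient at each spacing: mu = F_f / F_N.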
mu = [f / N for f, N in zip(F_f, F_N)]
ax = plt.figure().gca()
ax.plot(F_N, mu, linewidth=2, marker='o', markeredgecolor='none',
markersize=3, color=plt.cm.jet(0))
plt.savefig('mu_vs_F_N.{}'.format(fmt))
def get_mu_vs_F_N(basin_dir):
"""
Essentially the same function as plotting, but without the plot.
Args:
basin_dir (str): directory corresponding to the minimum
energy on the gamma surface. Generally obtained by the
get_basin_and_peak_locations() function.
Returns:
dic: Of the form {'F_N': F_N, 'mu': mu, 'F_f': F_f}, where
forces are in nN.
"""
n_surface_atoms = get_number_of_surface_atoms()
os.chdir('friction/normal')
spacings = [float(dir) for dir in os.listdir(os.getcwd())
if os.path.isdir(dir)]
spacings.sort()
abs_E = [
Vasprun('{}/{}/vasprun.xml'.format(spacing, basin_dir)).final_energy / n_surface_atoms
for spacing in spacings
]
E = [energy - abs_E[-1] for energy in abs_E]
spline = interpolate.splrep(spacings, E, s=0)
xnew = np.arange(spacings[0], spacings[-1], 0.001)
ynew = interpolate.splev(xnew, spline, der=0)
ynew_slope = interpolate.splev(spacings, spline, der=1)
# Convert eV.A to nN
F_N = [-y * 1.602 for y in ynew_slope]
F_f = []
for spacing in sorted([float(spc) for spc in os.listdir(os.getcwd()) if
os.path.isdir(spc)]):
os.chdir(str(spacing))
subdirectories = os.listdir(os.getcwd())
try:
amplitude = abs(
Vasprun('{}/vasprun.xml'.format(subdirectories[0])).final_energy
-
Vasprun('{}/vasprun.xml'.format(subdirectories[1])).final_energy
) / (2 * n_surface_atoms)
except Exception:
print('One or more jobs in {}/ have not converged.'.format(spacing))
start_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[0])).sites[-1].coords
end_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[1])).sites[-1].coords
dist = np.sqrt(
(start_coords[0] - end_coords[0])**2
+ (start_coords[1] - end_coords[1])**2)
b = (2 * np.pi) / (dist * 2)
x = np.arange(0, 4, 0.01)
# sinx = [amplitude * np.sin(b * val) + amplitude for val in x]
cosx = [b * amplitude * np.cos(b * val)
if np.cos(b * val) > 0 else 0 for val in x]
F_f.append(max(cosx) * 1.602)
os.chdir('../')
os.chdir('../../')
mu = [f / N for f, N in zip(F_f, F_N)]
return {'F_N': F_N, 'mu': mu, 'F_f': F_f}
| en | 0.736389 | Calculate the "corrugation factor" for a 2D material. The corrugation factor is defined as the sum of the outer hemispheres of ionic radii of the atoms on the material's top and bottom surfaces, divided by the planar area of the whole unit cell's 001 plane. Top and bottom corrugation factors are returned separately in the final dictionary. In general, a larger corrugation factor means a smoother surface. Args: structure (Structure): Pymatgen Structure object. Returns: corrugation_factors (dict): Dictionary of "top" and "bottom" corrugation factors, e.g. {"top": top_corrugation_factor, "bottom": bottom_corrugation_factor} # Some element names have a number followed # by a plus or minus, e.g. "O2-" # Others are simply a plus or minus, e.g. "Cl-" Collect the energies from a grid of static energy calculations to plot the Gamma surface between two layers of the 2D material. Args: fmt (str): matplotlib format style. Check the matplotlib docs for options. # ENERGY_ARRAY[n_divs_x] = ENERGY_ARRAY[0] # Plot all energies relative to the global minimum. # Get rid of annoying ticks. Count the number of atoms at a 2D material's surface. This enables energy and force calculations to be normalized to the number of surface atoms. Returns: int. Number of surface atoms (top + bottom) for both layers in the bilayer model. Find which directories inside 'friction/lateral' represent the minimum (basin) and maximum (peak) energy stacking configurations. Returns: tuple. Of the form (basin, peak). Plot the sinusoidal curve of delta E between basin and saddle points for each normal spacing dz. Args: fmt (str): matplotlib format style. Check the matplotlib docs for options. Plot the LJ-like curve of the energy at the basin point as a function of normal spacing dz. Args: basin_dir (str): directory corresponding to the minimum energy on the gamma surface. Generally obtained by the get_basin_and_peak_locations() function. fmt (str): matplotlib format style. Check the matplotlib docs for options. Plot friction coefficient 'mu' vs. F_Normal. mu = F_friction / F_Normal. Args: basin_dir (str): directory corresponding to the minimum energy on the gamma surface. Generally obtained by the get_basin_and_peak_locations() function. fmt (str): matplotlib format style. Check the matplotlib docs for options. # ax = fig.gca() # ax2 = ax.twinx() # xnew = np.arange(spacings[0], spacings[-1], 0.001) # ynew = interpolate.splev(xnew, spline, der=0) # sinx = [amplitude * np.sin(b * val) + amplitude for val in x] Essentially the same function as plotting, but without the plot. Args: basin_dir (str): directory corresponding to the minimum energy on the gamma surface. Generally obtained by the get_basin_and_peak_locations() function. Returns: dic: Of the form {'F_N': F_N, 'mu': mu, 'F_f': F_f}, where forces are in nN. # Convert eV.A to nN # sinx = [amplitude * np.sin(b * val) + amplitude for val in x] | 2.956871 | 3 |
aiopylimit/tests/test_aiopylimit.py | zealotous/aiopylimit | 4 | 10517 | from aiopylimit import AIOPyRateLimit
from aiopylimit import AIOPyRateLimitException
import asynctest
import asyncio
class TestPyLimit(asynctest.TestCase):
async def test_exception(self):
limit = AIOPyRateLimit(10, 10)
await self.assertAsyncRaises(AIOPyRateLimitException,
limit.attempt('test_namespace'))
async def test_throttle(self):
AIOPyRateLimit.init(redis_host="localhost", redis_port=6379,
force_new_connection=True)
limit = AIOPyRateLimit(10, 10)
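# With a 10-attempt budget per 10 s window, the first 10 attempts (spread over ~5 s) should pass and the next 10 should be throttled; after a further 6 s the window has rolled over and attempts succeed again.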
for x in range(0, 20):
await asyncio.sleep(.5)
if x < 10:
self.assertTrue(await limit.attempt('test_namespace'))
else:
self.assertFalse(await limit.attempt('test_namespace'))
await asyncio.sleep(6)
self.assertTrue(await limit.attempt('test_namespace'))
async def test_peek(self):
AIOPyRateLimit.init(redis_host="localhost", redis_port=6379,
force_new_connection=True)
limit = AIOPyRateLimit(10, 10)
for x in range(0, 10):
self.assertTrue(await limit.attempt('test_namespace2'))
self.assertTrue(await limit.is_rate_limited('test_namespace2'))
await asyncio.sleep(10)
self.assertFalse(await limit.is_rate_limited('test_namespace2'))
| from aiopylimit import AIOPyRateLimit
from aiopylimit import AIOPyRateLimitException
import asynctest
import asyncio
class TestPyLimit(asynctest.TestCase):
async def test_exception(self):
limit = AIOPyRateLimit(10, 10)
await self.assertAsyncRaises(AIOPyRateLimitException,
limit.attempt('test_namespace'))
async def test_throttle(self):
AIOPyRateLimit.init(redis_host="localhost", redis_port=6379,
force_new_connection=True)
limit = AIOPyRateLimit(10, 10)
for x in range(0, 20):
await asyncio.sleep(.5)
if x < 10:
self.assertTrue(await limit.attempt('test_namespace'))
else:
self.assertFalse(await limit.attempt('test_namespace'))
await asyncio.sleep(6)
self.assertTrue(await limit.attempt('test_namespace'))
async def test_peek(self):
AIOPyRateLimit.init(redis_host="localhost", redis_port=6379,
force_new_connection=True)
limit = AIOPyRateLimit(10, 10)
for x in range(0, 10):
self.assertTrue(await limit.attempt('test_namespace2'))
self.assertTrue(await limit.is_rate_limited('test_namespace2'))
await asyncio.sleep(10)
self.assertFalse(await limit.is_rate_limited('test_namespace2'))
| none | 1 | 2.396283 | 2 |
|
bookworm/platform_services/_win32/tesseract_download.py | mush42/bookworm | 18 | 10518 | # coding: utf-8
import sys
import shutil
import requests
import wx
from pathlib import Path
from urllib.parse import urljoin, urlsplit
from tempfile import TemporaryFile
from zipfile import ZipFile
from bookworm import typehints as t
from bookworm import app
from bookworm.http_tools import RemoteJsonResource, HttpResource
from bookworm.ocr_engines.tesseract_ocr_engine import (
TesseractOcrEngine,
get_tesseract_path,
)
from bookworm.logger import logger
log = logger.getChild(__name__)
BRANCH = "develop"
TESSERACT_VERSION_URL = f"https://raw.githubusercontent.com/blindpandas/bookworm/{BRANCH}/packages/tesseract/version"
if app.arch == "x86":
TESSERACT_ENGINE_DOWNLOAD_URL = f"https://raw.githubusercontent.com/blindpandas/bookworm/{BRANCH}/packages/tesseract/tesseract_x86.zip"
else:
TESSERACT_ENGINE_DOWNLOAD_URL = f"https://raw.githubusercontent.com/blindpandas/bookworm/{BRANCH}/packages/tesseract/tesseract_x64.zip"
FAST_TRAINEDDATA_DOWNLOAD_URL = "https://raw.githubusercontent.com/tesseract-ocr/tessdata_fast/main/{lang_code}.traineddata"
BEST_TRAINEDDATA_DOWNLOAD_URL = "https://raw.githubusercontent.com/tesseract-ocr/tessdata_best/main/{lang_code}.traineddata"
def get_downloadable_languages():
return (
"afr",
"sqi",
"amh",
"ara",
"hye",
"asm",
"aze_cyrl",
"aze",
"ben",
"eus",
"bel",
"bos",
"bre",
"bul",
"mya",
"cat",
"ceb",
"chr",
"chi_sim",
"hrv",
"ces",
"dan",
"nld",
"dzo",
"eng",
"epo",
"est",
"fao",
"fil",
"fin",
"fra",
"glg",
"kat_old",
"kat",
"deu",
"ell",
"guj",
"heb",
"hin",
"hun",
"isl",
"ind",
"gle",
"ita_old",
"ita",
"jpn_vert",
"jpn",
"jav",
"kan",
"kaz",
"khm",
"kor_vert",
"kor",
"kmr",
"kir",
"lao",
"lav",
"lit",
"ltz",
"mkd",
"msa",
"mal",
"mlt",
"mri",
"mar",
"mon",
"nep",
"nor",
"ori",
"pus",
"fas",
"pol",
"por",
"pan",
"que",
"ron",
"rus",
"gla",
"srp_latn",
"srp",
"snd",
"sin",
"slk",
"slv",
"spa_old",
"spa",
"sun",
"swa",
"swe",
"tgk",
"tam",
"tat",
"tel",
"tha",
"bod",
"tir",
"ton",
"tur",
"ukr",
"urd",
"uig",
"uzb_cyrl",
"uzb",
"vie",
"cym",
"fry",
"yid",
"yor",
)
def is_tesseract_available():
return sys.platform == "win32" and TesseractOcrEngine.check()
def get_tessdata():
return get_tesseract_path() / "tessdata"
def get_language_path(language):
return Path(get_tessdata(), f"{language}.traineddata")
def is_new_tesseract_version_available():
remote_version = requests.get(TESSERACT_VERSION_URL).text
return TesseractOcrEngine.get_tesseract_version() != remote_version
def download_tesseract_engine(progress_dlg):
tesseract_directory = get_tesseract_path()
callback = lambda prog: progress_dlg.Update(prog.percentage, prog.user_message)
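# Stream the engine zip into a temporary file (abortable through the progress dialog), then extract it into the Tesseract directory.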
try:
dl_request = HttpResource(TESSERACT_ENGINE_DOWNLOAD_URL).download()
progress_dlg.set_abort_callback(dl_request.cancel)
with TemporaryFile() as dlfile:
dl_request.download_to_file(dlfile, callback)
if dl_request.is_cancelled():
return
with progress_dlg.PulseContinuously(_("Extracting file...")):
with ZipFile(dlfile, "r") as zfile:
tesseract_directory.mkdir(parents=True, exist_ok=True)
zfile.extractall(path=tesseract_directory)
wx.GetApp().mainFrame.notify_user(
# Translators: title of a messagebox
_("Success"),
# Translators: content of a messagebox
_("Tesseract engine downloaded successfully"),
)
return True
except ConnectionError:
log.debug("Failed to download tesseract OCR engine.", exc_info=True)
wx.GetApp().mainFrame.notify_user(
# Translators: title of a messagebox
_("Connection Error"),
_(
"Could not download Tesseract OCR Engine.\nPlease check your internet and try again."
),
icon=wx.ICON_ERROR,
)
except Exception:
log.exception(
"An error occurred while installing the Tesseract OCr Engine", exc_info=True
)
wx.GetApp().mainFrame.notify_user(
_("Error"),
_("Could not install the Tesseract OCR engine.\nPlease try again."),
icon=wx.ICON_WARNING,
)
def download_language(lang_code, variant, target_file, progress_dlg):
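# Choose between the tessdata_best (higher accuracy, slower) and tessdata_fast language files and stream the chosen traineddata to the target path.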
url_prefix = (
BEST_TRAINEDDATA_DOWNLOAD_URL
if variant == "best"
else FAST_TRAINEDDATA_DOWNLOAD_URL
)
download_url = url_prefix.format(lang_code=lang_code)
callback = lambda prog: progress_dlg.Update(prog.percentage, prog.user_message)
dl_request = HttpResource(download_url).download()
progress_dlg.set_abort_callback(dl_request.cancel)
dl_request.download_to_filesystem(target_file, callback)
return not dl_request.is_cancelled()
def remove_tesseract():
tesseract_path = get_tesseract_path()
shutil.rmtree(tesseract_path, ignore_errors=False)
| # coding: utf-8
import sys
import shutil
import requests
import wx
from pathlib import Path
from urllib.parse import urljoin, urlsplit
from tempfile import TemporaryFile
from zipfile import ZipFile
from bookworm import typehints as t
from bookworm import app
from bookworm.http_tools import RemoteJsonResource, HttpResource
from bookworm.ocr_engines.tesseract_ocr_engine import (
TesseractOcrEngine,
get_tesseract_path,
)
from bookworm.logger import logger
log = logger.getChild(__name__)
BRANCH = "develop"
TESSERACT_VERSION_URL = f"https://raw.githubusercontent.com/blindpandas/bookworm/{BRANCH}/packages/tesseract/version"
if app.arch == "x86":
TESSERACT_ENGINE_DOWNLOAD_URL = f"https://raw.githubusercontent.com/blindpandas/bookworm/{BRANCH}/packages/tesseract/tesseract_x86.zip"
else:
TESSERACT_ENGINE_DOWNLOAD_URL = f"https://raw.githubusercontent.com/blindpandas/bookworm/{BRANCH}/packages/tesseract/tesseract_x64.zip"
FAST_TRAINEDDATA_DOWNLOAD_URL = "https://raw.githubusercontent.com/tesseract-ocr/tessdata_fast/main/{lang_code}.traineddata"
BEST_TRAINEDDATA_DOWNLOAD_URL = "https://raw.githubusercontent.com/tesseract-ocr/tessdata_best/main/{lang_code}.traineddata"
def get_downloadable_languages():
return (
"afr",
"sqi",
"amh",
"ara",
"hye",
"asm",
"aze_cyrl",
"aze",
"ben",
"eus",
"bel",
"bos",
"bre",
"bul",
"mya",
"cat",
"ceb",
"chr",
"chi_sim",
"hrv",
"ces",
"dan",
"nld",
"dzo",
"eng",
"epo",
"est",
"fao",
"fil",
"fin",
"fra",
"glg",
"kat_old",
"kat",
"deu",
"ell",
"guj",
"heb",
"hin",
"hun",
"isl",
"ind",
"gle",
"ita_old",
"ita",
"jpn_vert",
"jpn",
"jav",
"kan",
"kaz",
"khm",
"kor_vert",
"kor",
"kmr",
"kir",
"lao",
"lav",
"lit",
"ltz",
"mkd",
"msa",
"mal",
"mlt",
"mri",
"mar",
"mon",
"nep",
"nor",
"ori",
"pus",
"fas",
"pol",
"por",
"pan",
"que",
"ron",
"rus",
"gla",
"srp_latn",
"srp",
"snd",
"sin",
"slk",
"slv",
"spa_old",
"spa",
"sun",
"swa",
"swe",
"tgk",
"tam",
"tat",
"tel",
"tha",
"bod",
"tir",
"ton",
"tur",
"ukr",
"urd",
"uig",
"uzb_cyrl",
"uzb",
"vie",
"cym",
"fry",
"yid",
"yor",
)
def is_tesseract_available():
return sys.platform == "win32" and TesseractOcrEngine.check()
def get_tessdata():
return get_tesseract_path() / "tessdata"
def get_language_path(language):
return Path(get_tessdata(), f"{language}.traineddata")
def is_new_tesseract_version_available():
remote_version = requests.get(TESSERACT_VERSION_URL).text
return TesseractOcrEngine.get_tesseract_version() != remote_version
def download_tesseract_engine(progress_dlg):
tesseract_directory = get_tesseract_path()
callback = lambda prog: progress_dlg.Update(prog.percentage, prog.user_message)
try:
dl_request = HttpResource(TESSERACT_ENGINE_DOWNLOAD_URL).download()
progress_dlg.set_abort_callback(dl_request.cancel)
with TemporaryFile() as dlfile:
dl_request.download_to_file(dlfile, callback)
if dl_request.is_cancelled():
return
with progress_dlg.PulseContinuously(_("Extracting file...")):
with ZipFile(dlfile, "r") as zfile:
tesseract_directory.mkdir(parents=True, exist_ok=True)
zfile.extractall(path=tesseract_directory)
wx.GetApp().mainFrame.notify_user(
# Translators: title of a messagebox
_("Success"),
# Translators: content of a messagebox
_("Tesseract engine downloaded successfully"),
)
return True
except ConnectionError:
log.debug("Failed to download tesseract OCR engine.", exc_info=True)
wx.GetApp().mainFrame.notify_user(
# Translators: title of a messagebox
_("Connection Error"),
_(
"Could not download Tesseract OCR Engine.\nPlease check your internet and try again."
),
icon=wx.ICON_ERROR,
)
except Exception:
log.exception(
"An error occurred while installing the Tesseract OCr Engine", exc_info=True
)
wx.GetApp().mainFrame.notify_user(
_("Error"),
_("Could not install the Tesseract OCR engine.\nPlease try again."),
icon=wx.ICON_WARNING,
)
def download_language(lang_code, variant, target_file, progress_dlg):
url_prefix = (
BEST_TRAINEDDATA_DOWNLOAD_URL
if variant == "best"
else FAST_TRAINEDDATA_DOWNLOAD_URL
)
download_url = url_prefix.format(lang_code=lang_code)
callback = lambda prog: progress_dlg.Update(prog.percentage, prog.user_message)
dl_request = HttpResource(download_url).download()
progress_dlg.set_abort_callback(dl_request.cancel)
dl_request.download_to_filesystem(target_file, callback)
return not dl_request.is_cancelled()
def remove_tesseract():
tesseract_path = get_tesseract_path()
shutil.rmtree(tesseract_path, ignore_errors=False)
| en | 0.568374 | # coding: utf-8 # Translators: title of a messagebox # Translators: content of a messagebox # Translators: title of a messagebox | 2.226008 | 2 |
python/plugins/processing/algs/grass7/ext/v_proj.py | dyna-mis/Hilabeling | 0 | 10519 | # -*- coding: utf-8 -*-
"""
***************************************************************************
v_proj.py
---------
Date : November 2017
Copyright : (C) 2017 by <NAME>
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = '<NAME>'
__date__ = 'November 2017'
__copyright__ = '(C) 2017, <NAME>'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
from qgis.core import QgsProcessingParameterString
def processInputs(alg, parameters, context, feedback):
# Grab the projection from the input vector layer
layer = alg.parameterAsLayer(parameters, 'input', context)
alg.setSessionProjectionFromLayer(layer)
layerCrs = layer.crs().toProj4()
# Creates a new location with this Crs
newLocation = 'newProj{}'.format(alg.uniqueSuffix)
alg.commands.append('g.proj proj4="{}" location={}'.format(
layerCrs, newLocation))
# Go to the newly created location
alg.commands.append('g.mapset mapset=PERMANENT location={}'.format(
newLocation))
# Import the layer
alg.loadVectorLayerFromParameter(
'input', parameters, context, feedback, False)
# Go back to default location
alg.commands.append('g.mapset mapset=PERMANENT location=temp_location')
# Grab the projected Crs
crs = alg.parameterAsCrs(parameters, 'crs', context)
alg.commands.append('g.proj -c proj4="{}"'.format(
crs.toProj4(), newLocation))
# Remove crs parameter
alg.removeParameter('crs')
# Add the location parameter with proper value
location = QgsProcessingParameterString(
'location',
'new location',
'newProj{}'.format(alg.uniqueSuffix)
)
alg.addParameter(location)
| # -*- coding: utf-8 -*-
"""
***************************************************************************
v_proj.py
---------
Date : November 2017
Copyright : (C) 2017 by <NAME>
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = '<NAME>'
__date__ = 'November 2017'
__copyright__ = '(C) 2017, <NAME>'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
from qgis.core import QgsProcessingParameterString
def processInputs(alg, parameters, context, feedback):
# Grab the projection from the input vector layer
layer = alg.parameterAsLayer(parameters, 'input', context)
alg.setSessionProjectionFromLayer(layer)
layerCrs = layer.crs().toProj4()
# Creates a new location with this Crs
newLocation = 'newProj{}'.format(alg.uniqueSuffix)
alg.commands.append('g.proj proj4="{}" location={}'.format(
layerCrs, newLocation))
# Go to the newly created location
alg.commands.append('g.mapset mapset=PERMANENT location={}'.format(
newLocation))
# Import the layer
alg.loadVectorLayerFromParameter(
'input', parameters, context, feedback, False)
# Go back to default location
alg.commands.append('g.mapset mapset=PERMANENT location=temp_location')
# Grab the projected Crs
crs = alg.parameterAsCrs(parameters, 'crs', context)
alg.commands.append('g.proj -c proj4="{}"'.format(
crs.toProj4(), newLocation))
# Remove crs parameter
alg.removeParameter('crs')
# Add the location parameter with proper value
location = QgsProcessingParameterString(
'location',
'new location',
'newProj{}'.format(alg.uniqueSuffix)
)
alg.addParameter(location)
| en | 0.614673 | # -*- coding: utf-8 -*- *************************************************************************** v_proj.py --------- Date : November 2017 Copyright : (C) 2017 by <NAME> Email : medspx at medspx dot fr *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** # This will get replaced with a git SHA1 when you do a git archive # Grab the projection from the input vector layer # Creates a new location with this Crs # Go to the newly created location # Import the layer # Go back to default location # Grab the projected Crs # Remove crs parameter # Add the location parameter with proper value | 1.890194 | 2 |
examples/diode/gmsh_diode2d.py | QuantumOfMoose/devsim | 0 | 10520 | <filename>examples/diode/gmsh_diode2d.py
# Copyright 2013 Devsim LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from devsim import *
from devsim.python_packages.simple_physics import *
import diode_common
device="diode2d"
region="Bulk"
diode_common.Create2DGmshMesh(device, region)
# this is is the devsim format
write_devices (file="gmsh_diode2d_out.msh")
diode_common.SetParameters(device=device, region=region)
####
#### NetDoping
####
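# Step doping profile: 1e18 acceptors below y = 0.5e-5 and 1e18 donors above it, placing the p-n junction at y = 0.5e-5.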
node_model(device=device, region=region, name="Acceptors", equation="1.0e18*step(0.5e-5-y);")
node_model(device=device, region=region, name="Donors" , equation="1.0e18*step(y-0.5e-5);")
node_model(device=device, region=region, name="NetDoping", equation="Donors-Acceptors;")
diode_common.InitialSolution(device, region)
####
#### Initial DC solution
####
solve(type="dc", absolute_error=1.0, relative_error=1e-12, maximum_iterations=30)
###
### Drift diffusion simulation at equilibrium
###
diode_common.DriftDiffusionInitialSolution(device, region)
solve(type="dc", absolute_error=1e10, relative_error=1e-10, maximum_iterations=50)
v = 0.0
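# Sweep the top-contact bias from 0 V to 0.5 V in 0.1 V steps, re-solving the DC system and printing the contact currents at each bias point.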
while v < 0.51:
set_parameter(device=device, name=GetContactBiasName("top"), value=v)
solve(type="dc", absolute_error=1e10, relative_error=1e-10, maximum_iterations=30)
PrintCurrents(device, "top")
PrintCurrents(device, "bot")
v += 0.1
write_devices(file="gmsh_diode2d.dat", type="tecplot")
write_devices(file="gmsh_diode2d_dd.msh", type="devsim")
| <filename>examples/diode/gmsh_diode2d.py
# Copyright 2013 Devsim LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from devsim import *
from devsim.python_packages.simple_physics import *
import diode_common
device="diode2d"
region="Bulk"
diode_common.Create2DGmshMesh(device, region)
# this is is the devsim format
write_devices (file="gmsh_diode2d_out.msh")
diode_common.SetParameters(device=device, region=region)
####
#### NetDoping
####
node_model(device=device, region=region, name="Acceptors", equation="1.0e18*step(0.5e-5-y);")
node_model(device=device, region=region, name="Donors" , equation="1.0e18*step(y-0.5e-5);")
node_model(device=device, region=region, name="NetDoping", equation="Donors-Acceptors;")
diode_common.InitialSolution(device, region)
####
#### Initial DC solution
####
solve(type="dc", absolute_error=1.0, relative_error=1e-12, maximum_iterations=30)
###
### Drift diffusion simulation at equilibrium
###
diode_common.DriftDiffusionInitialSolution(device, region)
solve(type="dc", absolute_error=1e10, relative_error=1e-10, maximum_iterations=50)
v = 0.0
while v < 0.51:
set_parameter(device=device, name=GetContactBiasName("top"), value=v)
solve(type="dc", absolute_error=1e10, relative_error=1e-10, maximum_iterations=30)
PrintCurrents(device, "top")
PrintCurrents(device, "bot")
v += 0.1
write_devices(file="gmsh_diode2d.dat", type="tecplot")
write_devices(file="gmsh_diode2d_dd.msh", type="devsim")
| en | 0.806307 | # Copyright 2013 Devsim LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this is is the devsim format #### #### NetDoping #### #### #### Initial DC solution #### ### ### Drift diffusion simulation at equilibrium ### | 2.442997 | 2 |
python/astro_imaging/config.py | taranu/astro_imaging | 0 | 10521 | <reponame>taranu/astro_imaging
from dataclasses import dataclass
import os
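# The data root defaults to the current directory unless ASTRO_IMAGING_DATA_PATH is set in the environment.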
path_base_default = os.getenv('ASTRO_IMAGING_DATA_PATH', default='./')
@dataclass
class Paths:
base: str = path_base_default
catalogs: str = None
images: str = None
def __post_init__(self):
if self.catalogs is None:
self.catalogs = os.path.join(self.base, 'catalogs')
if self.images is None:
self.images = os.path.join(self.base, 'images')
paths_default = Paths()
| from dataclasses import dataclass
import os
path_base_default = os.getenv('ASTRO_IMAGING_DATA_PATH', default='./')
@dataclass
class Paths:
base: str = path_base_default
catalogs: str = None
images: str = None
def __post_init__(self):
if self.catalogs is None:
self.catalogs = os.path.join(self.base, 'catalogs')
if self.images is None:
self.images = os.path.join(self.base, 'images')
paths_default = Paths()
| none | 1 | 2.544549 | 3 |
|
AT.py | MTandHJ/roboc | 8 | 10522 | <reponame>MTandHJ/roboc
#!/usr/bin/env python
from typing import Tuple
import argparse
from src.loadopts import *
METHOD = "RobOC-AT"
SAVE_FREQ = 5
PRINT_FREQ = 20
FMT = "{description}={scale}-{leverage}" \
"={learning_policy}-{optimizer}-{lr}" \
"={attack}-{epsilon:.4f}-{stepsize}-{steps}" \
"={batch_size}={transform}"
parser = argparse.ArgumentParser()
parser.add_argument("model", type=str)
parser.add_argument("dataset", type=str)
# for orthogonal classifier
parser.add_argument("--scale", type=float, default=10.,
help="the length of weights")
parser.add_argument("--leverage", type=float, default=0.15,
help="the hyper-parameter governs the relative weight between clean and adversarial samples")
# adversarial training settings
parser.add_argument("--attack", type=str, default="pgd-squared")
parser.add_argument("--epsilon", type=float, default=8/255)
parser.add_argument("--stepsize", type=float, default=0.25,
help="pgd:rel_stepsize, cwl2:step_size, deepfool:overshoot, bb:lr")
parser.add_argument("--steps", type=int, default=10)
# basic settings
parser.add_argument("--loss", type=str, default="square")
parser.add_argument("--optimizer", type=str, choices=("sgd", "adam"), default="sgd")
parser.add_argument("-mom", "--momentum", type=float, default=0.9,
help="the momentum used for SGD")
parser.add_argument("-beta1", "--beta1", type=float, default=0.9,
help="the first beta argument for Adam")
parser.add_argument("-beta2", "--beta2", type=float, default=0.999,
help="the second beta argument for Adam")
parser.add_argument("-wd", "--weight_decay", type=float, default=5e-4,
help="weight decay")
parser.add_argument("-lr", "--lr", "--LR", "--learning_rate", type=float, default=0.1)
parser.add_argument("-lp", "--learning_policy", type=str, default="default",
help="learning rate schedule defined in config.py")
parser.add_argument("--epochs", type=int, default=180)
parser.add_argument("-b", "--batch_size", type=int, default=128)
parser.add_argument("--transform", type=str, default='default',
help="the data augmentation which will be applied during training.")
parser.add_argument("--resume", action="store_true", default=False)
parser.add_argument("--progress", action="store_true", default=False,
help="show the progress if true")
parser.add_argument("--seed", type=int, default=1)
parser.add_argument("-m", "--description", type=str, default="RobOC-AT")
opts = parser.parse_args()
opts.description = FMT.format(**opts.__dict__)
def load_cfg() -> Tuple[Config, str]:
from src.dict2obj import Config
from src.base import Coach, AdversaryForTrain
from src.utils import gpu, set_seed, load_checkpoint
cfg = Config()
set_seed(opts.seed)
# the model and other settings for training
model = load_model(opts.model)(
num_classes=get_num_classes(opts.dataset),
scale=opts.scale
)
device = gpu(model)
# load the dataset
trainset = load_dataset(
dataset_type=opts.dataset,
transform=opts.transform,
train=True
)
cfg['trainloader'] = load_dataloader(
dataset=trainset,
batch_size=opts.batch_size,
train=True,
show_progress=opts.progress
)
testset = load_dataset(
dataset_type=opts.dataset,
transform=opts.transform,
train=False
)
cfg['testloader'] = load_dataloader(
dataset=testset,
batch_size=opts.batch_size,
train=False,
show_progress=opts.progress
)
normalizer = load_normalizer(dataset_type=opts.dataset)
# load the optimizer and learning_policy
optimizer = load_optimizer(
model=model, optim_type=opts.optimizer, lr=opts.lr,
momentum=opts.momentum, betas=(opts.beta1, opts.beta2),
weight_decay=opts.weight_decay
)
learning_policy = load_learning_policy(
optimizer=optimizer,
learning_policy_type=opts.learning_policy,
T_max=opts.epochs
)
# generate the path for logging information and saving parameters
cfg['info_path'], cfg['log_path'] = generate_path(
method=METHOD, dataset_type=opts.dataset,
model=opts.model, description=opts.description
)
if opts.resume:
cfg['start_epoch'] = load_checkpoint(
path=cfg.info_path, model=model,
optimizer=optimizer, lr_scheduler=learning_policy
)
else:
cfg['start_epoch'] = 0
cfg['coach'] = Coach(
model=model, device=device,
loss_func=load_loss_func(opts.loss)(model=model),
normalizer=normalizer, optimizer=optimizer,
learning_policy=learning_policy
)
# set the attack
attack, bounds, preprocessing = load_attacks(
attack_type=opts.attack, dataset_type=opts.dataset,
stepsize=opts.stepsize, steps=opts.steps
)
cfg['attacker'] = AdversaryForTrain(
model=model, attacker=attack, device=device,
bounds=bounds, preprocessing=preprocessing, epsilon=opts.epsilon
)
cfg['valider'] = load_valider(
model=model, device=device, dataset_type=opts.dataset
)
return cfg
def evaluate(
valider, trainloader, testloader,
acc_logger, rob_logger, writter,
epoch = 8888
):
train_accuracy, train_success = valider.evaluate(trainloader)
valid_accuracy, valid_success = valider.evaluate(testloader)
print(f"Train >>> [TA: {train_accuracy:.5f}] [RA: {1 - train_success:.5f}]")
print(f"Test. >>> [TA: {valid_accuracy:.5f}] [RA: {1 - valid_success:.5f}]")
writter.add_scalars("Accuracy", {"train":train_accuracy, "valid":valid_accuracy}, epoch)
writter.add_scalars("Success", {"train":train_success, "valid":valid_success}, epoch)
acc_logger.train(data=train_accuracy, T=epoch)
acc_logger.valid(data=valid_accuracy, T=epoch)
rob_logger.train(data=1 - train_success, T=epoch)
rob_logger.valid(data=1 - valid_success, T=epoch)
def main(
coach, attacker, valider,
trainloader, testloader, start_epoch,
info_path, log_path
):
from src.utils import save_checkpoint, TrackMeter, ImageMeter
from src.dict2obj import Config
acc_logger = Config(
train=TrackMeter("Train"),
valid=TrackMeter("Valid")
)
acc_logger.plotter = ImageMeter(*acc_logger.values(), title="Accuracy")
rob_logger = Config(
train=TrackMeter("Train"),
valid=TrackMeter("Valid")
)
rob_logger.plotter = ImageMeter(*rob_logger.values(), title="Robustness")
for epoch in range(start_epoch, opts.epochs):
if epoch % SAVE_FREQ == 0:
save_checkpoint(info_path, coach.model, coach.optimizer, coach.learning_policy, epoch)
if epoch % PRINT_FREQ == 0:
evaluate(
valider=valider, trainloader=trainloader, testloader=testloader,
acc_logger=acc_logger, rob_logger=rob_logger, writter=writter,
epoch=epoch
)
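# Adversarial training step: 'leverage' sets the relative weight between the clean and adversarial terms of the loss (see the --leverage help text).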
running_loss = coach.adv_train(trainloader, attacker, leverage=opts.leverage, epoch=epoch)
writter.add_scalar("Loss", running_loss, epoch)
evaluate(
valider=valider, trainloader=trainloader, testloader=testloader,
acc_logger=acc_logger, rob_logger=rob_logger, writter=writter,
epoch=opts.epochs
)
acc_logger.plotter.plot()
rob_logger.plotter.plot()
acc_logger.plotter.save(writter)
rob_logger.plotter.save(writter)
if __name__ == "__main__":
from torch.utils.tensorboard import SummaryWriter
from src.utils import mkdirs, readme
cfg = load_cfg()
mkdirs(cfg.info_path, cfg.log_path)
readme(cfg.info_path, opts)
readme(cfg.log_path, opts, mode="a")
writter = SummaryWriter(log_dir=cfg.log_path, filename_suffix=METHOD)
main(**cfg)
cfg['coach'].save(cfg.info_path)
writter.close()
| #!/usr/bin/env python
from typing import Tuple
import argparse
from src.loadopts import *
METHOD = "RobOC-AT"
SAVE_FREQ = 5
PRINT_FREQ = 20
FMT = "{description}={scale}-{leverage}" \
"={learning_policy}-{optimizer}-{lr}" \
"={attack}-{epsilon:.4f}-{stepsize}-{steps}" \
"={batch_size}={transform}"
parser = argparse.ArgumentParser()
parser.add_argument("model", type=str)
parser.add_argument("dataset", type=str)
# for orthogonal classifier
parser.add_argument("--scale", type=float, default=10.,
help="the length of weights")
parser.add_argument("--leverage", type=float, default=0.15,
help="the hyper-parameter governs the relative weight between clean and adversarial samples")
# adversarial training settings
parser.add_argument("--attack", type=str, default="pgd-squared")
parser.add_argument("--epsilon", type=float, default=8/255)
parser.add_argument("--stepsize", type=float, default=0.25,
help="pgd:rel_stepsize, cwl2:step_size, deepfool:overshoot, bb:lr")
parser.add_argument("--steps", type=int, default=10)
# basic settings
parser.add_argument("--loss", type=str, default="square")
parser.add_argument("--optimizer", type=str, choices=("sgd", "adam"), default="sgd")
parser.add_argument("-mom", "--momentum", type=float, default=0.9,
help="the momentum used for SGD")
parser.add_argument("-beta1", "--beta1", type=float, default=0.9,
help="the first beta argument for Adam")
parser.add_argument("-beta2", "--beta2", type=float, default=0.999,
help="the second beta argument for Adam")
parser.add_argument("-wd", "--weight_decay", type=float, default=5e-4,
help="weight decay")
parser.add_argument("-lr", "--lr", "--LR", "--learning_rate", type=float, default=0.1)
parser.add_argument("-lp", "--learning_policy", type=str, default="default",
help="learning rate schedule defined in config.py")
parser.add_argument("--epochs", type=int, default=180)
parser.add_argument("-b", "--batch_size", type=int, default=128)
parser.add_argument("--transform", type=str, default='default',
help="the data augmentation which will be applied during training.")
parser.add_argument("--resume", action="store_true", default=False)
parser.add_argument("--progress", action="store_true", default=False,
help="show the progress if true")
parser.add_argument("--seed", type=int, default=1)
parser.add_argument("-m", "--description", type=str, default="RobOC-AT")
opts = parser.parse_args()
opts.description = FMT.format(**opts.__dict__)
def load_cfg() -> Tuple[Config, str]:
from src.dict2obj import Config
from src.base import Coach, AdversaryForTrain
from src.utils import gpu, set_seed, load_checkpoint
cfg = Config()
set_seed(opts.seed)
# the model and other settings for training
model = load_model(opts.model)(
num_classes=get_num_classes(opts.dataset),
scale=opts.scale
)
device = gpu(model)
# load the dataset
trainset = load_dataset(
dataset_type=opts.dataset,
transform=opts.transform,
train=True
)
cfg['trainloader'] = load_dataloader(
dataset=trainset,
batch_size=opts.batch_size,
train=True,
show_progress=opts.progress
)
testset = load_dataset(
dataset_type=opts.dataset,
transform=opts.transform,
train=False
)
cfg['testloader'] = load_dataloader(
dataset=testset,
batch_size=opts.batch_size,
train=False,
show_progress=opts.progress
)
normalizer = load_normalizer(dataset_type=opts.dataset)
# load the optimizer and learning_policy
optimizer = load_optimizer(
model=model, optim_type=opts.optimizer, lr=opts.lr,
momentum=opts.momentum, betas=(opts.beta1, opts.beta2),
weight_decay=opts.weight_decay
)
learning_policy = load_learning_policy(
optimizer=optimizer,
learning_policy_type=opts.learning_policy,
T_max=opts.epochs
)
# generate the path for logging information and saving parameters
cfg['info_path'], cfg['log_path'] = generate_path(
method=METHOD, dataset_type=opts.dataset,
model=opts.model, description=opts.description
)
if opts.resume:
cfg['start_epoch'] = load_checkpoint(
path=cfg.info_path, model=model,
optimizer=optimizer, lr_scheduler=learning_policy
)
else:
cfg['start_epoch'] = 0
cfg['coach'] = Coach(
model=model, device=device,
loss_func=load_loss_func(opts.loss)(model=model),
normalizer=normalizer, optimizer=optimizer,
learning_policy=learning_policy
)
# set the attack
attack, bounds, preprocessing = load_attacks(
attack_type=opts.attack, dataset_type=opts.dataset,
stepsize=opts.stepsize, steps=opts.steps
)
cfg['attacker'] = AdversaryForTrain(
model=model, attacker=attack, device=device,
bounds=bounds, preprocessing=preprocessing, epsilon=opts.epsilon
)
cfg['valider'] = load_valider(
model=model, device=device, dataset_type=opts.dataset
)
return cfg
def evaluate(
valider, trainloader, testloader,
acc_logger, rob_logger, writter,
epoch = 8888
):
train_accuracy, train_success = valider.evaluate(trainloader)
valid_accuracy, valid_success = valider.evaluate(testloader)
print(f"Train >>> [TA: {train_accuracy:.5f}] [RA: {1 - train_success:.5f}]")
print(f"Test. >>> [TA: {valid_accuracy:.5f}] [RA: {1 - valid_success:.5f}]")
writter.add_scalars("Accuracy", {"train":train_accuracy, "valid":valid_accuracy}, epoch)
writter.add_scalars("Success", {"train":train_success, "valid":valid_success}, epoch)
acc_logger.train(data=train_accuracy, T=epoch)
acc_logger.valid(data=valid_accuracy, T=epoch)
rob_logger.train(data=1 - train_success, T=epoch)
rob_logger.valid(data=1 - valid_success, T=epoch)
def main(
coach, attacker, valider,
trainloader, testloader, start_epoch,
info_path, log_path
):
from src.utils import save_checkpoint, TrackMeter, ImageMeter
from src.dict2obj import Config
acc_logger = Config(
train=TrackMeter("Train"),
valid=TrackMeter("Valid")
)
acc_logger.plotter = ImageMeter(*acc_logger.values(), title="Accuracy")
rob_logger = Config(
train=TrackMeter("Train"),
valid=TrackMeter("Valid")
)
rob_logger.plotter = ImageMeter(*rob_logger.values(), title="Robustness")
for epoch in range(start_epoch, opts.epochs):
if epoch % SAVE_FREQ == 0:
save_checkpoint(info_path, coach.model, coach.optimizer, coach.learning_policy, epoch)
if epoch % PRINT_FREQ == 0:
evaluate(
valider=valider, trainloader=trainloader, testloader=testloader,
acc_logger=acc_logger, rob_logger=rob_logger, writter=writter,
epoch=epoch
)
running_loss = coach.adv_train(trainloader, attacker, leverage=opts.leverage, epoch=epoch)
writter.add_scalar("Loss", running_loss, epoch)
evaluate(
valider=valider, trainloader=trainloader, testloader=testloader,
acc_logger=acc_logger, rob_logger=rob_logger, writter=writter,
epoch=opts.epochs
)
acc_logger.plotter.plot()
rob_logger.plotter.plot()
acc_logger.plotter.save(writter)
rob_logger.plotter.save(writter)
if __name__ == "__main__":
from torch.utils.tensorboard import SummaryWriter
from src.utils import mkdirs, readme
cfg = load_cfg()
mkdirs(cfg.info_path, cfg.log_path)
readme(cfg.info_path, opts)
readme(cfg.log_path, opts, mode="a")
writter = SummaryWriter(log_dir=cfg.log_path, filename_suffix=METHOD)
main(**cfg)
cfg['coach'].save(cfg.info_path)
writter.close()
| en | 0.685364 | #!/usr/bin/env python # for orthogonal classifier # adversarial training settings # basic settings # the model and other settings for training # load the dataset # load the optimizer and learning_policy # generate the path for logging information and saving parameters # set the attack | 2.463921 | 2 |
notesapp/api_v1/models.py | kampkelly/drf_template | 0 | 10523 | from django.db import models
# Create your models here.
class CommonFieldsMixin(models.Model):
"""Add created_at and updated_at fields."""
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True, null=True)
class Meta:
"""Define metadata options."""
abstract = True
class Category(CommonFieldsMixin):
name = models.CharField(max_length=250,null=False,unique=True)
class Notes(CommonFieldsMixin):
title = models.CharField(max_length=250,null=False,unique=False)
body = models.TextField(null=False)
category = models.ForeignKey(Category,on_delete=models.CASCADE,default=None)
| from django.db import models
# Create your models here.
class CommonFieldsMixin(models.Model):
"""Add created_at and updated_at fields."""
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True, null=True)
class Meta:
"""Define metadata options."""
abstract = True
class Category(CommonFieldsMixin):
name = models.CharField(max_length=250,null=False,unique=True)
class Notes(CommonFieldsMixin):
title = models.CharField(max_length=250,null=False,unique=False)
body = models.TextField(null=False)
category = models.ForeignKey(Category,on_delete=models.CASCADE,default=None)
| en | 0.82233 | # Create your models here. Add created_at and updated_at fields. Define metadata options. | 2.411068 | 2 |
src/main_TS_tsconv_jma.py | inoue0406/radarJMA | 6 | 10524 | # seq2seq LSTM (no-convolutional model) for time series prediction
import numpy as np
import torch
import torchvision
import torch.utils.data as data
import torchvision.transforms as transforms
import pandas as pd
import h5py
import os
import sys
import json
import time
import pdb
from jma_timeseries_dataset import *
from scaler import *
from train_valid_epoch_tsconv import *
from utils import Logger
from opts_ts import parse_opts
def count_parameters(model,f):
for name,p in model.named_parameters():
f.write("name,"+name+", Trainable, "+str(p.requires_grad)+",#params, "+str(p.numel())+"\n")
Nparam = sum(p.numel() for p in model.parameters())
Ntrain = sum(p.numel() for p in model.parameters() if p.requires_grad)
f.write("Number of params:"+str(Nparam)+", Trainable parameters:"+str(Ntrain)+"\n")
print("Number of params:"+str(Nparam)+", Trainable parameters:"+str(Ntrain)+"\n")
if __name__ == '__main__':
# parse command-line options
opt = parse_opts()
print(opt)
# create result dir
if not os.path.exists(opt.result_path):
os.mkdir(opt.result_path)
with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:
json.dump(vars(opt), opt_file)
# generic log file
logfile = open(os.path.join(opt.result_path, 'log_run.txt'),'w')
logfile.write('Start time:'+time.ctime()+'\n')
tstart = time.time()
# model information
modelinfo = open(os.path.join(opt.result_path, 'model_info.txt'),'w')
# prepare scaler for data
if opt.data_scaling == 'linear':
scl = LinearScaler()
if opt.data_scaling == 'root':
scl = RootScaler()
if not opt.no_train:
# loading datasets
train_dataset = JMATSConvDataset(csv_data=opt.train_data_path,
csv_anno=opt.train_anno_path,
use_var=opt.use_var,
root_dir=None,
tdim_use=opt.tdim_use,
resize=opt.data_resize,
transform=None)
valid_dataset = JMATSConvDataset(csv_data=opt.valid_data_path,
csv_anno=opt.valid_anno_path,
use_var=opt.use_var,
root_dir=None,
tdim_use=opt.tdim_use,
resize=opt.data_resize,
transform=None)
#tstdata = next(iter(train_dataset))
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=opt.batch_size,
num_workers=7,
drop_last=True,
shuffle=True)
valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset,
batch_size=opt.batch_size,
num_workers=7,
drop_last=True,
shuffle=False)
if opt.model_name == 'seq2seq':
# lstm seq2seq model
CONV_HID_DIM = 32
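# The LSTM input presumably concatenates the scalar time-series value (1 channel) with CONV_HID_DIM features from the convolutional encoder, hence INPUT_DIM = 1 + CONV_HID_DIM.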
INPUT_DIM = 1 + CONV_HID_DIM
OUTPUT_DIM = 1
HID_DIM = 512
N_LAYERS = 3
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5
from models.seq2seq_convlstm_ts import *
enc = Encoder(INPUT_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)
dec = Decoder(OUTPUT_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT)
model = Seq2SeqConv(enc, dec, CONV_HID_DIM, device='cuda').cuda()
if opt.transfer_path != 'None':
# Use pretrained weights for transfer learning
print('loading pretrained model:',opt.transfer_path)
model = torch.load(opt.transfer_path)
modelinfo.write('Model Structure \n')
modelinfo.write(str(model))
count_parameters(model,modelinfo)
# modelinfo.close()
if opt.loss_function == 'MSE':
loss_fn = torch.nn.MSELoss()
# Type of optimizers adam/rmsprop
if opt.optimizer == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=opt.learning_rate)
elif opt.optimizer == 'rmsprop':
optimizer = torch.optim.RMSprop(model.parameters(), lr=opt.learning_rate)
# learning rate scheduler
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=opt.lr_decay)
# Prep logger
train_logger = Logger(
os.path.join(opt.result_path, 'train.log'),
['epoch', 'loss', 'lr'])
train_batch_logger = Logger(
os.path.join(opt.result_path, 'train_batch.log'),
['epoch', 'batch', 'loss', 'lr'])
valid_logger = Logger(
os.path.join(opt.result_path, 'valid.log'),
['epoch', 'loss'])
# training
for epoch in range(1,opt.n_epochs+1):
if epoch < 10:
# freeze conv_encoder for first 10 epochs
submodel = next(iter(model.children()))
for param in submodel.parameters():
param.requires_grad = False
else:
# unfreeze conv_encoder for the rest
submodel = next(iter(model.children()))
for param in submodel.parameters():
param.requires_grad = True
count_parameters(model,modelinfo)
#import pdb;pdb.set_trace()
# step scheduler
scheduler.step()
# training & validation
train_epoch(epoch,opt.n_epochs,train_loader,model,loss_fn,optimizer,
train_logger,train_batch_logger,opt,scl)
valid_epoch(epoch,opt.n_epochs,valid_loader,model,loss_fn,
valid_logger,opt,scl)
if epoch % opt.checkpoint == 0:
# save the trained model for every checkpoint
# (1) as binary
torch.save(model,os.path.join(opt.result_path,
'trained_seq2seq_epoch%03d.model' % epoch))
# (2) as state dictionary
torch.save(model.state_dict(),
os.path.join(opt.result_path,
'trained_seq2seq_epoch%03d.dict' % epoch))
# save the trained model
# (1) as binary
torch.save(model,os.path.join(opt.result_path, 'trained_seq2seq.model'))
# (2) as state dictionary
torch.save(model.state_dict(),
os.path.join(opt.result_path, 'trained_seq2seq.dict'))
# test datasets if specified
if opt.test:
if opt.no_train:
#load pretrained model from results directory
model_fname = os.path.join(opt.result_path, opt.test_model_fname)
print('loading pretrained model:',model_fname)
model = torch.load(model_fname)
loss_fn = torch.nn.MSELoss()
# prepare loader
test_dataset = JMATSConvDataset(csv_data=opt.test_data_path,
csv_anno=opt.test_anno_path,
use_var=opt.use_var,
root_dir=None,
tdim_use=opt.tdim_use,
transform=None)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
# batch_size=opt.batch_size,
batch_size=3, # small batch size used
num_workers=7,
drop_last=True,
shuffle=False)
# testing for the trained model
test_epoch(test_loader,model,loss_fn,opt,scl)
# output elapsed time
logfile.write('End time: '+time.ctime()+'\n')
tend = time.time()
tdiff = float(tend-tstart)/3600.0
logfile.write('Elapsed time[hours]: %f \n' % tdiff)
| # seq2seq LSTM (no-convolutional model) for time series prediction
import numpy as np
import torch
import torchvision
import torch.utils.data as data
import torchvision.transforms as transforms
import pandas as pd
import h5py
import os
import sys
import json
import time
import pdb
from jma_timeseries_dataset import *
from scaler import *
from train_valid_epoch_tsconv import *
from utils import Logger
from opts_ts import parse_opts
def count_parameters(model,f):
for name,p in model.named_parameters():
f.write("name,"+name+", Trainable, "+str(p.requires_grad)+",#params, "+str(p.numel())+"\n")
Nparam = sum(p.numel() for p in model.parameters())
Ntrain = sum(p.numel() for p in model.parameters() if p.requires_grad)
f.write("Number of params:"+str(Nparam)+", Trainable parameters:"+str(Ntrain)+"\n")
print("Number of params:"+str(Nparam)+", Trainable parameters:"+str(Ntrain)+"\n")
if __name__ == '__main__':
# parse command-line options
opt = parse_opts()
print(opt)
# create result dir
if not os.path.exists(opt.result_path):
os.mkdir(opt.result_path)
with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:
json.dump(vars(opt), opt_file)
# generic log file
logfile = open(os.path.join(opt.result_path, 'log_run.txt'),'w')
logfile.write('Start time:'+time.ctime()+'\n')
tstart = time.time()
# model information
modelinfo = open(os.path.join(opt.result_path, 'model_info.txt'),'w')
# prepare scaler for data
if opt.data_scaling == 'linear':
scl = LinearScaler()
if opt.data_scaling == 'root':
scl = RootScaler()
if not opt.no_train:
# loading datasets
train_dataset = JMATSConvDataset(csv_data=opt.train_data_path,
csv_anno=opt.train_anno_path,
use_var=opt.use_var,
root_dir=None,
tdim_use=opt.tdim_use,
resize=opt.data_resize,
transform=None)
valid_dataset = JMATSConvDataset(csv_data=opt.valid_data_path,
csv_anno=opt.valid_anno_path,
use_var=opt.use_var,
root_dir=None,
tdim_use=opt.tdim_use,
resize=opt.data_resize,
transform=None)
#tstdata = next(iter(train_dataset))
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=opt.batch_size,
num_workers=7,
drop_last=True,
shuffle=True)
valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset,
batch_size=opt.batch_size,
num_workers=7,
drop_last=True,
shuffle=False)
if opt.model_name == 'seq2seq':
# lstm seq2seq model
CONV_HID_DIM = 32
INPUT_DIM = 1 + CONV_HID_DIM
OUTPUT_DIM = 1
HID_DIM = 512
N_LAYERS = 3
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5
from models.seq2seq_convlstm_ts import *
enc = Encoder(INPUT_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)
dec = Decoder(OUTPUT_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT)
model = Seq2SeqConv(enc, dec, CONV_HID_DIM, device='cuda').cuda()
if opt.transfer_path != 'None':
# Use pretrained weights for transfer learning
print('loading pretrained model:',opt.transfer_path)
model = torch.load(opt.transfer_path)
modelinfo.write('Model Structure \n')
modelinfo.write(str(model))
count_parameters(model,modelinfo)
# modelinfo.close()
if opt.loss_function == 'MSE':
loss_fn = torch.nn.MSELoss()
# Type of optimizers adam/rmsprop
if opt.optimizer == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=opt.learning_rate)
elif opt.optimizer == 'rmsprop':
optimizer = torch.optim.RMSprop(model.parameters(), lr=opt.learning_rate)
# learning rate scheduler
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=opt.lr_decay)
# Prep logger
train_logger = Logger(
os.path.join(opt.result_path, 'train.log'),
['epoch', 'loss', 'lr'])
train_batch_logger = Logger(
os.path.join(opt.result_path, 'train_batch.log'),
['epoch', 'batch', 'loss', 'lr'])
valid_logger = Logger(
os.path.join(opt.result_path, 'valid.log'),
['epoch', 'loss'])
# training
for epoch in range(1,opt.n_epochs+1):
if epoch < 10:
# freeze conv_encoder for first 10 epochs
submodel = next(iter(model.children()))
for param in submodel.parameters():
param.requires_grad = False
else:
# unfreeze conv_encoder for the rest
submodel = next(iter(model.children()))
for param in submodel.parameters():
param.requires_grad = True
count_parameters(model,modelinfo)
#import pdb;pdb.set_trace()
# step scheduler
scheduler.step()
# training & validation
train_epoch(epoch,opt.n_epochs,train_loader,model,loss_fn,optimizer,
train_logger,train_batch_logger,opt,scl)
valid_epoch(epoch,opt.n_epochs,valid_loader,model,loss_fn,
valid_logger,opt,scl)
if epoch % opt.checkpoint == 0:
# save the trained model for every checkpoint
# (1) as binary
torch.save(model,os.path.join(opt.result_path,
'trained_seq2seq_epoch%03d.model' % epoch))
# (2) as state dictionary
torch.save(model.state_dict(),
os.path.join(opt.result_path,
'trained_seq2seq_epoch%03d.dict' % epoch))
# save the trained model
# (1) as binary
torch.save(model,os.path.join(opt.result_path, 'trained_seq2seq.model'))
# (2) as state dictionary
torch.save(model.state_dict(),
os.path.join(opt.result_path, 'trained_seq2seq.dict'))
# test datasets if specified
if opt.test:
if opt.no_train:
#load pretrained model from results directory
model_fname = os.path.join(opt.result_path, opt.test_model_fname)
print('loading pretrained model:',model_fname)
model = torch.load(model_fname)
loss_fn = torch.nn.MSELoss()
# prepare loader
test_dataset = JMATSConvDataset(csv_data=opt.test_data_path,
csv_anno=opt.test_anno_path,
use_var=opt.use_var,
root_dir=None,
tdim_use=opt.tdim_use,
transform=None)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
# batch_size=opt.batch_size,
batch_size=3, # small batch size used
num_workers=7,
drop_last=True,
shuffle=False)
# testing for the trained model
test_epoch(test_loader,model,loss_fn,opt,scl)
# output elapsed time
logfile.write('End time: '+time.ctime()+'\n')
tend = time.time()
tdiff = float(tend-tstart)/3600.0
    logfile.write('Elapsed time[hours]: %f \n' % tdiff)
bell2014/energy/prob_abs_s.py | dmaugis/intrinsic | 134 | 10525 |
import numpy as np
class ProbAbsoluteShading(object):
def __init__(self, params):
self.params = params
def cost(self, s_nz):
if self.params.abs_shading_weight:
if self.params.abs_shading_log:
return self.params.abs_shading_weight * \
np.abs(np.log(s_nz) - np.log(self.params.abs_shading_gray_point))
else:
return self.params.abs_shading_weight * \
np.abs(s_nz - self.params.abs_shading_gray_point)
else:
return 0
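# --- Illustrative usage sketch (not part of the original file) ---
# ProbAbsoluteShading penalises shading values that drift away from a gray point.
# The _Params class below is a hypothetical stand-in for the real parameter object,
# which only needs the three attributes accessed in cost() above.
def _example_cost():
    class _Params(object):
        abs_shading_weight = 500.0
        abs_shading_log = True
        abs_shading_gray_point = 0.5
    s_nz = np.array([0.25, 0.5, 1.0])  # nonzero shading samples
    # the cost is 0 where s equals the gray point and grows with |log s - log gray_point|
    return ProbAbsoluteShading(_Params()).cost(s_nz)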
onemsdk/parser/tag.py | mvnm/onemsdk | 0 | 10526 | import inspect
import sys
from abc import ABC, abstractmethod
from enum import Enum
from typing import List, Union, Type, Optional, Dict, Any
from pydantic import BaseModel
from onemsdk.exceptions import NodeTagMismatchException, ONEmSDKException
from .node import Node
__all__ = ['Tag', 'HeaderTag', 'FooterTag', 'BrTag', 'UlTag', 'LiTag', 'FormTag',
'SectionTag', 'InputTagAttrs', 'InputTag', 'FormTagAttrs', 'PTag', 'ATag',
'ATagAttrs', 'get_tag_cls', 'SectionTagAttrs', 'LiTagAttrs', 'InputTagType']
class Tag(BaseModel, ABC):
class Config:
tag_name: str = None
attrs: Any = None
children: List[Union['Tag', str]] = []
@abstractmethod
def render(self) -> str:
pass
@classmethod
def from_node(cls, node: Node) -> 'Tag':
if node.tag != cls.Config.tag_name:
raise NodeTagMismatchException(
f'Expected tag <{cls.Config.tag_name}>, received <{node.tag}>')
attrs = cls.get_attrs(node)
children = []
for node_child in node.children:
if isinstance(node_child, str):
children.append(node_child)
else:
child_tag_cls = get_tag_cls(node_child.tag)
children.append(child_tag_cls.from_node(node_child))
return cls(attrs=attrs, children=children)
@classmethod
def get_attrs(cls, node: Node):
return None
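# --- Illustrative sketch (not part of the original module) ---
# Tag.from_node() walks a parsed Node tree and builds the matching Tag objects
# recursively, dispatching on node.tag via get_tag_cls(). The Node(...) call below
# assumes a keyword constructor exposing tag, attrs and children; adjust it to
# whatever onemsdk.parser.node actually provides.
def _example_from_node():
    node = Node(tag='p', attrs={}, children=['hello'])
    p_tag = get_tag_cls(node.tag).from_node(node)
    return p_tag.render()  # -> 'hello'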
class HeaderTag(Tag):
class Config:
tag_name = 'header'
def __init__(self, children: List[str] = None, **data):
children = children or []
if len(children) > 1 or children and not isinstance(children[0], str):
raise ONEmSDKException('<header> must have max 1 text child')
super(HeaderTag, self).__init__(children=children)
def render(self):
if len(self.children) == 1:
return self.children[0]
return ''
def data(self):
return None
HeaderTag.update_forward_refs()
class FooterTag(Tag):
class Config:
tag_name = 'footer'
def __init__(self, children: List[str] = None, **data):
children = children or []
if len(children) > 1 or children and not isinstance(children[0], str):
raise ONEmSDKException('<footer> must have max 1 text child')
super(FooterTag, self).__init__(children=children)
def render(self):
if len(self.children) == 1:
return self.children[0]
return ''
def data(self):
return None
FooterTag.update_forward_refs()
class InputTagType(str, Enum):
# standard HTML5 input values
text = 'text'
date = 'date'
number = 'number'
hidden = 'hidden'
email = 'email'
url = 'url'
# not standard
datetime = 'datetime'
location = 'location'
class InputTagAttrs(BaseModel):
# standard HTML5 attributes
type: InputTagType
min: Union[int, float] = None
minlength: int = None
max: Union[int, float] = None
maxlength: int = None
step: int = None
value: str = None # only for type="hidden"
pattern: str = None
# not standard
min_error: str = None
minlength_error: str = None
max_error: str = None
maxlength_error: str = None
class InputTag(Tag):
class Config:
tag_name = 'input'
attrs: InputTagAttrs
def __init__(self, attrs: InputTagAttrs, **data):
super(InputTag, self).__init__(attrs=attrs)
@classmethod
def get_attrs(cls, node: Node):
return InputTagAttrs(
type=node.attrs.get('type'),
min=node.attrs.get('min'),
min_error=node.attrs.get('min-error'),
minlength=node.attrs.get('minlength'),
minlength_error=node.attrs.get('minlength-error'),
max=node.attrs.get('max'),
max_error=node.attrs.get('max-error'),
maxlength=node.attrs.get('maxlength'),
maxlength_error=node.attrs.get('maxlength-error'),
step=node.attrs.get('step'),
value=node.attrs.get('value'),
pattern=node.attrs.get('pattern'),
)
def render(self):
return ''
def data(self) -> Optional[Dict[str, str]]:
return None
InputTag.update_forward_refs()
class LabelTag(Tag):
class Config:
tag_name = 'label'
def __init__(self, children: List[str] = None, **data):
children = children or []
if len(children) > 1 or children and not isinstance(children[0], str):
raise ONEmSDKException('<label> must have max 1 text child')
super(LabelTag, self).__init__(children=children)
def render(self):
return self.children[0]
LabelTag.update_forward_refs()
class ATagAttrs(BaseModel):
href: str
method: Optional[str] = 'GET'
class ATag(Tag):
class Config:
tag_name: str = 'a'
attrs: ATagAttrs
def __init__(self, attrs: ATagAttrs, children: List[str]):
if len(children) != 1 or not isinstance(children[0], str):
raise ONEmSDKException('<a> must have 1 text child')
super(ATag, self).__init__(attrs=attrs, children=children)
@classmethod
def get_attrs(cls, node: Node) -> ATagAttrs:
return ATagAttrs(href=node.attrs.get('href'),
method=node.attrs.get('method') or 'GET')
def render(self):
return self.children[0]
def data(self) -> Dict[str, str]:
return {
**self.attrs.dict(),
'text': self.children[0]
}
ATag.update_forward_refs()
class LiTagAttrs(BaseModel):
value: Optional[str]
text_search: Optional[str]
class LiTag(Tag):
class Config:
tag_name = 'li'
attrs: LiTagAttrs
def __init__(self, children: List[Union[ATag, str]], attrs: LiTagAttrs = None):
if len(children) != 1 or not isinstance(children[0], (str, ATag)):
raise ONEmSDKException('<li> must have 1 (text or <a>) child')
if attrs is None:
attrs = LiTagAttrs()
super(LiTag, self).__init__(attrs=attrs, children=children)
@classmethod
def get_attrs(cls, node: Node):
return LiTagAttrs(
value=node.attrs.get('value'),
text_search=node.attrs.get('text-search'),
)
def render(self):
if isinstance(self.children[0], ATag):
return self.children[0].render()
return self.children[0]
LiTag.update_forward_refs()
class UlTag(Tag):
class Config:
tag_name = 'ul'
def __init__(self, children: List[LiTag], **data):
if not children or not isinstance(children[0], LiTag):
raise ONEmSDKException('<ul> must have min 1 <li> child')
super(UlTag, self).__init__(children=children)
def render(self):
return '\n'.join([child.render() for child in self.children])
UlTag.update_forward_refs()
class PTag(Tag):
class Config:
tag_name = 'p'
def __init__(self, children: List[str] = None, **data):
children = children or []
if len(children) > 1 or children and not isinstance(children[0], str):
raise ONEmSDKException('<p> must have max 1 text child')
super(PTag, self).__init__(children=children)
def render(self):
if len(self.children) == 1:
return self.children[0]
return ''
def data(self):
return {
'text': self.children[0],
'href': None,
'data': None
}
PTag.update_forward_refs()
class BrTag(Tag):
class Config:
tag_name = 'br'
def __init__(self, **data):
super(BrTag, self).__init__()
def render(self):
return '\n'
def data(self):
return {
'text': '\n',
'data': None,
'href': None
}
BrTag.update_forward_refs()
class SectionTagAttrs(BaseModel):
header: Optional[str]
footer: Optional[str]
name: Optional[str]
auto_select: bool = False
multi_select: bool = False
numbered: bool = False
chunking_footer: Optional[str]
confirmation_label: Optional[str]
method: Optional[str]
required: Optional[bool]
status_exclude: Optional[bool]
status_prepend: Optional[bool]
url: Optional[str]
validate_type_error: Optional[str]
validate_type_error_footer: Optional[str]
validate_url: Optional[str]
class SectionTag(Tag):
class Config:
tag_name = 'section'
attrs: SectionTagAttrs
def __init__(self, attrs: SectionTagAttrs = None, children: List = None):
children = children or []
allowed_children = (FooterTag, HeaderTag, UlTag, PTag,
InputTag, LabelTag, BrTag, str)
for child in children:
if not isinstance(child, allowed_children):
raise ONEmSDKException(
f'<{child.Config.tag_name}> cannot be child for <section>')
super(SectionTag, self).__init__(attrs=attrs, children=children)
def render(self, exclude_header: bool = False, exclude_footer: bool = False):
# Add a temporary \n for help
rendered_children = ['\n']
for child in self.children:
if isinstance(child, HeaderTag) and exclude_header:
# Do not include header
continue
if isinstance(child, FooterTag) and exclude_footer:
# Do not include footer
continue
if isinstance(child, str):
text = child
else:
text = child.render()
if text:
if isinstance(child, PTag) or isinstance(child, UlTag):
if rendered_children[-1] != '\n':
rendered_children.append('\n')
rendered_children.append(text)
rendered_children.append('\n')
else:
rendered_children.append(text)
# Remove the temporary \n
del rendered_children[0]
if rendered_children and rendered_children[-1] == '\n':
del rendered_children[-1]
return ''.join(rendered_children)
@classmethod
def get_attrs(cls, node: Node) -> SectionTagAttrs:
return SectionTagAttrs(
header=node.attrs.get('header'),
footer=node.attrs.get('footer'),
name=node.attrs.get('name'),
# Note that boolean attributes in HTML are evaluated to True if they are
# present (their actual value does not matter). They are evaluated to False
# only when they are missing
auto_select='auto-select' in node.attrs,
multi_select='multi-select' in node.attrs,
numbered='numbered' in node.attrs,
chunking_footer=node.attrs.get('chunking-footer'),
confirmation_label=node.attrs.get('confirmation-label'),
method=node.attrs.get('method'),
required='required' in node.attrs,
status_exclude='status-exclude' in node.attrs,
status_prepend='status-prepend' in node.attrs,
url=node.attrs.get('url'),
validate_type_error=node.attrs.get('validate-type-error'),
validate_type_error_footer=node.attrs.get('validate-type-error-footer'),
validate_url=node.attrs.get('validate-url'),
)
SectionTag.update_forward_refs()
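# --- Illustrative sketch (not part of the original module) ---
# SectionTag.render() concatenates its children top to bottom, optionally dropping
# the header/footer, and keeps <p>/<ul> blocks on their own lines. The attributes
# below are a minimal hand-built example rather than output of the real HTML parser.
def _example_render_section():
    section = SectionTag(
        attrs=SectionTagAttrs(name='demo'),
        children=[HeaderTag(children=['MENU']), PTag(children=['Pick an option'])],
    )
    return section.render(exclude_header=True)  # -> 'Pick an option'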
class FormTagAttrs(BaseModel):
header: Optional[str]
footer: Optional[str]
action: str
method: str = 'POST'
completion_status_show: bool = False
completion_status_in_header: bool = False
skip_confirmation: bool = False
class FormTag(Tag):
class Config:
tag_name = 'form'
attrs: FormTagAttrs
children: List[SectionTag]
def __init__(self, attrs: FormTagAttrs, children: List[SectionTag]):
if not children:
raise ONEmSDKException('<form> must have at least 1 child')
for child in children:
if not isinstance(child, SectionTag):
raise ONEmSDKException('<form> can have only <section> children')
if not child.attrs.name:
raise ONEmSDKException('<form> can contain only named <section> tags. '
'Please add a unique "name" attribute in each form '
'section.')
super(FormTag, self).__init__(attrs=attrs, children=children)
@classmethod
def get_attrs(cls, node: Node):
return FormTagAttrs(
header=node.attrs.get('header'),
footer=node.attrs.get('footer'),
action=node.attrs.get('action'),
method=node.attrs.get('method') or 'POST',
completion_status_show='completion-status-show' in node.attrs,
completion_status_in_header='completion-status-in-header' in node.attrs,
skip_confirmation='skip-confirmation' in node.attrs,
)
def render(self):
return '\n'.join([child.render() for child in self.children])
FormTag.update_forward_refs()
_map_tag_cls = {}
for name, obj in inspect.getmembers(sys.modules[__name__]):
if inspect.isclass(obj) and issubclass(obj, Tag):
_map_tag_cls[obj.Config.tag_name] = obj
def get_tag_cls(tag_name: str) -> Type[Tag]:
global _map_tag_cls
try:
return _map_tag_cls[tag_name]
except KeyError:
        raise ONEmSDKException(f'Tag <{tag_name}> is not supported')
backend/syntax/rule.py | austinmarsray/Ccompiler | 0 | 10527 | class Sign:
"""
    Symbol
"""
def __init__(self, sign_type, sign_str='', sign_line=-1):
"""
        Constructor
        :param sign_type: type of the symbol
        :param sign_str: text of the symbol (may be empty)
        :param sign_line: line number where the symbol appears (may be empty)
"""
self.type = sign_type
self.str = sign_str
self.line = sign_line
def is_terminal_sign(self):
"""
        Whether this is a terminal symbol
:return: True/False
"""
if self.type == 'empty':
return True
else:
for i in terminal_sign_type:
if i == self.type:
return True
return False
def is_non_terminal_sign(self):
"""
        Whether this is a non-terminal symbol
:return: True/False
"""
for i in non_terminal_sign_type:
if i == self.type:
return True
return False
def is_empty_sign(self):
"""
        Whether this is the empty symbol (epsilon)
:return: True/False
"""
return self.type == 'empty'
class Production:
"""
    Production rule
"""
def __init__(self, left_type, right_types):
"""
        Constructor
        :param left_type: symbol type on the left-hand side of the production
        :param right_types: list of symbol types on the right-hand side of the production
        :param semantic_start: semantic action keyword - start
        :param semantic_children: semantic action keyword - children
        :param semantic_end: semantic action keyword - end
"""
self.left = Sign(left_type)
self.right = list()
for i in right_types:
self.right.append(Sign(i))
        # For debug printing
SignToChar = {
'else': 'else',
'if': 'if',
'int': 'int',
'return': 'return',
'void': 'void',
'while': 'while',
'addition': '+',
'subtraction': '-',
'multiplication': '*',
'division': '/',
'bigger': '>',
'bigger-equal': '>=',
'smaller': '<',
'smaller-equal': '<=',
'equal': '==',
'not-equal': '!=',
'evaluate': '=',
'semicolon': ';',
'comma': ',',
'left-parentheses': '(',
'right-parentheses': ')',
'left-bracket': '[',
'right-bracket': ']',
'left-brace': '{',
'right-brace': '}',
'id': 'id',
'num': 'num',
'pound': '#'
}
self.str = self.left.type + ' ->'
if len(self.right) == 0:
self.str += 'ϵ'
else:
for i in self.right:
if i.is_non_terminal_sign():
self.str += ' ' + i.type
else:
self.str += ' ' + SignToChar[i.type]
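# --- Illustrative sketch (not part of the original file) ---
# A Production keeps the left-hand Sign, the list of right-hand Signs and a
# printable form in self.str; an empty right-hand side is rendered as epsilon.
def _example_production():
    p = Production('add-op', ['addition'])
    empty = Production('params', [])
    return p.str, empty.str  # -> ('add-op -> +', 'params ->ϵ')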
"""
1. program -> define-list
2. define-list -> define define-list
| empty
3. define -> type ID define-type
4. define-type -> var-define-follow
| fun-define-follow
5. var-define-follow -> ;
| [ NUM ] ;
6. type -> int
| void
7. fun-define-follow -> ( params ) code-block
8. params -> param-list
| empty
9. param-list -> param param-follow
10. param-follow -> , param param-follow
| empty
11. param -> type ID array-subscript
12. array-subscript -> [ ]
| empty
13. code-block -> { local-define-list code-list }
14. local-define-list -> local-var-define local-define-list
| empty
15. local-var-define -> type ID var-define-follow
16. code-list -> code code-list
| empty
17. code -> normal-statement
| selection-statement
| iteration-statement
| return-statement
18. normal-statement -> ;
| ID normal-statement-follow
19. normal-statement-follow -> var-follow = expression ;
| call-follow ;
20. call-follow -> ( call-params )
21. call-params -> call-param-list
| empty
22. call-param-list -> expression call-param-follow
23. call-param-follow -> , expression call-param-follow
| empty
24. selection-statement -> if ( expression ) { code-list } selection-follow
25. selection-follow -> else { code-list }
| empty
26. iteration-statement -> while ( expression ) iteration-follow
27. iteration-follow -> { code-list }
| code
28. return-statement -> return return-follow
29. return-follow -> ;
| expression ;
30. var-follow -> [ expression ]
| empty
31. expression -> additive-expr expression-follow
32. expression-follow -> rel-op additive-expr
| empty
33. rel-op -> <=
| <
| >
| >=
| ==
| !=
34. additive-expr -> term additive-expr-follow
35. additive-expr-follow -> add-op term additive-expr-follow
| empty
36. add-op -> +
| -
37. term -> factor term-follow
38. term-follow -> mul-op factor term-follow
| empty
39. mul-op -> *
| /
40. factor -> ( expression )
                    | ID id-factor-follow
                    | NUM
41. id-factor-follow -> var-follow
| ( args )
42. args -> arg-list
| empty
43. arg-list -> expression arg-list-follow
44. arg-list-follow -> , expression arg-list-follow
| empty
"""
# All terminal symbol types
terminal_sign_type = [
'else',
'if',
'int',
'return',
'void',
'while',
'addition',
'subtraction',
'multiplication',
'division',
'bigger',
'bigger-equal',
'smaller',
'smaller-equal',
'equal',
'not-equal',
'evaluate',
'semicolon',
'comma',
'left-parentheses',
'right-parentheses',
'left-bracket',
'right-bracket',
'left-brace',
'right-brace',
'id',
'num',
    # Add new terminal types above this entry; never move 'pound'
'pound'
]
# All non-terminal symbol types
non_terminal_sign_type = [
'program',
'define-list',
'define',
'define-type',
'var-define-follow',
'type',
'fun-define-follow',
'params',
'param-list',
'param-follow',
'param',
'array-subscript',
'code-block',
'local-define-list',
'local-var-define',
'code-list',
'code',
'normal-statement',
'normal-statement-follow',
'call-follow',
'call-params',
'call-param-list',
'call-param-follow',
'selection-statement',
'selection-follow',
'iteration-statement',
'iteration-follow',
'return-statement',
'return-follow',
# 'eval-statement',
# 'var',
'var-follow',
'expression',
'expression-follow',
'rel-op',
'additive-expr',
'additive-expr-follow',
'add-op',
'term',
'term-follow',
'mul-op',
'factor',
'id-factor-follow',
'args',
'arg-list',
'arg-list-follow'
]
# Grammar productions
productions = [
# 0
Production('program', ['define-list']),
# 1
Production('define-list', ['define', 'define-list']),
Production('define-list', []),
# 2
Production('define', ['type', 'id', 'define-type']),
# 3
Production('define-type', ['var-define-follow']),
Production('define-type', ['fun-define-follow']),
# 4
Production('var-define-follow', ['semicolon']),
Production('var-define-follow', ['left-bracket', 'num', 'right-bracket', 'semicolon']),
# 5
Production('type', ['int']),
Production('type', ['void']),
# 6
Production('fun-define-follow', ['left-parentheses', 'params', 'right-parentheses', 'code-block']),
# 7
Production('params', ['param-list']),
Production('params', []),
# 8
Production('param-list', ['param', 'param-follow']),
# 9
Production('param-follow', ['comma', 'param', 'param-follow']),
Production('param-follow', []),
# 10
Production('param', ['type', 'id', 'array-subscript']),
# 11
Production('array-subscript', ['left-bracket', 'right-bracket']),
Production('array-subscript', []),
# 12
Production('code-block', ['left-brace', 'local-define-list', 'code-list', 'right-brace']),
# 13
Production('local-define-list', ['local-var-define', 'local-define-list']),
Production('local-define-list', []),
# 14
Production('local-var-define', ['type', 'id', 'var-define-follow']),
# 15
Production('code-list', ['code', 'code-list']),
Production('code-list', []),
# 16
Production('code', ['normal-statement']),
Production('code', ['selection-statement']),
Production('code', ['iteration-statement']),
Production('code', ['return-statement']),
# Production('normal-statement', ['eval-statement', 'semicolon']),
# Production('normal-statement', ['semicolon']),
# 17
Production('normal-statement', ['semicolon']),
Production('normal-statement', ['id', 'normal-statement-follow']),
# 18
Production('normal-statement-follow', ['var-follow', 'evaluate', 'expression', 'semicolon']),
Production('normal-statement-follow', ['call-follow', 'semicolon']),
# 19
Production('call-follow', ['left-parentheses', 'call-params', 'right-parentheses']),
# 20
Production('call-params', ['call-param-list']),
Production('call-params', []),
# 21
Production('call-param-list', ['expression', 'call-param-follow']),
# 22
Production('call-param-follow', ['comma', 'expression', 'call-param-follow']),
Production('call-param-follow', []),
# 23
Production('selection-statement',
['if', 'left-parentheses', 'expression', 'right-parentheses', 'left-brace',
'code-list', 'right-brace', 'selection-follow']),
# 24
Production('selection-follow', ['else', 'left-brace', 'code-list', 'right-brace']),
Production('selection-follow', []),
# 25
Production('iteration-statement', ['while', 'left-parentheses', 'expression',
'right-parentheses', 'iteration-follow']),
# 26
Production('iteration-follow', ['left-brace', 'code-list', 'right-brace']),
Production('iteration-follow', ['code']),
# 27
Production('return-statement', ['return', 'return-follow']),
# 28
Production('return-follow', ['semicolon']),
Production('return-follow', ['expression', 'semicolon']),
# Production('eval-statement', ['var', 'evaluate', 'expression']),
# Production('var', ['id', 'var-follow']),
# 29
Production('var-follow', ['left-bracket', 'expression', 'right-bracket']),
Production('var-follow', []),
# 30
Production('expression', ['additive-expr', 'expression-follow']),
# 31
Production('expression-follow', ['rel-op', 'additive-expr']),
Production('expression-follow', []),
# 32
Production('rel-op', ['smaller-equal']),
Production('rel-op', ['smaller']),
Production('rel-op', ['bigger']),
Production('rel-op', ['bigger-equal']),
Production('rel-op', ['equal']),
Production('rel-op', ['not-equal']),
# 33
Production('additive-expr', ['term', 'additive-expr-follow']),
# 34
Production('additive-expr-follow', ['add-op', 'term', 'additive-expr-follow']),
Production('additive-expr-follow', []),
# 35
Production('add-op', ['addition']),
Production('add-op', ['subtraction']),
# 36
Production('term', ['factor', 'term-follow']),
# 37
Production('term-follow', ['mul-op', 'factor', 'term-follow']),
Production('term-follow', []),
# 38
Production('mul-op', ['multiplication']),
Production('mul-op', ['division']),
# 39
Production('factor', ['left-parentheses', 'expression', 'right-parentheses']),
Production('factor', ['id', 'id-factor-follow']),
Production('factor', ['num']),
# 40
Production('id-factor-follow', ['var-follow']),
Production('id-factor-follow', ['left-parentheses', 'args', 'right-parentheses']),
# 41
Production('args', ['arg-list']),
Production('args', []),
# 42
Production('arg-list', ['expression', 'arg-list-follow']),
Production('arg-list-follow', ['comma', 'expression', 'arg-list-follow']),
Production('arg-list-follow', [])
]
# Grammar start symbol
grammar_start = Sign('program')
redash/models.py | slachiewicz/redash | 1 | 10528 | import json
import hashlib
import logging
import os
import threading
import time
import datetime
import itertools
import peewee
from passlib.apps import custom_app_context as pwd_context
from playhouse.postgres_ext import ArrayField, DateTimeTZField, PostgresqlExtDatabase
from flask.ext.login import UserMixin, AnonymousUserMixin
import psycopg2
from redash import utils, settings, redis_connection
from redash.query_runner import get_query_runner
class Database(object):
def __init__(self):
self.database_config = dict(settings.DATABASE_CONFIG)
self.database_config['register_hstore'] = False
self.database_name = self.database_config.pop('name')
self.database = PostgresqlExtDatabase(self.database_name, **self.database_config)
self.app = None
self.pid = os.getpid()
def init_app(self, app):
self.app = app
self.register_handlers()
def connect_db(self):
self._check_pid()
self.database.connect()
def close_db(self, exc):
self._check_pid()
if not self.database.is_closed():
self.database.close()
def _check_pid(self):
current_pid = os.getpid()
if self.pid != current_pid:
logging.info("New pid detected (%d!=%d); resetting database lock.", self.pid, current_pid)
self.pid = os.getpid()
self.database._conn_lock = threading.Lock()
def register_handlers(self):
self.app.before_request(self.connect_db)
self.app.teardown_request(self.close_db)
db = Database()
class BaseModel(peewee.Model):
class Meta:
database = db.database
@classmethod
def get_by_id(cls, model_id):
return cls.get(cls.id == model_id)
def pre_save(self, created):
pass
def post_save(self, created):
        # Hook for post_save operations. Override if needed.
pass
def save(self, *args, **kwargs):
pk_value = self._get_pk_value()
created = kwargs.get('force_insert', False) or not bool(pk_value)
self.pre_save(created)
super(BaseModel, self).save(*args, **kwargs)
self.post_save(created)
class ModelTimestampsMixin(BaseModel):
updated_at = DateTimeTZField(default=datetime.datetime.now)
created_at = DateTimeTZField(default=datetime.datetime.now)
def pre_save(self, created):
super(ModelTimestampsMixin, self).pre_save(created)
self.updated_at = datetime.datetime.now()
class PermissionsCheckMixin(object):
def has_permission(self, permission):
return self.has_permissions((permission,))
def has_permissions(self, permissions):
has_permissions = reduce(lambda a, b: a and b,
map(lambda permission: permission in self.permissions,
permissions),
True)
return has_permissions
class AnonymousUser(AnonymousUserMixin, PermissionsCheckMixin):
@property
def permissions(self):
return []
class ApiUser(UserMixin, PermissionsCheckMixin):
def __init__(self, api_key):
self.id = api_key
def __repr__(self):
return u"<ApiUser: {}>".format(self.id)
@property
def permissions(self):
return ['view_query']
class Group(BaseModel):
DEFAULT_PERMISSIONS = ['create_dashboard', 'create_query', 'edit_dashboard', 'edit_query',
'view_query', 'view_source', 'execute_query']
id = peewee.PrimaryKeyField()
name = peewee.CharField(max_length=100)
permissions = ArrayField(peewee.CharField, default=DEFAULT_PERMISSIONS)
tables = ArrayField(peewee.CharField)
created_at = DateTimeTZField(default=datetime.datetime.now)
class Meta:
db_table = 'groups'
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'permissions': self.permissions,
'tables': self.tables,
'created_at': self.created_at
}
def __unicode__(self):
return unicode(self.id)
class User(ModelTimestampsMixin, BaseModel, UserMixin, PermissionsCheckMixin):
DEFAULT_GROUPS = ['default']
id = peewee.PrimaryKeyField()
name = peewee.CharField(max_length=320)
email = peewee.CharField(max_length=320, index=True, unique=True)
password_hash = peewee.CharField(max_length=128, null=True)
groups = ArrayField(peewee.CharField, default=DEFAULT_GROUPS)
class Meta:
db_table = 'users'
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'email': self.email,
'updated_at': self.updated_at,
'created_at': self.created_at
}
def __init__(self, *args, **kwargs):
super(User, self).__init__(*args, **kwargs)
self._allowed_tables = None
@property
def permissions(self):
# TODO: this should be cached.
return list(itertools.chain(*[g.permissions for g in
Group.select().where(Group.name << self.groups)]))
@property
def allowed_tables(self):
        # TODO: cache this as well
if self._allowed_tables is None:
self._allowed_tables = set([t.lower() for t in itertools.chain(*[g.tables for g in
Group.select().where(Group.name << self.groups)])])
return self._allowed_tables
@classmethod
def get_by_email(cls, email):
return cls.get(cls.email == email)
def __unicode__(self):
return '%r, %r' % (self.name, self.email)
def hash_password(self, password):
self.password_hash = pwd_context.encrypt(password)
def verify_password(self, password):
return self.password_hash and pwd_context.verify(password, self.password_hash)
class ActivityLog(BaseModel):
QUERY_EXECUTION = 1
id = peewee.PrimaryKeyField()
user = peewee.ForeignKeyField(User)
type = peewee.IntegerField()
activity = peewee.TextField()
created_at = DateTimeTZField(default=datetime.datetime.now)
class Meta:
db_table = 'activity_log'
def to_dict(self):
return {
'id': self.id,
'user': self.user.to_dict(),
'type': self.type,
'activity': self.activity,
'created_at': self.created_at
}
def __unicode__(self):
return unicode(self.id)
class DataSource(BaseModel):
id = peewee.PrimaryKeyField()
name = peewee.CharField(unique=True)
type = peewee.CharField()
options = peewee.TextField()
queue_name = peewee.CharField(default="queries")
scheduled_queue_name = peewee.CharField(default="queries")
created_at = DateTimeTZField(default=datetime.datetime.now)
class Meta:
db_table = 'data_sources'
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'type': self.type,
'syntax': self.query_runner.syntax
}
def get_schema(self, refresh=False):
key = "data_source:schema:{}".format(self.id)
cache = None
if not refresh:
cache = redis_connection.get(key)
if cache is None:
query_runner = self.query_runner
schema = sorted(query_runner.get_schema(), key=lambda t: t['name'])
redis_connection.set(key, json.dumps(schema))
else:
schema = json.loads(cache)
return schema
@property
def query_runner(self):
return get_query_runner(self.type, self.options)
@classmethod
def all(cls):
return cls.select().order_by(cls.id.asc())
class QueryResult(BaseModel):
id = peewee.PrimaryKeyField()
data_source = peewee.ForeignKeyField(DataSource)
query_hash = peewee.CharField(max_length=32, index=True)
query = peewee.TextField()
data = peewee.TextField()
runtime = peewee.FloatField()
retrieved_at = DateTimeTZField()
class Meta:
db_table = 'query_results'
def to_dict(self):
return {
'id': self.id,
'query_hash': self.query_hash,
'query': self.query,
'data': json.loads(self.data),
'data_source_id': self._data.get('data_source', None),
'runtime': self.runtime,
'retrieved_at': self.retrieved_at
}
@classmethod
def unused(cls):
week_ago = datetime.datetime.now() - datetime.timedelta(days=7)
unused_results = cls.select().where(Query.id == None, cls.retrieved_at < week_ago)\
.join(Query, join_type=peewee.JOIN_LEFT_OUTER)
return unused_results
@classmethod
def get_latest(cls, data_source, query, max_age=0):
query_hash = utils.gen_query_hash(query)
if max_age == -1:
query = cls.select().where(cls.query_hash == query_hash,
cls.data_source == data_source).order_by(cls.retrieved_at.desc())
else:
query = cls.select().where(cls.query_hash == query_hash, cls.data_source == data_source,
peewee.SQL("retrieved_at + interval '%s second' >= now() at time zone 'utc'",
max_age)).order_by(cls.retrieved_at.desc())
return query.first()
@classmethod
def store_result(cls, data_source_id, query_hash, query, data, run_time, retrieved_at):
query_result = cls.create(query_hash=query_hash,
query=query,
runtime=run_time,
data_source=data_source_id,
retrieved_at=retrieved_at,
data=data)
logging.info("Inserted query (%s) data; id=%s", query_hash, query_result.id)
updated_count = Query.update(latest_query_data=query_result).\
where(Query.query_hash==query_hash, Query.data_source==data_source_id).\
execute()
logging.info("Updated %s queries with result (%s).", updated_count, query_hash)
return query_result
def __unicode__(self):
return u"%d | %s | %s" % (self.id, self.query_hash, self.retrieved_at)
def should_schedule_next(previous_iteration, now, schedule):
if schedule.isdigit():
ttl = int(schedule)
next_iteration = previous_iteration + datetime.timedelta(seconds=ttl)
else:
hour, minute = schedule.split(':')
hour, minute = int(hour), int(minute)
# The following logic is needed for cases like the following:
# - The query scheduled to run at 23:59.
# - The scheduler wakes up at 00:01.
# - Using naive implementation of comparing timestamps, it will skip the execution.
normalized_previous_iteration = previous_iteration.replace(hour=hour, minute=minute)
if normalized_previous_iteration > previous_iteration:
previous_iteration = normalized_previous_iteration - datetime.timedelta(days=1)
next_iteration = (previous_iteration + datetime.timedelta(days=1)).replace(hour=hour, minute=minute)
return now > next_iteration
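# Worked example of the edge case described above (hypothetical timestamps):
# with schedule "23:59" and previous_iteration 2014-01-01 23:59, a scheduler
# waking at 2014-01-02 00:01 computes next_iteration = 2014-01-02 23:59 and
# returns False, so the 23:59 run is not skipped; it becomes True only once
# "now" passes 2014-01-02 23:59. With a purely numeric schedule such as
# "3600", the next run is simply previous_iteration + 3600 seconds.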
class Query(ModelTimestampsMixin, BaseModel):
id = peewee.PrimaryKeyField()
data_source = peewee.ForeignKeyField(DataSource)
latest_query_data = peewee.ForeignKeyField(QueryResult, null=True)
name = peewee.CharField(max_length=255)
description = peewee.CharField(max_length=4096, null=True)
query = peewee.TextField()
query_hash = peewee.CharField(max_length=32)
api_key = peewee.CharField(max_length=40)
user_email = peewee.CharField(max_length=360, null=True)
user = peewee.ForeignKeyField(User)
last_modified_by = peewee.ForeignKeyField(User, null=True, related_name="modified_queries")
is_archived = peewee.BooleanField(default=False, index=True)
schedule = peewee.CharField(max_length=10, null=True)
class Meta:
db_table = 'queries'
def to_dict(self, with_stats=False, with_visualizations=False, with_user=True):
d = {
'id': self.id,
'latest_query_data_id': self._data.get('latest_query_data', None),
'name': self.name,
'description': self.description,
'query': self.query,
'query_hash': self.query_hash,
'schedule': self.schedule,
'api_key': self.api_key,
'is_archived': self.is_archived,
'updated_at': self.updated_at,
'created_at': self.created_at,
'data_source_id': self._data.get('data_source', None)
}
if with_user:
d['user'] = self.user.to_dict()
d['last_modified_by'] = self.last_modified_by.to_dict()
else:
d['user_id'] = self._data['user']
if with_stats:
d['retrieved_at'] = self.retrieved_at
d['runtime'] = self.runtime
if with_visualizations:
d['visualizations'] = [vis.to_dict(with_query=False)
for vis in self.visualizations]
return d
def archive(self):
self.is_archived = True
self.schedule = None
for vis in self.visualizations:
for w in vis.widgets:
w.delete_instance()
self.save()
@classmethod
def all_queries(cls):
q = Query.select(Query, User, QueryResult.retrieved_at, QueryResult.runtime)\
.join(QueryResult, join_type=peewee.JOIN_LEFT_OUTER)\
.switch(Query).join(User)\
.where(Query.is_archived==False)\
.group_by(Query.id, User.id, QueryResult.id, QueryResult.retrieved_at, QueryResult.runtime)\
.order_by(cls.created_at.desc())
return q
@classmethod
def outdated_queries(cls):
queries = cls.select(cls, QueryResult.retrieved_at, DataSource)\
.join(QueryResult)\
.switch(Query).join(DataSource)\
.where(cls.schedule != None)
now = datetime.datetime.utcnow().replace(tzinfo=psycopg2.tz.FixedOffsetTimezone(offset=0, name=None))
outdated_queries = {}
for query in queries:
if should_schedule_next(query.latest_query_data.retrieved_at, now, query.schedule):
key = "{}:{}".format(query.query_hash, query.data_source.id)
outdated_queries[key] = query
return outdated_queries.values()
@classmethod
def search(cls, term):
        # This is a very naive implementation of search, to be replaced with a
        # PostgreSQL full-text-search solution.
where = (cls.name**u"%{}%".format(term)) | (cls.description**u"%{}%".format(term))
if term.isdigit():
where |= cls.id == term
where &= cls.is_archived == False
return cls.select().where(where).order_by(cls.created_at.desc())
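    # Illustration (not part of the original code): the ** operator is peewee's
    # case-insensitive LIKE, so search("sales") matches queries whose name or
    # description contains "sales", and search("42") additionally matches the
    # query whose id is 42; archived queries are always excluded.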
@classmethod
def recent(cls, user_id):
# TODO: instead of t2 here, we should define table_alias for Query table
return cls.select().where(Event.created_at > peewee.SQL("current_date - 7")).\
join(Event, on=(Query.id == peewee.SQL("t2.object_id::integer"))).\
where(Event.action << ('edit', 'execute', 'edit_name', 'edit_description', 'view_source')).\
where(Event.user == user_id).\
where(~(Event.object_id >> None)).\
where(Event.object_type == 'query'). \
where(cls.is_archived == False).\
group_by(Event.object_id, Query.id).\
order_by(peewee.SQL("count(0) desc"))
@classmethod
def update_instance(cls, query_id, **kwargs):
if 'query' in kwargs:
kwargs['query_hash'] = utils.gen_query_hash(kwargs['query'])
update = cls.update(**kwargs).where(cls.id == query_id)
return update.execute()
def pre_save(self, created):
super(Query, self).pre_save(created)
self.query_hash = utils.gen_query_hash(self.query)
self._set_api_key()
if self.last_modified_by is None:
self.last_modified_by = self.user
def post_save(self, created):
if created:
self._create_default_visualizations()
def _create_default_visualizations(self):
table_visualization = Visualization(query=self, name="Table",
description='',
type="TABLE", options="{}")
table_visualization.save()
def _set_api_key(self):
if not self.api_key:
self.api_key = hashlib.sha1(
u''.join((str(time.time()), self.query, str(self._data['user']), self.name)).encode('utf-8')).hexdigest()
@property
def runtime(self):
return self.latest_query_data.runtime
@property
def retrieved_at(self):
return self.latest_query_data.retrieved_at
def __unicode__(self):
return unicode(self.id)
class Dashboard(ModelTimestampsMixin, BaseModel):
id = peewee.PrimaryKeyField()
slug = peewee.CharField(max_length=140, index=True)
name = peewee.CharField(max_length=100)
user_email = peewee.CharField(max_length=360, null=True)
user = peewee.ForeignKeyField(User)
layout = peewee.TextField()
dashboard_filters_enabled = peewee.BooleanField(default=False)
is_archived = peewee.BooleanField(default=False, index=True)
class Meta:
db_table = 'dashboards'
def to_dict(self, with_widgets=False):
layout = json.loads(self.layout)
if with_widgets:
widgets = Widget.select(Widget, Visualization, Query, User)\
.where(Widget.dashboard == self.id)\
.join(Visualization, join_type=peewee.JOIN_LEFT_OUTER)\
.join(Query, join_type=peewee.JOIN_LEFT_OUTER)\
.join(User, join_type=peewee.JOIN_LEFT_OUTER)
widgets = {w.id: w.to_dict() for w in widgets}
            # The following is a workaround for cases when the widget object gets deleted
            # without the dashboard layout being updated. This happens for users with old
            # databases that didn't have a foreign key relationship between visualizations
            # and widgets.
            # It's temporary until a better solution is implemented (we should probably
            # move the position information to the widget).
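            # Concrete example (hypothetical ids): if layout is [[1, 2], [3]]
            # but widget 2 was deleted without the layout being updated,
            # widgets.get(2) returns None below, so the rebuilt widgets_layout
            # becomes [[<widget 1>], [<widget 3>]] and the stale reference is
            # silently dropped.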
widgets_layout = []
for row in layout:
new_row = []
for widget_id in row:
widget = widgets.get(widget_id, None)
if widget:
new_row.append(widget)
widgets_layout.append(new_row)
# widgets_layout = map(lambda row: map(lambda widget_id: widgets.get(widget_id, None), row), layout)
else:
widgets_layout = None
return {
'id': self.id,
'slug': self.slug,
'name': self.name,
'user_id': self._data['user'],
'layout': layout,
'dashboard_filters_enabled': self.dashboard_filters_enabled,
'widgets': widgets_layout,
'updated_at': self.updated_at,
'created_at': self.created_at
}
@classmethod
def get_by_slug(cls, slug):
return cls.get(cls.slug == slug)
@classmethod
def recent(cls, user_id):
return cls.select().where(Event.created_at > peewee.SQL("current_date - 7")). \
join(Event, on=(Dashboard.id == peewee.SQL("t2.object_id::integer"))). \
where(Event.action << ('edit', 'view')).\
where(Event.user == user_id). \
where(~(Event.object_id >> None)). \
where(Event.object_type == 'dashboard'). \
group_by(Event.object_id, Dashboard.id). \
order_by(peewee.SQL("count(0) desc"))
def save(self, *args, **kwargs):
if not self.slug:
self.slug = utils.slugify(self.name)
tries = 1
while self.select().where(Dashboard.slug == self.slug).first() is not None:
self.slug = utils.slugify(self.name) + "_{0}".format(tries)
tries += 1
super(Dashboard, self).save(*args, **kwargs)
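    # Example of the slug de-duplication above (hypothetical names): a
    # dashboard named "Sales KPIs" would get the slug "sales-kpis"; if that is
    # taken, the loop retries with "sales-kpis_1", "sales-kpis_2", ... until a
    # free slug is found. (The exact slug text depends on utils.slugify.)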
def __unicode__(self):
return u"%s=%s" % (self.id, self.name)
class Visualization(ModelTimestampsMixin, BaseModel):
id = peewee.PrimaryKeyField()
type = peewee.CharField(max_length=100)
query = peewee.ForeignKeyField(Query, related_name='visualizations')
name = peewee.CharField(max_length=255)
description = peewee.CharField(max_length=4096, null=True)
options = peewee.TextField()
class Meta:
db_table = 'visualizations'
def to_dict(self, with_query=True):
d = {
'id': self.id,
'type': self.type,
'name': self.name,
'description': self.description,
'options': json.loads(self.options),
'updated_at': self.updated_at,
'created_at': self.created_at
}
if with_query:
d['query'] = self.query.to_dict()
return d
def __unicode__(self):
return u"%s %s" % (self.id, self.type)
class Widget(ModelTimestampsMixin, BaseModel):
id = peewee.PrimaryKeyField()
visualization = peewee.ForeignKeyField(Visualization, related_name='widgets', null=True)
text = peewee.TextField(null=True)
width = peewee.IntegerField()
options = peewee.TextField()
dashboard = peewee.ForeignKeyField(Dashboard, related_name='widgets', index=True)
    # unused; kept for backward compatibility:
type = peewee.CharField(max_length=100, null=True)
query_id = peewee.IntegerField(null=True)
class Meta:
db_table = 'widgets'
def to_dict(self):
d = {
'id': self.id,
'width': self.width,
'options': json.loads(self.options),
'dashboard_id': self._data['dashboard'],
'text': self.text,
'updated_at': self.updated_at,
'created_at': self.created_at
}
if self.visualization and self.visualization.id:
d['visualization'] = self.visualization.to_dict()
return d
def __unicode__(self):
return u"%s" % self.id
def delete_instance(self, *args, **kwargs):
layout = json.loads(self.dashboard.layout)
layout = map(lambda row: filter(lambda w: w != self.id, row), layout)
layout = filter(lambda row: len(row) > 0, layout)
self.dashboard.layout = json.dumps(layout)
self.dashboard.save()
super(Widget, self).delete_instance(*args, **kwargs)
class Event(BaseModel):
user = peewee.ForeignKeyField(User, related_name="events", null=True)
action = peewee.CharField()
object_type = peewee.CharField()
object_id = peewee.CharField(null=True)
additional_properties = peewee.TextField(null=True)
created_at = DateTimeTZField(default=datetime.datetime.now)
class Meta:
db_table = 'events'
def __unicode__(self):
return u"%s,%s,%s,%s" % (self._data['user'], self.action, self.object_type, self.object_id)
@classmethod
def record(cls, event):
user = event.pop('user_id')
action = event.pop('action')
object_type = event.pop('object_type')
object_id = event.pop('object_id', None)
created_at = datetime.datetime.utcfromtimestamp(event.pop('timestamp'))
additional_properties = json.dumps(event)
event = cls.create(user=user, action=action, object_type=object_type, object_id=object_id,
additional_properties=additional_properties, created_at=created_at)
return event
all_models = (DataSource, User, QueryResult, Query, Dashboard, Visualization, Widget, ActivityLog, Group, Event)
def init_db():
Group.insert(name='admin', permissions=['admin'], tables=['*']).execute()
Group.insert(name='default', permissions=Group.DEFAULT_PERMISSIONS, tables=['*']).execute()
def create_db(create_tables, drop_tables):
db.connect_db()
for model in all_models:
if drop_tables and model.table_exists():
# TODO: submit PR to peewee to allow passing cascade option to drop_table.
db.database.execute_sql('DROP TABLE %s CASCADE' % model._meta.db_table)
if create_tables and not model.table_exists():
model.create_table()
db.close_db(None)
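# Typical bootstrap sketch (hypothetical invocation, not part of the original
# module): create_db(create_tables=True, drop_tables=False) creates any table
# that does not exist yet, and init_db() then seeds the built-in "admin" and
# "default" groups used for permission checks.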
xclib/classifier/ova.py | sushantsondhi/pyxclib | 4 | 10529 | <reponame>sushantsondhi/pyxclib<gh_stars>1-10
import numpy as np
import time
import logging
from .base import BaseClassifier
import scipy.sparse as sp
from ._svm import train_one
from functools import partial
from ..utils import sparse
from ..data import data_loader
from ._svm import train_one, _get_liblinear_solver_type
from joblib import Parallel, delayed
from ..utils.matrix import SMatrix
from tqdm import tqdm
def separate(result):
return [item[0] for item in result], [item[1] for item in result]
def convert_to_sparse(weight, bias):
weight = np.vstack(weight).squeeze()
bias = np.vstack(bias).squeeze()
return sp.csr_matrix(weight), sp.csr_matrix(bias).transpose()
class OVAClassifier(BaseClassifier):
"""
One-vs-all classifier for sparse or dense data
(suitable for large label set)
Parameters:
-----------
solver: str, optional, default='liblinear'
solver to use
loss: str, optional, default='squared_hinge'
loss to optimize,
- hinge
- squared_hinge
C: float, optional, default=1.0
cost in svm
verbose: int, optional, default=0
print progress in svm
max_iter: int, optional, default=20
iteration in solver
tol: float, optional, default=0.1
tolerance in solver
threshold: float, optional, default=0.01
threshold for hard thresholding (after training classifier)
- bias values are not touched
- 0.01: for sparse features
- 1e-5: for dense features
feature_type: str, optional, default='sparse'
feature type: sparse or dense
    dual: boolean, optional, default=True
solve in primal or dual
use_bias: boolean, optional, default=True
train bias parameter or not
    num_threads: int, optional, default=12
use multiple threads to parallelize
batch_size: int, optional, default=1000
train these many classifiers in parallel
norm: str, optional, default='l2'
normalize features
penalty: str, optional, default='l2'
l1 or l2 regularizer
"""
def __init__(self, solver='liblinear', loss='squared_hinge', C=1.0,
verbose=0, max_iter=20, tol=0.1, threshold=0.01,
feature_type='sparse', dual=True, use_bias=True,
num_threads=12, batch_size=1000, norm='l2', penalty='l2'):
super().__init__(verbose, use_bias, feature_type)
self.loss = loss
self.C = C
self.penalty = penalty
self.norm = norm
self.num_threads = num_threads
self.verbose = verbose
self.max_iter = max_iter
self.threshold = threshold
self.tol = tol
self.dual = dual
self.batch_size = batch_size
self.num_labels = None
self.valid_labels = None
self.num_labels_ = None
logging.basicConfig(level=logging.INFO)
self.logger = logging.getLogger('OVAClassifier')
self.logger.info("Parameters:: {}".format(str(self)))
def _merge_weights(self, weights, biases):
# Bias is always a dense array
if self.feature_type == 'sparse':
self.weight = sp.vstack(
weights, format='csr', dtype=np.float32)
self.bias = sp.vstack(
biases, format='csr', dtype=np.float32).toarray()
else:
self.weight = np.vstack(weights).astype(np.float32).squeeze()
self.bias = np.vstack(biases).astype(np.float32)
def get_data_loader(self, data_dir, dataset, feat_fname,
label_fname, mode, batch_order):
"""Data loader
        - batch_order: 'labels' during training
- batch_order: 'instances' during prediction
"""
return data_loader.Dataloader(
batch_size=self.batch_size,
data_dir=data_dir,
dataset=dataset,
feat_fname=feat_fname,
label_fname=label_fname,
feature_type=self.feature_type,
mode=mode,
batch_order=batch_order,
norm=self.norm,
start_index=0,
end_index=-1)
def fit(self, data_dir, dataset, feat_fname, label_fname,
model_dir, save_after=1):
"""Train the classifier
Will create batches on labels and then parallelize
- Not very efficient when training time per classifier is too low
- Will not train for labels without any datapoints
        A list will be maintained which will be used to remap labels
to original ids
Arguments:
---------
data_dir: str
data directory with all files
dataset: str
Name of the dataset; like EURLex-4K
feat_fname: str
File name of training feature file
Should be in sparse format with header
label_fname: str
File name of training label file
Should be in sparse format with header
model_dir: str
dump checkpoints in this directory
based on save_after
save_after: int, default=1
save checkpoints after these many steps
"""
self.logger.info("Training!")
data = self.get_data_loader(
data_dir, dataset, feat_fname, label_fname, 'train', 'labels')
self.num_labels = data.num_labels # valid labels
self.num_labels_ = data.num_labels_ # number of original labels
self.valid_labels = data.valid_labels
weights, biases = [], []
run_time = 0.0
start_time = time.time()
idx = 0
for batch_data in tqdm(data):
start_time = time.time()
batch_weight, batch_bias = self._train(
batch_data, self.num_threads)
del batch_data
if self.feature_type == 'sparse':
batch_weight, batch_bias = convert_to_sparse(
batch_weight, batch_bias)
batch_time = time.time() - start_time
run_time += batch_time
weights.append(batch_weight), biases.extend(batch_bias)
if idx != 0 and idx % save_after == 0:
# TODO: Delete these to save memory?
self._merge_weights(weights, biases)
self._save_state(model_dir, idx)
self.logger.info("Saved state at epoch: {}".format(idx))
idx += 1
self._merge_weights(weights, biases)
self.logger.info("Training time (sec): {}, model size (MB): {}".format(
run_time, self.model_size))
def _train(self, data, num_threads):
"""Train SVM for multiple labels
Arguments:
---------
data: list
[{'X': X, 'Y': y}]
Returns
-------
weights: np.ndarray
weight of the classifier
bias: float
bias of the classifier
"""
_func = self._get_partial_train()
with Parallel(n_jobs=num_threads) as parallel:
result = parallel(delayed(_func)(d) for d in data)
weights, biases = separate(result)
del result
return weights, biases
def predict(self, data_dir, dataset, feat_fname, label_fname, top_k=10):
"""Predict using the classifier
        Will create batches on instances and then parallelize
Arguments:
---------
data_dir: str
data directory with all files
dataset: str
Name of the dataset; like EURLex-4K
feat_fname: str
            File name of the feature file to predict on
Should be in sparse format with header
label_fname: str
            File name of the label file (not used during prediction)
Should be in sparse format with header
TODO: Avoid sending labels as they are not used
"""
self._transpose_weights()
self.logger.info("Predicting!")
use_sparse = self.feature_type == 'sparse'
data = self.get_data_loader(
data_dir, dataset, feat_fname, label_fname, 'predict', 'instances')
num_instances = data.num_instances
predicted_labels = SMatrix(
n_rows=num_instances,
n_cols=self.num_labels,
nnz=top_k)
start_time = time.time()
start_idx = 0
for batch_data in tqdm(data):
pred = batch_data['data'][batch_data['ind']
] @ self.weight + self.bias
predicted_labels.update_block(
start_idx,
ind=None,
val=pred.view(np.ndarray) if use_sparse else pred)
start_idx += pred.shape[0]
end_time = time.time()
self.logger.info(
"Prediction time/sample (ms): {}".format(
(end_time-start_time)*1000/num_instances))
return self._map_to_original(predicted_labels.data())
def _get_partial_train(self):
return partial(train_one, solver_type=self.solver, C=self.C,
verbose=self.verbose, max_iter=self.max_iter,
threshold=self.threshold, tol=self.tol,
intercept_scaling=1.0, fit_intercept=self.use_bias,
epsilon=0)
def _map_to_original(self, X):
"""Some labels were removed during training as training data was
not availale; remap to original mapping
- Assumes documents need not be remapped
"""
shape = (X.shape[0], self.num_labels_)
return sparse._map_cols(X, self.valid_labels, shape)
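    # Example (hypothetical sizes): if the dataset had 5 original labels but
    # only labels [0, 2, 4] had training data, predictions come out with 3
    # columns and are expanded back to a 5-column matrix with empty columns at
    # 1 and 3. Assumes sparse._map_cols scatters columns back to the indices
    # listed in self.valid_labels.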
def _transpose_weights(self):
self.weight = self.weight.transpose()
self.bias = self.bias.transpose()
def __repr__(self):
s = "C: {C}, max_iter: {max_iter}, threshold: {threshold}" \
", loss: {loss}, dual: {dual}, bias: {use_bias}, norm: {norm}" \
", num_threads: {num_threads}, batch_size: {batch_size}"\
", tol: {tol}, penalty: {penalty}"
return s.format(**self.__dict__)
@property
def solver(self):
return _get_liblinear_solver_type(
'ovr', self.penalty, self.loss, self.dual)
ipgroup_test.py | RyPeck/python-ipgroup | 1 | 10530 | #!/usr/bin/env python3
import ipaddress
import random
import unittest
import ipgroup
class TestGroupIPs(unittest.TestCase):
def setUp(self):
pass
def test_group(self):
IPs = ["127.0.0.1",
"127.0.1.1",
"127.1.1.1",
"127.1.0.1",
"127.2.0.1",
"127.2.1.1",
]
expected_results = {"127.0.0.0/16": 2,
"127.1.0.0/16": 2,
"127.2.0.0/16": 2,
}
a = ipgroup.IPv4Group(IPs, 16)
self.assertEqual(expected_results, a.group)
def test_group2(self):
IPs = ["127.0.0.1",
"127.0.1.1",
"127.1.1.1",
"127.1.0.1",
"127.2.0.1",
"127.2.1.1",
]
expected_results = {"127.0.0.0/24": 1,
"127.0.1.0/24": 1,
"127.1.0.0/24": 1,
"127.1.1.0/24": 1,
"127.2.0.0/24": 1,
"127.2.1.0/24": 1,
}
b = ipgroup.IPv4Group(IPs, 24)
self.assertEqual(expected_results, b.group)
def test_group3(self):
""" 'Random' test """
        # Small netblock so we don't test with more than 2**10 hosts
random_cidr = random.randint(22, 30)
network = ipaddress.IPv4Network(("172.16.58.3/%s" % random_cidr))
        # So our sample size is never bigger than the population of hosts
random_int = random.randint(1, 2**(32 - random_cidr - 1))
IPs = random.sample(set(network.hosts()), random_int)
expected_results = {("172.16.58.3/%s" % random_cidr): random_int}
c = ipgroup.IPv4Group(IPs, random_cidr)
self.assertEqual(expected_results, c.group)
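        # Intent of the randomized check: every sampled host lies inside the
        # single /random_cidr network it was drawn from, so grouping at that
        # same prefix length must yield exactly one key whose count is
        # random_int.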
def test_IPv6(self):
""" 'Random' test """
        # Small netblock so we don't test with more than 2**10 hosts
random_cidr = random.randint(118, 126)
network = ipaddress.IPv6Network(("2607:f8b0:4009:803::/%s" %
random_cidr))
        # So our sample size is never bigger than the population of hosts
random_int = random.randint(1, 2**(128 - random_cidr - 1))
IPs = random.sample(set(network.hosts()), random_int)
expected_results = {("2607:f8b0:4009:803::/%s" % random_cidr):
random_int}
d = ipgroup.IPv6Group(IPs, random_cidr)
self.assertEqual(expected_results, d.group)
def test_reGroup(self):
IPs = ["127.0.0.1",
"127.1.0.1",
"127.1.1.1",
]
expected_results1 = {"127.0.0.0/24": 1,
"127.1.0.0/24": 1,
"127.1.1.0/24": 1,
}
g = ipgroup.IPv4Group(IPs, 24)
self.assertEqual(expected_results1, g.group)
expected_results2 = {"127.0.0.0/16": 1,
"127.1.0.0/16": 2,
}
g.reGroup(16)
self.assertEqual(expected_results2, g.group)
class TestTotalAddresses(unittest.TestCase):
"""
    Tests that the totalAddresses function returns the correct number of
    unique addresses in various scenarios.
"""
def setUp(self):
pass
def test_total_address1(self):
self.assertEqual(8, ipgroup.totalAddresses("127.0.0.0/29"))
def test_total_address2(self):
total = ipgroup.totalAddresses(["192.168.1.1/16",
"127.0.0.0/16",
])
self.assertEqual(2**17, total)
def test_total_address3(self):
total = ipgroup.totalAddresses(["192.168.1.1/16",
"127.0.0.0/28"
])
self.assertEqual((2**16 + 2**4), total)
def test_total_address4(self):
total = ipgroup.totalAddresses(["192.168.127.12/24",
"192.168.127.12/30",
])
self.assertEqual(2**8, total)
def test_total_address5(self):
total = ipgroup.totalAddresses(["192.168.127.12/24",
"192.168.127.12/23",
])
self.assertEqual(2**9, total)
def test_total_address_overlapping(self):
""" For the scenario where networks will contain eachother. """
total = ipgroup.totalAddresses(["172.16.58.3/16",
"172.16.58.3/18",
"172.16.58.3/24",
])
self.assertEqual(2**16, total)
def test_total_address_overlapping2(self):
""" For the scenario where networks will contain eachother big networks
to show that the function is fast, no longer enumerating all networks.
"""
total = ipgroup.totalAddresses(["1.0.0.0/8",
"2.0.0.0/8",
"2.0.0.0/16",
"2.1.1.0/24",
"1.0.0.0/16",
"1.1.1.0/24",
"2.0.0.0/8",
])
self.assertEqual((2**24 + 2**24), total)
def test_total_address_overlapping3(self):
""" For the scenario where networks will contain eachother big networks
to show that the function is fast, no longer enumerating all networks.
"""
total = ipgroup.totalAddresses(["1.0.0.0/8",
"1.0.0.0/4",
"2.0.0.0/8",
"2.0.0.0/16",
"2.1.1.0/24",
"1.0.0.0/16",
"1.1.1.0/24",
"2.0.0.0/8",
])
self.assertEqual(2**28, total)
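        # Why 2**28: 1.0.0.0/4 spans 2**28 addresses and subsumes every other
        # block in the list (both /8s and all smaller prefixes), so the number
        # of unique addresses is the size of that single supernet.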
def test_total_address_overlap_IPv6(self):
total = ipgroup.totalAddresses(['2620:008d:8000::/48',
'2620:008d:8000:e693::/64',
])
self.assertEqual(2**80, total)
if __name__ == "__main__":
unittest.main()
| #!/usr/bin/env python3
import ipaddress
import random
import unittest
import ipgroup
class TestGroupIPs(unittest.TestCase):
def setUp(self):
pass
def test_group(self):
IPs = ["127.0.0.1",
"127.0.1.1",
"127.1.1.1",
"127.1.0.1",
"127.2.0.1",
"127.2.1.1",
]
expected_results = {"127.0.0.0/16": 2,
"127.1.0.0/16": 2,
"127.2.0.0/16": 2,
}
a = ipgroup.IPv4Group(IPs, 16)
self.assertEqual(expected_results, a.group)
def test_group2(self):
IPs = ["127.0.0.1",
"127.0.1.1",
"127.1.1.1",
"127.1.0.1",
"127.2.0.1",
"127.2.1.1",
]
expected_results = {"127.0.0.0/24": 1,
"127.0.1.0/24": 1,
"127.1.0.0/24": 1,
"127.1.1.0/24": 1,
"127.2.0.0/24": 1,
"127.2.1.0/24": 1,
}
b = ipgroup.IPv4Group(IPs, 24)
self.assertEqual(expected_results, b.group)
def test_group3(self):
""" 'Random' test """
# Small Netblock so we don't do over 2**10 hosts to test with
random_cidr = random.randint(22, 30)
network = ipaddress.IPv4Network(("172.16.58.3/%s" % random_cidr))
# So out sample size is never bigger than the population of hosts
random_int = random.randint(1, 2**(32 - random_cidr - 1))
IPs = random.sample(set(network.hosts()), random_int)
expected_results = {("172.16.58.3/%s" % random_cidr): random_int}
c = ipgroup.IPv4Group(IPs, random_cidr)
self.assertEqual(expected_results, c.group)
def test_IPv6(self):
""" 'Random' test """
# Small Netblock so we don't do over 2**10 hosts to test with
random_cidr = random.randint(118, 126)
network = ipaddress.IPv6Network(("2607:f8b0:4009:803::/%s" %
random_cidr))
# So out sample size is never bigger than the population of hosts
random_int = random.randint(1, 2**(128 - random_cidr - 1))
IPs = random.sample(set(network.hosts()), random_int)
expected_results = {("2607:f8b0:4009:803::/%s" % random_cidr):
random_int}
d = ipgroup.IPv6Group(IPs, random_cidr)
self.assertEqual(expected_results, d.group)
def test_reGroup(self):
IPs = ["127.0.0.1",
"127.1.0.1",
"127.1.1.1",
]
expected_results1 = {"127.0.0.0/24": 1,
"127.1.0.0/24": 1,
"127.1.1.0/24": 1,
}
g = ipgroup.IPv4Group(IPs, 24)
self.assertEqual(expected_results1, g.group)
expected_results2 = {"127.0.0.0/16": 1,
"127.1.0.0/16": 2,
}
g.reGroup(16)
self.assertEqual(expected_results2, g.group)
class TestTotalAddresses(unittest.TestCase):
"""
Tests totalAddresses function returns correct number of unique addresses
in various scenarios
"""
def setUp(self):
pass
def test_total_address1(self):
self.assertEqual(8, ipgroup.totalAddresses("127.0.0.0/29"))
def test_total_address2(self):
total = ipgroup.totalAddresses(["192.168.1.1/16",
"127.0.0.0/16",
])
self.assertEqual(2**17, total)
def test_total_address3(self):
total = ipgroup.totalAddresses(["192.168.1.1/16",
"127.0.0.0/28"
])
self.assertEqual((2**16 + 2**4), total)
def test_total_address4(self):
total = ipgroup.totalAddresses(["192.168.127.12/24",
"192.168.127.12/30",
])
self.assertEqual(2**8, total)
def test_total_address5(self):
total = ipgroup.totalAddresses(["192.168.127.12/24",
"192.168.127.12/23",
])
self.assertEqual(2**9, total)
def test_total_address_overlapping(self):
""" For the scenario where networks will contain eachother. """
total = ipgroup.totalAddresses(["172.16.58.3/16",
"172.16.58.3/18",
"172.16.58.3/24",
])
self.assertEqual(2**16, total)
def test_total_address_overlapping2(self):
""" For the scenario where networks will contain eachother big networks
to show that the function is fast, no longer enumerating all networks.
"""
total = ipgroup.totalAddresses(["1.0.0.0/8",
"2.0.0.0/8",
"2.0.0.0/16",
"2.1.1.0/24",
"1.0.0.0/16",
"1.1.1.0/24",
"2.0.0.0/8",
])
self.assertEqual((2**24 + 2**24), total)
def test_total_address_overlapping3(self):
""" For the scenario where networks will contain eachother big networks
to show that the function is fast, no longer enumerating all networks.
"""
total = ipgroup.totalAddresses(["1.0.0.0/8",
"1.0.0.0/4",
"2.0.0.0/8",
"2.0.0.0/16",
"2.1.1.0/24",
"1.0.0.0/16",
"1.1.1.0/24",
"2.0.0.0/8",
])
self.assertEqual(2**28, total)
def test_total_address_overlap_IPv6(self):
total = ipgroup.totalAddresses(['2620:008d:8000::/48',
'2620:008d:8000:e693::/64',
])
self.assertEqual(2**80, total)
if __name__ == "__main__":
unittest.main()
| en | 0.850628 | #!/usr/bin/env python3 'Random' test # Small Netblock so we don't do over 2**10 hosts to test with # So out sample size is never bigger than the population of hosts 'Random' test # Small Netblock so we don't do over 2**10 hosts to test with # So out sample size is never bigger than the population of hosts Tests totalAddresses function returns correct number of unique addresses in various scenarios For the scenario where networks will contain eachother. For the scenario where networks will contain eachother big networks to show that the function is fast, no longer enumerating all networks. For the scenario where networks will contain eachother big networks to show that the function is fast, no longer enumerating all networks. | 3.237244 | 3 |
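The overlap tests above pin down the key behaviour of ipgroup.totalAddresses: nested and overlapping networks must be deduplicated before counting. A minimal sketch of that behaviour using only the standard ipaddress module — an illustration of what the tests expect, not the ipgroup implementation itself:

import ipaddress

def total_addresses_sketch(cidrs):
    # strict=False tolerates host bits being set (e.g. "192.168.1.1/16"), and
    # collapse_addresses merges nested/overlapping networks so that every
    # address is counted exactly once.
    nets = [ipaddress.ip_network(c, strict=False) for c in cidrs]
    return sum(n.num_addresses for n in ipaddress.collapse_addresses(nets))

# Mirrors test_total_address_overlapping: the /18 and /24 lie inside the /16.
assert total_addresses_sketch(["10.0.0.0/16", "10.0.0.0/18", "10.0.0.0/24"]) == 2**16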
progress.py | PsiLupan/calcprogress | 2 | 10531 | from dataclasses import dataclass
from dol import Dol
from asm_section_list import AsmSection, AsmSectionType
@dataclass
class Slice:
start: int
end: int
def size(self) -> int:
assert self.end > self.start
return self.end - self.start
def contains_section(self, sect: AsmSection) -> bool:
return self.start <= sect.start and self.end > sect.start + sect.size
@dataclass
class SliceGroup:
name: str
slices: list[Slice]
def total_size(self) -> int:
size = 0
for _slice in self.slices:
size += _slice.size()
return size
def contains_section(self, sect: AsmSection) -> bool:
for _slice in self.slices:
if _slice.contains_section(sect):
return True
return False
def calc_generic_progress(dol: Dol, asm_list: list[AsmSection]):
# Sum up code/data in ASM
asm_code_size = 0
asm_data_size = 0
for section in asm_list:
if section.type == AsmSectionType.CODE:
asm_code_size += section.size
elif section.type == AsmSectionType.DATA:
asm_data_size += section.size
else:
assert False, f"Invalid section type ({section.type})!"
# Dol sizes
dol_code_size = dol.code_size()
dol_data_size = dol.data_size()
# Decompiled sizes
decomp_code_size = dol_code_size - asm_code_size
decomp_data_size = dol_data_size - asm_data_size
# Percentages
code_percent = decomp_code_size / dol_code_size
data_percent = decomp_data_size / dol_data_size
print(f"Code sections: {decomp_code_size} / {dol_code_size} bytes in src ({code_percent:%})")
print(f"Data sections: {decomp_data_size} / {dol_data_size} bytes in src ({data_percent:%})")
def calc_slice_progress(slices: SliceGroup, asm_list: list[AsmSection]):
asm_slice_size = 0
for section in asm_list:
if slices.contains_section(section):
if section.type == AsmSectionType.CODE:
asm_slice_size += section.size
elif section.type == AsmSectionType.DATA:
asm_slice_size += section.size
else:
assert False, f"Invalid section type ({section.type})!"
# Dol sizes
dol_slice_size = slices.total_size()
# Decompiled sizes
decomp_slice_size = dol_slice_size - asm_slice_size
# Percentages
slice_percent = decomp_slice_size / dol_slice_size
print(f"\t{slices.name}: {decomp_slice_size} / {dol_slice_size} bytes in src ({slice_percent:%})") | from dataclasses import dataclass
from dol import Dol
from asm_section_list import AsmSection, AsmSectionType
@dataclass
class Slice:
start: int
end: int
def size(self) -> int:
assert self.end > self.start
return self.end - self.start
def contains_section(self, sect: AsmSection) -> bool:
return self.start <= sect.start and self.end > sect.start + sect.size
@dataclass
class SliceGroup:
name: str
slices: list[Slice]
def total_size(self) -> int:
size = 0
for _slice in self.slices:
size += _slice.size()
return size
def contains_section(self, sect: AsmSection) -> bool:
for _slice in self.slices:
if _slice.contains_section(sect):
return True
return False
def calc_generic_progress(dol: Dol, asm_list: list[AsmSection]):
# Sum up code/data in ASM
asm_code_size = 0
asm_data_size = 0
for section in asm_list:
if section.type == AsmSectionType.CODE:
asm_code_size += section.size
elif section.type == AsmSectionType.DATA:
asm_data_size += section.size
else:
assert False, f"Invalid section type ({section.type})!"
# Dol sizes
dol_code_size = dol.code_size()
dol_data_size = dol.data_size()
# Decompiled sizes
decomp_code_size = dol_code_size - asm_code_size
decomp_data_size = dol_data_size - asm_data_size
# Percentages
code_percent = decomp_code_size / dol_code_size
data_percent = decomp_data_size / dol_data_size
print(f"Code sections: {decomp_code_size} / {dol_code_size} bytes in src ({code_percent:%})")
print(f"Data sections: {decomp_data_size} / {dol_data_size} bytes in src ({data_percent:%})")
def calc_slice_progress(slices: SliceGroup, asm_list: list[AsmSection]):
asm_slice_size = 0
for section in asm_list:
if slices.contains_section(section):
if section.type == AsmSectionType.CODE:
asm_slice_size += section.size
elif section.type == AsmSectionType.DATA:
asm_slice_size += section.size
else:
assert False, f"Invalid section type ({section.type})!"
# Dol sizes
dol_slice_size = slices.total_size()
# Decompiled sizes
decomp_slice_size = dol_slice_size - asm_slice_size
# Percentages
slice_percent = decomp_slice_size / dol_slice_size
print(f"\t{slices.name}: {decomp_slice_size} / {dol_slice_size} bytes in src ({slice_percent:%})") | en | 0.613135 | # Sum up code/data in ASM # Dol sizes # Decompiled sizes # Percentages # Dol sizes # Decompiled sizes # Percentages | 2.561798 | 3 |
main_qm9.py | maxxxzdn/en_flows | 0 | 10532 | <filename>main_qm9.py
import utils
import argparse
import wandb
from os.path import join
from qm9 import dataset
from qm9 import losses
from qm9.models import get_optim, get_model
from flows.utils import assert_mean_zero_with_mask, remove_mean_with_mask,\
assert_correctly_masked
import torch
import time
import pickle
import numpy as np
import qm9.visualizer as vis
from qm9.analyze import analyze_stability_for_molecules
from qm9.utils import prepare_context
from qm9.sampling import sample_chain, sample
from qm9 import mol_dim
parser = argparse.ArgumentParser(description='SE3')
parser.add_argument('--exp_name', type=str, default='debug_10')
parser.add_argument('--model', type=str, default='egnn_dynamics',
help='our_dynamics | schnet | simple_dynamics | '
'kernel_dynamics | egnn_dynamics |gnn_dynamics')
parser.add_argument('--n_epochs', type=int, default=100)
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--lr', type=float, default=2e-4)
parser.add_argument('--brute_force', type=eval, default=False,
help='True | False')
parser.add_argument('--actnorm', type=eval, default=True,
help='True | False')
parser.add_argument('--break_train_epoch', type=eval, default=False,
help='True | False')
parser.add_argument('--dp', type=eval, default=True,
help='True | False')
parser.add_argument('--condition_time', type=eval, default=True,
help='True | False')
parser.add_argument('--clip_grad', type=eval, default=True,
help='True | False')
parser.add_argument('--trace', type=str, default='hutch',
help='hutch | exact')
parser.add_argument('--n_layers', type=int, default=6,
help='number of layers')
parser.add_argument('--nf', type=int, default=64,
                    help='number of hidden node features')
parser.add_argument('--ode_regularization', type=float, default=1e-3)
parser.add_argument('--dataset', type=str, default='qm9',
help='qm9 | qm9_positional')
parser.add_argument('--dequantization', type=str, default='argmax_variational',
help='uniform | variational | argmax_variational')
parser.add_argument('--tanh', type=eval, default=True,
help='use tanh in the coord_mlp')
parser.add_argument('--attention', type=eval, default=True,
help='use attention in the EGNN')
parser.add_argument('--n_report_steps', type=int, default=1)
parser.add_argument('--wandb_usr', type=str, default='')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='enables CUDA training')
parser.add_argument('--save_model', type=eval, default=True,
help='save model')
parser.add_argument('--generate_epochs', type=int, default=1,
help='save model')
parser.add_argument('--num_workers', type=int, default=0, help='Number of worker for the dataloader')
parser.add_argument('--test_epochs', type=int, default=1)
parser.add_argument('--physics', type=int, default=0, help='Minimize energy loss or not')
parser.add_argument('--data_augmentation', type=eval, default=False,
                    help='augment training data with random rotations')
parser.add_argument('--x_aggregation', type=str, default='sum',
help='sum | mean')
parser.add_argument("--conditioning", nargs='+', default=[],
help='multiple arguments can be passed, '
'including: homo | onehot | lumo | num_atoms | etc. '
'usage: "--conditioning H_thermo homo onehot H_thermo"')
parser.add_argument('--resume', type=str, default=None,
help='')
parser.add_argument('--start_epoch', type=int, default=0,
help='')
args, unparsed_args = parser.parse_known_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
dtype = torch.float32
if args.resume is not None:
exp_name = args.exp_name + '_resume'
start_epoch = args.start_epoch
resume = args.resume
wandb_usr = args.wandb_usr
with open(join(args.resume, 'args.pickle'), 'rb') as f:
args = pickle.load(f)
args.resume = resume
args.break_train_epoch = False
args.exp_name = exp_name
args.start_epoch = start_epoch
args.wandb_usr = wandb_usr
print(args)
utils.create_folders(args)
print(args)
# Log all args to wandb
wandb.init(entity='aipp', project='eegVAE', name=args.exp_name, config=args)
wandb.save('*.txt')
# Retrieve QM9 dataloaders
dataloaders, charge_scale = dataset.retrieve_dataloaders(args.batch_size, args.num_workers)
data_dummy = next(iter(dataloaders['train']))
if len(args.conditioning) > 0:
print(f'Conditioning on {args.conditioning}')
context_dummy = prepare_context(args.conditioning, data_dummy)
context_node_nf = context_dummy.size(2)
else:
context_node_nf = 0
args.context_node_nf = context_node_nf
# Create EGNN flow
prior, flow, dequantizer, nodes_dist = get_model(args, device)
flow = flow.to(device)
dequantizer = dequantizer.to(device)
optim = get_optim(args, flow, dequantizer)
print(flow)
gradnorm_queue = utils.Queue()
gradnorm_queue.add(3000) # Add large value that will be flushed.
def check_mask_correct(variables, node_mask):
for variable in variables:
assert_correctly_masked(variable, node_mask)
def train_epoch(loader, epoch, flow, flow_dp):
nll_epoch = []
for i, data in enumerate(loader):
# Get data
x = data['positions'].to(device, dtype)
node_mask = data['atom_mask'].to(device, dtype).unsqueeze(2)
edge_mask = data['edge_mask'].to(device, dtype)
one_hot = data['one_hot'].to(device, dtype)
charges = data['charges'].to(device, dtype).unsqueeze(2)
x = remove_mean_with_mask(x, node_mask)
if args.data_augmentation:
x = utils.random_rotation(x).detach()
check_mask_correct([x, one_hot, charges], node_mask)
assert_mean_zero_with_mask(x, node_mask)
h = {'categorical': one_hot, 'integer': charges}
if len(args.conditioning) > 0:
context = prepare_context(args.conditioning, data).to(device, dtype)
assert_correctly_masked(context, node_mask)
else:
context = None
optim.zero_grad()
if args.physics:
energy_loss = mol_dim.compute_energy_loss(dequantizer, flow, prior,
nodes_dist, x.clone(), node_mask, edge_mask, context)
else:
            energy_loss = torch.tensor(0.0, device=device)
nll, reg_term, mean_abs_z = losses.compute_loss_and_nll(args, dequantizer, flow_dp, prior, nodes_dist, x, h,
node_mask, edge_mask, context)
loss = 0.01*energy_loss + nll + args.ode_regularization * reg_term
        loss.backward()
        # Clip only after backward(), once this batch's gradients actually exist.
        if args.clip_grad:
            grad_norm = utils.gradient_clipping(flow, gradnorm_queue)
        else:
            grad_norm = 0.
        if i % args.n_report_steps == 0:
            print(f"\repoch: {epoch}, iter: {i}/{len(loader)}, "
                  f"Loss {loss.item():.2f}, NLL: {nll.item():.2f}, "
                  f"RegTerm: {reg_term.item():.1f}, "
                  f"PhysTerm: {energy_loss.item():.1f}, "
                  f"GradNorm: {grad_norm:.1f}")
        optim.step()
nll_epoch.append(nll.item())
if i % 100 == 0 and i!=0:
analyze_and_save(epoch)
save_and_sample_chain(epoch=epoch)
sample_different_sizes_and_save(epoch=epoch)
vis.visualize("outputs/%s/epoch_%d" % (args.exp_name, epoch), wandb=wandb)
vis.visualize_chain(
"outputs/%s/epoch_%d/chain/" % (args.exp_name, epoch),
wandb=wandb)
wandb.log({"mean(abs(z))": mean_abs_z}, commit=False)
wandb.log({"Batch NLL": nll.item()}, commit=True)
wandb.log({"Energy": energy_loss.item()}, commit=True)
if args.break_train_epoch:
break
wandb.log({"Train Epoch NLL": np.mean(nll_epoch)}, commit=False)
def test(loader, epoch, flow_dp, partition='Test'):
with torch.no_grad():
nll_epoch = 0
n_samples = 0
for i, data in enumerate(loader):
# Get data
x = data['positions'].to(device, dtype)
batch_size = x.size(0)
node_mask = data['atom_mask'].to(device, dtype).unsqueeze(2)
edge_mask = data['edge_mask'].to(device, dtype)
one_hot = data['one_hot'].to(device, dtype)
charges = data['charges'].to(device, dtype).unsqueeze(2)
x = remove_mean_with_mask(x, node_mask)
check_mask_correct([x, one_hot, charges], node_mask)
assert_mean_zero_with_mask(x, node_mask)
h = {'categorical': one_hot, 'integer': charges}
if len(args.conditioning) > 0:
context = prepare_context(args.conditioning, data).to(device, dtype)
assert_correctly_masked(context, node_mask)
else:
context = None
# transform batch through flow
nll, _, _ = losses.compute_loss_and_nll(args, dequantizer, flow_dp, prior, nodes_dist, x, h, node_mask,
edge_mask, context)
# standard nll from forward KL
nll_epoch += nll.item() * batch_size
n_samples += batch_size
if i % args.n_report_steps == 0:
print(f"\r {partition} NLL \t epoch: {epoch}, iter: {i}/{len(loader)}, "
f"NLL: {nll_epoch/n_samples:.2f}")
if args.break_train_epoch:
break
return nll_epoch/n_samples
def save_and_sample_chain(epoch=0, id_from=0):
one_hot, charges, x = sample_chain(
args, device, flow, dequantizer, prior, n_tries=1)
vis.save_xyz_file(
'outputs/%s/epoch_%d/chain/' % (args.exp_name, epoch), one_hot, charges, x,
id_from, name='chain')
return one_hot, charges, x
def sample_different_sizes_and_save(n_samples=10, epoch=0):
for counter in range(n_samples):
n_nodes = nodes_dist.sample()
one_hot, charges, x = sample(args, device, flow, dequantizer, prior, n_samples=1, n_nodes=n_nodes)
vis.save_xyz_file(
'outputs/%s/epoch_%d/' % (args.exp_name, epoch), one_hot,
charges, x,
1*counter, name='molecule')
def analyze_and_save(epoch, n_samples=1000):
print('Analyzing molecule validity...')
molecule_list = []
for i in range(n_samples):
n_nodes = nodes_dist.sample()
one_hot, charges, x = sample(
args, device, flow, dequantizer, prior, n_samples=1, n_nodes=n_nodes)
molecule_list.append((one_hot.detach(), x.detach()))
validity_dict, _ = analyze_stability_for_molecules(molecule_list)
wandb.log(validity_dict)
return validity_dict
def sample_batch(prior, flow):
print('Creating...')
n_nodes = nodes_dist.sample()
_, _, x = sample(args, device, flow, dequantizer, prior, n_samples=1, n_nodes=n_nodes)
return x
def main():
if args.resume is not None:
flow_state_dict = torch.load(join(args.resume, 'flow.npy'))
dequantizer_state_dict = torch.load(join(args.resume, 'dequantizer.npy'))
optim_state_dict = torch.load(join(args.resume, 'optim.npy'))
flow.load_state_dict(flow_state_dict)
dequantizer.load_state_dict(dequantizer_state_dict)
optim.load_state_dict(optim_state_dict)
flow_dp = flow
if args.dp and torch.cuda.device_count() > 1:
print(f'Training using {torch.cuda.device_count()} GPUs')
flow_dp = torch.nn.DataParallel(flow_dp.cpu())
flow_dp = flow_dp.cuda()
best_nll_val = 1e8
best_nll_test = 1e8
for epoch in range(args.start_epoch, args.n_epochs):
start_epoch = time.time()
train_epoch(dataloaders['train'], epoch, flow, flow_dp)
print(f"Epoch took {time.time() - start_epoch:.1f} seconds.")
if epoch % args.test_epochs == 0:
analyze_and_save(epoch)
nll_val = test(dataloaders['valid'], epoch, flow_dp, partition='Val')
nll_test = test(dataloaders['test'], epoch, flow_dp, partition='Test')
if nll_val < best_nll_val:
best_nll_val = nll_val
best_nll_test = nll_test
if args.save_model:
args.current_epoch = epoch + 1
utils.save_model(optim, 'outputs/%s/optim.npy' % args.exp_name)
utils.save_model(flow, 'outputs/%s/flow.npy' % args.exp_name)
utils.save_model(dequantizer, 'outputs/%s/dequantizer.npy' % args.exp_name)
with open('outputs/%s/args.pickle' % args.exp_name, 'wb') as f:
pickle.dump(args, f)
if args.save_model and epoch > 28:
utils.save_model(optim, 'outputs/%s/optim_%d.npy' % (args.exp_name, epoch))
utils.save_model(flow, 'outputs/%s/flow_%d.npy' % (args.exp_name, epoch))
utils.save_model(dequantizer, 'outputs/%s/dequantizer_%d.npy' % (args.exp_name, epoch))
with open('outputs/%s/args_%d.pickle' % (args.exp_name, epoch), 'wb') as f:
pickle.dump(args, f)
print('Val loss: %.4f \t Test loss: %.4f' % (nll_val, nll_test))
print('Best val loss: %.4f \t Best test loss: %.4f' % (best_nll_val, best_nll_test))
wandb.log({"Val loss ": nll_val}, commit=True)
wandb.log({"Test loss ": nll_test}, commit=True)
wandb.log({"Best cross-validated test loss ": best_nll_test}, commit=True)
if __name__ == "__main__":
main()
| <filename>main_qm9.py
import utils
import argparse
import wandb
from os.path import join
from qm9 import dataset
from qm9 import losses
from qm9.models import get_optim, get_model
from flows.utils import assert_mean_zero_with_mask, remove_mean_with_mask,\
assert_correctly_masked
import torch
import time
import pickle
import numpy as np
import qm9.visualizer as vis
from qm9.analyze import analyze_stability_for_molecules
from qm9.utils import prepare_context
from qm9.sampling import sample_chain, sample
from qm9 import mol_dim
parser = argparse.ArgumentParser(description='SE3')
parser.add_argument('--exp_name', type=str, default='debug_10')
parser.add_argument('--model', type=str, default='egnn_dynamics',
help='our_dynamics | schnet | simple_dynamics | '
'kernel_dynamics | egnn_dynamics |gnn_dynamics')
parser.add_argument('--n_epochs', type=int, default=100)
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--lr', type=float, default=2e-4)
parser.add_argument('--brute_force', type=eval, default=False,
help='True | False')
parser.add_argument('--actnorm', type=eval, default=True,
help='True | False')
parser.add_argument('--break_train_epoch', type=eval, default=False,
help='True | False')
parser.add_argument('--dp', type=eval, default=True,
help='True | False')
parser.add_argument('--condition_time', type=eval, default=True,
help='True | False')
parser.add_argument('--clip_grad', type=eval, default=True,
help='True | False')
parser.add_argument('--trace', type=str, default='hutch',
help='hutch | exact')
parser.add_argument('--n_layers', type=int, default=6,
help='number of layers')
parser.add_argument('--nf', type=int, default=64,
                    help='number of hidden node features')
parser.add_argument('--ode_regularization', type=float, default=1e-3)
parser.add_argument('--dataset', type=str, default='qm9',
help='qm9 | qm9_positional')
parser.add_argument('--dequantization', type=str, default='argmax_variational',
help='uniform | variational | argmax_variational')
parser.add_argument('--tanh', type=eval, default=True,
help='use tanh in the coord_mlp')
parser.add_argument('--attention', type=eval, default=True,
help='use attention in the EGNN')
parser.add_argument('--n_report_steps', type=int, default=1)
parser.add_argument('--wandb_usr', type=str, default='')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='enables CUDA training')
parser.add_argument('--save_model', type=eval, default=True,
help='save model')
parser.add_argument('--generate_epochs', type=int, default=1,
help='save model')
parser.add_argument('--num_workers', type=int, default=0, help='Number of worker for the dataloader')
parser.add_argument('--test_epochs', type=int, default=1)
parser.add_argument('--physics', type=int, default=0, help='Minimize energy loss or not')
parser.add_argument('--data_augmentation', type=eval, default=False,
                    help='augment training data with random rotations')
parser.add_argument('--x_aggregation', type=str, default='sum',
help='sum | mean')
parser.add_argument("--conditioning", nargs='+', default=[],
help='multiple arguments can be passed, '
'including: homo | onehot | lumo | num_atoms | etc. '
'usage: "--conditioning H_thermo homo onehot H_thermo"')
parser.add_argument('--resume', type=str, default=None,
help='')
parser.add_argument('--start_epoch', type=int, default=0,
help='')
args, unparsed_args = parser.parse_known_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
dtype = torch.float32
if args.resume is not None:
exp_name = args.exp_name + '_resume'
start_epoch = args.start_epoch
resume = args.resume
wandb_usr = args.wandb_usr
with open(join(args.resume, 'args.pickle'), 'rb') as f:
args = pickle.load(f)
args.resume = resume
args.break_train_epoch = False
args.exp_name = exp_name
args.start_epoch = start_epoch
args.wandb_usr = wandb_usr
print(args)
utils.create_folders(args)
print(args)
# Log all args to wandb
wandb.init(entity='aipp', project='eegVAE', name=args.exp_name, config=args)
wandb.save('*.txt')
# Retrieve QM9 dataloaders
dataloaders, charge_scale = dataset.retrieve_dataloaders(args.batch_size, args.num_workers)
data_dummy = next(iter(dataloaders['train']))
if len(args.conditioning) > 0:
print(f'Conditioning on {args.conditioning}')
context_dummy = prepare_context(args.conditioning, data_dummy)
context_node_nf = context_dummy.size(2)
else:
context_node_nf = 0
args.context_node_nf = context_node_nf
# Create EGNN flow
prior, flow, dequantizer, nodes_dist = get_model(args, device)
flow = flow.to(device)
dequantizer = dequantizer.to(device)
optim = get_optim(args, flow, dequantizer)
print(flow)
gradnorm_queue = utils.Queue()
gradnorm_queue.add(3000) # Add large value that will be flushed.
def check_mask_correct(variables, node_mask):
for variable in variables:
assert_correctly_masked(variable, node_mask)
def train_epoch(loader, epoch, flow, flow_dp):
nll_epoch = []
for i, data in enumerate(loader):
# Get data
x = data['positions'].to(device, dtype)
node_mask = data['atom_mask'].to(device, dtype).unsqueeze(2)
edge_mask = data['edge_mask'].to(device, dtype)
one_hot = data['one_hot'].to(device, dtype)
charges = data['charges'].to(device, dtype).unsqueeze(2)
x = remove_mean_with_mask(x, node_mask)
if args.data_augmentation:
x = utils.random_rotation(x).detach()
check_mask_correct([x, one_hot, charges], node_mask)
assert_mean_zero_with_mask(x, node_mask)
h = {'categorical': one_hot, 'integer': charges}
if len(args.conditioning) > 0:
context = prepare_context(args.conditioning, data).to(device, dtype)
assert_correctly_masked(context, node_mask)
else:
context = None
optim.zero_grad()
if args.physics:
energy_loss = mol_dim.compute_energy_loss(dequantizer, flow, prior,
nodes_dist, x.clone(), node_mask, edge_mask, context)
else:
            energy_loss = torch.tensor(0.0, device=device)
nll, reg_term, mean_abs_z = losses.compute_loss_and_nll(args, dequantizer, flow_dp, prior, nodes_dist, x, h,
node_mask, edge_mask, context)
loss = 0.01*energy_loss + nll + args.ode_regularization * reg_term
        loss.backward()
        # Clip only after backward(), once this batch's gradients actually exist.
        if args.clip_grad:
            grad_norm = utils.gradient_clipping(flow, gradnorm_queue)
        else:
            grad_norm = 0.
        if i % args.n_report_steps == 0:
            print(f"\repoch: {epoch}, iter: {i}/{len(loader)}, "
                  f"Loss {loss.item():.2f}, NLL: {nll.item():.2f}, "
                  f"RegTerm: {reg_term.item():.1f}, "
                  f"PhysTerm: {energy_loss.item():.1f}, "
                  f"GradNorm: {grad_norm:.1f}")
        optim.step()
nll_epoch.append(nll.item())
if i % 100 == 0 and i!=0:
analyze_and_save(epoch)
save_and_sample_chain(epoch=epoch)
sample_different_sizes_and_save(epoch=epoch)
vis.visualize("outputs/%s/epoch_%d" % (args.exp_name, epoch), wandb=wandb)
vis.visualize_chain(
"outputs/%s/epoch_%d/chain/" % (args.exp_name, epoch),
wandb=wandb)
wandb.log({"mean(abs(z))": mean_abs_z}, commit=False)
wandb.log({"Batch NLL": nll.item()}, commit=True)
wandb.log({"Energy": energy_loss.item()}, commit=True)
if args.break_train_epoch:
break
wandb.log({"Train Epoch NLL": np.mean(nll_epoch)}, commit=False)
def test(loader, epoch, flow_dp, partition='Test'):
with torch.no_grad():
nll_epoch = 0
n_samples = 0
for i, data in enumerate(loader):
# Get data
x = data['positions'].to(device, dtype)
batch_size = x.size(0)
node_mask = data['atom_mask'].to(device, dtype).unsqueeze(2)
edge_mask = data['edge_mask'].to(device, dtype)
one_hot = data['one_hot'].to(device, dtype)
charges = data['charges'].to(device, dtype).unsqueeze(2)
x = remove_mean_with_mask(x, node_mask)
check_mask_correct([x, one_hot, charges], node_mask)
assert_mean_zero_with_mask(x, node_mask)
h = {'categorical': one_hot, 'integer': charges}
if len(args.conditioning) > 0:
context = prepare_context(args.conditioning, data).to(device, dtype)
assert_correctly_masked(context, node_mask)
else:
context = None
# transform batch through flow
nll, _, _ = losses.compute_loss_and_nll(args, dequantizer, flow_dp, prior, nodes_dist, x, h, node_mask,
edge_mask, context)
# standard nll from forward KL
nll_epoch += nll.item() * batch_size
n_samples += batch_size
if i % args.n_report_steps == 0:
print(f"\r {partition} NLL \t epoch: {epoch}, iter: {i}/{len(loader)}, "
f"NLL: {nll_epoch/n_samples:.2f}")
if args.break_train_epoch:
break
return nll_epoch/n_samples
def save_and_sample_chain(epoch=0, id_from=0):
one_hot, charges, x = sample_chain(
args, device, flow, dequantizer, prior, n_tries=1)
vis.save_xyz_file(
'outputs/%s/epoch_%d/chain/' % (args.exp_name, epoch), one_hot, charges, x,
id_from, name='chain')
return one_hot, charges, x
def sample_different_sizes_and_save(n_samples=10, epoch=0):
for counter in range(n_samples):
n_nodes = nodes_dist.sample()
one_hot, charges, x = sample(args, device, flow, dequantizer, prior, n_samples=1, n_nodes=n_nodes)
vis.save_xyz_file(
'outputs/%s/epoch_%d/' % (args.exp_name, epoch), one_hot,
charges, x,
1*counter, name='molecule')
def analyze_and_save(epoch, n_samples=1000):
print('Analyzing molecule validity...')
molecule_list = []
for i in range(n_samples):
n_nodes = nodes_dist.sample()
one_hot, charges, x = sample(
args, device, flow, dequantizer, prior, n_samples=1, n_nodes=n_nodes)
molecule_list.append((one_hot.detach(), x.detach()))
validity_dict, _ = analyze_stability_for_molecules(molecule_list)
wandb.log(validity_dict)
return validity_dict
def sample_batch(prior, flow):
print('Creating...')
n_nodes = nodes_dist.sample()
_, _, x = sample(args, device, flow, dequantizer, prior, n_samples=1, n_nodes=n_nodes)
return x
def main():
if args.resume is not None:
flow_state_dict = torch.load(join(args.resume, 'flow.npy'))
dequantizer_state_dict = torch.load(join(args.resume, 'dequantizer.npy'))
optim_state_dict = torch.load(join(args.resume, 'optim.npy'))
flow.load_state_dict(flow_state_dict)
dequantizer.load_state_dict(dequantizer_state_dict)
optim.load_state_dict(optim_state_dict)
flow_dp = flow
if args.dp and torch.cuda.device_count() > 1:
print(f'Training using {torch.cuda.device_count()} GPUs')
flow_dp = torch.nn.DataParallel(flow_dp.cpu())
flow_dp = flow_dp.cuda()
best_nll_val = 1e8
best_nll_test = 1e8
for epoch in range(args.start_epoch, args.n_epochs):
start_epoch = time.time()
train_epoch(dataloaders['train'], epoch, flow, flow_dp)
print(f"Epoch took {time.time() - start_epoch:.1f} seconds.")
if epoch % args.test_epochs == 0:
analyze_and_save(epoch)
nll_val = test(dataloaders['valid'], epoch, flow_dp, partition='Val')
nll_test = test(dataloaders['test'], epoch, flow_dp, partition='Test')
if nll_val < best_nll_val:
best_nll_val = nll_val
best_nll_test = nll_test
if args.save_model:
args.current_epoch = epoch + 1
utils.save_model(optim, 'outputs/%s/optim.npy' % args.exp_name)
utils.save_model(flow, 'outputs/%s/flow.npy' % args.exp_name)
utils.save_model(dequantizer, 'outputs/%s/dequantizer.npy' % args.exp_name)
with open('outputs/%s/args.pickle' % args.exp_name, 'wb') as f:
pickle.dump(args, f)
if args.save_model and epoch > 28:
utils.save_model(optim, 'outputs/%s/optim_%d.npy' % (args.exp_name, epoch))
utils.save_model(flow, 'outputs/%s/flow_%d.npy' % (args.exp_name, epoch))
utils.save_model(dequantizer, 'outputs/%s/dequantizer_%d.npy' % (args.exp_name, epoch))
with open('outputs/%s/args_%d.pickle' % (args.exp_name, epoch), 'wb') as f:
pickle.dump(args, f)
print('Val loss: %.4f \t Test loss: %.4f' % (nll_val, nll_test))
print('Best val loss: %.4f \t Best test loss: %.4f' % (best_nll_val, best_nll_test))
wandb.log({"Val loss ": nll_val}, commit=True)
wandb.log({"Test loss ": nll_test}, commit=True)
wandb.log({"Best cross-validated test loss ": best_nll_test}, commit=True)
if __name__ == "__main__":
main()
| en | 0.746269 | # Log all args to wandb # Retrieve QM9 dataloaders # Create EGNN flow # Add large value that will be flushed. # Get data # Get data # transform batch through flow # standard nll from forward KL | 1.786914 | 2 |
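Both loops above rely on the coordinates being mean-centred per molecule under the node mask (remove_mean_with_mask / assert_mean_zero_with_mask). A rough sketch of what such masked centring typically looks like — written here for illustration, not copied from flows.utils:

import torch

def center_with_mask(x, node_mask):
    # x: (batch, n_nodes, 3); node_mask: (batch, n_nodes, 1) with 1 for real atoms.
    n = node_mask.sum(dim=1, keepdim=True)               # atoms per molecule
    mean = (x * node_mask).sum(dim=1, keepdim=True) / n  # masked mean position
    return (x - mean) * node_mask                        # padded rows stay zero

x = torch.randn(2, 5, 3)
mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]], dtype=torch.float32).unsqueeze(2)
centered = center_with_mask(x * mask, mask)
assert torch.allclose(centered.sum(dim=1), torch.zeros(2, 3), atol=1e-5)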
vise/tests/util/test_string.py | kumagai-group/vise | 16 | 10533 | <reponame>kumagai-group/vise<gh_stars>10-100
# -*- coding: utf-8 -*-
# Copyright (c) 2021. Distributed under the terms of the MIT License.
from vise.util.string import numbers_to_lowercases
def test_numbers_to_lowercases():
assert numbers_to_lowercases("Mg2") == "Mg₂" | # -*- coding: utf-8 -*-
# Copyright (c) 2021. Distributed under the terms of the MIT License.
from vise.util.string import numbers_to_lowercases
def test_numbers_to_lowercases():
assert numbers_to_lowercases("Mg2") == "Mg₂" | en | 0.829111 | # -*- coding: utf-8 -*- # Copyright (c) 2021. Distributed under the terms of the MIT License. | 3.421526 | 3 |
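The expected value "Mg₂" uses the Unicode subscript two, so the helper effectively maps digits to subscript characters. One plausible way such a mapping could be written (a sketch; not necessarily vise's actual implementation):

SUBSCRIPT_DIGITS = str.maketrans("0123456789", "₀₁₂₃₄₅₆₇₈₉")

def digits_to_subscripts(formula: str) -> str:
    # Replace every ASCII digit with its Unicode subscript counterpart.
    return formula.translate(SUBSCRIPT_DIGITS)

assert digits_to_subscripts("Mg2") == "Mg₂"
assert digits_to_subscripts("SrTiO3") == "SrTiO₃"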
code/ch_02_foundations/_02_noneness.py | SuppMonkey/write.pythonic.code | 679 | 10534 | <filename>code/ch_02_foundations/_02_noneness.py<gh_stars>100-1000
def find_accounts(search_text):
# perform search...
if not db_is_available:
return None
# returns a list of account IDs
return db_search(search_text)
def db_search(search_text):
    return [1, 11]
db_is_available = True
accounts = find_accounts('python')
if accounts is None:
    print("Error: DB not available")
else:
    print("Accounts found: Would list them here...")
| <filename>code/ch_02_foundations/_02_noneness.py<gh_stars>100-1000
def find_accounts(search_text):
# perform search...
if not db_is_available:
return None
# returns a list of account IDs
return db_search(search_text)
def db_search(search_text):
    return [1, 11]
db_is_available = True
accounts = find_accounts('python')
if accounts is None:
    print("Error: DB not available")
else:
    print("Accounts found: Would list them here...")
| en | 0.702773 | # perform search... # returns a list of account IDs | 3.078994 | 3 |
pupa/tests/importers/test_base_importer.py | influence-usa/pupa | 0 | 10535 | <reponame>influence-usa/pupa<filename>pupa/tests/importers/test_base_importer.py
import os
import json
import shutil
import tempfile
import mock
import pytest
from opencivicdata.models import Person
from pupa.scrape import Person as ScrapePerson
from pupa.scrape import Organization as ScrapeOrganization
from pupa.importers.base import omnihash, BaseImporter
from pupa.importers import PersonImporter, OrganizationImporter
from pupa.exceptions import UnresolvedIdError, DataImportError
class FakeImporter(BaseImporter):
_type = 'test'
def test_omnihash_python_types():
# string
assert omnihash('test') == omnihash('test')
# list
assert omnihash(['this', 'is', 'a', 'list']) == omnihash(['this', 'is', 'a', 'list'])
# set
assert omnihash({'and', 'a', 'set'}) == omnihash({'set', 'set', 'and', 'a'})
# dict w/ set and tuple as well
assert (omnihash({'a': {('fancy', 'nested'): {'dict'}}}) ==
omnihash({'a': {('fancy', 'nested'): {'dict'}}}))
def test_import_directory():
# write out some temp data to filesystem
datadir = tempfile.mkdtemp()
dicta = {'test': 'A'}
dictb = {'test': 'B'}
open(os.path.join(datadir, 'test_a.json'), 'w').write(json.dumps(dicta))
open(os.path.join(datadir, 'test_b.json'), 'w').write(json.dumps(dictb))
# simply ensure that import directory calls import_data with all dicts
ti = FakeImporter('jurisdiction-id')
with mock.patch.object(ti, attribute='import_data') as mockobj:
ti.import_directory(datadir)
# import_data should be called once
assert mockobj.call_count == 1
# kind of hacky, get the total list of args passed in
arg_objs = list(mockobj.call_args[0][0])
# 2 args only, make sure a and b are in there
assert len(arg_objs) == 2
assert dicta in arg_objs
assert dictb in arg_objs
# clean up datadir
shutil.rmtree(datadir)
# doing these next few tests just on a Person because it is the same code that handles it
# but for completeness maybe it is better to do these on each type?
@pytest.mark.django_db
def test_deduplication_identical_object():
p1 = ScrapePerson('Dwayne').as_dict()
p2 = ScrapePerson('Dwayne').as_dict()
PersonImporter('jid').import_data([p1, p2])
assert Person.objects.count() == 1
@pytest.mark.django_db
def test_exception_on_identical_objects_in_import_stream():
# these two objects aren't identical, but refer to the same thing
# at the moment we consider this an error (but there may be a better way to handle this?)
o1 = ScrapeOrganization('X-Men', classification='unknown').as_dict()
o2 = ScrapeOrganization('X-Men', founding_date='1970', classification='unknown').as_dict()
with pytest.raises(Exception):
OrganizationImporter('jid').import_data([o1, o2])
@pytest.mark.django_db
def test_resolve_json_id():
p1 = ScrapePerson('Dwayne').as_dict()
p2 = ScrapePerson('Dwayne').as_dict()
pi = PersonImporter('jid')
# do import and get database id
p1_id = p1['_id']
p2_id = p2['_id']
pi.import_data([p1, p2])
db_id = Person.objects.get().id
# simplest case
assert pi.resolve_json_id(p1_id) == db_id
# duplicate should resolve to same id
assert pi.resolve_json_id(p2_id) == db_id
# a null id should map to None
assert pi.resolve_json_id(None) is None
# no such id
with pytest.raises(UnresolvedIdError):
pi.resolve_json_id('this-is-invalid')
@pytest.mark.django_db
def test_invalid_fields():
p1 = ScrapePerson('Dwayne').as_dict()
p1['newfield'] = "shouldn't happen"
with pytest.raises(DataImportError):
PersonImporter('jid').import_data([p1])
@pytest.mark.django_db
def test_invalid_fields_related_item():
p1 = ScrapePerson('Dwayne')
p1.add_link('http://example.com')
p1 = p1.as_dict()
p1['links'][0]['test'] = 3
with pytest.raises(DataImportError):
PersonImporter('jid').import_data([p1])
| import os
import json
import shutil
import tempfile
import mock
import pytest
from opencivicdata.models import Person
from pupa.scrape import Person as ScrapePerson
from pupa.scrape import Organization as ScrapeOrganization
from pupa.importers.base import omnihash, BaseImporter
from pupa.importers import PersonImporter, OrganizationImporter
from pupa.exceptions import UnresolvedIdError, DataImportError
class FakeImporter(BaseImporter):
_type = 'test'
def test_omnihash_python_types():
# string
assert omnihash('test') == omnihash('test')
# list
assert omnihash(['this', 'is', 'a', 'list']) == omnihash(['this', 'is', 'a', 'list'])
# set
assert omnihash({'and', 'a', 'set'}) == omnihash({'set', 'set', 'and', 'a'})
# dict w/ set and tuple as well
assert (omnihash({'a': {('fancy', 'nested'): {'dict'}}}) ==
omnihash({'a': {('fancy', 'nested'): {'dict'}}}))
def test_import_directory():
# write out some temp data to filesystem
datadir = tempfile.mkdtemp()
dicta = {'test': 'A'}
dictb = {'test': 'B'}
open(os.path.join(datadir, 'test_a.json'), 'w').write(json.dumps(dicta))
open(os.path.join(datadir, 'test_b.json'), 'w').write(json.dumps(dictb))
# simply ensure that import directory calls import_data with all dicts
ti = FakeImporter('jurisdiction-id')
with mock.patch.object(ti, attribute='import_data') as mockobj:
ti.import_directory(datadir)
# import_data should be called once
assert mockobj.call_count == 1
# kind of hacky, get the total list of args passed in
arg_objs = list(mockobj.call_args[0][0])
# 2 args only, make sure a and b are in there
assert len(arg_objs) == 2
assert dicta in arg_objs
assert dictb in arg_objs
# clean up datadir
shutil.rmtree(datadir)
# doing these next few tests just on a Person because it is the same code that handles it
# but for completeness maybe it is better to do these on each type?
@pytest.mark.django_db
def test_deduplication_identical_object():
p1 = ScrapePerson('Dwayne').as_dict()
p2 = ScrapePerson('Dwayne').as_dict()
PersonImporter('jid').import_data([p1, p2])
assert Person.objects.count() == 1
@pytest.mark.django_db
def test_exception_on_identical_objects_in_import_stream():
# these two objects aren't identical, but refer to the same thing
# at the moment we consider this an error (but there may be a better way to handle this?)
o1 = ScrapeOrganization('X-Men', classification='unknown').as_dict()
o2 = ScrapeOrganization('X-Men', founding_date='1970', classification='unknown').as_dict()
with pytest.raises(Exception):
OrganizationImporter('jid').import_data([o1, o2])
@pytest.mark.django_db
def test_resolve_json_id():
p1 = ScrapePerson('Dwayne').as_dict()
p2 = ScrapePerson('Dwayne').as_dict()
pi = PersonImporter('jid')
# do import and get database id
p1_id = p1['_id']
p2_id = p2['_id']
pi.import_data([p1, p2])
db_id = Person.objects.get().id
# simplest case
assert pi.resolve_json_id(p1_id) == db_id
# duplicate should resolve to same id
assert pi.resolve_json_id(p2_id) == db_id
# a null id should map to None
assert pi.resolve_json_id(None) is None
# no such id
with pytest.raises(UnresolvedIdError):
pi.resolve_json_id('this-is-invalid')
@pytest.mark.django_db
def test_invalid_fields():
p1 = ScrapePerson('Dwayne').as_dict()
p1['newfield'] = "shouldn't happen"
with pytest.raises(DataImportError):
PersonImporter('jid').import_data([p1])
@pytest.mark.django_db
def test_invalid_fields_related_item():
p1 = ScrapePerson('Dwayne')
p1.add_link('http://example.com')
p1 = p1.as_dict()
p1['links'][0]['test'] = 3
with pytest.raises(DataImportError):
PersonImporter('jid').import_data([p1]) | en | 0.906481 | # string # list # set # dict w/ set and tuple as well # write out some temp data to filesystem # simply ensure that import directory calls import_data with all dicts # import_data should be called once # kind of hacky, get the total list of args passed in # 2 args only, make sure a and b are in there # clean up datadir # doing these next few tests just on a Person because it is the same code that handles it # but for completeness maybe it is better to do these on each type? # these two objects aren't identical, but refer to the same thing # at the moment we consider this an error (but there may be a better way to handle this?) # do import and get database id # simplest case # duplicate should resolve to same id # a null id should map to None # no such id | 2.150662 | 2 |
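test_omnihash_python_types fixes the contract for omnihash: structurally equal values must hash equally even though sets and dict items carry no defined order. A rough sketch of how an order-insensitive recursive hash could be built (an illustration of that contract, not pupa's implementation):

import hashlib

def omnihash_sketch(obj):
    # Canonicalise unordered containers (sets, dict items) by sorting them,
    # then hash the canonical representation.
    def canon(o):
        if isinstance(o, dict):
            return ('dict', sorted((canon(k), canon(v)) for k, v in o.items()))
        if isinstance(o, (set, frozenset)):
            return ('set', sorted(canon(e) for e in o))
        if isinstance(o, (list, tuple)):
            return ('seq', [canon(e) for e in o])
        return ('val', repr(o))
    return hashlib.sha256(repr(canon(obj)).encode('utf-8')).hexdigest()

assert omnihash_sketch({'and', 'a', 'set'}) == omnihash_sketch({'set', 'set', 'and', 'a'})
assert (omnihash_sketch({'a': {('fancy', 'nested'): {'dict'}}}) ==
        omnihash_sketch({'a': {('fancy', 'nested'): {'dict'}}}))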
packaging/bdist_trinoadmin.py | wgzhao/trino-admin | 0 | 10536 | <filename>packaging/bdist_trinoadmin.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
from distutils import log as logger
from distutils.dir_util import remove_tree
import pip
try:
from setuptools import Command
except ImportError:
from distutils.core import Command
from packaging import package_dir
class bdist_trinoadmin(Command):
description = 'create a distribution for trino-admin'
user_options = [('bdist-dir=', 'b',
'temporary directory for creating the distribution'),
('dist-dir=', 'd',
'directory to put final built distributions in'),
('virtualenv-version=', None,
'version of virtualenv to download'),
('keep-temp', 'k',
'keep the pseudo-installation tree around after ' +
'creating the distribution archive'),
('online-install', None, 'boolean flag indicating if ' +
'the installation should pull dependencies from the ' +
'Internet or use the ones supplied in the third party ' +
'directory'),
('plat-name=', 'p',
'platform name to embed in generated filenames' +
'(default: linux_x86_64)')
]
default_virtualenv_version = '12.0.7'
NATIVE_WHEELS = ['pycrypto-2.6.1-{0}-none-linux_x86_64.whl', 'twofish-0.3.0-{0}-none-linux_x86_64.whl']
def build_wheel(self, build_dir):
cmd = self.reinitialize_command('bdist_wheel')
cmd.dist_dir = build_dir
self.run_command('bdist_wheel')
cmd.compression = 'deflated'
# Ensure that you get the finalized archive name
cmd.finalize_options()
# wheel_name = cmd.get_archive_basename()
# logger.info('creating %s in %s', wheel_name + '.whl', build_dir)
return ""
def generate_install_script(self, wheel_name, build_dir):
with open(os.path.join(package_dir, 'install-trinoadmin.template'), 'r') as template:
with open(os.path.join(build_dir, 'install-trinoadmin.sh'), 'w') as install_script_file:
install_script = self._fill_in_template(template.readlines(), wheel_name)
install_script_file.write(install_script)
os.chmod(os.path.join(build_dir, 'install-trinoadmin.sh'), 0o755)
def _fill_in_template(self, template_lines, wheel_name):
if self.online_install:
extra_install_args = ''
else:
extra_install_args = '--no-index --find-links third-party'
filled_in = [self._replace_template_values(line, wheel_name, extra_install_args) for line in template_lines]
return ''.join(filled_in)
def _replace_template_values(self, line, wheel_name, extra_install_args):
line = re.sub(r'%ONLINE_OR_OFFLINE_INSTALL%', extra_install_args, line)
line = re.sub(r'%WHEEL_NAME%', wheel_name, line)
line = re.sub(r'%VIRTUALENV_VERSION%', self.virtualenv_version, line)
return line
def package_dependencies(self, build_dir):
thirdparty_dir = os.path.join(build_dir, 'third-party')
requirements = self.distribution.install_requires
for requirement in requirements:
pip.main(['wheel',
'--wheel-dir={0}'.format(thirdparty_dir),
'--no-cache',
requirement])
pip.main(['download',
'-d',
thirdparty_dir,
'--no-cache-dir',
'--no-binary',
':all:',
'virtualenv=={0}'.format(self.virtualenv_version)])
def archive_dist(self, build_dir, dist_dir):
archive_basename = self.distribution.get_fullname()
if self.online_install:
archive_basename += '-online'
else:
archive_basename += '-offline'
archive_file = os.path.join(dist_dir, archive_basename)
self.mkpath(os.path.dirname(archive_file))
self.make_archive(archive_file, 'gztar',
root_dir=os.path.dirname(build_dir),
base_dir=os.path.basename(build_dir))
logger.info('created %s.tar.gz', archive_file)
def run(self):
build_dir = self.bdist_dir
self.mkpath(build_dir)
wheel_name = self.build_wheel(build_dir)
self.generate_install_script(wheel_name, build_dir)
if not self.online_install:
self.package_dependencies(build_dir)
self.archive_dist(build_dir, self.dist_dir)
if not self.keep_temp:
remove_tree(build_dir)
def initialize_options(self):
self.bdist_dir = None
self.dist_dir = None
self.virtualenv_url_base = None
self.virtualenv_version = None
self.keep_temp = False
self.online_install = False
self.plat_name = None
def finalize_options(self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, self.distribution.get_name())
if self.dist_dir is None:
self.dist_dir = 'dist'
if self.virtualenv_version is None:
self.virtualenv_version = self.default_virtualenv_version
self.plat_name_supplied = self.plat_name is not None
| <filename>packaging/bdist_trinoadmin.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
from distutils import log as logger
from distutils.dir_util import remove_tree
import pip
try:
from setuptools import Command
except ImportError:
from distutils.core import Command
from packaging import package_dir
class bdist_trinoadmin(Command):
description = 'create a distribution for trino-admin'
user_options = [('bdist-dir=', 'b',
'temporary directory for creating the distribution'),
('dist-dir=', 'd',
'directory to put final built distributions in'),
('virtualenv-version=', None,
'version of virtualenv to download'),
('keep-temp', 'k',
'keep the pseudo-installation tree around after ' +
'creating the distribution archive'),
('online-install', None, 'boolean flag indicating if ' +
'the installation should pull dependencies from the ' +
'Internet or use the ones supplied in the third party ' +
'directory'),
('plat-name=', 'p',
'platform name to embed in generated filenames' +
'(default: linux_x86_64)')
]
default_virtualenv_version = '12.0.7'
NATIVE_WHEELS = ['pycrypto-2.6.1-{0}-none-linux_x86_64.whl', 'twofish-0.3.0-{0}-none-linux_x86_64.whl']
def build_wheel(self, build_dir):
cmd = self.reinitialize_command('bdist_wheel')
cmd.dist_dir = build_dir
self.run_command('bdist_wheel')
cmd.compression = 'deflated'
# Ensure that you get the finalized archive name
cmd.finalize_options()
# wheel_name = cmd.get_archive_basename()
# logger.info('creating %s in %s', wheel_name + '.whl', build_dir)
return ""
def generate_install_script(self, wheel_name, build_dir):
with open(os.path.join(package_dir, 'install-trinoadmin.template'), 'r') as template:
with open(os.path.join(build_dir, 'install-trinoadmin.sh'), 'w') as install_script_file:
install_script = self._fill_in_template(template.readlines(), wheel_name)
install_script_file.write(install_script)
os.chmod(os.path.join(build_dir, 'install-trinoadmin.sh'), 0o755)
def _fill_in_template(self, template_lines, wheel_name):
if self.online_install:
extra_install_args = ''
else:
extra_install_args = '--no-index --find-links third-party'
filled_in = [self._replace_template_values(line, wheel_name, extra_install_args) for line in template_lines]
return ''.join(filled_in)
def _replace_template_values(self, line, wheel_name, extra_install_args):
line = re.sub(r'%ONLINE_OR_OFFLINE_INSTALL%', extra_install_args, line)
line = re.sub(r'%WHEEL_NAME%', wheel_name, line)
line = re.sub(r'%VIRTUALENV_VERSION%', self.virtualenv_version, line)
return line
def package_dependencies(self, build_dir):
thirdparty_dir = os.path.join(build_dir, 'third-party')
requirements = self.distribution.install_requires
for requirement in requirements:
pip.main(['wheel',
'--wheel-dir={0}'.format(thirdparty_dir),
'--no-cache',
requirement])
pip.main(['download',
'-d',
thirdparty_dir,
'--no-cache-dir',
'--no-binary',
':all:',
'virtualenv=={0}'.format(self.virtualenv_version)])
def archive_dist(self, build_dir, dist_dir):
archive_basename = self.distribution.get_fullname()
if self.online_install:
archive_basename += '-online'
else:
archive_basename += '-offline'
archive_file = os.path.join(dist_dir, archive_basename)
self.mkpath(os.path.dirname(archive_file))
self.make_archive(archive_file, 'gztar',
root_dir=os.path.dirname(build_dir),
base_dir=os.path.basename(build_dir))
logger.info('created %s.tar.gz', archive_file)
def run(self):
build_dir = self.bdist_dir
self.mkpath(build_dir)
wheel_name = self.build_wheel(build_dir)
self.generate_install_script(wheel_name, build_dir)
if not self.online_install:
self.package_dependencies(build_dir)
self.archive_dist(build_dir, self.dist_dir)
if not self.keep_temp:
remove_tree(build_dir)
def initialize_options(self):
self.bdist_dir = None
self.dist_dir = None
self.virtualenv_url_base = None
self.virtualenv_version = None
self.keep_temp = False
self.online_install = False
self.plat_name = None
def finalize_options(self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, self.distribution.get_name())
if self.dist_dir is None:
self.dist_dir = 'dist'
if self.virtualenv_version is None:
self.virtualenv_version = self.default_virtualenv_version
self.plat_name_supplied = self.plat_name is not None
| en | 0.80077 | #!/usr/bin/env python # -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Ensure that you get the finalized archive name # wheel_name = cmd.get_archive_basename() # logger.info('creating %s in %s', wheel_name + '.whl', build_dir) | 1.599458 | 2 |
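For reference, _replace_template_values simply rewrites the %...% placeholders in each line of install-trinoadmin.template. With a hypothetical template line (the real template content is not shown in this file), an offline build would produce something like:

import re

line = 'pip install %ONLINE_OR_OFFLINE_INSTALL% %WHEEL_NAME%'  # hypothetical template line
line = re.sub(r'%ONLINE_OR_OFFLINE_INSTALL%', '--no-index --find-links third-party', line)
line = re.sub(r'%WHEEL_NAME%', 'trinoadmin-2.0-py3-none-any.whl', line)  # assumed wheel name
print(line)  # pip install --no-index --find-links third-party trinoadmin-2.0-py3-none-any.whl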
2020-05-month-long-challenge/day22.py | jkbockstael/leetcode | 0 | 10537 | <filename>2020-05-month-long-challenge/day22.py<gh_stars>0
#!/usr/bin/env python3
# Day 22: Sort Characters By Frequency
#
# Given a string, sort it in decreasing order based on the frequency of
# characters.
import collections
class Solution:
def frequencySort(self, s: str) -> str:
return "".join(map(
lambda t: t[0] * t[1],
collections.Counter(s).most_common(len(s))))
# Tests
assert Solution().frequencySort("tree") in ["eert", "eetr"]
assert Solution().frequencySort("cccaaa") in ["cccaaa", "aaaccc"]
assert Solution().frequencySort("Aabb") in ["bbAa", "bbaA"]
| <filename>2020-05-month-long-challenge/day22.py<gh_stars>0
#!/usr/bin/env python3
# Day 22: Sort Characters By Frequency
#
# Given a string, sort it in decreasing order based on the frequency of
# characters.
import collections
class Solution:
def frequencySort(self, s: str) -> str:
return "".join(map(
lambda t: t[0] * t[1],
collections.Counter(s).most_common(len(s))))
# Tests
assert Solution().frequencySort("tree") in ["eert", "eetr"]
assert Solution().frequencySort("cccaaa") in ["cccaaa", "aaaccc"]
assert Solution().frequencySort("Aabb") in ["bbAa", "bbaA"]
| en | 0.820876 | #!/usr/bin/env python3 # Day 22: Sort Characters By Frequency # # Given a string, sort it in decreasing order based on the frequency of # characters. # Tests | 4.05423 | 4 |
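An equivalent formulation sorts the characters directly instead of rebuilding runs from most_common; only relative frequencies matter, so ties may come out in a different but equally valid order:

import collections

def frequency_sort(s: str) -> str:
    counts = collections.Counter(s)
    # Sort by descending frequency; the secondary key only makes ties deterministic.
    return "".join(sorted(s, key=lambda ch: (-counts[ch], ch)))

assert frequency_sort("tree") in ["eert", "eetr"]
assert frequency_sort("cccaaa") in ["cccaaa", "aaaccc"]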
loc.py | relax-space/pandas-first | 0 | 10538 | <gh_stars>0
'''
Notes: loc and iloc support several operations
1. select one or more rows of data
2. select one or more columns of data
3. select the value of a single cell
For a DataFrame created without explicit index and columns, iloc and loc behave the same;
the difference is that iloc selects by positional index while loc selects by index label
'''
import numpy as np
import pandas as pd
def test_1():
    # Select by row
pf = pd.DataFrame([[1, 2], [3, 4]])
iloc_0 = pf.iloc[0]
loc_0 = pf.loc[0]
assert pd.Series == type(iloc_0) == type(loc_0), 'loc error'
assert [1, 2
] == iloc_0.values.tolist() == loc_0.values.tolist(), 'loc 2 error'
    # Note the difference below between positional indices and index labels
iloc_01 = pf.iloc[0:2]
loc_01 = pf.loc[0:1]
assert [[1, 2], [
3, 4
]] == iloc_01.values.tolist() == loc_01.values.tolist(), 'loc 3 error'
def test_2():
    # Select by column
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
iloc_0 = df.iloc[:, 0]
loc_0 = df.loc[:, 0]
assert pd.Series == type(iloc_0) == type(loc_0), 'loc2 1 error'
assert [
1, 4
] == iloc_0.values.tolist() == loc_0.values.tolist(), 'loc2 2 error'
loc_01 = df.loc[:, 0:1]
assert pd.DataFrame == type(loc_01), 'loc2 3 error'
assert [[1, 2], [4, 5]] == loc_01.values.tolist(), 'loc2 4 error'
def test_3():
    # Select a single cell
df = pd.DataFrame([[1, 2], [3, 4]])
iloc_00 = df.iloc[0, 0]
loc_00 = df.loc[0, 0]
assert np.int64 == type(iloc_00) == type(loc_00), 'loc3 1 error'
assert 1.0 == iloc_00 == loc_00, 'loc3 2 error'
def test_4():
    # Difference between loc and iloc once index/columns are set
    df = pd.DataFrame([[1, 2], [3, 4]],
                      index=['day1', 'day2'],
                      columns=['grape', 'pineapple'])
    # First row
    iloc_0 = df.iloc[0]
    loc_0 = df.loc['day1']
    assert [
        1, 2
    ] == iloc_0.values.tolist() == loc_0.values.tolist(), 'loc4 1 error'
    # First column
iloc_col_0 = df.iloc[:, 0]
loc_col_0 = df.loc[:, 'grape']
assert [1, 3] == iloc_col_0.values.tolist() == loc_col_0.values.tolist(
), 'loc4 2 error'
| '''
Notes: loc and iloc support several operations
1. select one or more rows of data
2. select one or more columns of data
3. select the value of a single cell
For a DataFrame created without explicit index and columns, iloc and loc behave the same;
the difference is that iloc selects by positional index while loc selects by index label
'''
import numpy as np
import pandas as pd
def test_1():
    # Select by row
pf = pd.DataFrame([[1, 2], [3, 4]])
iloc_0 = pf.iloc[0]
loc_0 = pf.loc[0]
assert pd.Series == type(iloc_0) == type(loc_0), 'loc error'
assert [1, 2
] == iloc_0.values.tolist() == loc_0.values.tolist(), 'loc 2 error'
    # Note the difference below between positional indices and index labels
iloc_01 = pf.iloc[0:2]
loc_01 = pf.loc[0:1]
assert [[1, 2], [
3, 4
]] == iloc_01.values.tolist() == loc_01.values.tolist(), 'loc 3 error'
def test_2():
    # Select by column
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
iloc_0 = df.iloc[:, 0]
loc_0 = df.loc[:, 0]
assert pd.Series == type(iloc_0) == type(loc_0), 'loc2 1 error'
assert [
1, 4
] == iloc_0.values.tolist() == loc_0.values.tolist(), 'loc2 2 error'
loc_01 = df.loc[:, 0:1]
assert pd.DataFrame == type(loc_01), 'loc2 3 error'
assert [[1, 2], [4, 5]] == loc_01.values.tolist(), 'loc2 4 error'
def test_3():
    # Select a single cell
df = pd.DataFrame([[1, 2], [3, 4]])
iloc_00 = df.iloc[0, 0]
loc_00 = df.loc[0, 0]
assert np.int64 == type(iloc_00) == type(loc_00), 'loc3 1 error'
assert 1.0 == iloc_00 == loc_00, 'loc3 2 error'
def test_4():
    # Difference between loc and iloc once index/columns are set
    df = pd.DataFrame([[1, 2], [3, 4]],
                      index=['day1', 'day2'],
                      columns=['grape', 'pineapple'])
    # First row
    iloc_0 = df.iloc[0]
    loc_0 = df.loc['day1']
    assert [
        1, 2
    ] == iloc_0.values.tolist() == loc_0.values.tolist(), 'loc4 1 error'
    # First column
iloc_col_0 = df.iloc[:, 0]
loc_col_0 = df.loc[:, 'grape']
assert [1, 3] == iloc_col_0.values.tolist() == loc_col_0.values.tolist(
), 'loc4 2 error' | zh | 0.949569 | 说明: loc和iloc有几个功能 1. 可以获取一行或者多行数据 2. 可以获取1列或多列数据 3. 可以获取某个单元格的数据 对应dataframe来说, 在不指定index和columns的情况下,iloc和loc一样 区别在于,iloc根据索引下标取值, loc根据索引值取值 # 按行取值 # 看看下面的区别,索引下标和索引值的区别 # 按列取值 # 按单元格取值 # loc 和iloc 区别, 当设置index或columns参数后 # 第一行 # 第一列 | 3.179134 | 3 |
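One point the notes above only hint at: when slicing, iloc uses positions with an exclusive end, while loc uses labels with an inclusive end. A small illustration of standard pandas behaviour:

import pandas as pd

df = pd.DataFrame({'a': [10, 20, 30]}, index=[7, 8, 9])
# iloc: positional slice, end exclusive -> first two rows
assert df.iloc[0:2]['a'].tolist() == [10, 20]
# loc: label slice, end inclusive -> labels 7, 8 and 9
assert df.loc[7:9]['a'].tolist() == [10, 20, 30]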
checkproject/runner.py | perror/checkproject | 0 | 10539 | <gh_stars>0
"""Runner to discover, run and collect the results of all the checks."""
def import_module(module_path):
"""Import a Python file as a module in the current context.
@param module_path: Path to the Python file.
@return: A reference to the module once loaded.
"""
import os
import sys
module_filename = module_path.split(os.sep)[-1]
    if sys.version_info[0] >= 3:
        if sys.version_info[1] >= 5:
# Running a Python 3.5+ version
from importlib.util import spec_from_file_location, module_from_spec
spec = spec_from_file_location(module_filename, module_path)
module = module_from_spec(spec)
spec.loader.exec_module(module)
else:
# Running a Python <= 3.4 version
from importlib.machinery import SourceFileLoader
module = SourceFileLoader(module_filename, module_path).load_module()
else:
# Running a Python 2 version
import imp
module = imp.load_source(module_filename, module_path)
return module
class CheckRunner(object):
"""A class to discover all the checks, run it sequentially and collect
all the results.
"""
def __init__(self, project_dir, checks_dir):
"""Initialize the default runner class.
@param project_dir: Root directory where to find the source
files of the tested project.
@param checks_dir: Root directory where all the checks are found.
"""
self.project_dir = project_dir
self.checks_dir = checks_dir
self.checks = None
def discover(self, pattern='check_*.py', top_dir=None):
"""Discover all the checks in the directory 'top_dir' with all methods
matching the given pattern 'pattern' and update the list of checks.
@param pattern: Prefix pattern of the methods for all
checks.
"""
from checkproject.utils import remove_prefix
import os
import fnmatch
if top_dir is None:
top_dir = self.checks_dir
# List of all the check files detected
check_paths = []
# Scanning all files and subdirectories in breadth-first
for path, _, files in os.walk(os.path.abspath(top_dir)):
for filename in fnmatch.filter(files, pattern):
check_paths.append(remove_prefix(os.path.join(path, filename),
self.checks_dir))
# Initialize self.checks
if self.checks is None:
self.checks = []
# Update self.checks
self.checks = sorted(set(self.checks + check_paths))
def list(self, pattern='Check*'):
"""List all the checks discovered in the order of execution.
@return: A list of all the checks, in their order of execution.
"""
import os
import re
# Initializing self.checks if needed
if self.checks is None:
self.discover()
# Initializing return value
checks = []
# Scanning all the modules
for check_module in self.checks:
module_path = os.path.join(self.checks_dir, check_module)
module_name = module_path.split(os.sep)[-1].split('.')[0]
module = import_module(module_path)
# Extract all the 'Check' classes
classes = [cls for cls in dir(module)
if re.compile(pattern).search(cls) and cls != 'CheckCase']
for class_name in classes:
cls = getattr(module, class_name)
check = cls(self.project_dir)
checks += [module_name + '.' + cls.__name__ + '.' + m
for m in check.list()]
return checks
def run(self, pattern='Check*'):
"""Execute the checks and collect all the results"""
import os
import re
# Initializing self.checks if needed
if self.checks is None:
self.discover()
# Initializing return value
result = None
# Scanning all the modules
for check_module in self.checks:
module_path = os.path.join(self.checks_dir, check_module)
module = import_module(module_path)
# Extract all the 'Check' classes
classes = [cls for cls in dir(module)
if re.compile(pattern).search(cls) and cls != 'CheckCase']
for class_name in classes:
cls = getattr(module, class_name)
check = cls(self.project_dir)
result = check.run(result)
return result
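# A minimal end-to-end sketch (added illustration; both directories are
# hypothetical): discover the check files, list them, then run them and
# collect the aggregated result object.
def _example_run_all_checks(project_dir='/tmp/project', checks_dir='/tmp/checks'):
    runner = CheckRunner(project_dir, checks_dir)
    runner.discover(pattern='check_*.py')
    print(runner.list())
    return runner.run()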
| """Runner to discover, run and collect the results of all the checks."""
def import_module(module_path):
"""Import a Python file as a module in the current context.
@param module_path: Path to the Python file.
@return: A reference to the module once loaded.
"""
import os
import sys
module_filename = module_path.split(os.sep)[-1]
    if sys.version_info[0] >= 3:
        if sys.version_info[:2] >= (3, 5):
# Running a Python 3.5+ version
from importlib.util import spec_from_file_location, module_from_spec
spec = spec_from_file_location(module_filename, module_path)
module = module_from_spec(spec)
spec.loader.exec_module(module)
else:
# Running a Python <= 3.4 version
from importlib.machinery import SourceFileLoader
module = SourceFileLoader(module_filename, module_path).load_module()
else:
# Running a Python 2 version
import imp
module = imp.load_source(module_filename, module_path)
return module
class CheckRunner(object):
"""A class to discover all the checks, run it sequentially and collect
all the results.
"""
def __init__(self, project_dir, checks_dir):
"""Initialize the default runner class.
@param project_dir: Root directory where to find the source
files of the tested project.
@param checks_dir: Root directory where all the checks are found.
"""
self.project_dir = project_dir
self.checks_dir = checks_dir
self.checks = None
def discover(self, pattern='check_*.py', top_dir=None):
"""Discover all the checks in the directory 'top_dir' with all methods
matching the given pattern 'pattern' and update the list of checks.
@param pattern: Prefix pattern of the methods for all
checks.
"""
from checkproject.utils import remove_prefix
import os
import fnmatch
if top_dir is None:
top_dir = self.checks_dir
# List of all the check files detected
check_paths = []
# Scanning all files and subdirectories in breadth-first
for path, _, files in os.walk(os.path.abspath(top_dir)):
for filename in fnmatch.filter(files, pattern):
check_paths.append(remove_prefix(os.path.join(path, filename),
self.checks_dir))
# Initialize self.checks
if self.checks is None:
self.checks = []
# Update self.checks
self.checks = sorted(set(self.checks + check_paths))
def list(self, pattern='Check*'):
"""List all the checks discovered in the order of execution.
@return: A list of all the checks, in their order of execution.
"""
import os
import re
# Initializing self.checks if needed
if self.checks is None:
self.discover()
# Initializing return value
checks = []
# Scanning all the modules
for check_module in self.checks:
module_path = os.path.join(self.checks_dir, check_module)
module_name = module_path.split(os.sep)[-1].split('.')[0]
module = import_module(module_path)
# Extract all the 'Check' classes
classes = [cls for cls in dir(module)
if re.compile(pattern).search(cls) and cls != 'CheckCase']
for class_name in classes:
cls = getattr(module, class_name)
check = cls(self.project_dir)
checks += [module_name + '.' + cls.__name__ + '.' + m
for m in check.list()]
return checks
def run(self, pattern='Check*'):
"""Execute the checks and collect all the results"""
import os
import re
# Initializing self.checks if needed
if self.checks is None:
self.discover()
# Initializing return value
result = None
# Scanning all the modules
for check_module in self.checks:
module_path = os.path.join(self.checks_dir, check_module)
module = import_module(module_path)
# Extract all the 'Check' classes
classes = [cls for cls in dir(module)
if re.compile(pattern).search(cls) and cls != 'CheckCase']
for class_name in classes:
cls = getattr(module, class_name)
check = cls(self.project_dir)
result = check.run(result)
return result | en | 0.735106 | Runner to discover, run and collect the results of all the checks. Import a Python file as a module in the current context. @param module_path: Path to the Python file. @return: A reference to the module once loaded. # Running a Python 3.5+ version # Running a Python <= 3.4 version # Running a Python 2 version A class to discover all the checks, run it sequentially and collect all the results. Initialize the default runner class. @param project_dir: Root directory where to find the source files of the tested project. @param checks_dir: Root directory where to find are all the checks. Discover all the checks in the directory 'top_dir' with all methods matching the given pattern 'pattern' and update the list of checks. @param pattern: Prefix pattern of the methods for all checks. # List of all the check files detected # Scanning all files and subdirectories in breadth-first # Initialize self.checks # Update self.checks List all the checks discovered in the order of execution. @return: A list of all the checks ordered as for executing it. # Initializing self.checks if needed # Initializing return value # Scanning all the modules # Extract all the 'Check' classes Execute the checks and collect all the results # Initializing self.checks if needed # Initializing return value # Scanning all the modules # Extract all the 'Check' classes | 2.917383 | 3 |
neutron_fwaas/extensions/firewall_v2.py | sapcc/neutron-fwaas | 0 | 10540 | # Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from debtcollector import moves
from neutron.api.v2 import resource_helper
from neutron_lib.api.definitions import constants as api_const
from neutron_lib.api.definitions import firewall_v2
from neutron_lib.api import extensions
from neutron_lib.exceptions import firewall_v2 as f_exc
from neutron_lib.services import base as service_base
from oslo_config import cfg
import six
from neutron_fwaas._i18n import _
from neutron_fwaas.common import fwaas_constants
FirewallGroupNotFound = moves.moved_class(
f_exc.FirewallGroupNotFound, 'FirewallGroupNotFound', __name__)
FirewallGroupInUse = moves.moved_class(
f_exc.FirewallGroupInUse, 'FirewallGroupInUse', __name__)
FirewallGroupInPendingState = moves.moved_class(
f_exc.FirewallGroupInPendingState, 'FirewallGroupInPendingState', __name__)
FirewallGroupPortInvalid = moves.moved_class(
f_exc.FirewallGroupPortInvalid, 'FirewallGroupPortInvalid', __name__)
FirewallGroupPortInvalidProject = moves.moved_class(
f_exc.FirewallGroupPortInvalidProject, 'FirewallGroupPortInvalidProject',
__name__)
FirewallGroupPortInUse = moves.moved_class(
f_exc.FirewallGroupPortInUse, 'FirewallGroupPortInUse', __name__)
FirewallPolicyNotFound = moves.moved_class(
f_exc.FirewallPolicyNotFound, 'FirewallPolicyNotFound', __name__)
FirewallPolicyInUse = moves.moved_class(
f_exc.FirewallPolicyInUse, 'FirewallPolicyInUse', __name__)
FirewallPolicyConflict = moves.moved_class(
f_exc.FirewallPolicyConflict, 'FirewallPolicyConflict', __name__)
FirewallRuleSharingConflict = moves.moved_class(
f_exc.FirewallRuleSharingConflict, 'FirewallRuleSharingConflict',
__name__)
FirewallPolicySharingConflict = moves.moved_class(
f_exc.FirewallPolicySharingConflict, 'FirewallPolicySharingConflict',
__name__)
FirewallRuleNotFound = moves.moved_class(
f_exc.FirewallRuleNotFound, 'FirewallRuleNotFound', __name__)
FirewallRuleInUse = moves.moved_class(
f_exc.FirewallRuleInUse, 'FirewallRuleInUse', __name__)
FirewallRuleNotAssociatedWithPolicy = moves.moved_class(
f_exc.FirewallRuleNotAssociatedWithPolicy,
'FirewallRuleNotAssociatedWithPolicy',
__name__)
FirewallRuleInvalidProtocol = moves.moved_class(
f_exc.FirewallRuleInvalidProtocol, 'FirewallRuleInvalidProtocol',
__name__)
FirewallRuleInvalidAction = moves.moved_class(
f_exc.FirewallRuleInvalidAction, 'FirewallRuleInvalidAction',
__name__)
FirewallRuleInvalidICMPParameter = moves.moved_class(
f_exc.FirewallRuleInvalidICMPParameter,
'FirewallRuleInvalidICMPParameter', __name__)
FirewallRuleWithPortWithoutProtocolInvalid = moves.moved_class(
f_exc.FirewallRuleWithPortWithoutProtocolInvalid,
'FirewallRuleWithPortWithoutProtocolInvalid', __name__)
FirewallRuleInvalidPortValue = moves.moved_class(
f_exc.FirewallRuleInvalidPortValue, 'FirewallRuleInvalidPortValue',
__name__)
FirewallRuleInfoMissing = moves.moved_class(
f_exc.FirewallRuleInfoMissing, 'FirewallRuleInfoMissing', __name__)
FirewallIpAddressConflict = moves.moved_class(
f_exc.FirewallIpAddressConflict, 'FirewallIpAddressConflict', __name__)
FirewallInternalDriverError = moves.moved_class(
f_exc.FirewallInternalDriverError, 'FirewallInternalDriverError', __name__)
FirewallRuleConflict = moves.moved_class(
f_exc.FirewallRuleConflict, 'FirewallRuleConflict', __name__)
FirewallRuleAlreadyAssociated = moves.moved_class(
f_exc.FirewallRuleAlreadyAssociated, 'FirewallRuleAlreadyAssociated',
__name__)
default_fwg_rules_opts = [
cfg.StrOpt('ingress_action',
default=api_const.FWAAS_DENY,
help=_('Firewall group rule action allow or '
'deny or reject for ingress. '
'Default is deny.')),
cfg.StrOpt('ingress_source_ipv4_address',
default=None,
help=_('IPv4 source address for ingress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('ingress_source_ipv6_address',
default=None,
help=_('IPv6 source address for ingress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('ingress_source_port',
default=None,
help=_('Source port number or range '
'(min:max) for ingress. '
'Default is None.')),
cfg.StrOpt('ingress_destination_ipv4_address',
default=None,
help=_('IPv4 destination address for ingress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('ingress_destination_ipv6_address',
default=None,
help=_('IPv6 destination address for ingress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('ingress_destination_port',
default=None,
help=_('Destination port number or range '
'(min:max) for ingress. '
'Default is None.')),
cfg.StrOpt('egress_action',
default=api_const.FWAAS_ALLOW,
help=_('Firewall group rule action allow or '
'deny or reject for egress. '
'Default is allow.')),
cfg.StrOpt('egress_source_ipv4_address',
default=None,
help=_('IPv4 source address for egress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('egress_source_ipv6_address',
default=None,
help=_('IPv6 source address for egress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('egress_source_port',
default=None,
help=_('Source port number or range '
'(min:max) for egress. '
'Default is None.')),
cfg.StrOpt('egress_destination_ipv4_address',
default=None,
help=_('IPv4 destination address for egress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('egress_destination_ipv6_address',
default=None,
help=_('IPv6 destination address for egress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('egress_destination_port',
default=None,
help=_('Destination port number or range '
'(min:max) for egress. '
'Default is None.')),
cfg.BoolOpt('shared',
default=False,
help=_('Firewall group rule shared. '
'Default is False.')),
cfg.StrOpt('protocol',
default=None,
help=_('Network protocols (tcp, udp, ...). '
'Default is None.')),
cfg.BoolOpt('enabled',
default=True,
help=_('Firewall group rule enabled. '
'Default is True.')),
]
firewall_quota_opts = [
cfg.IntOpt('quota_firewall_group',
default=10,
help=_('Number of firewall groups allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_firewall_policy',
default=10,
help=_('Number of firewall policies allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_firewall_rule',
default=100,
help=_('Number of firewall rules allowed per tenant. '
'A negative value means unlimited.')),
]
cfg.CONF.register_opts(default_fwg_rules_opts, 'default_fwg_rules')
cfg.CONF.register_opts(firewall_quota_opts, 'QUOTAS')
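# A minimal sketch of reading back the options registered above through
# oslo.config (added illustration only; this helper is not called anywhere
# in the module).
def _example_read_default_fwg_rule_opts():
    return {
        'ingress_action': cfg.CONF.default_fwg_rules.ingress_action,
        'egress_action': cfg.CONF.default_fwg_rules.egress_action,
        'quota_firewall_rule': cfg.CONF.QUOTAS.quota_firewall_rule,
    }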
# TODO(Reedip): Remove the convert_to functionality after bug1706061 is fixed.
def convert_to_string(value):
if value is not None:
return str(value)
return None
firewall_v2.RESOURCE_ATTRIBUTE_MAP[api_const.FIREWALL_RULES][
'source_port']['convert_to'] = convert_to_string
firewall_v2.RESOURCE_ATTRIBUTE_MAP[api_const.FIREWALL_RULES][
'destination_port']['convert_to'] = convert_to_string
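# Added illustration (not part of the original module): the overrides above
# normalize integer port values to strings before API validation.
def _example_convert_port_values():
    assert convert_to_string(8080) == '8080'
    assert convert_to_string('80:90') == '80:90'
    assert convert_to_string(None) is None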
class Firewall_v2(extensions.APIExtensionDescriptor):
api_definition = firewall_v2
@classmethod
def get_resources(cls):
special_mappings = {'firewall_policies': 'firewall_policy'}
plural_mappings = resource_helper.build_plural_mappings(
special_mappings, firewall_v2.RESOURCE_ATTRIBUTE_MAP)
return resource_helper.build_resource_info(
plural_mappings, firewall_v2.RESOURCE_ATTRIBUTE_MAP,
fwaas_constants.FIREWALL_V2, action_map=firewall_v2.ACTION_MAP,
register_quota=True)
@classmethod
def get_plugin_interface(cls):
return Firewallv2PluginBase
@six.add_metaclass(abc.ABCMeta)
class Firewallv2PluginBase(service_base.ServicePluginBase):
def get_plugin_type(self):
return fwaas_constants.FIREWALL_V2
def get_plugin_description(self):
return 'Firewall Service v2 Plugin'
# Firewall Group
@abc.abstractmethod
def create_firewall_group(self, context, firewall_group):
pass
@abc.abstractmethod
def delete_firewall_group(self, context, id):
pass
@abc.abstractmethod
def get_firewall_group(self, context, id, fields=None):
pass
@abc.abstractmethod
def get_firewall_groups(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def update_firewall_group(self, context, id, firewall_group):
pass
# Firewall Policy
@abc.abstractmethod
def create_firewall_policy(self, context, firewall_policy):
pass
@abc.abstractmethod
def delete_firewall_policy(self, context, id):
pass
@abc.abstractmethod
def get_firewall_policy(self, context, id, fields=None):
pass
@abc.abstractmethod
def get_firewall_policies(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def update_firewall_policy(self, context, id, firewall_policy):
pass
# Firewall Rule
@abc.abstractmethod
def create_firewall_rule(self, context, firewall_rule):
pass
@abc.abstractmethod
def delete_firewall_rule(self, context, id):
pass
@abc.abstractmethod
def get_firewall_rule(self, context, id, fields=None):
pass
@abc.abstractmethod
def get_firewall_rules(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def update_firewall_rule(self, context, id, firewall_rule):
pass
@abc.abstractmethod
def insert_rule(self, context, id, rule_info):
pass
@abc.abstractmethod
def remove_rule(self, context, id, rule_info):
pass
| # Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from debtcollector import moves
from neutron.api.v2 import resource_helper
from neutron_lib.api.definitions import constants as api_const
from neutron_lib.api.definitions import firewall_v2
from neutron_lib.api import extensions
from neutron_lib.exceptions import firewall_v2 as f_exc
from neutron_lib.services import base as service_base
from oslo_config import cfg
import six
from neutron_fwaas._i18n import _
from neutron_fwaas.common import fwaas_constants
FirewallGroupNotFound = moves.moved_class(
f_exc.FirewallGroupNotFound, 'FirewallGroupNotFound', __name__)
FirewallGroupInUse = moves.moved_class(
f_exc.FirewallGroupInUse, 'FirewallGroupInUse', __name__)
FirewallGroupInPendingState = moves.moved_class(
f_exc.FirewallGroupInPendingState, 'FirewallGroupInPendingState', __name__)
FirewallGroupPortInvalid = moves.moved_class(
f_exc.FirewallGroupPortInvalid, 'FirewallGroupPortInvalid', __name__)
FirewallGroupPortInvalidProject = moves.moved_class(
f_exc.FirewallGroupPortInvalidProject, 'FirewallGroupPortInvalidProject',
__name__)
FirewallGroupPortInUse = moves.moved_class(
f_exc.FirewallGroupPortInUse, 'FirewallGroupPortInUse', __name__)
FirewallPolicyNotFound = moves.moved_class(
f_exc.FirewallPolicyNotFound, 'FirewallPolicyNotFound', __name__)
FirewallPolicyInUse = moves.moved_class(
f_exc.FirewallPolicyInUse, 'FirewallPolicyInUse', __name__)
FirewallPolicyConflict = moves.moved_class(
f_exc.FirewallPolicyConflict, 'FirewallPolicyConflict', __name__)
FirewallRuleSharingConflict = moves.moved_class(
f_exc.FirewallRuleSharingConflict, 'FirewallRuleSharingConflict',
__name__)
FirewallPolicySharingConflict = moves.moved_class(
f_exc.FirewallPolicySharingConflict, 'FirewallPolicySharingConflict',
__name__)
FirewallRuleNotFound = moves.moved_class(
f_exc.FirewallRuleNotFound, 'FirewallRuleNotFound', __name__)
FirewallRuleInUse = moves.moved_class(
f_exc.FirewallRuleInUse, 'FirewallRuleInUse', __name__)
FirewallRuleNotAssociatedWithPolicy = moves.moved_class(
f_exc.FirewallRuleNotAssociatedWithPolicy,
'FirewallRuleNotAssociatedWithPolicy',
__name__)
FirewallRuleInvalidProtocol = moves.moved_class(
f_exc.FirewallRuleInvalidProtocol, 'FirewallRuleInvalidProtocol',
__name__)
FirewallRuleInvalidAction = moves.moved_class(
f_exc.FirewallRuleInvalidAction, 'FirewallRuleInvalidAction',
__name__)
FirewallRuleInvalidICMPParameter = moves.moved_class(
f_exc.FirewallRuleInvalidICMPParameter,
'FirewallRuleInvalidICMPParameter', __name__)
FirewallRuleWithPortWithoutProtocolInvalid = moves.moved_class(
f_exc.FirewallRuleWithPortWithoutProtocolInvalid,
'FirewallRuleWithPortWithoutProtocolInvalid', __name__)
FirewallRuleInvalidPortValue = moves.moved_class(
f_exc.FirewallRuleInvalidPortValue, 'FirewallRuleInvalidPortValue',
__name__)
FirewallRuleInfoMissing = moves.moved_class(
f_exc.FirewallRuleInfoMissing, 'FirewallRuleInfoMissing', __name__)
FirewallIpAddressConflict = moves.moved_class(
f_exc.FirewallIpAddressConflict, 'FirewallIpAddressConflict', __name__)
FirewallInternalDriverError = moves.moved_class(
f_exc.FirewallInternalDriverError, 'FirewallInternalDriverError', __name__)
FirewallRuleConflict = moves.moved_class(
f_exc.FirewallRuleConflict, 'FirewallRuleConflict', __name__)
FirewallRuleAlreadyAssociated = moves.moved_class(
f_exc.FirewallRuleAlreadyAssociated, 'FirewallRuleAlreadyAssociated',
__name__)
default_fwg_rules_opts = [
cfg.StrOpt('ingress_action',
default=api_const.FWAAS_DENY,
help=_('Firewall group rule action allow or '
'deny or reject for ingress. '
'Default is deny.')),
cfg.StrOpt('ingress_source_ipv4_address',
default=None,
help=_('IPv4 source address for ingress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('ingress_source_ipv6_address',
default=None,
help=_('IPv6 source address for ingress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('ingress_source_port',
default=None,
help=_('Source port number or range '
'(min:max) for ingress. '
'Default is None.')),
cfg.StrOpt('ingress_destination_ipv4_address',
default=None,
help=_('IPv4 destination address for ingress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('ingress_destination_ipv6_address',
default=None,
help=_('IPv6 destination address for ingress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('ingress_destination_port',
default=None,
help=_('Destination port number or range '
'(min:max) for ingress. '
'Default is None.')),
cfg.StrOpt('egress_action',
default=api_const.FWAAS_ALLOW,
help=_('Firewall group rule action allow or '
'deny or reject for egress. '
'Default is allow.')),
cfg.StrOpt('egress_source_ipv4_address',
default=None,
help=_('IPv4 source address for egress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('egress_source_ipv6_address',
default=None,
help=_('IPv6 source address for egress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('egress_source_port',
default=None,
help=_('Source port number or range '
'(min:max) for egress. '
'Default is None.')),
cfg.StrOpt('egress_destination_ipv4_address',
default=None,
help=_('IPv4 destination address for egress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('egress_destination_ipv6_address',
default=None,
help=_('IPv6 destination address for egress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('egress_destination_port',
default=None,
help=_('Destination port number or range '
'(min:max) for egress. '
'Default is None.')),
cfg.BoolOpt('shared',
default=False,
help=_('Firewall group rule shared. '
'Default is False.')),
cfg.StrOpt('protocol',
default=None,
help=_('Network protocols (tcp, udp, ...). '
'Default is None.')),
cfg.BoolOpt('enabled',
default=True,
help=_('Firewall group rule enabled. '
'Default is True.')),
]
firewall_quota_opts = [
cfg.IntOpt('quota_firewall_group',
default=10,
help=_('Number of firewall groups allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_firewall_policy',
default=10,
help=_('Number of firewall policies allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_firewall_rule',
default=100,
help=_('Number of firewall rules allowed per tenant. '
'A negative value means unlimited.')),
]
cfg.CONF.register_opts(default_fwg_rules_opts, 'default_fwg_rules')
cfg.CONF.register_opts(firewall_quota_opts, 'QUOTAS')
# TODO(Reedip): Remove the convert_to functionality after bug1706061 is fixed.
def convert_to_string(value):
if value is not None:
return str(value)
return None
firewall_v2.RESOURCE_ATTRIBUTE_MAP[api_const.FIREWALL_RULES][
'source_port']['convert_to'] = convert_to_string
firewall_v2.RESOURCE_ATTRIBUTE_MAP[api_const.FIREWALL_RULES][
'destination_port']['convert_to'] = convert_to_string
class Firewall_v2(extensions.APIExtensionDescriptor):
api_definition = firewall_v2
@classmethod
def get_resources(cls):
special_mappings = {'firewall_policies': 'firewall_policy'}
plural_mappings = resource_helper.build_plural_mappings(
special_mappings, firewall_v2.RESOURCE_ATTRIBUTE_MAP)
return resource_helper.build_resource_info(
plural_mappings, firewall_v2.RESOURCE_ATTRIBUTE_MAP,
fwaas_constants.FIREWALL_V2, action_map=firewall_v2.ACTION_MAP,
register_quota=True)
@classmethod
def get_plugin_interface(cls):
return Firewallv2PluginBase
@six.add_metaclass(abc.ABCMeta)
class Firewallv2PluginBase(service_base.ServicePluginBase):
def get_plugin_type(self):
return fwaas_constants.FIREWALL_V2
def get_plugin_description(self):
return 'Firewall Service v2 Plugin'
# Firewall Group
@abc.abstractmethod
def create_firewall_group(self, context, firewall_group):
pass
@abc.abstractmethod
def delete_firewall_group(self, context, id):
pass
@abc.abstractmethod
def get_firewall_group(self, context, id, fields=None):
pass
@abc.abstractmethod
def get_firewall_groups(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def update_firewall_group(self, context, id, firewall_group):
pass
# Firewall Policy
@abc.abstractmethod
def create_firewall_policy(self, context, firewall_policy):
pass
@abc.abstractmethod
def delete_firewall_policy(self, context, id):
pass
@abc.abstractmethod
def get_firewall_policy(self, context, id, fields=None):
pass
@abc.abstractmethod
def get_firewall_policies(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def update_firewall_policy(self, context, id, firewall_policy):
pass
# Firewall Rule
@abc.abstractmethod
def create_firewall_rule(self, context, firewall_rule):
pass
@abc.abstractmethod
def delete_firewall_rule(self, context, id):
pass
@abc.abstractmethod
def get_firewall_rule(self, context, id, fields=None):
pass
@abc.abstractmethod
def get_firewall_rules(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def update_firewall_rule(self, context, id, firewall_rule):
pass
@abc.abstractmethod
def insert_rule(self, context, id, rule_info):
pass
@abc.abstractmethod
def remove_rule(self, context, id, rule_info):
pass
| en | 0.831802 | # Copyright (c) 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # TODO(Reedip): Remove the convert_to functionality after bug1706061 is fixed. # Firewall Group # Firewall Policy # Firewall Rule | 1.312046 | 1 |
model_hub/model_hub/mmdetection/utils.py | gh-determined-ai/determined | 0 | 10541 | <filename>model_hub/model_hub/mmdetection/utils.py
"""
Various utility functions for using mmdetection in Determined that may be useful
even if not using the provided MMDetTrial.
build_fp16_loss_scaler is largely derived from the original mmcv code at
https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/optimizer.py
mmcv is covered by the Apache 2.0 License. Copyright (c) OpenMMLab. All rights reserved.
"""
import os
from typing import Any, Dict, Tuple
import mmcv
import torch
import model_hub.utils
def get_config_pretrained_url_mapping() -> Dict[str, str]:
"""
Walks the MMDETECTION_CONFIG_DIR and creates a mapping of configs
to urls for pretrained checkpoints. The url for pretrained checkpoints
are parsed from the README files in each of the mmdetection config folders.
MMDETECTION_CONFIG_DIR is set to /mmdetection/configs in the default
determinedai/model-hub-mmdetection docker image.
"""
models = {}
config_dir = os.getenv("MMDETECTION_CONFIG_DIR")
if config_dir:
for root, _, files in os.walk(config_dir):
for f in files:
if "README" in f:
with open(os.path.join(root, f), "r") as readme:
lines = readme.readlines()
for line in lines:
if "[config]" in line:
start = line.find("[config]")
end = line.find(".py", start)
start = line.rfind("/", start, end)
config_name = line[start + 1 : end + 3]
start = line.find("[model]")
end = line.find(".pth", start)
ckpt_name = line[start + 8 : end + 4]
models[config_name] = ckpt_name
return models
CONFIG_TO_PRETRAINED = get_config_pretrained_url_mapping()
def get_pretrained_ckpt_path(download_directory: str, config_file: str) -> Tuple[Any, Any]:
"""
If the config_file has an associated pretrained checkpoint,
return path to downloaded checkpoint and preloaded checkpoint
Arguments:
download_directory: path to download checkpoints to
config_file: mmdet config file path for which to find and load pretrained weights
Returns:
checkpoint path, loaded checkpoint
"""
config_file = config_file.split("/")[-1]
if config_file in CONFIG_TO_PRETRAINED:
ckpt_path = model_hub.utils.download_url(
download_directory, CONFIG_TO_PRETRAINED[config_file]
)
return ckpt_path, torch.load(ckpt_path) # type: ignore
return None, None
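# A minimal usage sketch (added illustration; the download directory and config
# name are hypothetical): look up the pretrained weights registered for an
# mmdetection config, download them, and reuse the loaded checkpoint.
def _example_load_pretrained(download_dir='/tmp/mmdet_ckpts',
                             config_file='faster_rcnn_r50_fpn_1x_coco.py'):
    ckpt_path, ckpt = get_pretrained_ckpt_path(download_dir, config_file)
    if ckpt is None:
        return None  # no pretrained checkpoint registered for this config
    return ckpt_path, ckpt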
def build_fp16_loss_scaler(loss_scale: mmcv.Config) -> Any:
"""
This function is derived from mmcv, which is covered by the Apache 2.0 License.
Copyright (c) OpenMMLab. All rights reserved.
Arguments:
loss_scale (float | str | dict): Scale factor configuration.
If loss_scale is a float, static loss scaling will be used with
the specified scale. If loss_scale is a string, it must be
'dynamic', then dynamic loss scaling will be used.
It can also be a dict containing arguments of GradScaler.
Defaults to 512. For PyTorch >= 1.6, mmcv uses official
implementation of GradScaler. If you use a dict version of
loss_scale to create GradScaler, please refer to:
https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler
for the parameters.
Examples:
>>> loss_scale = dict(
... init_scale=65536.0,
... growth_factor=2.0,
... backoff_factor=0.5,
... growth_interval=2000
... )
"""
if loss_scale == "dynamic":
loss_scaler = torch.cuda.amp.GradScaler() # type: ignore
elif isinstance(loss_scale, float):
loss_scaler = torch.cuda.amp.GradScaler(init_scale=loss_scale) # type: ignore
elif isinstance(loss_scale, dict):
loss_scaler = torch.cuda.amp.GradScaler(**loss_scale) # type: ignore
else:
raise Exception(
"Cannot parse fp16 configuration. Expected cfg to be str(dynamic), float or dict."
)
return loss_scaler
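# A minimal usage sketch (added illustration): the three accepted forms of the
# fp16 loss_scale configuration and the GradScaler each one builds.
def _example_loss_scalers():
    dynamic_scaler = build_fp16_loss_scaler('dynamic')
    static_scaler = build_fp16_loss_scaler(512.0)
    custom_scaler = build_fp16_loss_scaler(
        dict(init_scale=65536.0, growth_factor=2.0,
             backoff_factor=0.5, growth_interval=2000))
    return dynamic_scaler, static_scaler, custom_scaler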
| <filename>model_hub/model_hub/mmdetection/utils.py
"""
Various utility functions for using mmdetection in Determined that may be useful
even if not using the provided MMDetTrial.
build_fp16_loss_scaler is largely derived from the original mmcv code at
https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/optimizer.py
mmcv is covered by the Apache 2.0 License. Copyright (c) OpenMMLab. All rights reserved.
"""
import os
from typing import Any, Dict, Tuple
import mmcv
import torch
import model_hub.utils
def get_config_pretrained_url_mapping() -> Dict[str, str]:
"""
Walks the MMDETECTION_CONFIG_DIR and creates a mapping of configs
to urls for pretrained checkpoints. The url for pretrained checkpoints
are parsed from the README files in each of the mmdetection config folders.
MMDETECTION_CONFIG_DIR is set to /mmdetection/configs in the default
determinedai/model-hub-mmdetection docker image.
"""
models = {}
config_dir = os.getenv("MMDETECTION_CONFIG_DIR")
if config_dir:
for root, _, files in os.walk(config_dir):
for f in files:
if "README" in f:
with open(os.path.join(root, f), "r") as readme:
lines = readme.readlines()
for line in lines:
if "[config]" in line:
start = line.find("[config]")
end = line.find(".py", start)
start = line.rfind("/", start, end)
config_name = line[start + 1 : end + 3]
start = line.find("[model]")
end = line.find(".pth", start)
ckpt_name = line[start + 8 : end + 4]
models[config_name] = ckpt_name
return models
CONFIG_TO_PRETRAINED = get_config_pretrained_url_mapping()
def get_pretrained_ckpt_path(download_directory: str, config_file: str) -> Tuple[Any, Any]:
"""
If the config_file has an associated pretrained checkpoint,
return path to downloaded checkpoint and preloaded checkpoint
Arguments:
download_directory: path to download checkpoints to
config_file: mmdet config file path for which to find and load pretrained weights
Returns:
checkpoint path, loaded checkpoint
"""
config_file = config_file.split("/")[-1]
if config_file in CONFIG_TO_PRETRAINED:
ckpt_path = model_hub.utils.download_url(
download_directory, CONFIG_TO_PRETRAINED[config_file]
)
return ckpt_path, torch.load(ckpt_path) # type: ignore
return None, None
def build_fp16_loss_scaler(loss_scale: mmcv.Config) -> Any:
"""
This function is derived from mmcv, which is covered by the Apache 2.0 License.
Copyright (c) OpenMMLab. All rights reserved.
Arguments:
loss_scale (float | str | dict): Scale factor configuration.
If loss_scale is a float, static loss scaling will be used with
the specified scale. If loss_scale is a string, it must be
'dynamic', then dynamic loss scaling will be used.
It can also be a dict containing arguments of GradScaler.
Defaults to 512. For PyTorch >= 1.6, mmcv uses official
implementation of GradScaler. If you use a dict version of
loss_scale to create GradScaler, please refer to:
https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler
for the parameters.
Examples:
>>> loss_scale = dict(
... init_scale=65536.0,
... growth_factor=2.0,
... backoff_factor=0.5,
... growth_interval=2000
... )
"""
if loss_scale == "dynamic":
loss_scaler = torch.cuda.amp.GradScaler() # type: ignore
elif isinstance(loss_scale, float):
loss_scaler = torch.cuda.amp.GradScaler(init_scale=loss_scale) # type: ignore
elif isinstance(loss_scale, dict):
loss_scaler = torch.cuda.amp.GradScaler(**loss_scale) # type: ignore
else:
raise Exception(
"Cannot parse fp16 configuration. Expected cfg to be str(dynamic), float or dict."
)
return loss_scaler
| en | 0.77003 | Various utility functions for using mmdetection in Determined that may be useful even if not using the provided MMDetTrial. build_fp16_loss_scaler is large derived from the original mmcv code at https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/optimizer.py mmcv is covered by the Apache 2.0 License. Copyright (c) OpenMMLab. All rights reserved. Walks the MMDETECTION_CONFIG_DIR and creates a mapping of configs to urls for pretrained checkpoints. The url for pretrained checkpoints are parsed from the README files in each of the mmdetection config folders. MMDETECTION_CONFIG_DIR is set to /mmdetection/configs in the default determinedai/model-hub-mmdetection docker image. If the config_file has an associated pretrained checkpoint, return path to downloaded checkpoint and preloaded checkpoint Arguments: download_directory: path to download checkpoints to config_file: mmdet config file path for which to find and load pretrained weights Returns: checkpoint path, loaded checkpoint # type: ignore This function is derived from mmcv, which is coverd by the Apache 2.0 License. Copyright (c) OpenMMLab. All rights reserved. Arguments: loss_scale (float | str | dict): Scale factor configuration. If loss_scale is a float, static loss scaling will be used with the specified scale. If loss_scale is a string, it must be 'dynamic', then dynamic loss scaling will be used. It can also be a dict containing arguments of GradScalar. Defaults to 512. For PyTorch >= 1.6, mmcv uses official implementation of GradScaler. If you use a dict version of loss_scale to create GradScaler, please refer to: https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler for the parameters. Examples: >>> loss_scale = dict( ... init_scale=65536.0, ... growth_factor=2.0, ... backoff_factor=0.5, ... growth_interval=2000 ... ) # type: ignore # type: ignore # type: ignore | 2.209423 | 2 |
gcloud/datastores/tests/STUB_test_bigquery.py | pantheon-ci-bot/etl-framework | 2 | 10542 | <filename>gcloud/datastores/tests/STUB_test_bigquery.py
"""tests bigquery client"""
import unittest
from gcloud.datastores.bigquery import BigqueryClient
class BigqueryClientTestCases(unittest.TestCase):
"""stuff"""
@classmethod
def setUpClass(cls):
cls.project_id = 'test'
cls.dataset_id = 'etl_test'
cls.table_id = 'etl_test'
cls.table_schema = {
"fields": [
{
"type": "STRING",
"name": "a_key",
"mode": "REQUIRED",
}
]
}
cls.rows = [
{
"insertId": "some_uuid",
"json": {
"a_key": "a_value"
},
},
]
cls.query = "SELECT a_key FROM [{}:{}.{}]".format(
cls.project_id,
cls.dataset_id,
cls.table_id,
)
cls.client = BigqueryClient(
project_name=cls.project_id,
dataset_id=cls.dataset_id
)
# Create a dataset and table (this indirectly tests create and delete)
cls.client.insert_dataset(cls.dataset_id)
cls.client.insert_table(
table_id=cls.table_id,
schema=cls.table_schema
)
@classmethod
def tearDownClass(cls):
# Remove table and dataset (this indirectly tests create and delete)
cls.client.delete_table(cls.table_id)
cls.client.delete_dataset(cls.dataset_id)
def test_get_dataset(self):
self.client.get_dataset(self.dataset_id)
def test_get_table(self):
self.client.get_table(self.table_id)
def test_insert_data(self):
self.client.insert_data(
table_id=self.table_id,
rows=self.rows
)
def test_list_data(self):
self.client.list_data(
table_id=self.table_id
)
def test_list_datasets(self):
self.client.list_datasets()
def test_list_tables(self):
self.client.list_tables(
dataset_id=self.dataset_id
)
def test_patch_table(self):
self.client.patch_table(
table_id=self.table_id,
schema=self.table_schema,
)
def test_query(self):
self.client.query(
query=self.query,
)
def test_update_table(self):
self.client.update_table(
table_id=self.table_id,
schema=self.table_schema,
)
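# Added for convenience (not in the original stub): allow the suite to be run
# directly with the standard library test runner.
if __name__ == '__main__':
    unittest.main()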
| <filename>gcloud/datastores/tests/STUB_test_bigquery.py
"""tests bigquery client"""
import unittest
from gcloud.datastores.bigquery import BigqueryClient
class BigqueryClientTestCases(unittest.TestCase):
"""stuff"""
@classmethod
def setUpClass(cls):
cls.project_id = 'test'
cls.dataset_id = 'etl_test'
cls.table_id = 'etl_test'
cls.table_schema = {
"fields": [
{
"type": "STRING",
"name": "a_key",
"mode": "REQUIRED",
}
]
}
cls.rows = [
{
"insertId": "some_uuid",
"json": {
"a_key": "a_value"
},
},
]
cls.query = "SELECT a_key FROM [{}:{}.{}]".format(
cls.project_id,
cls.dataset_id,
cls.table_id,
)
cls.client = BigqueryClient(
project_name=cls.project_id,
dataset_id=cls.dataset_id
)
# Create a dataset and table (this indirectly tests create and delete)
cls.client.insert_dataset(cls.dataset_id)
cls.client.insert_table(
table_id=cls.table_id,
schema=cls.table_schema
)
@classmethod
def tearDownClass(cls):
# Remove table and dataset (this indirectly tests create and delete)
cls.client.delete_table(cls.table_id)
cls.client.delete_dataset(cls.dataset_id)
def test_get_dataset(self):
self.client.get_dataset(self.dataset_id)
def test_get_table(self):
self.client.get_table(self.table_id)
def test_insert_data(self):
self.client.insert_data(
table_id=self.table_id,
rows=self.rows
)
def test_list_data(self):
self.client.list_data(
table_id=self.table_id
)
def test_list_datasets(self):
self.client.list_datasets()
def test_list_tables(self):
self.client.list_tables(
dataset_id=self.dataset_id
)
def test_patch_table(self):
self.client.patch_table(
table_id=self.table_id,
schema=self.table_schema,
)
def test_query(self):
self.client.query(
query=self.query,
)
def test_update_table(self):
self.client.update_table(
table_id=self.table_id,
schema=self.table_schema,
)
| en | 0.725039 | tests bigquery client stuff # Create a dataset and table (this indirectly tests create and delete) # Remove table and dataset (this indirectly tests create and delete) | 2.729877 | 3 |
filter_hash.py | mbougarne/python-algos | 0 | 10543 | <gh_stars>0
fruits = ["orange", "banana", "apple", "avocado", "kiwi", "apricot",
"cherry", "grape", "coconut", "lemon", "mango", "peach",
"pear", "strawberry", "pineapple", "apple", "orange", "pear",
"grape", "banana"
]
filters = dict()
for key in fruits:
filters[key] = 1
result = set(filters.keys())
print(result) | fruits = ["orange", "banana", "apple", "avocado", "kiwi", "apricot",
"cherry", "grape", "coconut", "lemon", "mango", "peach",
"pear", "strawberry", "pineapple", "apple", "orange", "pear",
"grape", "banana"
]
filters = dict()
for key in fruits:
filters[key] = 1
result = set(filters.keys())
print(result) | none | 1 | 3.431828 | 3 |
teste/knn.py | joandesonandrade/nebulosa | 0 | 10544 | <filename>teste/knn.py
from sklearn import preprocessing
import pandas as pd
import numpy as np
#import matplotlib.pyplot as plt
# Opening the data as a DataFrame
dados = pd.read_csv('dados/001.csv')
# Initializing the binarizer for the class labels: yes=1; no=0
pre = preprocessing.LabelBinarizer()
# Binarizing the 'jogou' (played) class and assigning it to an n-dimensional array
y_binary = pre.fit_transform(dados['jogou'])
y = np.array(y_binary).ravel()
lista_clima = [x for x in dados['clima']]
lista_temperatura = [x for x in dados['temperatura']]
lista_jogou = [x for x in dados['jogou']]
pre = preprocessing.LabelEncoder()
clima_encoding = pre.fit_transform(lista_clima)
temperatura_encoding = pre.fit_transform(lista_temperatura)
jogou_encoding = pre.fit_transform(lista_jogou)
lista = list(zip(clima_encoding, temperatura_encoding, jogou_encoding))
X = np.array(lista, dtype=np.int32)
#colunas = ['A', 'B', 'C']
# print(pd.DataFrame(X, columns=colunas, dtype=np.int32))
# print(pd.DataFrame(y, columns=['Classe'], dtype=np.int32))
#
# xX = []
# for i, x in enumerate(X):
# xX.append([list(x), y[i][0]])
#
# dX = [(x[0][0] + x[0][1] + x[0][2]) for x in xX]
# dY = [x[1] for x in xX]
#
# print('Soma dos rótulos:', dX)
# print('Classe:', dY)
#
# fig, ax = plt.subplots()
# ax.plot(dX)
# ax.plot(dY)
# plt.show()
from sklearn import model_selection
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
# Splitting the data: 75% for training and 25% for testing, the split I always use :)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.25, random_state=0)
# Building the model, keeping the default parameters
knn = KNeighborsClassifier()
# Training the model
knn.fit(X=X_train, y=y_train)
# Evaluating the model's score using the test data
pontuacao = str(accuracy_score(y_test, knn.predict(X_test)) * 100)
print("Precisão: "+pontuacao+"%")
| <filename>teste/knn.py
from sklearn import preprocessing
import pandas as pd
import numpy as np
#import matplotlib.pyplot as plt
# Opening the data as a DataFrame
dados = pd.read_csv('dados/001.csv')
# Initializing the binarizer for the class labels: yes=1; no=0
pre = preprocessing.LabelBinarizer()
# Binarizing the 'jogou' (played) class and assigning it to an n-dimensional array
y_binary = pre.fit_transform(dados['jogou'])
y = np.array(y_binary).ravel()
lista_clima = [x for x in dados['clima']]
lista_temperatura = [x for x in dados['temperatura']]
lista_jogou = [x for x in dados['jogou']]
pre = preprocessing.LabelEncoder()
clima_encoding = pre.fit_transform(lista_clima)
temperatura_encoding = pre.fit_transform(lista_temperatura)
jogou_encoding = pre.fit_transform(lista_jogou)
lista = list(zip(clima_encoding, temperatura_encoding, jogou_encoding))
X = np.array(lista, dtype=np.int32)
#colunas = ['A', 'B', 'C']
# print(pd.DataFrame(X, columns=colunas, dtype=np.int32))
# print(pd.DataFrame(y, columns=['Classe'], dtype=np.int32))
#
# xX = []
# for i, x in enumerate(X):
# xX.append([list(x), y[i][0]])
#
# dX = [(x[0][0] + x[0][1] + x[0][2]) for x in xX]
# dY = [x[1] for x in xX]
#
# print('Soma dos rótulos:', dX)
# print('Classe:', dY)
#
# fig, ax = plt.subplots()
# ax.plot(dX)
# ax.plot(dY)
# plt.show()
from sklearn import model_selection
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
# Splitting the data: 75% for training and 25% for testing, the split I always use :)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.25, random_state=0)
# Building the model, keeping the default parameters
knn = KNeighborsClassifier()
# Training the model
knn.fit(X=X_train, y=y_train)
# Evaluating the model's score using the test data
pontuacao = str(accuracy_score(y_test, knn.predict(X_test)) * 100)
print("Precisão: "+pontuacao+"%")
| pt | 0.784644 | #import matplotlib.pyplot as plt #Abrindo o dados como Dataframe #Iniciando o método para binanizar as classe sim=1; não=0 #Binazirando a classe jogou, e atribuíndo a uma matriz n-dimencional #colunas = ['A', 'B', 'C'] # print(pd.DataFrame(X, columns=colunas, dtype=np.int32)) # print(pd.DataFrame(y, columns=['Classe'], dtype=np.int32)) # # xX = [] # for i, x in enumerate(X): # xX.append([list(x), y[i][0]]) # # dX = [(x[0][0] + x[0][1] + x[0][2]) for x in xX] # dY = [x[1] for x in xX] # # print('Soma dos rótulos:', dX) # print('Classe:', dY) # # fig, ax = plt.subplots() # ax.plot(dX) # ax.plot(dY) # plt.show() #Dividido os dados, onde o treinamento ficará com 75% e teste 25%, eu sempre uso este padrão :) #Gerando o modelo, vou deixar os parâmetros padrão #Treinando o modelo #Avaliando a pontuação do modelo, usando os dados de teste | 3.274144 | 3 |
components/google-cloud/tests/container/experimental/gcp_launcher/test_batch_prediction_job_remote_runner.py | m-mayran/pipelines | 0 | 10545 | <filename>components/google-cloud/tests/container/experimental/gcp_launcher/test_batch_prediction_job_remote_runner.py
# Copyright 2021 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Vertex AI Batch Prediction Job Remote Runner Client module."""
import json
from logging import raiseExceptions
import os
import time
import unittest
from unittest import mock
from google.cloud import aiplatform
from google.cloud.aiplatform.compat.types import job_state as gca_job_state
from google.protobuf import json_format
from google_cloud_pipeline_components.proto.gcp_resources_pb2 import GcpResources
from google_cloud_pipeline_components.container.experimental.gcp_launcher import batch_prediction_job_remote_runner
from google_cloud_pipeline_components.container.experimental.gcp_launcher import job_remote_runner
class BatchPredictionJobRemoteRunnerUtilsTests(unittest.TestCase):
def setUp(self):
super(BatchPredictionJobRemoteRunnerUtilsTests, self).setUp()
self._payload = (
'{"batchPredictionJob": {"displayName": '
'"BatchPredictionComponentName", "model": '
'"projects/test/locations/test/models/test-model","inputConfig":'
' {"instancesFormat": "CSV","gcsSource": {"uris": '
'["test_gcs_source"]}}, "outputConfig": {"predictionsFormat": '
'"CSV", "gcsDestination": {"outputUriPrefix": '
'"test_gcs_destination"}}}}')
self._job_type = 'BatchPredictionJob'
self._project = 'test_project'
self._location = 'test_region'
        self._batch_prediction_job_name = f'/projects/{self._project}/locations/{self._location}/jobs/test_job_id'
self._gcp_resources_path = 'gcp_resources'
self._batch_prediction_job_uri_prefix = f'https://{self._location}-aiplatform.googleapis.com/v1/'
def tearDown(self):
if os.path.exists(self._gcp_resources_path):
os.remove(self._gcp_resources_path)
@mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
def test_batch_prediction_job_remote_runner_on_region_is_set_correctly_in_client_options(
self, mock_job_service_client):
job_client = mock.Mock()
mock_job_service_client.return_value = job_client
create_batch_prediction_job_response = mock.Mock()
job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
create_batch_prediction_job_response.name = self._batch_prediction_job_name
get_batch_prediction_job_response = mock.Mock()
job_client.get_batch_prediction_job.return_value = get_batch_prediction_job_response
get_batch_prediction_job_response.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED
batch_prediction_job_remote_runner.create_batch_prediction_job(
self._job_type, self._project, self._location, self._payload,
self._gcp_resources_path)
mock_job_service_client.assert_called_once_with(
client_options={
'api_endpoint': 'test_region-aiplatform.googleapis.com'
},
client_info=mock.ANY)
@mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
@mock.patch.object(os.path, 'exists', autospec=True)
def test_batch_prediction_job_remote_runner_on_payload_deserializes_correctly(
self, mock_path_exists, mock_job_service_client):
job_client = mock.Mock()
mock_job_service_client.return_value = job_client
create_batch_prediction_job_response = mock.Mock()
job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
create_batch_prediction_job_response.name = self._batch_prediction_job_name
get_batch_prediction_job_response = mock.Mock()
job_client.get_batch_prediction_job.return_value = get_batch_prediction_job_response
get_batch_prediction_job_response.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED
mock_path_exists.return_value = False
batch_prediction_job_remote_runner.create_batch_prediction_job(
self._job_type, self._project, self._location, self._payload,
self._gcp_resources_path)
expected_parent = f'projects/{self._project}/locations/{self._location}'
expected_job_spec = json.loads(self._payload, strict=False)
job_client.create_batch_prediction_job.assert_called_once_with(
parent=expected_parent, batch_prediction_job=expected_job_spec)
@mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
@mock.patch.object(os.path, 'exists', autospec=True)
def test_batch_prediction_job_remote_runner_raises_exception_on_error(
self, mock_path_exists, mock_job_service_client):
job_client = mock.Mock()
mock_job_service_client.return_value = job_client
create_batch_prediction_job_response = mock.Mock()
job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
create_batch_prediction_job_response.name = self._batch_prediction_job_name
get_batch_prediction_job_response = mock.Mock()
job_client.get_batch_prediction_job.return_value = get_batch_prediction_job_response
get_batch_prediction_job_response.state = gca_job_state.JobState.JOB_STATE_FAILED
mock_path_exists.return_value = False
with self.assertRaises(RuntimeError):
batch_prediction_job_remote_runner.create_batch_prediction_job(
self._job_type, self._project, self._location, self._payload,
self._gcp_resources_path)
@mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
@mock.patch.object(os.path, 'exists', autospec=True)
@mock.patch.object(time, 'sleep', autospec=True)
def test_batch_prediction_job_remote_runner_retries_to_get_status_on_non_completed_job(
self, mock_time_sleep, mock_path_exists, mock_job_service_client):
job_client = mock.Mock()
mock_job_service_client.return_value = job_client
create_batch_prediction_job_response = mock.Mock()
job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
create_batch_prediction_job_response.name = self._batch_prediction_job_name
get_batch_prediction_job_response_success = mock.Mock()
get_batch_prediction_job_response_success.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED
get_batch_prediction_job_response_running = mock.Mock()
get_batch_prediction_job_response_running.state = gca_job_state.JobState.JOB_STATE_RUNNING
job_client.get_batch_prediction_job.side_effect = [
get_batch_prediction_job_response_running,
get_batch_prediction_job_response_success
]
mock_path_exists.return_value = False
batch_prediction_job_remote_runner.create_batch_prediction_job(
self._job_type, self._project, self._location, self._payload,
self._gcp_resources_path)
mock_time_sleep.assert_called_once_with(
job_remote_runner._POLLING_INTERVAL_IN_SECONDS)
self.assertEqual(job_client.get_batch_prediction_job.call_count, 2)
@mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
@mock.patch.object(os.path, 'exists', autospec=True)
def test_batch_prediction_job_remote_runner_returns_gcp_resources(
self, mock_path_exists, mock_job_service_client):
job_client = mock.Mock()
mock_job_service_client.return_value = job_client
create_batch_prediction_job_response = mock.Mock()
job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
create_batch_prediction_job_response.name = self._batch_prediction_job_name
get_batch_prediction_job_response_success = mock.Mock()
get_batch_prediction_job_response_success.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED
job_client.get_batch_prediction_job.side_effect = [
get_batch_prediction_job_response_success
]
mock_path_exists.return_value = False
batch_prediction_job_remote_runner.create_batch_prediction_job(
self._job_type, self._project, self._location, self._payload,
self._gcp_resources_path)
with open(self._gcp_resources_path) as f:
serialized_gcp_resources = f.read()
# Instantiate GCPResources Proto
batch_prediction_job_resources = json_format.Parse(
serialized_gcp_resources, GcpResources())
self.assertEqual(len(batch_prediction_job_resources.resources), 1)
batch_prediction_job_name = batch_prediction_job_resources.resources[
0].resource_uri[len(self._batch_prediction_job_uri_prefix):]
self.assertEqual(batch_prediction_job_name,
self._batch_prediction_job_name)
| <filename>components/google-cloud/tests/container/experimental/gcp_launcher/test_batch_prediction_job_remote_runner.py
# Copyright 2021 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Vertex AI Batch Prediction Job Remote Runner Client module."""
import json
from logging import raiseExceptions
import os
import time
import unittest
from unittest import mock
from google.cloud import aiplatform
from google.cloud.aiplatform.compat.types import job_state as gca_job_state
from google.protobuf import json_format
from google_cloud_pipeline_components.proto.gcp_resources_pb2 import GcpResources
from google_cloud_pipeline_components.container.experimental.gcp_launcher import batch_prediction_job_remote_runner
from google_cloud_pipeline_components.container.experimental.gcp_launcher import job_remote_runner
class BatchPredictionJobRemoteRunnerUtilsTests(unittest.TestCase):
def setUp(self):
super(BatchPredictionJobRemoteRunnerUtilsTests, self).setUp()
self._payload = (
'{"batchPredictionJob": {"displayName": '
'"BatchPredictionComponentName", "model": '
'"projects/test/locations/test/models/test-model","inputConfig":'
' {"instancesFormat": "CSV","gcsSource": {"uris": '
'["test_gcs_source"]}}, "outputConfig": {"predictionsFormat": '
'"CSV", "gcsDestination": {"outputUriPrefix": '
'"test_gcs_destination"}}}}')
self._job_type = 'BatchPredictionJob'
self._project = 'test_project'
self._location = 'test_region'
        self._batch_prediction_job_name = f'/projects/{self._project}/locations/{self._location}/jobs/test_job_id'
self._gcp_resources_path = 'gcp_resources'
self._batch_prediction_job_uri_prefix = f'https://{self._location}-aiplatform.googleapis.com/v1/'
def tearDown(self):
if os.path.exists(self._gcp_resources_path):
os.remove(self._gcp_resources_path)
@mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
def test_batch_prediction_job_remote_runner_on_region_is_set_correctly_in_client_options(
self, mock_job_service_client):
job_client = mock.Mock()
mock_job_service_client.return_value = job_client
create_batch_prediction_job_response = mock.Mock()
job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
create_batch_prediction_job_response.name = self._batch_prediction_job_name
get_batch_prediction_job_response = mock.Mock()
job_client.get_batch_prediction_job.return_value = get_batch_prediction_job_response
get_batch_prediction_job_response.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED
batch_prediction_job_remote_runner.create_batch_prediction_job(
self._job_type, self._project, self._location, self._payload,
self._gcp_resources_path)
mock_job_service_client.assert_called_once_with(
client_options={
'api_endpoint': 'test_region-aiplatform.googleapis.com'
},
client_info=mock.ANY)
@mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
@mock.patch.object(os.path, 'exists', autospec=True)
def test_batch_prediction_job_remote_runner_on_payload_deserializes_correctly(
self, mock_path_exists, mock_job_service_client):
job_client = mock.Mock()
mock_job_service_client.return_value = job_client
create_batch_prediction_job_response = mock.Mock()
job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
create_batch_prediction_job_response.name = self._batch_prediction_job_name
get_batch_prediction_job_response = mock.Mock()
job_client.get_batch_prediction_job.return_value = get_batch_prediction_job_response
get_batch_prediction_job_response.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED
mock_path_exists.return_value = False
batch_prediction_job_remote_runner.create_batch_prediction_job(
self._job_type, self._project, self._location, self._payload,
self._gcp_resources_path)
expected_parent = f'projects/{self._project}/locations/{self._location}'
expected_job_spec = json.loads(self._payload, strict=False)
job_client.create_batch_prediction_job.assert_called_once_with(
parent=expected_parent, batch_prediction_job=expected_job_spec)
@mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
@mock.patch.object(os.path, 'exists', autospec=True)
def test_batch_prediction_job_remote_runner_raises_exception_on_error(
self, mock_path_exists, mock_job_service_client):
job_client = mock.Mock()
mock_job_service_client.return_value = job_client
create_batch_prediction_job_response = mock.Mock()
job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
create_batch_prediction_job_response.name = self._batch_prediction_job_name
get_batch_prediction_job_response = mock.Mock()
job_client.get_batch_prediction_job.return_value = get_batch_prediction_job_response
get_batch_prediction_job_response.state = gca_job_state.JobState.JOB_STATE_FAILED
mock_path_exists.return_value = False
with self.assertRaises(RuntimeError):
batch_prediction_job_remote_runner.create_batch_prediction_job(
self._job_type, self._project, self._location, self._payload,
self._gcp_resources_path)
@mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
@mock.patch.object(os.path, 'exists', autospec=True)
@mock.patch.object(time, 'sleep', autospec=True)
def test_batch_prediction_job_remote_runner_retries_to_get_status_on_non_completed_job(
self, mock_time_sleep, mock_path_exists, mock_job_service_client):
job_client = mock.Mock()
mock_job_service_client.return_value = job_client
create_batch_prediction_job_response = mock.Mock()
job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
create_batch_prediction_job_response.name = self._batch_prediction_job_name
get_batch_prediction_job_response_success = mock.Mock()
get_batch_prediction_job_response_success.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED
get_batch_prediction_job_response_running = mock.Mock()
get_batch_prediction_job_response_running.state = gca_job_state.JobState.JOB_STATE_RUNNING
job_client.get_batch_prediction_job.side_effect = [
get_batch_prediction_job_response_running,
get_batch_prediction_job_response_success
]
mock_path_exists.return_value = False
batch_prediction_job_remote_runner.create_batch_prediction_job(
self._job_type, self._project, self._location, self._payload,
self._gcp_resources_path)
mock_time_sleep.assert_called_once_with(
job_remote_runner._POLLING_INTERVAL_IN_SECONDS)
self.assertEqual(job_client.get_batch_prediction_job.call_count, 2)
@mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
@mock.patch.object(os.path, 'exists', autospec=True)
def test_batch_prediction_job_remote_runner_returns_gcp_resources(
self, mock_path_exists, mock_job_service_client):
job_client = mock.Mock()
mock_job_service_client.return_value = job_client
create_batch_prediction_job_response = mock.Mock()
job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
create_batch_prediction_job_response.name = self._batch_prediction_job_name
get_batch_prediction_job_response_success = mock.Mock()
get_batch_prediction_job_response_success.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED
job_client.get_batch_prediction_job.side_effect = [
get_batch_prediction_job_response_success
]
mock_path_exists.return_value = False
batch_prediction_job_remote_runner.create_batch_prediction_job(
self._job_type, self._project, self._location, self._payload,
self._gcp_resources_path)
with open(self._gcp_resources_path) as f:
serialized_gcp_resources = f.read()
# Instantiate GCPResources Proto
batch_prediction_job_resources = json_format.Parse(
serialized_gcp_resources, GcpResources())
self.assertEqual(len(batch_prediction_job_resources.resources), 1)
batch_prediction_job_name = batch_prediction_job_resources.resources[
0].resource_uri[len(self._batch_prediction_job_uri_prefix):]
self.assertEqual(batch_prediction_job_name,
self._batch_prediction_job_name)
| en | 0.819758 | # Copyright 2021 The Kubeflow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Test Vertex AI Batch Prediction Job Remote Runner Client module. # Instantiate GCPResources Proto | 1.714203 | 2 |
rotypes/Windows/Storage/Streams/__init__.py | Gliese129/ArknightsAutoHelper | 18 | 10546 | from ctypes import c_uint32, c_void_p, string_at
from rotypes.idldsl import define_winrt_com_method, GUID
from rotypes.inspectable import IInspectable, IUnknown
@GUID('905a0fef-bc53-11df-8c49-001e4fc686da')
class IBufferByteAccess(IUnknown):
pass
@GUID('905A0FE0-BC53-11DF-8C49-001E4FC686DA')
class IBuffer(IInspectable):
def __len__(self):
return self.Length
def __bytes__(self):
byteaccess = self.astype(IBufferByteAccess)
ptr = byteaccess.Buffer()
return string_at(ptr, len(self))
define_winrt_com_method(IBufferByteAccess, 'Buffer', retval=c_void_p)
define_winrt_com_method(IBuffer, 'get_Capacity', propget=c_uint32)
define_winrt_com_method(IBuffer, 'get_Length', propget=c_uint32)
define_winrt_com_method(IBuffer, 'put_Length', propput=c_uint32)
| from ctypes import c_uint32, c_void_p, string_at
from rotypes.idldsl import define_winrt_com_method, GUID
from rotypes.inspectable import IInspectable, IUnknown
@GUID('905a0fef-bc53-11df-8c49-001e4fc686da')
class IBufferByteAccess(IUnknown):
pass
@GUID('905A0FE0-BC53-11DF-8C49-001E4FC686DA')
class IBuffer(IInspectable):
def __len__(self):
return self.Length
def __bytes__(self):
byteaccess = self.astype(IBufferByteAccess)
ptr = byteaccess.Buffer()
return string_at(ptr, len(self))
define_winrt_com_method(IBufferByteAccess, 'Buffer', retval=c_void_p)
define_winrt_com_method(IBuffer, 'get_Capacity', propget=c_uint32)
define_winrt_com_method(IBuffer, 'get_Length', propget=c_uint32)
define_winrt_com_method(IBuffer, 'put_Length', propput=c_uint32)
| none | 1 | 2.051599 | 2 |
|
simple_playgrounds/playground.py | Asjidkalam/simple-playgrounds | 0 | 10547 | # -*- coding: utf-8 -*-
""" Playground documentation.
Module defining Playground Base Class
"""
import os
from abc import ABC
import yaml
import pymunk
from .utils import PositionAreaSampler
from .utils.definitions import SPACE_DAMPING, CollisionTypes, SceneElementTypes
# pylint: disable=unused-argument
# pylint: disable=line-too-long
class Playground(ABC):
""" Playground is a Base Class that manages the physical simulation.
Playground manages the interactions between Agents and Scene Elements.
Attributes:
size: size of the scene (width, length).
scene_elements: list of SceneElements present in the Playground.
fields: list of fields producing SceneElements in the Playground.
agents: list of Agents present in the Playground.
initial_agent_position: position or PositionAreaSampler,
Starting position of an agent (single agent).
done: bool, True if the playground reached termination.
"""
# pylint: disable=too-many-instance-attributes
scene_entities = []
def __init__(self, size):
# Generate Scene
self.size = size
self._width, self._length = self.size
# Initialization of the pymunk space, modelling all the physics
self.space = self._initialize_space()
# Public attributes for entities in the playground
self.scene_elements = []
self.fields = []
self.agents = []
# Private attributes for managing interactions in playground
self._disappeared_scene_elements = []
self._grasped_scene_elements = {}
self._teleported = []
# Add entities declared in the scene
for scene_entity in self.scene_entities:
self.add_scene_element(scene_entity)
self.done = False
self.initial_agent_position = None
self._handle_interactions()
self.time_limit = None
self.time_limit_reached_reward = None
self.time_test = 0
@staticmethod
def parse_configuration(key):
""" Private method that parses yaml configuration files.
Args:
key: (str) name of the playground configuration.
Returns:
Dictionary of attributes and default values.
"""
fname = 'utils/configs/playground.yml'
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
with open(os.path.join(__location__, fname), 'r') as yaml_file:
default_config = yaml.load(yaml_file, Loader=yaml.SafeLoader)
return default_config[key]
@staticmethod
def _initialize_space():
""" Method to initialize Pymunk empty space for 2D physics.
Returns: Pymunk Space
"""
space = pymunk.Space()
space.gravity = pymunk.Vec2d(0., 0.)
space.damping = SPACE_DAMPING
return space
def update(self, steps):
""" Update the Playground
Update all SceneElements, Fields, Timers and Grasps
Runs the Physics engine for n steps.
Args:
steps: Number of steps
"""
for agent in self.agents:
agent.pre_step()
for _ in range(steps):
self.space.step(1. / steps)
for elem in self.scene_elements:
elem.pre_step()
if elem.follows_waypoints:
self.space.reindex_shapes_for_body(elem.pm_body)
self._fields_produce()
self._check_timers()
self._release_grasps()
self._check_teleports()
def reset(self):
""" Reset the Playground to its initial state.
"""
# remove entities and filter out entities which are temporary
for entity in self.scene_elements.copy():
self.remove_scene_element(entity)
# reset and replace entities that are not temporary
for entity in self._disappeared_scene_elements.copy():
entity.reset()
self.add_scene_element(entity)
# reset fields
for entity in self.fields:
entity.reset()
# reset agents
for agent in self.agents.copy():
agent.reset()
self.remove_agent(agent)
self.add_agent(agent)
self.done = False
def add_agent(self, new_agent, tries=100):
""" Method to add an Agent to the Playground.
If the Agent has its attribute allow_overlapping set to False,
the playground will try to add it multiple times.
Args:
new_agent: Agent to add to the Playground
tries: Number of times the Playground will try to place the agent
"""
# If already there
if new_agent in self.scene_elements:
raise ValueError('Agent already in Playground')
# Inform agent of the playground size
new_agent.size_playground = self.size
if new_agent.allow_overlapping:
self._add_agent(new_agent)
else:
success = self._add_agent_without_ovelapping(new_agent, tries = tries)
if not success:
raise ValueError("Agent couldn't be placed without overlapping")
def _add_agent(self, agent):
""" Add an agent to the playground.
Args:
agent: Agent.
"""
self.agents.append(agent)
if agent.initial_position is not None:
pass
elif self.initial_agent_position is not None:
agent.initial_position = self.initial_agent_position
else:
raise ValueError("""Agent initial position should be defined in the playground or passed as an argument)
to the class agent""")
agent.position = agent.initial_position
for body_part in agent.parts:
self.space.add(*body_part.pm_elements)
def _add_agent_without_ovelapping(self, agent, tries=100):
""" Method to add am Agent to the Playground without overlapping.
Useful when an Agent has a random initial position, to avoid overlapping.
Args:
agent: Agent to add to the Playground
tries: Number of times the Playground will try to place the new_entity
"""
trial = 0
visible_collide_parts = True
interactive_collide_parts = True
all_shapes = self.space.shapes.copy()
while (interactive_collide_parts or visible_collide_parts) and trial < tries:
self._add_agent(agent)
visible_collide_parts = False
interactive_collide_parts = False
for part in agent.parts:
visible_collide = False
interactive_collide = False
if part.pm_visible_shape is not None:
collisions = [part.pm_visible_shape.shapes_collide(shape) for shape in all_shapes]
visible_collide = any([len(collision.points) != 0 for collision in collisions])
if part.pm_interaction_shape is not None:
collisions = [part.pm_interaction_shape.shapes_collide(shape) for shape in all_shapes]
interactive_collide = any([len(collision.points) != 0 for collision in collisions])
visible_collide_parts = visible_collide or visible_collide_parts
interactive_collide_parts = interactive_collide or interactive_collide_parts
if visible_collide_parts or interactive_collide_parts:
self.remove_agent(agent)
trial += 1
if interactive_collide_parts or visible_collide_parts:
return False
return True
def _add_scene_element(self, new_scene_element, new_position):
""" Method to add a SceneElement to the Playground.
"""
if new_scene_element in self.scene_elements:
raise ValueError('Scene element already in Playground')
new_scene_element.size_playground = self.size
if new_position:
new_scene_element.position = new_scene_element.initial_position
self.space.add(*new_scene_element.pm_elements)
self.scene_elements.append(new_scene_element)
if new_scene_element in self._disappeared_scene_elements:
self._disappeared_scene_elements.remove(new_scene_element)
def _add_scene_element_without_ovelapping(self, scene_element, tries, new_position):
trial = 0
visible_collide = True
interactive_collide = True
all_shapes = self.space.shapes.copy()
while (visible_collide or interactive_collide) and trial < tries:
self._add_scene_element(scene_element, new_position)
visible_collide = False
interactive_collide = False
if scene_element.pm_visible_shape is not None:
collisions = [scene_element.pm_visible_shape.shapes_collide(shape) for shape in all_shapes]
visible_collide = any([len(collision.points) != 0 for collision in collisions])
if scene_element.pm_interaction_shape is not None:
collisions = [scene_element.pm_interaction_shape.shapes_collide(shape) for shape in all_shapes]
interactive_collide = any([len(collision.points) != 0 for collision in collisions])
if visible_collide or interactive_collide:
self.remove_scene_element(scene_element)
trial += 1
if visible_collide or interactive_collide:
return False
return True
def add_scene_element(self, scene_element, tries=100, new_position=True):
""" Method to add a SceneElement to the Playground.
If the Element has its attribute allow_overlapping set to False,
the playground will try to add it multiple times.
Useful when a SceneElement has a random initial position, to avoid overlapping.
Args:
scene_element: Scene Element to add to the Playground
tries: Number of times the Playground will try to place the new_entity
"""
if scene_element.entity_type is SceneElementTypes.FIELD:
# If already there
if scene_element in self.fields:
raise ValueError('Field already in Playground')
self.fields.append(scene_element)
else:
if scene_element in self.scene_elements:
raise ValueError('Field already in Playground')
# Else
scene_element.size_playground = self.size
if scene_element.allow_overlapping:
self._add_scene_element(scene_element, new_position)
else:
success = self._add_scene_element_without_ovelapping(scene_element, tries = tries, new_position=new_position)
if not success:
raise ValueError('Entity could not be placed without overlapping')
def _remove_agents(self):
for agent in self.agents:
self.remove_agent(agent)
def remove_agent(self, agent):
if agent not in self.agents:
return False
for part in agent.parts:
self.space.remove(*part.pm_elements)
part.velocity = [0, 0, 0]
part.grasped = []
agent.initial_position = None
self.agents.remove(agent)
return True
def remove_scene_element(self, scene_element):
if scene_element not in self.scene_elements:
return False
self.space.remove(*scene_element.pm_elements)
self.scene_elements.remove(scene_element)
if not scene_element.is_temporary_entity:
self._disappeared_scene_elements.append(scene_element)
for elem in self.scene_elements:
if elem.entity_type == 'dispenser' and scene_element in elem.produced_entities:
elem.produced_entities.remove(scene_element)
for field in self.fields:
if scene_element in field.produced_entities:
field.produced_entities.remove(scene_element)
if scene_element in self._grasped_scene_elements.keys():
body_part = self._grasped_scene_elements[scene_element]
self.space.remove(*body_part.grasped)
body_part.grasped = []
# self._grasped_scene_elements.pop(scene_element)
return True
def _fields_produce(self):
for field in self.fields:
if field.can_produce():
new_entity = field.produce()
self.add_scene_element(new_entity)
def _check_timers(self):
for entity in self.scene_elements:
if entity.timed and entity.timer == 0:
list_remove, list_add = entity.activate(self)
for entity_removed in list_remove:
self.remove_scene_element(entity_removed)
for entity_added in list_add:
self.add_scene_element(entity_added)
def _release_grasps(self):
for agent in self.agents:
for part in agent.parts:
if not part.is_holding and part.can_grasp:
for joint in part.grasped:
self.space.remove(joint)
part.grasped = []
for element_grasped, part in self._grasped_scene_elements.copy().items():
if not part.grasped:
self._grasped_scene_elements.pop(element_grasped)
def _check_teleports(self):
for agent, teleport in self._teleported:
overlaps = self.agent_overlaps_with_element(agent, teleport)
if not overlaps:
self._teleported.remove((agent, teleport))
def agent_overlaps_with_element(self, agent, element):
overlaps = False
for part in agent.parts:
if element.pm_visible_shape is not None:
overlaps = overlaps or part.pm_visible_shape.shapes_collide(element.pm_visible_shape).points != []
if element.pm_interaction_shape is not None:
overlaps = overlaps or part.pm_visible_shape.shapes_collide(element.pm_interaction_shape).points != []
return overlaps
def get_scene_element_from_shape(self, pm_shape):
"""
Returns: Returns the Scene Element associated with the pymunk shape.
"""
entity = next(iter([e for e in self.scene_elements if pm_shape in e.pm_elements]), None)
return entity
def get_agent_from_shape(self, pm_shape):
"""
Returns: Returns the Agent associated with the pymunk shape.
"""
for agent in self.agents:
if agent.owns_shape(pm_shape):
return agent
return None
def get_entity_from_shape(self, pm_shape):
"""
Returns the element associated with the pymunk shape
Args:
pm_shape: Pymunk shaape
Returns:
Single entitiy or None
"""
scene_element = self.get_scene_element_from_shape(pm_shape)
if scene_element is not None: return scene_element
for agent in self.agents:
part = agent.get_bodypart_from_shape(pm_shape)
if part is not None: return part
return None
def _get_closest_agent(self, ent):
dist_list = [(a.position[0] - ent.position[0])**2 + (a.position[1] - ent.position[1])**2 for a in self.agents]
index_min_dist = dist_list.index(min(dist_list))
closest_agent = self.agents[index_min_dist]
return closest_agent
def _agent_touches_entity(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
touched_entity = self.get_scene_element_from_shape(arbiter.shapes[1])
if touched_entity is None: return True
agent.reward += touched_entity.reward
list_remove, list_add = touched_entity.activate()
for entity_removed in list_remove:
self.remove_scene_element(entity_removed)
for entity_added in list_add:
self.add_scene_element(entity_added)
if touched_entity.terminate_upon_contact:
self.done = True
return True
def _agent_interacts(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
body_part = agent.get_bodypart_from_shape(arbiter.shapes[0])
interacting_entity = self.get_scene_element_from_shape(arbiter.shapes[1])
if interacting_entity is None: return True
if body_part.is_activating:
agent.reward += interacting_entity.reward
list_remove, list_add = interacting_entity.activate(body_part)
for entity_removed in list_remove:
self.remove_scene_element(entity_removed)
for entity_added in list_add:
self.add_scene_element(entity_added)
if interacting_entity.terminate_upon_contact:
self.done = True
body_part.is_activating = False
return True
def _agent_grasps(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
body_part = agent.get_bodypart_from_shape(arbiter.shapes[0])
interacting_entity = self.get_scene_element_from_shape(arbiter.shapes[1])
if interacting_entity is None: return True
if body_part.is_grasping and not body_part.is_holding:
body_part.is_holding = True
j_1 = pymunk.PinJoint(body_part.pm_body, interacting_entity.pm_body, (0, 5), (0, 0))
j_2 = pymunk.PinJoint(body_part.pm_body, interacting_entity.pm_body, (0, -5), (0, 0))
motor = pymunk.SimpleMotor(body_part.pm_body, interacting_entity.pm_body, 0)
self.space.add(j_1, j_2, motor) # , j_3, j_4, j_5, j_6, j_7, j_8)
body_part.grasped = [j_1, j_2, motor] # , j_3, j_4, j_5, j_6, j_7, j_8]
self._grasped_scene_elements[interacting_entity] = body_part
return True
def _agent_enters_zone(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
zone_reached = self.get_scene_element_from_shape(arbiter.shapes[1])
if zone_reached is None: return True
agent.reward += zone_reached.reward
if zone_reached.terminate_upon_contact:
self.done = True
return True
def _gem_interacts(self, arbiter, space, data):
gem = self.get_scene_element_from_shape(arbiter.shapes[0])
interacting_entity = self.get_scene_element_from_shape(arbiter.shapes[1])
if interacting_entity is None or gem is None: return True
agent = self._get_closest_agent(gem)
agent.reward += interacting_entity.reward
list_remove, list_add = interacting_entity.activate(gem)
for entity_removed in list_remove:
self.remove_scene_element(entity_removed)
for entity_added in list_add:
self.add_scene_element(entity_added)
if interacting_entity.terminate_upon_contact:
self.done = True
return True
def _agent_eats(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
body_part = agent.get_bodypart_from_shape(arbiter.shapes[0])
edible_entity = self.get_scene_element_from_shape(arbiter.shapes[1])
if edible_entity is None: return True
if body_part.is_eating:
agent.reward += edible_entity.get_reward()
self.remove_scene_element(edible_entity)
completely_eaten = edible_entity.eats()
if not completely_eaten:
self.add_scene_element(edible_entity, new_position=False)
body_part.is_eating = False
return True
def _agent_teleports(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
teleport = self.get_scene_element_from_shape(arbiter.shapes[1])
if teleport is None or teleport.target is None or (agent, teleport) in self._teleported:
return True
if teleport.target.traversable:
agent.position = (teleport.target.position[0], teleport.target.position[1],
agent.position[2])
else:
area_shape = teleport.target.physical_shape
if area_shape == 'rectangle':
width = teleport.target.width + agent.base_platform.radius * 2 + 1
length = teleport.target.length + agent.base_platform.radius * 2 + 1
angle = teleport.target.position[-1]
sampler = PositionAreaSampler(
center=[teleport.target.position[0], teleport.target.position[1]],
area_shape=area_shape,
angle=angle,
width_length=[width+2, length+2],
excl_width_length=[width, length],
)
else:
radius = teleport.target.radius + agent.base_platform.radius + 1
sampler = PositionAreaSampler(
center=[teleport.target.position[0], teleport.target.position[1]],
area_shape='circle',
radius=radius,
excl_radius=radius,
)
agent.position = sampler.sample()
if (agent, teleport) not in self._teleported:
self._teleported.append((agent, teleport.target))
return True
def _handle_interactions(self):
# Order is important
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.GRASPABLE, self._agent_grasps)
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.CONTACT, self._agent_touches_entity)
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.EDIBLE, self._agent_eats)
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.INTERACTIVE, self._agent_interacts)
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.PASSIVE, self._agent_enters_zone)
self.add_interaction(CollisionTypes.GEM, CollisionTypes.ACTIVATED_BY_GEM, self._gem_interacts)
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.TELEPORT, self._agent_teleports)
def add_interaction(self, collision_type_1, collision_type_2, interaction_function):
"""
Args:
collision_type_1: collision type of the first entity
collision_type_2: collision type of the second entity
interaction_function: function that handles the interaction
Returns: None
"""
handler = self.space.add_collision_handler(collision_type_1, collision_type_2)
handler.pre_solve = interaction_function
class PlaygroundRegister:
"""
Class to register Playgrounds.
"""
playgrounds = {}
@classmethod
def register(cls, playground_name):
"""
Registers a playground
"""
def decorator(subclass):
if playground_name in cls.playgrounds:
raise ValueError(playground_name+' already registered')
cls.playgrounds[playground_name] = subclass
return subclass
return decorator
@classmethod
def filter(cls, name):
return [pg for name_pg, pg in cls.playgrounds.items() if name in name_pg]
| # -*- coding: utf-8 -*-
""" Playground documentation.
Module defining Playground Base Class
"""
import os
from abc import ABC
import yaml
import pymunk
from .utils import PositionAreaSampler
from .utils.definitions import SPACE_DAMPING, CollisionTypes, SceneElementTypes
# pylint: disable=unused-argument
# pylint: disable=line-too-long
class Playground(ABC):
""" Playground is a Base Class that manages the physical simulation.
Playground manages the interactions between Agents and Scene Elements.
Attributes:
size: size of the scene (width, length).
scene_elements: list of SceneElements present in the Playground.
fields: list of fields producing SceneElements in the Playground.
agents: list of Agents present in the Playground.
initial_agent_position: position or PositionAreaSampler,
Starting position of an agent (single agent).
done: bool, True if the playground reached termination.
"""
# pylint: disable=too-many-instance-attributes
scene_entities = []
def __init__(self, size):
# Generate Scene
self.size = size
self._width, self._length = self.size
# Initialization of the pymunk space, modelling all the physics
self.space = self._initialize_space()
# Public attributes for entities in the playground
self.scene_elements = []
self.fields = []
self.agents = []
# Private attributes for managing interactions in playground
self._disappeared_scene_elements = []
self._grasped_scene_elements = {}
self._teleported = []
# Add entities declared in the scene
for scene_entity in self.scene_entities:
self.add_scene_element(scene_entity)
self.done = False
self.initial_agent_position = None
self._handle_interactions()
self.time_limit = None
self.time_limit_reached_reward = None
self.time_test = 0
@staticmethod
def parse_configuration(key):
""" Private method that parses yaml configuration files.
Args:
key: (str) name of the playground configuration.
Returns:
Dictionary of attributes and default values.
"""
fname = 'utils/configs/playground.yml'
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
with open(os.path.join(__location__, fname), 'r') as yaml_file:
default_config = yaml.load(yaml_file, Loader=yaml.SafeLoader)
return default_config[key]
@staticmethod
def _initialize_space():
""" Method to initialize Pymunk empty space for 2D physics.
Returns: Pymunk Space
"""
space = pymunk.Space()
space.gravity = pymunk.Vec2d(0., 0.)
space.damping = SPACE_DAMPING
return space
def update(self, steps):
""" Update the Playground
Update all SceneElements, Fields, Timers and Grasps
Runs the Physics engine for n steps.
Args:
steps: Number of steps
"""
for agent in self.agents:
agent.pre_step()
for _ in range(steps):
self.space.step(1. / steps)
for elem in self.scene_elements:
elem.pre_step()
if elem.follows_waypoints:
self.space.reindex_shapes_for_body(elem.pm_body)
self._fields_produce()
self._check_timers()
self._release_grasps()
self._check_teleports()
def reset(self):
""" Reset the Playground to its initial state.
"""
# remove entities and filter out entities which are temporary
for entity in self.scene_elements.copy():
self.remove_scene_element(entity)
# reset and replace entities that are not temporary
for entity in self._disappeared_scene_elements.copy():
entity.reset()
self.add_scene_element(entity)
# reset fields
for entity in self.fields:
entity.reset()
# reset agents
for agent in self.agents.copy():
agent.reset()
self.remove_agent(agent)
self.add_agent(agent)
self.done = False
def add_agent(self, new_agent, tries=100):
""" Method to add an Agent to the Playground.
If the Agent has its attribute allow_overlapping set to False,
the playground will try to add it multiple times.
Args:
new_agent: Agent to add to the Playground
tries: Number of times the Playground will try to place the agent
"""
# If already there
if new_agent in self.scene_elements:
raise ValueError('Agent already in Playground')
# Inform agent of the playground size
new_agent.size_playground = self.size
if new_agent.allow_overlapping:
self._add_agent(new_agent)
else:
success = self._add_agent_without_ovelapping(new_agent, tries = tries)
if not success:
raise ValueError("Agent couldn't be placed without overlapping")
def _add_agent(self, agent):
""" Add an agent to the playground.
Args:
agent: Agent.
"""
self.agents.append(agent)
if agent.initial_position is not None:
pass
elif self.initial_agent_position is not None:
agent.initial_position = self.initial_agent_position
else:
raise ValueError("""Agent initial position should be defined in the playground or passed as an argument)
to the class agent""")
agent.position = agent.initial_position
for body_part in agent.parts:
self.space.add(*body_part.pm_elements)
def _add_agent_without_ovelapping(self, agent, tries=100):
""" Method to add am Agent to the Playground without overlapping.
Useful when an Agent has a random initial position, to avoid overlapping.
Args:
agent: Agent to add to the Playground
tries: Number of times the Playground will try to place the new_entity
"""
trial = 0
visible_collide_parts = True
interactive_collide_parts = True
all_shapes = self.space.shapes.copy()
while (interactive_collide_parts or visible_collide_parts) and trial < tries:
self._add_agent(agent)
visible_collide_parts = False
interactive_collide_parts = False
for part in agent.parts:
visible_collide = False
interactive_collide = False
if part.pm_visible_shape is not None:
collisions = [part.pm_visible_shape.shapes_collide(shape) for shape in all_shapes]
visible_collide = any([len(collision.points) != 0 for collision in collisions])
if part.pm_interaction_shape is not None:
collisions = [part.pm_interaction_shape.shapes_collide(shape) for shape in all_shapes]
interactive_collide = any([len(collision.points) != 0 for collision in collisions])
visible_collide_parts = visible_collide or visible_collide_parts
interactive_collide_parts = interactive_collide or interactive_collide_parts
if visible_collide_parts or interactive_collide_parts:
self.remove_agent(agent)
trial += 1
if interactive_collide_parts or visible_collide_parts:
return False
return True
def _add_scene_element(self, new_scene_element, new_position):
""" Method to add a SceneElement to the Playground.
"""
if new_scene_element in self.scene_elements:
raise ValueError('Scene element already in Playground')
new_scene_element.size_playground = self.size
if new_position:
new_scene_element.position = new_scene_element.initial_position
self.space.add(*new_scene_element.pm_elements)
self.scene_elements.append(new_scene_element)
if new_scene_element in self._disappeared_scene_elements:
self._disappeared_scene_elements.remove(new_scene_element)
def _add_scene_element_without_ovelapping(self, scene_element, tries, new_position):
trial = 0
visible_collide = True
interactive_collide = True
all_shapes = self.space.shapes.copy()
while (visible_collide or interactive_collide) and trial < tries:
self._add_scene_element(scene_element, new_position)
visible_collide = False
interactive_collide = False
if scene_element.pm_visible_shape is not None:
collisions = [scene_element.pm_visible_shape.shapes_collide(shape) for shape in all_shapes]
visible_collide = any([len(collision.points) != 0 for collision in collisions])
if scene_element.pm_interaction_shape is not None:
collisions = [scene_element.pm_interaction_shape.shapes_collide(shape) for shape in all_shapes]
interactive_collide = any([len(collision.points) != 0 for collision in collisions])
if visible_collide or interactive_collide:
self.remove_scene_element(scene_element)
trial += 1
if visible_collide or interactive_collide:
return False
return True
def add_scene_element(self, scene_element, tries=100, new_position=True):
""" Method to add a SceneElement to the Playground.
If the Element has its attribute allow_overlapping set to False,
the playground will try to add it multiple times.
Useful when a SceneElement has a random initial position, to avoid overlapping.
Args:
scene_element: Scene Element to add to the Playground
tries: Number of times the Playground will try to place the new_entity
"""
if scene_element.entity_type is SceneElementTypes.FIELD:
# If already there
if scene_element in self.fields:
raise ValueError('Field already in Playground')
self.fields.append(scene_element)
else:
if scene_element in self.scene_elements:
raise ValueError('Field already in Playground')
# Else
scene_element.size_playground = self.size
if scene_element.allow_overlapping:
self._add_scene_element(scene_element, new_position)
else:
success = self._add_scene_element_without_ovelapping(scene_element, tries = tries, new_position=new_position)
if not success:
raise ValueError('Entity could not be placed without overlapping')
def _remove_agents(self):
for agent in self.agents:
self.remove_agent(agent)
def remove_agent(self, agent):
if agent not in self.agents:
return False
for part in agent.parts:
self.space.remove(*part.pm_elements)
part.velocity = [0, 0, 0]
part.grasped = []
agent.initial_position = None
self.agents.remove(agent)
return True
def remove_scene_element(self, scene_element):
if scene_element not in self.scene_elements:
return False
self.space.remove(*scene_element.pm_elements)
self.scene_elements.remove(scene_element)
if not scene_element.is_temporary_entity:
self._disappeared_scene_elements.append(scene_element)
for elem in self.scene_elements:
if elem.entity_type == 'dispenser' and scene_element in elem.produced_entities:
elem.produced_entities.remove(scene_element)
for field in self.fields:
if scene_element in field.produced_entities:
field.produced_entities.remove(scene_element)
if scene_element in self._grasped_scene_elements.keys():
body_part = self._grasped_scene_elements[scene_element]
self.space.remove(*body_part.grasped)
body_part.grasped = []
# self._grasped_scene_elements.pop(scene_element)
return True
def _fields_produce(self):
for field in self.fields:
if field.can_produce():
new_entity = field.produce()
self.add_scene_element(new_entity)
def _check_timers(self):
for entity in self.scene_elements:
if entity.timed and entity.timer == 0:
list_remove, list_add = entity.activate(self)
for entity_removed in list_remove:
self.remove_scene_element(entity_removed)
for entity_added in list_add:
self.add_scene_element(entity_added)
def _release_grasps(self):
for agent in self.agents:
for part in agent.parts:
if not part.is_holding and part.can_grasp:
for joint in part.grasped:
self.space.remove(joint)
part.grasped = []
for element_grasped, part in self._grasped_scene_elements.copy().items():
if not part.grasped:
self._grasped_scene_elements.pop(element_grasped)
def _check_teleports(self):
for agent, teleport in self._teleported:
overlaps = self.agent_overlaps_with_element(agent, teleport)
if not overlaps:
self._teleported.remove((agent, teleport))
def agent_overlaps_with_element(self, agent, element):
overlaps = False
for part in agent.parts:
if element.pm_visible_shape is not None:
overlaps = overlaps or part.pm_visible_shape.shapes_collide(element.pm_visible_shape).points != []
if element.pm_interaction_shape is not None:
overlaps = overlaps or part.pm_visible_shape.shapes_collide(element.pm_interaction_shape).points != []
return overlaps
def get_scene_element_from_shape(self, pm_shape):
"""
Returns: Returns the Scene Element associated with the pymunk shape.
"""
entity = next(iter([e for e in self.scene_elements if pm_shape in e.pm_elements]), None)
return entity
def get_agent_from_shape(self, pm_shape):
"""
Returns: Returns the Agent associated with the pymunk shape.
"""
for agent in self.agents:
if agent.owns_shape(pm_shape):
return agent
return None
def get_entity_from_shape(self, pm_shape):
"""
Returns the element associated with the pymunk shape
Args:
pm_shape: Pymunk shaape
Returns:
Single entitiy or None
"""
scene_element = self.get_scene_element_from_shape(pm_shape)
if scene_element is not None: return scene_element
for agent in self.agents:
part = agent.get_bodypart_from_shape(pm_shape)
if part is not None: return part
return None
def _get_closest_agent(self, ent):
dist_list = [(a.position[0] - ent.position[0])**2 + (a.position[1] - ent.position[1])**2 for a in self.agents]
index_min_dist = dist_list.index(min(dist_list))
closest_agent = self.agents[index_min_dist]
return closest_agent
def _agent_touches_entity(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
touched_entity = self.get_scene_element_from_shape(arbiter.shapes[1])
if touched_entity is None: return True
agent.reward += touched_entity.reward
list_remove, list_add = touched_entity.activate()
for entity_removed in list_remove:
self.remove_scene_element(entity_removed)
for entity_added in list_add:
self.add_scene_element(entity_added)
if touched_entity.terminate_upon_contact:
self.done = True
return True
def _agent_interacts(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
body_part = agent.get_bodypart_from_shape(arbiter.shapes[0])
interacting_entity = self.get_scene_element_from_shape(arbiter.shapes[1])
if interacting_entity is None: return True
if body_part.is_activating:
agent.reward += interacting_entity.reward
list_remove, list_add = interacting_entity.activate(body_part)
for entity_removed in list_remove:
self.remove_scene_element(entity_removed)
for entity_added in list_add:
self.add_scene_element(entity_added)
if interacting_entity.terminate_upon_contact:
self.done = True
body_part.is_activating = False
return True
def _agent_grasps(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
body_part = agent.get_bodypart_from_shape(arbiter.shapes[0])
interacting_entity = self.get_scene_element_from_shape(arbiter.shapes[1])
if interacting_entity is None: return True
if body_part.is_grasping and not body_part.is_holding:
body_part.is_holding = True
j_1 = pymunk.PinJoint(body_part.pm_body, interacting_entity.pm_body, (0, 5), (0, 0))
j_2 = pymunk.PinJoint(body_part.pm_body, interacting_entity.pm_body, (0, -5), (0, 0))
motor = pymunk.SimpleMotor(body_part.pm_body, interacting_entity.pm_body, 0)
self.space.add(j_1, j_2, motor) # , j_3, j_4, j_5, j_6, j_7, j_8)
body_part.grasped = [j_1, j_2, motor] # , j_3, j_4, j_5, j_6, j_7, j_8]
self._grasped_scene_elements[interacting_entity] = body_part
return True
def _agent_enters_zone(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
zone_reached = self.get_scene_element_from_shape(arbiter.shapes[1])
if zone_reached is None: return True
agent.reward += zone_reached.reward
if zone_reached.terminate_upon_contact:
self.done = True
return True
def _gem_interacts(self, arbiter, space, data):
gem = self.get_scene_element_from_shape(arbiter.shapes[0])
interacting_entity = self.get_scene_element_from_shape(arbiter.shapes[1])
if interacting_entity is None or gem is None: return True
agent = self._get_closest_agent(gem)
agent.reward += interacting_entity.reward
list_remove, list_add = interacting_entity.activate(gem)
for entity_removed in list_remove:
self.remove_scene_element(entity_removed)
for entity_added in list_add:
self.add_scene_element(entity_added)
if interacting_entity.terminate_upon_contact:
self.done = True
return True
def _agent_eats(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
body_part = agent.get_bodypart_from_shape(arbiter.shapes[0])
edible_entity = self.get_scene_element_from_shape(arbiter.shapes[1])
if edible_entity is None: return True
if body_part.is_eating:
agent.reward += edible_entity.get_reward()
self.remove_scene_element(edible_entity)
completely_eaten = edible_entity.eats()
if not completely_eaten:
self.add_scene_element(edible_entity, new_position=False)
body_part.is_eating = False
return True
def _agent_teleports(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
teleport = self.get_scene_element_from_shape(arbiter.shapes[1])
if teleport is None or teleport.target is None or (agent, teleport) in self._teleported:
return True
if teleport.target.traversable:
agent.position = (teleport.target.position[0], teleport.target.position[1],
agent.position[2])
else:
area_shape = teleport.target.physical_shape
if area_shape == 'rectangle':
width = teleport.target.width + agent.base_platform.radius * 2 + 1
length = teleport.target.length + agent.base_platform.radius * 2 + 1
angle = teleport.target.position[-1]
sampler = PositionAreaSampler(
center=[teleport.target.position[0], teleport.target.position[1]],
area_shape=area_shape,
angle=angle,
width_length=[width+2, length+2],
excl_width_length=[width, length],
)
else:
radius = teleport.target.radius + agent.base_platform.radius + 1
sampler = PositionAreaSampler(
center=[teleport.target.position[0], teleport.target.position[1]],
area_shape='circle',
radius=radius,
excl_radius=radius,
)
agent.position = sampler.sample()
if (agent, teleport) not in self._teleported:
self._teleported.append((agent, teleport.target))
return True
def _handle_interactions(self):
# Order is important
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.GRASPABLE, self._agent_grasps)
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.CONTACT, self._agent_touches_entity)
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.EDIBLE, self._agent_eats)
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.INTERACTIVE, self._agent_interacts)
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.PASSIVE, self._agent_enters_zone)
self.add_interaction(CollisionTypes.GEM, CollisionTypes.ACTIVATED_BY_GEM, self._gem_interacts)
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.TELEPORT, self._agent_teleports)
def add_interaction(self, collision_type_1, collision_type_2, interaction_function):
"""
Args:
collision_type_1: collision type of the first entity
collision_type_2: collision type of the second entity
interaction_function: function that handles the interaction
Returns: None
"""
handler = self.space.add_collision_handler(collision_type_1, collision_type_2)
handler.pre_solve = interaction_function
class PlaygroundRegister:
"""
Class to register Playgrounds.
"""
playgrounds = {}
@classmethod
def register(cls, playground_name):
"""
Registers a playground
"""
def decorator(subclass):
if playground_name in cls.playgrounds:
raise ValueError(playground_name+' already registered')
cls.playgrounds[playground_name] = subclass
return subclass
return decorator
@classmethod
def filter(cls, name):
return [pg for name_pg, pg in cls.playgrounds.items() if name in name_pg]
| en | 0.811134 | # -*- coding: utf-8 -*- Playground documentation. Module defining Playground Base Class # pylint: disable=unused-argument # pylint: disable=line-too-long Playground is a Base Class that manages the physical simulation. Playground manages the interactions between Agents and Scene Elements. Attributes: size: size of the scene (width, length). scene_elements: list of SceneElements present in the Playground. fields: list of fields producing SceneElements in the Playground. agents: list of Agents present in the Playground. initial_agent_position: position or PositionAreaSampler, Starting position of an agent (single agent). done: bool, True if the playground reached termination. # pylint: disable=too-many-instance-attributes # Generate Scene # Initialization of the pymunk space, modelling all the physics # Public attributes for entities in the playground # Private attributes for managing interactions in playground # Add entities declared in the scene Private method that parses yaml configuration files. Args: key: (str) name of the playground configuration. Returns: Dictionary of attributes and default values. Method to initialize Pymunk empty space for 2D physics. Returns: Pymunk Space Update the Playground Update all SceneElements, Fields, Timers and Grasps Runs the Physics engine for n steps. Args: steps: Number of steps Reset the Playground to its initial state. # remove entities and filter out entities which are temporary # reset and replace entities that are not temporary # reset fields # reset agents Method to add an Agent to the Playground. If the Agent has its attribute allow_overlapping set to False, the playground will try to add it multiple times. Args: new_agent: Agent to add to the Playground tries: Number of times the Playground will try to place the agent # If already there # Inform agent of the playground size Add an agent to the playground. Args: agent: Agent. Agent initial position should be defined in the playground or passed as an argument) to the class agent Method to add am Agent to the Playground without overlapping. Useful when an Agent has a random initial position, to avoid overlapping. Args: agent: Agent to add to the Playground tries: Number of times the Playground will try to place the new_entity Method to add a SceneElement to the Playground. Method to add a SceneElement to the Playground. If the Element has its attribute allow_overlapping set to False, the playground will try to add it multiple times. Useful when a SceneElement has a random initial position, to avoid overlapping. Args: scene_element: Scene Element to add to the Playground tries: Number of times the Playground will try to place the new_entity # If already there # Else # self._grasped_scene_elements.pop(scene_element) Returns: Returns the Scene Element associated with the pymunk shape. Returns: Returns the Agent associated with the pymunk shape. Returns the element associated with the pymunk shape Args: pm_shape: Pymunk shaape Returns: Single entitiy or None # , j_3, j_4, j_5, j_6, j_7, j_8) # , j_3, j_4, j_5, j_6, j_7, j_8] # Order is important Args: collision_type_1: collision type of the first entity collision_type_2: collision type of the second entity interaction_function: function that handles the interaction Returns: None Class to register Playgrounds. Registers a playground | 2.794216 | 3 |
UEManifestReader/classes/FManifestData.py | ryryburge/UEManifestReader | 0 | 10548 | <filename>UEManifestReader/classes/FManifestData.py
# -*- coding: utf-8 -*-
import zlib
from UEManifestReader.enums import *
from UEManifestReader.classes.FCustomFields import FCustomFields
from UEManifestReader.classes.FManifestMeta import FManifestMeta
from UEManifestReader.classes.FChunkDataList import FChunkDataList
from UEManifestReader.classes.FManifestHeader import FManifestHeader
from UEManifestReader.classes.stream_reader import ConstBitStreamWrapper
from UEManifestReader.classes.FFileManifestList import FFileManifestList
# FManifestData - The public interface to load/saving manifest files.
class FManifestData():
def __init__(self, data: bytes):
self.reader = ConstBitStreamWrapper(data)
self.start()
def start(self):
StartPos = self.reader.bytepos
# Read the Manifest Header
self.Header = FManifestHeader(self.reader)
# If we are loading an old format, defer to the old code!
if (self.Header.Version.value < EFeatureLevel.StoredAsBinaryData.value):
FullDataSize = GetFullDataSize(Header)
FullData = reader.read_bytes(FullDataSize)
self.reader.bytepos = StartPos
temp = FManifestData(self.reader.read_bytes(FullDataSize))
self.Meta = temp.Meta
self.ChunkDataList = temp.ChunkDataList
self.FileManifestList = temp.FileManifestList
self.CustomFields = temp.CustomFields
return
else:
# Compression format selection - we only have one right now.
# Fill the array with loaded data.
# DataSizeCompressed always equals the size of the data following the header.
if self.Header.StoredAs == EManifestStorageFlags.Compressed.value:
Decompressed = zlib.decompress(self.reader.read_bytes(self.Header.DataSizeCompressed))
ManifestRawData = ConstBitStreamWrapper(Decompressed)
elif self.Header.StoredAs == EManifestStorageFlags.Encrypted.value:
raise Exception('Encrypted Manifests are not supported yet')
# Read the Manifest Meta
self.Meta = FManifestMeta(ManifestRawData)
# Read the Manifest Chunk List
self.ChunkDataList = FChunkDataList(ManifestRawData)
# Read the Manifest File List
self.FileManifestList = FFileManifestList(ManifestRawData)
# Read the Custom Fields
self.CustomFields = FCustomFields(ManifestRawData)
def GetFullDataSize(self) -> int:
bIsCompressed = self.Header.StoredAs == EManifestStorageFlags.Compressed
return self.Header.HeaderSize + (bIsCompressed if Header.DataSizeCompressed else Header.DataSizeUncompressed)
| <filename>UEManifestReader/classes/FManifestData.py
# -*- coding: utf-8 -*-
import zlib
from UEManifestReader.enums import *
from UEManifestReader.classes.FCustomFields import FCustomFields
from UEManifestReader.classes.FManifestMeta import FManifestMeta
from UEManifestReader.classes.FChunkDataList import FChunkDataList
from UEManifestReader.classes.FManifestHeader import FManifestHeader
from UEManifestReader.classes.stream_reader import ConstBitStreamWrapper
from UEManifestReader.classes.FFileManifestList import FFileManifestList
# FManifestData - The public interface to load/saving manifest files.
class FManifestData():
def __init__(self, data: bytes):
self.reader = ConstBitStreamWrapper(data)
self.start()
def start(self):
StartPos = self.reader.bytepos
# Read the Manifest Header
self.Header = FManifestHeader(self.reader)
# If we are loading an old format, defer to the old code!
if (self.Header.Version.value < EFeatureLevel.StoredAsBinaryData.value):
FullDataSize = GetFullDataSize(Header)
FullData = reader.read_bytes(FullDataSize)
self.reader.bytepos = StartPos
temp = FManifestData(self.reader.read_bytes(FullDataSize))
self.Meta = temp.Meta
self.ChunkDataList = temp.ChunkDataList
self.FileManifestList = temp.FileManifestList
self.CustomFields = temp.CustomFields
return
else:
# Compression format selection - we only have one right now.
# Fill the array with loaded data.
# DataSizeCompressed always equals the size of the data following the header.
if self.Header.StoredAs == EManifestStorageFlags.Compressed.value:
Decompressed = zlib.decompress(self.reader.read_bytes(self.Header.DataSizeCompressed))
ManifestRawData = ConstBitStreamWrapper(Decompressed)
elif self.Header.StoredAs == EManifestStorageFlags.Encrypted.value:
raise Exception('Encrypted Manifests are not supported yet')
# Read the Manifest Meta
self.Meta = FManifestMeta(ManifestRawData)
# Read the Manifest Chunk List
self.ChunkDataList = FChunkDataList(ManifestRawData)
# Read the Manifest File List
self.FileManifestList = FFileManifestList(ManifestRawData)
# Read the Custom Fields
self.CustomFields = FCustomFields(ManifestRawData)
def GetFullDataSize(self) -> int:
bIsCompressed = self.Header.StoredAs == EManifestStorageFlags.Compressed
return self.Header.HeaderSize + (bIsCompressed if Header.DataSizeCompressed else Header.DataSizeUncompressed)
| en | 0.776341 | # -*- coding: utf-8 -*- # FManifestData - The public interface to load/saving manifest files. # Read the Manifest Header # If we are loading an old format, defer to the old code! # Compression format selection - we only have one right now. # Fill the array with loaded data. # DataSizeCompressed always equals the size of the data following the header. # Read the Manifest Meta # Read the Manifest Chunk List # Read the Manifest File List # Read the Custom Fields | 2.095841 | 2 |
Python/module.py | minjibyeongho/KOSA-Pytorch | 2 | 10549 | <reponame>minjibyeongho/KOSA-Pytorch
#module.py
def hello():
print("Hello!")
#if __name__=="__main__":
# print(__name__) | #module.py
def hello():
print("Hello!")
#if __name__=="__main__":
# print(__name__) | ar | 0.094354 | #module.py #if __name__=="__main__": # print(__name__) | 2.05883 | 2 |
data_utils.py | algoprog/Quin | 47 | 10550 | import csv
import json
import pickle
import logging
import re
import pandas
import gzip
import os
import numpy as np
from random import randint, random
from tqdm import tqdm
from retriever.dense_retriever import DenseRetriever
from models.tokenization import tokenize
from typing import Union, List
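# Data loading utilities for the Quin models: helpers for reading NLI,
# QA-relevance and retrieval training/evaluation examples from JSONL/TSV files.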
class InputExample:
"""
Structure for one input example with texts, the label and a unique id
"""
def __init__(self, guid: str, texts: List[str], label: Union[int, float]):
"""
Creates one InputExample with the given texts, guid and label
        str.strip() is called on every text.

        :param guid: id for the example
        :param texts: the texts for the example
        :param label: the label for the example
"""
self.guid = guid
self.texts = [text.strip() for text in texts]
self.label = label
def get_texts(self):
return self.texts
def get_label(self):
return self.label
class LoggingHandler(logging.Handler):
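    """
    Logging handler that writes records through tqdm.write, so log messages
    do not break active tqdm progress bars.
    """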
def __init__(self, level=logging.NOTSET):
super().__init__(level)
def emit(self, record):
try:
msg = self.format(record)
tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def get_examples(filename, max_examples=0):
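    """
    Loads NLI examples from a JSONL file where each line has the keys
    's1', 's2' and 'label'. Labels are mapped to 0 (entailment),
    1 (contradiction) and 2 (any other label, e.g. neutral).
    """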
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
label = sample['label']
guid = "%s-%d" % (filename, id)
id += 1
if label == 'entailment':
label = 0
elif label == 'contradiction':
label = 1
else:
label = 2
examples.append(InputExample(guid=guid,
texts=[sample['s1'], sample['s2']],
label=label))
if 0 < max_examples <= len(examples):
break
return examples
def get_qa_examples(filename, max_examples=0, dev=False):
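    """
    Loads question-answer relevance examples from a JSONL file with the keys
    'question', 'answer' and 'relevant' (0/1). When dev=False, positive pairs
    are repeated several extra times to oversample the relevant class.
    """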
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
label = sample['relevant']
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['question'], sample['answer']],
label=label))
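            # During training (dev=False), repeat positive pairs to oversample the relevant class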
if not dev:
if label == 1:
for _ in range(13):
examples.append(InputExample(guid=guid,
texts=[sample['question'], sample['answer']],
label=label))
if 0 < max_examples <= len(examples):
break
return examples
def map_label(label):
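    """Maps a textual relevance label ('relevant'/'irrelevant') to its integer class id."""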
labels = {"relevant": 0, "irrelevant": 1}
return labels[label.strip().lower()]
def get_qar_examples(filename, max_examples=0):
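    """
    Loads (question, answer) pairs from a JSONL file as positive retrieval
    examples with label 1.0.
    """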
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['question'], sample['answer']],
label=1.0))
if 0 < max_examples <= len(examples):
break
return examples
def get_qar_artificial_examples():
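    """
    Pairs artificially generated queries (one '|'-separated group per line of
    data/qar/qar_artificial_queries.csv) with the MS MARCO passage at the same
    line index, producing positive retrieval examples with label 1.0.
    """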
examples = []
id = 0
print('Loading passages...')
passages = []
file = open('data/msmarco/collection.tsv', 'r', encoding='utf8')
while True:
line = file.readline()
if not line:
break
line = line.rstrip('\n').split('\t')
passages.append(line[1])
print('Loaded passages')
with open('data/qar/qar_artificial_queries.csv') as f:
for i, line in enumerate(f):
queries = line.rstrip('\n').split('|')
for query in queries:
guid = "%s-%d" % ('', id)
id += 1
examples.append(InputExample(guid=guid,
texts=[query, passages[i]],
label=1.0))
return examples
def get_single_examples(filename, max_examples=0):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['text']],
label=1))
if 0 < max_examples <= len(examples):
break
return examples
def get_qnli_examples(filename, max_examples=0, no_contradictions=False, fever_only=False):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
label = sample['label']
if label == 'contradiction' and no_contradictions:
continue
if sample['evidence'] == '':
continue
if fever_only and sample['source'] != 'fever':
continue
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['statement'].strip(), sample['evidence'].strip()],
label=1.0))
if 0 < max_examples <= len(examples):
break
return examples
def get_retrieval_examples(filename, negative_corpus='data/msmarco/collection.tsv', max_examples=0, no_statements=True,
encoder_model=None, negative_samples_num=4):
examples = []
queries = []
passages = []
negative_passages = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
if 'evidence' in sample and sample['evidence'] == '':
continue
guid = "%s-%d" % (filename, id)
id += 1
if sample['type'] == 'question':
query = sample['question']
passage = sample['answer']
else:
query = sample['statement']
passage = sample['evidence']
query = query.strip()
passage = passage.strip()
if sample['type'] == 'statement' and no_statements:
continue
queries.append(query)
passages.append(passage)
if sample['source'] == 'natural-questions':
negative_passages.append(passage)
if max_examples == len(passages):
break
if encoder_model is not None:
# Load MSMARCO passages
logging.info('Loading MSM passages...')
with open(negative_corpus) as file:
for line in file:
p = line.rstrip('\n').split('\t')[1]
negative_passages.append(p)
logging.info('Building ANN index...')
dense_retriever = DenseRetriever(model=encoder_model, batch_size=1024, use_gpu=True)
dense_retriever.create_index_from_documents(negative_passages)
results = dense_retriever.search(queries=queries, limit=100, probes=256)
negative_samples = [
[negative_passages[p[0]] for p in r if negative_passages[p[0]] != passages[i]][:negative_samples_num]
for i, r in enumerate(results)
]
# print(queries[0])
# print(negative_samples[0][0])
for i in range(len(queries)):
texts = [queries[i], passages[i]] + negative_samples[i]
examples.append(InputExample(guid=guid,
texts=texts,
label=1.0))
else:
for i in range(len(queries)):
texts = [queries[i], passages[i]]
examples.append(InputExample(guid=guid,
texts=texts,
label=1.0))
return examples
def get_pair_input(tokenizer, sent1, sent2, max_len=256):
text = "[CLS] {} [SEP] {} [SEP]".format(sent1, sent2)
tokenized_text = tokenizer.tokenize(text)[:max_len]
indexed_tokens = tokenizer.encode(text)[:max_len]
segments_ids = []
sep_flag = False
for i in range(len(tokenized_text)):
if tokenized_text[i] == '[SEP]' and not sep_flag:
segments_ids.append(0)
sep_flag = True
elif sep_flag:
segments_ids.append(1)
else:
segments_ids.append(0)
return indexed_tokens, segments_ids
def build_batch(tokenizer, text_list, max_len=256):
token_id_list = []
segment_list = []
attention_masks = []
longest = -1
for pair in text_list:
sent1, sent2 = pair
ids, segs = get_pair_input(tokenizer, sent1, sent2, max_len=max_len)
if ids is None or segs is None:
continue
token_id_list.append(ids)
segment_list.append(segs)
attention_masks.append([1] * len(ids))
if len(ids) > longest:
longest = len(ids)
if len(token_id_list) == 0:
return None, None, None
# padding
assert (len(token_id_list) == len(segment_list))
for ii in range(len(token_id_list)):
token_id_list[ii] += [0] * (longest - len(token_id_list[ii]))
        attention_masks[ii] += [0] * (longest - len(attention_masks[ii]))  # padding positions should not be attended to
segment_list[ii] += [1] * (longest - len(segment_list[ii]))
return token_id_list, segment_list, attention_masks
def load_unsupervised_dataset(dataset_file):
print('Loading dataset...')
x = pickle.load(open(dataset_file, "rb"))
print('Done')
return x, len(x[0])
def load_supervised_dataset(dataset_file):
print('Loading dataset...')
d = pickle.load(open(dataset_file, "rb"))
print('Done')
return d[0], d[1]
| import csv
import json
import pickle
import logging
import re
import pandas
import gzip
import os
import numpy as np
from random import randint, random
from tqdm import tqdm
from retriever.dense_retriever import DenseRetriever
from models.tokenization import tokenize
from typing import Union, List
class InputExample:
"""
Structure for one input example with texts, the label and a unique id
"""
def __init__(self, guid: str, texts: List[str], label: Union[int, float]):
"""
Creates one InputExample with the given texts, guid and label
str.strip() is called on both texts.
:param guid
id for the example
:param texts
the texts for the example
:param label
the label for the example
"""
self.guid = guid
self.texts = [text.strip() for text in texts]
self.label = label
def get_texts(self):
return self.texts
def get_label(self):
return self.label
class LoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET):
super().__init__(level)
def emit(self, record):
try:
msg = self.format(record)
tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def get_examples(filename, max_examples=0):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
label = sample['label']
guid = "%s-%d" % (filename, id)
id += 1
if label == 'entailment':
label = 0
elif label == 'contradiction':
label = 1
else:
label = 2
examples.append(InputExample(guid=guid,
texts=[sample['s1'], sample['s2']],
label=label))
if 0 < max_examples <= len(examples):
break
return examples
def get_qa_examples(filename, max_examples=0, dev=False):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
label = sample['relevant']
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['question'], sample['answer']],
label=label))
if not dev:
if label == 1:
for _ in range(13):
examples.append(InputExample(guid=guid,
texts=[sample['question'], sample['answer']],
label=label))
if 0 < max_examples <= len(examples):
break
return examples
def map_label(label):
labels = {"relevant": 0, "irrelevant": 1}
return labels[label.strip().lower()]
def get_qar_examples(filename, max_examples=0):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['question'], sample['answer']],
label=1.0))
if 0 < max_examples <= len(examples):
break
return examples
def get_qar_artificial_examples():
examples = []
id = 0
print('Loading passages...')
passages = []
file = open('data/msmarco/collection.tsv', 'r', encoding='utf8')
while True:
line = file.readline()
if not line:
break
line = line.rstrip('\n').split('\t')
passages.append(line[1])
print('Loaded passages')
with open('data/qar/qar_artificial_queries.csv') as f:
for i, line in enumerate(f):
queries = line.rstrip('\n').split('|')
for query in queries:
guid = "%s-%d" % ('', id)
id += 1
examples.append(InputExample(guid=guid,
texts=[query, passages[i]],
label=1.0))
return examples
def get_single_examples(filename, max_examples=0):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['text']],
label=1))
if 0 < max_examples <= len(examples):
break
return examples
def get_qnli_examples(filename, max_examples=0, no_contradictions=False, fever_only=False):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
label = sample['label']
if label == 'contradiction' and no_contradictions:
continue
if sample['evidence'] == '':
continue
if fever_only and sample['source'] != 'fever':
continue
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['statement'].strip(), sample['evidence'].strip()],
label=1.0))
if 0 < max_examples <= len(examples):
break
return examples
def get_retrieval_examples(filename, negative_corpus='data/msmarco/collection.tsv', max_examples=0, no_statements=True,
encoder_model=None, negative_samples_num=4):
examples = []
queries = []
passages = []
negative_passages = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
if 'evidence' in sample and sample['evidence'] == '':
continue
guid = "%s-%d" % (filename, id)
id += 1
if sample['type'] == 'question':
query = sample['question']
passage = sample['answer']
else:
query = sample['statement']
passage = sample['evidence']
query = query.strip()
passage = passage.strip()
if sample['type'] == 'statement' and no_statements:
continue
queries.append(query)
passages.append(passage)
if sample['source'] == 'natural-questions':
negative_passages.append(passage)
if max_examples == len(passages):
break
if encoder_model is not None:
# Load MSMARCO passages
logging.info('Loading MSM passages...')
with open(negative_corpus) as file:
for line in file:
p = line.rstrip('\n').split('\t')[1]
negative_passages.append(p)
logging.info('Building ANN index...')
dense_retriever = DenseRetriever(model=encoder_model, batch_size=1024, use_gpu=True)
dense_retriever.create_index_from_documents(negative_passages)
results = dense_retriever.search(queries=queries, limit=100, probes=256)
negative_samples = [
[negative_passages[p[0]] for p in r if negative_passages[p[0]] != passages[i]][:negative_samples_num]
for i, r in enumerate(results)
]
# print(queries[0])
# print(negative_samples[0][0])
for i in range(len(queries)):
texts = [queries[i], passages[i]] + negative_samples[i]
examples.append(InputExample(guid=guid,
texts=texts,
label=1.0))
else:
for i in range(len(queries)):
texts = [queries[i], passages[i]]
examples.append(InputExample(guid=guid,
texts=texts,
label=1.0))
return examples
def get_pair_input(tokenizer, sent1, sent2, max_len=256):
text = "[CLS] {} [SEP] {} [SEP]".format(sent1, sent2)
tokenized_text = tokenizer.tokenize(text)[:max_len]
indexed_tokens = tokenizer.encode(text)[:max_len]
segments_ids = []
sep_flag = False
for i in range(len(tokenized_text)):
if tokenized_text[i] == '[SEP]' and not sep_flag:
segments_ids.append(0)
sep_flag = True
elif sep_flag:
segments_ids.append(1)
else:
segments_ids.append(0)
return indexed_tokens, segments_ids
def build_batch(tokenizer, text_list, max_len=256):
token_id_list = []
segment_list = []
attention_masks = []
longest = -1
for pair in text_list:
sent1, sent2 = pair
ids, segs = get_pair_input(tokenizer, sent1, sent2, max_len=max_len)
if ids is None or segs is None:
continue
token_id_list.append(ids)
segment_list.append(segs)
attention_masks.append([1] * len(ids))
if len(ids) > longest:
longest = len(ids)
if len(token_id_list) == 0:
return None, None, None
# padding
assert (len(token_id_list) == len(segment_list))
for ii in range(len(token_id_list)):
token_id_list[ii] += [0] * (longest - len(token_id_list[ii]))
        attention_masks[ii] += [0] * (longest - len(attention_masks[ii]))  # padding positions should not be attended to
segment_list[ii] += [1] * (longest - len(segment_list[ii]))
return token_id_list, segment_list, attention_masks
def load_unsupervised_dataset(dataset_file):
print('Loading dataset...')
x = pickle.load(open(dataset_file, "rb"))
print('Done')
return x, len(x[0])
def load_supervised_dataset(dataset_file):
print('Loading dataset...')
d = pickle.load(open(dataset_file, "rb"))
print('Done')
return d[0], d[1]
| en | 0.646983 | Structure for one input example with texts, the label and a unique id Creates one InputExample with the given texts, guid and label str.strip() is called on both texts. :param guid id for the example :param texts the texts for the example :param label the label for the example # Load MSMARCO passages # print(queries[0]) # print(negative_samples[0][0]) # padding | 2.484442 | 2 |
hc/accounts/migrations/0025_remove_member_team.py | opsct/healthchecks | 0 | 10551 | <gh_stars>0
# Generated by Django 2.1.5 on 2019-01-22 08:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0024_auto_20190119_1540'),
]
operations = [
migrations.RemoveField(
model_name='member',
name='team',
),
]
| # Generated by Django 2.1.5 on 2019-01-22 08:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0024_auto_20190119_1540'),
]
operations = [
migrations.RemoveField(
model_name='member',
name='team',
),
] | en | 0.714565 | # Generated by Django 2.1.5 on 2019-01-22 08:33 | 1.374321 | 1 |
openstack-dashboard/openstack_dashboard/api/proxy.py | foruy/openflow-multiopenstack | 1 | 10552 | <filename>openstack-dashboard/openstack_dashboard/api/proxy.py
from django.conf import settings
#from proxyclient.v2 import client as proxy_client
from openstack_dashboard.utils import proxy_client
def proxyclient(request):
management_url = getattr(settings, 'MANAGEMENT_URL')
conn = proxy_client.Client(request.user.username,
request.user.token.id,
user_id=request.user.id,
project_id=request.user.project_id,
insecure=False,
cacert=None,
http_log_debug=settings.DEBUG)
conn.client.auth_token = request.user.token.id
conn.client.set_management_url(management_url)
return conn
def authenticate(request, username, password, **kwargs):
return proxyclient(request).users.authenticate(username, password, **kwargs)
def authenticate_by_zone(request, zone_id):
return proxyclient(request).users.authenticate_by_zone(request.user.id, zone_id)
def user_list(request):
return proxyclient(request).users.list()
def user_get(request):
return proxyclient(request).users.get(request.user.id)
def user_delete(request, user_id):
return proxyclient(request).users.delete(user_id)
def user_login_list(request, user_id=None):
return proxyclient(request).users.login_list(user_id=user_id)
def availability_zone_list(request, detail=False):
return proxyclient(request).zones.list(detail=detail)
def availability_zone_get(request, id):
return proxyclient(request).zones.get(id)
def zone_create(request, id=None, name=None, auth_url=None,
auth_token=None, default_instances=None):
return proxyclient(request).zones.create(
id=id, name=name, auth_url=auth_url, auth_token=auth_token,
default_instances=default_instances)
def zone_delete(request, zone_id):
proxyclient(request).zones.delete(zone_id)
#
#def logout(request):
# _proxy(request).logout(request.user.id)
def server_list(request, all_tenants=False):
return proxyclient(request).servers.list(all_tenants=all_tenants)
def server_get(request, instance_id):
return proxyclient(request).servers.get(instance_id)
def server_create(request, name, image, flavor, zone_id=None,
key_name=None, user_data=None, security_groups=None,
block_device_mapping=None, block_device_mapping_v2=None, nics=None,
availability_zone=None, instance_count=1, admin_pass=<PASSWORD>,
disk_config=None, accessIPv4=None, gateway=None, net_type=None): #cg
return proxyclient(request).servers.create(
name, image, flavor, zone_id=zone_id,
user_data=user_data, security_groups=security_groups,
key_name=key_name, block_device_mapping=block_device_mapping,
block_device_mapping_v2=block_device_mapping_v2,
nics=nics, availability_zone=availability_zone,
instance_count=instance_count, admin_pass=<PASSWORD>,
disk_config=disk_config, accessIPv4=accessIPv4,
gateway=gateway, netype=net_type)
def server_delete(request, instance_id):
proxyclient(request).servers.delete(instance_id)
def server_start(request, instance_id):
proxyclient(request).servers.start(instance_id)
def server_stop(request, instance_id):
proxyclient(request).servers.stop(instance_id)
def image_list_detailed(request, zone_id, filters=None):
return image_get(request, zone_id, filters=filters), False
def image_get(request, zone, filters=None):
return proxyclient(request).images.get(zone, filters=filters)
def image_delete(request, image_id):
proxyclient(request).images.delete(image_id)
def image_rebuild(request, zone):
return proxyclient(request).images.rebuild(zone)
def flavor_list(request, zone):
return proxyclient(request).flavors.get(zone)
def flavor_get_by_zone(request, zone):
return proxyclient(request).flavors.get(zone)
def flavor_delete(request, flavor_id):
proxyclient(request).flavors.delete(flavor_id)
def flavor_rebuild(request, zone):
return proxyclient(request).flavors.rebuild(zone)
def gateway_list(request):
return proxyclient(request).gateways.list()
def gateway_get(request, instance_id):
return proxyclient(request).gateways.get_by_instance(instance_id)
def gateway_get_by_zone(request, zone):
return proxyclient(request).gateways.get_by_zone(zone)
def gateway_delete(request, gateway_id):
proxyclient(request).gateways.delete(gateway_id)
def gateway_rebuild(request, zone):
return proxyclient(request).gateways.rebuild(zone)
def network_get_by_zone(request, zone):
return proxyclient(request).networks.get(zone)
def network_delete(request, network_id):
proxyclient(request).networks.delete(network_id)
def network_rebuild(request, zone):
return proxyclient(request).networks.rebuild(zone)
def network_type_list(request):
return proxyclient(request).networks.network_type_list()
def network_type_delete(request, id):
proxyclient(request).networks.network_type_delete(id)
def security_group_list(request):
return proxyclient(request).security_groups.list()
def security_group_update(request, **kwargs):
proxyclient(request).security_groups.update(**kwargs)
def firewall_list(request):
return proxyclient(request).firewalls.list()
def firewall_get(request, id):
return proxyclient(request).firewalls.get(id)
def firewall_create(request, instance_id, hostname, gateway_port,
service_port):
return proxyclient(request).firewalls.create(
instance_id=instance_id, hostname=hostname,
gateway_port=gateway_port, service_port=service_port)
def firewall_exist(request, instance_id, hostname=None, gateway_port=None):
return proxyclient(request).firewalls.exists(
instance_id, hostname=hostname, gateway_port=gateway_port)
def firewall_delete(request, firewall_id):
proxyclient(request).firewalls.delete(firewall_id)
#
def project_absolute_limits(request, zone_id):
return proxyclient(request).users.user_absolute_limits(zone_id)
def user_absolute_limits(request):
return proxyclient(request).users.user_absolute_limits()
def resource_list(request, user_id=None):
return proxyclient(request).resources.list(
user_id=user_id or request.user.id)
def resource_get(request, user_id=None, source_name=None, source_id=None):
filters = {'source_id': source_id, 'source_name': source_name}
return proxyclient(request).resources.get(
user_id or request.user.id, filters=filters)
def get_monitor(request, instance):
return proxyclient(request).servers.monitor(instance)
| <filename>openstack-dashboard/openstack_dashboard/api/proxy.py
from django.conf import settings
#from proxyclient.v2 import client as proxy_client
from openstack_dashboard.utils import proxy_client
def proxyclient(request):
management_url = getattr(settings, 'MANAGEMENT_URL')
conn = proxy_client.Client(request.user.username,
request.user.token.id,
user_id=request.user.id,
project_id=request.user.project_id,
insecure=False,
cacert=None,
http_log_debug=settings.DEBUG)
conn.client.auth_token = request.user.token.id
conn.client.set_management_url(management_url)
return conn
def authenticate(request, username, password, **kwargs):
return proxyclient(request).users.authenticate(username, password, **kwargs)
def authenticate_by_zone(request, zone_id):
return proxyclient(request).users.authenticate_by_zone(request.user.id, zone_id)
def user_list(request):
return proxyclient(request).users.list()
def user_get(request):
return proxyclient(request).users.get(request.user.id)
def user_delete(request, user_id):
return proxyclient(request).users.delete(user_id)
def user_login_list(request, user_id=None):
return proxyclient(request).users.login_list(user_id=user_id)
def availability_zone_list(request, detail=False):
return proxyclient(request).zones.list(detail=detail)
def availability_zone_get(request, id):
return proxyclient(request).zones.get(id)
def zone_create(request, id=None, name=None, auth_url=None,
auth_token=None, default_instances=None):
return proxyclient(request).zones.create(
id=id, name=name, auth_url=auth_url, auth_token=auth_token,
default_instances=default_instances)
def zone_delete(request, zone_id):
proxyclient(request).zones.delete(zone_id)
#
#def logout(request):
# _proxy(request).logout(request.user.id)
def server_list(request, all_tenants=False):
return proxyclient(request).servers.list(all_tenants=all_tenants)
def server_get(request, instance_id):
return proxyclient(request).servers.get(instance_id)
def server_create(request, name, image, flavor, zone_id=None,
key_name=None, user_data=None, security_groups=None,
block_device_mapping=None, block_device_mapping_v2=None, nics=None,
availability_zone=None, instance_count=1, admin_pass=<PASSWORD>,
disk_config=None, accessIPv4=None, gateway=None, net_type=None): #cg
return proxyclient(request).servers.create(
name, image, flavor, zone_id=zone_id,
user_data=user_data, security_groups=security_groups,
key_name=key_name, block_device_mapping=block_device_mapping,
block_device_mapping_v2=block_device_mapping_v2,
nics=nics, availability_zone=availability_zone,
instance_count=instance_count, admin_pass=<PASSWORD>,
disk_config=disk_config, accessIPv4=accessIPv4,
gateway=gateway, netype=net_type)
def server_delete(request, instance_id):
proxyclient(request).servers.delete(instance_id)
def server_start(request, instance_id):
proxyclient(request).servers.start(instance_id)
def server_stop(request, instance_id):
proxyclient(request).servers.stop(instance_id)
def image_list_detailed(request, zone_id, filters=None):
return image_get(request, zone_id, filters=filters), False
def image_get(request, zone, filters=None):
return proxyclient(request).images.get(zone, filters=filters)
def image_delete(request, image_id):
proxyclient(request).images.delete(image_id)
def image_rebuild(request, zone):
return proxyclient(request).images.rebuild(zone)
def flavor_list(request, zone):
return proxyclient(request).flavors.get(zone)
def flavor_get_by_zone(request, zone):
return proxyclient(request).flavors.get(zone)
def flavor_delete(request, flavor_id):
proxyclient(request).flavors.delete(flavor_id)
def flavor_rebuild(request, zone):
return proxyclient(request).flavors.rebuild(zone)
def gateway_list(request):
return proxyclient(request).gateways.list()
def gateway_get(request, instance_id):
return proxyclient(request).gateways.get_by_instance(instance_id)
def gateway_get_by_zone(request, zone):
return proxyclient(request).gateways.get_by_zone(zone)
def gateway_delete(request, gateway_id):
proxyclient(request).gateways.delete(gateway_id)
def gateway_rebuild(request, zone):
return proxyclient(request).gateways.rebuild(zone)
def network_get_by_zone(request, zone):
return proxyclient(request).networks.get(zone)
def network_delete(request, network_id):
proxyclient(request).networks.delete(network_id)
def network_rebuild(request, zone):
return proxyclient(request).networks.rebuild(zone)
def network_type_list(request):
return proxyclient(request).networks.network_type_list()
def network_type_delete(request, id):
proxyclient(request).networks.network_type_delete(id)
def security_group_list(request):
return proxyclient(request).security_groups.list()
def security_group_update(request, **kwargs):
proxyclient(request).security_groups.update(**kwargs)
def firewall_list(request):
return proxyclient(request).firewalls.list()
def firewall_get(request, id):
return proxyclient(request).firewalls.get(id)
def firewall_create(request, instance_id, hostname, gateway_port,
service_port):
return proxyclient(request).firewalls.create(
instance_id=instance_id, hostname=hostname,
gateway_port=gateway_port, service_port=service_port)
def firewall_exist(request, instance_id, hostname=None, gateway_port=None):
return proxyclient(request).firewalls.exists(
instance_id, hostname=hostname, gateway_port=gateway_port)
def firewall_delete(request, firewall_id):
proxyclient(request).firewalls.delete(firewall_id)
#
def project_absolute_limits(request, zone_id):
return proxyclient(request).users.user_absolute_limits(zone_id)
def user_absolute_limits(request):
return proxyclient(request).users.user_absolute_limits()
def resource_list(request, user_id=None):
return proxyclient(request).resources.list(
user_id=user_id or request.user.id)
def resource_get(request, user_id=None, source_name=None, source_id=None):
filters = {'source_id': source_id, 'source_name': source_name}
return proxyclient(request).resources.get(
user_id or request.user.id, filters=filters)
def get_monitor(request, instance):
return proxyclient(request).servers.monitor(instance)
| en | 0.484683 | #from proxyclient.v2 import client as proxy_client # #def logout(request): # _proxy(request).logout(request.user.id) #cg # | 2.029726 | 2 |
tfl_data.py | dongyan1024/overtime | 9 | 10553 | <reponame>dongyan1024/overtime<filename>tfl_data.py<gh_stars>1-10
import overtime as ot
times = ['14:00','14:05', '14:10', '14:15', '14:20', '14:25', '14:30', '14:35', '14:40', '14:45', '14:50', '14:55']
tfl_data = ot.TflInput(['victoria', 'central', 'bakerloo', 'piccadilly'], ['inbound', 'outbound'], times)
| import overtime as ot
times = ['14:00','14:05', '14:10', '14:15', '14:20', '14:25', '14:30', '14:35', '14:40', '14:45', '14:50', '14:55']
tfl_data = ot.TflInput(['victoria', 'central', 'bakerloo', 'piccadilly'], ['inbound', 'outbound'], times) | none | 1 | 2.16247 | 2 |
|
apps/dc_tools/odc/apps/dc_tools/fs_to_dc.py | opendatacube/odc-tools | 29 | 10554 | <filename>apps/dc_tools/odc/apps/dc_tools/fs_to_dc.py<gh_stars>10-100
import json
from pathlib import Path
import click
import datacube
from datacube.index.hl import Doc2Dataset
from odc.apps.dc_tools.utils import (
index_update_dataset,
update_if_exists,
allow_unsafe,
transform_stac,
)
from ._stac import stac_transform
from typing import Generator, Optional
import logging
import yaml
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
logging.basicConfig(
level=logging.WARNING,
format="%(asctime)s: %(levelname)s: %(message)s",
datefmt="%m/%d/%Y %I:%M:%S",
)
def _find_files(
path: str, glob: Optional[str] = None, stac: Optional[bool] = False
) -> Generator[Path, None, None]:
if glob is None:
glob = "**/*.json" if stac else "**/*.yaml"
return Path(path).glob(glob)
@click.command("fs-to-dc")
@click.argument("input_directory", type=str, nargs=1)
@update_if_exists
@allow_unsafe
@transform_stac
@click.option(
"--glob",
default=None,
help="File system glob to use, defaults to **/*.yaml or **/*.json for STAC.",
)
def cli(input_directory, update_if_exists, allow_unsafe, stac, glob):
dc = datacube.Datacube()
doc2ds = Doc2Dataset(dc.index)
if glob is None:
glob = "**/*.json" if stac else "**/*.yaml"
files_to_process = _find_files(input_directory, glob, stac=stac)
added, failed = 0, 0
for in_file in files_to_process:
with in_file.open() as f:
try:
                if in_file.suffix in (".yml", ".yaml"):
                    metadata = yaml.load(f, Loader=Loader)
else:
metadata = json.load(f)
# Do the STAC Transform if it's flagged
if stac:
metadata = stac_transform(metadata)
index_update_dataset(
metadata,
in_file.absolute().as_uri(),
dc=dc,
doc2ds=doc2ds,
update_if_exists=update_if_exists,
allow_unsafe=allow_unsafe,
)
added += 1
except Exception as e:
logging.exception(f"Failed to add dataset {in_file} with error {e}")
failed += 1
logging.info(f"Added {added} and failed {failed} datasets.")
if __name__ == "__main__":
cli()
| <filename>apps/dc_tools/odc/apps/dc_tools/fs_to_dc.py<gh_stars>10-100
import json
from pathlib import Path
import click
import datacube
from datacube.index.hl import Doc2Dataset
from odc.apps.dc_tools.utils import (
index_update_dataset,
update_if_exists,
allow_unsafe,
transform_stac,
)
from ._stac import stac_transform
from typing import Generator, Optional
import logging
import yaml
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
logging.basicConfig(
level=logging.WARNING,
format="%(asctime)s: %(levelname)s: %(message)s",
datefmt="%m/%d/%Y %I:%M:%S",
)
def _find_files(
path: str, glob: Optional[str] = None, stac: Optional[bool] = False
) -> Generator[Path, None, None]:
if glob is None:
glob = "**/*.json" if stac else "**/*.yaml"
return Path(path).glob(glob)
@click.command("fs-to-dc")
@click.argument("input_directory", type=str, nargs=1)
@update_if_exists
@allow_unsafe
@transform_stac
@click.option(
"--glob",
default=None,
help="File system glob to use, defaults to **/*.yaml or **/*.json for STAC.",
)
def cli(input_directory, update_if_exists, allow_unsafe, stac, glob):
dc = datacube.Datacube()
doc2ds = Doc2Dataset(dc.index)
if glob is None:
glob = "**/*.json" if stac else "**/*.yaml"
files_to_process = _find_files(input_directory, glob, stac=stac)
added, failed = 0, 0
for in_file in files_to_process:
with in_file.open() as f:
try:
                if in_file.suffix in (".yml", ".yaml"):
                    metadata = yaml.load(f, Loader=Loader)
else:
metadata = json.load(f)
# Do the STAC Transform if it's flagged
if stac:
metadata = stac_transform(metadata)
index_update_dataset(
metadata,
in_file.absolute().as_uri(),
dc=dc,
doc2ds=doc2ds,
update_if_exists=update_if_exists,
allow_unsafe=allow_unsafe,
)
added += 1
except Exception as e:
logging.exception(f"Failed to add dataset {in_file} with error {e}")
failed += 1
logging.info(f"Added {added} and failed {failed} datasets.")
if __name__ == "__main__":
cli()
| en | 0.822704 | # Do the STAC Transform if it's flagged | 2.131879 | 2 |
scripts/fullizer.py | stijm/jazzjackrabbit2 | 5 | 10555 | <gh_stars>1-10
"""
WARNING:
Using this script outside any server except one with IP 127.0.0.1 means risking getting
an instant and permanent ban, anywhere you use it.
The script was created *ONLY FOR LOCAL* testing purposes.
NEVER, NEVER, *NEVER* run it in an online multiplayer server.
At least unless you're a dumb freak.
"""
import multiprocessing
import time
from scripts import play
if __name__ == '__main__':
for i in range(1, 33):
process = multiprocessing.Process(
target=play,
kwargs=dict(nick=f'Player {i}', connect=['127.0.0.1'], new_sgip=False),
)
process.start()
time.sleep(0.09)
| """
WARNING:
Using this script outside any server except one with IP 127.0.0.1 means risking getting
an instant and permanent ban, anywhere you use it.
The script was created *ONLY FOR LOCAL* testing purposes.
NEVER, NEVER, *NEVER* run it in an online multiplayer server.
At least unless you're a dumb freak.
"""
import multiprocessing
import time
from scripts import play
if __name__ == '__main__':
for i in range(1, 33):
process = multiprocessing.Process(
target=play,
kwargs=dict(nick=f'Player {i}', connect=['127.0.0.1'], new_sgip=False),
)
process.start()
time.sleep(0.09) | en | 0.880217 | WARNING: Using this script outside any server except one with IP 127.0.0.1 means risking getting an instant and permanent ban, anywhere you use it. The script was created *ONLY FOR LOCAL* testing purposes. NEVER, NEVER, *NEVER* run it in an online multiplayer server. At least unless you're a dumb freak. | 2.283888 | 2 |
setup.py | pranithk/gluster-georep-tools | 0 | 10556 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
gluster-georep-tools.setup.py
:copyright: (c) 2016 by <NAME>
:license: MIT, see LICENSE for more details.
"""
from setuptools import setup
setup(
name="gluster-georep-tools",
version="0.2",
packages=["gluster_georep_tools",
"gluster_georep_tools.status",
"gluster_georep_tools.setup"],
include_package_data=True,
install_requires=['argparse', 'paramiko', 'glustercli'],
entry_points={
"console_scripts": [
"gluster-georep-setup = gluster_georep_tools.setup.cli:main",
"gluster-georep-status = gluster_georep_tools.status.cli:main",
]
},
platforms="linux",
zip_safe=False,
author="<NAME>",
author_email="<EMAIL>",
description="Gluster Geo-replication tools",
license="MIT",
keywords="gluster, tool, geo-replication",
url="https://github.com/aravindavk/gluster-georep-tools",
long_description="""
Gluster Geo-replication Tools
""",
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"Environment :: Console",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 2 :: Only"
],
)
| # -*- coding: utf-8 -*-
"""
gluster-georep-tools.setup.py
:copyright: (c) 2016 by <NAME>
:license: MIT, see LICENSE for more details.
"""
from setuptools import setup
setup(
name="gluster-georep-tools",
version="0.2",
packages=["gluster_georep_tools",
"gluster_georep_tools.status",
"gluster_georep_tools.setup"],
include_package_data=True,
install_requires=['argparse', 'paramiko', 'glustercli'],
entry_points={
"console_scripts": [
"gluster-georep-setup = gluster_georep_tools.setup.cli:main",
"gluster-georep-status = gluster_georep_tools.status.cli:main",
]
},
platforms="linux",
zip_safe=False,
author="<NAME>",
author_email="<EMAIL>",
description="Gluster Geo-replication tools",
license="MIT",
keywords="gluster, tool, geo-replication",
url="https://github.com/aravindavk/gluster-georep-tools",
long_description="""
Gluster Geo-replication Tools
""",
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"Environment :: Console",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 2 :: Only"
],
) | en | 0.754054 | # -*- coding: utf-8 -*- gluster-georep-tools.setup.py :copyright: (c) 2016 by <NAME> :license: MIT, see LICENSE for more details. Gluster Geo-replication Tools | 1.035642 | 1 |
docs/_downloads/dbc5873471dad3c21022112121cbd008/tensorboard_profiler_tutorial.py | woojinsong/PyTorch-tutorials-kr | 221 | 10557 | """
PyTorch Profiler With TensorBoard
====================================
This tutorial demonstrates how to use TensorBoard plugin with PyTorch Profiler
to detect performance bottlenecks of the model.
Introduction
------------
PyTorch 1.8 includes an updated profiler API capable of
recording the CPU side operations as well as the CUDA kernel launches on the GPU side.
The profiler can visualize this information
in TensorBoard Plugin and provide analysis of the performance bottlenecks.
In this tutorial, we will use a simple Resnet model to demonstrate how to
use TensorBoard plugin to analyze model performance.
Setup
-----
To install ``torch`` and ``torchvision`` use the following command:
::
pip install torch torchvision
"""
######################################################################
# Steps
# -----
#
# 1. Prepare the data and model
# 2. Use profiler to record execution events
# 3. Run the profiler
# 4. Use TensorBoard to view results and analyze model performance
# 5. Improve performance with the help of profiler
# 6. Analyze performance with other advanced features
#
# 1. Prepare the data and model
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# First, import all necessary libraries:
#
import torch
import torch.nn
import torch.optim
import torch.profiler
import torch.utils.data
import torchvision.datasets
import torchvision.models
import torchvision.transforms as T
######################################################################
# Then prepare the input data. For this tutorial, we use the CIFAR10 dataset.
# Transform it to the desired format and use DataLoader to load each batch.
transform = T.Compose(
[T.Resize(224),
T.ToTensor(),
T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
train_set = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True)
######################################################################
# Next, create Resnet model, loss function, and optimizer objects.
# To run on GPU, move model and loss to GPU device.
device = torch.device("cuda:0")
model = torchvision.models.resnet18(pretrained=True).cuda(device)
criterion = torch.nn.CrossEntropyLoss().cuda(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
model.train()
######################################################################
# Define the training step for each batch of input data.
def train(data):
inputs, labels = data[0].to(device=device), data[1].to(device=device)
outputs = model(inputs)
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
######################################################################
# 2. Use profiler to record execution events
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The profiler is enabled through the context manager and accepts several parameters,
# some of the most useful are:
#
# - ``schedule`` - callable that takes step (int) as a single parameter
# and returns the profiler action to perform at each step.
#
# In this example with ``wait=1, warmup=1, active=3, repeat=2``,
# profiler will skip the first step/iteration,
# start warming up on the second,
# record the following three iterations,
# after which the trace will become available and on_trace_ready (when set) is called.
# In total, the cycle repeats twice. Each cycle is called a "span" in TensorBoard plugin.
#
# During ``wait`` steps, the profiler is disabled.
# During ``warmup`` steps, the profiler starts tracing but the results are discarded.
# This is for reducing the profiling overhead.
#   The overhead at the beginning of profiling is high and can easily skew the profiling result.
# During ``active`` steps, the profiler works and records events.
# - ``on_trace_ready`` - callable that is called at the end of each cycle;
# In this example we use ``torch.profiler.tensorboard_trace_handler`` to generate result files for TensorBoard.
# After profiling, result files will be saved into the ``./log/resnet18`` directory.
# Specify this directory as a ``logdir`` parameter to analyze profile in TensorBoard.
# - ``record_shapes`` - whether to record shapes of the operator inputs.
# - ``profile_memory`` - Track tensor memory allocation/deallocation.
# - ``with_stack`` - Record source information (file and line number) for the ops.
# If the TensorBoard is launched in VSCode (`reference <https://code.visualstudio.com/docs/datascience/pytorch-support#_tensorboard-integration>`_),
# clicking a stack frame will navigate to the specific code line.
with torch.profiler.profile(
schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=2),
on_trace_ready=torch.profiler.tensorboard_trace_handler('./log/resnet18'),
record_shapes=True,
with_stack=True
) as prof:
for step, batch_data in enumerate(train_loader):
if step >= (1 + 1 + 3) * 2:
break
train(batch_data)
prof.step() # Need to call this at the end of each step to notify profiler of steps' boundary.
######################################################################
# 3. Run the profiler
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Run the above code. The profiling result will be saved under ``./log/resnet18`` directory.
######################################################################
# 4. Use TensorBoard to view results and analyze model performance
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Install PyTorch Profiler TensorBoard Plugin.
#
# ::
#
# pip install torch_tb_profiler
#
######################################################################
# Launch the TensorBoard.
#
# ::
#
# tensorboard --logdir=./log
#
######################################################################
# Open the TensorBoard profile URL in Google Chrome browser or Microsoft Edge browser.
#
# ::
#
# http://localhost:6006/#pytorch_profiler
#
######################################################################
# You could see Profiler plugin page as shown below.
#
# - Overview
# .. image:: ../../_static/img/profiler_overview1.png
# :scale: 25 %
#
# The overview shows a high-level summary of model performance.
#
# The "GPU Summary" panel shows the GPU configuration and the GPU usage.
# In this example, the GPU Utilization is low.
# The details of these metrics are `here <https://github.com/guyang3532/kineto/blob/readme/tb_plugin/docs/gpu_utilization.md>`_.
#
# The "Step Time Breakdown" shows distribution of time spent in each step over different categories of execution.
# In this example, you can see the ``DataLoader`` overhead is significant.
#
# The bottom "Performance Recommendation" uses the profiling data
# to automatically highlight likely bottlenecks,
# and gives you actionable optimization suggestions.
#
# You can change the view page in the left "Views" dropdown list.
#
# .. image:: ../../_static/img/profiler_views_list.png
# :alt:
#
#
# - Operator view
# The operator view displays the performance of every PyTorch operator
# that is executed either on the host or device.
#
# .. image:: ../../_static/img/profiler_operator_view.png
# :scale: 25 %
# The "Self" duration does not include its child operators’ time.
# The "Total" duration includes its child operators’ time.
#
# - View call stack
# Click the "View Callstack" of an operator, the operators with same name but different call stacks will be shown.
# Then click a "View Callstack" in this sub-table, the call stack frames will be shown.
#
# .. image:: ../../_static/img/profiler_callstack.png
# :scale: 25 %
#
# If the TensorBoard is launched inside VSCode
# (`Launch Guide <https://devblogs.microsoft.com/python/python-in-visual-studio-code-february-2021-release/#tensorboard-integration>`_),
# clicking a call stack frame will navigate to the specific code line.
#
# .. image:: ../../_static/img/profiler_vscode.png
# :scale: 25 %
#
#
# - Kernel view
# The GPU kernel view shows all kernels’ time spent on GPU.
#
# .. image:: ../../_static/img/profiler_kernel_view.png
# :scale: 25 %
# Mean Blocks per SM:
# Blocks per SM = Blocks of this kernel / SM number of this GPU.
# If this number is less than 1, it indicates the GPU multiprocessors are not fully utilized.
# "Mean Blocks per SM" is weighted average of all runs of this kernel name, using each run’s duration as weight.
#
# Mean Est. Achieved Occupancy:
# Est. Achieved Occupancy is defined in this column’s tooltip.
# For most cases such as memory bandwidth bounded kernels, the higher the better.
# "Mean Est. Achieved Occupancy" is weighted average of all runs of this kernel name,
# using each run’s duration as weight.
#
# - Trace view
# The trace view shows timeline of profiled operators and GPU kernels.
# You can select it to see details as below.
#
# .. image:: ../../_static/img/profiler_trace_view1.png
# :scale: 25 %
#
# You can move the graph and zoom in/out with the help of right side toolbar.
# And keyboard can also be used to zoom and move around inside the timeline.
# The ‘w’ and ‘s’ keys zoom in centered around the mouse,
# and the ‘a’ and ‘d’ keys move the timeline left and right.
# You can hit these keys multiple times until you see a readable representation.
#
# In this example, we can see the event prefixed with ``enumerate(DataLoader)`` costs a lot of time.
# And during most of this period, the GPU is idle.
# Because this function is loading data and transforming data on host side,
# during which the GPU resource is wasted.
######################################################################
# 5. Improve performance with the help of profiler
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# At the bottom of the "Overview" page, the suggestion in "Performance Recommendation" hints that the bottleneck is the DataLoader.
# The PyTorch DataLoader uses a single process by default.
# You can enable multi-process data loading by setting the parameter ``num_workers``.
# See `here <https://pytorch.org/docs/stable/data.html#single-and-multi-process-data-loading>`_ for more details.
#
# In this example, we follow the "Performance Recommendation" and set ``num_workers`` as below,
# pass a different name such as ``./log/resnet18_4workers`` to ``tensorboard_trace_handler``, and run it again.
#
# ::
#
# train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True, num_workers=4)
#
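# As a minimal sketch (not part of the original run above), the full re-profiled
# run simply repeats the profiling loop from step 2, with ``num_workers=4`` on the
# DataLoader and the new trace directory passed to ``tensorboard_trace_handler``:
train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True, num_workers=4)
with torch.profiler.profile(
        schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=2),
        on_trace_ready=torch.profiler.tensorboard_trace_handler('./log/resnet18_4workers'),
        record_shapes=True,
        with_stack=True
) as prof:
    for step, batch_data in enumerate(train_loader):
        if step >= (1 + 1 + 3) * 2:
            break
        train(batch_data)
        prof.step()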
######################################################################
# Then let’s choose the recently profiled run in the left "Runs" dropdown list.
#
# .. image:: ../../_static/img/profiler_overview2.png
# :scale: 25 %
#
# From the above view, we can see that the step time is reduced to about 58ms, compared with the previous run’s 121ms,
# and most of the reduction comes from ``DataLoader``.
#
# .. image:: ../../_static/img/profiler_trace_view2.png
# :scale: 25 %
#
# From the above view, we can see that the runtime of ``enumerate(DataLoader)`` is reduced,
# and the GPU utilization is increased.
######################################################################
# 6. Analyze performance with other advanced features
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# - Memory view
#   To profile memory, please add ``profile_memory=True`` in arguments of ``torch.profiler.profile``
#   (a minimal sketch of such a run is shown at the end of this section).
#
# Note: Because of the current non-optimized implementation of PyTorch profiler,
# enabling ``profile_memory=True`` will take about several minutes to finish.
# To save time, you can try our existing examples first by running:
#
# ::
#
# tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/memory_demo
#
# The profiler records all memory allocation/release events during profiling.
# For every specific operator, the plugin aggregates all these memory events inside its life span.
#
# .. image:: ../../_static/img/profiler_memory_view.png
# :scale: 25 %
#
# The memory type could be selected in "Device" selection box.
# For example, "GPU0" means the following table only shows each operator’s memory usage on GPU 0, not including CPU or other GPUs.
#
# The "Size Increase" sums up all allocation bytes and minus all the memory release bytes.
#
# The "Allocation Size" sums up all allocation bytes without considering the memory release.
#
# - Distributed view
# The plugin now supports distributed view on profiling DDP with NCCL as backend.
#
# You can try it by using existing example on Azure:
#
# ::
#
# tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/distributed_bert
#
# .. image:: ../../_static/img/profiler_distributed_view.png
# :scale: 25 %
#
# The "Computation/Communication Overview" shows computation/communication ratio and their overlapping degree.
# From this view, users can figure out load-balance issues among workers.
# For example, if the computation + overlapping time of one worker is much larger than that of the others,
# there may be a load-balance problem, or this worker may be a straggler.
#
# The "Synchronizing/Communication Overview" shows the efficiency of communication.
# "Data Transfer Time" is the time for actual data exchanging.
# "Synchronizing Time" is the time for waiting and synchronizing with other workers.
#
# If one worker’s "Synchronizing Time" is much shorter than that of the other workers,
# this worker may be a straggler with a heavier computation workload than the others.
#
# The "Communication Operations Stats" summarizes the detailed statistics of all communication ops in each worker.
######################################################################
# Learn More
# ----------
#
# Take a look at the following documents to continue your learning,
# and feel free to open an issue `here <https://github.com/pytorch/kineto/issues>`_.
#
# - `Pytorch TensorBoard Profiler github <https://github.com/pytorch/kineto/tree/master/tb_plugin>`_
# - `torch.profiler API <https://pytorch.org/docs/master/profiler.html>`_
| """
PyTorch Profiler With TensorBoard
====================================
This tutorial demonstrates how to use TensorBoard plugin with PyTorch Profiler
to detect performance bottlenecks of the model.
Introduction
------------
PyTorch 1.8 includes an updated profiler API capable of
recording the CPU side operations as well as the CUDA kernel launches on the GPU side.
The profiler can visualize this information
in TensorBoard Plugin and provide analysis of the performance bottlenecks.
In this tutorial, we will use a simple Resnet model to demonstrate how to
use TensorBoard plugin to analyze model performance.
Setup
-----
To install ``torch`` and ``torchvision`` use the following command:
::
pip install torch torchvision
"""
######################################################################
# Steps
# -----
#
# 1. Prepare the data and model
# 2. Use profiler to record execution events
# 3. Run the profiler
# 4. Use TensorBoard to view results and analyze model performance
# 5. Improve performance with the help of profiler
# 6. Analyze performance with other advanced features
#
# 1. Prepare the data and model
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# First, import all necessary libraries:
#
import torch
import torch.nn
import torch.optim
import torch.profiler
import torch.utils.data
import torchvision.datasets
import torchvision.models
import torchvision.transforms as T
######################################################################
# Then prepare the input data. For this tutorial, we use the CIFAR10 dataset.
# Transform it to the desired format and use DataLoader to load each batch.
transform = T.Compose(
[T.Resize(224),
T.ToTensor(),
T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
train_set = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True)
######################################################################
# Next, create Resnet model, loss function, and optimizer objects.
# To run on GPU, move model and loss to GPU device.
device = torch.device("cuda:0")
model = torchvision.models.resnet18(pretrained=True).cuda(device)
criterion = torch.nn.CrossEntropyLoss().cuda(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
model.train()
######################################################################
# Define the training step for each batch of input data.
def train(data):
inputs, labels = data[0].to(device=device), data[1].to(device=device)
outputs = model(inputs)
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
######################################################################
# 2. Use profiler to record execution events
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The profiler is enabled through the context manager and accepts several parameters,
# some of the most useful are:
#
# - ``schedule`` - callable that takes step (int) as a single parameter
# and returns the profiler action to perform at each step.
#
# In this example with ``wait=1, warmup=1, active=3, repeat=2``,
# profiler will skip the first step/iteration,
# start warming up on the second,
# record the following three iterations,
# after which the trace will become available and on_trace_ready (when set) is called.
# In total, the cycle repeats twice. Each cycle is called a "span" in TensorBoard plugin.
#
# During ``wait`` steps, the profiler is disabled.
# During ``warmup`` steps, the profiler starts tracing but the results are discarded.
# This is for reducing the profiling overhead.
#   The overhead at the beginning of profiling is high and can easily skew the profiling result.
# During ``active`` steps, the profiler works and records events.
# - ``on_trace_ready`` - callable that is called at the end of each cycle;
# In this example we use ``torch.profiler.tensorboard_trace_handler`` to generate result files for TensorBoard.
# After profiling, result files will be saved into the ``./log/resnet18`` directory.
# Specify this directory as a ``logdir`` parameter to analyze profile in TensorBoard.
# - ``record_shapes`` - whether to record shapes of the operator inputs.
# - ``profile_memory`` - Track tensor memory allocation/deallocation.
# - ``with_stack`` - Record source information (file and line number) for the ops.
# If the TensorBoard is launched in VSCode (`reference <https://code.visualstudio.com/docs/datascience/pytorch-support#_tensorboard-integration>`_),
# clicking a stack frame will navigate to the specific code line.
with torch.profiler.profile(
schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=2),
on_trace_ready=torch.profiler.tensorboard_trace_handler('./log/resnet18'),
record_shapes=True,
with_stack=True
) as prof:
for step, batch_data in enumerate(train_loader):
if step >= (1 + 1 + 3) * 2:
break
train(batch_data)
prof.step() # Need to call this at the end of each step to notify profiler of steps' boundary.
######################################################################
# 3. Run the profiler
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Run the above code. The profiling result will be saved under ``./log/resnet18`` directory.
######################################################################
# 4. Use TensorBoard to view results and analyze model performance
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Install PyTorch Profiler TensorBoard Plugin.
#
# ::
#
# pip install torch_tb_profiler
#
######################################################################
# Launch the TensorBoard.
#
# ::
#
# tensorboard --logdir=./log
#
######################################################################
# Open the TensorBoard profile URL in Google Chrome browser or Microsoft Edge browser.
#
# ::
#
# http://localhost:6006/#pytorch_profiler
#
######################################################################
# You could see Profiler plugin page as shown below.
#
# - Overview
# .. image:: ../../_static/img/profiler_overview1.png
# :scale: 25 %
#
# The overview shows a high-level summary of model performance.
#
# The "GPU Summary" panel shows the GPU configuration and the GPU usage.
# In this example, the GPU Utilization is low.
# The details of these metrics are `here <https://github.com/guyang3532/kineto/blob/readme/tb_plugin/docs/gpu_utilization.md>`_.
#
# The "Step Time Breakdown" shows distribution of time spent in each step over different categories of execution.
# In this example, you can see the ``DataLoader`` overhead is significant.
#
# The bottom "Performance Recommendation" uses the profiling data
# to automatically highlight likely bottlenecks,
# and gives you actionable optimization suggestions.
#
# You can change the view page in the left "Views" dropdown list.
#
# .. image:: ../../_static/img/profiler_views_list.png
# :alt:
#
#
# - Operator view
# The operator view displays the performance of every PyTorch operator
# that is executed either on the host or device.
#
# .. image:: ../../_static/img/profiler_operator_view.png
# :scale: 25 %
# The "Self" duration does not include its child operators’ time.
# The "Total" duration includes its child operators’ time.
#
# - View call stack
# Click the "View Callstack" of an operator, the operators with same name but different call stacks will be shown.
# Then click a "View Callstack" in this sub-table, the call stack frames will be shown.
#
# .. image:: ../../_static/img/profiler_callstack.png
# :scale: 25 %
#
#     If TensorBoard is launched inside VSCode
#     (`Launch Guide <https://devblogs.microsoft.com/python/python-in-visual-studio-code-february-2021-release/#tensorboard-integration>`_),
#     clicking a call stack frame will navigate to the specific line of code.
#
# .. image:: ../../_static/img/profiler_vscode.png
# :scale: 25 %
#
#
# - Kernel view
#     The GPU kernel view shows the time each kernel spent on the GPU.
#
# .. image:: ../../_static/img/profiler_kernel_view.png
# :scale: 25 %
#     Mean Blocks per SM:
#     Blocks per SM = blocks of this kernel / number of SMs on this GPU.
#     If this number is less than 1, the GPU multiprocessors are not fully utilized.
#     "Mean Blocks per SM" is the weighted average over all runs of this kernel name, using each run’s duration as the weight.
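#     For example (hypothetical numbers): a kernel launched with 40 blocks on a GPU with 80 SMs
#     gives Blocks per SM = 40 / 80 = 0.5, so at most half of the multiprocessors can be kept busy by this kernel.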
#
# Mean Est. Achieved Occupancy:
# Est. Achieved Occupancy is defined in this column’s tooltip.
#     For most cases, such as memory-bandwidth-bound kernels, higher is better.
#     "Mean Est. Achieved Occupancy" is the weighted average over all runs of this kernel name,
#     using each run’s duration as the weight.
#
# - Trace view
#     The trace view shows a timeline of the profiled operators and GPU kernels.
#     You can select it to see the details shown below.
#
# .. image:: ../../_static/img/profiler_trace_view1.png
# :scale: 25 %
#
#     You can move the graph and zoom in/out with the toolbar on the right side.
#     The keyboard can also be used to zoom and move around inside the timeline:
#     the ‘w’ and ‘s’ keys zoom in and out centered on the mouse,
#     and the ‘a’ and ‘d’ keys move the timeline left and right.
#     You can press these keys several times until the view is readable.
#
#     In this example, we can see that the event prefixed with ``enumerate(DataLoader)`` costs a lot of time,
#     and during most of this period the GPU is idle.
#     This function loads and transforms data on the host side,
#     so GPU resources are wasted while it runs.
######################################################################
# 5. Improve performance with the help of profiler
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# At the bottom of "Overview" page, the suggestion in "Performance Recommendation" hints the bottleneck is DataLoader.
# The PyTorch DataLoader uses single process by default.
# User could enable multi-process data loading by setting the parameter ``num_workers``.
# `Here <https://pytorch.org/docs/stable/data.html#single-and-multi-process-data-loading>`_ is more details.
#
# In this example, we follow the "Performance Recommendation" and set ``num_workers`` as below,
# pass a different name such as ``./log/resnet18_4workers`` to ``tensorboard_trace_handler``, and run it again.
#
# ::
#
# train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True, num_workers=4)
#
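######################################################################
# For completeness, here is a sketch of the matching profiler call for this second run;
# the only change from the earlier snippet is the log directory passed to the trace handler:
#
# ::
#
#     with torch.profiler.profile(
#             schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=2),
#             on_trace_ready=torch.profiler.tensorboard_trace_handler('./log/resnet18_4workers'),
#             record_shapes=True,
#             with_stack=True
#     ) as prof:
#         for step, batch_data in enumerate(train_loader):
#             if step >= (1 + 1 + 3) * 2:
#                 break
#             train(batch_data)
#             prof.step()
#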
######################################################################
# Then let’s choose the recently profiled run in the left "Runs" dropdown list.
#
# .. image:: ../../_static/img/profiler_overview2.png
# :scale: 25 %
#
# From the above view, we can see that the step time is reduced to about 58 ms, compared with the previous run's 121 ms (roughly a 2x speedup),
# and that the reduction comes mainly from ``DataLoader``.
#
# .. image:: ../../_static/img/profiler_trace_view2.png
# :scale: 25 %
#
# From the above view, we can see that the runtime of ``enumerate(DataLoader)`` is reduced,
# and the GPU utilization is increased.
######################################################################
# 6. Analyze performance with other advanced features
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# - Memory view
#     To profile memory, add ``profile_memory=True`` to the arguments of ``torch.profiler.profile``.
#
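#     As a sketch, the profiling call shown earlier would only gain this one extra argument
#     (everything else unchanged):
#
#     ::
#
#         with torch.profiler.profile(
#                 schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=2),
#                 on_trace_ready=torch.profiler.tensorboard_trace_handler('./log/resnet18'),
#                 record_shapes=True,
#                 profile_memory=True,
#                 with_stack=True
#         ) as prof:
#             ...
#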
#     Note: Because of the current non-optimized implementation of the PyTorch profiler,
#     enabling ``profile_memory=True`` may take several minutes to finish.
# To save time, you can try our existing examples first by running:
#
# ::
#
# tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/memory_demo
#
# The profiler records all memory allocation/release events during profiling.
#     For each operator, the plugin aggregates all of the memory events that occur within its life span.
#
# .. image:: ../../_static/img/profiler_memory_view.png
# :scale: 25 %
#
#     The memory type can be selected in the "Device" selection box.
#     For example, "GPU0" means the following table only shows each operator’s memory usage on GPU 0, not including the CPU or other GPUs.
#
#     The "Size Increase" sums all allocated bytes and subtracts all released bytes.
#
#     The "Allocation Size" sums all allocated bytes without considering the releases.
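#
#     For example (hypothetical numbers): an operator that allocates 40 MB and releases 30 MB during its
#     life span is reported with an "Allocation Size" of 40 MB and a "Size Increase" of 10 MB.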
#
# - Distributed view
#     The plugin now supports a distributed view for profiling DDP with NCCL as the backend.
#
#     You can try it by using an existing example on Azure:
#
# ::
#
# tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/distributed_bert
#
# .. image:: ../../_static/img/profiler_distributed_view.png
# :scale: 25 %
#
#     The "Computation/Communication Overview" shows the computation-to-communication ratio and their degree of overlap.
#     From this view, you can identify load-balancing issues among the workers.
#     For example, if the computation + overlapping time of one worker is much larger than that of the others,
#     there may be a load-balancing problem, or this worker may be a straggler.
#
# The "Synchronizing/Communication Overview" shows the efficiency of communication.
#     "Data Transfer Time" is the time spent actually exchanging data.
#     "Synchronizing Time" is the time spent waiting for and synchronizing with other workers.
#
#     If one worker’s "Synchronizing Time" is much shorter than that of the other workers,
#     this worker may be a straggler that carries a larger computation workload than the others.
#
# The "Communication Operations Stats" summarizes the detailed statistics of all communication ops in each worker.
######################################################################
# Learn More
# ----------
#
# Take a look at the following documents to continue your learning,
# and feel free to open an issue `here <https://github.com/pytorch/kineto/issues>`_.
#
# - `PyTorch TensorBoard Profiler GitHub <https://github.com/pytorch/kineto/tree/master/tb_plugin>`_
# - `torch.profiler API <https://pytorch.org/docs/master/profiler.html>`_
| en | 0.722221 | PyTorch Profiler With TensorBoard ==================================== This tutorial demonstrates how to use TensorBoard plugin with PyTorch Profiler to detect performance bottlenecks of the model. Introduction ------------ PyTorch 1.8 includes an updated profiler API capable of recording the CPU side operations as well as the CUDA kernel launches on the GPU side. The profiler can visualize this information in TensorBoard Plugin and provide analysis of the performance bottlenecks. In this tutorial, we will use a simple Resnet model to demonstrate how to use TensorBoard plugin to analyze model performance. Setup ----- To install ``torch`` and ``torchvision`` use the following command: :: pip install torch torchvision ###################################################################### # Steps # ----- # # 1. Prepare the data and model # 2. Use profiler to record execution events # 3. Run the profiler # 4. Use TensorBoard to view results and analyze model performance # 5. Improve performance with the help of profiler # 6. Analyze performance with other advanced features # # 1. Prepare the data and model # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # First, import all necessary libraries: # ###################################################################### # Then prepare the input data. For this tutorial, we use the CIFAR10 dataset. # Transform it to the desired format and use DataLoader to load each batch. ###################################################################### # Next, create Resnet model, loss function, and optimizer objects. # To run on GPU, move model and loss to GPU device. ###################################################################### # Define the training step for each batch of input data. ###################################################################### # 2. Use profiler to record execution events # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # The profiler is enabled through the context manager and accepts several parameters, # some of the most useful are: # # - ``schedule`` - callable that takes step (int) as a single parameter # and returns the profiler action to perform at each step. # # In this example with ``wait=1, warmup=1, active=3, repeat=2``, # profiler will skip the first step/iteration, # start warming up on the second, # record the following three iterations, # after which the trace will become available and on_trace_ready (when set) is called. # In total, the cycle repeats twice. Each cycle is called a "span" in TensorBoard plugin. # # During ``wait`` steps, the profiler is disabled. # During ``warmup`` steps, the profiler starts tracing but the results are discarded. # This is for reducing the profiling overhead. # The overhead at the beginning of profiling is high and easy to bring skew to the profiling result. # During ``active`` steps, the profiler works and records events. # - ``on_trace_ready`` - callable that is called at the end of each cycle; # In this example we use ``torch.profiler.tensorboard_trace_handler`` to generate result files for TensorBoard. # After profiling, result files will be saved into the ``./log/resnet18`` directory. # Specify this directory as a ``logdir`` parameter to analyze profile in TensorBoard. # - ``record_shapes`` - whether to record shapes of the operator inputs. # - ``profile_memory`` - Track tensor memory allocation/deallocation. # - ``with_stack`` - Record source information (file and line number) for the ops. 
# If the TensorBoard is launched in VSCode (`reference <https://code.visualstudio.com/docs/datascience/pytorch-support#_tensorboard-integration>`_), # clicking a stack frame will navigate to the specific code line. # Need to call this at the end of each step to notify profiler of steps' boundary. ###################################################################### # 3. Run the profiler # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Run the above code. The profiling result will be saved under ``./log/resnet18`` directory. ###################################################################### # 4. Use TensorBoard to view results and analyze model performance # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Install PyTorch Profiler TensorBoard Plugin. # # :: # # pip install torch_tb_profiler # ###################################################################### # Launch the TensorBoard. # # :: # # tensorboard --logdir=./log # ###################################################################### # Open the TensorBoard profile URL in Google Chrome browser or Microsoft Edge browser. # # :: # # http://localhost:6006/#pytorch_profiler # ###################################################################### # You could see Profiler plugin page as shown below. # # - Overview # .. image:: ../../_static/img/profiler_overview1.png # :scale: 25 % # # The overview shows a high-level summary of model performance. # # The "GPU Summary" panel shows the GPU configuration and the GPU usage. # In this example, the GPU Utilization is low. # The details of these metrics are `here <https://github.com/guyang3532/kineto/blob/readme/tb_plugin/docs/gpu_utilization.md>`_. # # The "Step Time Breakdown" shows distribution of time spent in each step over different categories of execution. # In this example, you can see the ``DataLoader`` overhead is significant. # # The bottom "Performance Recommendation" uses the profiling data # to automatically highlight likely bottlenecks, # and gives you actionable optimization suggestions. # # You can change the view page in left "Views" dropdown list. # # .. image:: ../../_static/img/profiler_views_list.png # :alt: # # # - Operator view # The operator view displays the performance of every PyTorch operator # that is executed either on the host or device. # # .. image:: ../../_static/img/profiler_operator_view.png # :scale: 25 % # The "Self" duration does not include its child operators’ time. # The "Total" duration includes its child operators’ time. # # - View call stack # Click the "View Callstack" of an operator, the operators with same name but different call stacks will be shown. # Then click a "View Callstack" in this sub-table, the call stack frames will be shown. # # .. image:: ../../_static/img/profiler_callstack.png # :scale: 25 % # # If the TensorBoard is launched inside VSCode # (`Launch Guide <https://devblogs.microsoft.com/python/python-in-visual-studio-code-february-2021-release/#tensorboard-integration>`_), # clicking a call stack frame will navigate to the specific code line. # # .. image:: ../../_static/img/profiler_vscode.png # :scale: 25 % # # # - Kernel view # The GPU kernel view shows all kernels’ time spent on GPU. # # .. image:: ../../_static/img/profiler_kernel_view.png # :scale: 25 % # Mean Blocks per SM: # Blocks per SM = Blocks of this kernel / SM number of this GPU. # If this number is less than 1, it indicates the GPU multiprocessors are not fully utilized. 
# "Mean Blocks per SM" is weighted average of all runs of this kernel name, using each run’s duration as weight. # # Mean Est. Achieved Occupancy: # Est. Achieved Occupancy is defined in this column’s tooltip. # For most cases such as memory bandwidth bounded kernels, the higher the better. # "Mean Est. Achieved Occupancy" is weighted average of all runs of this kernel name, # using each run’s duration as weight. # # - Trace view # The trace view shows timeline of profiled operators and GPU kernels. # You can select it to see details as below. # # .. image:: ../../_static/img/profiler_trace_view1.png # :scale: 25 % # # You can move the graph and zoom in/out with the help of right side toolbar. # And keyboard can also be used to zoom and move around inside the timeline. # The ‘w’ and ‘s’ keys zoom in centered around the mouse, # and the ‘a’ and ‘d’ keys move the timeline left and right. # You can hit these keys multiple times until you see a readable representation. # # In this example, we can see the event prefixed with ``enumerate(DataLoader)`` costs a lot of time. # And during most of this period, the GPU is idle. # Because this function is loading data and transforming data on host side, # during which the GPU resource is wasted. ###################################################################### # 5. Improve performance with the help of profiler # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # At the bottom of "Overview" page, the suggestion in "Performance Recommendation" hints the bottleneck is DataLoader. # The PyTorch DataLoader uses single process by default. # User could enable multi-process data loading by setting the parameter ``num_workers``. # `Here <https://pytorch.org/docs/stable/data.html#single-and-multi-process-data-loading>`_ is more details. # # In this example, we follow the "Performance Recommendation" and set ``num_workers`` as below, # pass a different name such as ``./log/resnet18_4workers`` to ``tensorboard_trace_handler``, and run it again. # # :: # # train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True, num_workers=4) # ###################################################################### # Then let’s choose the recently profiled run in left "Runs" dropdown list. # # .. image:: ../../_static/img/profiler_overview2.png # :scale: 25 % # # From the above view, we can find the step time is reduced to about 58ms comparing with previous run's 121ms, # and the time reduction of ``DataLoader`` mainly contributes. # # .. image:: ../../_static/img/profiler_trace_view2.png # :scale: 25 % # # From the above view, we can see that the runtime of ``enumerate(DataLoader)`` is reduced, # and the GPU utilization is increased. ###################################################################### # 6. Analyze performance with other advanced features # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # - Memory view # To profile memory, please add ``profile_memory=True`` in arguments of ``torch.profiler.profile``. # # Note: Because of the current non-optimized implementation of PyTorch profiler, # enabling ``profile_memory=True`` will take about several minutes to finish. # To save time, you can try our existing examples first by running: # # :: # # tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/memory_demo # # The profiler records all memory allocation/release events during profiling. 
# For every specific operator, the plugin aggregates all these memory events inside its life span. # # .. image:: ../../_static/img/profiler_memory_view.png # :scale: 25 % # # The memory type could be selected in "Device" selection box. # For example, "GPU0" means the following table only shows each operator’s memory usage on GPU 0, not including CPU or other GPUs. # # The "Size Increase" sums up all allocation bytes and minus all the memory release bytes. # # The "Allocation Size" sums up all allocation bytes without considering the memory release. # # - Distributed view # The plugin now supports distributed view on profiling DDP with NCCL as backend. # # You can try it by using existing example on Azure: # # :: # # tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/distributed_bert # # .. image:: ../../_static/img/profiler_distributed_view.png # :scale: 25 % # # The "Computation/Communication Overview" shows computation/communication ratio and their overlapping degree. # From this view, User can figure out load balance issue among workers. # For example, if the computation + overlapping time of one worker is much larger than others, # there may be a problem of load balance or this worker may be a straggler. # # The "Synchronizing/Communication Overview" shows the efficiency of communication. # "Data Transfer Time" is the time for actual data exchanging. # "Synchronizing Time" is the time for waiting and synchronizing with other workers. # # If one worker’s "Synchronizing Time" is much shorter than that of other workers’, # this worker may be a straggler which may have more computation workload than other workers’. # # The "Communication Operations Stats" summarizes the detailed statistics of all communication ops in each worker. ###################################################################### # Learn More # ---------- # # Take a look at the following documents to continue your learning, # and feel free to open an issue `here <https://github.com/pytorch/kineto/issues>`_. # # - `Pytorch TensorBoard Profiler github <https://github.com/pytorch/kineto/tree/master/tb_plugin>`_ # - `torch.profiler API <https://pytorch.org/docs/master/profiler.html>`_ | 2.962076 | 3 |
imgtoch/__init__.py | hrpzcf/imgtoch | 0 | 10558 | # coding: utf-8
from .__utils__ import grayscaleOf, makeImage, sortByGrayscale
NAME = "imgtoch"
VERSIONNUM = 0, 2, 3
VERSION = ".".join(map(str, VERSIONNUM))
AUTHOR = "hrpzcf"
EMAIL = "<EMAIL>"
WEBSITE = "https://gitee.com/hrpzcf/imgtoch"
__all__ = ["grayscaleOf", "makeImage", "sortByGrayscale"]
| # coding: utf-8
from .__utils__ import grayscaleOf, makeImage, sortByGrayscale
NAME = "imgtoch"
VERSIONNUM = 0, 2, 3
VERSION = ".".join(map(str, VERSIONNUM))
AUTHOR = "hrpzcf"
EMAIL = "<EMAIL>"
WEBSITE = "https://gitee.com/hrpzcf/imgtoch"
__all__ = ["grayscaleOf", "makeImage", "sortByGrayscale"]
| en | 0.833554 | # coding: utf-8 | 1.599772 | 2 |
packages/gradient_boosting_model/gradient_boosting_model/processing/validation.py | g-nightingale/testing-and-monitoring-ml-deployments | 99 | 10559 | import typing as t
from gradient_boosting_model.config.core import config
import numpy as np
import pandas as pd
from marshmallow import fields, Schema, ValidationError
class HouseDataInputSchema(Schema):
Alley = fields.Str(allow_none=True)
BedroomAbvGr = fields.Integer()
BldgType = fields.Str()
BsmtCond = fields.Str(allow_none=True)
BsmtExposure = fields.Str(allow_none=True)
BsmtFinSF1 = fields.Float(allow_none=True)
BsmtFinSF2 = fields.Float(allow_none=True)
BsmtFinType1 = fields.Str(allow_none=True)
BsmtFinType2 = fields.Str(allow_none=True)
BsmtFullBath = fields.Float(allow_none=True)
BsmtHalfBath = fields.Float(allow_none=True)
BsmtQual = fields.Str(allow_none=True)
BsmtUnfSF = fields.Float()
CentralAir = fields.Str()
Condition1 = fields.Str()
Condition2 = fields.Str()
Electrical = fields.Str(allow_none=True)
EnclosedPorch = fields.Integer()
ExterCond = fields.Str()
ExterQual = fields.Str()
Exterior1st = fields.Str(allow_none=True)
Exterior2nd = fields.Str(allow_none=True)
Fence = fields.Str(allow_none=True)
FireplaceQu = fields.Str(allow_none=True)
Fireplaces = fields.Integer()
Foundation = fields.Str()
FullBath = fields.Integer()
Functional = fields.Str(allow_none=True)
GarageArea = fields.Float()
GarageCars = fields.Float()
GarageCond = fields.Str(allow_none=True)
GarageFinish = fields.Str(allow_none=True)
GarageQual = fields.Str(allow_none=True)
GarageType = fields.Str(allow_none=True)
GarageYrBlt = fields.Float(allow_none=True)
GrLivArea = fields.Integer()
HalfBath = fields.Integer()
Heating = fields.Str()
HeatingQC = fields.Str()
HouseStyle = fields.Str()
Id = fields.Integer()
KitchenAbvGr = fields.Integer()
KitchenQual = fields.Str(allow_none=True)
LandContour = fields.Str()
LandSlope = fields.Str()
LotArea = fields.Integer()
LotConfig = fields.Str()
LotFrontage = fields.Float(allow_none=True)
LotShape = fields.Str()
LowQualFinSF = fields.Integer()
MSSubClass = fields.Integer()
MSZoning = fields.Str(allow_none=True)
MasVnrArea = fields.Float(allow_none=True)
MasVnrType = fields.Str(allow_none=True)
MiscFeature = fields.Str(allow_none=True)
MiscVal = fields.Integer()
MoSold = fields.Integer()
Neighborhood = fields.Str()
OpenPorchSF = fields.Integer()
OverallCond = fields.Integer()
OverallQual = fields.Integer()
PavedDrive = fields.Str()
PoolArea = fields.Integer()
PoolQC = fields.Str(allow_none=True)
RoofMatl = fields.Str()
RoofStyle = fields.Str()
SaleCondition = fields.Str()
SaleType = fields.Str(allow_none=True)
ScreenPorch = fields.Integer()
Street = fields.Str()
TotRmsAbvGrd = fields.Integer()
TotalBsmtSF = fields.Float()
Utilities = fields.Str(allow_none=True)
WoodDeckSF = fields.Integer()
YearBuilt = fields.Integer()
YearRemodAdd = fields.Integer()
YrSold = fields.Integer()
FirstFlrSF = fields.Integer()
SecondFlrSF = fields.Integer()
ThreeSsnPortch = fields.Integer()
def drop_na_inputs(*, input_data: pd.DataFrame) -> pd.DataFrame:
"""Check model inputs for na values and filter."""
validated_data = input_data.copy()
if input_data[config.model_config.numerical_na_not_allowed].isnull().any().any():
validated_data = validated_data.dropna(
axis=0, subset=config.model_config.numerical_na_not_allowed
)
return validated_data
def validate_inputs(
*, input_data: pd.DataFrame
) -> t.Tuple[pd.DataFrame, t.Optional[dict]]:
"""Check model inputs for unprocessable values."""
# convert syntax error field names (beginning with numbers)
input_data.rename(columns=config.model_config.variables_to_rename, inplace=True)
validated_data = drop_na_inputs(input_data=input_data)
# set many=True to allow passing in a list
schema = HouseDataInputSchema(many=True)
errors = None
try:
# replace numpy nans so that Marshmallow can validate
schema.load(validated_data.replace({np.nan: None}).to_dict(orient="records"))
except ValidationError as exc:
errors = exc.messages
return validated_data, errors
| import typing as t
from gradient_boosting_model.config.core import config
import numpy as np
import pandas as pd
from marshmallow import fields, Schema, ValidationError
class HouseDataInputSchema(Schema):
Alley = fields.Str(allow_none=True)
BedroomAbvGr = fields.Integer()
BldgType = fields.Str()
BsmtCond = fields.Str(allow_none=True)
BsmtExposure = fields.Str(allow_none=True)
BsmtFinSF1 = fields.Float(allow_none=True)
BsmtFinSF2 = fields.Float(allow_none=True)
BsmtFinType1 = fields.Str(allow_none=True)
BsmtFinType2 = fields.Str(allow_none=True)
BsmtFullBath = fields.Float(allow_none=True)
BsmtHalfBath = fields.Float(allow_none=True)
BsmtQual = fields.Str(allow_none=True)
BsmtUnfSF = fields.Float()
CentralAir = fields.Str()
Condition1 = fields.Str()
Condition2 = fields.Str()
Electrical = fields.Str(allow_none=True)
EnclosedPorch = fields.Integer()
ExterCond = fields.Str()
ExterQual = fields.Str()
Exterior1st = fields.Str(allow_none=True)
Exterior2nd = fields.Str(allow_none=True)
Fence = fields.Str(allow_none=True)
FireplaceQu = fields.Str(allow_none=True)
Fireplaces = fields.Integer()
Foundation = fields.Str()
FullBath = fields.Integer()
Functional = fields.Str(allow_none=True)
GarageArea = fields.Float()
GarageCars = fields.Float()
GarageCond = fields.Str(allow_none=True)
GarageFinish = fields.Str(allow_none=True)
GarageQual = fields.Str(allow_none=True)
GarageType = fields.Str(allow_none=True)
GarageYrBlt = fields.Float(allow_none=True)
GrLivArea = fields.Integer()
HalfBath = fields.Integer()
Heating = fields.Str()
HeatingQC = fields.Str()
HouseStyle = fields.Str()
Id = fields.Integer()
KitchenAbvGr = fields.Integer()
KitchenQual = fields.Str(allow_none=True)
LandContour = fields.Str()
LandSlope = fields.Str()
LotArea = fields.Integer()
LotConfig = fields.Str()
LotFrontage = fields.Float(allow_none=True)
LotShape = fields.Str()
LowQualFinSF = fields.Integer()
MSSubClass = fields.Integer()
MSZoning = fields.Str(allow_none=True)
MasVnrArea = fields.Float(allow_none=True)
MasVnrType = fields.Str(allow_none=True)
MiscFeature = fields.Str(allow_none=True)
MiscVal = fields.Integer()
MoSold = fields.Integer()
Neighborhood = fields.Str()
OpenPorchSF = fields.Integer()
OverallCond = fields.Integer()
OverallQual = fields.Integer()
PavedDrive = fields.Str()
PoolArea = fields.Integer()
PoolQC = fields.Str(allow_none=True)
RoofMatl = fields.Str()
RoofStyle = fields.Str()
SaleCondition = fields.Str()
SaleType = fields.Str(allow_none=True)
ScreenPorch = fields.Integer()
Street = fields.Str()
TotRmsAbvGrd = fields.Integer()
TotalBsmtSF = fields.Float()
Utilities = fields.Str(allow_none=True)
WoodDeckSF = fields.Integer()
YearBuilt = fields.Integer()
YearRemodAdd = fields.Integer()
YrSold = fields.Integer()
FirstFlrSF = fields.Integer()
SecondFlrSF = fields.Integer()
ThreeSsnPortch = fields.Integer()
def drop_na_inputs(*, input_data: pd.DataFrame) -> pd.DataFrame:
"""Check model inputs for na values and filter."""
validated_data = input_data.copy()
if input_data[config.model_config.numerical_na_not_allowed].isnull().any().any():
validated_data = validated_data.dropna(
axis=0, subset=config.model_config.numerical_na_not_allowed
)
return validated_data
def validate_inputs(
*, input_data: pd.DataFrame
) -> t.Tuple[pd.DataFrame, t.Optional[dict]]:
"""Check model inputs for unprocessable values."""
# convert syntax error field names (beginning with numbers)
input_data.rename(columns=config.model_config.variables_to_rename, inplace=True)
validated_data = drop_na_inputs(input_data=input_data)
# set many=True to allow passing in a list
schema = HouseDataInputSchema(many=True)
errors = None
try:
# replace numpy nans so that Marshmallow can validate
schema.load(validated_data.replace({np.nan: None}).to_dict(orient="records"))
except ValidationError as exc:
errors = exc.messages
return validated_data, errors
| en | 0.469698 | Check model inputs for na values and filter. Check model inputs for unprocessable values. # convert syntax error field names (beginning with numbers) # set many=True to allow passing in a list # replace numpy nans so that Marshmallow can validate | 2.223641 | 2 |
pyplan_engine/classes/IOEngine.py | jorgedouglas71/pyplan-ide | 17 | 10560 |
class IOEngine(object):
def __init__(self, node):
self.node = node
self.inputs = []
self.outputs = []
def release(self):
self.inputs = None
self.outputs = None
self.node = None
def updateInputs(self, names):
# remove prior outputs
for inputNode in self.inputs:
if not inputNode in names:
if self.node.model.existNode(inputNode):
self.node.model.getNode(inputNode).ioEngine.removeOutput(
self.node.identifier)
newInputs = []
for nodeId in names:
if self.node.model.existNode(nodeId):
newInputs.append(nodeId)
if not nodeId in self.inputs:
self.node.model.getNode(nodeId).ioEngine.addOutput(
self.node.identifier)
self.inputs = newInputs
def removeOutput(self, nodeId):
if nodeId in self.outputs:
self.outputs.remove(nodeId)
def removeInput(self, nodeId):
if nodeId in self.inputs:
self.inputs.remove(nodeId)
def addOutput(self, nodeId):
self.outputs.append(nodeId)
def updateNodeId(self, oldId, newId):
for inputNode in self.inputs:
if self.node.model.existNode(inputNode):
self.node.model.getNode(
inputNode).ioEngine.updateOutputId(oldId, newId)
for outputNode in self.outputs:
if self.node.model.existNode(outputNode):
self.node.model.getNode(
outputNode).ioEngine.updateInputId(oldId, newId)
def updateOnDeleteNode(self):
for inputNode in self.inputs:
if self.node.model.existNode(inputNode):
self.node.model.getNode(inputNode).ioEngine.removeOutput(
self.node.identifier)
for outputNode in self.outputs:
if self.node.model.existNode(outputNode):
self.node.model.getNode(outputNode).ioEngine.removeInput(
self.node.identifier)
def updateOutputId(self, oldId, newId):
if oldId in self.outputs:
self.outputs.remove(oldId)
self.outputs.append(newId)
def updateInputId(self, oldId, newId):
if oldId in self.inputs:
self.inputs.remove(oldId)
self.inputs.append(newId)
self.node.updateDefinitionForChangeId(oldId, newId)
|
class IOEngine(object):
def __init__(self, node):
self.node = node
self.inputs = []
self.outputs = []
def release(self):
self.inputs = None
self.outputs = None
self.node = None
def updateInputs(self, names):
# remove prior outputs
for inputNode in self.inputs:
if not inputNode in names:
if self.node.model.existNode(inputNode):
self.node.model.getNode(inputNode).ioEngine.removeOutput(
self.node.identifier)
newInputs = []
for nodeId in names:
if self.node.model.existNode(nodeId):
newInputs.append(nodeId)
if not nodeId in self.inputs:
self.node.model.getNode(nodeId).ioEngine.addOutput(
self.node.identifier)
self.inputs = newInputs
def removeOutput(self, nodeId):
if nodeId in self.outputs:
self.outputs.remove(nodeId)
def removeInput(self, nodeId):
if nodeId in self.inputs:
self.inputs.remove(nodeId)
def addOutput(self, nodeId):
self.outputs.append(nodeId)
def updateNodeId(self, oldId, newId):
for inputNode in self.inputs:
if self.node.model.existNode(inputNode):
self.node.model.getNode(
inputNode).ioEngine.updateOutputId(oldId, newId)
for outputNode in self.outputs:
if self.node.model.existNode(outputNode):
self.node.model.getNode(
outputNode).ioEngine.updateInputId(oldId, newId)
def updateOnDeleteNode(self):
for inputNode in self.inputs:
if self.node.model.existNode(inputNode):
self.node.model.getNode(inputNode).ioEngine.removeOutput(
self.node.identifier)
for outputNode in self.outputs:
if self.node.model.existNode(outputNode):
self.node.model.getNode(outputNode).ioEngine.removeInput(
self.node.identifier)
def updateOutputId(self, oldId, newId):
if oldId in self.outputs:
self.outputs.remove(oldId)
self.outputs.append(newId)
def updateInputId(self, oldId, newId):
if oldId in self.inputs:
self.inputs.remove(oldId)
self.inputs.append(newId)
self.node.updateDefinitionForChangeId(oldId, newId)
| en | 0.305924 | # remove prior outputs | 2.97548 | 3 |
RLBotPack/JoeyBot/CSharpPythonAgent/CSharpPythonAgent.py | RLMarvin/RLBotPack | 13 | 10561 | import os
from rlbot.agents.base_agent import BOT_CONFIG_AGENT_HEADER
from rlbot.agents.base_dotnet_agent import BaseDotNetAgent
from rlbot.parsing.custom_config import ConfigHeader, ConfigObject
class DotNetBot(BaseDotNetAgent):
def get_port_file_path(self):
# Look for a port.cfg file in the same directory as THIS python file.
return os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__), 'port.cfg'))
def load_config(self, config_header: ConfigHeader):
self.dotnet_executable_path = config_header.getpath('dotnet_executable_path')
self.logger.info(".NET executable is configured as {}".format(self.dotnet_executable_path))
@staticmethod
def create_agent_configurations(config: ConfigObject):
params = config.get_header(BOT_CONFIG_AGENT_HEADER)
params.add_value('dotnet_executable_path', str, default=None,
description='Relative path to the executable that runs the .NET executable.')
| import os
from rlbot.agents.base_agent import BOT_CONFIG_AGENT_HEADER
from rlbot.agents.base_dotnet_agent import BaseDotNetAgent
from rlbot.parsing.custom_config import ConfigHeader, ConfigObject
class DotNetBot(BaseDotNetAgent):
def get_port_file_path(self):
# Look for a port.cfg file in the same directory as THIS python file.
return os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__), 'port.cfg'))
def load_config(self, config_header: ConfigHeader):
self.dotnet_executable_path = config_header.getpath('dotnet_executable_path')
self.logger.info(".NET executable is configured as {}".format(self.dotnet_executable_path))
@staticmethod
def create_agent_configurations(config: ConfigObject):
params = config.get_header(BOT_CONFIG_AGENT_HEADER)
params.add_value('dotnet_executable_path', str, default=None,
description='Relative path to the executable that runs the .NET executable.')
| en | 0.889596 | # Look for a port.cfg file in the same directory as THIS python file. | 2.059436 | 2 |
v3_experiments.py | runekaagaard/workflows | 0 | 10562 | <filename>v3_experiments.py<gh_stars>0
# coding=utf-8
import inspect
from functools import wraps
def listify(func_s):
if callable(func_s):
return [func_s]
else:
return func_s
def parse_conditions(condition_s, args, kwargs, title):
err_msg = unicode(title) + u" nr. {} failed: {}"
for i, condition in enumerate(listify(condition_s), 1):
assert condition(*args, **
kwargs) is not False, unicode(err_msg).format(
i, unicode(inspect.getsource(condition)))
def mark_takes_no_arguments(func):
func.takes_no_arguments = True
return func
def takes_no_arguments(func):
mark_takes_no_arguments(func)
return func
def contract(pre_conditions, post_conditions):
"""
Pre is before. Post is after.
"""
def _(func):
@wraps(func)
def __(*args, **kwargs):
parse_conditions(
pre_conditions, args, kwargs, title='Preconditions')
result = func(*args, **kwargs)
parse_conditions(
post_conditions, [result], {}, title='Postconditions')
return result
return __
return _
def processing(pre_process, post_process):
"Procemanns"
def _(func):
@wraps(func)
def __(*args, **kwargs):
args, kwargs = pre_process(*args, **kwargs)
return post_process(func(*args, **kwargs))
return __
return _
@takes_no_arguments
def add_one(func):
@wraps(func)
def _(*args, **kwargs):
return func(*args, **kwargs) + 1
return _
def compose(*workflows):
def extract_kwargs(workflow, kwargs):
return {x: kwargs[x] for x in inspect.getargspec(workflow).args}
def _(*args, **kwargs):
assert len(args) == 0, "Only keywords allowed."
def __(func):
@wraps(func)
def ___(*a, **k):
return func(*a, **k)
for workflow in reversed(workflows):
if hasattr(workflow, 'takes_no_arguments'):
___ = workflow(___)
else:
___ = workflow(**extract_kwargs(workflow, kwargs))(___)
___.__doc__ += workflow.__doc__ or ""
return ___
return __
return _
someworkflow = compose(contract, processing, add_one)
print someworkflow
@someworkflow(
pre_conditions=[lambda x: x == 2],
post_conditions=lambda r: r == 15,
pre_process=lambda x: ([x + 1], {}),
post_process=lambda x: x + 1, )
def somefunc(x):
"""
Very important: x must be 2!
"""
return x + 10
print somefunc(2)
help(somefunc)
| <filename>v3_experiments.py<gh_stars>0
# coding=utf-8
import inspect
from functools import wraps
def listify(func_s):
if callable(func_s):
return [func_s]
else:
return func_s
def parse_conditions(condition_s, args, kwargs, title):
err_msg = unicode(title) + u" nr. {} failed: {}"
for i, condition in enumerate(listify(condition_s), 1):
assert condition(*args, **
kwargs) is not False, unicode(err_msg).format(
i, unicode(inspect.getsource(condition)))
def mark_takes_no_arguments(func):
func.takes_no_arguments = True
return func
def takes_no_arguments(func):
mark_takes_no_arguments(func)
return func
def contract(pre_conditions, post_conditions):
"""
Pre is before. Post is after.
"""
def _(func):
@wraps(func)
def __(*args, **kwargs):
parse_conditions(
pre_conditions, args, kwargs, title='Preconditions')
result = func(*args, **kwargs)
parse_conditions(
post_conditions, [result], {}, title='Postconditions')
return result
return __
return _
def processing(pre_process, post_process):
"Procemanns"
def _(func):
@wraps(func)
def __(*args, **kwargs):
args, kwargs = pre_process(*args, **kwargs)
return post_process(func(*args, **kwargs))
return __
return _
@takes_no_arguments
def add_one(func):
@wraps(func)
def _(*args, **kwargs):
return func(*args, **kwargs) + 1
return _
def compose(*workflows):
def extract_kwargs(workflow, kwargs):
return {x: kwargs[x] for x in inspect.getargspec(workflow).args}
def _(*args, **kwargs):
assert len(args) == 0, "Only keywords allowed."
def __(func):
@wraps(func)
def ___(*a, **k):
return func(*a, **k)
for workflow in reversed(workflows):
if hasattr(workflow, 'takes_no_arguments'):
___ = workflow(___)
else:
___ = workflow(**extract_kwargs(workflow, kwargs))(___)
___.__doc__ += workflow.__doc__ or ""
return ___
return __
return _
someworkflow = compose(contract, processing, add_one)
print someworkflow
@someworkflow(
pre_conditions=[lambda x: x == 2],
post_conditions=lambda r: r == 15,
pre_process=lambda x: ([x + 1], {}),
post_process=lambda x: x + 1, )
def somefunc(x):
"""
Very important: x must be 2!
"""
return x + 10
print somefunc(2)
help(somefunc)
| en | 0.916802 | # coding=utf-8 Pre is before. Post is after. Very important: x must be 2! | 2.553414 | 3 |
externals/binaryen/test/emscripten/tools/distill_asm.py | caokun8008/ckeos | 40 | 10563 | <reponame>caokun8008/ckeos<gh_stars>10-100
'''
Gets the core asm module out of an emscripten output file.
By default it adds a ';' to end the
var asm = ...
statement. You can add a third param to customize that. If the third param is 'swap-in', it will emit code to swap this asm module in, instead of the default one.
XXX this probably doesn't work with closure compiler advanced yet XXX
'''
import os, sys
import asm_module
infile = sys.argv[1]
outfile = sys.argv[2]
extra = sys.argv[3] if len(sys.argv) >= 4 else ';'
module = asm_module.AsmModule(infile).asm_js
if extra == 'swap-in':
# we do |var asm = | just like the original codebase, so that gets overridden anyhow (assuming global scripts).
extra = r''' (Module.asmGlobalArg, Module.asmLibraryArg, Module['buffer']);
// special fixups
asm.stackRestore(Module['asm'].stackSave()); // if this fails, make sure the original was built to be swappable (-s SWAPPABLE_ASM_MODULE=1)
// Finish swap
Module['asm'] = asm;
if (Module['onAsmSwap']) Module['onAsmSwap']();
'''
elif extra == 'just-func':
module = module[module.find('=')+1:] # strip the initial "var asm =" bit, leave just the raw module as a function
extra = ';'
open(outfile, 'w').write(module + extra)
| '''
Gets the core asm module out of an emscripten output file.
By default it adds a ';' to end the
var asm = ...
statement. You can add a third param to customize that. If the third param is 'swap-in', it will emit code to swap this asm module in, instead of the default one.
XXX this probably doesn't work with closure compiler advanced yet XXX
'''
import os, sys
import asm_module
infile = sys.argv[1]
outfile = sys.argv[2]
extra = sys.argv[3] if len(sys.argv) >= 4 else ';'
module = asm_module.AsmModule(infile).asm_js
if extra == 'swap-in':
# we do |var asm = | just like the original codebase, so that gets overridden anyhow (assuming global scripts).
extra = r''' (Module.asmGlobalArg, Module.asmLibraryArg, Module['buffer']);
// special fixups
asm.stackRestore(Module['asm'].stackSave()); // if this fails, make sure the original was built to be swappable (-s SWAPPABLE_ASM_MODULE=1)
// Finish swap
Module['asm'] = asm;
if (Module['onAsmSwap']) Module['onAsmSwap']();
'''
elif extra == 'just-func':
module = module[module.find('=')+1:] # strip the initial "var asm =" bit, leave just the raw module as a function
extra = ';'
open(outfile, 'w').write(module + extra) | en | 0.66968 | Gets the core asm module out of an emscripten output file. By default it adds a ';' to end the var asm = ... statement. You can add a third param to customize that. If the third param is 'swap-in', it will emit code to swap this asm module in, instead of the default one. XXX this probably doesn't work with closure compiler advanced yet XXX # we do |var asm = | just like the original codebase, so that gets overridden anyhow (assuming global scripts). (Module.asmGlobalArg, Module.asmLibraryArg, Module['buffer']); // special fixups asm.stackRestore(Module['asm'].stackSave()); // if this fails, make sure the original was built to be swappable (-s SWAPPABLE_ASM_MODULE=1) // Finish swap Module['asm'] = asm; if (Module['onAsmSwap']) Module['onAsmSwap'](); # strip the initial "var asm =" bit, leave just the raw module as a function | 2.457035 | 2 |
apex/contrib/conv_bias_relu/conv_bias_relu.py | XL-Kong/Painter_GAN | 0 | 10564 | import torch
import pdb
from torch.autograd import gradcheck
import fused_conv_bias_relu
class ConvBiasReLU_(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(ctx, x, weight, bias, padding, stride):
outputs = fused_conv_bias_relu.forward([x, weight, bias], padding, stride)
ctx.save_for_backward(x, weight, outputs[0])
ctx.padding = padding
ctx.stride = stride
return outputs[0]
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad_output):
bwd_args = [*ctx.saved_tensors, grad_output]
padding = ctx.padding
stride = ctx.stride
grads = fused_conv_bias_relu.backward(bwd_args, padding, stride)
return grads[0], grads[1], grads[2], None, None
class ConvBiasMaskReLU_(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(ctx, x, weight, bias, mask, padding, stride):
outputs = fused_conv_bias_relu.forward_mask([x, weight, bias, mask], padding, stride)
ctx.save_for_backward(x, weight, outputs[0])
ctx.padding = padding
ctx.stride = stride
return outputs[0]
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad_output):
bwd_args = [*ctx.saved_tensors, grad_output]
padding = ctx.padding
stride = ctx.stride
grads = fused_conv_bias_relu.backward(bwd_args, padding, stride)
return grads[0], grads[1], grads[2], None, None, None
class ConvBias_(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(ctx, x, weight, bias, padding, stride):
outputs = fused_conv_bias_relu.forward_no_relu([x, weight, bias], padding, stride)
ctx.save_for_backward(x, weight)
ctx.padding = padding
ctx.stride = stride
return outputs[0]
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad_output):
bwd_args = [*ctx.saved_tensors, grad_output]
padding = ctx.padding
stride = ctx.stride
grads = fused_conv_bias_relu.backward_no_relu(bwd_args, padding, stride)
return grads[0], grads[1], grads[2], None, None
ConvBiasReLU = ConvBiasReLU_.apply
ConvBiasMaskReLU = ConvBiasMaskReLU_.apply
ConvBias = ConvBias_.apply
| import torch
import pdb
from torch.autograd import gradcheck
import fused_conv_bias_relu
class ConvBiasReLU_(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(ctx, x, weight, bias, padding, stride):
outputs = fused_conv_bias_relu.forward([x, weight, bias], padding, stride)
ctx.save_for_backward(x, weight, outputs[0])
ctx.padding = padding
ctx.stride = stride
return outputs[0]
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad_output):
bwd_args = [*ctx.saved_tensors, grad_output]
padding = ctx.padding
stride = ctx.stride
grads = fused_conv_bias_relu.backward(bwd_args, padding, stride)
return grads[0], grads[1], grads[2], None, None
class ConvBiasMaskReLU_(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(ctx, x, weight, bias, mask, padding, stride):
outputs = fused_conv_bias_relu.forward_mask([x, weight, bias, mask], padding, stride)
ctx.save_for_backward(x, weight, outputs[0])
ctx.padding = padding
ctx.stride = stride
return outputs[0]
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad_output):
bwd_args = [*ctx.saved_tensors, grad_output]
padding = ctx.padding
stride = ctx.stride
grads = fused_conv_bias_relu.backward(bwd_args, padding, stride)
return grads[0], grads[1], grads[2], None, None, None
class ConvBias_(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(ctx, x, weight, bias, padding, stride):
outputs = fused_conv_bias_relu.forward_no_relu([x, weight, bias], padding, stride)
ctx.save_for_backward(x, weight)
ctx.padding = padding
ctx.stride = stride
return outputs[0]
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad_output):
bwd_args = [*ctx.saved_tensors, grad_output]
padding = ctx.padding
stride = ctx.stride
grads = fused_conv_bias_relu.backward_no_relu(bwd_args, padding, stride)
return grads[0], grads[1], grads[2], None, None
ConvBiasReLU = ConvBiasReLU_.apply
ConvBiasMaskReLU = ConvBiasMaskReLU_.apply
ConvBias = ConvBias_.apply
| none | 1 | 2.46043 | 2 |
|
data-structures/trees/trees/trees.py | bayan-alkhatib/data-structures-and-algorithms-401 | 0 | 10565 | class Node:
def __init__(self,value):
self.value=value
self.left=None
self.right=None
class Binary_Tree:
def __init__(self):
self.root = None
def pre_order(self):
""" root-left-right """
try:
self.values=[]
if self.root == None:
return "Tree is Empty"
def tree(node):
self.values+=[node.value]
if node.left:
tree(node.left)
if node.right:
tree(node.right)
return self.values
return tree(self.root)
except:
return "Error"
def in_order(self):
""" left-node-right"""
try:
self.values=[]
if not self.root:
return "Tree is Empty"
def tree(node):
if node.left:
tree(node.left)
self.values+=[node.value]
if node.right:
tree(node.right)
return self.values
return tree(self.root)
except:
return "Error"
def post_order(self):
""" left-right-node"""
try:
self.values=[]
if not self.root:
return "Tree is Empty"
def tree(node):
if node.left:
tree(node.left)
if node.right:
tree(node.right)
self.values+=[node.value]
return self.values
return tree(self.root)
except:
return "Error"
    def max(self):
        if not self.root:
            return "Tree is Empty"
        # track the running maximum in a separate attribute so this method is not shadowed on the instance
        self.max_value=self.root.value
        def tree(node):
            if node.value>self.max_value:
                self.max_value=node.value
            if node.left:
                tree(node.left)
            if node.right:
                tree(node.right)
            return self.max_value
        return tree(self.root)
class Binary_Search_Tree(Binary_Tree):
def add(self,value):
        '''add value to binary tree'''
if self.root == None:
self.root = Node(value)
else:
current=self.root
while current:
if value < current.value :
if current.left == None:
current.left = Node(value)
break
current = current.left
else:
if current.right == None:
current.right = Node(value)
break
current = current.right
def Contains(self,value):
if self.root==None:
return 'Tree is Empty'
else:
current=self.root
while current:
if current.value==value:
return True
elif value < current.value :
if current.left == None:
return False
current = current.left
else:
if current.right == None:
return False
current = current.right
| class Node:
def __init__(self,value):
self.value=value
self.left=None
self.right=None
class Binary_Tree:
def __init__(self):
self.root = None
def pre_order(self):
""" root-left-right """
try:
self.values=[]
if self.root == None:
return "Tree is Empty"
def tree(node):
self.values+=[node.value]
if node.left:
tree(node.left)
if node.right:
tree(node.right)
return self.values
return tree(self.root)
except:
return "Error"
def in_order(self):
""" left-node-right"""
try:
self.values=[]
if not self.root:
return "Tree is Empty"
def tree(node):
if node.left:
tree(node.left)
self.values+=[node.value]
if node.right:
tree(node.right)
return self.values
return tree(self.root)
except:
return "Error"
def post_order(self):
""" left-right-node"""
try:
self.values=[]
if not self.root:
return "Tree is Empty"
def tree(node):
if node.left:
tree(node.left)
if node.right:
tree(node.right)
self.values+=[node.value]
return self.values
return tree(self.root)
except:
return "Error"
    def max(self):
        if not self.root:
            return "Tree is Empty"
        # track the running maximum in a separate attribute so this method is not shadowed on the instance
        self.max_value=self.root.value
        def tree(node):
            if node.value>self.max_value:
                self.max_value=node.value
            if node.left:
                tree(node.left)
            if node.right:
                tree(node.right)
            return self.max_value
        return tree(self.root)
class Binary_Search_Tree(Binary_Tree):
def add(self,value):
        '''add value to binary tree'''
if self.root == None:
self.root = Node(value)
else:
current=self.root
while current:
if value < current.value :
if current.left == None:
current.left = Node(value)
break
current = current.left
else:
if current.right == None:
current.right = Node(value)
break
current = current.right
def Contains(self,value):
if self.root==None:
return 'Tree is Empty'
else:
current=self.root
while current:
if current.value==value:
return True
elif value < current.value :
if current.left == None:
return False
current = current.left
else:
if current.right == None:
return False
current = current.right
| en | 0.294704 | root-left-right left-node-right left-right-node add value to binary tree | 4.051388 | 4 |
ironic_inspector/cmd/dbsync.py | namnx228/ironic-inspector | 31 | 10566 | <gh_stars>10-100
# Copyright 2015 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from alembic import command as alembic_command
from alembic import config as alembic_config
from alembic import util as alembic_util
from oslo_config import cfg
from oslo_log import log
from ironic_inspector import conf # noqa
CONF = cfg.CONF
def add_alembic_command(subparsers, name):
return subparsers.add_parser(
name, help=getattr(alembic_command, name).__doc__)
def add_command_parsers(subparsers):
for name in ['current', 'history', 'branches', 'heads']:
parser = add_alembic_command(subparsers, name)
parser.set_defaults(func=do_alembic_command)
for name in ['stamp', 'show', 'edit']:
parser = add_alembic_command(subparsers, name)
parser.set_defaults(func=with_revision)
parser.add_argument('--revision', nargs='?', required=True)
parser = add_alembic_command(subparsers, 'upgrade')
parser.set_defaults(func=with_revision)
parser.add_argument('--revision', nargs='?')
parser = add_alembic_command(subparsers, 'revision')
parser.set_defaults(func=do_revision)
parser.add_argument('-m', '--message')
parser.add_argument('--autogenerate', action='store_true')
command_opt = cfg.SubCommandOpt('command',
title='Command',
help='Available commands',
handler=add_command_parsers)
def _get_alembic_config():
base_path = os.path.split(os.path.dirname(__file__))[0]
return alembic_config.Config(os.path.join(base_path, 'alembic.ini'))
def do_revision(config, cmd, *args, **kwargs):
do_alembic_command(config, cmd, message=CONF.command.message,
autogenerate=CONF.command.autogenerate)
def with_revision(config, cmd, *args, **kwargs):
revision = CONF.command.revision or 'head'
do_alembic_command(config, cmd, revision)
def do_alembic_command(config, cmd, *args, **kwargs):
try:
getattr(alembic_command, cmd)(config, *args, **kwargs)
except alembic_util.CommandError as e:
alembic_util.err(str(e))
def main(args=sys.argv[1:]):
log.register_options(CONF)
CONF.register_cli_opt(command_opt)
CONF(args, project='ironic-inspector')
config = _get_alembic_config()
config.set_main_option('script_location', "ironic_inspector:migrations")
config.ironic_inspector_config = CONF
CONF.command.func(config, CONF.command.name)
| # Copyright 2015 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from alembic import command as alembic_command
from alembic import config as alembic_config
from alembic import util as alembic_util
from oslo_config import cfg
from oslo_log import log
from ironic_inspector import conf # noqa
CONF = cfg.CONF
def add_alembic_command(subparsers, name):
return subparsers.add_parser(
name, help=getattr(alembic_command, name).__doc__)
def add_command_parsers(subparsers):
for name in ['current', 'history', 'branches', 'heads']:
parser = add_alembic_command(subparsers, name)
parser.set_defaults(func=do_alembic_command)
for name in ['stamp', 'show', 'edit']:
parser = add_alembic_command(subparsers, name)
parser.set_defaults(func=with_revision)
parser.add_argument('--revision', nargs='?', required=True)
parser = add_alembic_command(subparsers, 'upgrade')
parser.set_defaults(func=with_revision)
parser.add_argument('--revision', nargs='?')
parser = add_alembic_command(subparsers, 'revision')
parser.set_defaults(func=do_revision)
parser.add_argument('-m', '--message')
parser.add_argument('--autogenerate', action='store_true')
command_opt = cfg.SubCommandOpt('command',
title='Command',
help='Available commands',
handler=add_command_parsers)
def _get_alembic_config():
base_path = os.path.split(os.path.dirname(__file__))[0]
return alembic_config.Config(os.path.join(base_path, 'alembic.ini'))
def do_revision(config, cmd, *args, **kwargs):
do_alembic_command(config, cmd, message=CONF.command.message,
autogenerate=CONF.command.autogenerate)
def with_revision(config, cmd, *args, **kwargs):
revision = CONF.command.revision or 'head'
do_alembic_command(config, cmd, revision)
def do_alembic_command(config, cmd, *args, **kwargs):
try:
getattr(alembic_command, cmd)(config, *args, **kwargs)
except alembic_util.CommandError as e:
alembic_util.err(str(e))
def main(args=sys.argv[1:]):
log.register_options(CONF)
CONF.register_cli_opt(command_opt)
CONF(args, project='ironic-inspector')
config = _get_alembic_config()
config.set_main_option('script_location', "ironic_inspector:migrations")
config.ironic_inspector_config = CONF
CONF.command.func(config, CONF.command.name) | en | 0.846956 | # Copyright 2015 Cisco Systems # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # noqa | 2.100114 | 2 |
trabalhoaqui/comp_perguntas/valida.py | EmanoelG/jogodaforca | 1 | 10567 | from jogo import desenha_jogo
from random import randint
import sys
def input_cria_usuario():
usuario = dict()
usuario['nome'] = input('Informe o seu nome: ')
usuario['pontos'] = 0
usuario['desafiado'] = False
return usuario
def comeco(j1, j2):
j1 = 1
j2 = 2
n= randint(j1,j2)
escolhildo = n
return escolhildo
# changed here
def completou(acertos, pala, jogador_adivinhao):  # receives the correctly guessed letters and then checks whether the word is complete
    if acertos == len(pala):  ## and here
print(f'\t\t\t\t\t \033[37mJogador >> {jogador_adivinhao} << venceu !\033[m')
print("""
\033[35m
_____ ___ ___ ___ _______
/ ___| / | / |/ | | ____|
| | / | / /| /| | | |__
| | _ / /| | / / |__/ | | | __|
| |_| | / ___ | / / | | | |____
\_____//_/ |_| /_/ |_| |_______|
_____ _ _ ______ ______
/ _ \ | | / / | _____| | _ |
| | | | | | / / | |__ | |_| |
| | | | | | / / | __| | _ /
| |_| | | |/ / | |____ | | \ |
\_____/ |___/ |______| |_| \_|\033[m
""")
| from jogo import desenha_jogo
from random import randint
import sys
def input_cria_usuario():
usuario = dict()
usuario['nome'] = input('Informe o seu nome: ')
usuario['pontos'] = 0
usuario['desafiado'] = False
return usuario
def comeco(j1, j2):
j1 = 1
j2 = 2
n= randint(j1,j2)
escolhildo = n
return escolhildo
# mexi a aqui
def completou(acertos, pala , jogador_adivinhao):#recebe as letras acertadass e depois verifica se a palavra esta completa
if acertos == len(pala):## e aqui
print(f'\t\t\t\t\t \033[37mJogador >> {jogador_adivinhao} << venceu !\033[m')
print("""
\033[35m
_____ ___ ___ ___ _______
/ ___| / | / |/ | | ____|
| | / | / /| /| | | |__
| | _ / /| | / / |__/ | | | __|
| |_| | / ___ | / / | | | |____
\_____//_/ |_| /_/ |_| |_______|
_____ _ _ ______ ______
/ _ \ | | / / | _____| | _ |
| | | | | | / / | |__ | |_| |
| | | | | | / / | __| | _ /
| |_| | | |/ / | |____ | | \ |
\_____/ |___/ |______| |_| \_|\033[m
""")
| en | 0.255283 | # mexi a aqui #recebe as letras acertadass e depois verifica se a palavra esta completa ## e aqui \033[35m _____ ___ ___ ___ _______ / ___| / | / |/ | | ____| | | / | / /| /| | | |__ | | _ / /| | / / |__/ | | | __| | |_| | / ___ | / / | | | |____ \_____//_/ |_| /_/ |_| |_______| _____ _ _ ______ ______ / _ \ | | / / | _____| | _ | | | | | | | / / | |__ | |_| | | | | | | | / / | __| | _ / | |_| | | |/ / | |____ | | \ | \_____/ |___/ |______| |_| \_|\033[m | 3.24493 | 3 |
DesksReminder/Desks/accounts_desk.py | flopezag/fiware-management-scripts | 0 | 10568 | <reponame>flopezag/fiware-management-scripts
from datetime import date, datetime
from DesksReminder.Basics.dataFinder import Data
from DesksReminder.Basics.nickNames import ContactBook
from Config.settings import JIRA_URL
__author__ = '<NAME>'
class AccountsDesk:
def __init__(self):
self.contactBook = ContactBook()
def open(self):
messages = list()
for issue in Data().getAccountsDeskOpen():
created = datetime.strptime(issue.fields.created[:10], '%Y-%m-%d').date()
unanswered = (date.today() - created).days
if unanswered <= 1:
continue
summary = issue.fields.summary
displayName = issue.fields.assignee.displayName.strip()
nickName = self.contactBook.getNickName(displayName)
emailAddress = issue.fields.assignee.emailAddress
url = 'http://{}/browse/{}'.format(JIRA_URL, issue)
subject = 'FIWARE: Accounts Desk : Open Issue'
message = 'Dear {},'.format(nickName.encode('utf-8')) +\
"\n\nI noticed the issue {} is still OPEN, i.e. not replied for {} days.".format(issue, unanswered) +\
"\nLet me remind you of our rule to reply in the first 24 hours during working days." +\
"\nI would appreciate you spent a minute to reply to this request and to progress it " \
"on its workflow." +\
"\n\nIssue Summary: {}".format(summary.encode('utf-8')) +\
"\nYou can access it at {}".format(url) +\
"\n\nIssues in the Accounts Desk are available at\n\thttp://backlog.fiware.org/lab/upgradeAccount" +\
'\n\nThanks in advance for cooperation!!' +\
'\n\nKind Regards,' +\
'\nFernando'
messages.append(dict(issue=issue, summary=summary.encode('utf-8'),
email=emailAddress, nickname=nickName.encode('utf-8'), displayname=displayName,
subject=subject, body=message))
return messages
def inProgress(self):
messages = list()
for issue in Data().getAccountsDeskInProgress():
updated = datetime.strptime(issue.fields.updated[:10], '%Y-%m-%d').date()
noupdated = (date.today() - updated).days
if noupdated < 7:
continue
summary = issue.fields.summary
displayName = issue.fields.assignee.displayName.strip()
nickName = self.contactBook.getNickName(displayName)
emailAddress = issue.fields.assignee.emailAddress
url = 'http://{}/browse/{}'.format(JIRA_URL, issue)
subject = 'FIWARE: Accounts Desk: stalled Issue?'
message = 'Dear {},'.format(nickName.encode('utf-8')) +\
"\n\nI noticed issue {} is In Progress but no update happened in the last {} days.".format(issue,
noupdated) +\
"\nI would appreciate you spent a minute to update it by reporting its progress in a comment" +\
"\n\tor if ready for analysing, please, evolve it" +\
"\n\nIssue Summary: {}".format(summary.encode('utf-8')) +\
"\nYou can access it at {}".format(url) +\
"\n\nIssues in the Accounts Desk are available at\n\thttp://backlog.fiware.org/lab/upgradeAccount" +\
'\n\nThanks in advance for cooperation!!' +\
'\n\nKind Regards,' +\
'\nFernando'
messages.append(dict(issue=issue, summary=summary.encode('utf-8'),
email=emailAddress, nickname=nickName.encode('utf-8'), displayname=displayName,
subject=subject, body=message))
return messages
def scheduled(self):
messages = list()
for issue in Data().getAccountsDeskScheduled():
updated = datetime.strptime(issue.fields.updated[:10], '%Y-%m-%d').date()
noupdated = (date.today() - updated).days
if noupdated < 7:
continue
summary = issue.fields.summary
displayName = issue.fields.assignee.displayName.strip()
nickName = self.contactBook.getNickName(displayName)
emailAddress = issue.fields.assignee.emailAddress
url = 'http://{}/browse/{}'.format(JIRA_URL, issue)
subject = 'FIWARE: Accounts Desk: stalled Issue?'
message = 'Dear {},'.format(nickName.encode('utf-8')) +\
"\n\nI noticed issue {} is Scheduled but no update happened in the last {} days.".format(issue,
noupdated) +\
"\nI would appreciate you spent a minute to update it by reporting its progress in a comment" +\
"\n\tor if ready for Answered, please, evolve it" +\
"\n\nIssue Summary: {}".format(summary.encode('utf-8')) +\
"\nYou can access it at {}".format(url) +\
"\n\nIssues in the Accounts Desk are available at\n\thttp://backlog.fiware.org/lab/upgradeAccount" +\
'\n\nThanks in advance for cooperation!!' +\
'\n\nKind Regards,' +\
'\nFernando'
messages.append(dict(issue=issue, summary=summary.encode('utf-8'),
email=emailAddress, nickname=nickName.encode('utf-8'), displayname=displayName,
subject=subject, body=message))
return messages
def answered(self):
messages = list()
for issue in Data().getAccountsDeskAnswered():
updated = datetime.strptime(issue.fields.updated[:10], '%Y-%m-%d').date()
noupdated = (date.today() - updated).days
if noupdated < 7:
continue
summary = issue.fields.summary
displayName = issue.fields.assignee.displayName.strip()
nickName = self.contactBook.getNickName(displayName)
emailAddress = issue.fields.assignee.emailAddress
url = 'http://{}/browse/{}'.format(JIRA_URL, issue)
subject = 'FIWARE: Accounts Desk: Closed Issue?'
message = 'Dear {},'.format(nickName.encode('utf-8')) +\
"\n\nI noticed issue {} has been Answered but no update happened in the " \
"last {} days.".format(issue, noupdated) +\
"\nI would appreciate you spent a minute to close it" \
"\n\tor if the exchange continues, please, update its progress in a comment" \
"\n\nIssue Summary: {}".format(summary.encode('utf-8')) +\
"\nYou can access it at {}".format(url) +\
"\n\nIssues in the Accounts Desk are available at\n\thttp://backlog.fiware.org/lab/upgradeAccount" +\
'\n\nThanks in advance for cooperation!!' +\
'\n\nKind Regards,' +\
'\nFernando'
messages.append(dict(issue=issue, summary=summary.encode('utf-8'),
email=emailAddress, nickname=nickName.encode('utf-8'), displayname=displayName,
subject=subject, body=message))
return messages
def rejected(self):
messages = list()
for issue in Data().getAccountsDeskRejected():
updated = datetime.strptime(issue.fields.updated[:10], '%Y-%m-%d').date()
noupdated = (date.today() - updated).days
if noupdated < 1:
continue
summary = issue.fields.summary
displayName = issue.fields.assignee.displayName.strip()
nickName = self.contactBook.getNickName(displayName)
emailAddress = issue.fields.assignee.emailAddress
url = 'http://{}/browse/{}'.format(JIRA_URL, issue)
subject = 'FIWARE: Accounts Desk: Close the procedure'
message = 'Dear {},'.format(nickName.encode('utf-8')) +\
"\n\nI noticed issue {} has been Rejected.".format(issue) +\
"\nI would appreciate you spent a minute to close the procedure" \
"\n\nIssue Summary: {}".format(summary.encode('utf-8')) +\
"\nYou can access it at {}".format(url) +\
"\n\nIssues in the Accounts Desk are available at\n\thttp://backlog.fiware.org/lab/upgradeAccount" +\
'\n\nThanks in advance for cooperation!!' +\
'\n\nKind Regards,' +\
'\nFernando'
messages.append(dict(issue=issue, summary=summary.encode('utf-8'),
email=emailAddress, nickname=nickName.encode('utf-8'), displayname=displayName,
subject=subject, body=message))
return messages
if __name__ == "__main__":
pass
| from datetime import date, datetime
from DesksReminder.Basics.dataFinder import Data
from DesksReminder.Basics.nickNames import ContactBook
from Config.settings import JIRA_URL
__author__ = '<NAME>'
class AccountsDesk:
def __init__(self):
self.contactBook = ContactBook()
def open(self):
messages = list()
for issue in Data().getAccountsDeskOpen():
created = datetime.strptime(issue.fields.created[:10], '%Y-%m-%d').date()
unanswered = (date.today() - created).days
if unanswered <= 1:
continue
summary = issue.fields.summary
displayName = issue.fields.assignee.displayName.strip()
nickName = self.contactBook.getNickName(displayName)
emailAddress = issue.fields.assignee.emailAddress
url = 'http://{}/browse/{}'.format(JIRA_URL, issue)
subject = 'FIWARE: Accounts Desk : Open Issue'
message = 'Dear {},'.format(nickName.encode('utf-8')) +\
"\n\nI noticed the issue {} is still OPEN, i.e. not replied for {} days.".format(issue, unanswered) +\
"\nLet me remind you of our rule to reply in the first 24 hours during working days." +\
"\nI would appreciate you spent a minute to reply to this request and to progress it " \
"on its workflow." +\
"\n\nIssue Summary: {}".format(summary.encode('utf-8')) +\
"\nYou can access it at {}".format(url) +\
"\n\nIssues in the Accounts Desk are available at\n\thttp://backlog.fiware.org/lab/upgradeAccount" +\
'\n\nThanks in advance for cooperation!!' +\
'\n\nKind Regards,' +\
'\nFernando'
messages.append(dict(issue=issue, summary=summary.encode('utf-8'),
email=emailAddress, nickname=nickName.encode('utf-8'), displayname=displayName,
subject=subject, body=message))
return messages
def inProgress(self):
messages = list()
for issue in Data().getAccountsDeskInProgress():
updated = datetime.strptime(issue.fields.updated[:10], '%Y-%m-%d').date()
noupdated = (date.today() - updated).days
if noupdated < 7:
continue
summary = issue.fields.summary
displayName = issue.fields.assignee.displayName.strip()
nickName = self.contactBook.getNickName(displayName)
emailAddress = issue.fields.assignee.emailAddress
url = 'http://{}/browse/{}'.format(JIRA_URL, issue)
subject = 'FIWARE: Accounts Desk: stalled Issue?'
message = 'Dear {},'.format(nickName.encode('utf-8')) +\
"\n\nI noticed issue {} is In Progress but no update happened in the last {} days.".format(issue,
noupdated) +\
"\nI would appreciate you spent a minute to update it by reporting its progress in a comment" +\
"\n\tor if ready for analysing, please, evolve it" +\
"\n\nIssue Summary: {}".format(summary.encode('utf-8')) +\
"\nYou can access it at {}".format(url) +\
"\n\nIssues in the Accounts Desk are available at\n\thttp://backlog.fiware.org/lab/upgradeAccount" +\
'\n\nThanks in advance for cooperation!!' +\
'\n\nKind Regards,' +\
'\nFernando'
messages.append(dict(issue=issue, summary=summary.encode('utf-8'),
email=emailAddress, nickname=nickName.encode('utf-8'), displayname=displayName,
subject=subject, body=message))
return messages
def scheduled(self):
messages = list()
for issue in Data().getAccountsDeskScheduled():
updated = datetime.strptime(issue.fields.updated[:10], '%Y-%m-%d').date()
noupdated = (date.today() - updated).days
if noupdated < 7:
continue
summary = issue.fields.summary
displayName = issue.fields.assignee.displayName.strip()
nickName = self.contactBook.getNickName(displayName)
emailAddress = issue.fields.assignee.emailAddress
url = 'http://{}/browse/{}'.format(JIRA_URL, issue)
subject = 'FIWARE: Accounts Desk: stalled Issue?'
message = 'Dear {},'.format(nickName.encode('utf-8')) +\
"\n\nI noticed issue {} is Scheduled but no update happened in the last {} days.".format(issue,
noupdated) +\
"\nI would appreciate you spent a minute to update it by reporting its progress in a comment" +\
"\n\tor if ready for Answered, please, evolve it" +\
"\n\nIssue Summary: {}".format(summary.encode('utf-8')) +\
"\nYou can access it at {}".format(url) +\
"\n\nIssues in the Accounts Desk are available at\n\thttp://backlog.fiware.org/lab/upgradeAccount" +\
'\n\nThanks in advance for cooperation!!' +\
'\n\nKind Regards,' +\
'\nFernando'
messages.append(dict(issue=issue, summary=summary.encode('utf-8'),
email=emailAddress, nickname=nickName.encode('utf-8'), displayname=displayName,
subject=subject, body=message))
return messages
def answered(self):
messages = list()
for issue in Data().getAccountsDeskAnswered():
updated = datetime.strptime(issue.fields.updated[:10], '%Y-%m-%d').date()
noupdated = (date.today() - updated).days
if noupdated < 7:
continue
summary = issue.fields.summary
displayName = issue.fields.assignee.displayName.strip()
nickName = self.contactBook.getNickName(displayName)
emailAddress = issue.fields.assignee.emailAddress
url = 'http://{}/browse/{}'.format(JIRA_URL, issue)
subject = 'FIWARE: Accounts Desk: Closed Issue?'
message = 'Dear {},'.format(nickName.encode('utf-8')) +\
"\n\nI noticed issue {} has been Answered but no update happened in the " \
"last {} days.".format(issue, noupdated) +\
"\nI would appreciate you spent a minute to close it" \
"\n\tor if the exchange continues, please, update its progress in a comment" \
"\n\nIssue Summary: {}".format(summary.encode('utf-8')) +\
"\nYou can access it at {}".format(url) +\
"\n\nIssues in the Accounts Desk are available at\n\thttp://backlog.fiware.org/lab/upgradeAccount" +\
'\n\nThanks in advance for cooperation!!' +\
'\n\nKind Regards,' +\
'\nFernando'
messages.append(dict(issue=issue, summary=summary.encode('utf-8'),
email=emailAddress, nickname=nickName.encode('utf-8'), displayname=displayName,
subject=subject, body=message))
return messages
def rejected(self):
messages = list()
for issue in Data().getAccountsDeskRejected():
updated = datetime.strptime(issue.fields.updated[:10], '%Y-%m-%d').date()
noupdated = (date.today() - updated).days
if noupdated < 1:
continue
summary = issue.fields.summary
displayName = issue.fields.assignee.displayName.strip()
nickName = self.contactBook.getNickName(displayName)
emailAddress = issue.fields.assignee.emailAddress
url = 'http://{}/browse/{}'.format(JIRA_URL, issue)
subject = 'FIWARE: Accounts Desk: Close the procedure'
message = 'Dear {},'.format(nickName.encode('utf-8')) +\
"\n\nI noticed issue {} has been Rejected.".format(issue) +\
"\nI would appreciate you spent a minute to close the procedure" \
"\n\nIssue Summary: {}".format(summary.encode('utf-8')) +\
"\nYou can access it at {}".format(url) +\
"\n\nIssues in the Accounts Desk are available at\n\thttp://backlog.fiware.org/lab/upgradeAccount" +\
'\n\nThanks in advance for cooperation!!' +\
'\n\nKind Regards,' +\
'\nFernando'
messages.append(dict(issue=issue, summary=summary.encode('utf-8'),
email=emailAddress, nickname=nickName.encode('utf-8'), displayname=displayName,
subject=subject, body=message))
return messages
if __name__ == "__main__":
pass | none | 1 | 2.531706 | 3 |
|
scripts/ip2hex.py | Kidlike/dotfiles | 0 | 10569 | #!/usr/bin/python
import sys
import re
def iptohex(ip):
octets = ip.split('.')
hex_octets = []
for octet in octets:
if int(octet) < 16:
hex_octets.append('0' + hex(int(octet))[2:])
else:
hex_octets.append(hex(int(octet))[2:])
hex_octets = ''.join(hex_octets)
return hex_octets
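# Example: iptohex('192.168.0.1') -> 'c0a80001'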
def main():
if (len(sys.argv) != 2):
print 'Usage: ./iptohex.py x.x.x.x'
sys.exit(1)
ip = sys.argv[1]
invalidInput = re.search(r'[^0-9\.]', ip)
if invalidInput:
print 'Usage: ./iptohex.py x.x.x.x'
hex_ip = iptohex(ip)
print "Hex IP: %s " % (hex_ip)
print "Decimal IP: %s" % (ip)
if __name__ == '__main__':
main()
| #!/usr/bin/python
import sys
import re
def iptohex(ip):
octets = ip.split('.')
hex_octets = []
for octet in octets:
if int(octet) < 16:
hex_octets.append('0' + hex(int(octet))[2:])
else:
hex_octets.append(hex(int(octet))[2:])
hex_octets = ''.join(hex_octets)
return hex_octets
def main():
if (len(sys.argv) != 2):
print 'Usage: ./iptohex.py x.x.x.x'
sys.exit(1)
ip = sys.argv[1]
invalidInput = re.search(r'[^0-9\.]', ip)
if invalidInput:
print 'Usage: ./iptohex.py x.x.x.x'
hex_ip = iptohex(ip)
print "Hex IP: %s " % (hex_ip)
print "Decimal IP: %s" % (ip)
if __name__ == '__main__':
main()
| ru | 0.258958 | #!/usr/bin/python | 3.20201 | 3 |
src/models/CVX_weighted.py | DanqingZ/social-DCM | 14 | 10570 | <reponame>DanqingZ/social-DCM
import random
import numpy as np
import numpy.linalg as LA
import scipy as spy
import time
from itertools import *
import sys
import cvxpy as cvx
from random import randint
import numpy as np
import random
from scipy.sparse import csc_matrix
from scipy import sparse as sp
import networkx as nx
class CVX_weighted:
def __init__(self, X, y, b,pos_node ,temp, Lambda, Rho):
self.X = X
self.y = y
self.value = 0
self.dim = X.shape[1]
self.Lambda = Lambda
self.Rho = Rho
self.temp = temp
self.num_nodes = nx.number_of_nodes(self.temp)
self.W = np.zeros((self.dim))
self.b = b
self.pos_node = pos_node
self.P = np.zeros((self.num_nodes,self.num_nodes))
def init_P(self):
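        # Builds the weighted graph Laplacian of the positive-edge probabilities:
        # first fill P with the edge weights p_ij, then set P = D - P, where
        # D = diag(row sums of P).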
for i in self.temp.nodes_iter():
for j in self.temp.neighbors(i):
self.P[i,j] = self.temp[i][j]['pos_edge_prob']
self.P = np.diag(np.sum(self.P,1)) - self.P
def solve(self):
dim = self.X.shape[1]
w = cvx.Variable(dim)
num_nodes = nx.number_of_nodes(self.temp)
b = cvx.Variable(num_nodes)
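        # Objective (sketch of the model being solved): positivity-weighted
        # logistic loss on the scores X*w + b (one intercept b_i per node),
        # plus the graph-smoothness penalty Lambda * b'Pb, with P the
        # Laplacian assembled in init_P().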
loss = cvx.sum_entries(cvx.mul_elemwise(np.array(self.pos_node),cvx.logistic(-cvx.mul_elemwise(self.y, self.X*w+b)))) + self.Lambda*cvx.quad_form(b,self.P)
problem = cvx.Problem(cvx.Minimize(loss))
problem.solve(verbose=False)
opt = problem.value
self.W = w.value
self.b = b.value
self.value = opt | import random
import numpy as np
import numpy.linalg as LA
import scipy as spy
import time
from itertools import *
import sys
import cvxpy as cvx
from random import randint
import numpy as np
import random
from scipy.sparse import csc_matrix
from scipy import sparse as sp
import networkx as nx
class CVX_weighted:
def __init__(self, X, y, b,pos_node ,temp, Lambda, Rho):
self.X = X
self.y = y
self.value = 0
self.dim = X.shape[1]
self.Lambda = Lambda
self.Rho = Rho
self.temp = temp
self.num_nodes = nx.number_of_nodes(self.temp)
self.W = np.zeros((self.dim))
self.b = b
self.pos_node = pos_node
self.P = np.zeros((self.num_nodes,self.num_nodes))
def init_P(self):
for i in self.temp.nodes_iter():
for j in self.temp.neighbors(i):
self.P[i,j] = self.temp[i][j]['pos_edge_prob']
self.P = np.diag(np.sum(self.P,1)) - self.P
def solve(self):
dim = self.X.shape[1]
w = cvx.Variable(dim)
num_nodes = nx.number_of_nodes(self.temp)
b = cvx.Variable(num_nodes)
loss = cvx.sum_entries(cvx.mul_elemwise(np.array(self.pos_node),cvx.logistic(-cvx.mul_elemwise(self.y, self.X*w+b)))) + self.Lambda*cvx.quad_form(b,self.P)
problem = cvx.Problem(cvx.Minimize(loss))
problem.solve(verbose=False)
opt = problem.value
self.W = w.value
self.b = b.value
self.value = opt | none | 1 | 2.162183 | 2 |
|
experiments/colorization_cINN/data.py | jlmaccal/FrEIA | 0 | 10571 | <reponame>jlmaccal/FrEIA
import sys
import glob
from os.path import join
from multiprocessing import Pool
import numpy as np
import matplotlib.pyplot as plt
from skimage import io, color
from PIL import Image, ImageEnhance
import torch
from torch.utils.data import Dataset, DataLoader, TensorDataset
import torch.nn.functional as F
import torchvision.transforms as T
from tqdm import tqdm
import joint_bilateral_filter as jbf
import config as c
offsets = (47.5, 2.4, 7.4)
scales = (25.6, 11.2, 16.8)
def apply_filt(args):
'''multiprocessing wrapper for applying the joint bilateral filter'''
L_i, ab_i = args
return jbf.upsample(L_i[0], ab_i, s_x=6, s_l=0.10)
def norm_lab_to_rgb(L, ab, norm=True, filt=False, bw=False):
'''given an Nx1xWxH Tensor L and an Nx2xwxh Tensor ab, normalized accoring to offsets and
scales above, upsample the ab channels and combine with L, and form an RGB image.
norm: If false, assume that L, ab are not normalized and already in the correct range
filt: Use joint bilateral upsamling to do the upsampling. Slow, but improves image quality.
bw: Simply produce a grayscale RGB, ignoring the ab channels'''
if bw:
filt=False
if filt:
with Pool(12) as p:
ab_up_list = p.map(apply_filt, [(L[i], ab[i]) for i in range(len(L))])
ab = np.stack(ab_up_list, axis=0)
ab = torch.Tensor(ab)
else:
ab = F.interpolate(ab, size=L.shape[2], mode='bilinear')
lab = torch.cat([L, ab], dim=1)
for i in range(1 + 2*norm):
lab[:, i] = lab[:, i] * scales[i] + offsets[i]
lab[:, 0].clamp_(0., 100.)
lab[:, 1:].clamp_(-128, 128)
if bw:
lab[:, 1:].zero_()
lab = lab.cpu().data.numpy()
rgb = [color.lab2rgb(np.transpose(l, (1, 2, 0))).transpose(2, 0, 1) for l in lab]
return np.array(rgb)
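# Minimal usage sketch (assuming `x` is a normalized Lab batch as produced by
# LabColorDataset below, i.e. a tensor of shape [N, 3, H, W]):
#   rgb = norm_lab_to_rgb(x[:, :1], x[:, 1:])
# returns a float np.ndarray of shape [N, 3, H, W] with values in [0, 1].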
class LabColorDataset(Dataset):
def __init__(self, file_list, transform=None):
self.files = file_list
self.transform = transform
self.to_tensor = T.ToTensor()
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
im = Image.open(self.files[idx])
if self.transform:
im = self.transform(im)
im = self.to_tensor(im).numpy()
try:
if im.shape[0] == 1:
im = np.concatenate([im]*3, axis=0)
if im.shape[0] == 4:
im = im[:3]
im = np.transpose(im, (1,2,0))
im = color.rgb2lab(im).transpose((2, 0, 1))
for i in range(3):
im[i] = (im[i] - offsets[i]) / scales[i]
return torch.Tensor(im)
except:
return self.__getitem__(idx+1)
# Data transforms for training and test/validation set
transf = T.Compose([T.RandomHorizontalFlip(),
T.RandomResizedCrop(c.img_dims_orig[0], scale=(0.2, 1.))])
transf_test = T.Compose([T.Resize(c.img_dims_orig[0]),
T.CenterCrop(c.img_dims_orig[0])])
if c.dataset == 'imagenet':
with open('./imagenet/training_images.txt') as f:
train_list = [join('./imagenet', fname[2:]) for fname in f.read().splitlines()]
with open(c.validation_images) as f:
test_list = [ t for t in f.read().splitlines()if t[0] != '#']
test_list = [join('./imagenet', fname) for fname in test_list]
if c.val_start is not None:
test_list = test_list[c.val_start:c.val_stop]
else:
data_dir = '/home/diz/data/coco17'
complete_list = sorted(glob.glob(join(data_dir, '*.jpg')))
train_list = complete_list[64:]
test_list = complete_list[64:]
train_data = LabColorDataset(train_list,transf)
test_data = LabColorDataset(test_list, transf_test)
train_loader = DataLoader(train_data, batch_size=c.batch_size, shuffle=True, num_workers=8, pin_memory=True, drop_last=True)
test_loader = DataLoader(test_data, batch_size=min(64, len(test_list)), shuffle=c.shuffle_val, num_workers=4, pin_memory=True, drop_last=False)
if __name__ == '__main__':
# Determine mean and standard deviation of RGB channels
# (i.e. set global variables scale and offsets to 1., then use the results as new scale and offset)
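    # NOTE: the visualization loop directly below assumes a trained `model`
    # object (providing `prepare_batch`) exists in scope; it is neither
    # imported nor defined in this file, so that block will not run as-is.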
for x in test_loader:
x_l, x_ab, _, x_ab_pred = model.prepare_batch(x)
#continue
img_gt = norm_lab_to_rgb(x_l, x_ab)
img_pred = norm_lab_to_rgb(x_l, x_ab_pred)
for i in range(c.batch_size):
plt.figure()
plt.subplot(2,2,1)
plt.imshow(img_gt[i].transpose(1,2,0))
plt.subplot(2,2,2)
plt.scatter(x_ab[i, 0].cpu().numpy().flatten() * scales[1] + offsets[1],
x_ab[i, 1].cpu().numpy().flatten() * scales[2] + offsets[2], label='gt')
plt.scatter(x_ab_pred[i, 0].cpu().numpy().flatten() * scales[1] + offsets[1],
x_ab_pred[i, 1].cpu().numpy().flatten() * scales[2] + offsets[2], label='pred')
plt.legend()
plt.subplot(2,2,3)
plt.imshow(img_pred[i].transpose(1,2,0))
plt.show()
sys.exit()
means = []
stds = []
for i, x in enumerate(train_loader):
print('\r', '%i / %i' % (i, len(train_loader)), end='')
mean = []
std = []
        for ch in range(3):
            mean.append(x[:, ch].mean().item())
            std.append(x[:, ch].std().item())
means.append(mean)
stds.append(std)
if i >= 1000:
break
means, stds = np.array(means), np.array(stds)
print()
print('Mean ', means.mean(axis=0))
print('Std dev', stds.mean(axis=0))
#[-0.04959071 0.03768991 0.11539354]
#[0.51175581 0.17507738 0.26179135]
| import sys
import glob
from os.path import join
from multiprocessing import Pool
import numpy as np
import matplotlib.pyplot as plt
from skimage import io, color
from PIL import Image, ImageEnhance
import torch
from torch.utils.data import Dataset, DataLoader, TensorDataset
import torch.nn.functional as F
import torchvision.transforms as T
from tqdm import tqdm
import joint_bilateral_filter as jbf
import config as c
offsets = (47.5, 2.4, 7.4)
scales = (25.6, 11.2, 16.8)
def apply_filt(args):
'''multiprocessing wrapper for applying the joint bilateral filter'''
L_i, ab_i = args
return jbf.upsample(L_i[0], ab_i, s_x=6, s_l=0.10)
def norm_lab_to_rgb(L, ab, norm=True, filt=False, bw=False):
'''given an Nx1xWxH Tensor L and an Nx2xwxh Tensor ab, normalized accoring to offsets and
scales above, upsample the ab channels and combine with L, and form an RGB image.
norm: If false, assume that L, ab are not normalized and already in the correct range
filt: Use joint bilateral upsamling to do the upsampling. Slow, but improves image quality.
bw: Simply produce a grayscale RGB, ignoring the ab channels'''
if bw:
filt=False
if filt:
with Pool(12) as p:
ab_up_list = p.map(apply_filt, [(L[i], ab[i]) for i in range(len(L))])
ab = np.stack(ab_up_list, axis=0)
ab = torch.Tensor(ab)
else:
ab = F.interpolate(ab, size=L.shape[2], mode='bilinear')
lab = torch.cat([L, ab], dim=1)
for i in range(1 + 2*norm):
lab[:, i] = lab[:, i] * scales[i] + offsets[i]
lab[:, 0].clamp_(0., 100.)
lab[:, 1:].clamp_(-128, 128)
if bw:
lab[:, 1:].zero_()
lab = lab.cpu().data.numpy()
rgb = [color.lab2rgb(np.transpose(l, (1, 2, 0))).transpose(2, 0, 1) for l in lab]
return np.array(rgb)
class LabColorDataset(Dataset):
def __init__(self, file_list, transform=None):
self.files = file_list
self.transform = transform
self.to_tensor = T.ToTensor()
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
im = Image.open(self.files[idx])
if self.transform:
im = self.transform(im)
im = self.to_tensor(im).numpy()
try:
if im.shape[0] == 1:
im = np.concatenate([im]*3, axis=0)
if im.shape[0] == 4:
im = im[:3]
im = np.transpose(im, (1,2,0))
im = color.rgb2lab(im).transpose((2, 0, 1))
for i in range(3):
im[i] = (im[i] - offsets[i]) / scales[i]
return torch.Tensor(im)
except:
return self.__getitem__(idx+1)
# Data transforms for training and test/validation set
transf = T.Compose([T.RandomHorizontalFlip(),
T.RandomResizedCrop(c.img_dims_orig[0], scale=(0.2, 1.))])
transf_test = T.Compose([T.Resize(c.img_dims_orig[0]),
T.CenterCrop(c.img_dims_orig[0])])
if c.dataset == 'imagenet':
with open('./imagenet/training_images.txt') as f:
train_list = [join('./imagenet', fname[2:]) for fname in f.read().splitlines()]
with open(c.validation_images) as f:
test_list = [ t for t in f.read().splitlines()if t[0] != '#']
test_list = [join('./imagenet', fname) for fname in test_list]
if c.val_start is not None:
test_list = test_list[c.val_start:c.val_stop]
else:
data_dir = '/home/diz/data/coco17'
complete_list = sorted(glob.glob(join(data_dir, '*.jpg')))
train_list = complete_list[64:]
test_list = complete_list[64:]
train_data = LabColorDataset(train_list,transf)
test_data = LabColorDataset(test_list, transf_test)
train_loader = DataLoader(train_data, batch_size=c.batch_size, shuffle=True, num_workers=8, pin_memory=True, drop_last=True)
test_loader = DataLoader(test_data, batch_size=min(64, len(test_list)), shuffle=c.shuffle_val, num_workers=4, pin_memory=True, drop_last=False)
if __name__ == '__main__':
# Determine mean and standard deviation of RGB channels
# (i.e. set global variables scale and offsets to 1., then use the results as new scale and offset)
for x in test_loader:
x_l, x_ab, _, x_ab_pred = model.prepare_batch(x)
#continue
img_gt = norm_lab_to_rgb(x_l, x_ab)
img_pred = norm_lab_to_rgb(x_l, x_ab_pred)
for i in range(c.batch_size):
plt.figure()
plt.subplot(2,2,1)
plt.imshow(img_gt[i].transpose(1,2,0))
plt.subplot(2,2,2)
plt.scatter(x_ab[i, 0].cpu().numpy().flatten() * scales[1] + offsets[1],
x_ab[i, 1].cpu().numpy().flatten() * scales[2] + offsets[2], label='gt')
plt.scatter(x_ab_pred[i, 0].cpu().numpy().flatten() * scales[1] + offsets[1],
x_ab_pred[i, 1].cpu().numpy().flatten() * scales[2] + offsets[2], label='pred')
plt.legend()
plt.subplot(2,2,3)
plt.imshow(img_pred[i].transpose(1,2,0))
plt.show()
sys.exit()
means = []
stds = []
for i, x in enumerate(train_loader):
print('\r', '%i / %i' % (i, len(train_loader)), end='')
mean = []
std = []
        for ch in range(3):
            mean.append(x[:, ch].mean().item())
            std.append(x[:, ch].std().item())
means.append(mean)
stds.append(std)
if i >= 1000:
break
means, stds = np.array(means), np.array(stds)
print()
print('Mean ', means.mean(axis=0))
print('Std dev', stds.mean(axis=0))
#[-0.04959071 0.03768991 0.11539354]
#[0.51175581 0.17507738 0.26179135] | en | 0.778123 | multiprocessing wrapper for applying the joint bilateral filter given an Nx1xWxH Tensor L and an Nx2xwxh Tensor ab, normalized accoring to offsets and scales above, upsample the ab channels and combine with L, and form an RGB image. norm: If false, assume that L, ab are not normalized and already in the correct range filt: Use joint bilateral upsamling to do the upsampling. Slow, but improves image quality. bw: Simply produce a grayscale RGB, ignoring the ab channels # Data transforms for training and test/validation set # Determine mean and standard deviation of RGB channels # (i.e. set global variables scale and offsets to 1., then use the results as new scale and offset) #continue #[-0.04959071 0.03768991 0.11539354] #[0.51175581 0.17507738 0.26179135] | 2.456857 | 2 |
utils/editor.py | tien1504/idinvert_pytorch | 415 | 10572 | <reponame>tien1504/idinvert_pytorch
# python 3.7
"""Utility functions for image editing from latent space."""
import os.path
import numpy as np
__all__ = [
'parse_indices', 'interpolate', 'mix_style',
'get_layerwise_manipulation_strength', 'manipulate', 'parse_boundary_list'
]
def parse_indices(obj, min_val=None, max_val=None):
"""Parses indices.
If the input is a list or tuple, this function has no effect.
The input can also be a string, which is either a comma separated list of
numbers 'a, b, c', or a dash separated range 'a - c'. Space in the string will
be ignored.
Args:
obj: The input object to parse indices from.
min_val: If not `None`, this function will check that all indices are equal
to or larger than this value. (default: None)
max_val: If not `None`, this function will check that all indices are equal
to or smaller than this field. (default: None)
Returns:
A list of integers.
Raises:
If the input is invalid, i.e., neither a list or tuple, nor a string.
"""
if obj is None or obj == '':
indices = []
elif isinstance(obj, int):
indices = [obj]
elif isinstance(obj, (list, tuple, np.ndarray)):
indices = list(obj)
elif isinstance(obj, str):
indices = []
splits = obj.replace(' ', '').split(',')
for split in splits:
numbers = list(map(int, split.split('-')))
if len(numbers) == 1:
indices.append(numbers[0])
elif len(numbers) == 2:
indices.extend(list(range(numbers[0], numbers[1] + 1)))
else:
raise ValueError(f'Invalid type of input: {type(obj)}!')
assert isinstance(indices, list)
indices = sorted(list(set(indices)))
for idx in indices:
assert isinstance(idx, int)
if min_val is not None:
assert idx >= min_val, f'{idx} is smaller than min val `{min_val}`!'
if max_val is not None:
assert idx <= max_val, f'{idx} is larger than max val `{max_val}`!'
return indices
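# For example, parse_indices('0-2, 5') returns [0, 1, 2, 5], and
# parse_indices(None) returns [].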
def interpolate(src_codes, dst_codes, step=5):
"""Interpolates two sets of latent codes linearly.
Args:
src_codes: Source codes, with shape [num, *code_shape].
dst_codes: Target codes, with shape [num, *code_shape].
step: Number of interplolation steps, with source and target included. For
example, if `step = 5`, three more samples will be inserted. (default: 5)
Returns:
Interpolated codes, with shape [num, step, *code_shape].
Raises:
ValueError: If the input two sets of latent codes are with different shapes.
"""
if not (src_codes.ndim >= 2 and src_codes.shape == dst_codes.shape):
raise ValueError(f'Shapes of source codes and target codes should both be '
f'[num, *code_shape], but {src_codes.shape} and '
f'{dst_codes.shape} are received!')
num = src_codes.shape[0]
code_shape = src_codes.shape[1:]
a = src_codes[:, np.newaxis]
b = dst_codes[:, np.newaxis]
l = np.linspace(0.0, 1.0, step).reshape(
[step if axis == 1 else 1 for axis in range(a.ndim)])
results = a + l * (b - a)
assert results.shape == (num, step, *code_shape)
return results
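# For example, with `step=3` each pair of codes (a, b) yields the row
# [a, (a + b) / 2, b] along the new `step` axis.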
def mix_style(style_codes,
content_codes,
num_layers=1,
mix_layers=None,
is_style_layerwise=True,
is_content_layerwise=True):
"""Mixes styles from style codes to those of content codes.
Each style code or content code consists of `num_layers` codes, each of which
is typically fed into a particular layer of the generator. This function mixes
styles by partially replacing the codes of `content_codes` from some certain
layers with those of `style_codes`.
For example, if both style code and content code are with shape [10, 512],
meaning to have 10 layers and each employs a 512-dimensional latent code. And
the 1st, 2nd, and 3rd layers are the target layers to perform style mixing.
Then the top half of the content code (with shape [3, 512]) will be replaced
by the top half of the style code (also with shape [3, 512]).
NOTE: This function also supports taking single-layer latent codes as inputs,
i.e., setting `is_style_layerwise` or `is_content_layerwise` as False. In this
case, the corresponding code will be first repeated for `num_layers` before
performing style mixing.
Args:
style_codes: Style codes, with shape [num_styles, *code_shape] or
[num_styles, num_layers, *code_shape].
content_codes: Content codes, with shape [num_contents, *code_shape] or
[num_contents, num_layers, *code_shape].
num_layers: Total number of layers in the generative model. (default: 1)
mix_layers: Indices of the layers to perform style mixing. `None` means to
replace all layers, in which case the content code will be completely
replaced by style code. (default: None)
is_style_layerwise: Indicating whether the input `style_codes` are
layer-wise codes. (default: True)
is_content_layerwise: Indicating whether the input `content_codes` are
layer-wise codes. (default: True)
num_layers
Returns:
Codes after style mixing, with shape [num_styles, num_contents, num_layers,
*code_shape].
Raises:
ValueError: If input `content_codes` or `style_codes` is with invalid shape.
"""
if not is_style_layerwise:
style_codes = style_codes[:, np.newaxis]
style_codes = np.tile(
style_codes,
[num_layers if axis == 1 else 1 for axis in range(style_codes.ndim)])
if not is_content_layerwise:
content_codes = content_codes[:, np.newaxis]
content_codes = np.tile(
content_codes,
[num_layers if axis == 1 else 1 for axis in range(content_codes.ndim)])
if not (style_codes.ndim >= 3 and style_codes.shape[1] == num_layers and
style_codes.shape[1:] == content_codes.shape[1:]):
raise ValueError(f'Shapes of style codes and content codes should be '
f'[num_styles, num_layers, *code_shape] and '
f'[num_contents, num_layers, *code_shape] respectively, '
f'but {style_codes.shape} and {content_codes.shape} are '
f'received!')
layer_indices = parse_indices(mix_layers, min_val=0, max_val=num_layers - 1)
if not layer_indices:
layer_indices = list(range(num_layers))
num_styles = style_codes.shape[0]
num_contents = content_codes.shape[0]
code_shape = content_codes.shape[2:]
s = style_codes[:, np.newaxis]
s = np.tile(s, [num_contents if axis == 1 else 1 for axis in range(s.ndim)])
c = content_codes[np.newaxis]
c = np.tile(c, [num_styles if axis == 0 else 1 for axis in range(c.ndim)])
from_style = np.zeros(s.shape, dtype=bool)
from_style[:, :, layer_indices] = True
results = np.where(from_style, s, c)
assert results.shape == (num_styles, num_contents, num_layers, *code_shape)
return results
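# A minimal, self-contained sketch of calling `mix_style()`; the helper name
# `_mix_style_demo` and the 2/3-code, 10-layer, 512-d shapes are illustrative
# assumptions rather than part of the original API.
def _mix_style_demo():
  """Mixes the first three of ten layers of random codes (illustrative only)."""
  styles = np.random.randn(2, 512)
  contents = np.random.randn(3, 512)
  mixed = mix_style(styles, contents,
                    num_layers=10,
                    mix_layers='0-2',
                    is_style_layerwise=False,
                    is_content_layerwise=False)
  assert mixed.shape == (2, 3, 10, 512)
  return mixed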
def get_layerwise_manipulation_strength(num_layers,
truncation_psi,
truncation_layers):
"""Gets layer-wise strength for manipulation.
Recall the truncation trick played on layer [0, truncation_layers):
w = truncation_psi * w + (1 - truncation_psi) * w_avg
So, when using the same boundary to manipulate different layers, layer
[0, truncation_layers) and layer [truncation_layers, num_layers) should use
different strength to eliminate the effect from the truncation trick. More
concretely, the strength for layer [0, truncation_layers) is set as
`truncation_psi`, while that for other layers are set as 1.
"""
strength = [1.0 for _ in range(num_layers)]
if truncation_layers > 0:
for layer_idx in range(0, truncation_layers):
strength[layer_idx] = truncation_psi
return strength
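# For example, get_layerwise_manipulation_strength(4, 0.7, 2) returns
# [0.7, 0.7, 1.0, 1.0]: the two truncated layers are manipulated with
# strength 0.7 and the remaining layers with strength 1.0.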
def manipulate(latent_codes,
boundary,
start_distance=-5.0,
end_distance=5.0,
step=21,
layerwise_manipulation=False,
num_layers=1,
manipulate_layers=None,
is_code_layerwise=False,
is_boundary_layerwise=False,
layerwise_manipulation_strength=1.0):
"""Manipulates the given latent codes with respect to a particular boundary.
Basically, this function takes a set of latent codes and a boundary as inputs,
and outputs a collection of manipulated latent codes.
For example, let `step` to be 10, `latent_codes` to be with shape [num,
*code_shape], and `boundary` to be with shape [1, *code_shape] and unit norm.
Then the output will be with shape [num, 10, *code_shape]. For each 10-element
manipulated codes, the first code is `start_distance` away from the original
code (i.e., the input) along the `boundary` direction, while the last code is
`end_distance` away. Remaining codes are linearly interpolated. Here,
`distance` is sign sensitive.
NOTE: This function also supports layer-wise manipulation, in which case the
generator should be able to take layer-wise latent codes as inputs. For
example, if the generator has 18 convolutional layers in total, and each of
which takes an independent latent code as input. It is possible, sometimes
with even better performance, to only partially manipulate these latent codes
corresponding to some certain layers yet keeping others untouched.
NOTE: Boundary is assumed to be normalized to unit norm already.
Args:
latent_codes: The input latent codes for manipulation, with shape
[num, *code_shape] or [num, num_layers, *code_shape].
boundary: The semantic boundary as reference, with shape [1, *code_shape] or
[1, num_layers, *code_shape].
start_distance: Start point for manipulation. (default: -5.0)
end_distance: End point for manipulation. (default: 5.0)
step: Number of manipulation steps. (default: 21)
layerwise_manipulation: Whether to perform layer-wise manipulation.
(default: False)
num_layers: Number of layers. Only active when `layerwise_manipulation` is
set as `True`. Should be a positive integer. (default: 1)
manipulate_layers: Indices of the layers to perform manipulation. `None`
means to manipulate latent codes from all layers. (default: None)
is_code_layerwise: Whether the input latent codes are layer-wise. If set as
`False`, the function will first repeat the input codes for `num_layers`
times before perform manipulation. (default: False)
is_boundary_layerwise: Whether the input boundary is layer-wise. If set as
`False`, the function will first repeat boundary for `num_layers` times
before perform manipulation. (default: False)
layerwise_manipulation_strength: Manipulation strength for each layer. Only
active when `layerwise_manipulation` is set as `True`. This field can be
used to resolve the strength discrepancy across layers when truncation
trick is on. See function `get_layerwise_manipulation_strength()` for
details. A tuple, list, or `numpy.ndarray` is expected. If set as a single
number, this strength will be used for all layers. (default: 1.0)
Returns:
Manipulated codes, with shape [num, step, *code_shape] if
`layerwise_manipulation` is set as `False`, or shape [num, step,
num_layers, *code_shape] if `layerwise_manipulation` is set as `True`.
Raises:
ValueError: If the input latent codes, boundary, or strength are with
invalid shape.
"""
if not (boundary.ndim >= 2 and boundary.shape[0] == 1):
raise ValueError(f'Boundary should be with shape [1, *code_shape] or '
f'[1, num_layers, *code_shape], but '
f'{boundary.shape} is received!')
if not layerwise_manipulation:
assert not is_code_layerwise
assert not is_boundary_layerwise
num_layers = 1
manipulate_layers = None
layerwise_manipulation_strength = 1.0
# Preprocessing for layer-wise manipulation.
# Parse indices of manipulation layers.
layer_indices = parse_indices(
manipulate_layers, min_val=0, max_val=num_layers - 1)
if not layer_indices:
layer_indices = list(range(num_layers))
# Make latent codes layer-wise if needed.
assert num_layers > 0
if not is_code_layerwise:
x = latent_codes[:, np.newaxis]
x = np.tile(x, [num_layers if axis == 1 else 1 for axis in range(x.ndim)])
else:
x = latent_codes
if x.shape[1] != num_layers:
raise ValueError(f'Latent codes should be with shape [num, num_layers, '
f'*code_shape], where `num_layers` equals to '
f'{num_layers}, but {x.shape} is received!')
# Make boundary layer-wise if needed.
if not is_boundary_layerwise:
b = boundary
b = np.tile(b, [num_layers if axis == 0 else 1 for axis in range(b.ndim)])
else:
b = boundary[0]
if b.shape[0] != num_layers:
raise ValueError(f'Boundary should be with shape [num_layers, '
f'*code_shape], where `num_layers` equals to '
f'{num_layers}, but {b.shape} is received!')
# Get layer-wise manipulation strength.
if isinstance(layerwise_manipulation_strength, (int, float)):
s = [float(layerwise_manipulation_strength) for _ in range(num_layers)]
elif isinstance(layerwise_manipulation_strength, (list, tuple)):
s = layerwise_manipulation_strength
if len(s) != num_layers:
raise ValueError(f'Shape of layer-wise manipulation strength `{len(s)}` '
f'mismatches number of layers `{num_layers}`!')
elif isinstance(layerwise_manipulation_strength, np.ndarray):
s = layerwise_manipulation_strength
if s.size != num_layers:
raise ValueError(f'Shape of layer-wise manipulation strength `{s.size}` '
f'mismatches number of layers `{num_layers}`!')
else:
raise ValueError(f'Unsupported type of `layerwise_manipulation_strength`!')
s = np.array(s).reshape(
[num_layers if axis == 0 else 1 for axis in range(b.ndim)])
b = b * s
if x.shape[1:] != b.shape:
raise ValueError(f'Latent code shape {x.shape} and boundary shape '
f'{b.shape} mismatch!')
num = x.shape[0]
code_shape = x.shape[2:]
x = x[:, np.newaxis]
b = b[np.newaxis, np.newaxis, :]
l = np.linspace(start_distance, end_distance, step).reshape(
[step if axis == 1 else 1 for axis in range(x.ndim)])
results = np.tile(x, [step if axis == 1 else 1 for axis in range(x.ndim)])
is_manipulatable = np.zeros(results.shape, dtype=bool)
is_manipulatable[:, :, layer_indices] = True
results = np.where(is_manipulatable, x + l * b, results)
assert results.shape == (num, step, num_layers, *code_shape)
return results if layerwise_manipulation else results[:, :, 0]
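# A minimal, self-contained sketch of calling `manipulate()` on single-layer
# codes; the helper name `_manipulate_demo`, the 4x512 random codes and the
# random boundary are illustrative assumptions, not part of the original API.
def _manipulate_demo():
  """Moves 4 random 512-d codes along a random unit-norm boundary in 7 steps."""
  codes = np.random.randn(4, 512)
  boundary = np.random.randn(1, 512)
  boundary = boundary / np.linalg.norm(boundary)
  edited = manipulate(codes, boundary,
                      start_distance=-3.0, end_distance=3.0, step=7)
  assert edited.shape == (4, 7, 512)
  return edited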
def parse_boundary_list(boundary_list_path):
"""Parses boundary list.
Sometimes, a text file containing a list of boundaries will significantly
simplify image manipulation with a large amount of boundaries. This function
is used to parse boundary information from such list file.
Basically, each item in the list should be with format
`($NAME, $SPACE_TYPE): $PATH`. `DISABLE` at the beginning of the line can
disable a particular boundary.
Sample:
(age, z): $AGE_BOUNDARY_PATH
(gender, w): $GENDER_BOUNDARY_PATH
DISABLE(pose, wp): $POSE_BOUNDARY_PATH
Args:
boundary_list_path: Path to the boundary list.
Returns:
A dictionary, whose key is a two-element tuple (boundary_name, space_type)
and value is the corresponding boundary path.
Raise:
ValueError: If the given boundary list does not exist.
"""
if not os.path.isfile(boundary_list_path):
raise ValueError(f'Boundary list `boundary_list_path` does not exist!')
boundaries = {}
with open(boundary_list_path, 'r') as f:
for line in f:
if line[:len('DISABLE')] == 'DISABLE':
continue
boundary_info, boundary_path = line.strip().split(':')
boundary_name, space_type = boundary_info.strip()[1:-1].split(',')
boundary_name = boundary_name.strip()
space_type = space_type.strip().lower()
boundary_path = boundary_path.strip()
boundaries[(boundary_name, space_type)] = boundary_path
return boundaries
| # python 3.7
"""Utility functions for image editing from latent space."""
import os.path
import numpy as np
__all__ = [
'parse_indices', 'interpolate', 'mix_style',
'get_layerwise_manipulation_strength', 'manipulate', 'parse_boundary_list'
]
def parse_indices(obj, min_val=None, max_val=None):
"""Parses indices.
If the input is a list or tuple, this function has no effect.
The input can also be a string, which is either a comma separated list of
numbers 'a, b, c', or a dash separated range 'a - c'. Space in the string will
be ignored.
Args:
obj: The input object to parse indices from.
min_val: If not `None`, this function will check that all indices are equal
to or larger than this value. (default: None)
max_val: If not `None`, this function will check that all indices are equal
to or smaller than this field. (default: None)
Returns:
A list of integers.
Raises:
If the input is invalid, i.e., neither a list or tuple, nor a string.
"""
if obj is None or obj == '':
indices = []
elif isinstance(obj, int):
indices = [obj]
elif isinstance(obj, (list, tuple, np.ndarray)):
indices = list(obj)
elif isinstance(obj, str):
indices = []
splits = obj.replace(' ', '').split(',')
for split in splits:
numbers = list(map(int, split.split('-')))
if len(numbers) == 1:
indices.append(numbers[0])
elif len(numbers) == 2:
indices.extend(list(range(numbers[0], numbers[1] + 1)))
else:
raise ValueError(f'Invalid type of input: {type(obj)}!')
assert isinstance(indices, list)
indices = sorted(list(set(indices)))
for idx in indices:
assert isinstance(idx, int)
if min_val is not None:
assert idx >= min_val, f'{idx} is smaller than min val `{min_val}`!'
if max_val is not None:
assert idx <= max_val, f'{idx} is larger than max val `{max_val}`!'
return indices
def interpolate(src_codes, dst_codes, step=5):
"""Interpolates two sets of latent codes linearly.
Args:
src_codes: Source codes, with shape [num, *code_shape].
dst_codes: Target codes, with shape [num, *code_shape].
step: Number of interplolation steps, with source and target included. For
example, if `step = 5`, three more samples will be inserted. (default: 5)
Returns:
Interpolated codes, with shape [num, step, *code_shape].
Raises:
ValueError: If the input two sets of latent codes are with different shapes.
"""
if not (src_codes.ndim >= 2 and src_codes.shape == dst_codes.shape):
raise ValueError(f'Shapes of source codes and target codes should both be '
f'[num, *code_shape], but {src_codes.shape} and '
f'{dst_codes.shape} are received!')
num = src_codes.shape[0]
code_shape = src_codes.shape[1:]
a = src_codes[:, np.newaxis]
b = dst_codes[:, np.newaxis]
l = np.linspace(0.0, 1.0, step).reshape(
[step if axis == 1 else 1 for axis in range(a.ndim)])
results = a + l * (b - a)
assert results.shape == (num, step, *code_shape)
return results
def mix_style(style_codes,
content_codes,
num_layers=1,
mix_layers=None,
is_style_layerwise=True,
is_content_layerwise=True):
"""Mixes styles from style codes to those of content codes.
Each style code or content code consists of `num_layers` codes, each of which
is typically fed into a particular layer of the generator. This function mixes
styles by partially replacing the codes of `content_codes` from some certain
layers with those of `style_codes`.
For example, if both style code and content code are with shape [10, 512],
meaning to have 10 layers and each employs a 512-dimensional latent code. And
the 1st, 2nd, and 3rd layers are the target layers to perform style mixing.
Then the top half of the content code (with shape [3, 512]) will be replaced
by the top half of the style code (also with shape [3, 512]).
NOTE: This function also supports taking single-layer latent codes as inputs,
i.e., setting `is_style_layerwise` or `is_content_layerwise` as False. In this
case, the corresponding code will be first repeated for `num_layers` before
performing style mixing.
Args:
style_codes: Style codes, with shape [num_styles, *code_shape] or
[num_styles, num_layers, *code_shape].
content_codes: Content codes, with shape [num_contents, *code_shape] or
[num_contents, num_layers, *code_shape].
num_layers: Total number of layers in the generative model. (default: 1)
mix_layers: Indices of the layers to perform style mixing. `None` means to
replace all layers, in which case the content code will be completely
replaced by style code. (default: None)
is_style_layerwise: Indicating whether the input `style_codes` are
layer-wise codes. (default: True)
is_content_layerwise: Indicating whether the input `content_codes` are
layer-wise codes. (default: True)
num_layers
Returns:
Codes after style mixing, with shape [num_styles, num_contents, num_layers,
*code_shape].
Raises:
ValueError: If input `content_codes` or `style_codes` is with invalid shape.
"""
if not is_style_layerwise:
style_codes = style_codes[:, np.newaxis]
style_codes = np.tile(
style_codes,
[num_layers if axis == 1 else 1 for axis in range(style_codes.ndim)])
if not is_content_layerwise:
content_codes = content_codes[:, np.newaxis]
content_codes = np.tile(
content_codes,
[num_layers if axis == 1 else 1 for axis in range(content_codes.ndim)])
if not (style_codes.ndim >= 3 and style_codes.shape[1] == num_layers and
style_codes.shape[1:] == content_codes.shape[1:]):
raise ValueError(f'Shapes of style codes and content codes should be '
f'[num_styles, num_layers, *code_shape] and '
f'[num_contents, num_layers, *code_shape] respectively, '
f'but {style_codes.shape} and {content_codes.shape} are '
f'received!')
layer_indices = parse_indices(mix_layers, min_val=0, max_val=num_layers - 1)
if not layer_indices:
layer_indices = list(range(num_layers))
num_styles = style_codes.shape[0]
num_contents = content_codes.shape[0]
code_shape = content_codes.shape[2:]
s = style_codes[:, np.newaxis]
s = np.tile(s, [num_contents if axis == 1 else 1 for axis in range(s.ndim)])
c = content_codes[np.newaxis]
c = np.tile(c, [num_styles if axis == 0 else 1 for axis in range(c.ndim)])
from_style = np.zeros(s.shape, dtype=bool)
from_style[:, :, layer_indices] = True
results = np.where(from_style, s, c)
assert results.shape == (num_styles, num_contents, num_layers, *code_shape)
return results
def get_layerwise_manipulation_strength(num_layers,
truncation_psi,
truncation_layers):
"""Gets layer-wise strength for manipulation.
Recall the truncation trick played on layer [0, truncation_layers):
w = truncation_psi * w + (1 - truncation_psi) * w_avg
So, when using the same boundary to manipulate different layers, layer
[0, truncation_layers) and layer [truncation_layers, num_layers) should use
different strength to eliminate the effect from the truncation trick. More
concretely, the strength for layer [0, truncation_layers) is set as
`truncation_psi`, while that for other layers are set as 1.
"""
strength = [1.0 for _ in range(num_layers)]
if truncation_layers > 0:
for layer_idx in range(0, truncation_layers):
strength[layer_idx] = truncation_psi
return strength
def manipulate(latent_codes,
boundary,
start_distance=-5.0,
end_distance=5.0,
step=21,
layerwise_manipulation=False,
num_layers=1,
manipulate_layers=None,
is_code_layerwise=False,
is_boundary_layerwise=False,
layerwise_manipulation_strength=1.0):
"""Manipulates the given latent codes with respect to a particular boundary.
Basically, this function takes a set of latent codes and a boundary as inputs,
and outputs a collection of manipulated latent codes.
For example, let `step` to be 10, `latent_codes` to be with shape [num,
*code_shape], and `boundary` to be with shape [1, *code_shape] and unit norm.
Then the output will be with shape [num, 10, *code_shape]. For each 10-element
manipulated codes, the first code is `start_distance` away from the original
code (i.e., the input) along the `boundary` direction, while the last code is
`end_distance` away. Remaining codes are linearly interpolated. Here,
`distance` is sign sensitive.
NOTE: This function also supports layer-wise manipulation, in which case the
generator should be able to take layer-wise latent codes as inputs. For
example, if the generator has 18 convolutional layers in total, and each of
which takes an independent latent code as input. It is possible, sometimes
with even better performance, to only partially manipulate these latent codes
corresponding to some certain layers yet keeping others untouched.
NOTE: Boundary is assumed to be normalized to unit norm already.
Args:
latent_codes: The input latent codes for manipulation, with shape
[num, *code_shape] or [num, num_layers, *code_shape].
boundary: The semantic boundary as reference, with shape [1, *code_shape] or
[1, num_layers, *code_shape].
start_distance: Start point for manipulation. (default: -5.0)
end_distance: End point for manipulation. (default: 5.0)
step: Number of manipulation steps. (default: 21)
layerwise_manipulation: Whether to perform layer-wise manipulation.
(default: False)
num_layers: Number of layers. Only active when `layerwise_manipulation` is
set as `True`. Should be a positive integer. (default: 1)
manipulate_layers: Indices of the layers to perform manipulation. `None`
means to manipulate latent codes from all layers. (default: None)
is_code_layerwise: Whether the input latent codes are layer-wise. If set as
`False`, the function will first repeat the input codes for `num_layers`
times before perform manipulation. (default: False)
is_boundary_layerwise: Whether the input boundary is layer-wise. If set as
`False`, the function will first repeat boundary for `num_layers` times
before perform manipulation. (default: False)
layerwise_manipulation_strength: Manipulation strength for each layer. Only
active when `layerwise_manipulation` is set as `True`. This field can be
used to resolve the strength discrepancy across layers when truncation
trick is on. See function `get_layerwise_manipulation_strength()` for
details. A tuple, list, or `numpy.ndarray` is expected. If set as a single
number, this strength will be used for all layers. (default: 1.0)
Returns:
Manipulated codes, with shape [num, step, *code_shape] if
`layerwise_manipulation` is set as `False`, or shape [num, step,
num_layers, *code_shape] if `layerwise_manipulation` is set as `True`.
Raises:
ValueError: If the input latent codes, boundary, or strength are with
invalid shape.
"""
if not (boundary.ndim >= 2 and boundary.shape[0] == 1):
raise ValueError(f'Boundary should be with shape [1, *code_shape] or '
f'[1, num_layers, *code_shape], but '
f'{boundary.shape} is received!')
if not layerwise_manipulation:
assert not is_code_layerwise
assert not is_boundary_layerwise
num_layers = 1
manipulate_layers = None
layerwise_manipulation_strength = 1.0
# Preprocessing for layer-wise manipulation.
# Parse indices of manipulation layers.
layer_indices = parse_indices(
manipulate_layers, min_val=0, max_val=num_layers - 1)
if not layer_indices:
layer_indices = list(range(num_layers))
# Make latent codes layer-wise if needed.
assert num_layers > 0
if not is_code_layerwise:
x = latent_codes[:, np.newaxis]
x = np.tile(x, [num_layers if axis == 1 else 1 for axis in range(x.ndim)])
else:
x = latent_codes
if x.shape[1] != num_layers:
raise ValueError(f'Latent codes should be with shape [num, num_layers, '
f'*code_shape], where `num_layers` equals to '
f'{num_layers}, but {x.shape} is received!')
# Make boundary layer-wise if needed.
if not is_boundary_layerwise:
b = boundary
b = np.tile(b, [num_layers if axis == 0 else 1 for axis in range(b.ndim)])
else:
b = boundary[0]
if b.shape[0] != num_layers:
raise ValueError(f'Boundary should be with shape [num_layers, '
f'*code_shape], where `num_layers` equals to '
f'{num_layers}, but {b.shape} is received!')
# Get layer-wise manipulation strength.
if isinstance(layerwise_manipulation_strength, (int, float)):
s = [float(layerwise_manipulation_strength) for _ in range(num_layers)]
elif isinstance(layerwise_manipulation_strength, (list, tuple)):
s = layerwise_manipulation_strength
if len(s) != num_layers:
raise ValueError(f'Shape of layer-wise manipulation strength `{len(s)}` '
f'mismatches number of layers `{num_layers}`!')
elif isinstance(layerwise_manipulation_strength, np.ndarray):
s = layerwise_manipulation_strength
if s.size != num_layers:
raise ValueError(f'Shape of layer-wise manipulation strength `{s.size}` '
f'mismatches number of layers `{num_layers}`!')
else:
raise ValueError(f'Unsupported type of `layerwise_manipulation_strength`!')
s = np.array(s).reshape(
[num_layers if axis == 0 else 1 for axis in range(b.ndim)])
b = b * s
if x.shape[1:] != b.shape:
raise ValueError(f'Latent code shape {x.shape} and boundary shape '
f'{b.shape} mismatch!')
num = x.shape[0]
code_shape = x.shape[2:]
x = x[:, np.newaxis]
b = b[np.newaxis, np.newaxis, :]
l = np.linspace(start_distance, end_distance, step).reshape(
[step if axis == 1 else 1 for axis in range(x.ndim)])
results = np.tile(x, [step if axis == 1 else 1 for axis in range(x.ndim)])
is_manipulatable = np.zeros(results.shape, dtype=bool)
is_manipulatable[:, :, layer_indices] = True
results = np.where(is_manipulatable, x + l * b, results)
assert results.shape == (num, step, num_layers, *code_shape)
return results if layerwise_manipulation else results[:, :, 0]
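# Illustrative usage sketch (not part of the original module): how
# `manipulate()` might be called for a plain, non-layer-wise edit. The
# boundary is assumed to be L2-normalized, as the docstring requires, and the
# helper name `_demo_manipulate` is hypothetical.
def _demo_manipulate():
  """Runs `manipulate()` on random codes along a random unit-norm boundary."""
  latent_codes = np.random.randn(4, 512)   # [num, *code_shape]
  boundary = np.random.randn(1, 512)       # [1, *code_shape]
  boundary /= np.linalg.norm(boundary)     # unit norm, as assumed above
  edited = manipulate(latent_codes,
                      boundary,
                      start_distance=-3.0,
                      end_distance=3.0,
                      step=7)
  assert edited.shape == (4, 7, 512)       # [num, step, *code_shape]
  return edited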
def parse_boundary_list(boundary_list_path):
"""Parses boundary list.
  Sometimes, a text file containing a list of boundaries will significantly
  simplify image manipulation with a large number of boundaries. This function
  parses boundary information from such a list file.
  Basically, each item in the list should follow the format
  `($NAME, $SPACE_TYPE): $PATH`. Prefixing a line with `DISABLE` disables that
  particular boundary.
Sample:
(age, z): $AGE_BOUNDARY_PATH
(gender, w): $GENDER_BOUNDARY_PATH
DISABLE(pose, wp): $POSE_BOUNDARY_PATH
Args:
boundary_list_path: Path to the boundary list.
Returns:
    A dictionary whose keys are `(boundary_name, space_type)` tuples and whose
    values are the corresponding boundary paths.
  Raises:
    ValueError: If the given boundary list does not exist.
"""
  if not os.path.isfile(boundary_list_path):
    raise ValueError(f'Boundary list `{boundary_list_path}` does not exist!')
  boundaries = {}
  with open(boundary_list_path, 'r') as f:
    for line in f:
      line = line.strip()
      if not line or line.startswith('DISABLE'):
        continue
      boundary_info, boundary_path = line.split(':', 1)
boundary_name, space_type = boundary_info.strip()[1:-1].split(',')
boundary_name = boundary_name.strip()
space_type = space_type.strip().lower()
boundary_path = boundary_path.strip()
boundaries[(boundary_name, space_type)] = boundary_path
return boundaries | en | 0.794306 | # python 3.7 Utility functions for image editing from latent space. Parses indices. If the input is a list or tuple, this function has no effect. The input can also be a string, which is either a comma separated list of numbers 'a, b, c', or a dash separated range 'a - c'. Space in the string will be ignored. Args: obj: The input object to parse indices from. min_val: If not `None`, this function will check that all indices are equal to or larger than this value. (default: None) max_val: If not `None`, this function will check that all indices are equal to or smaller than this field. (default: None) Returns: A list of integers. Raises: If the input is invalid, i.e., neither a list or tuple, nor a string. Interpolates two sets of latent codes linearly. Args: src_codes: Source codes, with shape [num, *code_shape]. dst_codes: Target codes, with shape [num, *code_shape]. step: Number of interplolation steps, with source and target included. For example, if `step = 5`, three more samples will be inserted. (default: 5) Returns: Interpolated codes, with shape [num, step, *code_shape]. Raises: ValueError: If the input two sets of latent codes are with different shapes. Mixes styles from style codes to those of content codes. Each style code or content code consists of `num_layers` codes, each of which is typically fed into a particular layer of the generator. This function mixes styles by partially replacing the codes of `content_codes` from some certain layers with those of `style_codes`. For example, if both style code and content code are with shape [10, 512], meaning to have 10 layers and each employs a 512-dimensional latent code. And the 1st, 2nd, and 3rd layers are the target layers to perform style mixing. Then the top half of the content code (with shape [3, 512]) will be replaced by the top half of the style code (also with shape [3, 512]). NOTE: This function also supports taking single-layer latent codes as inputs, i.e., setting `is_style_layerwise` or `is_content_layerwise` as False. In this case, the corresponding code will be first repeated for `num_layers` before performing style mixing. Args: style_codes: Style codes, with shape [num_styles, *code_shape] or [num_styles, num_layers, *code_shape]. content_codes: Content codes, with shape [num_contents, *code_shape] or [num_contents, num_layers, *code_shape]. num_layers: Total number of layers in the generative model. (default: 1) mix_layers: Indices of the layers to perform style mixing. `None` means to replace all layers, in which case the content code will be completely replaced by style code. (default: None) is_style_layerwise: Indicating whether the input `style_codes` are layer-wise codes. (default: True) is_content_layerwise: Indicating whether the input `content_codes` are layer-wise codes. (default: True) num_layers Returns: Codes after style mixing, with shape [num_styles, num_contents, num_layers, *code_shape]. Raises: ValueError: If input `content_codes` or `style_codes` is with invalid shape. Gets layer-wise strength for manipulation. Recall the truncation trick played on layer [0, truncation_layers): w = truncation_psi * w + (1 - truncation_psi) * w_avg So, when using the same boundary to manipulate different layers, layer [0, truncation_layers) and layer [truncation_layers, num_layers) should use different strength to eliminate the effect from the truncation trick. 
More concretely, the strength for layer [0, truncation_layers) is set as `truncation_psi`, while that for other layers are set as 1. Manipulates the given latent codes with respect to a particular boundary. Basically, this function takes a set of latent codes and a boundary as inputs, and outputs a collection of manipulated latent codes. For example, let `step` to be 10, `latent_codes` to be with shape [num, *code_shape], and `boundary` to be with shape [1, *code_shape] and unit norm. Then the output will be with shape [num, 10, *code_shape]. For each 10-element manipulated codes, the first code is `start_distance` away from the original code (i.e., the input) along the `boundary` direction, while the last code is `end_distance` away. Remaining codes are linearly interpolated. Here, `distance` is sign sensitive. NOTE: This function also supports layer-wise manipulation, in which case the generator should be able to take layer-wise latent codes as inputs. For example, if the generator has 18 convolutional layers in total, and each of which takes an independent latent code as input. It is possible, sometimes with even better performance, to only partially manipulate these latent codes corresponding to some certain layers yet keeping others untouched. NOTE: Boundary is assumed to be normalized to unit norm already. Args: latent_codes: The input latent codes for manipulation, with shape [num, *code_shape] or [num, num_layers, *code_shape]. boundary: The semantic boundary as reference, with shape [1, *code_shape] or [1, num_layers, *code_shape]. start_distance: Start point for manipulation. (default: -5.0) end_distance: End point for manipulation. (default: 5.0) step: Number of manipulation steps. (default: 21) layerwise_manipulation: Whether to perform layer-wise manipulation. (default: False) num_layers: Number of layers. Only active when `layerwise_manipulation` is set as `True`. Should be a positive integer. (default: 1) manipulate_layers: Indices of the layers to perform manipulation. `None` means to manipulate latent codes from all layers. (default: None) is_code_layerwise: Whether the input latent codes are layer-wise. If set as `False`, the function will first repeat the input codes for `num_layers` times before perform manipulation. (default: False) is_boundary_layerwise: Whether the input boundary is layer-wise. If set as `False`, the function will first repeat boundary for `num_layers` times before perform manipulation. (default: False) layerwise_manipulation_strength: Manipulation strength for each layer. Only active when `layerwise_manipulation` is set as `True`. This field can be used to resolve the strength discrepancy across layers when truncation trick is on. See function `get_layerwise_manipulation_strength()` for details. A tuple, list, or `numpy.ndarray` is expected. If set as a single number, this strength will be used for all layers. (default: 1.0) Returns: Manipulated codes, with shape [num, step, *code_shape] if `layerwise_manipulation` is set as `False`, or shape [num, step, num_layers, *code_shape] if `layerwise_manipulation` is set as `True`. Raises: ValueError: If the input latent codes, boundary, or strength are with invalid shape. # Preprocessing for layer-wise manipulation. # Parse indices of manipulation layers. # Make latent codes layer-wise if needed. # Make boundary layer-wise if needed. # Get layer-wise manipulation strength. Parses boundary list. 
Sometimes, a text file containing a list of boundaries will significantly simplify image manipulation with a large amount of boundaries. This function is used to parse boundary information from such list file. Basically, each item in the list should be with format `($NAME, $SPACE_TYPE): $PATH`. `DISABLE` at the beginning of the line can disable a particular boundary. Sample: (age, z): $AGE_BOUNDARY_PATH (gender, w): $GENDER_BOUNDARY_PATH DISABLE(pose, wp): $POSE_BOUNDARY_PATH Args: boundary_list_path: Path to the boundary list. Returns: A dictionary, whose key is a two-element tuple (boundary_name, space_type) and value is the corresponding boundary path. Raise: ValueError: If the given boundary list does not exist. | 3.312094 | 3 |
venv/lib/python3.8/site-packages/pyls/_version.py | Retraces/UkraineBot | 2 | 10573 | /home/runner/.cache/pip/pool/24/e8/39/183700a0b2d2a9545f3da2571d82b53df290aab3a51dc229b113d16e6c | /home/runner/.cache/pip/pool/24/e8/39/183700a0b2d2a9545f3da2571d82b53df290aab3a51dc229b113d16e6c | none | 1 | 0.659773 | 1 |
|
pymoo/util/normalization.py | Electr0phile/pymoo | 1 | 10574 | import numpy as np
def denormalize(x, x_min, x_max):
if x_max is None:
_range = 1
else:
_range = (x_max - x_min)
return x * _range + x_min
def normalize(x, x_min=None, x_max=None, return_bounds=False, estimate_bounds_if_none=True):
# if the bounds should be estimated if none do it for both
if estimate_bounds_if_none and x_min is None:
x_min = np.min(x, axis=0)
if estimate_bounds_if_none and x_max is None:
x_max = np.max(x, axis=0)
# if they are still none set them to default to avoid exception
    if x_min is None:
        x_min = np.zeros(x.shape[-1])
    if x_max is None:
        x_max = np.ones(x.shape[-1])
# calculate the denominator
denom = x_max - x_min
# we can not divide by zero -> plus small epsilon
denom += 1e-30
# normalize the actual values
N = (x - x_min) / denom
# return with or without bounds
if not return_bounds:
return N
else:
return N, x_min, x_max
def standardize(x, return_bounds=False):
mean = np.mean(x, axis=0)
std = np.std(x, axis=0)
# standardize
val = (x - mean) / std
if not return_bounds:
return val
else:
return val, mean, std
def destandardize(x, mean, std):
return (x * std) + mean
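# Illustrative usage sketch (not part of the original pymoo module): a simple
# round-trip check of the helpers above. The name `_demo_round_trip` is
# hypothetical and only shows the intended call pattern.
def _demo_round_trip():
    X = np.random.rand(10, 3) * 5.0
    # normalize/denormalize with bounds estimated from the data
    N, x_min, x_max = normalize(X, return_bounds=True)
    assert np.allclose(X, denormalize(N, x_min, x_max), atol=1e-6)
    # standardize/destandardize round trip
    S, mean, std = standardize(X, return_bounds=True)
    assert np.allclose(X, destandardize(S, mean, std), atol=1e-6)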
| import numpy as np
def denormalize(x, x_min, x_max):
if x_max is None:
_range = 1
else:
_range = (x_max - x_min)
return x * _range + x_min
def normalize(x, x_min=None, x_max=None, return_bounds=False, estimate_bounds_if_none=True):
# if the bounds should be estimated if none do it for both
if estimate_bounds_if_none and x_min is None:
x_min = np.min(x, axis=0)
if estimate_bounds_if_none and x_max is None:
x_max = np.max(x, axis=0)
# if they are still none set them to default to avoid exception
    if x_min is None:
        x_min = np.zeros(x.shape[-1])
    if x_max is None:
        x_max = np.ones(x.shape[-1])
# calculate the denominator
denom = x_max - x_min
# we can not divide by zero -> plus small epsilon
denom += 1e-30
# normalize the actual values
N = (x - x_min) / denom
# return with or without bounds
if not return_bounds:
return N
else:
return N, x_min, x_max
def standardize(x, return_bounds=False):
mean = np.mean(x, axis=0)
std = np.std(x, axis=0)
# standardize
val = (x - mean) / std
if not return_bounds:
return val
else:
return val, mean, std
def destandardize(x, mean, std):
return (x * std) + mean
| en | 0.757756 | # if the bounds should be estimated if none do it for both # if they are still none set them to default to avoid exception # calculate the denominator # we can not divide by zero -> plus small epsilon # normalize the actual values # return with or without bounds # standardize | 3.560162 | 4 |
alembic/versions/92235b77ea53_check_new.py | go-lab/appcomposer | 1 | 10575 | """Check new
Revision ID: 92235b77ea53
Revises: 381fdb66ec27
Create Date: 2017-10-14 02:38:51.007307
"""
# revision identifiers, used by Alembic.
revision = '92235b77ea53'
down_revision = '381fdb66ec27'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
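    # (Descriptive note, not auto-generated.) This revision drops the
    # previously named indexes and recreates them using Alembic's default
    # `op.f(...)` naming convention; the unique-constraint statements further
    # below remain commented out, as in the original migration.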
op.drop_index('ix_ActiveTranslationMessages_category', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_datetime', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_fmt', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_from_developer', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_key', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_namespace', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_position', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_same_tool', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_taken_from_default', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_tool_id', table_name='ActiveTranslationMessages')
op.drop_index('ix_Apps_composer', table_name='Apps')
op.drop_index('ix_Apps_creation_date', table_name='Apps')
op.drop_index('ix_Apps_last_access_date', table_name='Apps')
op.drop_index('ix_Apps_modification_date', table_name='Apps')
op.drop_index('ix_Apps_name', table_name='Apps')
op.drop_index('ix_Apps_owner_id', table_name='Apps')
op.drop_index('ix_Apps_unique_id', table_name='Apps')
op.drop_index('ix_GoLabOAuthUsers_display_name', table_name='GoLabOAuthUsers')
op.drop_index('ix_GoLabOAuthUsers_email', table_name='GoLabOAuthUsers')
op.drop_index('ix_Languages_language', table_name='Languages')
op.drop_index('ix_RepositoryApps_adaptable', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_contents_hash', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_downloaded_hash', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_external_id', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_failing', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_failing_since', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_change', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_check', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_download_change', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_processed_contents_hash', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_processed_downloaded_hash', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_processed_time', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_name', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_repository', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_translatable', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_url', table_name='RepositoryApps')
op.drop_index('ix_TranslatedApps_url', table_name='TranslatedApps')
op.drop_index('ix_TranslationBundles_from_developer', table_name='TranslationBundles')
op.drop_index('ix_TranslationBundles_language', table_name='TranslationBundles')
op.drop_index('ix_TranslationBundles_target', table_name='TranslationBundles')
op.drop_index('ix_TranslationCurrentActiveUsers_last_check', table_name='TranslationCurrentActiveUsers')
op.drop_index('ix_TranslationExternalSuggestions_engine', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationExternalSuggestions_human_key', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationExternalSuggestions_human_key_hash', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationExternalSuggestions_language', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationExternalSuggestions_origin_language', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationKeySuggestions_key', table_name='TranslationKeySuggestions')
op.drop_index('ix_TranslationKeySuggestions_language', table_name='TranslationKeySuggestions')
op.drop_index('ix_TranslationKeySuggestions_target', table_name='TranslationKeySuggestions')
op.drop_index('ix_TranslationMessageHistory_category', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_datetime', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_fmt', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_from_developer', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_key', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_namespace', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_parent_translation_id', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_position', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_same_tool', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_taken_from_default', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_tool_id', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationNotificationRecipients_created', table_name='TranslationNotificationRecipients')
op.drop_index('ix_TranslationNotificationRecipients_email', table_name='TranslationNotificationRecipients')
op.drop_index('ix_TranslationSubscriptions_last_check', table_name='TranslationSubscriptions')
op.drop_index('ix_TranslationSubscriptions_mechanism', table_name='TranslationSubscriptions')
op.drop_index('ix_TranslationSyncLogs_end_datetime', table_name='TranslationSyncLogs')
op.drop_index('ix_TranslationSyncLogs_start_datetime', table_name='TranslationSyncLogs')
op.drop_index('ix_TranslationUrls_automatic', table_name='TranslationUrls')
op.drop_index('ix_TranslationUrls_url', table_name='TranslationUrls')
op.drop_index('ix_TranslationValueSuggestions_human_key', table_name='TranslationValueSuggestions')
op.drop_index('ix_TranslationValueSuggestions_language', table_name='TranslationValueSuggestions')
op.drop_index('ix_TranslationValueSuggestions_target', table_name='TranslationValueSuggestions')
op.drop_index('ix_Users_creation_date', table_name='Users')
op.drop_index('ix_Users_last_access_date', table_name='Users')
op.create_index(op.f('ix_ActiveTranslationMessages_category'), 'ActiveTranslationMessages', ['category'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_datetime'), 'ActiveTranslationMessages', ['datetime'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_fmt'), 'ActiveTranslationMessages', ['fmt'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_from_developer'), 'ActiveTranslationMessages', ['from_developer'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_key'), 'ActiveTranslationMessages', ['key'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_namespace'), 'ActiveTranslationMessages', ['namespace'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_position'), 'ActiveTranslationMessages', ['position'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_same_tool'), 'ActiveTranslationMessages', ['same_tool'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_taken_from_default'), 'ActiveTranslationMessages', ['taken_from_default'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_tool_id'), 'ActiveTranslationMessages', ['tool_id'], unique=False)
op.create_index(op.f('ix_Apps_composer'), 'Apps', ['composer'], unique=False)
op.create_index(op.f('ix_Apps_creation_date'), 'Apps', ['creation_date'], unique=False)
op.create_index(op.f('ix_Apps_last_access_date'), 'Apps', ['last_access_date'], unique=False)
op.create_index(op.f('ix_Apps_modification_date'), 'Apps', ['modification_date'], unique=False)
op.create_index(op.f('ix_Apps_name'), 'Apps', ['name'], unique=False)
op.create_index(op.f('ix_Apps_owner_id'), 'Apps', ['owner_id'], unique=False)
op.create_index(op.f('ix_Apps_unique_id'), 'Apps', ['unique_id'], unique=True)
op.create_index(op.f('ix_GoLabOAuthUsers_display_name'), 'GoLabOAuthUsers', ['display_name'], unique=False)
op.create_index(op.f('ix_GoLabOAuthUsers_email'), 'GoLabOAuthUsers', ['email'], unique=True)
op.create_index(op.f('ix_Languages_language'), 'Languages', ['language'], unique=True)
op.create_index(op.f('ix_RepositoryApps_adaptable'), 'RepositoryApps', ['adaptable'], unique=False)
op.create_index(op.f('ix_RepositoryApps_contents_hash'), 'RepositoryApps', ['contents_hash'], unique=False)
op.create_index(op.f('ix_RepositoryApps_downloaded_hash'), 'RepositoryApps', ['downloaded_hash'], unique=False)
op.create_index(op.f('ix_RepositoryApps_external_id'), 'RepositoryApps', ['external_id'], unique=False)
op.create_index(op.f('ix_RepositoryApps_failing_since'), 'RepositoryApps', ['failing_since'], unique=False)
op.create_index(op.f('ix_RepositoryApps_failing'), 'RepositoryApps', ['failing'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_change'), 'RepositoryApps', ['last_change'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_check'), 'RepositoryApps', ['last_check'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_download_change'), 'RepositoryApps', ['last_download_change'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_processed_contents_hash'), 'RepositoryApps', ['last_processed_contents_hash'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_processed_downloaded_hash'), 'RepositoryApps', ['last_processed_downloaded_hash'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_processed_time'), 'RepositoryApps', ['last_processed_time'], unique=False)
op.create_index(op.f('ix_RepositoryApps_name'), 'RepositoryApps', ['name'], unique=False)
op.create_index(op.f('ix_RepositoryApps_repository'), 'RepositoryApps', ['repository'], unique=False)
op.create_index(op.f('ix_RepositoryApps_translatable'), 'RepositoryApps', ['translatable'], unique=False)
op.create_index(op.f('ix_RepositoryApps_url'), 'RepositoryApps', ['url'], unique=False)
op.create_index(op.f('ix_TranslatedApps_url'), 'TranslatedApps', ['url'], unique=True)
op.create_index(op.f('ix_TranslationBundles_from_developer'), 'TranslationBundles', ['from_developer'], unique=False)
op.create_index(op.f('ix_TranslationBundles_language'), 'TranslationBundles', ['language'], unique=False)
op.create_index(op.f('ix_TranslationBundles_target'), 'TranslationBundles', ['target'], unique=False)
op.create_index(op.f('ix_TranslationCurrentActiveUsers_last_check'), 'TranslationCurrentActiveUsers', ['last_check'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_engine'), 'TranslationExternalSuggestions', ['engine'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_human_key_hash'), 'TranslationExternalSuggestions', ['human_key_hash'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_human_key'), 'TranslationExternalSuggestions', ['human_key'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_language'), 'TranslationExternalSuggestions', ['language'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_origin_language'), 'TranslationExternalSuggestions', ['origin_language'], unique=False)
op.create_index(op.f('ix_TranslationKeySuggestions_key'), 'TranslationKeySuggestions', ['key'], unique=False)
op.create_index(op.f('ix_TranslationKeySuggestions_language'), 'TranslationKeySuggestions', ['language'], unique=False)
op.create_index(op.f('ix_TranslationKeySuggestions_target'), 'TranslationKeySuggestions', ['target'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_category'), 'TranslationMessageHistory', ['category'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_datetime'), 'TranslationMessageHistory', ['datetime'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_fmt'), 'TranslationMessageHistory', ['fmt'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_from_developer'), 'TranslationMessageHistory', ['from_developer'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_key'), 'TranslationMessageHistory', ['key'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_namespace'), 'TranslationMessageHistory', ['namespace'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_parent_translation_id'), 'TranslationMessageHistory', ['parent_translation_id'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_position'), 'TranslationMessageHistory', ['position'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_same_tool'), 'TranslationMessageHistory', ['same_tool'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_taken_from_default'), 'TranslationMessageHistory', ['taken_from_default'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_tool_id'), 'TranslationMessageHistory', ['tool_id'], unique=False)
op.create_index(op.f('ix_TranslationNotificationRecipients_created'), 'TranslationNotificationRecipients', ['created'], unique=False)
op.create_index(op.f('ix_TranslationNotificationRecipients_email'), 'TranslationNotificationRecipients', ['email'], unique=True)
op.create_index(op.f('ix_TranslationSubscriptions_last_check'), 'TranslationSubscriptions', ['last_check'], unique=False)
op.create_index(op.f('ix_TranslationSubscriptions_mechanism'), 'TranslationSubscriptions', ['mechanism'], unique=False)
op.create_index(op.f('ix_TranslationSyncLogs_end_datetime'), 'TranslationSyncLogs', ['end_datetime'], unique=False)
op.create_index(op.f('ix_TranslationSyncLogs_start_datetime'), 'TranslationSyncLogs', ['start_datetime'], unique=False)
op.create_index(op.f('ix_TranslationUrls_automatic'), 'TranslationUrls', ['automatic'], unique=False)
op.create_index(op.f('ix_TranslationUrls_url'), 'TranslationUrls', ['url'], unique=True)
op.create_index(op.f('ix_TranslationValueSuggestions_human_key'), 'TranslationValueSuggestions', ['human_key'], unique=False)
op.create_index(op.f('ix_TranslationValueSuggestions_language'), 'TranslationValueSuggestions', ['language'], unique=False)
op.create_index(op.f('ix_TranslationValueSuggestions_target'), 'TranslationValueSuggestions', ['target'], unique=False)
op.create_index(op.f('ix_Users_creation_date'), 'Users', ['creation_date'], unique=False)
op.create_index(op.f('ix_Users_last_access_date'), 'Users', ['last_access_date'], unique=False)
# op.create_unique_constraint(None, 'ActiveTranslationMessages', ['bundle_id', 'key'])
# op.create_unique_constraint(None, 'RepositoryApp2languages', ['repository_app_id', 'language_id'])
# op.create_unique_constraint(None, 'TranslationBundles', ['translation_url_id', 'language', 'target'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_Users_last_access_date'), table_name='Users')
op.drop_index(op.f('ix_Users_creation_date'), table_name='Users')
op.drop_index(op.f('ix_TranslationValueSuggestions_target'), table_name='TranslationValueSuggestions')
op.drop_index(op.f('ix_TranslationValueSuggestions_language'), table_name='TranslationValueSuggestions')
op.drop_index(op.f('ix_TranslationValueSuggestions_human_key'), table_name='TranslationValueSuggestions')
op.drop_index(op.f('ix_TranslationUrls_url'), table_name='TranslationUrls')
op.drop_index(op.f('ix_TranslationUrls_automatic'), table_name='TranslationUrls')
op.drop_index(op.f('ix_TranslationSyncLogs_start_datetime'), table_name='TranslationSyncLogs')
op.drop_index(op.f('ix_TranslationSyncLogs_end_datetime'), table_name='TranslationSyncLogs')
op.drop_index(op.f('ix_TranslationSubscriptions_mechanism'), table_name='TranslationSubscriptions')
op.drop_index(op.f('ix_TranslationSubscriptions_last_check'), table_name='TranslationSubscriptions')
op.drop_index(op.f('ix_TranslationNotificationRecipients_email'), table_name='TranslationNotificationRecipients')
op.drop_index(op.f('ix_TranslationNotificationRecipients_created'), table_name='TranslationNotificationRecipients')
op.drop_index(op.f('ix_TranslationMessageHistory_tool_id'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_taken_from_default'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_same_tool'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_position'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_parent_translation_id'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_namespace'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_key'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_from_developer'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_fmt'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_datetime'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_category'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationKeySuggestions_target'), table_name='TranslationKeySuggestions')
op.drop_index(op.f('ix_TranslationKeySuggestions_language'), table_name='TranslationKeySuggestions')
op.drop_index(op.f('ix_TranslationKeySuggestions_key'), table_name='TranslationKeySuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_origin_language'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_language'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_human_key'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_human_key_hash'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_engine'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationBundles_target'), table_name='TranslationBundles')
op.drop_index(op.f('ix_TranslationBundles_language'), table_name='TranslationBundles')
op.drop_index(op.f('ix_TranslationBundles_from_developer'), table_name='TranslationBundles')
op.drop_index(op.f('ix_TranslationCurrentActiveUsers_last_check'), table_name='TranslationCurrentActiveUsers')
# op.drop_constraint(None, 'TranslationBundles', type_='unique')
op.drop_index(op.f('ix_RepositoryApps_url'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_translatable'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_repository'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_name'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_processed_time'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_processed_downloaded_hash'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_processed_contents_hash'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_download_change'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_check'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_change'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_failing'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_failing_since'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_external_id'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_downloaded_hash'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_contents_hash'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_adaptable'), table_name='RepositoryApps')
# op.drop_constraint(None, 'RepositoryApp2languages', type_='unique')
op.drop_index(op.f('ix_TranslatedApps_url'), table_name='TranslatedApps')
op.drop_index(op.f('ix_Languages_language'), table_name='Languages')
op.drop_index(op.f('ix_GoLabOAuthUsers_email'), table_name='GoLabOAuthUsers')
op.drop_index(op.f('ix_GoLabOAuthUsers_display_name'), table_name='GoLabOAuthUsers')
op.drop_index(op.f('ix_Apps_unique_id'), table_name='Apps')
op.drop_index(op.f('ix_Apps_owner_id'), table_name='Apps')
op.drop_index(op.f('ix_Apps_name'), table_name='Apps')
op.drop_index(op.f('ix_Apps_modification_date'), table_name='Apps')
op.drop_index(op.f('ix_Apps_last_access_date'), table_name='Apps')
op.drop_index(op.f('ix_Apps_creation_date'), table_name='Apps')
op.drop_index(op.f('ix_Apps_composer'), table_name='Apps')
# op.drop_constraint(None, 'ActiveTranslationMessages', type_='unique')
op.drop_index(op.f('ix_ActiveTranslationMessages_tool_id'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_taken_from_default'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_same_tool'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_position'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_namespace'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_key'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_from_developer'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_fmt'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_datetime'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_category'), table_name='ActiveTranslationMessages')
op.create_index('ix_Users_last_access_date', 'Users', ['last_access_date'], unique=False)
op.create_index('ix_Users_creation_date', 'Users', ['creation_date'], unique=False)
op.create_index('ix_TranslationValueSuggestions_target', 'TranslationValueSuggestions', ['target'], unique=False)
op.create_index('ix_TranslationValueSuggestions_language', 'TranslationValueSuggestions', ['language'], unique=False)
op.create_index('ix_TranslationValueSuggestions_human_key', 'TranslationValueSuggestions', ['human_key'], unique=False)
op.create_index('ix_TranslationUrls_url', 'TranslationUrls', ['url'], unique=True)
op.create_index('ix_TranslationUrls_automatic', 'TranslationUrls', ['automatic'], unique=False)
op.create_index('ix_TranslationSyncLogs_start_datetime', 'TranslationSyncLogs', ['start_datetime'], unique=False)
op.create_index('ix_TranslationSyncLogs_end_datetime', 'TranslationSyncLogs', ['end_datetime'], unique=False)
op.create_index('ix_TranslationSubscriptions_mechanism', 'TranslationSubscriptions', ['mechanism'], unique=False)
op.create_index('ix_TranslationSubscriptions_last_check', 'TranslationSubscriptions', ['last_check'], unique=False)
op.create_index('ix_TranslationNotificationRecipients_email', 'TranslationNotificationRecipients', ['email'], unique=True)
op.create_index('ix_TranslationNotificationRecipients_created', 'TranslationNotificationRecipients', ['created'], unique=False)
op.create_index('ix_TranslationMessageHistory_tool_id', 'TranslationMessageHistory', ['tool_id'], unique=False)
op.create_index('ix_TranslationMessageHistory_taken_from_default', 'TranslationMessageHistory', ['taken_from_default'], unique=False)
op.create_index('ix_TranslationMessageHistory_same_tool', 'TranslationMessageHistory', ['same_tool'], unique=False)
op.create_index('ix_TranslationMessageHistory_position', 'TranslationMessageHistory', ['position'], unique=False)
op.create_index('ix_TranslationMessageHistory_parent_translation_id', 'TranslationMessageHistory', ['parent_translation_id'], unique=False)
op.create_index('ix_TranslationMessageHistory_namespace', 'TranslationMessageHistory', ['namespace'], unique=False)
op.create_index('ix_TranslationMessageHistory_key', 'TranslationMessageHistory', ['key'], unique=False)
op.create_index('ix_TranslationMessageHistory_from_developer', 'TranslationMessageHistory', ['from_developer'], unique=False)
op.create_index('ix_TranslationMessageHistory_fmt', 'TranslationMessageHistory', ['fmt'], unique=False)
op.create_index('ix_TranslationMessageHistory_datetime', 'TranslationMessageHistory', ['datetime'], unique=False)
op.create_index('ix_TranslationMessageHistory_category', 'TranslationMessageHistory', ['category'], unique=False)
op.create_index('ix_TranslationKeySuggestions_target', 'TranslationKeySuggestions', ['target'], unique=False)
op.create_index('ix_TranslationKeySuggestions_language', 'TranslationKeySuggestions', ['language'], unique=False)
op.create_index('ix_TranslationKeySuggestions_key', 'TranslationKeySuggestions', ['key'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_origin_language', 'TranslationExternalSuggestions', ['origin_language'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_language', 'TranslationExternalSuggestions', ['language'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_human_key_hash', 'TranslationExternalSuggestions', ['human_key_hash'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_human_key', 'TranslationExternalSuggestions', ['human_key'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_engine', 'TranslationExternalSuggestions', ['engine'], unique=False)
op.create_index('ix_TranslationCurrentActiveUsers_last_check', 'TranslationCurrentActiveUsers', ['last_check'], unique=False)
op.create_index('ix_TranslationBundles_target', 'TranslationBundles', ['target'], unique=False)
op.create_index('ix_TranslationBundles_language', 'TranslationBundles', ['language'], unique=False)
op.create_index('ix_TranslationBundles_from_developer', 'TranslationBundles', ['from_developer'], unique=False)
op.create_index('ix_TranslatedApps_url', 'TranslatedApps', ['url'], unique=True)
op.create_index('ix_RepositoryApps_url', 'RepositoryApps', ['url'], unique=False)
op.create_index('ix_RepositoryApps_translatable', 'RepositoryApps', ['translatable'], unique=False)
op.create_index('ix_RepositoryApps_repository', 'RepositoryApps', ['repository'], unique=False)
op.create_index('ix_RepositoryApps_name', 'RepositoryApps', ['name'], unique=False)
op.create_index('ix_RepositoryApps_last_processed_time', 'RepositoryApps', ['last_processed_time'], unique=False)
op.create_index('ix_RepositoryApps_last_processed_downloaded_hash', 'RepositoryApps', ['last_processed_downloaded_hash'], unique=False)
op.create_index('ix_RepositoryApps_last_processed_contents_hash', 'RepositoryApps', ['last_processed_contents_hash'], unique=False)
op.create_index('ix_RepositoryApps_last_download_change', 'RepositoryApps', ['last_download_change'], unique=False)
op.create_index('ix_RepositoryApps_last_check', 'RepositoryApps', ['last_check'], unique=False)
op.create_index('ix_RepositoryApps_last_change', 'RepositoryApps', ['last_change'], unique=False)
op.create_index('ix_RepositoryApps_failing_since', 'RepositoryApps', ['failing_since'], unique=False)
op.create_index('ix_RepositoryApps_failing', 'RepositoryApps', ['failing'], unique=False)
op.create_index('ix_RepositoryApps_external_id', 'RepositoryApps', ['external_id'], unique=False)
op.create_index('ix_RepositoryApps_downloaded_hash', 'RepositoryApps', ['downloaded_hash'], unique=False)
op.create_index('ix_RepositoryApps_contents_hash', 'RepositoryApps', ['contents_hash'], unique=False)
op.create_index('ix_RepositoryApps_adaptable', 'RepositoryApps', ['adaptable'], unique=False)
op.create_index('ix_Languages_language', 'Languages', ['language'], unique=True)
op.create_index('ix_GoLabOAuthUsers_email', 'GoLabOAuthUsers', ['email'], unique=True)
op.create_index('ix_GoLabOAuthUsers_display_name', 'GoLabOAuthUsers', ['display_name'], unique=False)
op.create_index('ix_Apps_unique_id', 'Apps', ['unique_id'], unique=True)
op.create_index('ix_Apps_owner_id', 'Apps', ['owner_id'], unique=False)
op.create_index('ix_Apps_name', 'Apps', ['name'], unique=False)
op.create_index('ix_Apps_modification_date', 'Apps', ['modification_date'], unique=False)
op.create_index('ix_Apps_last_access_date', 'Apps', ['last_access_date'], unique=False)
op.create_index('ix_Apps_creation_date', 'Apps', ['creation_date'], unique=False)
op.create_index('ix_Apps_composer', 'Apps', ['composer'], unique=False)
op.create_index('ix_ActiveTranslationMessages_tool_id', 'ActiveTranslationMessages', ['tool_id'], unique=False)
op.create_index('ix_ActiveTranslationMessages_taken_from_default', 'ActiveTranslationMessages', ['taken_from_default'], unique=False)
op.create_index('ix_ActiveTranslationMessages_same_tool', 'ActiveTranslationMessages', ['same_tool'], unique=False)
op.create_index('ix_ActiveTranslationMessages_position', 'ActiveTranslationMessages', ['position'], unique=False)
op.create_index('ix_ActiveTranslationMessages_namespace', 'ActiveTranslationMessages', ['namespace'], unique=False)
op.create_index('ix_ActiveTranslationMessages_key', 'ActiveTranslationMessages', ['key'], unique=False)
op.create_index('ix_ActiveTranslationMessages_from_developer', 'ActiveTranslationMessages', ['from_developer'], unique=False)
op.create_index('ix_ActiveTranslationMessages_fmt', 'ActiveTranslationMessages', ['fmt'], unique=False)
op.create_index('ix_ActiveTranslationMessages_datetime', 'ActiveTranslationMessages', ['datetime'], unique=False)
op.create_index('ix_ActiveTranslationMessages_category', 'ActiveTranslationMessages', ['category'], unique=False)
# ### end Alembic commands ###
| """Check new
Revision ID: 92235b77ea53
Revises: 381fdb66ec27
Create Date: 2017-10-14 02:38:51.007307
"""
# revision identifiers, used by Alembic.
revision = '92235b77ea53'
down_revision = '381fdb66ec27'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_ActiveTranslationMessages_category', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_datetime', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_fmt', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_from_developer', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_key', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_namespace', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_position', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_same_tool', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_taken_from_default', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_tool_id', table_name='ActiveTranslationMessages')
op.drop_index('ix_Apps_composer', table_name='Apps')
op.drop_index('ix_Apps_creation_date', table_name='Apps')
op.drop_index('ix_Apps_last_access_date', table_name='Apps')
op.drop_index('ix_Apps_modification_date', table_name='Apps')
op.drop_index('ix_Apps_name', table_name='Apps')
op.drop_index('ix_Apps_owner_id', table_name='Apps')
op.drop_index('ix_Apps_unique_id', table_name='Apps')
op.drop_index('ix_GoLabOAuthUsers_display_name', table_name='GoLabOAuthUsers')
op.drop_index('ix_GoLabOAuthUsers_email', table_name='GoLabOAuthUsers')
op.drop_index('ix_Languages_language', table_name='Languages')
op.drop_index('ix_RepositoryApps_adaptable', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_contents_hash', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_downloaded_hash', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_external_id', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_failing', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_failing_since', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_change', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_check', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_download_change', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_processed_contents_hash', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_processed_downloaded_hash', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_processed_time', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_name', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_repository', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_translatable', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_url', table_name='RepositoryApps')
op.drop_index('ix_TranslatedApps_url', table_name='TranslatedApps')
op.drop_index('ix_TranslationBundles_from_developer', table_name='TranslationBundles')
op.drop_index('ix_TranslationBundles_language', table_name='TranslationBundles')
op.drop_index('ix_TranslationBundles_target', table_name='TranslationBundles')
op.drop_index('ix_TranslationCurrentActiveUsers_last_check', table_name='TranslationCurrentActiveUsers')
op.drop_index('ix_TranslationExternalSuggestions_engine', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationExternalSuggestions_human_key', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationExternalSuggestions_human_key_hash', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationExternalSuggestions_language', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationExternalSuggestions_origin_language', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationKeySuggestions_key', table_name='TranslationKeySuggestions')
op.drop_index('ix_TranslationKeySuggestions_language', table_name='TranslationKeySuggestions')
op.drop_index('ix_TranslationKeySuggestions_target', table_name='TranslationKeySuggestions')
op.drop_index('ix_TranslationMessageHistory_category', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_datetime', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_fmt', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_from_developer', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_key', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_namespace', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_parent_translation_id', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_position', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_same_tool', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_taken_from_default', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_tool_id', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationNotificationRecipients_created', table_name='TranslationNotificationRecipients')
op.drop_index('ix_TranslationNotificationRecipients_email', table_name='TranslationNotificationRecipients')
op.drop_index('ix_TranslationSubscriptions_last_check', table_name='TranslationSubscriptions')
op.drop_index('ix_TranslationSubscriptions_mechanism', table_name='TranslationSubscriptions')
op.drop_index('ix_TranslationSyncLogs_end_datetime', table_name='TranslationSyncLogs')
op.drop_index('ix_TranslationSyncLogs_start_datetime', table_name='TranslationSyncLogs')
op.drop_index('ix_TranslationUrls_automatic', table_name='TranslationUrls')
op.drop_index('ix_TranslationUrls_url', table_name='TranslationUrls')
op.drop_index('ix_TranslationValueSuggestions_human_key', table_name='TranslationValueSuggestions')
op.drop_index('ix_TranslationValueSuggestions_language', table_name='TranslationValueSuggestions')
op.drop_index('ix_TranslationValueSuggestions_target', table_name='TranslationValueSuggestions')
op.drop_index('ix_Users_creation_date', table_name='Users')
op.drop_index('ix_Users_last_access_date', table_name='Users')
op.create_index(op.f('ix_ActiveTranslationMessages_category'), 'ActiveTranslationMessages', ['category'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_datetime'), 'ActiveTranslationMessages', ['datetime'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_fmt'), 'ActiveTranslationMessages', ['fmt'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_from_developer'), 'ActiveTranslationMessages', ['from_developer'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_key'), 'ActiveTranslationMessages', ['key'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_namespace'), 'ActiveTranslationMessages', ['namespace'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_position'), 'ActiveTranslationMessages', ['position'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_same_tool'), 'ActiveTranslationMessages', ['same_tool'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_taken_from_default'), 'ActiveTranslationMessages', ['taken_from_default'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_tool_id'), 'ActiveTranslationMessages', ['tool_id'], unique=False)
op.create_index(op.f('ix_Apps_composer'), 'Apps', ['composer'], unique=False)
op.create_index(op.f('ix_Apps_creation_date'), 'Apps', ['creation_date'], unique=False)
op.create_index(op.f('ix_Apps_last_access_date'), 'Apps', ['last_access_date'], unique=False)
op.create_index(op.f('ix_Apps_modification_date'), 'Apps', ['modification_date'], unique=False)
op.create_index(op.f('ix_Apps_name'), 'Apps', ['name'], unique=False)
op.create_index(op.f('ix_Apps_owner_id'), 'Apps', ['owner_id'], unique=False)
op.create_index(op.f('ix_Apps_unique_id'), 'Apps', ['unique_id'], unique=True)
op.create_index(op.f('ix_GoLabOAuthUsers_display_name'), 'GoLabOAuthUsers', ['display_name'], unique=False)
op.create_index(op.f('ix_GoLabOAuthUsers_email'), 'GoLabOAuthUsers', ['email'], unique=True)
op.create_index(op.f('ix_Languages_language'), 'Languages', ['language'], unique=True)
op.create_index(op.f('ix_RepositoryApps_adaptable'), 'RepositoryApps', ['adaptable'], unique=False)
op.create_index(op.f('ix_RepositoryApps_contents_hash'), 'RepositoryApps', ['contents_hash'], unique=False)
op.create_index(op.f('ix_RepositoryApps_downloaded_hash'), 'RepositoryApps', ['downloaded_hash'], unique=False)
op.create_index(op.f('ix_RepositoryApps_external_id'), 'RepositoryApps', ['external_id'], unique=False)
op.create_index(op.f('ix_RepositoryApps_failing_since'), 'RepositoryApps', ['failing_since'], unique=False)
op.create_index(op.f('ix_RepositoryApps_failing'), 'RepositoryApps', ['failing'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_change'), 'RepositoryApps', ['last_change'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_check'), 'RepositoryApps', ['last_check'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_download_change'), 'RepositoryApps', ['last_download_change'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_processed_contents_hash'), 'RepositoryApps', ['last_processed_contents_hash'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_processed_downloaded_hash'), 'RepositoryApps', ['last_processed_downloaded_hash'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_processed_time'), 'RepositoryApps', ['last_processed_time'], unique=False)
op.create_index(op.f('ix_RepositoryApps_name'), 'RepositoryApps', ['name'], unique=False)
op.create_index(op.f('ix_RepositoryApps_repository'), 'RepositoryApps', ['repository'], unique=False)
op.create_index(op.f('ix_RepositoryApps_translatable'), 'RepositoryApps', ['translatable'], unique=False)
op.create_index(op.f('ix_RepositoryApps_url'), 'RepositoryApps', ['url'], unique=False)
op.create_index(op.f('ix_TranslatedApps_url'), 'TranslatedApps', ['url'], unique=True)
op.create_index(op.f('ix_TranslationBundles_from_developer'), 'TranslationBundles', ['from_developer'], unique=False)
op.create_index(op.f('ix_TranslationBundles_language'), 'TranslationBundles', ['language'], unique=False)
op.create_index(op.f('ix_TranslationBundles_target'), 'TranslationBundles', ['target'], unique=False)
op.create_index(op.f('ix_TranslationCurrentActiveUsers_last_check'), 'TranslationCurrentActiveUsers', ['last_check'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_engine'), 'TranslationExternalSuggestions', ['engine'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_human_key_hash'), 'TranslationExternalSuggestions', ['human_key_hash'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_human_key'), 'TranslationExternalSuggestions', ['human_key'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_language'), 'TranslationExternalSuggestions', ['language'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_origin_language'), 'TranslationExternalSuggestions', ['origin_language'], unique=False)
op.create_index(op.f('ix_TranslationKeySuggestions_key'), 'TranslationKeySuggestions', ['key'], unique=False)
op.create_index(op.f('ix_TranslationKeySuggestions_language'), 'TranslationKeySuggestions', ['language'], unique=False)
op.create_index(op.f('ix_TranslationKeySuggestions_target'), 'TranslationKeySuggestions', ['target'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_category'), 'TranslationMessageHistory', ['category'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_datetime'), 'TranslationMessageHistory', ['datetime'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_fmt'), 'TranslationMessageHistory', ['fmt'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_from_developer'), 'TranslationMessageHistory', ['from_developer'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_key'), 'TranslationMessageHistory', ['key'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_namespace'), 'TranslationMessageHistory', ['namespace'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_parent_translation_id'), 'TranslationMessageHistory', ['parent_translation_id'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_position'), 'TranslationMessageHistory', ['position'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_same_tool'), 'TranslationMessageHistory', ['same_tool'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_taken_from_default'), 'TranslationMessageHistory', ['taken_from_default'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_tool_id'), 'TranslationMessageHistory', ['tool_id'], unique=False)
op.create_index(op.f('ix_TranslationNotificationRecipients_created'), 'TranslationNotificationRecipients', ['created'], unique=False)
op.create_index(op.f('ix_TranslationNotificationRecipients_email'), 'TranslationNotificationRecipients', ['email'], unique=True)
op.create_index(op.f('ix_TranslationSubscriptions_last_check'), 'TranslationSubscriptions', ['last_check'], unique=False)
op.create_index(op.f('ix_TranslationSubscriptions_mechanism'), 'TranslationSubscriptions', ['mechanism'], unique=False)
op.create_index(op.f('ix_TranslationSyncLogs_end_datetime'), 'TranslationSyncLogs', ['end_datetime'], unique=False)
op.create_index(op.f('ix_TranslationSyncLogs_start_datetime'), 'TranslationSyncLogs', ['start_datetime'], unique=False)
op.create_index(op.f('ix_TranslationUrls_automatic'), 'TranslationUrls', ['automatic'], unique=False)
op.create_index(op.f('ix_TranslationUrls_url'), 'TranslationUrls', ['url'], unique=True)
op.create_index(op.f('ix_TranslationValueSuggestions_human_key'), 'TranslationValueSuggestions', ['human_key'], unique=False)
op.create_index(op.f('ix_TranslationValueSuggestions_language'), 'TranslationValueSuggestions', ['language'], unique=False)
op.create_index(op.f('ix_TranslationValueSuggestions_target'), 'TranslationValueSuggestions', ['target'], unique=False)
op.create_index(op.f('ix_Users_creation_date'), 'Users', ['creation_date'], unique=False)
op.create_index(op.f('ix_Users_last_access_date'), 'Users', ['last_access_date'], unique=False)
# op.create_unique_constraint(None, 'ActiveTranslationMessages', ['bundle_id', 'key'])
# op.create_unique_constraint(None, 'RepositoryApp2languages', ['repository_app_id', 'language_id'])
# op.create_unique_constraint(None, 'TranslationBundles', ['translation_url_id', 'language', 'target'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_Users_last_access_date'), table_name='Users')
op.drop_index(op.f('ix_Users_creation_date'), table_name='Users')
op.drop_index(op.f('ix_TranslationValueSuggestions_target'), table_name='TranslationValueSuggestions')
op.drop_index(op.f('ix_TranslationValueSuggestions_language'), table_name='TranslationValueSuggestions')
op.drop_index(op.f('ix_TranslationValueSuggestions_human_key'), table_name='TranslationValueSuggestions')
op.drop_index(op.f('ix_TranslationUrls_url'), table_name='TranslationUrls')
op.drop_index(op.f('ix_TranslationUrls_automatic'), table_name='TranslationUrls')
op.drop_index(op.f('ix_TranslationSyncLogs_start_datetime'), table_name='TranslationSyncLogs')
op.drop_index(op.f('ix_TranslationSyncLogs_end_datetime'), table_name='TranslationSyncLogs')
op.drop_index(op.f('ix_TranslationSubscriptions_mechanism'), table_name='TranslationSubscriptions')
op.drop_index(op.f('ix_TranslationSubscriptions_last_check'), table_name='TranslationSubscriptions')
op.drop_index(op.f('ix_TranslationNotificationRecipients_email'), table_name='TranslationNotificationRecipients')
op.drop_index(op.f('ix_TranslationNotificationRecipients_created'), table_name='TranslationNotificationRecipients')
op.drop_index(op.f('ix_TranslationMessageHistory_tool_id'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_taken_from_default'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_same_tool'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_position'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_parent_translation_id'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_namespace'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_key'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_from_developer'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_fmt'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_datetime'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_category'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationKeySuggestions_target'), table_name='TranslationKeySuggestions')
op.drop_index(op.f('ix_TranslationKeySuggestions_language'), table_name='TranslationKeySuggestions')
op.drop_index(op.f('ix_TranslationKeySuggestions_key'), table_name='TranslationKeySuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_origin_language'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_language'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_human_key'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_human_key_hash'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_engine'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationBundles_target'), table_name='TranslationBundles')
op.drop_index(op.f('ix_TranslationBundles_language'), table_name='TranslationBundles')
op.drop_index(op.f('ix_TranslationBundles_from_developer'), table_name='TranslationBundles')
op.drop_index(op.f('ix_TranslationCurrentActiveUsers_last_check'), table_name='TranslationCurrentActiveUsers')
# op.drop_constraint(None, 'TranslationBundles', type_='unique')
op.drop_index(op.f('ix_RepositoryApps_url'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_translatable'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_repository'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_name'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_processed_time'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_processed_downloaded_hash'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_processed_contents_hash'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_download_change'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_check'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_change'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_failing'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_failing_since'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_external_id'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_downloaded_hash'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_contents_hash'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_adaptable'), table_name='RepositoryApps')
# op.drop_constraint(None, 'RepositoryApp2languages', type_='unique')
op.drop_index(op.f('ix_TranslatedApps_url'), table_name='TranslatedApps')
op.drop_index(op.f('ix_Languages_language'), table_name='Languages')
op.drop_index(op.f('ix_GoLabOAuthUsers_email'), table_name='GoLabOAuthUsers')
op.drop_index(op.f('ix_GoLabOAuthUsers_display_name'), table_name='GoLabOAuthUsers')
op.drop_index(op.f('ix_Apps_unique_id'), table_name='Apps')
op.drop_index(op.f('ix_Apps_owner_id'), table_name='Apps')
op.drop_index(op.f('ix_Apps_name'), table_name='Apps')
op.drop_index(op.f('ix_Apps_modification_date'), table_name='Apps')
op.drop_index(op.f('ix_Apps_last_access_date'), table_name='Apps')
op.drop_index(op.f('ix_Apps_creation_date'), table_name='Apps')
op.drop_index(op.f('ix_Apps_composer'), table_name='Apps')
# op.drop_constraint(None, 'ActiveTranslationMessages', type_='unique')
op.drop_index(op.f('ix_ActiveTranslationMessages_tool_id'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_taken_from_default'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_same_tool'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_position'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_namespace'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_key'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_from_developer'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_fmt'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_datetime'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_category'), table_name='ActiveTranslationMessages')
op.create_index('ix_Users_last_access_date', 'Users', ['last_access_date'], unique=False)
op.create_index('ix_Users_creation_date', 'Users', ['creation_date'], unique=False)
op.create_index('ix_TranslationValueSuggestions_target', 'TranslationValueSuggestions', ['target'], unique=False)
op.create_index('ix_TranslationValueSuggestions_language', 'TranslationValueSuggestions', ['language'], unique=False)
op.create_index('ix_TranslationValueSuggestions_human_key', 'TranslationValueSuggestions', ['human_key'], unique=False)
op.create_index('ix_TranslationUrls_url', 'TranslationUrls', ['url'], unique=True)
op.create_index('ix_TranslationUrls_automatic', 'TranslationUrls', ['automatic'], unique=False)
op.create_index('ix_TranslationSyncLogs_start_datetime', 'TranslationSyncLogs', ['start_datetime'], unique=False)
op.create_index('ix_TranslationSyncLogs_end_datetime', 'TranslationSyncLogs', ['end_datetime'], unique=False)
op.create_index('ix_TranslationSubscriptions_mechanism', 'TranslationSubscriptions', ['mechanism'], unique=False)
op.create_index('ix_TranslationSubscriptions_last_check', 'TranslationSubscriptions', ['last_check'], unique=False)
op.create_index('ix_TranslationNotificationRecipients_email', 'TranslationNotificationRecipients', ['email'], unique=True)
op.create_index('ix_TranslationNotificationRecipients_created', 'TranslationNotificationRecipients', ['created'], unique=False)
op.create_index('ix_TranslationMessageHistory_tool_id', 'TranslationMessageHistory', ['tool_id'], unique=False)
op.create_index('ix_TranslationMessageHistory_taken_from_default', 'TranslationMessageHistory', ['taken_from_default'], unique=False)
op.create_index('ix_TranslationMessageHistory_same_tool', 'TranslationMessageHistory', ['same_tool'], unique=False)
op.create_index('ix_TranslationMessageHistory_position', 'TranslationMessageHistory', ['position'], unique=False)
op.create_index('ix_TranslationMessageHistory_parent_translation_id', 'TranslationMessageHistory', ['parent_translation_id'], unique=False)
op.create_index('ix_TranslationMessageHistory_namespace', 'TranslationMessageHistory', ['namespace'], unique=False)
op.create_index('ix_TranslationMessageHistory_key', 'TranslationMessageHistory', ['key'], unique=False)
op.create_index('ix_TranslationMessageHistory_from_developer', 'TranslationMessageHistory', ['from_developer'], unique=False)
op.create_index('ix_TranslationMessageHistory_fmt', 'TranslationMessageHistory', ['fmt'], unique=False)
op.create_index('ix_TranslationMessageHistory_datetime', 'TranslationMessageHistory', ['datetime'], unique=False)
op.create_index('ix_TranslationMessageHistory_category', 'TranslationMessageHistory', ['category'], unique=False)
op.create_index('ix_TranslationKeySuggestions_target', 'TranslationKeySuggestions', ['target'], unique=False)
op.create_index('ix_TranslationKeySuggestions_language', 'TranslationKeySuggestions', ['language'], unique=False)
op.create_index('ix_TranslationKeySuggestions_key', 'TranslationKeySuggestions', ['key'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_origin_language', 'TranslationExternalSuggestions', ['origin_language'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_language', 'TranslationExternalSuggestions', ['language'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_human_key_hash', 'TranslationExternalSuggestions', ['human_key_hash'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_human_key', 'TranslationExternalSuggestions', ['human_key'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_engine', 'TranslationExternalSuggestions', ['engine'], unique=False)
op.create_index('ix_TranslationCurrentActiveUsers_last_check', 'TranslationCurrentActiveUsers', ['last_check'], unique=False)
op.create_index('ix_TranslationBundles_target', 'TranslationBundles', ['target'], unique=False)
op.create_index('ix_TranslationBundles_language', 'TranslationBundles', ['language'], unique=False)
op.create_index('ix_TranslationBundles_from_developer', 'TranslationBundles', ['from_developer'], unique=False)
op.create_index('ix_TranslatedApps_url', 'TranslatedApps', ['url'], unique=True)
op.create_index('ix_RepositoryApps_url', 'RepositoryApps', ['url'], unique=False)
op.create_index('ix_RepositoryApps_translatable', 'RepositoryApps', ['translatable'], unique=False)
op.create_index('ix_RepositoryApps_repository', 'RepositoryApps', ['repository'], unique=False)
op.create_index('ix_RepositoryApps_name', 'RepositoryApps', ['name'], unique=False)
op.create_index('ix_RepositoryApps_last_processed_time', 'RepositoryApps', ['last_processed_time'], unique=False)
op.create_index('ix_RepositoryApps_last_processed_downloaded_hash', 'RepositoryApps', ['last_processed_downloaded_hash'], unique=False)
op.create_index('ix_RepositoryApps_last_processed_contents_hash', 'RepositoryApps', ['last_processed_contents_hash'], unique=False)
op.create_index('ix_RepositoryApps_last_download_change', 'RepositoryApps', ['last_download_change'], unique=False)
op.create_index('ix_RepositoryApps_last_check', 'RepositoryApps', ['last_check'], unique=False)
op.create_index('ix_RepositoryApps_last_change', 'RepositoryApps', ['last_change'], unique=False)
op.create_index('ix_RepositoryApps_failing_since', 'RepositoryApps', ['failing_since'], unique=False)
op.create_index('ix_RepositoryApps_failing', 'RepositoryApps', ['failing'], unique=False)
op.create_index('ix_RepositoryApps_external_id', 'RepositoryApps', ['external_id'], unique=False)
op.create_index('ix_RepositoryApps_downloaded_hash', 'RepositoryApps', ['downloaded_hash'], unique=False)
op.create_index('ix_RepositoryApps_contents_hash', 'RepositoryApps', ['contents_hash'], unique=False)
op.create_index('ix_RepositoryApps_adaptable', 'RepositoryApps', ['adaptable'], unique=False)
op.create_index('ix_Languages_language', 'Languages', ['language'], unique=True)
op.create_index('ix_GoLabOAuthUsers_email', 'GoLabOAuthUsers', ['email'], unique=True)
op.create_index('ix_GoLabOAuthUsers_display_name', 'GoLabOAuthUsers', ['display_name'], unique=False)
op.create_index('ix_Apps_unique_id', 'Apps', ['unique_id'], unique=True)
op.create_index('ix_Apps_owner_id', 'Apps', ['owner_id'], unique=False)
op.create_index('ix_Apps_name', 'Apps', ['name'], unique=False)
op.create_index('ix_Apps_modification_date', 'Apps', ['modification_date'], unique=False)
op.create_index('ix_Apps_last_access_date', 'Apps', ['last_access_date'], unique=False)
op.create_index('ix_Apps_creation_date', 'Apps', ['creation_date'], unique=False)
op.create_index('ix_Apps_composer', 'Apps', ['composer'], unique=False)
op.create_index('ix_ActiveTranslationMessages_tool_id', 'ActiveTranslationMessages', ['tool_id'], unique=False)
op.create_index('ix_ActiveTranslationMessages_taken_from_default', 'ActiveTranslationMessages', ['taken_from_default'], unique=False)
op.create_index('ix_ActiveTranslationMessages_same_tool', 'ActiveTranslationMessages', ['same_tool'], unique=False)
op.create_index('ix_ActiveTranslationMessages_position', 'ActiveTranslationMessages', ['position'], unique=False)
op.create_index('ix_ActiveTranslationMessages_namespace', 'ActiveTranslationMessages', ['namespace'], unique=False)
op.create_index('ix_ActiveTranslationMessages_key', 'ActiveTranslationMessages', ['key'], unique=False)
op.create_index('ix_ActiveTranslationMessages_from_developer', 'ActiveTranslationMessages', ['from_developer'], unique=False)
op.create_index('ix_ActiveTranslationMessages_fmt', 'ActiveTranslationMessages', ['fmt'], unique=False)
op.create_index('ix_ActiveTranslationMessages_datetime', 'ActiveTranslationMessages', ['datetime'], unique=False)
op.create_index('ix_ActiveTranslationMessages_category', 'ActiveTranslationMessages', ['category'], unique=False)
# ### end Alembic commands ###
| en | 0.383329 | Check new Revision ID: 92235b77ea53 Revises: 381fdb66ec27 Create Date: 2017-10-14 02:38:51.007307 # revision identifiers, used by Alembic. # ### commands auto generated by Alembic - please adjust! ### # op.create_unique_constraint(None, 'ActiveTranslationMessages', ['bundle_id', 'key']) # op.create_unique_constraint(None, 'RepositoryApp2languages', ['repository_app_id', 'language_id']) # op.create_unique_constraint(None, 'TranslationBundles', ['translation_url_id', 'language', 'target']) # ### end Alembic commands ### # ### commands auto generated by Alembic - please adjust! ### # op.drop_constraint(None, 'TranslationBundles', type_='unique') # op.drop_constraint(None, 'RepositoryApp2languages', type_='unique') # op.drop_constraint(None, 'ActiveTranslationMessages', type_='unique') # ### end Alembic commands ### | 1.413148 | 1 |
build/lib/adb_utils/adb_utils.py | christopherferreira3/Python-ADB-Tools | 0 | 10576 | <reponame>christopherferreira3/Python-ADB-Tools
import subprocess
import os
def get_connected_devices() -> list:
"""
Returns a list of tuples containing the Device name and the android Version
:return:
"""
devices = []
devices_output = subprocess.check_output(["adb", "devices"]).decode("utf-8").strip("List of devices attached").split("\n")
for device in devices_output:
if device is None or device == "":
pass
else:
            device_name = device.split('\t')[0]  # the serial number is the text before the tab
android_version = subprocess.check_output(["adb", "-s", device_name, "shell", "getprop", "ro.build.version.release"])
devices.append((device_name, android_version.decode('utf-8').strip("\r\n")))
return devices
def install_app(apk_path=None, device=None) -> bool:
"""
Installs an APK file into a device.
    The app is installed with the -r option, so the APK is replaced if it already exists or installed if it doesn't
:param apk_path: Path for the APK
:param device: Device name
:return: True if success , False if fail
"""
path = os.getcwd() + apk_path if str(apk_path).startswith("/") else os.getcwd() + "/" + apk_path
if apk_path is not None and device is not None:
if os.path.isfile(path):
command = ["adb", "-s" , device, "install", "-r", path]
p = subprocess.Popen(command, stdout=None)
p.wait()
p.terminate()
print("APK {0} was installed in {1}".format(apk_path, device))
return True
else:
print("File {0} not found!".format(path))
else:
print("Device and/or apk not found or not specified")
return False
def is_device_connected(device) -> bool:
all_connected = get_connected_devices()
for device_connected, version in all_connected:
if device == device_connected:
return True
return False
def uninstall_app(package=None, device=None) -> None:
"""
Uninstall an app from the device
:return:
"""
command = ["adb", "-s", device, "uninstall", package]
if package is not None:
if device is None:
command.pop(1)
command.pop(1)
p = subprocess.Popen(command, stdout=None)
p.wait()
p.terminate()
else:
print("App package was not specified.")
def is_app_installed(package=None, device=None) -> bool:
"""
Returns True if the package is installed or False if it is not
:param package:
:return:
"""
command = ["adb", "-s", device, "shell", "pm", "list", "packages |", "grep", package]
if device is None:
command.pop(1)
command.pop(1)
out = subprocess.check_output(command, stderr=None)
return True if out.decode('utf-8').strip("\r\n") == "package:{0}".format(package) else False
def run_command(arg_string=None, arg_list=None) -> None:
"""
    Run a general ADB command
:return:
"""
command = arg_list if arg_list else str(arg_string).split(" ")
p = subprocess.check_output(command, stderr=None)
print(p.decode('utf-8'))
def kill_server() -> None:
"""
Kills the ADB server
:return: None
"""
command = ["adb", "kill-server"]
p = subprocess.Popen(command, stdout=None, stderr=None)
p.wait(timeout=10)
print("ADB server has been killed.")
def start_server() -> None:
"""
Starts the ADB server
:return: None
"""
command = ["adb", "start-server"]
p = subprocess.Popen(command, stderr=None, stdout=None)
p.wait(timeout=10)
print("ADB server has been started.")
def get_apk_from_device(package=None, device=None) -> bool:
"""
Retrieves the APK of an application if it exists
:param package:
:param device:
:return: bool
"""
# adb shell pm path com.example.someapp
# adb pull /data/app/com.example.someapp-2.apk path/to/desired/destination
    command_apk_path = ["adb", "-s", device, "shell", "pm", "path", package]
if package is None:
print("Package is required but it was not specified.")
return False
if device is None and len(get_connected_devices()) != 1:
print("There are multiple devices connected, please specify a device to get the APK from")
return False
elif device is None:
command_apk_path.pop(1)
command_apk_path.pop(1)
apk_path = subprocess.check_output(command_apk_path, stderr=None)
# TODO: Rest of the stuff
def push_file_to_device() -> None: # For now...
"""
Pushes a file to the device
:param device:
:return: None
"""
pass
def list_files_in_device() -> None:
"""
Gets a list of files in a specific folder
:param device:
:param path:
:return: list of files
"""
pass
def unlock_device(password=None, device=None) -> bool:
"""
Unlocks a device given a device name and the password
:param password:
:param device:
    :return: True if successful, False if error
"""
command_input = ["adb", "-s", device, "shell", "input", "text", password]
    command_submit = ["adb", "-s", device, "shell", "input", "keyevent", "66"]  # 66 = KEYCODE_ENTER; subprocess args must be strings
if device is None and len(get_connected_devices()) != 1:
print("No device was specified and/or multiple devices are connected")
return False
if device is None:
command_input.pop(1)
command_input.pop(1)
command_submit.pop(1)
command_submit.pop(1)
p = subprocess.Popen(command_input, stdout=None)
p.wait()
p.terminate()
p1 = subprocess.Popen(command_submit, stdout=None)
p1.wait()
p1.terminate()
return True
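# --- Usage sketch (not part of the original module) ---
# Hedged example of how the helpers above might be combined. The package name
# "com.example.app" and the APK filename are assumptions for illustration, and
# install_app() resolves apk_path relative to the current working directory.
if __name__ == "__main__":
    start_server()
    connected = get_connected_devices()  # e.g. [("emulator-5554", "11")]
    if connected:
        serial, _version = connected[0]
        if install_app(apk_path="app-debug.apk", device=serial):
            if is_app_installed(package="com.example.app", device=serial):
                uninstall_app(package="com.example.app", device=serial)
    kill_server()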
| import subprocess
import os
def get_connected_devices() -> list:
"""
Returns a list of tuples containing the Device name and the android Version
:return:
"""
devices = []
devices_output = subprocess.check_output(["adb", "devices"]).decode("utf-8").strip("List of devices attached").split("\n")
for device in devices_output:
if device is None or device == "":
pass
else:
            device_name = device.split('\t')[0]  # the serial number is the text before the tab
android_version = subprocess.check_output(["adb", "-s", device_name, "shell", "getprop", "ro.build.version.release"])
devices.append((device_name, android_version.decode('utf-8').strip("\r\n")))
return devices
def install_app(apk_path=None, device=None) -> bool:
"""
Installs an APK file into a device.
    The app is installed with the -r option, so the APK is replaced if it already exists or installed if it doesn't
:param apk_path: Path for the APK
:param device: Device name
:return: True if success , False if fail
"""
path = os.getcwd() + apk_path if str(apk_path).startswith("/") else os.getcwd() + "/" + apk_path
if apk_path is not None and device is not None:
if os.path.isfile(path):
command = ["adb", "-s" , device, "install", "-r", path]
p = subprocess.Popen(command, stdout=None)
p.wait()
p.terminate()
print("APK {0} was installed in {1}".format(apk_path, device))
return True
else:
print("File {0} not found!".format(path))
else:
print("Device and/or apk not found or not specified")
return False
def is_device_connected(device) -> bool:
all_connected = get_connected_devices()
for device_connected, version in all_connected:
if device == device_connected:
return True
return False
def uninstall_app(package=None, device=None) -> None:
"""
Uninstall an app from the device
:return:
"""
command = ["adb", "-s", device, "uninstall", package]
if package is not None:
if device is None:
command.pop(1)
command.pop(1)
p = subprocess.Popen(command, stdout=None)
p.wait()
p.terminate()
else:
print("App package was not specified.")
def is_app_installed(package=None, device=None) -> bool:
"""
Returns True if the package is installed or False if it is not
:param package:
:return:
"""
command = ["adb", "-s", device, "shell", "pm", "list", "packages |", "grep", package]
if device is None:
command.pop(1)
command.pop(1)
out = subprocess.check_output(command, stderr=None)
return True if out.decode('utf-8').strip("\r\n") == "package:{0}".format(package) else False
def run_command(arg_string=None, arg_list=None) -> None:
"""
    Run a general ADB command
:return:
"""
command = arg_list if arg_list else str(arg_string).split(" ")
p = subprocess.check_output(command, stderr=None)
print(p.decode('utf-8'))
def kill_server() -> None:
"""
Kills the ADB server
:return: None
"""
command = ["adb", "kill-server"]
p = subprocess.Popen(command, stdout=None, stderr=None)
p.wait(timeout=10)
print("ADB server has been killed.")
def start_server() -> None:
"""
Starts the ADB server
:return: None
"""
command = ["adb", "start-server"]
p = subprocess.Popen(command, stderr=None, stdout=None)
p.wait(timeout=10)
print("ADB server has been started.")
def get_apk_from_device(package=None, device=None) -> bool:
"""
Retrieves the APK of an application if it exists
:param package:
:param device:
:return: bool
"""
# adb shell pm path com.example.someapp
# adb pull /data/app/com.example.someapp-2.apk path/to/desired/destination
    command_apk_path = ["adb", "-s", device, "shell", "pm", "path", package]
if package is None:
print("Package is required but it was not specified.")
return False
if device is None and len(get_connected_devices()) != 1:
print("There are multiple devices connected, please specify a device to get the APK from")
return False
elif device is None:
command_apk_path.pop(1)
command_apk_path.pop(1)
apk_path = subprocess.check_output(command_apk_path, stderr=None)
# TODO: Rest of the stuff
def push_file_to_device() -> None: # For now...
"""
Pushes a file to the device
:param device:
:return: None
"""
pass
def list_files_in_device() -> None:
"""
Gets a list of files in a specific folder
:param device:
:param path:
:return: list of files
"""
pass
def unlock_device(password=None, device=None) -> bool:
"""
Unlocks a device given a device name and the password
:param password:
:param device:
    :return: True if successful, False if error
"""
command_input = ["adb", "-s", device, "shell", "input", "text", password]
    command_submit = ["adb", "-s", device, "shell", "input", "keyevent", "66"]  # 66 = KEYCODE_ENTER; subprocess args must be strings
if device is None and len(get_connected_devices()) != 1:
print("No device was specified and/or multiple devices are connected")
return False
if device is None:
command_input.pop(1)
command_input.pop(1)
command_submit.pop(1)
command_submit.pop(1)
p = subprocess.Popen(command_input, stdout=None)
p.wait()
p.terminate()
p1 = subprocess.Popen(command_submit, stdout=None)
p1.wait()
p1.terminate()
return True | en | 0.657022 | Returns a list of tuples containing the Device name and the android Version :return: Installs an APK file into a device. The app installed with the -r option so the apk gets replaced it exists or installed if it doenst :param apk_path: Path for the APK :param device: Device name :return: True if success , False if fail Uninstall an app from the device :return: Returns True if the package is installed or False if it is not :param package: :return: Run a general ABD command :return: Kills the ADB server :return: None Starts the ADB server :return: None Retrieves the APK of an application if it exists :param package: :param device: :return: bool # adb shell pm path com.example.someapp # adb pull /data/app/com.example.someapp-2.apk path/to/desired/destination # TODO: Rest of the stuff # For now... Pushes a file to the device :param device: :return: None Gets a list of files in a specific folder :param device: :param path: :return: list of files Unlocks a device given a device name and the password :param password: :param device: :return: True is sucess, False if error | 3.251069 | 3 |
tests/unit/test_cl61d.py | griesche/cloudnetpy-1 | 1 | 10577 | import glob
import os
import sys
from tempfile import TemporaryDirectory
import netCDF4
import numpy as np
import numpy.ma as ma
from all_products_fun import Check
from lidar_fun import LidarFun
from cloudnetpy import concat_lib
from cloudnetpy.instruments import ceilo2nc
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(SCRIPT_PATH)
FILES = glob.glob(f"{SCRIPT_PATH}/data/cl61d/*.nc")
FILES.sort()
SITE_META = {
"name": "Hyytiälä",
"altitude": 123,
"calibration_factor": 2.0,
"latitude": 45.0,
"longitude": 22.0,
}
class TestCl61d(Check):
site_meta = SITE_META
date = "2021-08-29"
temp_dir = TemporaryDirectory()
daily_file = temp_dir.name + "/daily.nc"
concat_lib.concatenate_files(FILES, daily_file, concat_dimension="profile")
temp_path = temp_dir.name + "/test.nc"
uuid = ceilo2nc(daily_file, temp_path, site_meta, date=date)
def test_variable_names(self):
keys = {
"beta",
"beta_smooth",
"calibration_factor",
"range",
"height",
"zenith_angle",
"time",
"depolarisation",
"altitude",
"latitude",
"longitude",
"wavelength",
}
assert set(self.nc.variables.keys()) == keys
def test_common_lidar(self):
lidar_fun = LidarFun(self.nc, self.site_meta, self.date, self.uuid)
for name, method in LidarFun.__dict__.items():
if "test_" in name:
getattr(lidar_fun, name)()
def test_variable_values(self):
assert abs(self.nc.variables["wavelength"][:] - 910.55) < 0.001
assert self.nc.variables["zenith_angle"][:] == 3.0
assert ma.max(self.nc.variables["depolarisation"][:]) < 1
assert ma.min(self.nc.variables["depolarisation"][:]) > -0.1
def test_comments(self):
assert "SNR threshold applied: 5" in self.nc.variables["beta"].comment
def test_global_attributes(self):
assert self.nc.source == "Vaisala CL61d"
assert self.nc.title == f'CL61d ceilometer from {self.site_meta["name"]}'
def test_date_argument(tmp_path):
daily_file = str(tmp_path / "daily.nc")
test_file = str(tmp_path / "test.nc")
concat_lib.concatenate_files(FILES, daily_file, concat_dimension="profile")
ceilo2nc(daily_file, test_file, SITE_META, date="2021-08-30")
with netCDF4.Dataset(test_file) as nc:
assert len(nc.variables["time"]) == 12
assert np.all(np.diff(nc.variables["time"][:]) > 0)
assert nc.year == "2021"
assert nc.month == "08"
assert nc.day == "30"
| import glob
import os
import sys
from tempfile import TemporaryDirectory
import netCDF4
import numpy as np
import numpy.ma as ma
from all_products_fun import Check
from lidar_fun import LidarFun
from cloudnetpy import concat_lib
from cloudnetpy.instruments import ceilo2nc
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(SCRIPT_PATH)
FILES = glob.glob(f"{SCRIPT_PATH}/data/cl61d/*.nc")
FILES.sort()
SITE_META = {
"name": "Hyytiälä",
"altitude": 123,
"calibration_factor": 2.0,
"latitude": 45.0,
"longitude": 22.0,
}
class TestCl61d(Check):
site_meta = SITE_META
date = "2021-08-29"
temp_dir = TemporaryDirectory()
daily_file = temp_dir.name + "/daily.nc"
concat_lib.concatenate_files(FILES, daily_file, concat_dimension="profile")
temp_path = temp_dir.name + "/test.nc"
uuid = ceilo2nc(daily_file, temp_path, site_meta, date=date)
def test_variable_names(self):
keys = {
"beta",
"beta_smooth",
"calibration_factor",
"range",
"height",
"zenith_angle",
"time",
"depolarisation",
"altitude",
"latitude",
"longitude",
"wavelength",
}
assert set(self.nc.variables.keys()) == keys
def test_common_lidar(self):
lidar_fun = LidarFun(self.nc, self.site_meta, self.date, self.uuid)
for name, method in LidarFun.__dict__.items():
if "test_" in name:
getattr(lidar_fun, name)()
def test_variable_values(self):
assert abs(self.nc.variables["wavelength"][:] - 910.55) < 0.001
assert self.nc.variables["zenith_angle"][:] == 3.0
assert ma.max(self.nc.variables["depolarisation"][:]) < 1
assert ma.min(self.nc.variables["depolarisation"][:]) > -0.1
def test_comments(self):
assert "SNR threshold applied: 5" in self.nc.variables["beta"].comment
def test_global_attributes(self):
assert self.nc.source == "Vaisala CL61d"
assert self.nc.title == f'CL61d ceilometer from {self.site_meta["name"]}'
def test_date_argument(tmp_path):
daily_file = str(tmp_path / "daily.nc")
test_file = str(tmp_path / "test.nc")
concat_lib.concatenate_files(FILES, daily_file, concat_dimension="profile")
ceilo2nc(daily_file, test_file, SITE_META, date="2021-08-30")
with netCDF4.Dataset(test_file) as nc:
assert len(nc.variables["time"]) == 12
assert np.all(np.diff(nc.variables["time"][:]) > 0)
assert nc.year == "2021"
assert nc.month == "08"
assert nc.day == "30"
| none | 1 | 2.188504 | 2 |
|
tests/functions/test_count.py | athre0z/clickhouse-sqlalchemy | 1 | 10578 | <reponame>athre0z/clickhouse-sqlalchemy<filename>tests/functions/test_count.py
from sqlalchemy import Column, func
from clickhouse_sqlalchemy import types, Table
from tests.testcase import (
BaseAbstractTestCase, HttpSessionTestCase, NativeSessionTestCase,
)
class CountTestCaseBase(BaseAbstractTestCase):
def create_table(self):
metadata = self.metadata()
return Table(
't1', metadata,
Column('x', types.Int32, primary_key=True)
)
def test_count(self):
table = self.create_table()
self.assertEqual(
self.compile(self.session.query(func.count(table.c.x))),
'SELECT count(x) AS count_1 FROM t1'
)
def test_count_distinct(self):
table = self.create_table()
query = self.session.query(func.count(func.distinct(table.c.x)))
self.assertEqual(
self.compile(query),
'SELECT count(distinct(x)) AS count_1 FROM t1'
)
def test_count_no_column_specified(self):
table = self.create_table()
query = self.session.query(func.count()).select_from(table)
self.assertEqual(
self.compile(query),
'SELECT count(*) AS count_1 FROM t1'
)
class CountHttpTestCase(CountTestCaseBase, HttpSessionTestCase):
""" ... """
class CountNativeTestCase(CountTestCaseBase, NativeSessionTestCase):
""" ... """
| from sqlalchemy import Column, func
from clickhouse_sqlalchemy import types, Table
from tests.testcase import (
BaseAbstractTestCase, HttpSessionTestCase, NativeSessionTestCase,
)
class CountTestCaseBase(BaseAbstractTestCase):
def create_table(self):
metadata = self.metadata()
return Table(
't1', metadata,
Column('x', types.Int32, primary_key=True)
)
def test_count(self):
table = self.create_table()
self.assertEqual(
self.compile(self.session.query(func.count(table.c.x))),
'SELECT count(x) AS count_1 FROM t1'
)
def test_count_distinct(self):
table = self.create_table()
query = self.session.query(func.count(func.distinct(table.c.x)))
self.assertEqual(
self.compile(query),
'SELECT count(distinct(x)) AS count_1 FROM t1'
)
def test_count_no_column_specified(self):
table = self.create_table()
query = self.session.query(func.count()).select_from(table)
self.assertEqual(
self.compile(query),
'SELECT count(*) AS count_1 FROM t1'
)
class CountHttpTestCase(CountTestCaseBase, HttpSessionTestCase):
""" ... """
class CountNativeTestCase(CountTestCaseBase, NativeSessionTestCase):
""" ... """ | none | 1 | 2.541463 | 3 |
|
scripts/commands/html/actions/search.py | stevekineeve88/orb | 0 | 10579 | import click
import requests
from bs4 import BeautifulSoup
from modules.Word.managers.DictionaryManager import DictionaryManager
import re
@click.command()
@click.option('--url', help='URL to fetch from')
@click.pass_context
def search(ctx, url):
dictionary_manager: DictionaryManager = ctx.obj[DictionaryManager]
soup = BeautifulSoup(requests.get(url).text, 'html.parser')
words_list = soup.text.split()
words_found = {}
print("Starting...")
i = 1
percentage = 5
percentage_increments = 5
for word in words_list:
try:
if (i / len(words_list) * 100) > percentage:
print(f'{percentage}% read')
percentage += percentage_increments
i += 1
word = re.sub(' +', ' ', word)
if word in words_found:
words_found[word] += 1
continue
dictionary_manager.is_word(word)
words_found[word] = 1
except Exception as e:
print(f'{str(e)}: {word}')
print("Complete...")
| import click
import requests
from bs4 import BeautifulSoup
from modules.Word.managers.DictionaryManager import DictionaryManager
import re
@click.command()
@click.option('--url', help='URL to fetch from')
@click.pass_context
def search(ctx, url):
dictionary_manager: DictionaryManager = ctx.obj[DictionaryManager]
soup = BeautifulSoup(requests.get(url).text, 'html.parser')
words_list = soup.text.split()
words_found = {}
print("Starting...")
i = 1
percentage = 5
percentage_increments = 5
for word in words_list:
try:
if (i / len(words_list) * 100) > percentage:
print(f'{percentage}% read')
percentage += percentage_increments
i += 1
word = re.sub(' +', ' ', word)
if word in words_found:
words_found[word] += 1
continue
dictionary_manager.is_word(word)
words_found[word] = 1
except Exception as e:
print(f'{str(e)}: {word}')
print("Complete...")
| none | 1 | 3.023552 | 3 |
|
asf_search/constants/DATASET/__init__.py | jhkennedy/Discovery-asf_search | 0 | 10580 | """Datasets to be used in search and related functions"""
from .DATASET import * | """Datasets to be used in search and related functions"""
from .DATASET import * | en | 0.807085 | Datasets to be used in search and related functions | 1.009309 | 1 |
cell2cell/plotting/cci_plot.py | ckmah/cell2cell | 16 | 10581 | # -*- coding: utf-8 -*-
import matplotlib as mpl
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from cell2cell.clustering import compute_linkage
from cell2cell.preprocessing.manipulate_dataframes import check_symmetry
from cell2cell.plotting.aesthetics import map_colors_to_metadata
def clustermap_cci(interaction_space, method='ward', optimal_leaf=True, metadata=None, sample_col='#SampleID',
group_col='Groups', meta_cmap='gist_rainbow', colors=None, excluded_cells=None, title='',
cbar_title='CCI score', cbar_fontsize=18, filename=None, **kwargs):
'''Generates a clustermap (heatmap + dendrograms from a hierarchical
clustering) based on CCI scores of cell-cell pairs.
Parameters
----------
interaction_space : cell2cell.core.interaction_space.InteractionSpace
        Interaction space that contains a distance matrix after running the
        method compute_pairwise_cci_scores. Alternatively, this object
can be a numpy-array or a pandas DataFrame. Also, a
SingleCellInteractions or a BulkInteractions object after running
the method compute_pairwise_cci_scores.
method : str, default='ward'
Clustering method for computing a linkage as in
scipy.cluster.hierarchy.linkage
optimal_leaf : boolean, default=True
        Whether to sort the leaves of the dendrograms so as to minimize the distance
between successive leaves. For more information, see
scipy.cluster.hierarchy.optimal_leaf_ordering
metadata : pandas.Dataframe, default=None
Metadata associated with the cells, cell types or samples in the
matrix containing CCI scores. If None, cells will not be colored
by major groups.
sample_col : str, default='#SampleID'
Column in the metadata for the cells, cell types or samples
in the matrix containing CCI scores.
group_col : str, default='Groups'
Column in the metadata containing the major groups of cells, cell types
or samples in the matrix with CCI scores.
meta_cmap : str, default='gist_rainbow'
Name of the color palette for coloring the major groups of cells.
colors : dict, default=None
Dictionary containing tuples in the RGBA format for indicating colors
of major groups of cells. If colors is specified, meta_cmap will be
ignored.
excluded_cells : list, default=None
List containing cell names that are present in the interaction_space
object but that will be excluded from this plot.
title : str, default=''
Title of the clustermap.
cbar_title : str, default='CCI score'
Title for the colorbar, depending on the score employed.
cbar_fontsize : int, default=18
Font size for the colorbar title as well as labels for axes X and Y.
filename : str, default=None
        Path to save the clustermap figure. If None, the figure is not
        saved.
**kwargs : dict
Dictionary containing arguments for the seaborn.clustermap function.
Returns
-------
hier : seaborn.matrix.ClusterGrid
A seaborn ClusterGrid instance.
'''
if hasattr(interaction_space, 'distance_matrix'):
print('Interaction space detected as an InteractionSpace class')
distance_matrix = interaction_space.distance_matrix
space_type = 'class'
elif (type(interaction_space) is np.ndarray) or (type(interaction_space) is pd.core.frame.DataFrame):
print('Interaction space detected as a distance matrix')
distance_matrix = interaction_space
space_type = 'matrix'
elif hasattr(interaction_space, 'interaction_space'):
print('Interaction space detected as a Interactions class')
if not hasattr(interaction_space.interaction_space, 'distance_matrix'):
raise ValueError('First run the method compute_pairwise_interactions() in your interaction' + \
' object to generate a distance matrix.')
else:
interaction_space = interaction_space.interaction_space
distance_matrix = interaction_space.distance_matrix
space_type = 'class'
else:
raise ValueError('First run the method compute_pairwise_interactions() in your interaction' + \
' object to generate a distance matrix.')
# Drop excluded cells
if excluded_cells is not None:
df = distance_matrix.loc[~distance_matrix.index.isin(excluded_cells),
~distance_matrix.columns.isin(excluded_cells)]
else:
df = distance_matrix
# Check symmetry to get linkage
symmetric = check_symmetry(df)
if (not symmetric) & (type(interaction_space) is pd.core.frame.DataFrame):
assert set(df.index) == set(df.columns), 'The distance matrix does not have the same elements in rows and columns'
# Obtain info for generating plot
linkage = _get_distance_matrix_linkages(df=df,
kwargs=kwargs,
method=method,
optimal_ordering=optimal_leaf,
symmetric=symmetric
)
kwargs_ = kwargs.copy()
# PLOT CCI MATRIX
if space_type == 'class':
df = interaction_space.interaction_elements['cci_matrix']
else:
df = distance_matrix
if excluded_cells is not None:
df = df.loc[~df.index.isin(excluded_cells),
~df.columns.isin(excluded_cells)]
# Colors
if metadata is not None:
col_colors = map_colors_to_metadata(metadata=metadata,
ref_df=df,
colors=colors,
sample_col=sample_col,
group_col=group_col,
cmap=meta_cmap)
if not symmetric:
row_colors = col_colors
else:
row_colors = None
else:
col_colors = None
row_colors = None
# Plot hierarchical clustering (triangular)
hier = _plot_triangular_clustermap(df=df,
symmetric=symmetric,
linkage=linkage,
col_colors=col_colors,
row_colors=row_colors,
title=title,
cbar_title=cbar_title,
cbar_fontsize=cbar_fontsize,
**kwargs_)
    if not symmetric:
hier.ax_heatmap.set_xlabel('Receiver cells', fontsize=cbar_fontsize)
hier.ax_heatmap.set_ylabel('Sender cells', fontsize=cbar_fontsize)
if filename is not None:
plt.savefig(filename, dpi=300,
bbox_inches='tight')
return hier
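# --- Usage sketch (not part of the original module) ---
# Hedged example of calling clustermap_cci() on a hand-made, symmetric CCI matrix.
# The cell names and scores are illustrative only; extra keyword arguments such as
# cmap are forwarded untouched to seaborn.clustermap.
def _example_clustermap_cci():
    cells = ['Fibroblast', 'Macrophage', 'T-cell']
    scores = pd.DataFrame([[0.0, 0.4, 0.7],
                           [0.4, 0.0, 0.2],
                           [0.7, 0.2, 0.0]],
                          index=cells, columns=cells)
    # A pandas DataFrame is treated directly as a distance matrix (smaller = stronger interaction).
    return clustermap_cci(scores,
                          method='ward',
                          optimal_leaf=True,
                          cbar_title='CCI score',
                          cmap='Blues')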
def _get_distance_matrix_linkages(df, kwargs, method='ward', optimal_ordering=True, symmetric=None):
'''Computes linkages for the CCI matrix.
Parameters
----------
df : pandas.DataFrame
Contains the CCI scores in a form of distances (that is, smaller
values represent stronger interactions). Diagonal must be filled
by zeros.
kwargs : dict
Dictionary containing arguments for the seaborn.clustermap function.
method : str, default='ward'
Clustering method for computing a linkage as in
scipy.cluster.hierarchy.linkage
optimal_ordering : boolean, default=True
        Whether to sort the leaves of the dendrograms so as to minimize the distance
between successive leaves. For more information, see
scipy.cluster.hierarchy.optimal_leaf_ordering
symmetric : boolean, default=None
Whether df is symmetric.
Returns
-------
linkage : ndarray
The hierarchical clustering of cells encoded as a linkage matrix.
'''
if symmetric is None:
symmetric = check_symmetry(df)
if symmetric:
if 'col_cluster' in kwargs.keys():
kwargs['row_cluster'] = kwargs['col_cluster']
if kwargs['col_cluster']:
linkage = compute_linkage(df, method=method, optimal_ordering=optimal_ordering)
else:
linkage = None
elif 'row_cluster' in kwargs.keys():
if kwargs['row_cluster']:
linkage = compute_linkage(df, method=method, optimal_ordering=optimal_ordering)
else:
linkage = None
else:
linkage = compute_linkage(df, method=method, optimal_ordering=optimal_ordering)
else:
linkage = None
return linkage
def _triangularize_distance_matrix(df, linkage=None, symmetric=None, **kwargs):
'''Generates a mask to plot the upper triangle of the CCI matrix.
Parameters
----------
df : pandas.DataFrame
Contains the CCI scores. Must be a symmetric matrix.
linkage : ndarray, default=None
The hierarchical clustering of cells encoded as a linkage matrix.
symmetric : boolean, default=None
Whether df is symmetric.
**kwargs : dict
Dictionary containing arguments for the seaborn.clustermap function.
Returns
-------
mask : ndarray
Mask that contains ones in the places to be hidden in the clustermap.
Only the diagonal and the upper triangle are not masked (contain
zeros).
'''
if symmetric is None:
symmetric = check_symmetry(df)
# Triangular matrix
if symmetric:
order_map = dict()
if linkage is None:
mask = np.ones((df.shape[0], df.shape[1]))
for i in range(mask.shape[0]):
for j in range(i, mask.shape[1]):
mask[i, j] = 0
else:
# Plot hierarchical clustering for getting indexes according to linkage
hier = sns.clustermap(df,
col_linkage=linkage,
row_linkage=linkage,
**kwargs
)
plt.close()
ind_order = hier.dendrogram_col.reordered_ind
mask = np.zeros((df.shape[0], df.shape[1]))
for i, ind in enumerate(ind_order):
order_map[i] = ind
filter_list = [order_map[j] for j in range(i)]
mask[ind, filter_list] = 1
else:
mask = None
return mask
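# --- Illustration (not part of the original module) ---
# Hedged sketch of the no-linkage branch above: for a symmetric 3x3 frame the mask
# hides (1) the lower triangle and keeps (0) the diagonal and the upper triangle.
def _example_triangular_mask():
    df = pd.DataFrame(np.zeros((3, 3)), index=list('ABC'), columns=list('ABC'))
    mask = _triangularize_distance_matrix(df, linkage=None, symmetric=True)
    # mask == [[0., 0., 0.],
    #          [1., 0., 0.],
    #          [1., 1., 0.]]
    return mask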
def _plot_triangular_clustermap(df, symmetric=None, linkage=None, mask=None, col_colors=None, row_colors=None,
title='', cbar_title='CCI score', cbar_fontsize=12, **kwargs):
'''Plots a triangular clustermap based on a mask.
Parameters
----------
df : pandas.DataFrame
Contains the CCI scores. Must be a symmetric matrix.
linkage : ndarray, default=None
The hierarchical clustering of cells encoded as a linkage matrix.
mask : ndarray, default=None
Mask that contains ones in the places to be hidden in the clustermap.
Only the diagonal and the upper triangle are not masked (contain
zeros). If None, a mask will be computed based on the CCI matrix
symmetry.
col_colors : dict, default=None
Dictionary containing tuples in the RGBA format for indicating colors
of major groups of cells in the columns.
row_colors : dict, default=None
Dictionary containing tuples in the RGBA format for indicating colors
of major groups of cells in the rows.
title : str, default=''
Title of the clustermap.
cbar_title : str, default='CCI score'
Title for the colorbar, depending on the score employed.
cbar_fontsize : int, default=12
Font size for the colorbar title as well as labels for axes X and Y.
**kwargs : dict
Dictionary containing arguments for the seaborn.clustermap function.
Returns
-------
hier : seaborn.matrix.ClusterGrid
A seaborn ClusterGrid instance.
'''
if symmetric is None:
symmetric = check_symmetry(df)
if mask is None:
mask = _triangularize_distance_matrix(df=df,
linkage=linkage,
symmetric=symmetric,
**kwargs
)
hier = sns.clustermap(df,
col_linkage=linkage,
row_linkage=linkage,
mask=mask,
col_colors=col_colors,
row_colors=row_colors,
**kwargs
)
hier = _move_xticks_triangular_clustermap(clustermap=hier,
symmetric=symmetric
)
# Title
if len(title) > 0:
hier.ax_col_dendrogram.set_title(title, fontsize=16)
# Color bar label
cbar = hier.ax_heatmap.collections[0].colorbar
cbar.ax.set_ylabel(cbar_title, fontsize=cbar_fontsize)
cbar.ax.yaxis.set_label_position("left")
return hier
def _move_xticks_triangular_clustermap(clustermap, symmetric=True):
'''Moves xticks to the diagonal when plotting a symmetric matrix
    in the form of an upper triangle.
Parameters
---------
clustermap : seaborn.matrix.ClusterGrid
A seaborn ClusterGrid instance.
    symmetric : boolean, default=True
Whether the CCI matrix plotted in the clustermap is symmetric.
Returns
-------
clustermap : seaborn.matrix.ClusterGrid
A seaborn ClusterGrid instance, with the xticks moved to the
diagonal if the CCI matrix was symmetric. If not, the same
input clustermap is returned, but with rotated xtick labels.
'''
if symmetric:
# Apply offset transform to all xticklabels.
clustermap.ax_row_dendrogram.set_visible(False)
clustermap.ax_heatmap.tick_params(bottom=False) # Hide xtick line
x_labels = clustermap.ax_heatmap.xaxis.get_majorticklabels()
dpi_x = clustermap.fig.dpi_scale_trans.to_values()[0]
dpi_y = clustermap.fig.dpi_scale_trans.to_values()[3]
x0 = clustermap.ax_heatmap.transData.transform(x_labels[0].get_position())
x1 = clustermap.ax_heatmap.transData.transform(x_labels[1].get_position())
ylims = clustermap.ax_heatmap.get_ylim()
bottom_points = clustermap.ax_heatmap.transData.transform((1.0, ylims[0]))[1]
for i, xl in enumerate(x_labels):
# Move labels in dx and dy points.
swap_xy = (1.0, xl.get_position()[0] + 0.5)
new_y_points = clustermap.ax_heatmap.transData.transform(swap_xy)[1]
dx = -0.5 * abs(x1[0] - x0[0]) / dpi_x
dy = (new_y_points - bottom_points) / dpi_y
offset = mpl.transforms.ScaledTranslation(dx, dy, clustermap.fig.dpi_scale_trans)
xl.set_transform(xl.get_transform() + offset)
if symmetric:
rot = 45
else:
rot = 90
va = 'center'
clustermap.ax_heatmap.set_xticklabels(clustermap.ax_heatmap.xaxis.get_majorticklabels(),
rotation=rot,
rotation_mode='anchor',
va='bottom',
ha='right') # , fontsize=9.5)
clustermap.ax_heatmap.set_yticklabels(clustermap.ax_heatmap.yaxis.get_majorticklabels(),
rotation=0,
va=va,
ha='left') # , fontsize=9.5)
return clustermap | # -*- coding: utf-8 -*-
import matplotlib as mpl
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from cell2cell.clustering import compute_linkage
from cell2cell.preprocessing.manipulate_dataframes import check_symmetry
from cell2cell.plotting.aesthetics import map_colors_to_metadata
def clustermap_cci(interaction_space, method='ward', optimal_leaf=True, metadata=None, sample_col='#SampleID',
group_col='Groups', meta_cmap='gist_rainbow', colors=None, excluded_cells=None, title='',
cbar_title='CCI score', cbar_fontsize=18, filename=None, **kwargs):
'''Generates a clustermap (heatmap + dendrograms from a hierarchical
clustering) based on CCI scores of cell-cell pairs.
Parameters
----------
interaction_space : cell2cell.core.interaction_space.InteractionSpace
        Interaction space that contains a distance matrix after running the
        method compute_pairwise_cci_scores. Alternatively, this object
can be a numpy-array or a pandas DataFrame. Also, a
SingleCellInteractions or a BulkInteractions object after running
the method compute_pairwise_cci_scores.
method : str, default='ward'
Clustering method for computing a linkage as in
scipy.cluster.hierarchy.linkage
optimal_leaf : boolean, default=True
        Whether to sort the leaves of the dendrograms so as to minimize the distance
between successive leaves. For more information, see
scipy.cluster.hierarchy.optimal_leaf_ordering
metadata : pandas.Dataframe, default=None
Metadata associated with the cells, cell types or samples in the
matrix containing CCI scores. If None, cells will not be colored
by major groups.
sample_col : str, default='#SampleID'
Column in the metadata for the cells, cell types or samples
in the matrix containing CCI scores.
group_col : str, default='Groups'
Column in the metadata containing the major groups of cells, cell types
or samples in the matrix with CCI scores.
meta_cmap : str, default='gist_rainbow'
Name of the color palette for coloring the major groups of cells.
colors : dict, default=None
Dictionary containing tuples in the RGBA format for indicating colors
of major groups of cells. If colors is specified, meta_cmap will be
ignored.
excluded_cells : list, default=None
List containing cell names that are present in the interaction_space
object but that will be excluded from this plot.
title : str, default=''
Title of the clustermap.
cbar_title : str, default='CCI score'
Title for the colorbar, depending on the score employed.
cbar_fontsize : int, default=18
Font size for the colorbar title as well as labels for axes X and Y.
filename : str, default=None
        Path to save the clustermap figure. If None, the figure is not
        saved.
**kwargs : dict
Dictionary containing arguments for the seaborn.clustermap function.
Returns
-------
hier : seaborn.matrix.ClusterGrid
A seaborn ClusterGrid instance.
'''
if hasattr(interaction_space, 'distance_matrix'):
print('Interaction space detected as an InteractionSpace class')
distance_matrix = interaction_space.distance_matrix
space_type = 'class'
elif (type(interaction_space) is np.ndarray) or (type(interaction_space) is pd.core.frame.DataFrame):
print('Interaction space detected as a distance matrix')
distance_matrix = interaction_space
space_type = 'matrix'
elif hasattr(interaction_space, 'interaction_space'):
print('Interaction space detected as a Interactions class')
if not hasattr(interaction_space.interaction_space, 'distance_matrix'):
raise ValueError('First run the method compute_pairwise_interactions() in your interaction' + \
' object to generate a distance matrix.')
else:
interaction_space = interaction_space.interaction_space
distance_matrix = interaction_space.distance_matrix
space_type = 'class'
else:
raise ValueError('First run the method compute_pairwise_interactions() in your interaction' + \
' object to generate a distance matrix.')
# Drop excluded cells
if excluded_cells is not None:
df = distance_matrix.loc[~distance_matrix.index.isin(excluded_cells),
~distance_matrix.columns.isin(excluded_cells)]
else:
df = distance_matrix
# Check symmetry to get linkage
symmetric = check_symmetry(df)
if (not symmetric) & (type(interaction_space) is pd.core.frame.DataFrame):
assert set(df.index) == set(df.columns), 'The distance matrix does not have the same elements in rows and columns'
# Obtain info for generating plot
linkage = _get_distance_matrix_linkages(df=df,
kwargs=kwargs,
method=method,
optimal_ordering=optimal_leaf,
symmetric=symmetric
)
kwargs_ = kwargs.copy()
# PLOT CCI MATRIX
if space_type == 'class':
df = interaction_space.interaction_elements['cci_matrix']
else:
df = distance_matrix
if excluded_cells is not None:
df = df.loc[~df.index.isin(excluded_cells),
~df.columns.isin(excluded_cells)]
# Colors
if metadata is not None:
col_colors = map_colors_to_metadata(metadata=metadata,
ref_df=df,
colors=colors,
sample_col=sample_col,
group_col=group_col,
cmap=meta_cmap)
if not symmetric:
row_colors = col_colors
else:
row_colors = None
else:
col_colors = None
row_colors = None
# Plot hierarchical clustering (triangular)
hier = _plot_triangular_clustermap(df=df,
symmetric=symmetric,
linkage=linkage,
col_colors=col_colors,
row_colors=row_colors,
title=title,
cbar_title=cbar_title,
cbar_fontsize=cbar_fontsize,
**kwargs_)
    if not symmetric:
hier.ax_heatmap.set_xlabel('Receiver cells', fontsize=cbar_fontsize)
hier.ax_heatmap.set_ylabel('Sender cells', fontsize=cbar_fontsize)
if filename is not None:
plt.savefig(filename, dpi=300,
bbox_inches='tight')
return hier
def _get_distance_matrix_linkages(df, kwargs, method='ward', optimal_ordering=True, symmetric=None):
'''Computes linkages for the CCI matrix.
Parameters
----------
df : pandas.DataFrame
Contains the CCI scores in a form of distances (that is, smaller
values represent stronger interactions). Diagonal must be filled
by zeros.
kwargs : dict
Dictionary containing arguments for the seaborn.clustermap function.
method : str, default='ward'
Clustering method for computing a linkage as in
scipy.cluster.hierarchy.linkage
optimal_ordering : boolean, default=True
        Whether to sort the leaves of the dendrograms so as to minimize the distance
between successive leaves. For more information, see
scipy.cluster.hierarchy.optimal_leaf_ordering
symmetric : boolean, default=None
Whether df is symmetric.
Returns
-------
linkage : ndarray
The hierarchical clustering of cells encoded as a linkage matrix.
'''
if symmetric is None:
symmetric = check_symmetry(df)
if symmetric:
if 'col_cluster' in kwargs.keys():
kwargs['row_cluster'] = kwargs['col_cluster']
if kwargs['col_cluster']:
linkage = compute_linkage(df, method=method, optimal_ordering=optimal_ordering)
else:
linkage = None
elif 'row_cluster' in kwargs.keys():
if kwargs['row_cluster']:
linkage = compute_linkage(df, method=method, optimal_ordering=optimal_ordering)
else:
linkage = None
else:
linkage = compute_linkage(df, method=method, optimal_ordering=optimal_ordering)
else:
linkage = None
return linkage
def _triangularize_distance_matrix(df, linkage=None, symmetric=None, **kwargs):
'''Generates a mask to plot the upper triangle of the CCI matrix.
Parameters
----------
df : pandas.DataFrame
Contains the CCI scores. Must be a symmetric matrix.
linkage : ndarray, default=None
The hierarchical clustering of cells encoded as a linkage matrix.
symmetric : boolean, default=None
Whether df is symmetric.
**kwargs : dict
Dictionary containing arguments for the seaborn.clustermap function.
Returns
-------
mask : ndarray
Mask that contains ones in the places to be hidden in the clustermap.
Only the diagonal and the upper triangle are not masked (contain
zeros).
'''
if symmetric is None:
symmetric = check_symmetry(df)
# Triangular matrix
if symmetric:
order_map = dict()
if linkage is None:
mask = np.ones((df.shape[0], df.shape[1]))
for i in range(mask.shape[0]):
for j in range(i, mask.shape[1]):
mask[i, j] = 0
else:
# Plot hierarchical clustering for getting indexes according to linkage
hier = sns.clustermap(df,
col_linkage=linkage,
row_linkage=linkage,
**kwargs
)
plt.close()
ind_order = hier.dendrogram_col.reordered_ind
mask = np.zeros((df.shape[0], df.shape[1]))
for i, ind in enumerate(ind_order):
order_map[i] = ind
filter_list = [order_map[j] for j in range(i)]
mask[ind, filter_list] = 1
else:
mask = None
return mask
def _plot_triangular_clustermap(df, symmetric=None, linkage=None, mask=None, col_colors=None, row_colors=None,
title='', cbar_title='CCI score', cbar_fontsize=12, **kwargs):
'''Plots a triangular clustermap based on a mask.
Parameters
----------
df : pandas.DataFrame
Contains the CCI scores. Must be a symmetric matrix.
linkage : ndarray, default=None
The hierarchical clustering of cells encoded as a linkage matrix.
mask : ndarray, default=None
Mask that contains ones in the places to be hidden in the clustermap.
Only the diagonal and the upper triangle are not masked (contain
zeros). If None, a mask will be computed based on the CCI matrix
symmetry.
col_colors : dict, default=None
Dictionary containing tuples in the RGBA format for indicating colors
of major groups of cells in the columns.
row_colors : dict, default=None
Dictionary containing tuples in the RGBA format for indicating colors
of major groups of cells in the rows.
title : str, default=''
Title of the clustermap.
cbar_title : str, default='CCI score'
Title for the colorbar, depending on the score employed.
cbar_fontsize : int, default=12
Font size for the colorbar title as well as labels for axes X and Y.
**kwargs : dict
Dictionary containing arguments for the seaborn.clustermap function.
Returns
-------
hier : seaborn.matrix.ClusterGrid
A seaborn ClusterGrid instance.
'''
if symmetric is None:
symmetric = check_symmetry(df)
if mask is None:
mask = _triangularize_distance_matrix(df=df,
linkage=linkage,
symmetric=symmetric,
**kwargs
)
hier = sns.clustermap(df,
col_linkage=linkage,
row_linkage=linkage,
mask=mask,
col_colors=col_colors,
row_colors=row_colors,
**kwargs
)
hier = _move_xticks_triangular_clustermap(clustermap=hier,
symmetric=symmetric
)
# Title
if len(title) > 0:
hier.ax_col_dendrogram.set_title(title, fontsize=16)
# Color bar label
cbar = hier.ax_heatmap.collections[0].colorbar
cbar.ax.set_ylabel(cbar_title, fontsize=cbar_fontsize)
cbar.ax.yaxis.set_label_position("left")
return hier
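# Hedged usage sketch (not executed on import): one way the private helpers above can be
# driven end to end on random data. The toy matrix, cell names and colormap are
# illustrative assumptions, not values used by the original package.
def _example_triangular_plot(seed=0):
    import numpy as np
    import pandas as pd
    rng = np.random.default_rng(seed)
    cells = ['Cell%d' % i for i in range(6)]
    scores = rng.random((6, 6))
    dist = pd.DataFrame((scores + scores.T) / 2.0, index=cells, columns=cells)
    np.fill_diagonal(dist.values, 0.0)  # diagonal must be zero for a distance matrix
    linkage = _get_distance_matrix_linkages(dist, kwargs={}, method='ward', symmetric=True)
    return _plot_triangular_clustermap(df=dist, symmetric=True, linkage=linkage,
                                       title='Toy CCI matrix', cmap='Blues_r')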
def _move_xticks_triangular_clustermap(clustermap, symmetric=True):
'''Moves xticks to the diagonal when plotting a symmetric matrix
    in the form of an upper triangle.
    Parameters
    ----------
    clustermap : seaborn.matrix.ClusterGrid
        A seaborn ClusterGrid instance.
    symmetric : boolean, default=True
Whether the CCI matrix plotted in the clustermap is symmetric.
Returns
-------
clustermap : seaborn.matrix.ClusterGrid
A seaborn ClusterGrid instance, with the xticks moved to the
diagonal if the CCI matrix was symmetric. If not, the same
input clustermap is returned, but with rotated xtick labels.
'''
if symmetric:
# Apply offset transform to all xticklabels.
clustermap.ax_row_dendrogram.set_visible(False)
clustermap.ax_heatmap.tick_params(bottom=False) # Hide xtick line
x_labels = clustermap.ax_heatmap.xaxis.get_majorticklabels()
dpi_x = clustermap.fig.dpi_scale_trans.to_values()[0]
dpi_y = clustermap.fig.dpi_scale_trans.to_values()[3]
x0 = clustermap.ax_heatmap.transData.transform(x_labels[0].get_position())
x1 = clustermap.ax_heatmap.transData.transform(x_labels[1].get_position())
ylims = clustermap.ax_heatmap.get_ylim()
bottom_points = clustermap.ax_heatmap.transData.transform((1.0, ylims[0]))[1]
for i, xl in enumerate(x_labels):
# Move labels in dx and dy points.
swap_xy = (1.0, xl.get_position()[0] + 0.5)
new_y_points = clustermap.ax_heatmap.transData.transform(swap_xy)[1]
dx = -0.5 * abs(x1[0] - x0[0]) / dpi_x
dy = (new_y_points - bottom_points) / dpi_y
offset = mpl.transforms.ScaledTranslation(dx, dy, clustermap.fig.dpi_scale_trans)
xl.set_transform(xl.get_transform() + offset)
if symmetric:
rot = 45
else:
rot = 90
va = 'center'
clustermap.ax_heatmap.set_xticklabels(clustermap.ax_heatmap.xaxis.get_majorticklabels(),
rotation=rot,
rotation_mode='anchor',
va='bottom',
ha='right') # , fontsize=9.5)
clustermap.ax_heatmap.set_yticklabels(clustermap.ax_heatmap.yaxis.get_majorticklabels(),
rotation=0,
va=va,
ha='left') # , fontsize=9.5)
return clustermap | en | 0.65346 | # -*- coding: utf-8 -*- Generates a clustermap (heatmap + dendrograms from a hierarchical clustering) based on CCI scores of cell-cell pairs. Parameters ---------- interaction_space : cell2cell.core.interaction_space.InteractionSpace Interaction space that contains all a distance matrix after running the the method compute_pairwise_cci_scores. Alternatively, this object can be a numpy-array or a pandas DataFrame. Also, a SingleCellInteractions or a BulkInteractions object after running the method compute_pairwise_cci_scores. method : str, default='ward' Clustering method for computing a linkage as in scipy.cluster.hierarchy.linkage optimal_leaf : boolean, default=True Whether sorting the leaf of the dendrograms to have a minimal distance between successive leaves. For more information, see scipy.cluster.hierarchy.optimal_leaf_ordering metadata : pandas.Dataframe, default=None Metadata associated with the cells, cell types or samples in the matrix containing CCI scores. If None, cells will not be colored by major groups. sample_col : str, default='#SampleID' Column in the metadata for the cells, cell types or samples in the matrix containing CCI scores. group_col : str, default='Groups' Column in the metadata containing the major groups of cells, cell types or samples in the matrix with CCI scores. meta_cmap : str, default='gist_rainbow' Name of the color palette for coloring the major groups of cells. colors : dict, default=None Dictionary containing tuples in the RGBA format for indicating colors of major groups of cells. If colors is specified, meta_cmap will be ignored. excluded_cells : list, default=None List containing cell names that are present in the interaction_space object but that will be excluded from this plot. title : str, default='' Title of the clustermap. cbar_title : str, default='CCI score' Title for the colorbar, depending on the score employed. cbar_fontsize : int, default=18 Font size for the colorbar title as well as labels for axes X and Y. filename : str, default=None Path to save the figure of the elbow analysis. If None, the figure is not saved. **kwargs : dict Dictionary containing arguments for the seaborn.clustermap function. Returns ------- hier : seaborn.matrix.ClusterGrid A seaborn ClusterGrid instance. # Drop excluded cells # Check symmetry to get linkage # Obtain info for generating plot # PLOT CCI MATRIX # Colors # Plot hierarchical clustering (triangular) Computes linkages for the CCI matrix. Parameters ---------- df : pandas.DataFrame Contains the CCI scores in a form of distances (that is, smaller values represent stronger interactions). Diagonal must be filled by zeros. kwargs : dict Dictionary containing arguments for the seaborn.clustermap function. method : str, default='ward' Clustering method for computing a linkage as in scipy.cluster.hierarchy.linkage optimal_ordering : boolean, default=True Whether sorting the leaf of the dendrograms to have a minimal distance between successive leaves. For more information, see scipy.cluster.hierarchy.optimal_leaf_ordering symmetric : boolean, default=None Whether df is symmetric. Returns ------- linkage : ndarray The hierarchical clustering of cells encoded as a linkage matrix. Generates a mask to plot the upper triangle of the CCI matrix. Parameters ---------- df : pandas.DataFrame Contains the CCI scores. Must be a symmetric matrix. linkage : ndarray, default=None The hierarchical clustering of cells encoded as a linkage matrix. 
symmetric : boolean, default=None Whether df is symmetric. **kwargs : dict Dictionary containing arguments for the seaborn.clustermap function. Returns ------- mask : ndarray Mask that contains ones in the places to be hidden in the clustermap. Only the diagonal and the upper triangle are not masked (contain zeros). # Triangular matrix # Plot hierarchical clustering for getting indexes according to linkage Plots a triangular clustermap based on a mask. Parameters ---------- df : pandas.DataFrame Contains the CCI scores. Must be a symmetric matrix. linkage : ndarray, default=None The hierarchical clustering of cells encoded as a linkage matrix. mask : ndarray, default=None Mask that contains ones in the places to be hidden in the clustermap. Only the diagonal and the upper triangle are not masked (contain zeros). If None, a mask will be computed based on the CCI matrix symmetry. col_colors : dict, default=None Dictionary containing tuples in the RGBA format for indicating colors of major groups of cells in the columns. row_colors : dict, default=None Dictionary containing tuples in the RGBA format for indicating colors of major groups of cells in the rows. title : str, default='' Title of the clustermap. cbar_title : str, default='CCI score' Title for the colorbar, depending on the score employed. cbar_fontsize : int, default=12 Font size for the colorbar title as well as labels for axes X and Y. **kwargs : dict Dictionary containing arguments for the seaborn.clustermap function. Returns ------- hier : seaborn.matrix.ClusterGrid A seaborn ClusterGrid instance. # Title # Color bar label Moves xticks to the diagonal when plotting a symmetric matrix in the form of a upper triangle. Parameters --------- clustermap : seaborn.matrix.ClusterGrid A seaborn ClusterGrid instance. symmetric : boolean, default=None Whether the CCI matrix plotted in the clustermap is symmetric. Returns ------- clustermap : seaborn.matrix.ClusterGrid A seaborn ClusterGrid instance, with the xticks moved to the diagonal if the CCI matrix was symmetric. If not, the same input clustermap is returned, but with rotated xtick labels. # Apply offset transform to all xticklabels. # Hide xtick line # Move labels in dx and dy points. # , fontsize=9.5) # , fontsize=9.5) | 2.608508 | 3 |
var/spack/repos/builtin.mock/packages/gnuconfig/package.py | jeanbez/spack | 0 | 10582 | <reponame>jeanbez/spack
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack.package import *
class Gnuconfig(Package):
"""
The GNU config.guess and config.sub scripts versioned by timestamp.
This package can be used as a build dependency for autotools packages that
ship a tarball with outdated config.guess and config.sub files.
"""
has_code = False
version('2021-08-14')
def install(self, spec, prefix):
config_sub = join_path(prefix, 'config.sub')
config_guess = join_path(prefix, 'config.guess')
# Create files
with open(config_sub, 'w') as f:
f.write("#!/bin/sh\necho gnuconfig version of config.sub")
with open(config_guess, 'w') as f:
f.write("#!/bin/sh\necho gnuconfig version of config.guess")
# Make executable
os.chmod(config_sub, 0o775)
os.chmod(config_guess, 0o775)
| # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack.package import *
class Gnuconfig(Package):
"""
The GNU config.guess and config.sub scripts versioned by timestamp.
This package can be used as a build dependency for autotools packages that
ship a tarball with outdated config.guess and config.sub files.
"""
has_code = False
version('2021-08-14')
def install(self, spec, prefix):
config_sub = join_path(prefix, 'config.sub')
config_guess = join_path(prefix, 'config.guess')
# Create files
with open(config_sub, 'w') as f:
f.write("#!/bin/sh\necho gnuconfig version of config.sub")
with open(config_guess, 'w') as f:
f.write("#!/bin/sh\necho gnuconfig version of config.guess")
# Make executable
os.chmod(config_sub, 0o775)
os.chmod(config_guess, 0o775) | en | 0.764101 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) The GNU config.guess and config.sub scripts versioned by timestamp. This package can be used as a build dependency for autotools packages that ship a tarball with outdated config.guess and config.sub files. # Create files # Make executable | 2.031506 | 2 |
mp4box/parsing/ctts.py | abhijeetbhagat/mp4box | 7 | 10583 | <filename>mp4box/parsing/ctts.py
from mp4box.box import CompositionTimeToSampleBox
def parse_ctts(reader, my_size):
version = reader.read32()
box = CompositionTimeToSampleBox(my_size, version, 0)
box.entry_count = reader.read32()
for _ in range(0, box.entry_count):
box.sample_count.append(reader.read32())
box.sample_offset.append(reader.read32())
return box
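# Illustrative sketch only (not part of the original parser): exercising parse_ctts with a
# stubbed reader. The word layout below — one version/flags word, an entry count, then
# (sample_count, sample_offset) pairs — mirrors what the loop above consumes; the stub
# class and the box attribute types are assumptions, not the project's real reader.
class _StubReader:
    def __init__(self, words):
        self._words = list(words)
    def read32(self):
        # return the next 32-bit word in file order
        return self._words.pop(0)
def _example_parse_ctts():
    reader = _StubReader([0, 2, 10, 0, 5, 1000])  # version=0, entry_count=2
    box = parse_ctts(reader, my_size=32)
    return box.sample_count, box.sample_offset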
| <filename>mp4box/parsing/ctts.py
from mp4box.box import CompositionTimeToSampleBox
def parse_ctts(reader, my_size):
version = reader.read32()
box = CompositionTimeToSampleBox(my_size, version, 0)
box.entry_count = reader.read32()
for _ in range(0, box.entry_count):
box.sample_count.append(reader.read32())
box.sample_offset.append(reader.read32())
return box
| none | 1 | 2.519025 | 3 |
|
MsLightweaverManager.py | Goobley/MsLightweaver | 0 | 10584 | <filename>MsLightweaverManager.py
import pickle
import numpy as np
import matplotlib.pyplot as plt
from lightweaver.rh_atoms import H_6_atom, C_atom, O_atom, OI_ord_atom, Si_atom, Al_atom, Fe_atom, FeI_atom, MgII_atom, N_atom, Na_atom, S_atom, CaII_atom
from lightweaver.atmosphere import Atmosphere, ScaleType
from lightweaver.atomic_table import DefaultAtomicAbundance
from lightweaver.atomic_set import RadiativeSet, SpeciesStateTable
from lightweaver.molecule import MolecularTable
from lightweaver.LwCompiled import LwContext
from lightweaver.utils import InitialSolution, planck, NgOptions, ConvergenceError, compute_radiative_losses, integrate_line_losses
import lightweaver.constants as Const
import lightweaver as lw
from typing import List
from copy import deepcopy
from MsLightweaverAtoms import H_6, CaII, H_6_nasa, CaII_nasa
import os
import os.path as path
import time
from radynpy.matsplotlib import OpcFile
from radynpy.utils import hydrogen_absorption
from numba import njit
from pathlib import Path
from scipy.linalg import solve
from scipy.interpolate import interp1d, PchipInterpolator
# from HydroWeno.Simulation import Grid
# from HydroWeno.Advector import Advector
# from HydroWeno.BCs import zero_grad_bc
# from HydroWeno.Weno import reconstruct_weno_nm_z
import warnings
from traceback import print_stack
from weno4 import weno4
from RadynAdvection import an_sol, an_rad_sol, an_gml_sol
import pdb
def weno4_pos(xs, xp, fp, **kwargs):
    return np.exp(weno4(xs, xp, np.log(fp), **kwargs))
# https://stackoverflow.com/a/21901260
import subprocess
def mslightweaver_revision():
p = Path(__file__).parent
isGitRepo = subprocess.check_output(['git', 'rev-parse', '--is-inside-work-tree'], cwd=p).decode('ascii').strip() == 'true'
if not isGitRepo:
raise ValueError('Cannot find git info.')
gitChanges = subprocess.check_output(['git', 'status', '--porcelain', '--untracked-files=no'], cwd=p).decode('ascii').strip()
if len(gitChanges) > 0:
        raise ValueError('Uncommitted changes to tracked files, cannot proceed:\n%s' % gitChanges)
return subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=p).decode('ascii').strip()
def check_write_git_revision(outputDir):
revision = mslightweaver_revision()
with open(outputDir + 'GitRevision.txt', 'w') as f:
f.write(revision)
def nr_advect(atmost, i0, eqPops, activeAtomNames, abundances):
d1 = atmost.d1[i0+1]
for a in activeAtomNames:
pop = np.zeros_like(eqPops[a])
for i in range(pop.shape[0]):
pop[i, :] = an_sol(atmost, i0, eqPops[a][i], tol=1e-8, maxIter=1000)
nTotal = d1 / (abundances.massPerH * lw.Amu) * abundances[a]
popCorrectionFactor = nTotal / pop.sum(axis=0)
print('Max Correction %s: %.2e' % (a, np.abs(1-popCorrectionFactor).max()))
pop *= popCorrectionFactor
eqPops[a][...] = pop
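# Minimal sketch (illustration only) of the mass-conservation rescaling used at the end of
# nr_advect: after each level population is advected independently, the levels are
# renormalised so they sum to the total number density implied by the new mass density.
# The numbers below are made-up toy values.
def _example_population_rescale():
    pop = np.array([[1.0e16, 9.0e15],     # level populations per depth point [m^-3]
                    [2.0e14, 1.0e14],
                    [5.0e12, 4.0e12]])
    nTotal = np.array([1.05e16, 9.2e15])  # target total from d1 / (massPerH * Amu) * abundance
    correction = nTotal / pop.sum(axis=0)
    return pop * correction[None, :]      # columns now sum exactly to nTotal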
class CoronalIrraditation(lw.BoundaryCondition):
def __init__(self):
# NOTE(cmo): This data needs to be in (mu, toObs) order, i.e. mu[0]
# down, mu[0] up, mu[1] down...
# self.I = I1d.reshape(I1d.shape[0], -1, I1d.shape[-1])
self.I = None
def set_bc(self, I1d):
self.I = np.expand_dims(I1d, axis=2)
def compute_bc(self, atmos, spect):
# if spect.wavelength.shape[0] != self.I.shape[0]:
# result = np.ones((spect.wavelength.shape[0], spect.I.shape[1], atmos.Nz))
# else:
if self.I is None:
            raise ValueError('I has not been set (CoronalIrraditation)')
result = np.copy(self.I)
return result
@njit
def time_dep_update_impl(theta, dt, Gamma, GammaPrev, n, nPrev):
Nlevel = n.shape[0]
Nspace = n.shape[1]
GammaPrev = GammaPrev if GammaPrev is not None else np.empty_like(Gamma)
Gam = np.zeros((Nlevel, Nlevel))
nk = np.zeros(Nlevel)
nPrevIter = np.zeros(Nlevel)
nCurrent = np.zeros(Nlevel)
atomDelta = 0.0
for k in range(Nspace):
nCurrent[:] = n[:, k]
nPrevIter[:] = nPrev[:, k]
Gam[...] = -theta * Gamma[:,:, k] * dt
Gam += np.eye(Nlevel)
if theta != 1.0:
nk[:] = (1.0 - theta) * dt * GammaPrev[:,:, k] @ nPrevIter + nPrevIter
else:
nk[:] = nPrevIter
nNew = np.linalg.solve(Gam, nk)
n[:, k] = nNew
atomDelta = max(atomDelta, np.nanmax(np.abs(1.0 - nCurrent / nNew)))
return atomDelta
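# Illustrative sketch (never called) of the theta-method update performed per depth point
# above: solve (I - theta*dt*Gamma) n_new = n_prev + (1 - theta)*dt*Gamma_prev @ n_prev.
# The 2-level rate matrix is a toy example, not data from the simulation.
def _example_theta_step(theta=0.5, dt=1e-3):
    Gamma = np.array([[-5.0, 2.0],
                      [5.0, -2.0]])   # toy rate matrix [s^-1]; columns sum to zero
    nPrev = np.array([0.9, 0.1])
    lhs = np.eye(2) - theta * dt * Gamma
    rhs = nPrev + (1.0 - theta) * dt * (Gamma @ nPrev)  # Gamma_prev taken equal to Gamma here
    return np.linalg.solve(lhs, rhs)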
class MsLightweaverManager:
def __init__(self, atmost, outputDir,
atoms, activeAtoms=['H', 'Ca'],
detailedH=False,
detailedHPath=None,
startingCtx=None, conserveCharge=False,
populationTransportMode='Advect',
downgoingRadiation=None,
prd=False):
# check_write_git_revision(outputDir)
self.atmost = atmost
self.outputDir = outputDir
self.conserveCharge = conserveCharge
self.abund = DefaultAtomicAbundance
self.idx = 0
self.nHTot = atmost.d1 / (self.abund.massPerH * Const.Amu)
self.prd = prd
self.updateRhoPrd = False
self.detailedH = detailedH
# NOTE(cmo): If this is None and detailedH is True then the data from
# atmost will be used, otherwise, an MsLw pickle will be loaded from
# the path.
self.detailedHPath = detailedHPath
if populationTransportMode == 'Advect':
self.advectPops = True
self.rescalePops = False
elif populationTransportMode == 'Rescale':
self.advectPops = False
self.rescalePops = True
elif populationTransportMode is None or populationTransportMode == 'None':
self.advectPops = False
self.rescalePops = False
else:
raise ValueError('Unknown populationTransportMode: %s' % populationTransportMode)
self.downgoingRadiation = downgoingRadiation
if startingCtx is not None:
self.ctx = startingCtx
args = startingCtx.arguments
self.atmos = args['atmos']
self.spect = args['spect']
self.aSet = self.spect.radSet
self.eqPops = args['eqPops']
            self.upperBc = self.atmos.upperBc
else:
nHTot = np.copy(self.nHTot[0])
if self.downgoingRadiation:
self.upperBc = CoronalIrraditation()
else:
self.upperBc = None
self.atmos = Atmosphere.make_1d(scale=ScaleType.Geometric, depthScale=np.copy(atmost.z1[0]), temperature=np.copy(atmost.tg1[0]), vlos=np.copy(atmost.vz1[0]), vturb=np.copy(atmost.vturb), ne=np.copy(atmost.ne1[0]), nHTot=nHTot, upperBc=self.upperBc)
# self.atmos.convert_scales()
self.atmos.quadrature(5)
self.aSet = RadiativeSet(atoms)
self.aSet.set_active(*activeAtoms)
if detailedH:
self.aSet.set_detailed_static('H')
# NOTE(cmo): Radyn seems to compute the collisional rates once per
# timestep(?) and we seem to get a much better agreement for Ca
# with the CH rates when H is set to LTE for the initial timestep.
# Might be a bug in my implementation though.
self.spect = self.aSet.compute_wavelength_grid()
self.mols = MolecularTable()
if self.conserveCharge:
self.eqPops = self.aSet.iterate_lte_ne_eq_pops(self.atmos, self.mols)
else:
self.eqPops = self.aSet.compute_eq_pops(self.atmos, self.mols)
self.ctx = lw.Context(self.atmos, self.spect, self.eqPops, initSol=InitialSolution.Lte, conserveCharge=self.conserveCharge, Nthreads=12)
self.atmos.bHeat = np.ones_like(self.atmost.bheat1[0]) * 1e-20
self.atmos.hPops = self.eqPops['H']
np.save(self.outputDir + 'Wavelength.npy', self.ctx.spect.wavelength)
if self.detailedH:
self.eqPops['H'][:] = self.detailed_hydrogen_pops()
if self.downgoingRadiation:
self.upperBc.set_bc(self.downgoingRadiation.compute_downgoing_radiation(self.spect.wavelength, self.atmos))
self.ctx.depthData.fill = True
# self.opac_background()
# NOTE(cmo): Set up background
# self.opc = OpcFile('opctab_cmo_mslw.dat')
# # self.opc = OpcFile()
# opcWvl = self.opc.wavel
# self.opcWvl = opcWvl
# # NOTE(cmo): Find mapping from wavelength array to opctab array, with
# # constant background over the region of each line. Are overlaps a
# # problem here? Probably -- but let's see the spectrum in practice
# # The record to be used is the one in self.wvlIdxs + 4 due to the data
# # layout in the opctab
# self.wvlIdxs = np.ones_like(self.spect.wavelength, dtype=np.int64) * -1
# lineCores = []
# for a in self.aSet.activeSet:
# for l in a.lines:
# lineCores.append(l.lambda0 * 10)
# lineCores = np.array(lineCores)
# lineCoreIdxs = np.zeros_like(lineCores)
# for i, l in enumerate(lineCores):
# closestIdx = np.argmin(np.abs(opcWvl - l))
# lineCoreIdxs[i] = closestIdx
# for a in self.aSet.activeSet:
# for l in a.lines:
# # closestIdx = np.argmin((opcWvl - l.lambda0*10)**2)
# closestCore = np.argmin(np.abs((l.wavelength * 10)[:, None] - lineCores), axis=1)
# closestIdx = lineCoreIdxs[closestCore]
# sub = find_subarray(self.spect.wavelength, l.wavelength)
# self.wvlIdxs[sub:sub + l.wavelength.shape[0]] = closestIdx
# for i, v in enumerate(self.wvlIdxs):
# if v >= 0:
# continue
# closestIdx = np.argmin(np.abs(opcWvl - self.spect.wavelength[i]*10))
# self.wvlIdxs[i] = closestIdx
# self.opctabIdxs = self.wvlIdxs + 4
# NOTE(cmo): Compute initial background opacity
# np.save('chi.npy', self.ctx.background.chi)
# np.save('eta.npy', self.ctx.background.eta)
# np.save('sca.npy', self.ctx.background.sca)
# self.opac_background()
def initial_stat_eq(self, Nscatter=3, NmaxIter=1000, popTol=1e-3, JTol=3e-3):
if self.prd:
self.ctx.update_hprd_coeffs()
for i in range(NmaxIter):
dJ = self.ctx.formal_sol_gamma_matrices()
if i < Nscatter:
continue
delta = self.ctx.stat_equil()
if self.prd:
self.ctx.prd_redistribute()
if self.ctx.crswDone and dJ < JTol and delta < popTol:
print('Stat eq converged in %d iterations' % (i+1))
break
else:
raise ConvergenceError('Stat Eq did not converge.')
def advect_pops(self):
if self.rescalePops:
adv = self.atmost.d1[self.idx+1] / self.atmost.d1[self.idx]
neAdv = self.atmos.ne * adv
self.atmos.ne[:] = neAdv
for atom in self.aSet.activeAtoms:
p = self.eqPops[atom.element]
for i in range(p.shape[0]):
pAdv = p[i] * adv
p[i, :] = pAdv
elif self.advectPops:
nr_advect(self.atmost, self.idx, self.eqPops, [a.element for a in self.aSet.activeAtoms], self.abund)
# NOTE(cmo): Guess advected n_e. Will be corrected to be self
# consistent later (in update_deps if conserveCharge=True). If
# conserveCharge isn't true then we're using loaded n_e anyway
# neAdv = interp1d(z0Tracer, np.log10(self.atmos.ne), kind=3, fill_value='extrapolate')(z1)
# self.atmos.ne[:] = 10**neAdv
def detailed_hydrogen_pops(self):
if not self.detailedH:
raise ValueError('Detailed H pops called without detailedH==True')
if self.detailedHPath:
with open(self.detailedHPath + '/Step_%.6d.pickle' % self.idx, 'rb') as pkl:
step = pickle.load(pkl)
pops = step['eqPops']['H']['n']
else:
pops = self.atmost.nh1[self.idx, :] / (np.sum(self.atmost.nh1[self.idx, :], axis=0) / self.atmos.nHTot)[None, :]
return pops
def detailed_ne(self):
if not self.detailedH:
raise ValueError('Detailed ne called without detailedH==True')
if self.detailedHPath:
with open(self.detailedHPath + '/Step_%.6d.pickle' % self.idx, 'rb') as pkl:
step = pickle.load(pkl)
ne = step['ne']
else:
ne = self.atmost.ne1[self.idx]
return ne
def save_timestep(self):
i = self.idx
with open(self.outputDir + 'Step_%.6d.pickle' % i, 'wb') as pkl:
eqPops = distill_pops(self.eqPops)
Iwave = self.ctx.spect.I
lines = []
for a in self.aSet.activeAtoms:
lines += self.aSet[a.element].lines
losses = compute_radiative_losses(self.ctx)
lineLosses = integrate_line_losses(self.ctx, losses, lines, extendGridNm=5.0)
pickle.dump({'eqPops': eqPops, 'Iwave': Iwave,
'ne': self.atmos.ne, 'lines': lines,
'losses': lineLosses}, pkl)
def load_timestep(self, stepNum):
with open(self.outputDir + 'Step_%.6d.pickle' % stepNum, 'rb') as pkl:
step = pickle.load(pkl)
self.idx = stepNum
self.atmos.temperature[:] = self.atmost.tg1[self.idx]
self.atmos.vlos[:] = self.atmost.vz1[self.idx]
if not self.conserveCharge:
self.atmos.ne[:] = self.detailed_ne()
if self.advectPops or self.rescalePops:
self.atmos.nHTot[:] = self.nHTot[self.idx]
self.atmos.bHeat[:] = self.atmost.bheat1[self.idx]
self.atmos.height[:] = self.atmost.z1[self.idx]
for name, pops in step['eqPops'].items():
if pops['n'] is not None:
self.eqPops.atomicPops[name].pops[:] = pops['n']
self.eqPops.atomicPops[name].nStar[:] = pops['nStar']
self.atmos.ne[:] = step['ne']
self.ctx.spect.I[:] = step['Iwave']
self.ctx.update_deps()
def increment_step(self):
self.advect_pops()
self.idx += 1
self.atmos.temperature[:] = self.atmost.tg1[self.idx]
self.atmos.vlos[:] = self.atmost.vz1[self.idx]
if not self.conserveCharge:
self.atmos.ne[:] = self.detailed_ne()
if self.advectPops or self.rescalePops:
self.atmos.nHTot[:] = self.nHTot[self.idx]
self.atmos.bHeat[:] = self.atmost.bheat1[self.idx]
if self.detailedH:
self.eqPops['H'][:] = self.detailed_hydrogen_pops()
self.atmos.height[:] = self.atmost.z1[self.idx]
self.ctx.update_deps()
if self.prd:
self.ctx.update_hprd_coeffs()
self.updateRhoPrd = False
self.interp_rho_prd()
if self.downgoingRadiation:
self.upperBc.set_bc(self.downgoingRadiation.compute_downgoing_radiation(self.spect.wavelength, self.atmos))
# self.opac_background()
def interp_rho_prd(self):
prevIdx = self.idx - 1
prevZ = self.atmost.z1[prevIdx]
z = self.atmost.z1[self.idx]
for atom in self.ctx.activeAtoms:
for trans in atom.trans:
try:
trans.rhoPrd
for la in range(trans.rhoPrd.shape[0]):
trans.rhoPrd[la, :] = weno4(z, prevZ, trans.rhoPrd[la])
trans.rhoPrd[trans.rhoPrd < 0] = 1e-5
except AttributeError:
pass
def time_dep_prev_state(self, evalGamma=False):
if evalGamma:
self.ctx.formal_sol_gamma_matrices()
s = {}
s['pops'] = [np.copy(a.n) for a in self.ctx.activeAtoms]
s['Gamma'] = [np.copy(a.Gamma) if evalGamma else None for a in self.ctx.activeAtoms]
return s
def time_dep_update(self, dt, prevState, theta=0.5):
atoms = self.ctx.activeAtoms
Nspace = self.atmos.Nspace
maxDelta = 0.0
for i, atom in enumerate(atoms):
atomDelta = time_dep_update_impl(theta, dt, atom.Gamma, prevState['Gamma'][i],
atom.n, prevState['pops'][i])
maxDelta = max(maxDelta, atomDelta)
s = ' %s delta = %6.4e' % (atom.atomicModel.element, atomDelta)
print(s)
return maxDelta
def time_dep_step(self, nSubSteps=200, popsTol=1e-3, JTol=3e-3, theta=1.0, dt=None):
dt = dt if dt is not None else self.atmost.dt[self.idx+1]
dNrPops = 0.0
underTol = False
# self.ctx.spect.J[:] = 0.0
if self.prd:
for atom in self.ctx.activeAtoms:
for t in atom.trans:
t.recompute_gII()
prevState = self.time_dep_prev_state(evalGamma=(theta!=1.0))
for sub in range(nSubSteps):
if self.updateRhoPrd and sub > 0:
dRho, prdIter = self.ctx.prd_redistribute(maxIter=10, tol=popsTol)
dJ = self.ctx.formal_sol_gamma_matrices()
delta = self.time_dep_update(dt, prevState, theta=theta)
if self.conserveCharge:
dNrPops = self.ctx.nr_post_update(timeDependentData={'dt': dt, 'nPrev': prevState['pops']})
if sub > 1 and ((delta < popsTol and dJ < JTol and dNrPops < popsTol)
or (delta < 0.1*popsTol and dNrPops < 0.1*popsTol)):
if self.prd:
if self.updateRhoPrd and dRho < JTol:
break
else:
print('Starting PRD Iterations')
self.updateRhoPrd = True
else:
break
else:
raise ValueError('NON-CONVERGED')
def cont_fn_data(self, step):
self.load_timestep(step)
self.ctx.depthData.fill = True
dJ = 1.0
while dJ > 1e-5:
dJ = self.ctx.formal_sol_gamma_matrices()
self.ctx.depthData.fill = False
J = np.copy(self.ctx.spect.J)
sourceData = {'chi': np.copy(self.ctx.depthData.chi),
'eta': np.copy(self.ctx.depthData.eta),
'chiBg': np.copy(self.ctx.background.chi),
'etaBg': np.copy(self.ctx.background.eta),
'scaBg': np.copy(self.ctx.background.sca),
'J': J
}
return sourceData
def rf_k(self, step, dt, pertSize, k, Jstart=None):
self.load_timestep(step)
print(pertSize)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
if Jstart is None:
dJ = 1.0
while dJ > 1e-3:
dJ = self.ctx.formal_sol_gamma_matrices()
Jstart = np.copy(self.ctx.spect.J)
self.atmos.temperature[k] += 0.5 * pertSize
self.ctx.update_deps()
self.time_dep_step(popsTol=1e-4, JTol=5e-3, dt=dt, theta=1.0)
plus = np.copy(self.ctx.spect.I[:, -1])
self.load_timestep(step)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
self.atmos.temperature[k] -= 0.5 * pertSize
self.ctx.update_deps()
# if Jstart is None:
# dJ = 1.0
# while dJ > 1e-3:
# dJ = self.ctx.formal_sol_gamma_matrices()
self.time_dep_step(popsTol=1e-4, JTol=5e-3, dt=dt, theta=1.0)
minus = np.copy(self.ctx.spect.I[:, -1])
return plus, minus
def rf_k_stat_eq(self, step, dt, pertSize, k, Jstart=None):
self.load_timestep(step)
print(pertSize)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
if Jstart is None:
dJ = 1.0
while dJ > 1e-3:
dJ = self.ctx.formal_sol_gamma_matrices()
Jstart = np.copy(self.ctx.spect.J)
self.atmos.temperature[k] += 0.5 * pertSize
self.ctx.update_deps()
# self.time_dep_step(popsTol=1e-4, JTol=5e-3, dt=dt, theta=1.0)
while True:
self.ctx.formal_sol_gamma_matrices()
dPops = self.ctx.stat_equil()
if dPops < 1e-5 and dPops != 0.0:
break
plus = np.copy(self.ctx.spect.I[:, -1])
self.load_timestep(step)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
self.atmos.temperature[k] -= 0.5 * pertSize
self.ctx.update_deps()
# if Jstart is None:
# dJ = 1.0
# while dJ > 1e-3:
# dJ = self.ctx.formal_sol_gamma_matrices()
# self.time_dep_step(popsTol=1e-4, JTol=5e-3, dt=dt, theta=1.0)
while True:
self.ctx.formal_sol_gamma_matrices()
dPops = self.ctx.stat_equil()
if dPops < 1e-5 and dPops != 0.0:
break
minus = np.copy(self.ctx.spect.I[:, -1])
return plus, minus
def rf_ne_k(self, step, dt, pertSizePercent, k, Jstart=None):
self.load_timestep(step)
print(pertSizePercent)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
if Jstart is None:
dJ = 1.0
while dJ > 1e-3:
dJ = self.ctx.formal_sol_gamma_matrices()
Jstart = np.copy(self.ctx.spect.J)
self.atmos.ne[k] += 0.5 * pertSizePercent * self.atmos.ne[k]
self.ctx.update_deps()
self.time_dep_step(popsTol=1e-3, JTol=5e-3, dt=dt, theta=1.0)
plus = np.copy(self.ctx.spect.I[:, -1])
self.load_timestep(step)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
self.atmos.ne[k] -= 0.5 * pertSizePercent * self.atmos.ne[k]
self.ctx.update_deps()
# if Jstart is None:
# dJ = 1.0
# while dJ > 1e-3:
# dJ = self.ctx.formal_sol_gamma_matrices()
self.time_dep_step(popsTol=1e-3, JTol=5e-3, dt=dt, theta=1.0)
minus = np.copy(self.ctx.spect.I[:, -1])
return plus, minus
def rf_vlos_k(self, step, dt, pertSize, k, Jstart=None):
self.load_timestep(step)
print(pertSize)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
if Jstart is None:
dJ = 1.0
while dJ > 1e-3:
dJ = self.ctx.formal_sol_gamma_matrices()
Jstart = np.copy(self.ctx.spect.J)
self.atmos.vlos[k] += 0.5 * pertSize
self.ctx.update_deps()
self.time_dep_step(popsTol=1e-3, JTol=5e-3, dt=dt, theta=1.0)
plus = np.copy(self.ctx.spect.I[:, -1])
self.load_timestep(step)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
self.atmos.vlos[k] -= 0.5 * pertSize
self.ctx.update_deps()
# if Jstart is None:
# dJ = 1.0
# while dJ > 1e-3:
# dJ = self.ctx.formal_sol_gamma_matrices()
self.time_dep_step(popsTol=1e-3, JTol=5e-3, dt=dt, theta=1.0)
minus = np.copy(self.ctx.spect.I[:, -1])
return plus, minus
def convert_atomic_pops(atom):
d = {}
if atom.pops is not None:
d['n'] = atom.pops
else:
d['n'] = atom.pops
d['nStar'] = atom.nStar
d['radiativeRates'] = atom.radiativeRates
return d
def distill_pops(eqPops):
d = {}
for atom in eqPops.atomicPops:
d[atom.element.name] = convert_atomic_pops(atom)
return d
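# Hedged usage sketch (an assumption, not part of the original script): one way the manager
# defined above is typically driven — build it from a RADYN-style `atmost` object, converge
# the initial statistical equilibrium, then march through the timesteps. The caller must
# supply `atmost`, `outputDir` and the atomic models; the keyword choices are illustrative.
def _example_run(atmost, outputDir, atoms, activeAtoms=['H', 'Ca'], Nsteps=10):
    ms = MsLightweaverManager(atmost, outputDir, atoms, activeAtoms=activeAtoms,
                              detailedH=True, conserveCharge=False)
    ms.initial_stat_eq()
    ms.save_timestep()
    for _ in range(Nsteps):
        ms.increment_step()
        ms.time_dep_step(popsTol=1e-3, JTol=3e-3, theta=1.0)
        ms.save_timestep()
    return ms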
| <filename>MsLightweaverManager.py
import pickle
import numpy as np
import matplotlib.pyplot as plt
from lightweaver.rh_atoms import H_6_atom, C_atom, O_atom, OI_ord_atom, Si_atom, Al_atom, Fe_atom, FeI_atom, MgII_atom, N_atom, Na_atom, S_atom, CaII_atom
from lightweaver.atmosphere import Atmosphere, ScaleType
from lightweaver.atomic_table import DefaultAtomicAbundance
from lightweaver.atomic_set import RadiativeSet, SpeciesStateTable
from lightweaver.molecule import MolecularTable
from lightweaver.LwCompiled import LwContext
from lightweaver.utils import InitialSolution, planck, NgOptions, ConvergenceError, compute_radiative_losses, integrate_line_losses
import lightweaver.constants as Const
import lightweaver as lw
from typing import List
from copy import deepcopy
from MsLightweaverAtoms import H_6, CaII, H_6_nasa, CaII_nasa
import os
import os.path as path
import time
from radynpy.matsplotlib import OpcFile
from radynpy.utils import hydrogen_absorption
from numba import njit
from pathlib import Path
from scipy.linalg import solve
from scipy.interpolate import interp1d, PchipInterpolator
# from HydroWeno.Simulation import Grid
# from HydroWeno.Advector import Advector
# from HydroWeno.BCs import zero_grad_bc
# from HydroWeno.Weno import reconstruct_weno_nm_z
import warnings
from traceback import print_stack
from weno4 import weno4
from RadynAdvection import an_sol, an_rad_sol, an_gml_sol
import pdb
def weno4_pos(xs, xp, fp, **kwargs):
    return np.exp(weno4(xs, xp, np.log(fp), **kwargs))
# https://stackoverflow.com/a/21901260
import subprocess
def mslightweaver_revision():
p = Path(__file__).parent
isGitRepo = subprocess.check_output(['git', 'rev-parse', '--is-inside-work-tree'], cwd=p).decode('ascii').strip() == 'true'
if not isGitRepo:
raise ValueError('Cannot find git info.')
gitChanges = subprocess.check_output(['git', 'status', '--porcelain', '--untracked-files=no'], cwd=p).decode('ascii').strip()
if len(gitChanges) > 0:
        raise ValueError('Uncommitted changes to tracked files, cannot proceed:\n%s' % gitChanges)
return subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=p).decode('ascii').strip()
def check_write_git_revision(outputDir):
revision = mslightweaver_revision()
with open(outputDir + 'GitRevision.txt', 'w') as f:
f.write(revision)
def nr_advect(atmost, i0, eqPops, activeAtomNames, abundances):
d1 = atmost.d1[i0+1]
for a in activeAtomNames:
pop = np.zeros_like(eqPops[a])
for i in range(pop.shape[0]):
pop[i, :] = an_sol(atmost, i0, eqPops[a][i], tol=1e-8, maxIter=1000)
nTotal = d1 / (abundances.massPerH * lw.Amu) * abundances[a]
popCorrectionFactor = nTotal / pop.sum(axis=0)
print('Max Correction %s: %.2e' % (a, np.abs(1-popCorrectionFactor).max()))
pop *= popCorrectionFactor
eqPops[a][...] = pop
class CoronalIrraditation(lw.BoundaryCondition):
def __init__(self):
# NOTE(cmo): This data needs to be in (mu, toObs) order, i.e. mu[0]
# down, mu[0] up, mu[1] down...
# self.I = I1d.reshape(I1d.shape[0], -1, I1d.shape[-1])
self.I = None
def set_bc(self, I1d):
self.I = np.expand_dims(I1d, axis=2)
def compute_bc(self, atmos, spect):
# if spect.wavelength.shape[0] != self.I.shape[0]:
# result = np.ones((spect.wavelength.shape[0], spect.I.shape[1], atmos.Nz))
# else:
if self.I is None:
            raise ValueError('I has not been set (CoronalIrraditation)')
result = np.copy(self.I)
return result
@njit
def time_dep_update_impl(theta, dt, Gamma, GammaPrev, n, nPrev):
Nlevel = n.shape[0]
Nspace = n.shape[1]
GammaPrev = GammaPrev if GammaPrev is not None else np.empty_like(Gamma)
Gam = np.zeros((Nlevel, Nlevel))
nk = np.zeros(Nlevel)
nPrevIter = np.zeros(Nlevel)
nCurrent = np.zeros(Nlevel)
atomDelta = 0.0
for k in range(Nspace):
nCurrent[:] = n[:, k]
nPrevIter[:] = nPrev[:, k]
Gam[...] = -theta * Gamma[:,:, k] * dt
Gam += np.eye(Nlevel)
if theta != 1.0:
nk[:] = (1.0 - theta) * dt * GammaPrev[:,:, k] @ nPrevIter + nPrevIter
else:
nk[:] = nPrevIter
nNew = np.linalg.solve(Gam, nk)
n[:, k] = nNew
atomDelta = max(atomDelta, np.nanmax(np.abs(1.0 - nCurrent / nNew)))
return atomDelta
class MsLightweaverManager:
def __init__(self, atmost, outputDir,
atoms, activeAtoms=['H', 'Ca'],
detailedH=False,
detailedHPath=None,
startingCtx=None, conserveCharge=False,
populationTransportMode='Advect',
downgoingRadiation=None,
prd=False):
# check_write_git_revision(outputDir)
self.atmost = atmost
self.outputDir = outputDir
self.conserveCharge = conserveCharge
self.abund = DefaultAtomicAbundance
self.idx = 0
self.nHTot = atmost.d1 / (self.abund.massPerH * Const.Amu)
self.prd = prd
self.updateRhoPrd = False
self.detailedH = detailedH
# NOTE(cmo): If this is None and detailedH is True then the data from
# atmost will be used, otherwise, an MsLw pickle will be loaded from
# the path.
self.detailedHPath = detailedHPath
if populationTransportMode == 'Advect':
self.advectPops = True
self.rescalePops = False
elif populationTransportMode == 'Rescale':
self.advectPops = False
self.rescalePops = True
elif populationTransportMode is None or populationTransportMode == 'None':
self.advectPops = False
self.rescalePops = False
else:
raise ValueError('Unknown populationTransportMode: %s' % populationTransportMode)
self.downgoingRadiation = downgoingRadiation
if startingCtx is not None:
self.ctx = startingCtx
args = startingCtx.arguments
self.atmos = args['atmos']
self.spect = args['spect']
self.aSet = self.spect.radSet
self.eqPops = args['eqPops']
            self.upperBc = self.atmos.upperBc
else:
nHTot = np.copy(self.nHTot[0])
if self.downgoingRadiation:
self.upperBc = CoronalIrraditation()
else:
self.upperBc = None
self.atmos = Atmosphere.make_1d(scale=ScaleType.Geometric, depthScale=np.copy(atmost.z1[0]), temperature=np.copy(atmost.tg1[0]), vlos=np.copy(atmost.vz1[0]), vturb=np.copy(atmost.vturb), ne=np.copy(atmost.ne1[0]), nHTot=nHTot, upperBc=self.upperBc)
# self.atmos.convert_scales()
self.atmos.quadrature(5)
self.aSet = RadiativeSet(atoms)
self.aSet.set_active(*activeAtoms)
if detailedH:
self.aSet.set_detailed_static('H')
# NOTE(cmo): Radyn seems to compute the collisional rates once per
# timestep(?) and we seem to get a much better agreement for Ca
# with the CH rates when H is set to LTE for the initial timestep.
# Might be a bug in my implementation though.
self.spect = self.aSet.compute_wavelength_grid()
self.mols = MolecularTable()
if self.conserveCharge:
self.eqPops = self.aSet.iterate_lte_ne_eq_pops(self.atmos, self.mols)
else:
self.eqPops = self.aSet.compute_eq_pops(self.atmos, self.mols)
self.ctx = lw.Context(self.atmos, self.spect, self.eqPops, initSol=InitialSolution.Lte, conserveCharge=self.conserveCharge, Nthreads=12)
self.atmos.bHeat = np.ones_like(self.atmost.bheat1[0]) * 1e-20
self.atmos.hPops = self.eqPops['H']
np.save(self.outputDir + 'Wavelength.npy', self.ctx.spect.wavelength)
if self.detailedH:
self.eqPops['H'][:] = self.detailed_hydrogen_pops()
if self.downgoingRadiation:
self.upperBc.set_bc(self.downgoingRadiation.compute_downgoing_radiation(self.spect.wavelength, self.atmos))
self.ctx.depthData.fill = True
# self.opac_background()
# NOTE(cmo): Set up background
# self.opc = OpcFile('opctab_cmo_mslw.dat')
# # self.opc = OpcFile()
# opcWvl = self.opc.wavel
# self.opcWvl = opcWvl
# # NOTE(cmo): Find mapping from wavelength array to opctab array, with
# # constant background over the region of each line. Are overlaps a
# # problem here? Probably -- but let's see the spectrum in practice
# # The record to be used is the one in self.wvlIdxs + 4 due to the data
# # layout in the opctab
# self.wvlIdxs = np.ones_like(self.spect.wavelength, dtype=np.int64) * -1
# lineCores = []
# for a in self.aSet.activeSet:
# for l in a.lines:
# lineCores.append(l.lambda0 * 10)
# lineCores = np.array(lineCores)
# lineCoreIdxs = np.zeros_like(lineCores)
# for i, l in enumerate(lineCores):
# closestIdx = np.argmin(np.abs(opcWvl - l))
# lineCoreIdxs[i] = closestIdx
# for a in self.aSet.activeSet:
# for l in a.lines:
# # closestIdx = np.argmin((opcWvl - l.lambda0*10)**2)
# closestCore = np.argmin(np.abs((l.wavelength * 10)[:, None] - lineCores), axis=1)
# closestIdx = lineCoreIdxs[closestCore]
# sub = find_subarray(self.spect.wavelength, l.wavelength)
# self.wvlIdxs[sub:sub + l.wavelength.shape[0]] = closestIdx
# for i, v in enumerate(self.wvlIdxs):
# if v >= 0:
# continue
# closestIdx = np.argmin(np.abs(opcWvl - self.spect.wavelength[i]*10))
# self.wvlIdxs[i] = closestIdx
# self.opctabIdxs = self.wvlIdxs + 4
# NOTE(cmo): Compute initial background opacity
# np.save('chi.npy', self.ctx.background.chi)
# np.save('eta.npy', self.ctx.background.eta)
# np.save('sca.npy', self.ctx.background.sca)
# self.opac_background()
def initial_stat_eq(self, Nscatter=3, NmaxIter=1000, popTol=1e-3, JTol=3e-3):
if self.prd:
self.ctx.update_hprd_coeffs()
for i in range(NmaxIter):
dJ = self.ctx.formal_sol_gamma_matrices()
if i < Nscatter:
continue
delta = self.ctx.stat_equil()
if self.prd:
self.ctx.prd_redistribute()
if self.ctx.crswDone and dJ < JTol and delta < popTol:
print('Stat eq converged in %d iterations' % (i+1))
break
else:
raise ConvergenceError('Stat Eq did not converge.')
def advect_pops(self):
if self.rescalePops:
adv = self.atmost.d1[self.idx+1] / self.atmost.d1[self.idx]
neAdv = self.atmos.ne * adv
self.atmos.ne[:] = neAdv
for atom in self.aSet.activeAtoms:
p = self.eqPops[atom.element]
for i in range(p.shape[0]):
pAdv = p[i] * adv
p[i, :] = pAdv
elif self.advectPops:
nr_advect(self.atmost, self.idx, self.eqPops, [a.element for a in self.aSet.activeAtoms], self.abund)
# NOTE(cmo): Guess advected n_e. Will be corrected to be self
# consistent later (in update_deps if conserveCharge=True). If
# conserveCharge isn't true then we're using loaded n_e anyway
# neAdv = interp1d(z0Tracer, np.log10(self.atmos.ne), kind=3, fill_value='extrapolate')(z1)
# self.atmos.ne[:] = 10**neAdv
def detailed_hydrogen_pops(self):
if not self.detailedH:
raise ValueError('Detailed H pops called without detailedH==True')
if self.detailedHPath:
with open(self.detailedHPath + '/Step_%.6d.pickle' % self.idx, 'rb') as pkl:
step = pickle.load(pkl)
pops = step['eqPops']['H']['n']
else:
pops = self.atmost.nh1[self.idx, :] / (np.sum(self.atmost.nh1[self.idx, :], axis=0) / self.atmos.nHTot)[None, :]
return pops
def detailed_ne(self):
if not self.detailedH:
raise ValueError('Detailed ne called without detailedH==True')
if self.detailedHPath:
with open(self.detailedHPath + '/Step_%.6d.pickle' % self.idx, 'rb') as pkl:
step = pickle.load(pkl)
ne = step['ne']
else:
ne = self.atmost.ne1[self.idx]
return ne
def save_timestep(self):
i = self.idx
with open(self.outputDir + 'Step_%.6d.pickle' % i, 'wb') as pkl:
eqPops = distill_pops(self.eqPops)
Iwave = self.ctx.spect.I
lines = []
for a in self.aSet.activeAtoms:
lines += self.aSet[a.element].lines
losses = compute_radiative_losses(self.ctx)
lineLosses = integrate_line_losses(self.ctx, losses, lines, extendGridNm=5.0)
pickle.dump({'eqPops': eqPops, 'Iwave': Iwave,
'ne': self.atmos.ne, 'lines': lines,
'losses': lineLosses}, pkl)
def load_timestep(self, stepNum):
with open(self.outputDir + 'Step_%.6d.pickle' % stepNum, 'rb') as pkl:
step = pickle.load(pkl)
self.idx = stepNum
self.atmos.temperature[:] = self.atmost.tg1[self.idx]
self.atmos.vlos[:] = self.atmost.vz1[self.idx]
if not self.conserveCharge:
self.atmos.ne[:] = self.detailed_ne()
if self.advectPops or self.rescalePops:
self.atmos.nHTot[:] = self.nHTot[self.idx]
self.atmos.bHeat[:] = self.atmost.bheat1[self.idx]
self.atmos.height[:] = self.atmost.z1[self.idx]
for name, pops in step['eqPops'].items():
if pops['n'] is not None:
self.eqPops.atomicPops[name].pops[:] = pops['n']
self.eqPops.atomicPops[name].nStar[:] = pops['nStar']
self.atmos.ne[:] = step['ne']
self.ctx.spect.I[:] = step['Iwave']
self.ctx.update_deps()
def increment_step(self):
self.advect_pops()
self.idx += 1
self.atmos.temperature[:] = self.atmost.tg1[self.idx]
self.atmos.vlos[:] = self.atmost.vz1[self.idx]
if not self.conserveCharge:
self.atmos.ne[:] = self.detailed_ne()
if self.advectPops or self.rescalePops:
self.atmos.nHTot[:] = self.nHTot[self.idx]
self.atmos.bHeat[:] = self.atmost.bheat1[self.idx]
if self.detailedH:
self.eqPops['H'][:] = self.detailed_hydrogen_pops()
self.atmos.height[:] = self.atmost.z1[self.idx]
self.ctx.update_deps()
if self.prd:
self.ctx.update_hprd_coeffs()
self.updateRhoPrd = False
self.interp_rho_prd()
if self.downgoingRadiation:
self.upperBc.set_bc(self.downgoingRadiation.compute_downgoing_radiation(self.spect.wavelength, self.atmos))
# self.opac_background()
def interp_rho_prd(self):
prevIdx = self.idx - 1
prevZ = self.atmost.z1[prevIdx]
z = self.atmost.z1[self.idx]
for atom in self.ctx.activeAtoms:
for trans in atom.trans:
try:
trans.rhoPrd
for la in range(trans.rhoPrd.shape[0]):
trans.rhoPrd[la, :] = weno4(z, prevZ, trans.rhoPrd[la])
trans.rhoPrd[trans.rhoPrd < 0] = 1e-5
except AttributeError:
pass
def time_dep_prev_state(self, evalGamma=False):
if evalGamma:
self.ctx.formal_sol_gamma_matrices()
s = {}
s['pops'] = [np.copy(a.n) for a in self.ctx.activeAtoms]
s['Gamma'] = [np.copy(a.Gamma) if evalGamma else None for a in self.ctx.activeAtoms]
return s
def time_dep_update(self, dt, prevState, theta=0.5):
atoms = self.ctx.activeAtoms
Nspace = self.atmos.Nspace
maxDelta = 0.0
for i, atom in enumerate(atoms):
atomDelta = time_dep_update_impl(theta, dt, atom.Gamma, prevState['Gamma'][i],
atom.n, prevState['pops'][i])
maxDelta = max(maxDelta, atomDelta)
s = ' %s delta = %6.4e' % (atom.atomicModel.element, atomDelta)
print(s)
return maxDelta
def time_dep_step(self, nSubSteps=200, popsTol=1e-3, JTol=3e-3, theta=1.0, dt=None):
dt = dt if dt is not None else self.atmost.dt[self.idx+1]
dNrPops = 0.0
underTol = False
# self.ctx.spect.J[:] = 0.0
if self.prd:
for atom in self.ctx.activeAtoms:
for t in atom.trans:
t.recompute_gII()
prevState = self.time_dep_prev_state(evalGamma=(theta!=1.0))
for sub in range(nSubSteps):
if self.updateRhoPrd and sub > 0:
dRho, prdIter = self.ctx.prd_redistribute(maxIter=10, tol=popsTol)
dJ = self.ctx.formal_sol_gamma_matrices()
delta = self.time_dep_update(dt, prevState, theta=theta)
if self.conserveCharge:
dNrPops = self.ctx.nr_post_update(timeDependentData={'dt': dt, 'nPrev': prevState['pops']})
if sub > 1 and ((delta < popsTol and dJ < JTol and dNrPops < popsTol)
or (delta < 0.1*popsTol and dNrPops < 0.1*popsTol)):
if self.prd:
if self.updateRhoPrd and dRho < JTol:
break
else:
print('Starting PRD Iterations')
self.updateRhoPrd = True
else:
break
else:
raise ValueError('NON-CONVERGED')
def cont_fn_data(self, step):
self.load_timestep(step)
self.ctx.depthData.fill = True
dJ = 1.0
while dJ > 1e-5:
dJ = self.ctx.formal_sol_gamma_matrices()
self.ctx.depthData.fill = False
J = np.copy(self.ctx.spect.J)
sourceData = {'chi': np.copy(self.ctx.depthData.chi),
'eta': np.copy(self.ctx.depthData.eta),
'chiBg': np.copy(self.ctx.background.chi),
'etaBg': np.copy(self.ctx.background.eta),
'scaBg': np.copy(self.ctx.background.sca),
'J': J
}
return sourceData
def rf_k(self, step, dt, pertSize, k, Jstart=None):
self.load_timestep(step)
print(pertSize)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
if Jstart is None:
dJ = 1.0
while dJ > 1e-3:
dJ = self.ctx.formal_sol_gamma_matrices()
Jstart = np.copy(self.ctx.spect.J)
self.atmos.temperature[k] += 0.5 * pertSize
self.ctx.update_deps()
self.time_dep_step(popsTol=1e-4, JTol=5e-3, dt=dt, theta=1.0)
plus = np.copy(self.ctx.spect.I[:, -1])
self.load_timestep(step)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
self.atmos.temperature[k] -= 0.5 * pertSize
self.ctx.update_deps()
# if Jstart is None:
# dJ = 1.0
# while dJ > 1e-3:
# dJ = self.ctx.formal_sol_gamma_matrices()
self.time_dep_step(popsTol=1e-4, JTol=5e-3, dt=dt, theta=1.0)
minus = np.copy(self.ctx.spect.I[:, -1])
return plus, minus
def rf_k_stat_eq(self, step, dt, pertSize, k, Jstart=None):
self.load_timestep(step)
print(pertSize)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
if Jstart is None:
dJ = 1.0
while dJ > 1e-3:
dJ = self.ctx.formal_sol_gamma_matrices()
Jstart = np.copy(self.ctx.spect.J)
self.atmos.temperature[k] += 0.5 * pertSize
self.ctx.update_deps()
# self.time_dep_step(popsTol=1e-4, JTol=5e-3, dt=dt, theta=1.0)
while True:
self.ctx.formal_sol_gamma_matrices()
dPops = self.ctx.stat_equil()
if dPops < 1e-5 and dPops != 0.0:
break
plus = np.copy(self.ctx.spect.I[:, -1])
self.load_timestep(step)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
self.atmos.temperature[k] -= 0.5 * pertSize
self.ctx.update_deps()
# if Jstart is None:
# dJ = 1.0
# while dJ > 1e-3:
# dJ = self.ctx.formal_sol_gamma_matrices()
# self.time_dep_step(popsTol=1e-4, JTol=5e-3, dt=dt, theta=1.0)
while True:
self.ctx.formal_sol_gamma_matrices()
dPops = self.ctx.stat_equil()
if dPops < 1e-5 and dPops != 0.0:
break
minus = np.copy(self.ctx.spect.I[:, -1])
return plus, minus
def rf_ne_k(self, step, dt, pertSizePercent, k, Jstart=None):
self.load_timestep(step)
print(pertSizePercent)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
if Jstart is None:
dJ = 1.0
while dJ > 1e-3:
dJ = self.ctx.formal_sol_gamma_matrices()
Jstart = np.copy(self.ctx.spect.J)
self.atmos.ne[k] += 0.5 * pertSizePercent * self.atmos.ne[k]
self.ctx.update_deps()
self.time_dep_step(popsTol=1e-3, JTol=5e-3, dt=dt, theta=1.0)
plus = np.copy(self.ctx.spect.I[:, -1])
self.load_timestep(step)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
self.atmos.ne[k] -= 0.5 * pertSizePercent * self.atmos.ne[k]
self.ctx.update_deps()
# if Jstart is None:
# dJ = 1.0
# while dJ > 1e-3:
# dJ = self.ctx.formal_sol_gamma_matrices()
self.time_dep_step(popsTol=1e-3, JTol=5e-3, dt=dt, theta=1.0)
minus = np.copy(self.ctx.spect.I[:, -1])
return plus, minus
def rf_vlos_k(self, step, dt, pertSize, k, Jstart=None):
self.load_timestep(step)
print(pertSize)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
if Jstart is None:
dJ = 1.0
while dJ > 1e-3:
dJ = self.ctx.formal_sol_gamma_matrices()
Jstart = np.copy(self.ctx.spect.J)
self.atmos.vlos[k] += 0.5 * pertSize
self.ctx.update_deps()
self.time_dep_step(popsTol=1e-3, JTol=5e-3, dt=dt, theta=1.0)
plus = np.copy(self.ctx.spect.I[:, -1])
self.load_timestep(step)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
self.atmos.vlos[k] -= 0.5 * pertSize
self.ctx.update_deps()
# if Jstart is None:
# dJ = 1.0
# while dJ > 1e-3:
# dJ = self.ctx.formal_sol_gamma_matrices()
self.time_dep_step(popsTol=1e-3, JTol=5e-3, dt=dt, theta=1.0)
minus = np.copy(self.ctx.spect.I[:, -1])
return plus, minus
def convert_atomic_pops(atom):
d = {}
if atom.pops is not None:
d['n'] = atom.pops
else:
d['n'] = atom.pops
d['nStar'] = atom.nStar
d['radiativeRates'] = atom.radiativeRates
return d
def distill_pops(eqPops):
d = {}
for atom in eqPops.atomicPops:
d[atom.element.name] = convert_atomic_pops(atom)
return d
| en | 0.512253 | # from HydroWeno.Simulation import Grid # from HydroWeno.Advector import Advector # from HydroWeno.BCs import zero_grad_bc # from HydroWeno.Weno import reconstruct_weno_nm_z # https://stackoverflow.com/a/21901260 # NOTE(cmo): This data needs to be in (mu, toObs) order, i.e. mu[0] # down, mu[0] up, mu[1] down... # self.I = I1d.reshape(I1d.shape[0], -1, I1d.shape[-1]) # if spect.wavelength.shape[0] != self.I.shape[0]: # result = np.ones((spect.wavelength.shape[0], spect.I.shape[1], atmos.Nz)) # else: # check_write_git_revision(outputDir) # NOTE(cmo): If this is None and detailedH is True then the data from # atmost will be used, otherwise, an MsLw pickle will be loaded from # the path. # self.atmos.convert_scales() # NOTE(cmo): Radyn seems to compute the collisional rates once per # timestep(?) and we seem to get a much better agreement for Ca # with the CH rates when H is set to LTE for the initial timestep. # Might be a bug in my implementation though. # self.opac_background() # NOTE(cmo): Set up background # self.opc = OpcFile('opctab_cmo_mslw.dat') # # self.opc = OpcFile() # opcWvl = self.opc.wavel # self.opcWvl = opcWvl # # NOTE(cmo): Find mapping from wavelength array to opctab array, with # # constant background over the region of each line. Are overlaps a # # problem here? Probably -- but let's see the spectrum in practice # # The record to be used is the one in self.wvlIdxs + 4 due to the data # # layout in the opctab # self.wvlIdxs = np.ones_like(self.spect.wavelength, dtype=np.int64) * -1 # lineCores = [] # for a in self.aSet.activeSet: # for l in a.lines: # lineCores.append(l.lambda0 * 10) # lineCores = np.array(lineCores) # lineCoreIdxs = np.zeros_like(lineCores) # for i, l in enumerate(lineCores): # closestIdx = np.argmin(np.abs(opcWvl - l)) # lineCoreIdxs[i] = closestIdx # for a in self.aSet.activeSet: # for l in a.lines: # # closestIdx = np.argmin((opcWvl - l.lambda0*10)**2) # closestCore = np.argmin(np.abs((l.wavelength * 10)[:, None] - lineCores), axis=1) # closestIdx = lineCoreIdxs[closestCore] # sub = find_subarray(self.spect.wavelength, l.wavelength) # self.wvlIdxs[sub:sub + l.wavelength.shape[0]] = closestIdx # for i, v in enumerate(self.wvlIdxs): # if v >= 0: # continue # closestIdx = np.argmin(np.abs(opcWvl - self.spect.wavelength[i]*10)) # self.wvlIdxs[i] = closestIdx # self.opctabIdxs = self.wvlIdxs + 4 # NOTE(cmo): Compute initial background opacity # np.save('chi.npy', self.ctx.background.chi) # np.save('eta.npy', self.ctx.background.eta) # np.save('sca.npy', self.ctx.background.sca) # self.opac_background() # NOTE(cmo): Guess advected n_e. Will be corrected to be self # consistent later (in update_deps if conserveCharge=True). If # conserveCharge isn't true then we're using loaded n_e anyway # neAdv = interp1d(z0Tracer, np.log10(self.atmos.ne), kind=3, fill_value='extrapolate')(z1) # self.atmos.ne[:] = 10**neAdv # self.opac_background() # self.ctx.spect.J[:] = 0.0 # if Jstart is None: # dJ = 1.0 # while dJ > 1e-3: # dJ = self.ctx.formal_sol_gamma_matrices() # self.time_dep_step(popsTol=1e-4, JTol=5e-3, dt=dt, theta=1.0) # if Jstart is None: # dJ = 1.0 # while dJ > 1e-3: # dJ = self.ctx.formal_sol_gamma_matrices() # self.time_dep_step(popsTol=1e-4, JTol=5e-3, dt=dt, theta=1.0) # if Jstart is None: # dJ = 1.0 # while dJ > 1e-3: # dJ = self.ctx.formal_sol_gamma_matrices() # if Jstart is None: # dJ = 1.0 # while dJ > 1e-3: # dJ = self.ctx.formal_sol_gamma_matrices() | 1.567876 | 2 |
common/writeExcel.py | lixiaofeng1993/DjangoBlog | 0 | 10585 | <filename>common/writeExcel.py
# coding:utf-8
from openpyxl import load_workbook
import openpyxl
from openpyxl.styles import Font, colors
def copy_excel(cese_path, report_path):
"""
复制测试用例到report_path
:param cese_path:
:param report_path:
:return:
"""
wb2 = openpyxl.Workbook()
wb2.save(report_path) # 在设置的路径下创建一个excel文件
# 读取数据
wb1 = openpyxl.load_workbook(cese_path)
wb2 = openpyxl.load_workbook(report_path)
sheets1 = wb1.sheetnames
sheets2 = wb2.sheetnames
    sheet1 = wb1[sheets1[0]]  # get the first sheet
    sheet2 = wb2[sheets2[0]]
    max_row = sheet1.max_row  # maximum number of rows
    max_column = sheet1.max_column  # maximum number of columns
    for m in list(range(1, max_row + 1)):
        for n in list(range(97, 97 + max_column)):  # chr(97)='a'
            n = chr(n)  # ASCII character, i.e. the excel column letter a b c
            i = '%s%d' % (n, m)  # cell reference
            cell1 = sheet1[i].value  # read the test-case cell value
            sheet2[i].value = cell1  # copy it into the result cell
    wb2.save(report_path)  # save the data
    wb1.close()  # close the workbooks
wb2.close()
class Write_excel(object):
"""
修改excel数据
"""
def __init__(self, filename):
self.filename = filename
self.wb = load_workbook(self.filename)
self.ws = self.wb.active # 激活sheet
def write(self, row_n, col_n, value):
"""写入数据,如(2,3,"hello"),第二行第三列写入数据"hello\""""
ft = Font(color=colors.RED, size=12, bold=True)
# 判断值为错误时添加字体样式
if value in ['fail', 'error'] or col_n == 12:
self.ws.cell(row_n, col_n).font = ft
if value == 'pass':
ft = Font(color=colors.GREEN)
self.ws.cell(row_n, col_n).font = ft
self.ws.cell(row_n, col_n).value = value
self.wb.save(self.filename)
if __name__ == "__main__":
# copy_excel("demo_api_3.xlsx", "test111.xlsx")
wt = Write_excel("test111.xlsx")
wt.write(4, 5, "HELLEOP")
wt.write(4, 6, "HELLEOP")
| <filename>common/writeExcel.py
# coding:utf-8
from openpyxl import load_workbook
import openpyxl
from openpyxl.styles import Font, colors
def copy_excel(cese_path, report_path):
"""
复制测试用例到report_path
:param cese_path:
:param report_path:
:return:
"""
wb2 = openpyxl.Workbook()
wb2.save(report_path) # 在设置的路径下创建一个excel文件
# 读取数据
wb1 = openpyxl.load_workbook(cese_path)
wb2 = openpyxl.load_workbook(report_path)
sheets1 = wb1.sheetnames
sheets2 = wb2.sheetnames
    sheet1 = wb1[sheets1[0]]  # get the first sheet
    sheet2 = wb2[sheets2[0]]
    max_row = sheet1.max_row  # maximum number of rows
    max_column = sheet1.max_column  # maximum number of columns
    for m in list(range(1, max_row + 1)):
        for n in list(range(97, 97 + max_column)):  # chr(97)='a'
            n = chr(n)  # ASCII character, i.e. the excel column letter a b c
            i = '%s%d' % (n, m)  # cell reference
            cell1 = sheet1[i].value  # read the test-case cell value
            sheet2[i].value = cell1  # copy it into the result cell
    wb2.save(report_path)  # save the data
    wb1.close()  # close the workbooks
wb2.close()
class Write_excel(object):
"""
修改excel数据
"""
def __init__(self, filename):
self.filename = filename
self.wb = load_workbook(self.filename)
self.ws = self.wb.active # 激活sheet
def write(self, row_n, col_n, value):
"""写入数据,如(2,3,"hello"),第二行第三列写入数据"hello\""""
ft = Font(color=colors.RED, size=12, bold=True)
# 判断值为错误时添加字体样式
if value in ['fail', 'error'] or col_n == 12:
self.ws.cell(row_n, col_n).font = ft
if value == 'pass':
ft = Font(color=colors.GREEN)
self.ws.cell(row_n, col_n).font = ft
self.ws.cell(row_n, col_n).value = value
self.wb.save(self.filename)
if __name__ == "__main__":
# copy_excel("demo_api_3.xlsx", "test111.xlsx")
wt = Write_excel("test111.xlsx")
wt.write(4, 5, "HELLEOP")
wt.write(4, 6, "HELLEOP")
test/cts/tool/CTSConverter/src/nn/specs/V1_1/depthwise_conv2d_float_weights_as_inputs_relaxed.mod.py | zhaoming0/webml-polyfill | 255 | 10586 | #
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 2}")
f1 = Input("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
b1 = Input("op3", "TENSOR_FLOAT32", "{4}")
pad0 = Int32Scalar("pad0", 0)
act = Int32Scalar("act", 0)
stride = Int32Scalar("stride", 1)
cm = Int32Scalar("channelMultiplier", 2)
output = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
model = model.Operation("DEPTHWISE_CONV_2D",
i1, f1, b1,
pad0, pad0, pad0, pad0,
stride, stride,
cm, act).To(output)
model = model.RelaxedExecution(True)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
[10, 21, 10, 22, 10, 23,
10, 24, 10, 25, 10, 26,
10, 27, 10, 28, 10, 29],
f1:
[.25, 0, .2, 0,
.25, 0, 0, .3,
.25, 0, 0, 0,
.25, .1, 0, 0],
b1:
[1, 2, 3, 4]}
# (i1 (conv) f1) + b1
# filter usage:
# in_ch1 * f_1 --> output_d1
# in_ch1 * f_2 --> output_d2
# in_ch2 * f_3 --> output_d3
# in_ch2 * f_4 --> output_d4
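# Sanity check for one output element (derived from the values above, not part of
# the original spec): input channel 1 is constantly 10, so
# output_d1 = 10 * (.25 + .25 + .25 + .25) + b1[0] = 10 + 1 = 11 and
# output_d2 = 10 * .1 + b1[1] = 1 + 2 = 3 at every output position, matching the
# expected values below.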
output0 = {output: # output 0
[11, 3, 7.2, 10.6,
11, 3, 7.4, 10.9,
11, 3, 7.8, 11.5,
11, 3, 8.0, 11.8]}
# Instantiate an example
Example((input0, output0))
jina/executors/evaluators/rank/recall.py | sdsd0101/jina | 2 | 10587 | from typing import Sequence, Any
from jina.executors.evaluators.rank import BaseRankingEvaluator
from jina.executors.evaluators.decorators import as_aggregator
class RecallEvaluator(BaseRankingEvaluator):
"""A :class:`RecallEvaluator` evaluates the Precision of the search.
It computes how many of the first given `eval_at` groundtruth are found in the matches
"""
def __init__(self, eval_at: int, *args, **kwargs):
""""
:param eval_at: k at which evaluation is performed
"""
super().__init__(*args, **kwargs)
self.eval_at = eval_at
@property
def complete_name(self):
return f'Recall@{self.eval_at}'
@as_aggregator
def evaluate(self, matches_ids: Sequence[Any], groundtruth_ids: Sequence[Any], *args, **kwargs) -> float:
""""
:param matches_ids: the matched document identifiers from the request as matched by jina indexers and rankers
:param groundtruth_ids: the expected documents matches ids sorted as they are expected
:return the evaluation metric value for the request document
"""
ret = 0.0
for doc_id in groundtruth_ids[:self.eval_at]:
if doc_id in matches_ids:
ret += 1.0
divisor = min(self.eval_at, len(matches_ids))
if divisor == 0.0:
"""TODO: Agree on a behavior"""
return 0.0
else:
return ret / divisor
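# Worked example (hypothetical ids, not part of the original executor): with
# eval_at=2, matches_ids=['doc1', 'doc2', 'doc3'] and groundtruth_ids=['doc2', 'doc5'],
# only 'doc2' of the first two groundtruth ids appears in the matches, so the raw
# value is 1.0 / min(2, 3) = 0.5; the @as_aggregator decorator is then expected to
# fold this value into the evaluator's running aggregate.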
tests/schema_mapping/structures/example5.py | danny-vayu/typedpy | 0 | 10588 | <reponame>danny-vayu/typedpy
from typedpy import Array, DoNotSerialize, Structure, mappers
class Foo(Structure):
i: int
s: str
_serialization_mapper = {"i": "j", "s": "name"}
class Example5(Foo):
a: Array
_serialization_mapper = [{"j": DoNotSerialize}, mappers.TO_LOWERCASE] | from typedpy import Array, DoNotSerialize, Structure, mappers
src/semantic_parsing_with_constrained_lm/eval.py | microsoft/semantic_parsing_with_constrained_lm | 17 | 10589 | <filename>src/semantic_parsing_with_constrained_lm/eval.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import dataclasses
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Dict, Generic, List, Optional, Sequence, TypeVar
from semantic_parsing_with_constrained_lm.datum import FullDatum, FullDatumSub
from semantic_parsing_with_constrained_lm.model import ModelResult
Pred = TypeVar("Pred")
Target = TypeVar("Target")
# TODO: Replace this with a more flexible function suited to each domain
def exact_match_with_logging(
test_datum: FullDatum, kbest: Sequence[ModelResult],
) -> bool:
gold = (
test_datum.canonical.strip(" ")
if test_datum.canonical is not None
else "UNREACHABLE"
)
pred = kbest[0].text.strip(" ") if kbest else ""
print()
print(f"context: {test_datum.agent_context}")
print(f"natural: {test_datum.natural}")
print(f"predicted: {pred}")
print(f"gold: {gold}")
result = gold == pred
print(f"is correct: {result}")
beam_result = False
for i, pred_i in enumerate(kbest):
stripped = pred_i.text.strip(" ")
beam_result = beam_result or gold == stripped
print(f"Beam {i} [{pred_i.cost:.3f}]: {stripped}")
print(f"is correct@{i}: {beam_result}")
print()
return result
class Metric(Generic[Pred, Target], ABC):
"""Used to measure goodness of model results compared to the ground truth.
Stateful over the duration of an experiment run."""
@abstractmethod
def update(self, pred: Pred, target: Target) -> Dict[str, Optional[str]]:
"""Uses `target` and the model predictions `pred` to update the state."""
pass
@abstractmethod
def compute(self) -> Dict[str, float]:
"""Uses the state to compute the final results."""
pass
@abstractmethod
def reset(self) -> None:
"""Reinitializes the state."""
pass
@dataclass
class TopKExactMatch(Metric[Sequence[str], FullDatumSub]):
k: int
correct: List[int] = dataclasses.field(init=False)
total: int = dataclasses.field(init=False)
def __post_init__(self):
self.reset()
def _is_correct(self, pred: str, target: FullDatumSub) -> bool:
"""Can be overridden by child classes."""
return pred == target.canonical
def update(
self, preds: Sequence[str], target: FullDatumSub
) -> Dict[str, Optional[str]]:
self.total += 1
found_correct = False
result: Dict[str, Optional[str]] = {}
for i, pred in enumerate(preds[: self.k]):
correct = self._is_correct(pred, target)
found_correct |= correct
self.correct[i] += found_correct
result[f"rank{i + 1}"] = "correct" if correct else "incorrect"
result[f"top{i + 1}"] = "correct" if found_correct else "incorrect"
# Handle when we have fewer predictions than self.k
for i in range(len(preds), self.k):
self.correct[i] += found_correct
result[f"rank{i + 1}"] = "incorrect"
result[f"top{i + 1}"] = "correct" if found_correct else "incorrect"
return result
def compute(self) -> Dict[str, float]:
result = {}
for i in range(self.k):
result[f"top{i + 1}"] = self.correct[i] / self.total
return result
def reset(self) -> None:
self.correct = [0] * self.k
self.total = 0
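# Minimal usage sketch (hypothetical values, not part of the original module):
#     metric = TopKExactMatch(k=2)
#     metric.update(["plan()", "oops()"], datum)  # datum.canonical == "plan()"
#     metric.compute()  # -> {"top1": 1.0, "top2": 1.0}
# The rank-1 prediction is an exact match, so both top-1 and top-2 accuracy count
# this example as correct.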
irrigation_control/irrigation_control_py3/common_irrigation_chains_py3.py | bopopescu/docker_images_a | 2 | 10590 | <reponame>bopopescu/docker_images_a
class Check_Excessive_Current(object):
def __init__(self,chain_name,cf,handlers,irrigation_io,irrigation_hash_control,get_json_object):
self.get_json_object = get_json_object
cf.define_chain(chain_name, False )
#cf.insert.log("check_excessive_current")
cf.insert.assert_function_reset(self.check_excessive_current)
cf.insert.log("excessive_current_found")
cf.insert.send_event("IRI_CLOSE_MASTER_VALVE",False)
cf.insert.send_event( "RELEASE_IRRIGATION_CONTROL")
cf.insert.one_step(irrigation_io.disable_all_sprinklers )
cf.insert.wait_event_count( count = 15 )
cf.insert.reset()
self.handlers = handlers
self.irrigation_hash_control = irrigation_hash_control
def check_excessive_current(self,cf_handle, chainObj, parameters, event):
#print("check excessive current")
return False #TBD
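# Note (added observation): check_excessive_current is still a stub -- it always
# returns False, so the shutdown steps chained above are never triggered. A real
# implementation would presumably compare a measured current (e.g. obtained via
# irrigation_hash_control) against a configured limit.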
lib/surface/spanner/operations/list.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2 | 10591 | # -*- coding: utf-8 -*- #
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for spanner operations list."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import textwrap
from googlecloudsdk.api_lib.spanner import backup_operations
from googlecloudsdk.api_lib.spanner import database_operations
from googlecloudsdk.api_lib.spanner import instance_config_operations
from googlecloudsdk.api_lib.spanner import instance_operations
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as c_exceptions
from googlecloudsdk.command_lib.spanner import flags
def _CommonRun(args):
"""Performs run actions common to all List stages."""
is_database_type = (
args.type == 'DATABASE_RESTORE' or args.type == 'DATABASE' or
args.type == 'DATABASE_CREATE' or args.type == 'DATABASE_UPDATE_DDL')
if args.backup or args.type == 'BACKUP':
# Update output table for backup operations.
# pylint:disable=protected-access
args._GetParser().ai.display_info.AddFormat("""
table(
name.basename():label=OPERATION_ID,
done():label=DONE,
metadata.'@type'.split('.').slice(-1:).join(),
metadata.name.split('/').slice(-1:).join():label=BACKUP,
metadata.database.split('/').slice(-1).join():label=SOURCE_DATABASE,
metadata.progress.startTime:label=START_TIME,
metadata.progress.endTime:label=END_TIME
)
""")
if args.type == 'DATABASE_RESTORE':
# Update output table for restore operations.
# pylint:disable=protected-access
args._GetParser().ai.display_info.AddFormat("""
table(
name.basename():label=OPERATION_ID,
done():label=DONE,
metadata.'@type'.split('.').slice(-1:).join(),
metadata.name.split('/').slice(-1:).join():label=RESTORED_DATABASE,
metadata.backupInfo.backup.split('/').slice(-1).join():label=SOURCE_BACKUP,
metadata.progress.startTime:label=START_TIME,
metadata.progress.endTime:label=END_TIME
)
""")
elif is_database_type:
# Update output table for database operations.
# pylint:disable=protected-access
args._GetParser().ai.display_info.AddFormat("""
table(
name.basename():label=OPERATION_ID,
metadata.statements.join(sep="\n"),
done():label=DONE,
metadata.'@type'.split('.').slice(-1:).join(),
database().split('/').slice(-1:).join():label=DATABASE_ID
)
""")
# Checks that user only specified either database or backup flag.
if (args.IsSpecified('database') and args.IsSpecified('backup')):
raise c_exceptions.InvalidArgumentException(
'--database or --backup',
'Must specify either --database or --backup. To search backups for a '
'specific database, use the --database flag with --type=BACKUP')
# Checks that the user did not specify the backup flag with the type filter
# set to a database operation type.
if (args.IsSpecified('backup') and is_database_type):
raise c_exceptions.InvalidArgumentException(
'--backup or --type',
'The backup flag cannot be used with the type flag set to a '
'database operation type.')
if args.type == 'INSTANCE':
if args.IsSpecified('database'):
raise c_exceptions.InvalidArgumentException(
'--database or --type',
'The `--database` flag cannot be used with `--type=INSTANCE`.')
if args.IsSpecified('backup'):
raise c_exceptions.InvalidArgumentException(
'--backup or --type',
'The `--backup` flag cannot be used with `--type=INSTANCE`.')
if args.type == 'BACKUP':
if args.database:
db_filter = backup_operations.BuildDatabaseFilter(args.instance,
args.database)
return backup_operations.List(args.instance, db_filter)
if args.backup:
return backup_operations.ListGeneric(args.instance, args.backup)
return backup_operations.List(args.instance)
if is_database_type:
type_filter = database_operations.BuildDatabaseOperationTypeFilter(
args.type)
return database_operations.ListDatabaseOperations(args.instance,
args.database,
type_filter)
if args.backup:
return backup_operations.ListGeneric(args.instance, args.backup)
if args.database:
return database_operations.List(args.instance, args.database)
return instance_operations.List(args.instance)
@base.ReleaseTracks(base.ReleaseTrack.GA, base.ReleaseTrack.BETA)
class List(base.ListCommand):
"""List the Cloud Spanner operations on the given instance or database."""
detailed_help = {
'EXAMPLES':
textwrap.dedent("""\
To list Cloud Spanner instance operations for an instance, run:
$ {command} --instance=my-instance-id --type=INSTANCE
To list Cloud Spanner backup operations for an instance, run:
$ {command} --instance=my-instance-id --type=BACKUP
To list Cloud Spanner database operations for an instance, run:
$ {command} --instance=my-instance-id --type=DATABASE
To list Cloud Spanner database operations for a database, run:
$ {command} --instance=my-instance-id --database=my-database-id --type=DATABASE
To list Cloud Spanner backup operations for a database, run:
$ {command} --instance=my-instance-id --database=my-database-id --type=BACKUP
To list Cloud Spanner backup operations for a backup, run:
$ {command} --instance=my-instance-id --backup=my-backup-id --type=BACKUP
"""),
}
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Please add arguments in alphabetical order except for no- or a clear-
pair for that argument which can follow the argument itself.
Args:
parser: An argparse parser that you can use to add arguments that go on
the command line after this command. Positional arguments are allowed.
"""
flags.Instance(
positional=False,
text='The ID of the instance the operations are executing on.'
).AddToParser(parser)
flags.AddCommonListArgs(parser)
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
Some value that we want to have printed later.
"""
return _CommonRun(args)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class AlphaList(List):
"""List the Cloud Spanner operations on the given instance or database or instance-config."""
@staticmethod
def Args(parser):
"""See base class."""
mutex_group = parser.add_group(mutex=True, required=True)
mutex_group.add_argument(
'--instance-config',
completer=flags.InstanceConfigCompleter,
help='The ID of the instance config the operation is executing on.')
mutex_group.add_argument(
'--instance',
completer=flags.InstanceCompleter,
help='The ID of the instance the operation is executing on.')
additional_choices = {
'INSTANCE_CONFIG_CREATE':
'Instance config create operations are returned for the given '
'instance config (--instance-config).',
'INSTANCE_CONFIG_UPDATE':
'Instance config update operations are returned for the given '
'instance config (--instance-config).'
}
flags.AddCommonListArgs(parser, additional_choices)
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
Some value that we want to have printed later.
"""
if args.instance_config:
type_filter = instance_config_operations.BuildInstanceConfigOperationTypeFilter(
args.type)
return instance_config_operations.List(args.instance_config, type_filter)
return _CommonRun(args)
pgmpy/tests/test_models/test_SEM.py | predictive-analytics-lab/pgmpy | 0 | 10592 | <filename>pgmpy/tests/test_models/test_SEM.py
import os
import unittest
import numpy as np
import networkx as nx
import numpy.testing as npt
from pgmpy.models import SEM, SEMGraph, SEMAlg
class TestSEM(unittest.TestCase):
def test_from_graph(self):
self.demo = SEM.from_graph(
ebunch=[
("xi1", "x1"),
("xi1", "x2"),
("xi1", "x3"),
("xi1", "eta1"),
("eta1", "y1"),
("eta1", "y2"),
("eta1", "y3"),
("eta1", "y4"),
("eta1", "eta2"),
("xi1", "eta2"),
("eta2", "y5"),
("eta2", "y6"),
("eta2", "y7"),
("eta2", "y8"),
],
latents=["xi1", "eta1", "eta2"],
err_corr=[
("y1", "y5"),
("y2", "y6"),
("y2", "y4"),
("y3", "y7"),
("y4", "y8"),
("y6", "y8"),
],
)
self.assertSetEqual(self.demo.latents, {"xi1", "eta1", "eta2"})
self.assertSetEqual(
self.demo.observed, {"x1", "x2", "x3", "y1", "y2", "y3", "y4", "y5", "y6", "y7", "y8"}
)
self.assertListEqual(
sorted(self.demo.graph.nodes()),
[
"eta1",
"eta2",
"x1",
"x2",
"x3",
"xi1",
"y1",
"y2",
"y3",
"y4",
"y5",
"y6",
"y7",
"y8",
],
)
self.assertListEqual(
sorted(self.demo.graph.edges()),
sorted(
[
("eta1", "eta2"),
("eta1", "y1"),
("eta1", "y2"),
("eta1", "y3"),
("eta1", "y4"),
("eta2", "y5"),
("eta2", "y6"),
("eta2", "y7"),
("eta2", "y8"),
("xi1", "eta1"),
("xi1", "eta2"),
("xi1", "x1"),
("xi1", "x2"),
("xi1", "x3"),
]
),
)
self.assertDictEqual(self.demo.graph.edges[("xi1", "x1")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "x2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "x3")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "eta1")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y1")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y3")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y4")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "eta2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "eta2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y5")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y6")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y7")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y8")], {"weight": np.NaN})
npt.assert_equal(
nx.to_numpy_matrix(
self.demo.err_graph, nodelist=sorted(self.demo.err_graph.nodes()), weight=None
),
np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0],
]
),
)
for edge in self.demo.err_graph.edges():
self.assertDictEqual(self.demo.err_graph.edges[edge], {"weight": np.NaN})
for node in self.demo.err_graph.nodes():
self.assertDictEqual(self.demo.err_graph.nodes[node], {"weight": np.NaN})
def test_from_lavaan(self):
model_str = """# %load model.lav
# measurement model
ind60 =~ x1 + x2 + x3
dem60 =~ y1 + y2 + y3 + y4
dem65 =~ y5 + y6 + y7 + y8
# regressions
dem60 ~ ind60
dem65 ~ ind60 + dem60
# residual correlations
y1 ~~ y5
y2 ~~ y4 + y6
y3 ~~ y7
y4 ~~ y8
y6 ~~ y8
"""
model_from_str = SEM.from_lavaan(string=model_str)
with open("test_model.lav", "w") as f:
f.write(model_str)
model_from_file = SEM.from_lavaan(filename="test_model.lav")
os.remove("test_model.lav")
expected_edges = set(
[
("ind60", "x1"),
("ind60", "x2"),
("ind60", "x3"),
("ind60", "dem60"),
("ind60", "dem65"),
("dem60", "dem65"),
("dem60", "y1"),
("dem60", "y2"),
("dem60", "y3"),
("dem60", "y4"),
("dem65", "y5"),
("dem65", "y6"),
("dem65", "y7"),
("dem65", "y8"),
]
)
# Undirected Graph, needs to handle when edges returned in reverse.
expected_err_edges = set(
[
("y1", "y5"),
("y5", "y1"),
("y2", "y6"),
("y6", "y2"),
("y2", "y4"),
("y4", "y2"),
("y3", "y7"),
("y7", "y3"),
("y4", "y8"),
("y8", "y4"),
("y6", "y8"),
("y8", "y6"),
]
)
expected_latents = set(["dem60", "dem65", "ind60"])
self.assertEqual(set(model_from_str.graph.edges()), expected_edges)
self.assertEqual(set(model_from_file.graph.edges()), expected_edges)
self.assertFalse(set(model_from_str.err_graph.edges()) - expected_err_edges)
self.assertFalse(set(model_from_file.err_graph.edges()) - expected_err_edges)
self.assertEqual(set(model_from_str.latents), expected_latents)
self.assertEqual(set(model_from_file.latents), expected_latents)
def test_from_lisrel(self):
pass # TODO: Add this test when done writing the tests for SEMAlg
def test_from_ram(self):
pass # TODO: Add this.
class TestSEMGraph(unittest.TestCase):
def setUp(self):
self.demo = SEMGraph(
ebunch=[
("xi1", "x1"),
("xi1", "x2"),
("xi1", "x3"),
("xi1", "eta1"),
("eta1", "y1"),
("eta1", "y2"),
("eta1", "y3"),
("eta1", "y4"),
("eta1", "eta2"),
("xi1", "eta2"),
("eta2", "y5"),
("eta2", "y6"),
("eta2", "y7"),
("eta2", "y8"),
],
latents=["xi1", "eta1", "eta2"],
err_corr=[
("y1", "y5"),
("y2", "y6"),
("y2", "y4"),
("y3", "y7"),
("y4", "y8"),
("y6", "y8"),
],
)
self.union = SEMGraph(
ebunch=[
("yrsmill", "unionsen"),
("age", "laboract"),
("age", "deferenc"),
("deferenc", "laboract"),
("deferenc", "unionsen"),
("laboract", "unionsen"),
],
latents=[],
err_corr=[("yrsmill", "age")],
)
self.demo_params = SEMGraph(
ebunch=[
("xi1", "x1", 0.4),
("xi1", "x2", 0.5),
("xi1", "x3", 0.6),
("xi1", "eta1", 0.3),
("eta1", "y1", 1.1),
("eta1", "y2", 1.2),
("eta1", "y3", 1.3),
("eta1", "y4", 1.4),
("eta1", "eta2", 0.1),
("xi1", "eta2", 0.2),
("eta2", "y5", 0.7),
("eta2", "y6", 0.8),
("eta2", "y7", 0.9),
("eta2", "y8", 1.0),
],
latents=["xi1", "eta1", "eta2"],
err_corr=[
("y1", "y5", 1.5),
("y2", "y6", 1.6),
("y2", "y4", 1.9),
("y3", "y7", 1.7),
("y4", "y8", 1.8),
("y6", "y8", 2.0),
],
err_var={
"y1": 2.1,
"y2": 2.2,
"y3": 2.3,
"y4": 2.4,
"y5": 2.5,
"y6": 2.6,
"y7": 2.7,
"y8": 2.8,
"x1": 3.1,
"x2": 3.2,
"x3": 3.3,
"eta1": 2.9,
"eta2": 3.0,
"xi1": 3.4,
},
)
self.custom = SEMGraph(
ebunch=[
("xi1", "eta1"),
("xi1", "y1"),
("xi1", "y4"),
("xi1", "x1"),
("xi1", "x2"),
("y4", "y1"),
("y1", "eta2"),
("eta2", "y5"),
("y1", "eta1"),
("eta1", "y2"),
("eta1", "y3"),
],
latents=["xi1", "eta1", "eta2"],
err_corr=[("y1", "y2"), ("y2", "y3")],
err_var={},
)
def test_demo_init(self):
self.assertSetEqual(self.demo.latents, {"xi1", "eta1", "eta2"})
self.assertSetEqual(
self.demo.observed, {"x1", "x2", "x3", "y1", "y2", "y3", "y4", "y5", "y6", "y7", "y8"}
)
self.assertListEqual(
sorted(self.demo.graph.nodes()),
[
"eta1",
"eta2",
"x1",
"x2",
"x3",
"xi1",
"y1",
"y2",
"y3",
"y4",
"y5",
"y6",
"y7",
"y8",
],
)
self.assertListEqual(
sorted(self.demo.graph.edges()),
sorted(
[
("eta1", "eta2"),
("eta1", "y1"),
("eta1", "y2"),
("eta1", "y3"),
("eta1", "y4"),
("eta2", "y5"),
("eta2", "y6"),
("eta2", "y7"),
("eta2", "y8"),
("xi1", "eta1"),
("xi1", "eta2"),
("xi1", "x1"),
("xi1", "x2"),
("xi1", "x3"),
]
),
)
self.assertDictEqual(self.demo.graph.edges[("xi1", "x1")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "x2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "x3")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "eta1")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y1")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y3")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y4")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "eta2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "eta2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y5")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y6")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y7")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y8")], {"weight": np.NaN})
npt.assert_equal(
nx.to_numpy_matrix(
self.demo.err_graph, nodelist=sorted(self.demo.err_graph.nodes()), weight=None
),
np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0],
]
),
)
for edge in self.demo.err_graph.edges():
self.assertDictEqual(self.demo.err_graph.edges[edge], {"weight": np.NaN})
for node in self.demo.err_graph.nodes():
self.assertDictEqual(self.demo.err_graph.nodes[node], {"weight": np.NaN})
def test_union_init(self):
self.assertSetEqual(self.union.latents, set())
self.assertSetEqual(
self.union.observed, {"yrsmill", "unionsen", "age", "laboract", "deferenc"}
)
self.assertListEqual(
sorted(self.union.graph.nodes()),
sorted(["yrsmill", "unionsen", "age", "laboract", "deferenc"]),
)
self.assertListEqual(
sorted(self.union.graph.edges()),
sorted(
[
("yrsmill", "unionsen"),
("age", "laboract"),
("age", "deferenc"),
("deferenc", "laboract"),
("deferenc", "unionsen"),
("laboract", "unionsen"),
]
),
)
self.assertDictEqual(self.union.graph.edges[("yrsmill", "unionsen")], {"weight": np.NaN})
self.assertDictEqual(self.union.graph.edges[("age", "laboract")], {"weight": np.NaN})
self.assertDictEqual(self.union.graph.edges[("age", "deferenc")], {"weight": np.NaN})
self.assertDictEqual(self.union.graph.edges[("deferenc", "laboract")], {"weight": np.NaN})
self.assertDictEqual(self.union.graph.edges[("deferenc", "unionsen")], {"weight": np.NaN})
self.assertDictEqual(self.union.graph.edges[("laboract", "unionsen")], {"weight": np.NaN})
npt.assert_equal(
nx.to_numpy_matrix(
self.union.err_graph, nodelist=sorted(self.union.err_graph.nodes()), weight=None
),
np.array(
[
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
]
),
)
for edge in self.union.err_graph.edges():
self.assertDictEqual(self.union.err_graph.edges[edge], {"weight": np.NaN})
for node in self.union.err_graph.nodes():
self.assertDictEqual(self.union.err_graph.nodes[node], {"weight": np.NaN})
def test_demo_param_init(self):
self.assertDictEqual(self.demo_params.graph.edges[("xi1", "x1")], {"weight": 0.4})
self.assertDictEqual(self.demo_params.graph.edges[("xi1", "x2")], {"weight": 0.5})
self.assertDictEqual(self.demo_params.graph.edges[("xi1", "x3")], {"weight": 0.6})
self.assertDictEqual(self.demo_params.graph.edges[("xi1", "eta1")], {"weight": 0.3})
self.assertDictEqual(self.demo_params.graph.edges[("eta1", "y1")], {"weight": 1.1})
self.assertDictEqual(self.demo_params.graph.edges[("eta1", "y2")], {"weight": 1.2})
self.assertDictEqual(self.demo_params.graph.edges[("eta1", "y3")], {"weight": 1.3})
self.assertDictEqual(self.demo_params.graph.edges[("eta1", "y4")], {"weight": 1.4})
self.assertDictEqual(self.demo_params.graph.edges[("eta1", "eta2")], {"weight": 0.1})
self.assertDictEqual(self.demo_params.graph.edges[("xi1", "eta2")], {"weight": 0.2})
self.assertDictEqual(self.demo_params.graph.edges[("eta2", "y5")], {"weight": 0.7})
self.assertDictEqual(self.demo_params.graph.edges[("eta2", "y6")], {"weight": 0.8})
self.assertDictEqual(self.demo_params.graph.edges[("eta2", "y7")], {"weight": 0.9})
self.assertDictEqual(self.demo_params.graph.edges[("eta2", "y8")], {"weight": 1.0})
self.assertDictEqual(self.demo_params.err_graph.edges[("y1", "y5")], {"weight": 1.5})
self.assertDictEqual(self.demo_params.err_graph.edges[("y2", "y6")], {"weight": 1.6})
self.assertDictEqual(self.demo_params.err_graph.edges[("y2", "y4")], {"weight": 1.9})
self.assertDictEqual(self.demo_params.err_graph.edges[("y3", "y7")], {"weight": 1.7})
self.assertDictEqual(self.demo_params.err_graph.edges[("y4", "y8")], {"weight": 1.8})
self.assertDictEqual(self.demo_params.err_graph.edges[("y6", "y8")], {"weight": 2.0})
self.assertDictEqual(self.demo_params.err_graph.nodes["y1"], {"weight": 2.1})
self.assertDictEqual(self.demo_params.err_graph.nodes["y2"], {"weight": 2.2})
self.assertDictEqual(self.demo_params.err_graph.nodes["y3"], {"weight": 2.3})
self.assertDictEqual(self.demo_params.err_graph.nodes["y4"], {"weight": 2.4})
self.assertDictEqual(self.demo_params.err_graph.nodes["y5"], {"weight": 2.5})
self.assertDictEqual(self.demo_params.err_graph.nodes["y6"], {"weight": 2.6})
self.assertDictEqual(self.demo_params.err_graph.nodes["y7"], {"weight": 2.7})
self.assertDictEqual(self.demo_params.err_graph.nodes["y8"], {"weight": 2.8})
self.assertDictEqual(self.demo_params.err_graph.nodes["x1"], {"weight": 3.1})
self.assertDictEqual(self.demo_params.err_graph.nodes["x2"], {"weight": 3.2})
self.assertDictEqual(self.demo_params.err_graph.nodes["x3"], {"weight": 3.3})
self.assertDictEqual(self.demo_params.err_graph.nodes["eta1"], {"weight": 2.9})
self.assertDictEqual(self.demo_params.err_graph.nodes["eta2"], {"weight": 3.0})
def test_get_full_graph_struct(self):
full_struct = self.union._get_full_graph_struct()
self.assertFalse(
set(full_struct.nodes())
- set(
[
"yrsmill",
"unionsen",
"age",
"laboract",
"deferenc",
".yrsmill",
".unionsen",
".age",
".laboract",
".deferenc",
"..ageyrsmill",
"..yrsmillage",
]
)
)
self.assertFalse(
set(full_struct.edges())
- set(
[
("yrsmill", "unionsen"),
("age", "laboract"),
("age", "deferenc"),
("deferenc", "laboract"),
("deferenc", "unionsen"),
("laboract", "unionsen"),
(".yrsmill", "yrsmill"),
(".unionsen", "unionsen"),
(".age", "age"),
(".laboract", "laboract"),
(".deferenc", "deferenc"),
("..ageyrsmill", ".age"),
("..ageyrsmill", ".yrsmill"),
("..yrsmillage", ".age"),
("..yrsmillage", ".yrsmill"),
]
)
)
def test_active_trail_nodes(self):
demo_nodes = ["x1", "x2", "x3", "y1", "y2", "y3", "y4", "y5", "y6", "y7", "y8"]
for node in demo_nodes:
self.assertSetEqual(
self.demo.active_trail_nodes(node, struct="full")[node], set(demo_nodes)
)
union_nodes = self.union.graph.nodes()
active_trails = self.union.active_trail_nodes(list(union_nodes), struct="full")
for node in union_nodes:
self.assertSetEqual(active_trails[node], set(union_nodes))
self.assertSetEqual(
self.union.active_trail_nodes("age", observed=["laboract", "deferenc", "unionsen"])[
"age"
],
{"age", "yrsmill"},
)
def test_get_scaling_indicators(self):
demo_scaling_indicators = self.demo.get_scaling_indicators()
self.assertTrue(demo_scaling_indicators["eta1"] in ["y1", "y2", "y3", "y4"])
self.assertTrue(demo_scaling_indicators["eta2"] in ["y5", "y6", "y7", "y8"])
self.assertTrue(demo_scaling_indicators["xi1"] in ["x1", "x2", "x3"])
union_scaling_indicators = self.union.get_scaling_indicators()
self.assertDictEqual(union_scaling_indicators, dict())
custom_scaling_indicators = self.custom.get_scaling_indicators()
self.assertTrue(custom_scaling_indicators["xi1"] in ["x1", "x2", "y1", "y4"])
self.assertTrue(custom_scaling_indicators["eta1"] in ["y2", "y3"])
self.assertTrue(custom_scaling_indicators["eta2"] in ["y5"])
def test_to_lisrel(self):
demo = SEMGraph(
ebunch=[
("xi1", "x1", 1.000),
("xi1", "x2", 2.180),
("xi1", "x3", 1.819),
("xi1", "eta1", 1.483),
("eta1", "y1", 1.000),
("eta1", "y2", 1.257),
("eta1", "y3", 1.058),
("eta1", "y4", 1.265),
("eta1", "eta2", 0.837),
("xi1", "eta2", 0.572),
("eta2", "y5", 1.000),
("eta2", "y6", 1.186),
("eta2", "y7", 1.280),
("eta2", "y8", 1.266),
],
latents=["xi1", "eta1", "eta2"],
err_corr=[
("y1", "y5", 0.624),
("y2", "y6", 2.153),
("y2", "y4", 1.313),
("y3", "y7", 0.795),
("y4", "y8", 0.348),
("y6", "y8", 1.356),
],
err_var={
"x1": 0.082,
"x2": 0.120,
"x3": 0.467,
"y1": 1.891,
"y2": 7.373,
"y3": 5.067,
"y4": 3.148,
"y5": 2.351,
"y6": 4.954,
"y7": 3.431,
"y8": 3.254,
"xi1": 0.448,
"eta1": 3.956,
"eta2": 0.172,
},
)
demo_lisrel = demo.to_lisrel()
indexing = []
vars_ordered = [
"y1",
"y2",
"y3",
"y4",
"y5",
"y6",
"y7",
"y8",
"x1",
"x2",
"x3",
"xi1",
"eta1",
"eta2",
]
for var in vars_ordered:
indexing.append(demo_lisrel.eta.index(var))
eta_reorder = [demo_lisrel.eta[i] for i in indexing]
B_reorder = demo_lisrel.B[indexing, :][:, indexing]
B_fixed_reorder = demo_lisrel.B_fixed_mask[indexing, :][:, indexing]
zeta_reorder = demo_lisrel.zeta[indexing, :][:, indexing]
zeta_fixed_reorder = demo_lisrel.zeta_fixed_mask[indexing, :][:, indexing]
wedge_y_reorder = demo_lisrel.wedge_y[:, indexing]
self.assertEqual(vars_ordered, eta_reorder)
npt.assert_array_equal(
B_reorder,
np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
]
),
)
npt.assert_array_equal(
zeta_reorder,
np.array(
[
[1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
]
),
)
npt.assert_array_equal(
B_fixed_reorder,
np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.000, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.257, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.058, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.265, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.000],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.186],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.280],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.266],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.000, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2.180, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.819, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.483, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.572, 0.837, 0],
]
),
)
npt.assert_array_equal(
zeta_fixed_reorder,
np.array(
[
[1.891, 0, 0, 0, 0.624, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 7.373, 0, 1.313, 0, 2.153, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 5.067, 0, 0, 0, 0.795, 0, 0, 0, 0, 0, 0, 0],
[0, 1.313, 0, 3.148, 0, 0, 0, 0.348, 0, 0, 0, 0, 0, 0],
[0.624, 0, 0, 0, 2.351, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 2.153, 0, 0, 0, 4.954, 0, 1.356, 0, 0, 0, 0, 0, 0],
[0, 0, 0.795, 0, 0, 0, 3.431, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0.348, 0, 1.356, 0, 3.254, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0.082, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0.120, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.467, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.448, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3.956, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.172],
]
),
)
npt.assert_array_equal(
demo_lisrel.wedge_y,
np.array(
[
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
]
),
)
def test_to_from_lisrel(self):
demo_lisrel = self.demo.to_lisrel()
union_lisrel = self.union.to_lisrel()
demo_params_lisrel = self.demo_params.to_lisrel()
custom_lisrel = self.custom.to_lisrel()
demo_graph = demo_lisrel.to_SEMGraph()
union_graph = union_lisrel.to_SEMGraph()
demo_params_graph = demo_params_lisrel.to_SEMGraph()
custom_graph = custom_lisrel.to_SEMGraph()
# Test demo
self.assertSetEqual(set(self.demo.graph.nodes()), set(demo_graph.graph.nodes()))
self.assertSetEqual(set(self.demo.graph.edges()), set(demo_graph.graph.edges()))
self.assertSetEqual(set(self.demo.err_graph.nodes()), set(demo_graph.err_graph.nodes()))
npt.assert_array_equal(
nx.to_numpy_matrix(self.demo.err_graph, nodelist=sorted(self.demo.err_graph.nodes())),
nx.to_numpy_matrix(demo_graph, nodelist=sorted(demo_graph.err_graph.nodes())),
)
self.assertSetEqual(
set(self.demo.full_graph_struct.nodes()), set(demo_graph.full_graph_struct.nodes())
)
self.assertSetEqual(
set(self.demo.full_graph_struct.edges()), set(demo_graph.full_graph_struct.edges())
)
self.assertSetEqual(self.demo.latents, demo_graph.latents)
self.assertSetEqual(self.demo.observed, demo_graph.observed)
# Test union
self.assertSetEqual(set(self.union.graph.nodes()), set(union_graph.graph.nodes()))
self.assertSetEqual(set(self.union.graph.edges()), set(union_graph.graph.edges()))
self.assertSetEqual(set(self.union.err_graph.nodes()), set(union_graph.err_graph.nodes()))
npt.assert_array_equal(
nx.to_numpy_matrix(self.union.err_graph, nodelist=sorted(self.union.err_graph.nodes())),
nx.to_numpy_matrix(union_graph, nodelist=sorted(union_graph.err_graph.nodes())),
)
self.assertSetEqual(
set(self.union.full_graph_struct.nodes()), set(union_graph.full_graph_struct.nodes())
)
self.assertSetEqual(
set(self.union.full_graph_struct.edges()), set(union_graph.full_graph_struct.edges())
)
self.assertSetEqual(self.union.latents, union_graph.latents)
self.assertSetEqual(self.union.observed, union_graph.observed)
# Test demo_params
self.assertSetEqual(
set(self.demo_params.graph.nodes()), set(demo_params_graph.graph.nodes())
)
self.assertSetEqual(
set(self.demo_params.graph.edges()), set(demo_params_graph.graph.edges())
)
self.assertSetEqual(
set(self.demo_params.err_graph.nodes()), set(demo_params_graph.err_graph.nodes())
)
npt.assert_array_equal(
nx.to_numpy_matrix(
self.demo_params.err_graph,
nodelist=sorted(self.demo_params.err_graph.nodes()),
weight=None,
),
nx.to_numpy_matrix(
demo_params_graph.err_graph,
nodelist=sorted(demo_params_graph.err_graph.nodes()),
weight=None,
),
)
self.assertSetEqual(
set(self.demo_params.full_graph_struct.nodes()),
set(demo_params_graph.full_graph_struct.nodes()),
)
self.assertSetEqual(
set(self.demo_params.full_graph_struct.edges()),
set(demo_params_graph.full_graph_struct.edges()),
)
self.assertSetEqual(self.demo_params.latents, demo_params_graph.latents)
self.assertSetEqual(self.demo_params.observed, demo_params_graph.observed)
# Test custom
self.assertSetEqual(set(self.custom.graph.nodes()), set(custom_graph.graph.nodes()))
self.assertSetEqual(set(self.custom.graph.edges()), set(custom_graph.graph.edges()))
self.assertSetEqual(set(self.custom.err_graph.nodes()), set(custom_graph.err_graph.nodes()))
npt.assert_array_equal(
nx.to_numpy_matrix(
self.custom.err_graph, nodelist=sorted(self.custom.err_graph.nodes())
),
nx.to_numpy_matrix(custom_graph, nodelist=sorted(custom_graph.err_graph.nodes())),
)
self.assertSetEqual(
set(self.custom.full_graph_struct.nodes()), set(custom_graph.full_graph_struct.nodes())
)
self.assertSetEqual(
set(self.custom.full_graph_struct.edges()), set(custom_graph.full_graph_struct.edges())
)
self.assertSetEqual(self.custom.latents, custom_graph.latents)
self.assertSetEqual(self.custom.observed, custom_graph.observed)
def test_iv_transformations_demo(self):
scale = {"eta1": "y1", "eta2": "y5", "xi1": "x1"}
self.assertRaises(ValueError, self.demo._iv_transformations, "x1", "y1", scale)
for y in ["y2", "y3", "y4"]:
full_graph, dependent_var = self.demo._iv_transformations(
X="eta1", Y=y, scaling_indicators=scale
)
self.assertEqual(dependent_var, y)
self.assertTrue((".y1", y) in full_graph.edges)
self.assertFalse(("eta1", y) in full_graph.edges)
for y in ["y6", "y7", "y8"]:
full_graph, dependent_var = self.demo._iv_transformations(
X="eta2", Y=y, scaling_indicators=scale
)
self.assertEqual(dependent_var, y)
self.assertTrue((".y5", y) in full_graph.edges)
self.assertFalse(("eta2", y) in full_graph.edges)
full_graph, dependent_var = self.demo._iv_transformations(
X="xi1", Y="eta1", scaling_indicators=scale
)
self.assertEqual(dependent_var, "y1")
self.assertTrue((".eta1", "y1") in full_graph.edges())
self.assertTrue((".x1", "y1") in full_graph.edges())
self.assertFalse(("xi1", "eta1") in full_graph.edges())
full_graph, dependent_var = self.demo._iv_transformations(
X="xi1", Y="eta2", scaling_indicators=scale
)
self.assertEqual(dependent_var, "y5")
self.assertTrue((".y1", "y5") in full_graph.edges())
self.assertTrue((".eta2", "y5") in full_graph.edges())
self.assertTrue((".x1", "y5") in full_graph.edges())
self.assertFalse(("eta1", "eta2") in full_graph.edges())
self.assertFalse(("xi1", "eta2") in full_graph.edges())
full_graph, dependent_var = self.demo._iv_transformations(
X="eta1", Y="eta2", scaling_indicators=scale
)
self.assertEqual(dependent_var, "y5")
self.assertTrue((".y1", "y5") in full_graph.edges())
self.assertTrue((".eta2", "y5") in full_graph.edges())
self.assertTrue((".x1", "y5") in full_graph.edges())
self.assertFalse(("eta1", "eta2") in full_graph.edges())
self.assertFalse(("xi1", "eta2") in full_graph.edges())
def test_iv_transformations_union(self):
scale = {}
for u, v in self.union.graph.edges():
full_graph, dependent_var = self.union._iv_transformations(
u, v, scaling_indicators=scale
)
self.assertFalse((u, v) in full_graph.edges())
self.assertEqual(dependent_var, v)
def test_get_ivs_demo(self):
scale = {"eta1": "y1", "eta2": "y5", "xi1": "x1"}
self.assertSetEqual(
self.demo.get_ivs("eta1", "y2", scaling_indicators=scale),
{"x1", "x2", "x3", "y3", "y7", "y8"},
)
self.assertSetEqual(
self.demo.get_ivs("eta1", "y3", scaling_indicators=scale),
{"x1", "x2", "x3", "y2", "y4", "y6", "y8"},
)
self.assertSetEqual(
self.demo.get_ivs("eta1", "y4", scaling_indicators=scale),
{"x1", "x2", "x3", "y3", "y6", "y7"},
)
self.assertSetEqual(
self.demo.get_ivs("eta2", "y6", scaling_indicators=scale),
{"x1", "x2", "x3", "y3", "y4", "y7"},
)
self.assertSetEqual(
self.demo.get_ivs("eta2", "y7", scaling_indicators=scale),
{"x1", "x2", "x3", "y2", "y4", "y6", "y8"},
)
self.assertSetEqual(
self.demo.get_ivs("eta2", "y8", scaling_indicators=scale),
{"x1", "x2", "x3", "y2", "y3", "y7"},
)
self.assertSetEqual(
self.demo.get_ivs("xi1", "x2", scaling_indicators=scale),
{"x3", "y1", "y2", "y3", "y4", "y5", "y6", "y7", "y8"},
)
self.assertSetEqual(
self.demo.get_ivs("xi1", "x3", scaling_indicators=scale),
{"x2", "y1", "y2", "y3", "y4", "y5", "y6", "y7", "y8"},
)
self.assertSetEqual(
self.demo.get_ivs("xi1", "eta1", scaling_indicators=scale), {"x2", "x3"}
)
self.assertSetEqual(
self.demo.get_ivs("xi1", "eta2", scaling_indicators=scale),
{"x2", "x3", "y2", "y3", "y4"},
)
self.assertSetEqual(
self.demo.get_ivs("eta1", "eta2", scaling_indicators=scale),
{"x2", "x3", "y2", "y3", "y4"},
)
def test_get_conditional_ivs_demo(self):
scale = {"eta1": "y1", "eta2": "y5", "xi1": "x1"}
self.assertEqual(self.demo.get_conditional_ivs("eta1", "y2", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("eta1", "y3", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("eta1", "y4", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("eta2", "y6", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("eta2", "y7", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("eta2", "y8", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("xi1", "x2", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("xi1", "x3", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("xi1", "eta1", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("xi1", "eta2", scaling_indicators=scale), [])
self.assertEqual(
self.demo.get_conditional_ivs("eta1", "eta2", scaling_indicators=scale), []
)
def test_get_ivs_union(self):
scale = {}
self.assertSetEqual(
self.union.get_ivs("yrsmill", "unionsen", scaling_indicators=scale), set()
)
self.assertSetEqual(
self.union.get_ivs("deferenc", "unionsen", scaling_indicators=scale), set()
)
self.assertSetEqual(
self.union.get_ivs("laboract", "unionsen", scaling_indicators=scale), set()
)
self.assertSetEqual(
self.union.get_ivs("deferenc", "laboract", scaling_indicators=scale), set()
)
self.assertSetEqual(
self.union.get_ivs("age", "laboract", scaling_indicators=scale), {"yrsmill"}
)
self.assertSetEqual(
self.union.get_ivs("age", "deferenc", scaling_indicators=scale), {"yrsmill"}
)
def test_get_conditional_ivs_union(self):
self.assertEqual(
self.union.get_conditional_ivs("yrsmill", "unionsen"),
[("age", {"laboract", "deferenc"})],
)
# This case wouldn't have a conditional IV if the total effect between `deferenc` and
# `unionsen` needs to be computed, because one of the conditioning variables lies on the
# effect path.
self.assertEqual(
self.union.get_conditional_ivs("deferenc", "unionsen"),
[("age", {"yrsmill", "laboract"})],
)
self.assertEqual(
self.union.get_conditional_ivs("laboract", "unionsen"),
[("age", {"yrsmill", "deferenc"})],
)
self.assertEqual(self.union.get_conditional_ivs("deferenc", "laboract"), [])
self.assertEqual(
self.union.get_conditional_ivs("age", "laboract"), [("yrsmill", {"deferenc"})]
)
self.assertEqual(self.union.get_conditional_ivs("age", "deferenc"), [])
def test_iv_transformations_custom(self):
scale_custom = {"eta1": "y2", "eta2": "y5", "xi1": "x1"}
full_graph, var = self.custom._iv_transformations(
"xi1", "x2", scaling_indicators=scale_custom
)
self.assertEqual(var, "x2")
self.assertTrue((".x1", "x2") in full_graph.edges())
self.assertFalse(("xi1", "x2") in full_graph.edges())
full_graph, var = self.custom._iv_transformations(
"xi1", "y4", scaling_indicators=scale_custom
)
self.assertEqual(var, "y4")
self.assertTrue((".x1", "y4") in full_graph.edges())
self.assertFalse(("xi1", "y4") in full_graph.edges())
full_graph, var = self.custom._iv_transformations(
"xi1", "y1", scaling_indicators=scale_custom
)
self.assertEqual(var, "y1")
self.assertTrue((".x1", "y1") in full_graph.edges())
self.assertFalse(("xi1", "y1") in full_graph.edges())
self.assertFalse(("y4", "y1") in full_graph.edges())
full_graph, var = self.custom._iv_transformations(
"xi1", "eta1", scaling_indicators=scale_custom
)
self.assertEqual(var, "y2")
self.assertTrue((".eta1", "y2") in full_graph.edges())
self.assertTrue((".x1", "y2") in full_graph.edges())
self.assertFalse(("y1", "eta1") in full_graph.edges())
self.assertFalse(("xi1", "eta1") in full_graph.edges())
full_graph, var = self.custom._iv_transformations(
"y1", "eta1", scaling_indicators=scale_custom
)
self.assertEqual(var, "y2")
self.assertTrue((".eta1", "y2") in full_graph.edges())
self.assertTrue((".x1", "y2") in full_graph.edges())
self.assertFalse(("y1", "eta1") in full_graph.edges())
self.assertFalse(("xi1", "eta1") in full_graph.edges())
full_graph, var = self.custom._iv_transformations(
"y1", "eta2", scaling_indicators=scale_custom
)
self.assertEqual(var, "y5")
self.assertTrue((".eta2", "y5") in full_graph.edges())
self.assertFalse(("y1", "eta2") in full_graph.edges())
full_graph, var = self.custom._iv_transformations(
"y4", "y1", scaling_indicators=scale_custom
)
self.assertEqual(var, "y1")
self.assertFalse(("y4", "y1") in full_graph.edges())
full_graph, var = self.custom._iv_transformations(
"eta1", "y3", scaling_indicators=scale_custom
)
self.assertEqual(var, "y3")
self.assertTrue((".y2", "y3") in full_graph.edges())
self.assertFalse(("eta1", "y3") in full_graph.edges())
def test_get_ivs_custom(self):
scale_custom = {"eta1": "y2", "eta2": "y5", "xi1": "x1"}
self.assertSetEqual(
self.custom.get_ivs("xi1", "x2", scaling_indicators=scale_custom),
{"y1", "y2", "y3", "y4", "y5"},
)
self.assertSetEqual(
self.custom.get_ivs("xi1", "y4", scaling_indicators=scale_custom), {"x2"}
)
self.assertSetEqual(
self.custom.get_ivs("xi1", "y1", scaling_indicators=scale_custom), {"x2", "y4"}
)
self.assertSetEqual(
self.custom.get_ivs("xi1", "eta1", scaling_indicators=scale_custom), {"x2", "y4"}
)
# TODO: Test this and fix.
self.assertSetEqual(
self.custom.get_ivs("y1", "eta1", scaling_indicators=scale_custom), {"x2", "y4", "y5"}
)
self.assertSetEqual(
self.custom.get_ivs("y1", "eta2", scaling_indicators=scale_custom),
{"x1", "x2", "y2", "y3", "y4"},
)
self.assertSetEqual(self.custom.get_ivs("y4", "y1", scaling_indicators=scale_custom), set())
self.assertSetEqual(
self.custom.get_ivs("eta1", "y3", scaling_indicators=scale_custom), {"x1", "x2", "y4"}
)
def test_small_model_ivs(self):
model1 = SEMGraph(
ebunch=[("X", "Y"), ("I", "X"), ("W", "I")],
latents=[],
err_corr=[("W", "Y")],
err_var={},
)
self.assertEqual(model1.get_conditional_ivs("X", "Y"), [("I", {"W"})])
model2 = SEMGraph(
ebunch=[("x", "y"), ("z", "x"), ("w", "z"), ("w", "u"), ("u", "x"), ("u", "y")],
latents=["u"],
)
self.assertEqual(model2.get_conditional_ivs("x", "y"), [("z", {"w"})])
model3 = SEMGraph(ebunch=[("x", "y"), ("u", "x"), ("u", "y"), ("z", "x")], latents=["u"])
self.assertEqual(model3.get_ivs("x", "y"), {"z"})
model4 = SEMGraph(ebunch=[("x", "y"), ("z", "x"), ("u", "x"), ("u", "y")])
self.assertEqual(model4.get_conditional_ivs("x", "y"), [("z", {"u"})])
class TestSEMAlg(unittest.TestCase):
def setUp(self):
self.demo = SEMGraph(
ebunch=[
("xi1", "x1", 1.000),
("xi1", "x2", 2.180),
("xi1", "x3", 1.819),
("xi1", "eta1", 1.483),
("eta1", "y1", 1.000),
("eta1", "y2", 1.257),
("eta1", "y3", 1.058),
("eta1", "y4", 1.265),
("eta1", "eta2", 0.837),
("xi1", "eta2", 0.572),
("eta2", "y5", 1.000),
("eta2", "y6", 1.186),
("eta2", "y7", 1.280),
("eta2", "y8", 1.266),
],
latents=["xi1", "eta1", "eta2"],
err_corr=[
("y1", "y5", 0.624),
("y2", "y6", 2.153),
("y2", "y4", 1.313),
("y3", "y7", 0.795),
("y4", "y8", 0.348),
("y6", "y8", 1.356),
],
err_var={
"x1": 0.082,
"x2": 0.120,
"x3": 0.467,
"y1": 1.891,
"y2": 7.373,
"y3": 5.067,
"y4": 3.148,
"y5": 2.351,
"y6": 4.954,
"y7": 3.431,
"y8": 3.254,
"xi1": 0.448,
"eta1": 3.956,
"eta2": 0.172,
},
)
self.demo_lisrel = self.demo.to_lisrel()
self.small_model = SEM.from_graph(
ebunch=[("X", "Y", 0.3)], latents=[], err_var={"X": 0.1, "Y": 0.1}
)
self.small_model_lisrel = self.small_model.to_lisrel()
def test_generate_samples(self):
samples = self.small_model_lisrel.generate_samples(n_samples=100)
samples = self.demo_lisrel.generate_samples(n_samples=100)
| <filename>pgmpy/tests/test_models/test_SEM.py
import os
import unittest
import numpy as np
import networkx as nx
import numpy.testing as npt
from pgmpy.models import SEM, SEMGraph, SEMAlg
class TestSEM(unittest.TestCase):
def test_from_graph(self):
self.demo = SEM.from_graph(
ebunch=[
("xi1", "x1"),
("xi1", "x2"),
("xi1", "x3"),
("xi1", "eta1"),
("eta1", "y1"),
("eta1", "y2"),
("eta1", "y3"),
("eta1", "y4"),
("eta1", "eta2"),
("xi1", "eta2"),
("eta2", "y5"),
("eta2", "y6"),
("eta2", "y7"),
("eta2", "y8"),
],
latents=["xi1", "eta1", "eta2"],
err_corr=[
("y1", "y5"),
("y2", "y6"),
("y2", "y4"),
("y3", "y7"),
("y4", "y8"),
("y6", "y8"),
],
)
self.assertSetEqual(self.demo.latents, {"xi1", "eta1", "eta2"})
self.assertSetEqual(
self.demo.observed, {"x1", "x2", "x3", "y1", "y2", "y3", "y4", "y5", "y6", "y7", "y8"}
)
self.assertListEqual(
sorted(self.demo.graph.nodes()),
[
"eta1",
"eta2",
"x1",
"x2",
"x3",
"xi1",
"y1",
"y2",
"y3",
"y4",
"y5",
"y6",
"y7",
"y8",
],
)
self.assertListEqual(
sorted(self.demo.graph.edges()),
sorted(
[
("eta1", "eta2"),
("eta1", "y1"),
("eta1", "y2"),
("eta1", "y3"),
("eta1", "y4"),
("eta2", "y5"),
("eta2", "y6"),
("eta2", "y7"),
("eta2", "y8"),
("xi1", "eta1"),
("xi1", "eta2"),
("xi1", "x1"),
("xi1", "x2"),
("xi1", "x3"),
]
),
)
self.assertDictEqual(self.demo.graph.edges[("xi1", "x1")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "x2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "x3")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "eta1")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y1")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y3")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y4")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "eta2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "eta2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y5")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y6")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y7")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y8")], {"weight": np.NaN})
npt.assert_equal(
nx.to_numpy_matrix(
self.demo.err_graph, nodelist=sorted(self.demo.err_graph.nodes()), weight=None
),
np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0],
]
),
)
for edge in self.demo.err_graph.edges():
self.assertDictEqual(self.demo.err_graph.edges[edge], {"weight": np.NaN})
for node in self.demo.err_graph.nodes():
self.assertDictEqual(self.demo.err_graph.nodes[node], {"weight": np.NaN})
def test_from_lavaan(self):
model_str = """# %load model.lav
# measurement model
ind60 =~ x1 + x2 + x3
dem60 =~ y1 + y2 + y3 + y4
dem65 =~ y5 + y6 + y7 + y8
# regressions
dem60 ~ ind60
dem65 ~ ind60 + dem60
# residual correlations
y1 ~~ y5
y2 ~~ y4 + y6
y3 ~~ y7
y4 ~~ y8
y6 ~~ y8
"""
model_from_str = SEM.from_lavaan(string=model_str)
with open("test_model.lav", "w") as f:
f.write(model_str)
model_from_file = SEM.from_lavaan(filename="test_model.lav")
os.remove("test_model.lav")
expected_edges = set(
[
("ind60", "x1"),
("ind60", "x2"),
("ind60", "x3"),
("ind60", "dem60"),
("ind60", "dem65"),
("dem60", "dem65"),
("dem60", "y1"),
("dem60", "y2"),
("dem60", "y3"),
("dem60", "y4"),
("dem65", "y5"),
("dem65", "y6"),
("dem65", "y7"),
("dem65", "y8"),
]
)
# Undirected graph: the check needs to handle edges that are returned in reverse order.
expected_err_edges = set(
[
("y1", "y5"),
("y5", "y1"),
("y2", "y6"),
("y6", "y2"),
("y2", "y4"),
("y4", "y2"),
("y3", "y7"),
("y7", "y3"),
("y4", "y8"),
("y8", "y4"),
("y6", "y8"),
("y8", "y6"),
]
)
expected_latents = set(["dem60", "dem65", "ind60"])
self.assertEqual(set(model_from_str.graph.edges()), expected_edges)
self.assertEqual(set(model_from_file.graph.edges()), expected_edges)
self.assertFalse(set(model_from_str.err_graph.edges()) - expected_err_edges)
self.assertFalse(set(model_from_file.err_graph.edges()) - expected_err_edges)
self.assertEqual(set(model_from_str.latents), expected_latents)
self.assertEqual(set(model_from_file.latents), expected_latents)
def test_from_lisrel(self):
pass # TODO: Add this test when done writing the tests for SEMAlg
def test_from_ram(self):
pass # TODO: Add this.
class TestSEMGraph(unittest.TestCase):
def setUp(self):
self.demo = SEMGraph(
ebunch=[
("xi1", "x1"),
("xi1", "x2"),
("xi1", "x3"),
("xi1", "eta1"),
("eta1", "y1"),
("eta1", "y2"),
("eta1", "y3"),
("eta1", "y4"),
("eta1", "eta2"),
("xi1", "eta2"),
("eta2", "y5"),
("eta2", "y6"),
("eta2", "y7"),
("eta2", "y8"),
],
latents=["xi1", "eta1", "eta2"],
err_corr=[
("y1", "y5"),
("y2", "y6"),
("y2", "y4"),
("y3", "y7"),
("y4", "y8"),
("y6", "y8"),
],
)
self.union = SEMGraph(
ebunch=[
("yrsmill", "unionsen"),
("age", "laboract"),
("age", "deferenc"),
("deferenc", "laboract"),
("deferenc", "unionsen"),
("laboract", "unionsen"),
],
latents=[],
err_corr=[("yrsmill", "age")],
)
self.demo_params = SEMGraph(
ebunch=[
("xi1", "x1", 0.4),
("xi1", "x2", 0.5),
("xi1", "x3", 0.6),
("xi1", "eta1", 0.3),
("eta1", "y1", 1.1),
("eta1", "y2", 1.2),
("eta1", "y3", 1.3),
("eta1", "y4", 1.4),
("eta1", "eta2", 0.1),
("xi1", "eta2", 0.2),
("eta2", "y5", 0.7),
("eta2", "y6", 0.8),
("eta2", "y7", 0.9),
("eta2", "y8", 1.0),
],
latents=["xi1", "eta1", "eta2"],
err_corr=[
("y1", "y5", 1.5),
("y2", "y6", 1.6),
("y2", "y4", 1.9),
("y3", "y7", 1.7),
("y4", "y8", 1.8),
("y6", "y8", 2.0),
],
err_var={
"y1": 2.1,
"y2": 2.2,
"y3": 2.3,
"y4": 2.4,
"y5": 2.5,
"y6": 2.6,
"y7": 2.7,
"y8": 2.8,
"x1": 3.1,
"x2": 3.2,
"x3": 3.3,
"eta1": 2.9,
"eta2": 3.0,
"xi1": 3.4,
},
)
self.custom = SEMGraph(
ebunch=[
("xi1", "eta1"),
("xi1", "y1"),
("xi1", "y4"),
("xi1", "x1"),
("xi1", "x2"),
("y4", "y1"),
("y1", "eta2"),
("eta2", "y5"),
("y1", "eta1"),
("eta1", "y2"),
("eta1", "y3"),
],
latents=["xi1", "eta1", "eta2"],
err_corr=[("y1", "y2"), ("y2", "y3")],
err_var={},
)
def test_demo_init(self):
self.assertSetEqual(self.demo.latents, {"xi1", "eta1", "eta2"})
self.assertSetEqual(
self.demo.observed, {"x1", "x2", "x3", "y1", "y2", "y3", "y4", "y5", "y6", "y7", "y8"}
)
self.assertListEqual(
sorted(self.demo.graph.nodes()),
[
"eta1",
"eta2",
"x1",
"x2",
"x3",
"xi1",
"y1",
"y2",
"y3",
"y4",
"y5",
"y6",
"y7",
"y8",
],
)
self.assertListEqual(
sorted(self.demo.graph.edges()),
sorted(
[
("eta1", "eta2"),
("eta1", "y1"),
("eta1", "y2"),
("eta1", "y3"),
("eta1", "y4"),
("eta2", "y5"),
("eta2", "y6"),
("eta2", "y7"),
("eta2", "y8"),
("xi1", "eta1"),
("xi1", "eta2"),
("xi1", "x1"),
("xi1", "x2"),
("xi1", "x3"),
]
),
)
self.assertDictEqual(self.demo.graph.edges[("xi1", "x1")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "x2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "x3")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "eta1")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y1")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y3")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y4")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "eta2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "eta2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y5")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y6")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y7")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y8")], {"weight": np.NaN})
npt.assert_equal(
nx.to_numpy_matrix(
self.demo.err_graph, nodelist=sorted(self.demo.err_graph.nodes()), weight=None
),
np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0],
]
),
)
for edge in self.demo.err_graph.edges():
self.assertDictEqual(self.demo.err_graph.edges[edge], {"weight": np.NaN})
for node in self.demo.err_graph.nodes():
self.assertDictEqual(self.demo.err_graph.nodes[node], {"weight": np.NaN})
def test_union_init(self):
self.assertSetEqual(self.union.latents, set())
self.assertSetEqual(
self.union.observed, {"yrsmill", "unionsen", "age", "laboract", "deferenc"}
)
self.assertListEqual(
sorted(self.union.graph.nodes()),
sorted(["yrsmill", "unionsen", "age", "laboract", "deferenc"]),
)
self.assertListEqual(
sorted(self.union.graph.edges()),
sorted(
[
("yrsmill", "unionsen"),
("age", "laboract"),
("age", "deferenc"),
("deferenc", "laboract"),
("deferenc", "unionsen"),
("laboract", "unionsen"),
]
),
)
self.assertDictEqual(self.union.graph.edges[("yrsmill", "unionsen")], {"weight": np.NaN})
self.assertDictEqual(self.union.graph.edges[("age", "laboract")], {"weight": np.NaN})
self.assertDictEqual(self.union.graph.edges[("age", "deferenc")], {"weight": np.NaN})
self.assertDictEqual(self.union.graph.edges[("deferenc", "laboract")], {"weight": np.NaN})
self.assertDictEqual(self.union.graph.edges[("deferenc", "unionsen")], {"weight": np.NaN})
self.assertDictEqual(self.union.graph.edges[("laboract", "unionsen")], {"weight": np.NaN})
npt.assert_equal(
nx.to_numpy_matrix(
self.union.err_graph, nodelist=sorted(self.union.err_graph.nodes()), weight=None
),
np.array(
[
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
]
),
)
for edge in self.union.err_graph.edges():
self.assertDictEqual(self.union.err_graph.edges[edge], {"weight": np.NaN})
for node in self.union.err_graph.nodes():
self.assertDictEqual(self.union.err_graph.nodes[node], {"weight": np.NaN})
def test_demo_param_init(self):
self.assertDictEqual(self.demo_params.graph.edges[("xi1", "x1")], {"weight": 0.4})
self.assertDictEqual(self.demo_params.graph.edges[("xi1", "x2")], {"weight": 0.5})
self.assertDictEqual(self.demo_params.graph.edges[("xi1", "x3")], {"weight": 0.6})
self.assertDictEqual(self.demo_params.graph.edges[("xi1", "eta1")], {"weight": 0.3})
self.assertDictEqual(self.demo_params.graph.edges[("eta1", "y1")], {"weight": 1.1})
self.assertDictEqual(self.demo_params.graph.edges[("eta1", "y2")], {"weight": 1.2})
self.assertDictEqual(self.demo_params.graph.edges[("eta1", "y3")], {"weight": 1.3})
self.assertDictEqual(self.demo_params.graph.edges[("eta1", "y4")], {"weight": 1.4})
self.assertDictEqual(self.demo_params.graph.edges[("eta1", "eta2")], {"weight": 0.1})
self.assertDictEqual(self.demo_params.graph.edges[("xi1", "eta2")], {"weight": 0.2})
self.assertDictEqual(self.demo_params.graph.edges[("eta2", "y5")], {"weight": 0.7})
self.assertDictEqual(self.demo_params.graph.edges[("eta2", "y6")], {"weight": 0.8})
self.assertDictEqual(self.demo_params.graph.edges[("eta2", "y7")], {"weight": 0.9})
self.assertDictEqual(self.demo_params.graph.edges[("eta2", "y8")], {"weight": 1.0})
self.assertDictEqual(self.demo_params.err_graph.edges[("y1", "y5")], {"weight": 1.5})
self.assertDictEqual(self.demo_params.err_graph.edges[("y2", "y6")], {"weight": 1.6})
self.assertDictEqual(self.demo_params.err_graph.edges[("y2", "y4")], {"weight": 1.9})
self.assertDictEqual(self.demo_params.err_graph.edges[("y3", "y7")], {"weight": 1.7})
self.assertDictEqual(self.demo_params.err_graph.edges[("y4", "y8")], {"weight": 1.8})
self.assertDictEqual(self.demo_params.err_graph.edges[("y6", "y8")], {"weight": 2.0})
self.assertDictEqual(self.demo_params.err_graph.nodes["y1"], {"weight": 2.1})
self.assertDictEqual(self.demo_params.err_graph.nodes["y2"], {"weight": 2.2})
self.assertDictEqual(self.demo_params.err_graph.nodes["y3"], {"weight": 2.3})
self.assertDictEqual(self.demo_params.err_graph.nodes["y4"], {"weight": 2.4})
self.assertDictEqual(self.demo_params.err_graph.nodes["y5"], {"weight": 2.5})
self.assertDictEqual(self.demo_params.err_graph.nodes["y6"], {"weight": 2.6})
self.assertDictEqual(self.demo_params.err_graph.nodes["y7"], {"weight": 2.7})
self.assertDictEqual(self.demo_params.err_graph.nodes["y8"], {"weight": 2.8})
self.assertDictEqual(self.demo_params.err_graph.nodes["x1"], {"weight": 3.1})
self.assertDictEqual(self.demo_params.err_graph.nodes["x2"], {"weight": 3.2})
self.assertDictEqual(self.demo_params.err_graph.nodes["x3"], {"weight": 3.3})
self.assertDictEqual(self.demo_params.err_graph.nodes["eta1"], {"weight": 2.9})
self.assertDictEqual(self.demo_params.err_graph.nodes["eta2"], {"weight": 3.0})
def test_get_full_graph_struct(self):
full_struct = self.union._get_full_graph_struct()
self.assertFalse(
set(full_struct.nodes())
- set(
[
"yrsmill",
"unionsen",
"age",
"laboract",
"deferenc",
".yrsmill",
".unionsen",
".age",
".laboract",
".deferenc",
"..ageyrsmill",
"..yrsmillage",
]
)
)
self.assertFalse(
set(full_struct.edges())
- set(
[
("yrsmill", "unionsen"),
("age", "laboract"),
("age", "deferenc"),
("deferenc", "laboract"),
("deferenc", "unionsen"),
("laboract", "unionsen"),
(".yrsmill", "yrsmill"),
(".unionsen", "unionsen"),
(".age", "age"),
(".laboract", "laboract"),
(".deferenc", "deferenc"),
("..ageyrsmill", ".age"),
("..ageyrsmill", ".yrsmill"),
("..yrsmillage", ".age"),
("..yrsmillage", ".yrsmill"),
]
)
)
def test_active_trail_nodes(self):
demo_nodes = ["x1", "x2", "x3", "y1", "y2", "y3", "y4", "y5", "y6", "y7", "y8"]
for node in demo_nodes:
self.assertSetEqual(
self.demo.active_trail_nodes(node, struct="full")[node], set(demo_nodes)
)
union_nodes = self.union.graph.nodes()
active_trails = self.union.active_trail_nodes(list(union_nodes), struct="full")
for node in union_nodes:
self.assertSetEqual(active_trails[node], set(union_nodes))
self.assertSetEqual(
self.union.active_trail_nodes("age", observed=["laboract", "deferenc", "unionsen"])[
"age"
],
{"age", "yrsmill"},
)
def test_get_scaling_indicators(self):
demo_scaling_indicators = self.demo.get_scaling_indicators()
self.assertTrue(demo_scaling_indicators["eta1"] in ["y1", "y2", "y3", "y4"])
self.assertTrue(demo_scaling_indicators["eta2"] in ["y5", "y6", "y7", "y8"])
self.assertTrue(demo_scaling_indicators["xi1"] in ["x1", "x2", "x3"])
union_scaling_indicators = self.union.get_scaling_indicators()
self.assertDictEqual(union_scaling_indicators, dict())
custom_scaling_indicators = self.custom.get_scaling_indicators()
self.assertTrue(custom_scaling_indicators["xi1"] in ["x1", "x2", "y1", "y4"])
self.assertTrue(custom_scaling_indicators["eta1"] in ["y2", "y3"])
self.assertTrue(custom_scaling_indicators["eta2"] in ["y5"])
def test_to_lisrel(self):
demo = SEMGraph(
ebunch=[
("xi1", "x1", 1.000),
("xi1", "x2", 2.180),
("xi1", "x3", 1.819),
("xi1", "eta1", 1.483),
("eta1", "y1", 1.000),
("eta1", "y2", 1.257),
("eta1", "y3", 1.058),
("eta1", "y4", 1.265),
("eta1", "eta2", 0.837),
("xi1", "eta2", 0.572),
("eta2", "y5", 1.000),
("eta2", "y6", 1.186),
("eta2", "y7", 1.280),
("eta2", "y8", 1.266),
],
latents=["xi1", "eta1", "eta2"],
err_corr=[
("y1", "y5", 0.624),
("y2", "y6", 2.153),
("y2", "y4", 1.313),
("y3", "y7", 0.795),
("y4", "y8", 0.348),
("y6", "y8", 1.356),
],
err_var={
"x1": 0.082,
"x2": 0.120,
"x3": 0.467,
"y1": 1.891,
"y2": 7.373,
"y3": 5.067,
"y4": 3.148,
"y5": 2.351,
"y6": 4.954,
"y7": 3.431,
"y8": 3.254,
"xi1": 0.448,
"eta1": 3.956,
"eta2": 0.172,
},
)
demo_lisrel = demo.to_lisrel()
indexing = []
vars_ordered = [
"y1",
"y2",
"y3",
"y4",
"y5",
"y6",
"y7",
"y8",
"x1",
"x2",
"x3",
"xi1",
"eta1",
"eta2",
]
for var in vars_ordered:
indexing.append(demo_lisrel.eta.index(var))
eta_reorder = [demo_lisrel.eta[i] for i in indexing]
B_reorder = demo_lisrel.B[indexing, :][:, indexing]
B_fixed_reorder = demo_lisrel.B_fixed_mask[indexing, :][:, indexing]
zeta_reorder = demo_lisrel.zeta[indexing, :][:, indexing]
zeta_fixed_reorder = demo_lisrel.zeta_fixed_mask[indexing, :][:, indexing]
wedge_y_reorder = demo_lisrel.wedge_y[:, indexing]
self.assertEqual(vars_ordered, eta_reorder)
npt.assert_array_equal(
B_reorder,
np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
]
),
)
npt.assert_array_equal(
zeta_reorder,
np.array(
[
[1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
]
),
)
npt.assert_array_equal(
B_fixed_reorder,
np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.000, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.257, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.058, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.265, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.000],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.186],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.280],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.266],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.000, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2.180, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.819, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.483, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.572, 0.837, 0],
]
),
)
npt.assert_array_equal(
zeta_fixed_reorder,
np.array(
[
[1.891, 0, 0, 0, 0.624, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 7.373, 0, 1.313, 0, 2.153, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 5.067, 0, 0, 0, 0.795, 0, 0, 0, 0, 0, 0, 0],
[0, 1.313, 0, 3.148, 0, 0, 0, 0.348, 0, 0, 0, 0, 0, 0],
[0.624, 0, 0, 0, 2.351, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 2.153, 0, 0, 0, 4.954, 0, 1.356, 0, 0, 0, 0, 0, 0],
[0, 0, 0.795, 0, 0, 0, 3.431, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0.348, 0, 1.356, 0, 3.254, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0.082, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0.120, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.467, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.448, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3.956, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.172],
]
),
)
npt.assert_array_equal(
demo_lisrel.wedge_y,
np.array(
[
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
]
),
)
def test_to_from_lisrel(self):
demo_lisrel = self.demo.to_lisrel()
union_lisrel = self.union.to_lisrel()
demo_params_lisrel = self.demo_params.to_lisrel()
custom_lisrel = self.custom.to_lisrel()
demo_graph = demo_lisrel.to_SEMGraph()
union_graph = union_lisrel.to_SEMGraph()
demo_params_graph = demo_params_lisrel.to_SEMGraph()
custom_graph = custom_lisrel.to_SEMGraph()
# Test demo
self.assertSetEqual(set(self.demo.graph.nodes()), set(demo_graph.graph.nodes()))
self.assertSetEqual(set(self.demo.graph.edges()), set(demo_graph.graph.edges()))
self.assertSetEqual(set(self.demo.err_graph.nodes()), set(demo_graph.err_graph.nodes()))
npt.assert_array_equal(
nx.to_numpy_matrix(self.demo.err_graph, nodelist=sorted(self.demo.err_graph.nodes())),
nx.to_numpy_matrix(demo_graph, nodelist=sorted(demo_graph.err_graph.nodes())),
)
self.assertSetEqual(
set(self.demo.full_graph_struct.nodes()), set(demo_graph.full_graph_struct.nodes())
)
self.assertSetEqual(
set(self.demo.full_graph_struct.edges()), set(demo_graph.full_graph_struct.edges())
)
self.assertSetEqual(self.demo.latents, demo_graph.latents)
self.assertSetEqual(self.demo.observed, demo_graph.observed)
# Test union
self.assertSetEqual(set(self.union.graph.nodes()), set(union_graph.graph.nodes()))
self.assertSetEqual(set(self.union.graph.edges()), set(union_graph.graph.edges()))
self.assertSetEqual(set(self.union.err_graph.nodes()), set(union_graph.err_graph.nodes()))
npt.assert_array_equal(
nx.to_numpy_matrix(self.union.err_graph, nodelist=sorted(self.union.err_graph.nodes())),
nx.to_numpy_matrix(union_graph, nodelist=sorted(union_graph.err_graph.nodes())),
)
self.assertSetEqual(
set(self.union.full_graph_struct.nodes()), set(union_graph.full_graph_struct.nodes())
)
self.assertSetEqual(
set(self.union.full_graph_struct.edges()), set(union_graph.full_graph_struct.edges())
)
self.assertSetEqual(self.union.latents, union_graph.latents)
self.assertSetEqual(self.union.observed, union_graph.observed)
# Test demo_params
self.assertSetEqual(
set(self.demo_params.graph.nodes()), set(demo_params_graph.graph.nodes())
)
self.assertSetEqual(
set(self.demo_params.graph.edges()), set(demo_params_graph.graph.edges())
)
self.assertSetEqual(
set(self.demo_params.err_graph.nodes()), set(demo_params_graph.err_graph.nodes())
)
npt.assert_array_equal(
nx.to_numpy_matrix(
self.demo_params.err_graph,
nodelist=sorted(self.demo_params.err_graph.nodes()),
weight=None,
),
nx.to_numpy_matrix(
demo_params_graph.err_graph,
nodelist=sorted(demo_params_graph.err_graph.nodes()),
weight=None,
),
)
self.assertSetEqual(
set(self.demo_params.full_graph_struct.nodes()),
set(demo_params_graph.full_graph_struct.nodes()),
)
self.assertSetEqual(
set(self.demo_params.full_graph_struct.edges()),
set(demo_params_graph.full_graph_struct.edges()),
)
self.assertSetEqual(self.demo_params.latents, demo_params_graph.latents)
self.assertSetEqual(self.demo_params.observed, demo_params_graph.observed)
# Test custom
self.assertSetEqual(set(self.custom.graph.nodes()), set(custom_graph.graph.nodes()))
self.assertSetEqual(set(self.custom.graph.edges()), set(custom_graph.graph.edges()))
self.assertSetEqual(set(self.custom.err_graph.nodes()), set(custom_graph.err_graph.nodes()))
npt.assert_array_equal(
nx.to_numpy_matrix(
self.custom.err_graph, nodelist=sorted(self.custom.err_graph.nodes())
),
nx.to_numpy_matrix(custom_graph, nodelist=sorted(custom_graph.err_graph.nodes())),
)
self.assertSetEqual(
set(self.custom.full_graph_struct.nodes()), set(custom_graph.full_graph_struct.nodes())
)
self.assertSetEqual(
set(self.custom.full_graph_struct.edges()), set(custom_graph.full_graph_struct.edges())
)
self.assertSetEqual(self.custom.latents, custom_graph.latents)
self.assertSetEqual(self.custom.observed, custom_graph.observed)
def test_iv_transformations_demo(self):
scale = {"eta1": "y1", "eta2": "y5", "xi1": "x1"}
self.assertRaises(ValueError, self.demo._iv_transformations, "x1", "y1", scale)
for y in ["y2", "y3", "y4"]:
full_graph, dependent_var = self.demo._iv_transformations(
X="eta1", Y=y, scaling_indicators=scale
)
self.assertEqual(dependent_var, y)
self.assertTrue((".y1", y) in full_graph.edges)
self.assertFalse(("eta1", y) in full_graph.edges)
for y in ["y6", "y7", "y8"]:
full_graph, dependent_var = self.demo._iv_transformations(
X="eta2", Y=y, scaling_indicators=scale
)
self.assertEqual(dependent_var, y)
self.assertTrue((".y5", y) in full_graph.edges)
self.assertFalse(("eta2", y) in full_graph.edges)
full_graph, dependent_var = self.demo._iv_transformations(
X="xi1", Y="eta1", scaling_indicators=scale
)
self.assertEqual(dependent_var, "y1")
self.assertTrue((".eta1", "y1") in full_graph.edges())
self.assertTrue((".x1", "y1") in full_graph.edges())
self.assertFalse(("xi1", "eta1") in full_graph.edges())
full_graph, dependent_var = self.demo._iv_transformations(
X="xi1", Y="eta2", scaling_indicators=scale
)
self.assertEqual(dependent_var, "y5")
self.assertTrue((".y1", "y5") in full_graph.edges())
self.assertTrue((".eta2", "y5") in full_graph.edges())
self.assertTrue((".x1", "y5") in full_graph.edges())
self.assertFalse(("eta1", "eta2") in full_graph.edges())
self.assertFalse(("xi1", "eta2") in full_graph.edges())
full_graph, dependent_var = self.demo._iv_transformations(
X="eta1", Y="eta2", scaling_indicators=scale
)
self.assertEqual(dependent_var, "y5")
self.assertTrue((".y1", "y5") in full_graph.edges())
self.assertTrue((".eta2", "y5") in full_graph.edges())
self.assertTrue((".x1", "y5") in full_graph.edges())
self.assertFalse(("eta1", "eta2") in full_graph.edges())
self.assertFalse(("xi1", "eta2") in full_graph.edges())
def test_iv_transformations_union(self):
scale = {}
for u, v in self.union.graph.edges():
full_graph, dependent_var = self.union._iv_transformations(
u, v, scaling_indicators=scale
)
self.assertFalse((u, v) in full_graph.edges())
self.assertEqual(dependent_var, v)
def test_get_ivs_demo(self):
scale = {"eta1": "y1", "eta2": "y5", "xi1": "x1"}
self.assertSetEqual(
self.demo.get_ivs("eta1", "y2", scaling_indicators=scale),
{"x1", "x2", "x3", "y3", "y7", "y8"},
)
self.assertSetEqual(
self.demo.get_ivs("eta1", "y3", scaling_indicators=scale),
{"x1", "x2", "x3", "y2", "y4", "y6", "y8"},
)
self.assertSetEqual(
self.demo.get_ivs("eta1", "y4", scaling_indicators=scale),
{"x1", "x2", "x3", "y3", "y6", "y7"},
)
self.assertSetEqual(
self.demo.get_ivs("eta2", "y6", scaling_indicators=scale),
{"x1", "x2", "x3", "y3", "y4", "y7"},
)
self.assertSetEqual(
self.demo.get_ivs("eta2", "y7", scaling_indicators=scale),
{"x1", "x2", "x3", "y2", "y4", "y6", "y8"},
)
self.assertSetEqual(
self.demo.get_ivs("eta2", "y8", scaling_indicators=scale),
{"x1", "x2", "x3", "y2", "y3", "y7"},
)
self.assertSetEqual(
self.demo.get_ivs("xi1", "x2", scaling_indicators=scale),
{"x3", "y1", "y2", "y3", "y4", "y5", "y6", "y7", "y8"},
)
self.assertSetEqual(
self.demo.get_ivs("xi1", "x3", scaling_indicators=scale),
{"x2", "y1", "y2", "y3", "y4", "y5", "y6", "y7", "y8"},
)
self.assertSetEqual(
self.demo.get_ivs("xi1", "eta1", scaling_indicators=scale), {"x2", "x3"}
)
self.assertSetEqual(
self.demo.get_ivs("xi1", "eta2", scaling_indicators=scale),
{"x2", "x3", "y2", "y3", "y4"},
)
self.assertSetEqual(
self.demo.get_ivs("eta1", "eta2", scaling_indicators=scale),
{"x2", "x3", "y2", "y3", "y4"},
)
def test_get_conditional_ivs_demo(self):
scale = {"eta1": "y1", "eta2": "y5", "xi1": "x1"}
self.assertEqual(self.demo.get_conditional_ivs("eta1", "y2", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("eta1", "y3", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("eta1", "y4", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("eta2", "y6", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("eta2", "y7", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("eta2", "y8", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("xi1", "x2", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("xi1", "x3", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("xi1", "eta1", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("xi1", "eta2", scaling_indicators=scale), [])
self.assertEqual(
self.demo.get_conditional_ivs("eta1", "eta2", scaling_indicators=scale), []
)
def test_get_ivs_union(self):
scale = {}
self.assertSetEqual(
self.union.get_ivs("yrsmill", "unionsen", scaling_indicators=scale), set()
)
self.assertSetEqual(
self.union.get_ivs("deferenc", "unionsen", scaling_indicators=scale), set()
)
self.assertSetEqual(
self.union.get_ivs("laboract", "unionsen", scaling_indicators=scale), set()
)
self.assertSetEqual(
self.union.get_ivs("deferenc", "laboract", scaling_indicators=scale), set()
)
self.assertSetEqual(
self.union.get_ivs("age", "laboract", scaling_indicators=scale), {"yrsmill"}
)
self.assertSetEqual(
self.union.get_ivs("age", "deferenc", scaling_indicators=scale), {"yrsmill"}
)
def test_get_conditional_ivs_union(self):
self.assertEqual(
self.union.get_conditional_ivs("yrsmill", "unionsen"),
[("age", {"laboract", "deferenc"})],
)
# This case wouldn't have a conditional IV if the total effect between `deferenc` and
# `unionsen` needs to be computed, because one of the conditioning variables lies on the
# effect path.
self.assertEqual(
self.union.get_conditional_ivs("deferenc", "unionsen"),
[("age", {"yrsmill", "laboract"})],
)
self.assertEqual(
self.union.get_conditional_ivs("laboract", "unionsen"),
[("age", {"yrsmill", "deferenc"})],
)
self.assertEqual(self.union.get_conditional_ivs("deferenc", "laboract"), [])
self.assertEqual(
self.union.get_conditional_ivs("age", "laboract"), [("yrsmill", {"deferenc"})]
)
self.assertEqual(self.union.get_conditional_ivs("age", "deferenc"), [])
def test_iv_transformations_custom(self):
scale_custom = {"eta1": "y2", "eta2": "y5", "xi1": "x1"}
full_graph, var = self.custom._iv_transformations(
"xi1", "x2", scaling_indicators=scale_custom
)
self.assertEqual(var, "x2")
self.assertTrue((".x1", "x2") in full_graph.edges())
self.assertFalse(("xi1", "x2") in full_graph.edges())
full_graph, var = self.custom._iv_transformations(
"xi1", "y4", scaling_indicators=scale_custom
)
self.assertEqual(var, "y4")
self.assertTrue((".x1", "y4") in full_graph.edges())
self.assertFalse(("xi1", "y4") in full_graph.edges())
full_graph, var = self.custom._iv_transformations(
"xi1", "y1", scaling_indicators=scale_custom
)
self.assertEqual(var, "y1")
self.assertTrue((".x1", "y1") in full_graph.edges())
self.assertFalse(("xi1", "y1") in full_graph.edges())
self.assertFalse(("y4", "y1") in full_graph.edges())
full_graph, var = self.custom._iv_transformations(
"xi1", "eta1", scaling_indicators=scale_custom
)
self.assertEqual(var, "y2")
self.assertTrue((".eta1", "y2") in full_graph.edges())
self.assertTrue((".x1", "y2") in full_graph.edges())
self.assertFalse(("y1", "eta1") in full_graph.edges())
self.assertFalse(("xi1", "eta1") in full_graph.edges())
full_graph, var = self.custom._iv_transformations(
"y1", "eta1", scaling_indicators=scale_custom
)
self.assertEqual(var, "y2")
self.assertTrue((".eta1", "y2") in full_graph.edges())
self.assertTrue((".x1", "y2") in full_graph.edges())
self.assertFalse(("y1", "eta1") in full_graph.edges())
self.assertFalse(("xi1", "eta1") in full_graph.edges())
full_graph, var = self.custom._iv_transformations(
"y1", "eta2", scaling_indicators=scale_custom
)
self.assertEqual(var, "y5")
self.assertTrue((".eta2", "y5") in full_graph.edges())
self.assertFalse(("y1", "eta2") in full_graph.edges())
full_graph, var = self.custom._iv_transformations(
"y4", "y1", scaling_indicators=scale_custom
)
self.assertEqual(var, "y1")
self.assertFalse(("y4", "y1") in full_graph.edges())
full_graph, var = self.custom._iv_transformations(
"eta1", "y3", scaling_indicators=scale_custom
)
self.assertEqual(var, "y3")
self.assertTrue((".y2", "y3") in full_graph.edges())
self.assertFalse(("eta1", "y3") in full_graph.edges())
def test_get_ivs_custom(self):
scale_custom = {"eta1": "y2", "eta2": "y5", "xi1": "x1"}
self.assertSetEqual(
self.custom.get_ivs("xi1", "x2", scaling_indicators=scale_custom),
{"y1", "y2", "y3", "y4", "y5"},
)
self.assertSetEqual(
self.custom.get_ivs("xi1", "y4", scaling_indicators=scale_custom), {"x2"}
)
self.assertSetEqual(
self.custom.get_ivs("xi1", "y1", scaling_indicators=scale_custom), {"x2", "y4"}
)
self.assertSetEqual(
self.custom.get_ivs("xi1", "eta1", scaling_indicators=scale_custom), {"x2", "y4"}
)
# TODO: Test this and fix.
self.assertSetEqual(
self.custom.get_ivs("y1", "eta1", scaling_indicators=scale_custom), {"x2", "y4", "y5"}
)
self.assertSetEqual(
self.custom.get_ivs("y1", "eta2", scaling_indicators=scale_custom),
{"x1", "x2", "y2", "y3", "y4"},
)
self.assertSetEqual(self.custom.get_ivs("y4", "y1", scaling_indicators=scale_custom), set())
self.assertSetEqual(
self.custom.get_ivs("eta1", "y3", scaling_indicators=scale_custom), {"x1", "x2", "y4"}
)
def test_small_model_ivs(self):
model1 = SEMGraph(
ebunch=[("X", "Y"), ("I", "X"), ("W", "I")],
latents=[],
err_corr=[("W", "Y")],
err_var={},
)
self.assertEqual(model1.get_conditional_ivs("X", "Y"), [("I", {"W"})])
model2 = SEMGraph(
ebunch=[("x", "y"), ("z", "x"), ("w", "z"), ("w", "u"), ("u", "x"), ("u", "y")],
latents=["u"],
)
self.assertEqual(model2.get_conditional_ivs("x", "y"), [("z", {"w"})])
model3 = SEMGraph(ebunch=[("x", "y"), ("u", "x"), ("u", "y"), ("z", "x")], latents=["u"])
self.assertEqual(model3.get_ivs("x", "y"), {"z"})
model4 = SEMGraph(ebunch=[("x", "y"), ("z", "x"), ("u", "x"), ("u", "y")])
self.assertEqual(model4.get_conditional_ivs("x", "y"), [("z", {"u"})])
class TestSEMAlg(unittest.TestCase):
def setUp(self):
self.demo = SEMGraph(
ebunch=[
("xi1", "x1", 1.000),
("xi1", "x2", 2.180),
("xi1", "x3", 1.819),
("xi1", "eta1", 1.483),
("eta1", "y1", 1.000),
("eta1", "y2", 1.257),
("eta1", "y3", 1.058),
("eta1", "y4", 1.265),
("eta1", "eta2", 0.837),
("xi1", "eta2", 0.572),
("eta2", "y5", 1.000),
("eta2", "y6", 1.186),
("eta2", "y7", 1.280),
("eta2", "y8", 1.266),
],
latents=["xi1", "eta1", "eta2"],
err_corr=[
("y1", "y5", 0.624),
("y2", "y6", 2.153),
("y2", "y4", 1.313),
("y3", "y7", 0.795),
("y4", "y8", 0.348),
("y6", "y8", 1.356),
],
err_var={
"x1": 0.082,
"x2": 0.120,
"x3": 0.467,
"y1": 1.891,
"y2": 7.373,
"y3": 5.067,
"y4": 3.148,
"y5": 2.351,
"y6": 4.954,
"y7": 3.431,
"y8": 3.254,
"xi1": 0.448,
"eta1": 3.956,
"eta2": 0.172,
},
)
self.demo_lisrel = self.demo.to_lisrel()
self.small_model = SEM.from_graph(
ebunch=[("X", "Y", 0.3)], latents=[], err_var={"X": 0.1, "Y": 0.1}
)
self.small_model_lisrel = self.small_model.to_lisrel()
def test_generate_samples(self):
samples = self.small_model_lisrel.generate_samples(n_samples=100)
samples = self.demo_lisrel.generate_samples(n_samples=100)
| en | 0.784806 | # %load model.lav # measurement model ind60 =~ x1 + x2 + x3 dem60 =~ y1 + y2 + y3 + y4 dem65 =~ y5 + y6 + y7 + y8 # regressions dem60 ~ ind60 dem65 ~ ind60 + dem60 # residual correlations y1 ~~ y5 y2 ~~ y4 + y6 y3 ~~ y7 y4 ~~ y8 y6 ~~ y8 # Undirected Graph, needs to handle when edges returned in reverse. # TODO: Add this test when done writing the tests for SEMAlg # TODO: Add this. # Test demo # Test union # Test demo_params # Test demo # This case wouldn't have conditonal IV if the Total effect between `deferenc` and # `unionsen` needs to be computed because one of the conditional variable lies on the # effect path. # TODO: Test this and fix. | 2.488555 | 2 |
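A minimal sketch of the pgmpy API exercised by the tests in the row above, assuming the same SEMGraph constructor signature and LISREL attributes (eta, B, zeta and their fixed-value masks) that the assertions reference:
from pgmpy.models import SEMGraph

# Two observed indicators loading on one latent, with fixed edge weights and error variances.
toy = SEMGraph(
    ebunch=[("xi1", "x1", 1.0), ("xi1", "x2", 2.0)],
    latents=["xi1"],
    err_corr=[],
    err_var={"x1": 0.1, "x2": 0.1, "xi1": 1.0},
)
toy_lisrel = toy.to_lisrel()
# The LISREL form exposes the variable ordering and parameter matrices that the
# tests above reorder and compare against expected arrays.
print(toy_lisrel.eta)
print(toy_lisrel.B_fixed_mask)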
data_scripts/translation.py | wangcongcong123/transection | 4 | 10593 | # coding=utf-8
# This script is finished following HF's datasets' template:
# https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py
# More examples as references to write a customized dataset can be found here:
# https://github.com/huggingface/datasets/tree/master/datasets
from __future__ import absolute_import, division, print_function
import json
import datasets
_CITATION = """\
"""
_DESCRIPTION = """\
"""
_TRAIN_DOWNLOAD_URL = "data/train.json"
_VAL_DOWNLOAD_URL = "data/val.json"
class Translation(datasets.GeneratorBasedBuilder):
"""customize dataset."""
# VERSION = datasets.Version("1.0.0")
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"source": datasets.Value("string"),
"target": datasets.Value("string"),
}
),
supervised_keys=None,
homepage="#",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
val_path = dl_manager.download_and_extract(_VAL_DOWNLOAD_URL)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": val_path}),
]
def _generate_examples(self, filepath):
with open(filepath, encoding='utf-8') as f:
for id_, row in enumerate(f):
data = json.loads(row)
yield id_, {
"source": data["english"],
"target": data["chinese"],
}
| # coding=utf-8
# This script is finished following HF's datasets' template:
# https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py
# More examples as references to write a customized dataset can be found here:
# https://github.com/huggingface/datasets/tree/master/datasets
from __future__ import absolute_import, division, print_function
import json
import datasets
_CITATION = """\
"""
_DESCRIPTION = """\
"""
_TRAIN_DOWNLOAD_URL = "data/train.json"
_VAL_DOWNLOAD_URL = "data/val.json"
class Translation(datasets.GeneratorBasedBuilder):
"""customize dataset."""
# VERSION = datasets.Version("1.0.0")
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"source": datasets.Value("string"),
"target": datasets.Value("string"),
}
),
supervised_keys=None,
homepage="#",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
val_path = dl_manager.download_and_extract(_VAL_DOWNLOAD_URL)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": val_path}),
]
def _generate_examples(self, filepath):
with open(filepath, encoding='utf-8') as f:
for id_, row in enumerate(f):
data = json.loads(row)
yield id_, {
"source": data["english"],
"target": data["chinese"],
}
| en | 0.732061 | # coding=utf-8 # This script is finished following HF's datasets' template: # https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py # More examples as references to write a customized dataset can be found here: # https://github.com/huggingface/datasets/tree/master/datasets \ \ customize dataset. # VERSION = datasets.Version("1.0.0") | 2.868339 | 3 |
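A short usage sketch for the builder script in the row above, assuming a datasets version that can load a local script path and that the JSON files referenced by _TRAIN_DOWNLOAD_URL and _VAL_DOWNLOAD_URL exist relative to it:
from datasets import load_dataset

# Point load_dataset at the local builder script; the split names come from _split_generators.
ds = load_dataset("data_scripts/translation.py")
print(ds["train"][0])        # e.g. {"source": "<english sentence>", "target": "<chinese sentence>"}
print(ds["validation"][0])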
utils/chat_formatting.py | lyricalpaws/snekbot | 13 | 10594 | import itertools
from typing import Sequence, Iterator
# Source: https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/core/utils/chat_formatting.py
def error(text: str) -> str:
"""Get text prefixed with an error emoji.
Returns
-------
str
The new message.
"""
return "\N{NO ENTRY SIGN} {}".format(text)
def warning(text: str) -> str:
"""Get text prefixed with a warning emoji.
Returns
-------
str
The new message.
"""
return "\N{WARNING SIGN} {}".format(text)
def info(text: str) -> str:
"""Get text prefixed with an info emoji.
Returns
-------
str
The new message.
"""
return "\N{INFORMATION SOURCE} {}".format(text)
def question(text: str) -> str:
"""Get text prefixed with a question emoji.
Returns
-------
str
The new message.
"""
return "\N{BLACK QUESTION MARK ORNAMENT} {}".format(text)
def bold(text: str) -> str:
"""Get the given text in bold.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "**{}**".format(text)
def box(text: str, lang: str = "") -> str:
"""Get the given text in a code block.
Parameters
----------
text : str
The text to be marked up.
lang : `str`, optional
The syntax highlighting language for the codeblock.
Returns
-------
str
The marked up text.
"""
ret = "```{}\n{}\n```".format(lang, text)
return ret
def inline(text: str) -> str:
"""Get the given text as inline code.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "`{}`".format(text)
def italics(text: str) -> str:
"""Get the given text in italics.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "*{}*".format(text)
def bordered(*columns: Sequence[str], ascii_border: bool = False) -> str:
"""Get two blocks of text in a borders.
Note
----
This will only work with a monospaced font.
Parameters
----------
*columns : `sequence` of `str`
The columns of text, each being a list of lines in that column.
ascii_border : bool
Whether or not the border should be pure ASCII.
Returns
-------
str
The bordered text.
"""
borders = {
"TL": "-" if ascii_border else "┌", # Top-left
"TR": "-" if ascii_border else "┐", # Top-right
"BL": "-" if ascii_border else "└", # Bottom-left
"BR": "-" if ascii_border else "┘", # Bottom-right
"HZ": "-" if ascii_border else "─", # Horizontal
"VT": "|" if ascii_border else "│", # Vertical
}
sep = " " * 4 # Separator between boxes
widths = tuple(
max(len(row) for row in column) + 9 for column in columns
) # width of each col
colsdone = [False] * len(columns) # whether or not each column is done
lines = [sep.join("{TL}" + "{HZ}" * width + "{TR}" for width in widths)]
for line in itertools.zip_longest(*columns):
row = []
for colidx, column in enumerate(line):
width = widths[colidx]
done = colsdone[colidx]
if column is None:
if not done:
# bottom border of column
column = "{HZ}" * width
row.append("{BL}" + column + "{BR}")
colsdone[colidx] = True # mark column as done
else:
# leave empty
row.append(" " * (width + 2))
else:
column += " " * (width - len(column)) # append padded spaces
row.append("{VT}" + column + "{VT}")
lines.append(sep.join(row))
final_row = []
for width, done in zip(widths, colsdone):
if not done:
final_row.append("{BL}" + "{HZ}" * width + "{BR}")
else:
final_row.append(" " * (width + 2))
lines.append(sep.join(final_row))
return "\n".join(lines).format(**borders)
def pagify(
text: str,
delims: Sequence[str] = ["\n"],
*,
priority: bool = False,
escape_mass_mentions: bool = True,
shorten_by: int = 8,
page_length: int = 2000
) -> Iterator[str]:
"""Generate multiple pages from the given text.
Note
----
This does not respect code blocks or inline code.
Parameters
----------
text : str
The content to pagify and send.
delims : `sequence` of `str`, optional
Characters where page breaks will occur. If no delimiters are found
in a page, the page will break after ``page_length`` characters.
By default this only contains the newline.
Other Parameters
----------------
priority : `bool`
Set to :code:`True` to choose the page break delimiter based on the
order of ``delims``. Otherwise, the page will always break at the
last possible delimiter.
escape_mass_mentions : `bool`
If :code:`True`, any mass mentions (here or everyone) will be
silenced.
shorten_by : `int`
How much to shorten each page by. Defaults to 8.
page_length : `int`
The maximum length of each page. Defaults to 2000.
Yields
------
`str`
Pages of the given text.
"""
in_text = text
page_length -= shorten_by
while len(in_text) > page_length:
this_page_len = page_length
if escape_mass_mentions:
this_page_len -= in_text.count("@here", 0, page_length) + in_text.count(
"@everyone", 0, page_length
)
closest_delim = (in_text.rfind(d, 1, this_page_len) for d in delims)
if priority:
closest_delim = next((x for x in closest_delim if x > 0), -1)
else:
closest_delim = max(closest_delim)
closest_delim = closest_delim if closest_delim != -1 else this_page_len
if escape_mass_mentions:
to_send = escape(in_text[:closest_delim], mass_mentions=True)
else:
to_send = in_text[:closest_delim]
        # only yield pages that contain non-whitespace content
        if to_send.strip():
            yield to_send
        in_text = in_text[closest_delim:]
    if in_text.strip():
if escape_mass_mentions:
yield escape(in_text, mass_mentions=True)
else:
yield in_text
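# Usage sketch (editor's note): pagify is a generator, so iterate over it and
# send each page separately; page_length=2000 matches Discord's message limit
# and shorten_by leaves headroom for any extra markup added by the caller.
#
#     report = "line\n" * 5000
#     for page in pagify(report, page_length=2000):
#         ...  # each page breaks on a newline and stays under the limit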
def strikethrough(text: str) -> str:
"""Get the given text with a strikethrough.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "~~{}~~".format(text)
def underline(text: str) -> str:
"""Get the given text with an underline.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "__{}__".format(text)
def escape(text: str, *, mass_mentions: bool = False, formatting: bool = False) -> str:
"""Get text with all mass mentions or markdown escaped.
Parameters
----------
text : str
The text to be escaped.
mass_mentions : `bool`, optional
Set to :code:`True` to escape mass mentions in the text.
formatting : `bool`, optional
        Set to :code:`True` to escape any markdown formatting in the text.
Returns
-------
str
The escaped text.
"""
if mass_mentions:
text = text.replace("@everyone", "@\u200beveryone")
text = text.replace("@here", "@\u200bhere")
if formatting:
text = (
text.replace("`", "\\`")
.replace("*", "\\*")
.replace("_", "\\_")
.replace("~", "\\~")
)
return text
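# Example (editor's note): escaping mass mentions inserts a zero-width space
# so the text renders literally instead of pinging, e.g.
# escape("hi @everyone", mass_mentions=True) -> "hi @\u200beveryone".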
| import itertools
from typing import Sequence, Iterator
# Source: https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/core/utils/chat_formatting.py
def error(text: str) -> str:
"""Get text prefixed with an error emoji.
Returns
-------
str
The new message.
"""
return "\N{NO ENTRY SIGN} {}".format(text)
def warning(text: str) -> str:
"""Get text prefixed with a warning emoji.
Returns
-------
str
The new message.
"""
return "\N{WARNING SIGN} {}".format(text)
def info(text: str) -> str:
"""Get text prefixed with an info emoji.
Returns
-------
str
The new message.
"""
return "\N{INFORMATION SOURCE} {}".format(text)
def question(text: str) -> str:
"""Get text prefixed with a question emoji.
Returns
-------
str
The new message.
"""
return "\N{BLACK QUESTION MARK ORNAMENT} {}".format(text)
def bold(text: str) -> str:
"""Get the given text in bold.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "**{}**".format(text)
def box(text: str, lang: str = "") -> str:
"""Get the given text in a code block.
Parameters
----------
text : str
The text to be marked up.
lang : `str`, optional
The syntax highlighting language for the codeblock.
Returns
-------
str
The marked up text.
"""
ret = "```{}\n{}\n```".format(lang, text)
return ret
def inline(text: str) -> str:
"""Get the given text as inline code.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "`{}`".format(text)
def italics(text: str) -> str:
"""Get the given text in italics.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "*{}*".format(text)
def bordered(*columns: Sequence[str], ascii_border: bool = False) -> str:
"""Get two blocks of text in a borders.
Note
----
This will only work with a monospaced font.
Parameters
----------
*columns : `sequence` of `str`
The columns of text, each being a list of lines in that column.
ascii_border : bool
Whether or not the border should be pure ASCII.
Returns
-------
str
The bordered text.
"""
borders = {
"TL": "-" if ascii_border else "┌", # Top-left
"TR": "-" if ascii_border else "┐", # Top-right
"BL": "-" if ascii_border else "└", # Bottom-left
"BR": "-" if ascii_border else "┘", # Bottom-right
"HZ": "-" if ascii_border else "─", # Horizontal
"VT": "|" if ascii_border else "│", # Vertical
}
sep = " " * 4 # Separator between boxes
widths = tuple(
max(len(row) for row in column) + 9 for column in columns
) # width of each col
colsdone = [False] * len(columns) # whether or not each column is done
lines = [sep.join("{TL}" + "{HZ}" * width + "{TR}" for width in widths)]
for line in itertools.zip_longest(*columns):
row = []
for colidx, column in enumerate(line):
width = widths[colidx]
done = colsdone[colidx]
if column is None:
if not done:
# bottom border of column
column = "{HZ}" * width
row.append("{BL}" + column + "{BR}")
colsdone[colidx] = True # mark column as done
else:
# leave empty
row.append(" " * (width + 2))
else:
column += " " * (width - len(column)) # append padded spaces
row.append("{VT}" + column + "{VT}")
lines.append(sep.join(row))
final_row = []
for width, done in zip(widths, colsdone):
if not done:
final_row.append("{BL}" + "{HZ}" * width + "{BR}")
else:
final_row.append(" " * (width + 2))
lines.append(sep.join(final_row))
return "\n".join(lines).format(**borders)
def pagify(
text: str,
delims: Sequence[str] = ["\n"],
*,
priority: bool = False,
escape_mass_mentions: bool = True,
shorten_by: int = 8,
page_length: int = 2000
) -> Iterator[str]:
"""Generate multiple pages from the given text.
Note
----
This does not respect code blocks or inline code.
Parameters
----------
text : str
The content to pagify and send.
delims : `sequence` of `str`, optional
Characters where page breaks will occur. If no delimiters are found
in a page, the page will break after ``page_length`` characters.
By default this only contains the newline.
Other Parameters
----------------
priority : `bool`
Set to :code:`True` to choose the page break delimiter based on the
order of ``delims``. Otherwise, the page will always break at the
last possible delimiter.
escape_mass_mentions : `bool`
If :code:`True`, any mass mentions (here or everyone) will be
silenced.
shorten_by : `int`
How much to shorten each page by. Defaults to 8.
page_length : `int`
The maximum length of each page. Defaults to 2000.
Yields
------
`str`
Pages of the given text.
"""
in_text = text
page_length -= shorten_by
while len(in_text) > page_length:
this_page_len = page_length
if escape_mass_mentions:
this_page_len -= in_text.count("@here", 0, page_length) + in_text.count(
"@everyone", 0, page_length
)
closest_delim = (in_text.rfind(d, 1, this_page_len) for d in delims)
if priority:
closest_delim = next((x for x in closest_delim if x > 0), -1)
else:
closest_delim = max(closest_delim)
closest_delim = closest_delim if closest_delim != -1 else this_page_len
if escape_mass_mentions:
to_send = escape(in_text[:closest_delim], mass_mentions=True)
else:
to_send = in_text[:closest_delim]
if not to_send.strip():
yield to_send
in_text = in_text[closest_delim:]
if not in_text.strip():
if escape_mass_mentions:
yield escape(in_text, mass_mentions=True)
else:
yield in_text
def strikethrough(text: str) -> str:
"""Get the given text with a strikethrough.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "~~{}~~".format(text)
def underline(text: str) -> str:
"""Get the given text with an underline.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "__{}__".format(text)
def escape(text: str, *, mass_mentions: bool = False, formatting: bool = False) -> str:
"""Get text with all mass mentions or markdown escaped.
Parameters
----------
text : str
The text to be escaped.
mass_mentions : `bool`, optional
Set to :code:`True` to escape mass mentions in the text.
formatting : `bool`, optional
Set to :code:`True` to escpae any markdown formatting in the text.
Returns
-------
str
The escaped text.
"""
if mass_mentions:
text = text.replace("@everyone", "@\u200beveryone")
text = text.replace("@here", "@\u200bhere")
if formatting:
text = (
text.replace("`", "\\`")
.replace("*", "\\*")
.replace("_", "\\_")
.replace("~", "\\~")
)
return text
| en | 0.567807 | # Source: https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/core/utils/chat_formatting.py Get text prefixed with an error emoji. Returns ------- str The new message. Get text prefixed with a warning emoji. Returns ------- str The new message. Get text prefixed with an info emoji. Returns ------- str The new message. Get text prefixed with a question emoji. Returns ------- str The new message. Get the given text in bold. Parameters ---------- text : str The text to be marked up. Returns ------- str The marked up text. Get the given text in a code block. Parameters ---------- text : str The text to be marked up. lang : `str`, optional The syntax highlighting language for the codeblock. Returns ------- str The marked up text. Get the given text as inline code. Parameters ---------- text : str The text to be marked up. Returns ------- str The marked up text. Get the given text in italics. Parameters ---------- text : str The text to be marked up. Returns ------- str The marked up text. Get two blocks of text in a borders. Note ---- This will only work with a monospaced font. Parameters ---------- *columns : `sequence` of `str` The columns of text, each being a list of lines in that column. ascii_border : bool Whether or not the border should be pure ASCII. Returns ------- str The bordered text. # Top-left # Top-right # Bottom-left # Bottom-right # Horizontal # Vertical # Separator between boxes # width of each col # whether or not each column is done # bottom border of column # mark column as done # leave empty # append padded spaces Generate multiple pages from the given text. Note ---- This does not respect code blocks or inline code. Parameters ---------- text : str The content to pagify and send. delims : `sequence` of `str`, optional Characters where page breaks will occur. If no delimiters are found in a page, the page will break after ``page_length`` characters. By default this only contains the newline. Other Parameters ---------------- priority : `bool` Set to :code:`True` to choose the page break delimiter based on the order of ``delims``. Otherwise, the page will always break at the last possible delimiter. escape_mass_mentions : `bool` If :code:`True`, any mass mentions (here or everyone) will be silenced. shorten_by : `int` How much to shorten each page by. Defaults to 8. page_length : `int` The maximum length of each page. Defaults to 2000. Yields ------ `str` Pages of the given text. Get the given text with a strikethrough. Parameters ---------- text : str The text to be marked up. Returns ------- str The marked up text. Get the given text with an underline. Parameters ---------- text : str The text to be marked up. Returns ------- str The marked up text. Get text with all mass mentions or markdown escaped. Parameters ---------- text : str The text to be escaped. mass_mentions : `bool`, optional Set to :code:`True` to escape mass mentions in the text. formatting : `bool`, optional Set to :code:`True` to escpae any markdown formatting in the text. Returns ------- str The escaped text. | 3.424588 | 3 |
strategy/trade/strategymargintrade.py | firebird631/siis | 0 | 10595 | # @date 2018-12-28
# @author <NAME>, All rights reserved without prejudices.
# @license Copyright (c) 2018 Dream Overflow
# Strategy trade for margin with multiples positions.
from __future__ import annotations
from typing import TYPE_CHECKING, Optional, Tuple
if TYPE_CHECKING:
from trader.trader import Trader
from instrument.instrument import Instrument
from strategy.strategytrader import StrategyTrader
from strategy.strategytradercontext import StrategyTraderContextBuilder
from common.signal import Signal
from trader.order import Order
from .strategytrade import StrategyTrade
import logging
logger = logging.getLogger('siis.strategy.margintrade')
class StrategyMarginTrade(StrategyTrade):
"""
Specialization for margin trading.
    This type of trade targets margin trading markets, with or without hedging, where there is a
    position identifier per trade, but positions are generally in the same direction (no hedging).
Works with crypto margin brokers (kraken...).
    @todo Do we need, as with asset trades, an exit_trades list to compute the axp and x values?
    If we rely on cumulative-filled and avg-price we face the same problem here too.
    @todo Check the position_updated qty against the direction, or make sure trade signals clearly
    distinguish entry from exit.
    @todo Fees and commissions.
"""
__slots__ = 'create_ref_oid', 'stop_ref_oid', 'limit_ref_oid', 'create_oid', 'stop_oid', 'limit_oid', \
'position_id', 'leverage', 'stop_order_qty', 'limit_order_qty'
def __init__(self, timeframe: float):
super().__init__(StrategyTrade.TRADE_MARGIN, timeframe)
self.create_ref_oid = None
self.stop_ref_oid = None
self.limit_ref_oid = None
self.create_oid = None # related entry order id
self.stop_oid = None # related stop order id
self.limit_oid = None # related limit order id
self.position_id = None # related informal position id
self.leverage = 1.0
self.stop_order_qty = 0.0 # if stop_oid then this is the qty placed on the stop order
self.limit_order_qty = 0.0 # if limit_oid then this is the qty placed on the limit order
def open(self, trader: Trader, instrument: Instrument, direction: int, order_type: int,
order_price: float, quantity: float, take_profit: float, stop_loss: float,
leverage: float = 1.0, hedging: Optional[bool] = None) -> bool:
"""
Open a position or buy an asset.
"""
if self._entry_state != StrategyTrade.STATE_NEW:
return False
order = Order(trader, instrument.market_id)
order.direction = direction
order.price = order_price
order.order_type = order_type
order.quantity = quantity
order.post_only = False
order.margin_trade = True
order.leverage = leverage
if hedging:
order.hedging = hedging
        # generate a reference order id
trader.set_ref_order_id(order)
self.create_ref_oid = order.ref_order_id
self.dir = order.direction
self.op = order.price # retains the order price
self.oq = order.quantity # ordered quantity
self.tp = take_profit
self.sl = stop_loss
self.leverage = leverage
self._stats['entry-order-type'] = order.order_type
if trader.create_order(order, instrument) > 0:
# keep the related create position identifier if available
self.create_oid = order.order_id
self.position_id = order.position_id
if not self.eot and order.created_time:
# only at the first open
self.eot = order.created_time
return True
else:
self._entry_state = StrategyTrade.STATE_REJECTED
return False
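    # Usage sketch (editor's note, names and values are illustrative
    # assumptions): a typical limit entry for a long margin trade, using a
    # trader/instrument pair owned by the running strategy.
    #
    #     trade = StrategyMarginTrade(timeframe=3600.0)
    #     trade.open(trader, instrument, direction=1,
    #                order_type=Order.ORDER_LIMIT, order_price=25000.0,
    #                quantity=0.01, take_profit=26000.0, stop_loss=24500.0,
    #                leverage=2.0)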
def reopen(self, trader: Trader, instrument: Instrument, quantity: float) -> bool:
if self._entry_state != StrategyTrade.STATE_CANCELED:
return False
# reset
self._entry_state = StrategyTrade.STATE_NEW
self.eot = 0
order = Order(trader, instrument.market_id)
order.direction = self.dir
order.price = self.op
order.order_type = self._stats['entry-order-type']
order.quantity = quantity
order.post_only = False
order.margin_trade = True
order.leverage = self.leverage
        # generate a reference order id
trader.set_ref_order_id(order)
self.create_ref_oid = order.ref_order_id
self.oq = order.quantity # ordered quantity
if trader.create_order(order, instrument) > 0:
self.create_oid = order.order_id
self.position_id = order.position_id
if not self.eot and order.created_time:
# only at the first open
self.eot = order.created_time
return True
else:
self._entry_state = StrategyTrade.STATE_REJECTED
return False
def remove(self, trader: Trader, instrument: Instrument) -> int:
"""
Remove the orders, but doesn't close the position.
"""
error = False
if self.create_oid:
# cancel the remaining buy order
if trader.cancel_order(self.create_oid, instrument) > 0:
self.create_ref_oid = None
self.create_oid = None
if self.e <= 0:
# no entry qty processed, entry canceled
self._entry_state = StrategyTrade.STATE_CANCELED
else:
# cancel a partially filled trade means it is then fully filled
self._entry_state = StrategyTrade.STATE_FILLED
else:
error = True
if self.stop_oid:
# cancel the stop order
if trader.cancel_order(self.stop_oid, instrument) > 0:
self.stop_ref_oid = None
self.stop_oid = None
self.stop_order_qty = 0.0
if self.e <= 0 and self.x <= 0:
# no exit qty
self._exit_state = StrategyTrade.STATE_CANCELED
elif self.x >= self.e:
self._exit_state = StrategyTrade.STATE_FILLED
else:
self._exit_state = StrategyTrade.STATE_PARTIALLY_FILLED
else:
error = True
if self.limit_oid:
# cancel the limit order
if trader.cancel_order(self.limit_oid, instrument) > 0:
self.limit_ref_oid = None
self.limit_oid = None
self.limit_order_qty = 0.0
if self.e <= 0 and self.x <= 0:
# no exit qty
self._exit_state = StrategyTrade.STATE_CANCELED
elif self.x >= self.e:
self._exit_state = StrategyTrade.STATE_FILLED
else:
self._exit_state = StrategyTrade.STATE_PARTIALLY_FILLED
else:
error = True
return not error
def cancel_open(self, trader: Trader, instrument: Instrument) -> int:
if self.create_oid:
# cancel the buy order
if trader.cancel_order(self.create_oid, instrument) > 0:
self.create_ref_oid = None
self.create_oid = None
if self.e <= 0:
# cancel a just opened trade means it is canceled
self._entry_state = StrategyTrade.STATE_CANCELED
else:
# cancel a partially filled trade means it is then fully filled
self._entry_state = StrategyTrade.STATE_FILLED
return self.ACCEPTED
else:
data = trader.order_info(self.create_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no create order, nothing to do
self.create_ref_oid = None
self.create_oid = None
self._entry_state = StrategyTrade.STATE_CANCELED
else:
# exists, do nothing need to retry
return self.ERROR
return self.NOTHING_TO_DO
def modify_take_profit(self, trader: Trader, instrument: Instrument, limit_price: float, hard: bool = True) -> int:
if self._closing:
# already closing order
return self.NOTHING_TO_DO
if self._exit_state == StrategyTrade.STATE_FILLED:
# exit already fully filled
return self.NOTHING_TO_DO
if self.limit_oid:
# cancel the limit order and create a new one
if trader.cancel_order(self.limit_oid, instrument) > 0:
self.limit_ref_oid = None
self.limit_oid = None
self.limit_order_qty = 0.0
else:
data = trader.order_info(self.limit_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no limit order
self.limit_ref_oid = None
self.limit_oid = None
self.limit_order_qty = 0.0
else:
return self.ERROR
if self.x >= self.e:
            # all entry qty is filled; if less, something is wrong but it's ok
return self.NOTHING_TO_DO
if limit_price and hard:
# only if filled entry partially or totally
order = Order(trader, instrument.market_id)
order.direction = -self.direction
order.order_type = Order.ORDER_LIMIT
order.reduce_only = True
order.quantity = self.e - self.x # remaining
order.price = limit_price
order.margin_trade = True
order.leverage = self.leverage
trader.set_ref_order_id(order)
self.limit_ref_oid = order.ref_order_id
self._stats['take-profit-order-type'] = order.order_type
create_order_result = trader.create_order(order, instrument)
if create_order_result > 0:
self.limit_oid = order.order_id
self.limit_order_qty = order.quantity
self.last_tp_ot[0] = order.created_time
self.last_tp_ot[1] += 1
self.tp = limit_price
return self.ACCEPTED
elif create_order_result == Order.REASON_INSUFFICIENT_MARGIN:
# rejected because not enough margin, must stop to retry
self.limit_ref_oid = None
self.limit_order_qty = 0.0
self._exit_state = self.STATE_ERROR
return self.INSUFFICIENT_MARGIN
else:
self.limit_ref_oid = None
self.limit_order_qty = 0.0
return self.REJECTED
elif limit_price:
# soft take-profit
self.tp = limit_price
else:
# remove take-profit
self.tp = 0.0
return self.NOTHING_TO_DO
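    # Note (editor's addition): with hard=True a real limit order is placed on
    # the market for the remaining quantity (self.e - self.x); with hard=False
    # only self.tp is updated and the take-profit is expected to be handled in
    # software by the strategy.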
def modify_stop_loss(self, trader: Trader, instrument: Instrument, stop_price: float, hard: bool = True) -> int:
if self._closing:
# already closing order
return self.NOTHING_TO_DO
if self._exit_state == StrategyTrade.STATE_FILLED:
# exit already fully filled
return self.NOTHING_TO_DO
if self.stop_oid:
# cancel the stop order and create a new one
if trader.cancel_order(self.stop_oid, instrument) > 0:
self.stop_ref_oid = None
self.stop_oid = None
self.stop_order_qty = 0.0
else:
data = trader.order_info(self.stop_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no stop order
self.stop_ref_oid = None
self.stop_oid = None
self.stop_order_qty = 0.0
else:
return self.ERROR
if self.x >= self.e:
            # all entry qty is filled; if less, something is wrong but it's ok
return self.NOTHING_TO_DO
if stop_price and hard:
# only if filled entry partially or totally
order = Order(trader, instrument.market_id)
order.direction = -self.direction
order.order_type = Order.ORDER_STOP
order.reduce_only = True
order.quantity = self.e - self.x # remaining
order.stop_price = stop_price
order.leverage = self.leverage
order.margin_trade = True
trader.set_ref_order_id(order)
self.stop_ref_oid = order.ref_order_id
self._stats['stop-order-type'] = order.order_type
create_order_result = trader.create_order(order, instrument)
if create_order_result > 0:
self.stop_oid = order.order_id
self.stop_order_qty = order.quantity
self.last_stop_ot[0] = order.created_time
self.last_stop_ot[1] += 1
self.sl = stop_price
return self.ACCEPTED
elif create_order_result == Order.REASON_INSUFFICIENT_MARGIN:
# rejected because not enough margin, must stop to retry
self.stop_ref_oid = None
self.stop_order_qty = 0.0
self._exit_state = self.STATE_ERROR
return self.INSUFFICIENT_MARGIN
else:
self.stop_ref_oid = None
self.stop_order_qty = 0.0
return self.REJECTED
elif stop_price:
# soft stop-loss
self.sl = stop_price
else:
# remove stop-loss
self.sl = 0.0
return self.NOTHING_TO_DO
def close(self, trader: Trader, instrument: Instrument) -> int:
"""
Close the position and cancel the related orders.
"""
if self._closing:
# already closing order
return self.NOTHING_TO_DO
if self.create_oid:
# cancel the remaining buy order
if trader.cancel_order(self.create_oid, instrument) > 0:
self.create_ref_oid = None
self.create_oid = None
self._entry_state = StrategyTrade.STATE_CANCELED
else:
data = trader.order_info(self.create_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no create order
self.create_ref_oid = None
self.create_oid = None
else:
return self.ERROR
if self.stop_oid:
# cancel the stop order
if trader.cancel_order(self.stop_oid, instrument) > 0:
self.stop_ref_oid = None
self.stop_oid = None
self.stop_order_qty = 0.0
else:
data = trader.order_info(self.stop_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no stop order
self.stop_ref_oid = None
self.stop_oid = None
self.stop_order_qty = 0.0
else:
return self.ERROR
if self.limit_oid:
# cancel the limit order
if trader.cancel_order(self.limit_oid, instrument) > 0:
self.limit_ref_oid = None
self.limit_oid = None
self.limit_order_qty = 0.0
else:
data = trader.order_info(self.limit_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no limit order
self.limit_ref_oid = None
self.limit_oid = None
self.limit_order_qty = 0.0
else:
return self.ERROR
if self.x >= self.e:
# all qty is filled
return self.NOTHING_TO_DO
order = Order(trader, instrument.market_id)
order.direction = -self.dir # neg dir
order.order_type = Order.ORDER_MARKET
order.reduce_only = True
order.quantity = self.e - self.x # remaining qty
order.margin_trade = True
order.leverage = self.leverage
        # generate a reference order id
trader.set_ref_order_id(order)
self.stop_ref_oid = order.ref_order_id
self._stats['stop-order-type'] = order.order_type
create_order_result = trader.create_order(order, instrument)
if create_order_result > 0:
self.stop_oid = order.order_id
self.stop_order_qty = order.quantity
# closing order defined
self._closing = True
return self.ACCEPTED
elif create_order_result == Order.REASON_INSUFFICIENT_MARGIN:
# rejected because not enough margin, must stop to retry
self.stop_ref_oid = None
self.stop_order_qty = 0.0
self._exit_state = self.STATE_ERROR
return self.INSUFFICIENT_MARGIN
else:
self.stop_ref_oid = None
self.stop_order_qty = 0.0
return self.REJECTED
def has_stop_order(self) -> bool:
return self.stop_oid is not None and self.stop_oid != ""
def has_limit_order(self) -> bool:
return self.limit_oid is not None and self.limit_oid != ""
def support_both_order(self) -> bool:
return True
@classmethod
def is_margin(cls) -> bool:
return True
@classmethod
def is_spot(cls) -> bool:
return False
#
# signal
#
def order_signal(self, signal_type: int, data: dict, ref_order_id: str, instrument: Instrument):
if signal_type == Signal.SIGNAL_ORDER_OPENED:
# already get at the return of create_order
if ref_order_id == self.create_ref_oid:
self.create_oid = data['id']
# init created timestamp at the create order open
if not self.eot:
self.eot = data['timestamp']
if data.get('stop-loss'):
self.sl = data['stop-loss']
if data.get('take-profit'):
self.tp = data['take-profit']
self._entry_state = StrategyTrade.STATE_OPENED
elif ref_order_id == self.stop_ref_oid:
self.stop_oid = data['id']
if not self.xot:
self.xot = data['timestamp']
elif ref_order_id == self.limit_ref_oid:
self.limit_oid = data['id']
if not self.xot:
self.xot = data['timestamp']
elif signal_type == Signal.SIGNAL_ORDER_DELETED:
# order is no longer active
if data == self.create_oid:
self.create_ref_oid = None
self.create_oid = None
self._entry_state = StrategyTrade.STATE_DELETED
elif data == self.limit_oid:
self.limit_ref_oid = None
self.limit_oid = None
elif data == self.stop_oid:
self.stop_ref_oid = None
self.stop_oid = None
elif signal_type == Signal.SIGNAL_ORDER_CANCELED:
# order is no longer active
if data == self.create_oid:
self.create_ref_oid = None
self.create_oid = None
self._entry_state = StrategyTrade.STATE_CANCELED
elif data == self.limit_oid:
self.limit_ref_oid = None
self.limit_oid = None
elif data == self.stop_oid:
self.stop_ref_oid = None
self.stop_oid = None
elif signal_type == Signal.SIGNAL_ORDER_UPDATED:
# order price/qty modified, cannot really be used because the strategy might
# cancel the trade or create another one.
# for the qty we could have a remaining_qty member, then comparing
pass
elif signal_type == Signal.SIGNAL_ORDER_TRADED:
# order fully or partially filled
filled = 0
if data['id'] == self.create_oid:
prev_e = self.e
                # a single order for the entry, so it is OK and preferred to use cumulative-filled and
                # avg-price because the precision comes from the broker
if data.get('cumulative-filled') is not None and data['cumulative-filled'] > 0:
filled = data['cumulative-filled'] - self.e # compute filled qty
elif data.get('filled') is not None and data['filled'] > 0:
filled = data['filled']
else:
filled = 0
if data.get('avg-price') is not None and data['avg-price'] > 0:
# in that case we have avg-price already computed
self.aep = data['avg-price']
elif data.get('exec-price') is not None and data['exec-price'] > 0:
# compute the average price
self.aep = ((self.aep * self.e) + (data['exec-price'] * filled)) / (self.e + filled)
else:
                    # none available, fall back to the order price
self.aep = self.op
# cumulative filled entry qty
if data.get('cumulative-filled') is not None:
self.e = data.get('cumulative-filled')
elif filled > 0:
self.e = instrument.adjust_quantity(self.e + filled)
if filled > 0:
# probably need to update exit orders
self._dirty = True
logger.info("Entry avg-price=%s cum-filled=%s" % (self.aep, self.e))
if self.e >= self.oq:
self._entry_state = StrategyTrade.STATE_FILLED
# if no send of ORDER_DELETED signal, cleanup here
self.create_oid = None
self.create_ref_oid = None
else:
self._entry_state = StrategyTrade.STATE_PARTIALLY_FILLED
# retains the trade timestamp
if not self._stats['first-realized-entry-timestamp']:
self._stats['first-realized-entry-timestamp'] = data.get('timestamp', 0.0)
self._stats['last-realized-entry-timestamp'] = data.get('timestamp', 0.0)
elif data['id'] == self.limit_oid or data['id'] == self.stop_oid:
prev_x = self.x
# either we have 'filled' component (partial qty) or the 'cumulative-filled' or both
if data.get('cumulative-filled') is not None and data['cumulative-filled'] > 0:
filled = data['cumulative-filled'] - self.x # computed filled qty
elif data.get('filled') is not None and data['filled'] > 0:
filled = data['filled']
else:
filled = 0
if data.get('avg-price') is not None and data['avg-price'] > 0:
# recompute profit-loss
if self.dir > 0:
self.pl = (data['avg-price'] - self.aep) / self.aep
elif self.dir < 0:
self.pl = (self.aep - data['avg-price']) / self.aep
# in that case we have avg-price already computed
self.axp = data['avg-price']
elif data.get('exec-price') is not None and data['exec-price'] > 0:
# increase/decrease profit/loss (over entry executed quantity)
if self.dir > 0:
self.pl += ((data['exec-price'] * filled) - (self.aep * filled)) / (self.aep * self.e)
elif self.dir < 0:
self.pl += ((self.aep * filled) - (data['exec-price'] * filled)) / (self.aep * self.e)
# compute the average price
self.axp = ((self.axp * self.x) + (data['exec-price'] * filled)) / (self.x + filled)
# cumulative filled exit qty
if data.get('cumulative-filled') is not None:
self.x = data.get('cumulative-filled')
elif filled > 0:
self.x = instrument.adjust_quantity(self.x + filled)
logger.info("Exit avg-price=%s cum-filled=%s" % (self.axp, self.x))
if self.x >= self.oq:
self._exit_state = StrategyTrade.STATE_FILLED
# if no send of ORDER_DELETED signal, cleanup here
if data['id'] == self.limit_oid:
self.limit_oid = None
self.limit_ref_oid = None
elif data['id'] == self.stop_oid:
self.stop_oid = None
self.stop_ref_oid = None
else:
self._exit_state = StrategyTrade.STATE_PARTIALLY_FILLED
# retains the trade timestamp
if not self._stats['first-realized-exit-timestamp']:
self._stats['first-realized-exit-timestamp'] = data.get('timestamp', 0.0)
self._stats['last-realized-exit-timestamp'] = data.get('timestamp', 0.0)
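    # Worked example (editor's note) of the weighted average used above when
    # only 'exec-price' is provided: with e=0.5 filled at aep=100.0, a new
    # fill of 0.5 at 102.0 gives aep = (100.0*0.5 + 102.0*0.5) / (0.5 + 0.5)
    # = 101.0 and e becomes 1.0; the exit side applies the same weighting to
    # axp over x.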
def position_signal(self, signal_type: int, data: dict, ref_order_id: str, instrument: Instrument):
if signal_type == Signal.SIGNAL_POSITION_OPENED:
self.position_id = data['id']
if data.get('profit-loss'):
self._stats['unrealized-profit-loss'] = data['profit-loss']
if data.get('profit-currency'):
self._stats['profit-loss-currency'] = data['profit-currency']
elif signal_type == Signal.SIGNAL_POSITION_UPDATED:
# update the unrealized profit-loss in currency
if data.get('profit-loss'):
self._stats['unrealized-profit-loss'] = data['profit-loss']
if data.get('profit-currency'):
self._stats['profit-loss-currency'] = data['profit-currency']
elif signal_type == Signal.SIGNAL_POSITION_DELETED:
# no longer related position
self.position_id = None
if data.get('profit-loss'):
self._stats['unrealized-profit-loss'] = data['profit-loss']
if data.get('profit-currency'):
self._stats['profit-loss-currency'] = data['profit-currency']
elif signal_type == Signal.SIGNAL_POSITION_AMENDED:
# might not occurs
pass
def is_target_order(self, order_id: str, ref_order_id: str) -> bool:
if order_id and (order_id == self.create_oid or order_id == self.stop_oid or order_id == self.limit_oid):
return True
if ref_order_id and (ref_order_id == self.create_ref_oid or
ref_order_id == self.stop_ref_oid or
ref_order_id == self.limit_ref_oid):
return True
return False
def is_target_position(self, position_id: str, ref_order_id: str) -> bool:
if position_id and (position_id == self.position_id):
return True
        if ref_order_id and (ref_order_id == self.create_ref_oid):
            return True
        return False
#
# persistence
#
def dumps(self) -> dict:
data = super().dumps()
data['create-ref-oid'] = self.create_ref_oid
data['stop-ref-oid'] = self.stop_ref_oid
data['limit-ref-oid'] = self.limit_ref_oid
data['create-oid'] = self.create_oid
data['stop-oid'] = self.stop_oid
data['limit-oid'] = self.limit_oid
data['position-id'] = self.position_id
data['stop-order-qty'] = self.stop_order_qty
data['limit-order-qty'] = self.limit_order_qty
return data
def loads(self, data: dict, strategy_trader: StrategyTrader,
context_builder: Optional[StrategyTraderContextBuilder] = None) -> bool:
if not super().loads(data, strategy_trader, context_builder):
return False
self.create_ref_oid = data.get('create-ref-oid')
self.stop_ref_oid = data.get('stop-ref-oid')
self.limit_ref_oid = data.get('limit-ref-oid')
self.create_oid = data.get('create-oid')
self.stop_oid = data.get('stop-oid')
self.limit_oid = data.get('limit-oid')
self.position_id = data.get('position-id')
self.stop_order_qty = data.get('stop-order-qty', 0.0)
self.limit_order_qty = data.get('limit-order-qty', 0.0)
return True
def check(self, trader: Trader, instrument: Instrument) -> int:
result = 1
#
# entry
#
if self.create_oid:
data = trader.order_info(self.create_oid, instrument)
if data is None:
# API error, do nothing need retry
result = -1
# entry order error status
# self._entry_state = StrategyTrade.STATE_ERROR
else:
if data['id'] is None:
# cannot retrieve the order, wrong id
result = 0
# no longer entry order
self.create_oid = None
self.create_ref_oid = None
else:
if data['cumulative-filled'] > self.e or data['fully-filled']:
self.order_signal(Signal.SIGNAL_ORDER_TRADED, data, data['ref-id'], instrument)
if data['status'] in ('closed', 'deleted'):
self.order_signal(Signal.SIGNAL_ORDER_DELETED, data['id'], data['ref-id'], instrument)
elif data['status'] in ('expired', 'canceled'):
self.order_signal(Signal.SIGNAL_ORDER_CANCELED, data['id'], data['ref-id'], instrument)
#
# exit
#
if self.stop_oid:
data = trader.order_info(self.stop_oid, instrument)
if data is None:
# API error, do nothing need retry
result = -1
# exit order error status
# self._exit_state = StrategyTrade.STATE_ERROR
else:
if data['id'] is None:
# cannot retrieve the order, wrong id
result = 0
# no longer stop order
self.stop_oid = None
self.stop_ref_oid = None
else:
if data['cumulative-filled'] > self.x or data['fully-filled']:
self.order_signal(Signal.SIGNAL_ORDER_TRADED, data, data['ref-id'], instrument)
if data['status'] in ('closed', 'deleted'):
self.order_signal(Signal.SIGNAL_ORDER_DELETED, data['id'], data['ref-id'], instrument)
elif data['status'] in ('expired', 'canceled'):
self.order_signal(Signal.SIGNAL_ORDER_CANCELED, data['id'], data['ref-id'], instrument)
if self.limit_oid:
data = trader.order_info(self.limit_oid, instrument)
if data is None:
# API error, do nothing need retry
result = -1
# exit order error status
# self._exit_state = StrategyTrade.STATE_ERROR
else:
if data['id'] is None:
# cannot retrieve the order, wrong id
result = 0
# no longer stop order
self.limit_oid = None
self.limit_ref_oid = None
else:
if data['cumulative-filled'] > self.x or data['fully-filled']:
self.order_signal(Signal.SIGNAL_ORDER_TRADED, data, data['ref-id'], instrument)
if data['status'] in ('closed', 'deleted'):
self.order_signal(Signal.SIGNAL_ORDER_DELETED, data['id'], data['ref-id'], instrument)
elif data['status'] in ('expired', 'canceled'):
self.order_signal(Signal.SIGNAL_ORDER_CANCELED, data['id'], data['ref-id'], instrument)
return result
def repair(self, trader: Trader, instrument: Instrument) -> bool:
# @todo fix the trade
return False
#
# stats
#
def update_stats(self, instrument: Instrument, timestamp: float):
super().update_stats(instrument, timestamp)
if self.is_active():
            # @todo only supports quantity expressed in the asset, not in lots or contracts of different size
last_price = instrument.close_exec_price(self.direction)
upnl = 0.0 # unrealized PNL
rpnl = 0.0 # realized PNL
# non realized quantity
nrq = self.e - self.x
if self.dir > 0:
upnl = last_price * nrq - self.aep * nrq
rpnl = self.axp * self.x - self.aep * self.x
elif self.dir < 0:
upnl = self.aep * nrq - last_price * nrq
rpnl = self.aep * self.x - self.axp * self.x
# including fees and realized profit and loss
self._stats['unrealized-profit-loss'] = instrument.adjust_quote(
upnl + rpnl - self._stats['entry-fees'] - self._stats['exit-fees'])
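    # Worked example (editor's note): for a long trade with aep=100, e=1.0,
    # x=0.4, axp=110 and last_price=105, nrq = 0.6, upnl = 105*0.6 - 100*0.6
    # = 3.0 and rpnl = 110*0.4 - 100*0.4 = 4.0, so the stored value is 7.0
    # minus entry and exit fees.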
def info_report(self, strategy_trader: StrategyTrader) -> Tuple[str]:
data = list(super().info_report(strategy_trader))
if self.create_oid or self.create_ref_oid:
data.append("Entry order id / ref : %s / %s" % (self.create_oid, self.create_ref_oid))
if self.stop_oid or self.stop_ref_oid:
data.append("Stop order id / ref : %s / %s" % (self.stop_oid, self.stop_ref_oid))
if self.limit_oid or self.limit_ref_oid:
data.append("Limit order id / ref : %s / %s" % (self.limit_oid, self.limit_ref_oid))
if self.position_id:
data.append("Position id : %s" % (self.position_id,))
return tuple(data)
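# Persistence sketch (editor's note): dumps() extends the base StrategyTrade
# payload with the order/position identifiers and loads() restores them, so a
# running trade can be recovered after a strategy restart. Illustrative round
# trip (the surrounding objects and the 'timeframe' key are assumptions):
#
#     payload = trade.dumps()
#     restored = StrategyMarginTrade(timeframe=payload.get('timeframe', 0.0))
#     restored.loads(payload, strategy_trader)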
| # @date 2018-12-28
# @author <NAME>, All rights reserved without prejudices.
# @license Copyright (c) 2018 Dream Overflow
# Strategy trade for margin with multiples positions.
from __future__ import annotations
from typing import TYPE_CHECKING, Optional, Tuple
if TYPE_CHECKING:
from trader.trader import Trader
from instrument.instrument import Instrument
from strategy.strategytrader import StrategyTrader
from strategy.strategytradercontext import StrategyTraderContextBuilder
from common.signal import Signal
from trader.order import Order
from .strategytrade import StrategyTrade
import logging
logger = logging.getLogger('siis.strategy.margintrade')
class StrategyMarginTrade(StrategyTrade):
"""
Specialization for margin trading.
This type of trade is related to margin trading market, allowing or not hedging, where there is a
position identifier per trade, but generally in the same direction (no hedging).
Works with crypto margin brokers (kraken...).
@todo do we need like with asset trade an exit_trades list to compute the axp and x values, because
if we use cumulative-filled and avg-price we have the same problem here too.
@todo have to check about position_updated qty with direction maybe or take care to have trade signal and
distinct entry from exit
@todo fees and commissions
"""
__slots__ = 'create_ref_oid', 'stop_ref_oid', 'limit_ref_oid', 'create_oid', 'stop_oid', 'limit_oid', \
'position_id', 'leverage', 'stop_order_qty', 'limit_order_qty'
def __init__(self, timeframe: float):
super().__init__(StrategyTrade.TRADE_MARGIN, timeframe)
self.create_ref_oid = None
self.stop_ref_oid = None
self.limit_ref_oid = None
self.create_oid = None # related entry order id
self.stop_oid = None # related stop order id
self.limit_oid = None # related limit order id
self.position_id = None # related informal position id
self.leverage = 1.0
self.stop_order_qty = 0.0 # if stop_oid then this is the qty placed on the stop order
self.limit_order_qty = 0.0 # if limit_oid then this is the qty placed on the limit order
def open(self, trader: Trader, instrument: Instrument, direction: int, order_type: int,
order_price: float, quantity: float, take_profit: float, stop_loss: float,
leverage: float = 1.0, hedging: Optional[bool] = None) -> bool:
"""
Open a position or buy an asset.
"""
if self._entry_state != StrategyTrade.STATE_NEW:
return False
order = Order(trader, instrument.market_id)
order.direction = direction
order.price = order_price
order.order_type = order_type
order.quantity = quantity
order.post_only = False
order.margin_trade = True
order.leverage = leverage
if hedging:
order.hedging = hedging
# generated a reference order id
trader.set_ref_order_id(order)
self.create_ref_oid = order.ref_order_id
self.dir = order.direction
self.op = order.price # retains the order price
self.oq = order.quantity # ordered quantity
self.tp = take_profit
self.sl = stop_loss
self.leverage = leverage
self._stats['entry-order-type'] = order.order_type
if trader.create_order(order, instrument) > 0:
# keep the related create position identifier if available
self.create_oid = order.order_id
self.position_id = order.position_id
if not self.eot and order.created_time:
# only at the first open
self.eot = order.created_time
return True
else:
self._entry_state = StrategyTrade.STATE_REJECTED
return False
def reopen(self, trader: Trader, instrument: Instrument, quantity: float) -> bool:
if self._entry_state != StrategyTrade.STATE_CANCELED:
return False
# reset
self._entry_state = StrategyTrade.STATE_NEW
self.eot = 0
order = Order(trader, instrument.market_id)
order.direction = self.dir
order.price = self.op
order.order_type = self._stats['entry-order-type']
order.quantity = quantity
order.post_only = False
order.margin_trade = True
order.leverage = self.leverage
# generated a reference order id
trader.set_ref_order_id(order)
self.create_ref_oid = order.ref_order_id
self.oq = order.quantity # ordered quantity
if trader.create_order(order, instrument) > 0:
self.create_oid = order.order_id
self.position_id = order.position_id
if not self.eot and order.created_time:
# only at the first open
self.eot = order.created_time
return True
else:
self._entry_state = StrategyTrade.STATE_REJECTED
return False
def remove(self, trader: Trader, instrument: Instrument) -> int:
"""
Remove the orders, but doesn't close the position.
"""
error = False
if self.create_oid:
# cancel the remaining buy order
if trader.cancel_order(self.create_oid, instrument) > 0:
self.create_ref_oid = None
self.create_oid = None
if self.e <= 0:
# no entry qty processed, entry canceled
self._entry_state = StrategyTrade.STATE_CANCELED
else:
# cancel a partially filled trade means it is then fully filled
self._entry_state = StrategyTrade.STATE_FILLED
else:
error = True
if self.stop_oid:
# cancel the stop order
if trader.cancel_order(self.stop_oid, instrument) > 0:
self.stop_ref_oid = None
self.stop_oid = None
self.stop_order_qty = 0.0
if self.e <= 0 and self.x <= 0:
# no exit qty
self._exit_state = StrategyTrade.STATE_CANCELED
elif self.x >= self.e:
self._exit_state = StrategyTrade.STATE_FILLED
else:
self._exit_state = StrategyTrade.STATE_PARTIALLY_FILLED
else:
error = True
if self.limit_oid:
# cancel the limit order
if trader.cancel_order(self.limit_oid, instrument) > 0:
self.limit_ref_oid = None
self.limit_oid = None
self.limit_order_qty = 0.0
if self.e <= 0 and self.x <= 0:
# no exit qty
self._exit_state = StrategyTrade.STATE_CANCELED
elif self.x >= self.e:
self._exit_state = StrategyTrade.STATE_FILLED
else:
self._exit_state = StrategyTrade.STATE_PARTIALLY_FILLED
else:
error = True
return not error
def cancel_open(self, trader: Trader, instrument: Instrument) -> int:
if self.create_oid:
# cancel the buy order
if trader.cancel_order(self.create_oid, instrument) > 0:
self.create_ref_oid = None
self.create_oid = None
if self.e <= 0:
# cancel a just opened trade means it is canceled
self._entry_state = StrategyTrade.STATE_CANCELED
else:
# cancel a partially filled trade means it is then fully filled
self._entry_state = StrategyTrade.STATE_FILLED
return self.ACCEPTED
else:
data = trader.order_info(self.create_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no create order, nothing to do
self.create_ref_oid = None
self.create_oid = None
self._entry_state = StrategyTrade.STATE_CANCELED
else:
# exists, do nothing need to retry
return self.ERROR
return self.NOTHING_TO_DO
def modify_take_profit(self, trader: Trader, instrument: Instrument, limit_price: float, hard: bool = True) -> int:
if self._closing:
# already closing order
return self.NOTHING_TO_DO
if self._exit_state == StrategyTrade.STATE_FILLED:
# exit already fully filled
return self.NOTHING_TO_DO
if self.limit_oid:
# cancel the limit order and create a new one
if trader.cancel_order(self.limit_oid, instrument) > 0:
self.limit_ref_oid = None
self.limit_oid = None
self.limit_order_qty = 0.0
else:
data = trader.order_info(self.limit_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no limit order
self.limit_ref_oid = None
self.limit_oid = None
self.limit_order_qty = 0.0
else:
return self.ERROR
if self.x >= self.e:
# all entry qty is filled, if lesser something wrong but its ok
return self.NOTHING_TO_DO
if limit_price and hard:
# only if filled entry partially or totally
order = Order(trader, instrument.market_id)
order.direction = -self.direction
order.order_type = Order.ORDER_LIMIT
order.reduce_only = True
order.quantity = self.e - self.x # remaining
order.price = limit_price
order.margin_trade = True
order.leverage = self.leverage
trader.set_ref_order_id(order)
self.limit_ref_oid = order.ref_order_id
self._stats['take-profit-order-type'] = order.order_type
create_order_result = trader.create_order(order, instrument)
if create_order_result > 0:
self.limit_oid = order.order_id
self.limit_order_qty = order.quantity
self.last_tp_ot[0] = order.created_time
self.last_tp_ot[1] += 1
self.tp = limit_price
return self.ACCEPTED
elif create_order_result == Order.REASON_INSUFFICIENT_MARGIN:
# rejected because not enough margin, must stop to retry
self.limit_ref_oid = None
self.limit_order_qty = 0.0
self._exit_state = self.STATE_ERROR
return self.INSUFFICIENT_MARGIN
else:
self.limit_ref_oid = None
self.limit_order_qty = 0.0
return self.REJECTED
elif limit_price:
# soft take-profit
self.tp = limit_price
else:
# remove take-profit
self.tp = 0.0
return self.NOTHING_TO_DO
def modify_stop_loss(self, trader: Trader, instrument: Instrument, stop_price: float, hard: bool = True) -> int:
if self._closing:
# already closing order
return self.NOTHING_TO_DO
if self._exit_state == StrategyTrade.STATE_FILLED:
# exit already fully filled
return self.NOTHING_TO_DO
if self.stop_oid:
# cancel the stop order and create a new one
if trader.cancel_order(self.stop_oid, instrument) > 0:
self.stop_ref_oid = None
self.stop_oid = None
self.stop_order_qty = 0.0
else:
data = trader.order_info(self.stop_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no stop order
self.stop_ref_oid = None
self.stop_oid = None
self.stop_order_qty = 0.0
else:
return self.ERROR
if self.x >= self.e:
# all entry qty is filled, if lesser something wrong but its ok
return self.NOTHING_TO_DO
if stop_price and hard:
# only if filled entry partially or totally
order = Order(trader, instrument.market_id)
order.direction = -self.direction
order.order_type = Order.ORDER_STOP
order.reduce_only = True
order.quantity = self.e - self.x # remaining
order.stop_price = stop_price
order.leverage = self.leverage
order.margin_trade = True
trader.set_ref_order_id(order)
self.stop_ref_oid = order.ref_order_id
self._stats['stop-order-type'] = order.order_type
create_order_result = trader.create_order(order, instrument)
if create_order_result > 0:
self.stop_oid = order.order_id
self.stop_order_qty = order.quantity
self.last_stop_ot[0] = order.created_time
self.last_stop_ot[1] += 1
self.sl = stop_price
return self.ACCEPTED
elif create_order_result == Order.REASON_INSUFFICIENT_MARGIN:
# rejected because not enough margin, must stop to retry
self.stop_ref_oid = None
self.stop_order_qty = 0.0
self._exit_state = self.STATE_ERROR
return self.INSUFFICIENT_MARGIN
else:
self.stop_ref_oid = None
self.stop_order_qty = 0.0
return self.REJECTED
elif stop_price:
# soft stop-loss
self.sl = stop_price
else:
# remove stop-loss
self.sl = 0.0
return self.NOTHING_TO_DO
def close(self, trader: Trader, instrument: Instrument) -> int:
"""
Close the position and cancel the related orders.
"""
if self._closing:
# already closing order
return self.NOTHING_TO_DO
if self.create_oid:
# cancel the remaining buy order
if trader.cancel_order(self.create_oid, instrument) > 0:
self.create_ref_oid = None
self.create_oid = None
self._entry_state = StrategyTrade.STATE_CANCELED
else:
data = trader.order_info(self.create_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no create order
self.create_ref_oid = None
self.create_oid = None
else:
return self.ERROR
if self.stop_oid:
# cancel the stop order
if trader.cancel_order(self.stop_oid, instrument) > 0:
self.stop_ref_oid = None
self.stop_oid = None
self.stop_order_qty = 0.0
else:
data = trader.order_info(self.stop_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no stop order
self.stop_ref_oid = None
self.stop_oid = None
self.stop_order_qty = 0.0
else:
return self.ERROR
if self.limit_oid:
# cancel the limit order
if trader.cancel_order(self.limit_oid, instrument) > 0:
self.limit_ref_oid = None
self.limit_oid = None
self.limit_order_qty = 0.0
else:
data = trader.order_info(self.limit_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no limit order
self.limit_ref_oid = None
self.limit_oid = None
self.limit_order_qty = 0.0
else:
return self.ERROR
if self.x >= self.e:
# all qty is filled
return self.NOTHING_TO_DO
order = Order(trader, instrument.market_id)
order.direction = -self.dir # neg dir
order.order_type = Order.ORDER_MARKET
order.reduce_only = True
order.quantity = self.e - self.x # remaining qty
order.margin_trade = True
order.leverage = self.leverage
# generated a reference order id
trader.set_ref_order_id(order)
self.stop_ref_oid = order.ref_order_id
self._stats['stop-order-type'] = order.order_type
create_order_result = trader.create_order(order, instrument)
if create_order_result > 0:
self.stop_oid = order.order_id
self.stop_order_qty = order.quantity
# closing order defined
self._closing = True
return self.ACCEPTED
elif create_order_result == Order.REASON_INSUFFICIENT_MARGIN:
# rejected because not enough margin, must stop to retry
self.stop_ref_oid = None
self.stop_order_qty = 0.0
self._exit_state = self.STATE_ERROR
return self.INSUFFICIENT_MARGIN
else:
self.stop_ref_oid = None
self.stop_order_qty = 0.0
return self.REJECTED
def has_stop_order(self) -> bool:
return self.stop_oid is not None and self.stop_oid != ""
def has_limit_order(self) -> bool:
return self.limit_oid is not None and self.limit_oid != ""
def support_both_order(self) -> bool:
return True
@classmethod
def is_margin(cls) -> bool:
return True
@classmethod
def is_spot(cls) -> bool:
return False
#
# signal
#
def order_signal(self, signal_type: int, data: dict, ref_order_id: str, instrument: Instrument):
if signal_type == Signal.SIGNAL_ORDER_OPENED:
# already get at the return of create_order
if ref_order_id == self.create_ref_oid:
self.create_oid = data['id']
# init created timestamp at the create order open
if not self.eot:
self.eot = data['timestamp']
if data.get('stop-loss'):
self.sl = data['stop-loss']
if data.get('take-profit'):
self.tp = data['take-profit']
self._entry_state = StrategyTrade.STATE_OPENED
elif ref_order_id == self.stop_ref_oid:
self.stop_oid = data['id']
if not self.xot:
self.xot = data['timestamp']
elif ref_order_id == self.limit_ref_oid:
self.limit_oid = data['id']
if not self.xot:
self.xot = data['timestamp']
elif signal_type == Signal.SIGNAL_ORDER_DELETED:
# order is no longer active
if data == self.create_oid:
self.create_ref_oid = None
self.create_oid = None
self._entry_state = StrategyTrade.STATE_DELETED
elif data == self.limit_oid:
self.limit_ref_oid = None
self.limit_oid = None
elif data == self.stop_oid:
self.stop_ref_oid = None
self.stop_oid = None
elif signal_type == Signal.SIGNAL_ORDER_CANCELED:
# order is no longer active
if data == self.create_oid:
self.create_ref_oid = None
self.create_oid = None
self._entry_state = StrategyTrade.STATE_CANCELED
elif data == self.limit_oid:
self.limit_ref_oid = None
self.limit_oid = None
elif data == self.stop_oid:
self.stop_ref_oid = None
self.stop_oid = None
elif signal_type == Signal.SIGNAL_ORDER_UPDATED:
# order price/qty modified, cannot really be used because the strategy might
# cancel the trade or create another one.
# for the qty we could have a remaining_qty member, then comparing
pass
elif signal_type == Signal.SIGNAL_ORDER_TRADED:
# order fully or partially filled
filled = 0
if data['id'] == self.create_oid:
prev_e = self.e
# a single order for the entry, then its OK and preferred to uses cumulative-filled and avg-price
# because precision comes from the broker
if data.get('cumulative-filled') is not None and data['cumulative-filled'] > 0:
filled = data['cumulative-filled'] - self.e # compute filled qty
elif data.get('filled') is not None and data['filled'] > 0:
filled = data['filled']
else:
filled = 0
if data.get('avg-price') is not None and data['avg-price'] > 0:
# in that case we have avg-price already computed
self.aep = data['avg-price']
elif data.get('exec-price') is not None and data['exec-price'] > 0:
# compute the average price
self.aep = ((self.aep * self.e) + (data['exec-price'] * filled)) / (self.e + filled)
else:
                    # none available, fall back to the order price
self.aep = self.op
# cumulative filled entry qty
if data.get('cumulative-filled') is not None:
self.e = data.get('cumulative-filled')
elif filled > 0:
self.e = instrument.adjust_quantity(self.e + filled)
if filled > 0:
# probably need to update exit orders
self._dirty = True
logger.info("Entry avg-price=%s cum-filled=%s" % (self.aep, self.e))
if self.e >= self.oq:
self._entry_state = StrategyTrade.STATE_FILLED
                    # if no ORDER_DELETED signal is sent, clean up here
self.create_oid = None
self.create_ref_oid = None
else:
self._entry_state = StrategyTrade.STATE_PARTIALLY_FILLED
# retains the trade timestamp
if not self._stats['first-realized-entry-timestamp']:
self._stats['first-realized-entry-timestamp'] = data.get('timestamp', 0.0)
self._stats['last-realized-entry-timestamp'] = data.get('timestamp', 0.0)
elif data['id'] == self.limit_oid or data['id'] == self.stop_oid:
prev_x = self.x
# either we have 'filled' component (partial qty) or the 'cumulative-filled' or both
if data.get('cumulative-filled') is not None and data['cumulative-filled'] > 0:
filled = data['cumulative-filled'] - self.x # computed filled qty
elif data.get('filled') is not None and data['filled'] > 0:
filled = data['filled']
else:
filled = 0
if data.get('avg-price') is not None and data['avg-price'] > 0:
# recompute profit-loss
if self.dir > 0:
self.pl = (data['avg-price'] - self.aep) / self.aep
elif self.dir < 0:
self.pl = (self.aep - data['avg-price']) / self.aep
# in that case we have avg-price already computed
self.axp = data['avg-price']
elif data.get('exec-price') is not None and data['exec-price'] > 0:
# increase/decrease profit/loss (over entry executed quantity)
if self.dir > 0:
self.pl += ((data['exec-price'] * filled) - (self.aep * filled)) / (self.aep * self.e)
elif self.dir < 0:
self.pl += ((self.aep * filled) - (data['exec-price'] * filled)) / (self.aep * self.e)
# compute the average price
self.axp = ((self.axp * self.x) + (data['exec-price'] * filled)) / (self.x + filled)
# cumulative filled exit qty
if data.get('cumulative-filled') is not None:
self.x = data.get('cumulative-filled')
elif filled > 0:
self.x = instrument.adjust_quantity(self.x + filled)
logger.info("Exit avg-price=%s cum-filled=%s" % (self.axp, self.x))
if self.x >= self.oq:
self._exit_state = StrategyTrade.STATE_FILLED
                    # if no ORDER_DELETED signal is sent, clean up here
if data['id'] == self.limit_oid:
self.limit_oid = None
self.limit_ref_oid = None
elif data['id'] == self.stop_oid:
self.stop_oid = None
self.stop_ref_oid = None
else:
self._exit_state = StrategyTrade.STATE_PARTIALLY_FILLED
# retains the trade timestamp
if not self._stats['first-realized-exit-timestamp']:
self._stats['first-realized-exit-timestamp'] = data.get('timestamp', 0.0)
self._stats['last-realized-exit-timestamp'] = data.get('timestamp', 0.0)
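    # Note on the fill bookkeeping above: when the broker reports 'cumulative-filled'
    # and 'avg-price', those exchange-side values are preferred because they carry the
    # broker's precision; otherwise the average price is maintained incrementally as
    #     new_avg = (prev_avg * prev_qty + exec_price * filled) / (prev_qty + filled)
    # e.g. an entry averaging 100.0 over qty 1.0 that receives a 0.5 fill at 106.0
    # moves to (100.0 * 1.0 + 106.0 * 0.5) / 1.5 = 102.0.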
def position_signal(self, signal_type: int, data: dict, ref_order_id: str, instrument: Instrument):
if signal_type == Signal.SIGNAL_POSITION_OPENED:
self.position_id = data['id']
if data.get('profit-loss'):
self._stats['unrealized-profit-loss'] = data['profit-loss']
if data.get('profit-currency'):
self._stats['profit-loss-currency'] = data['profit-currency']
elif signal_type == Signal.SIGNAL_POSITION_UPDATED:
# update the unrealized profit-loss in currency
if data.get('profit-loss'):
self._stats['unrealized-profit-loss'] = data['profit-loss']
if data.get('profit-currency'):
self._stats['profit-loss-currency'] = data['profit-currency']
elif signal_type == Signal.SIGNAL_POSITION_DELETED:
# no longer related position
self.position_id = None
if data.get('profit-loss'):
self._stats['unrealized-profit-loss'] = data['profit-loss']
if data.get('profit-currency'):
self._stats['profit-loss-currency'] = data['profit-currency']
elif signal_type == Signal.SIGNAL_POSITION_AMENDED:
            # might not occur
pass
def is_target_order(self, order_id: str, ref_order_id: str) -> bool:
if order_id and (order_id == self.create_oid or order_id == self.stop_oid or order_id == self.limit_oid):
return True
if ref_order_id and (ref_order_id == self.create_ref_oid or
ref_order_id == self.stop_ref_oid or
ref_order_id == self.limit_ref_oid):
return True
return False
def is_target_position(self, position_id: str, ref_order_id: str) -> bool:
if position_id and (position_id == self.position_id):
return True
if ref_order_id and (ref_order_id == self.create_ref_oid):
            return True
        return False
#
# persistence
#
def dumps(self) -> dict:
data = super().dumps()
data['create-ref-oid'] = self.create_ref_oid
data['stop-ref-oid'] = self.stop_ref_oid
data['limit-ref-oid'] = self.limit_ref_oid
data['create-oid'] = self.create_oid
data['stop-oid'] = self.stop_oid
data['limit-oid'] = self.limit_oid
data['position-id'] = self.position_id
data['stop-order-qty'] = self.stop_order_qty
data['limit-order-qty'] = self.limit_order_qty
return data
def loads(self, data: dict, strategy_trader: StrategyTrader,
context_builder: Optional[StrategyTraderContextBuilder] = None) -> bool:
if not super().loads(data, strategy_trader, context_builder):
return False
self.create_ref_oid = data.get('create-ref-oid')
self.stop_ref_oid = data.get('stop-ref-oid')
self.limit_ref_oid = data.get('limit-ref-oid')
self.create_oid = data.get('create-oid')
self.stop_oid = data.get('stop-oid')
self.limit_oid = data.get('limit-oid')
self.position_id = data.get('position-id')
self.stop_order_qty = data.get('stop-order-qty', 0.0)
self.limit_order_qty = data.get('limit-order-qty', 0.0)
return True
def check(self, trader: Trader, instrument: Instrument) -> int:
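        """Query the broker for the known entry/stop/limit orders and resync local state.

        Returns 1 if every known order could be checked, 0 if an order could no longer
        be found (its local id is cleared), and -1 on an API error (state is left
        unchanged so the check can be retried later).
        """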
result = 1
#
# entry
#
if self.create_oid:
data = trader.order_info(self.create_oid, instrument)
if data is None:
                # API error, do nothing, retry later
result = -1
# entry order error status
# self._entry_state = StrategyTrade.STATE_ERROR
else:
if data['id'] is None:
# cannot retrieve the order, wrong id
result = 0
# no longer entry order
self.create_oid = None
self.create_ref_oid = None
else:
if data['cumulative-filled'] > self.e or data['fully-filled']:
self.order_signal(Signal.SIGNAL_ORDER_TRADED, data, data['ref-id'], instrument)
if data['status'] in ('closed', 'deleted'):
self.order_signal(Signal.SIGNAL_ORDER_DELETED, data['id'], data['ref-id'], instrument)
elif data['status'] in ('expired', 'canceled'):
self.order_signal(Signal.SIGNAL_ORDER_CANCELED, data['id'], data['ref-id'], instrument)
#
# exit
#
if self.stop_oid:
data = trader.order_info(self.stop_oid, instrument)
if data is None:
                # API error, do nothing, retry later
result = -1
# exit order error status
# self._exit_state = StrategyTrade.STATE_ERROR
else:
if data['id'] is None:
# cannot retrieve the order, wrong id
result = 0
# no longer stop order
self.stop_oid = None
self.stop_ref_oid = None
else:
if data['cumulative-filled'] > self.x or data['fully-filled']:
self.order_signal(Signal.SIGNAL_ORDER_TRADED, data, data['ref-id'], instrument)
if data['status'] in ('closed', 'deleted'):
self.order_signal(Signal.SIGNAL_ORDER_DELETED, data['id'], data['ref-id'], instrument)
elif data['status'] in ('expired', 'canceled'):
self.order_signal(Signal.SIGNAL_ORDER_CANCELED, data['id'], data['ref-id'], instrument)
if self.limit_oid:
data = trader.order_info(self.limit_oid, instrument)
if data is None:
                # API error, do nothing, retry later
result = -1
# exit order error status
# self._exit_state = StrategyTrade.STATE_ERROR
else:
if data['id'] is None:
# cannot retrieve the order, wrong id
result = 0
                    # no longer limit order
self.limit_oid = None
self.limit_ref_oid = None
else:
if data['cumulative-filled'] > self.x or data['fully-filled']:
self.order_signal(Signal.SIGNAL_ORDER_TRADED, data, data['ref-id'], instrument)
if data['status'] in ('closed', 'deleted'):
self.order_signal(Signal.SIGNAL_ORDER_DELETED, data['id'], data['ref-id'], instrument)
elif data['status'] in ('expired', 'canceled'):
self.order_signal(Signal.SIGNAL_ORDER_CANCELED, data['id'], data['ref-id'], instrument)
return result
def repair(self, trader: Trader, instrument: Instrument) -> bool:
# @todo fix the trade
return False
#
# stats
#
def update_stats(self, instrument: Instrument, timestamp: float):
super().update_stats(instrument, timestamp)
if self.is_active():
# @todo support only for quantity in asset not in lot or contract of different size
last_price = instrument.close_exec_price(self.direction)
upnl = 0.0 # unrealized PNL
rpnl = 0.0 # realized PNL
# non realized quantity
nrq = self.e - self.x
if self.dir > 0:
upnl = last_price * nrq - self.aep * nrq
rpnl = self.axp * self.x - self.aep * self.x
elif self.dir < 0:
upnl = self.aep * nrq - last_price * nrq
rpnl = self.aep * self.x - self.axp * self.x
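            # e.g. a long with aep=100.0, e=2.0, x=0.5, axp=110.0 and last_price=105.0
            # gives nrq=1.5, upnl = 105.0*1.5 - 100.0*1.5 = 7.5 and rpnl = 110.0*0.5 - 100.0*0.5 = 5.0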
# including fees and realized profit and loss
self._stats['unrealized-profit-loss'] = instrument.adjust_quote(
upnl + rpnl - self._stats['entry-fees'] - self._stats['exit-fees'])
def info_report(self, strategy_trader: StrategyTrader) -> Tuple[str]:
data = list(super().info_report(strategy_trader))
if self.create_oid or self.create_ref_oid:
data.append("Entry order id / ref : %s / %s" % (self.create_oid, self.create_ref_oid))
if self.stop_oid or self.stop_ref_oid:
data.append("Stop order id / ref : %s / %s" % (self.stop_oid, self.stop_ref_oid))
if self.limit_oid or self.limit_ref_oid:
data.append("Limit order id / ref : %s / %s" % (self.limit_oid, self.limit_ref_oid))
if self.position_id:
data.append("Position id : %s" % (self.position_id,))
return tuple(data)
| en | 0.849229 | # @date 2018-12-28 # @author <NAME>, All rights reserved without prejudices. # @license Copyright (c) 2018 Dream Overflow # Strategy trade for margin with multiples positions. Specialization for margin trading. This type of trade is related to margin trading market, allowing or not hedging, where there is a position identifier per trade, but generally in the same direction (no hedging). Works with crypto margin brokers (kraken...). @todo do we need like with asset trade an exit_trades list to compute the axp and x values, because if we use cumulative-filled and avg-price we have the same problem here too. @todo have to check about position_updated qty with direction maybe or take care to have trade signal and distinct entry from exit @todo fees and commissions # related entry order id # related stop order id # related limit order id # related informal position id # if stop_oid then this is the qty placed on the stop order # if limit_oid then this is the qty placed on the limit order Open a position or buy an asset. # generated a reference order id # retains the order price # ordered quantity # keep the related create position identifier if available # only at the first open # reset # generated a reference order id # ordered quantity # only at the first open Remove the orders, but doesn't close the position. # cancel the remaining buy order # no entry qty processed, entry canceled # cancel a partially filled trade means it is then fully filled # cancel the stop order # no exit qty # cancel the limit order # no exit qty # cancel the buy order # cancel a just opened trade means it is canceled # cancel a partially filled trade means it is then fully filled # API error, do nothing need retry # cannot retrieve the order, wrong id, no create order, nothing to do # exists, do nothing need to retry # already closing order # exit already fully filled # cancel the limit order and create a new one # API error, do nothing need retry # cannot retrieve the order, wrong id, no limit order # all entry qty is filled, if lesser something wrong but its ok # only if filled entry partially or totally # remaining # rejected because not enough margin, must stop to retry # soft take-profit # remove take-profit # already closing order # exit already fully filled # cancel the stop order and create a new one # API error, do nothing need retry # cannot retrieve the order, wrong id, no stop order # all entry qty is filled, if lesser something wrong but its ok # only if filled entry partially or totally # remaining # rejected because not enough margin, must stop to retry # soft stop-loss # remove stop-loss Close the position and cancel the related orders. # already closing order # cancel the remaining buy order # API error, do nothing need retry # cannot retrieve the order, wrong id, no create order # cancel the stop order # API error, do nothing need retry # cannot retrieve the order, wrong id, no stop order # cancel the limit order # API error, do nothing need retry # cannot retrieve the order, wrong id, no limit order # all qty is filled # neg dir # remaining qty # generated a reference order id # closing order defined # rejected because not enough margin, must stop to retry # # signal # # already get at the return of create_order # init created timestamp at the create order open # order is no longer active # order is no longer active # order price/qty modified, cannot really be used because the strategy might # cancel the trade or create another one. 
# for the qty we could have a remaining_qty member, then comparing # order fully or partially filled # a single order for the entry, then its OK and preferred to uses cumulative-filled and avg-price # because precision comes from the broker # compute filled qty # in that case we have avg-price already computed # compute the average price # no have uses order price # cumulative filled entry qty # probably need to update exit orders # if no send of ORDER_DELETED signal, cleanup here # retains the trade timestamp # either we have 'filled' component (partial qty) or the 'cumulative-filled' or both # computed filled qty # recompute profit-loss # in that case we have avg-price already computed # increase/decrease profit/loss (over entry executed quantity) # compute the average price # cumulative filled exit qty # if no send of ORDER_DELETED signal, cleanup here # retains the trade timestamp # update the unrealized profit-loss in currency # no longer related position # might not occurs # # persistence # # # entry # # API error, do nothing need retry # entry order error status # self._entry_state = StrategyTrade.STATE_ERROR # cannot retrieve the order, wrong id # no longer entry order # # exit # # API error, do nothing need retry # exit order error status # self._exit_state = StrategyTrade.STATE_ERROR # cannot retrieve the order, wrong id # no longer stop order # API error, do nothing need retry # exit order error status # self._exit_state = StrategyTrade.STATE_ERROR # cannot retrieve the order, wrong id # no longer stop order # @todo fix the trade # # stats # # @todo support only for quantity in asset not in lot or contract of different size # unrealized PNL # realized PNL # non realized quantity # including fees and realized profit and loss | 2.41375 | 2 |
src/pyramid_debugtoolbar_api_sqlalchemy/__init__.py | jvanasco/pyramid_debugtoolbar_api_sqla | 0 | 10596 | <gh_stars>0
# local
from .panels import SqlalchemyCsvDebugPanel
__VERSION__ = "0.3.1"
# ==============================================================================
def includeme(config):
"""
Pyramid hook to install this debugtoolbar plugin.
Update your ENVIRONMENT.ini file
debugtoolbar.includes = pyramid_debugtoolbar_api_sqlalchemy
"""
config.add_debugtoolbar_panel(SqlalchemyCsvDebugPanel)
config.add_route(
"debugtoolbar.api_sqlalchemy.queries.csv",
"/api-sqlalchemy/sqlalchemy-{request_id}.csv",
)
config.scan("pyramid_debugtoolbar_api_sqlalchemy.views")
config.commit()
# ==============================================================================
| # local
from .panels import SqlalchemyCsvDebugPanel
__VERSION__ = "0.3.1"
# ==============================================================================
def includeme(config):
"""
Pyramid hook to install this debugtoolbar plugin.
Update your ENVIRONMENT.ini file
debugtoolbar.includes = pyramid_debugtoolbar_api_sqlalchemy
"""
config.add_debugtoolbar_panel(SqlalchemyCsvDebugPanel)
config.add_route(
"debugtoolbar.api_sqlalchemy.queries.csv",
"/api-sqlalchemy/sqlalchemy-{request_id}.csv",
)
config.scan("pyramid_debugtoolbar_api_sqlalchemy.views")
config.commit()
# ============================================================================== | en | 0.311987 | # local # ============================================================================== Pyramid hook to install this debugtoolbar plugin. Update your ENVIRONMENT.ini file debugtoolbar.includes = pyramid_debugtoolbar_api_sqlalchemy # ============================================================================== | 1.979528 | 2 |
ultron/utilities/zlib_engine.py | wangjiehui11235/ultron | 4 | 10597 | <filename>ultron/utilities/zlib_engine.py
# -*- coding: utf-8 -*-
import os,os.path
import zipfile
def zip_compress(dir_name, zip_filename):
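    """Zip dir_name (a single file or a whole directory tree) into zip_filename,
    using DEFLATE compression; archive names are stored relative to dir_name."""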
filelist = []
if os.path.isfile(dir_name):
filelist.append(dir_name)
else :
for root, dirs, files in os.walk(dir_name):
for name in files:
filelist.append(os.path.join(root, name))
    zf = zipfile.ZipFile(zip_filename, "w", zipfile.ZIP_DEFLATED)
for tar in filelist:
arcname = tar[len(dir_name):]
zf.write(tar,arcname)
zf.close()
def unzip_compress(zip_filename, unzip_dir):
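    """Extract zip_filename into unzip_dir, creating unzip_dir and each entry's
    parent directory if missing."""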
if not os.path.exists(unzip_dir):
os.mkdir(unzip_dir)
zfobj = zipfile.ZipFile(zip_filename)
for name in zfobj.namelist():
name = name.replace('\\','/')
if name.endswith('/'):
os.mkdir(os.path.join(unzip_dir, name))
else:
ext_filename = os.path.join(unzip_dir, name)
ext_dir= os.path.dirname(ext_filename)
if not os.path.exists(ext_dir) :
os.mkdir(ext_dir)
outfile = open(ext_filename, 'wb')
outfile.write(zfobj.read(name))
outfile.close() | <filename>ultron/utilities/zlib_engine.py
# -*- coding: utf-8 -*-
import os,os.path
import zipfile
def zip_compress(dir_name, zip_filename):
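    """Zip dir_name (a single file or a whole directory tree) into zip_filename,
    using DEFLATE compression; archive names are stored relative to dir_name."""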
filelist = []
if os.path.isfile(dir_name):
filelist.append(dir_name)
else :
for root, dirs, files in os.walk(dir_name):
for name in files:
filelist.append(os.path.join(root, name))
    zf = zipfile.ZipFile(zip_filename, "w", zipfile.ZIP_DEFLATED)
for tar in filelist:
arcname = tar[len(dir_name):]
zf.write(tar,arcname)
zf.close()
def unzip_compress(zip_filename, unzip_dir):
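    """Extract zip_filename into unzip_dir, creating unzip_dir and each entry's
    parent directory if missing."""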
if not os.path.exists(unzip_dir):
os.mkdir(unzip_dir)
zfobj = zipfile.ZipFile(zip_filename)
for name in zfobj.namelist():
name = name.replace('\\','/')
if name.endswith('/'):
os.mkdir(os.path.join(unzip_dir, name))
else:
ext_filename = os.path.join(unzip_dir, name)
ext_dir= os.path.dirname(ext_filename)
if not os.path.exists(ext_dir) :
os.mkdir(ext_dir)
outfile = open(ext_filename, 'wb')
outfile.write(zfobj.read(name))
outfile.close() | en | 0.769321 | # -*- coding: utf-8 -*- | 2.810631 | 3 |
scripts/instances2inventory.py | TipaZloy/coda-automation | 0 | 10598 | <gh_stars>0
#!/usr/bin/env python3
import boto
import boto.ec2
import sys
from pprint import pprint
from collections import defaultdict
output = defaultdict(lambda: [])
comments = defaultdict(lambda: {})
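# output maps an Ansible-style group name to the public DNS names of its instances;
# comments keeps a per-host annotation (Name tag, region, type, IP, launch time)
# appended to each inventory line.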
skip_region_strings = ['us-gov', 'cn-', 'ca-']
#skip_region_strings = ['us-gov', 'cn-', 'ca-', 'eu-', 'ap-']
if len(sys.argv) > 1:
filter = sys.argv[1]
else:
filter = False
regions = boto.ec2.regions()
for region in regions:
if any (skip_string in region.name for skip_string in skip_region_strings):
continue
print('# Querying region:', region)
ec2conn = boto.connect_ec2(region=region)
reservations = ec2conn.get_all_instances()
instances = [i for r in reservations for i in r.instances]
for i in instances:
if filter:
if 'Name' in i.tags:
if filter not in i.tags['Name']:
continue
if 'running' not in i.state:
continue
if 'Name' in i.tags:
if 'Packer' in i.tags['Name']: continue
if i.tags['Name'].count('_') == 2:
try:
(net, group, num) = i.tags['Name'].split('_')
myregion = region.name
except:
print('Error parsing ', i.tags['Name'])
continue
elif i.tags['Name'].count('_') == 3:
try:
(net, myregion, group, num) = i.tags['Name'].split('_')
except:
print('Error parsing ', i.tags['Name'])
continue
groupname = "%ss" % group
else:
print('NONAME', end='')
groupname = 'unknown'
i.tags['Name'] = 'NONE'
output[groupname].append(i.public_dns_name)
try:
comments[groupname][i.public_dns_name] = "# %s\t%s\t%s\t%s\t%s" % (i.tags['Name'], myregion, i.instance_type, i.ip_address, i.launch_time)
except:
comments[groupname][i.public_dns_name] = "# MISSING DATA"
for group in output:
print("[%s]" % group)
hostlist = output[group]
hostlist.sort()
for host in hostlist:
print("%s \t%s" % (host, comments[group][host]))
print("\n")
| #!/usr/bin/env python3
import boto
import boto.ec2
import sys
from pprint import pprint
from collections import defaultdict
output = defaultdict(lambda: [])
comments = defaultdict(lambda: {})
skip_region_strings = ['us-gov', 'cn-', 'ca-']
#skip_region_strings = ['us-gov', 'cn-', 'ca-', 'eu-', 'ap-']
if len(sys.argv) > 1:
filter = sys.argv[1]
else:
filter = False
regions = boto.ec2.regions()
for region in regions:
if any (skip_string in region.name for skip_string in skip_region_strings):
continue
print('# Querying region:', region)
ec2conn = boto.connect_ec2(region=region)
reservations = ec2conn.get_all_instances()
instances = [i for r in reservations for i in r.instances]
for i in instances:
if filter:
if 'Name' in i.tags:
if filter not in i.tags['Name']:
continue
if 'running' not in i.state:
continue
if 'Name' in i.tags:
if 'Packer' in i.tags['Name']: continue
if i.tags['Name'].count('_') == 2:
try:
(net, group, num) = i.tags['Name'].split('_')
myregion = region.name
except:
print('Error parsing ', i.tags['Name'])
continue
elif i.tags['Name'].count('_') == 3:
try:
(net, myregion, group, num) = i.tags['Name'].split('_')
except:
print('Error parsing ', i.tags['Name'])
continue
groupname = "%ss" % group
else:
print('NONAME', end='')
groupname = 'unknown'
i.tags['Name'] = 'NONE'
output[groupname].append(i.public_dns_name)
try:
comments[groupname][i.public_dns_name] = "# %s\t%s\t%s\t%s\t%s" % (i.tags['Name'], myregion, i.instance_type, i.ip_address, i.launch_time)
except:
comments[groupname][i.public_dns_name] = "# MISSING DATA"
for group in output:
print("[%s]" % group)
hostlist = output[group]
hostlist.sort()
for host in hostlist:
print("%s \t%s" % (host, comments[group][host]))
print("\n") | en | 0.085329 | #!/usr/bin/env python3 #skip_region_strings = ['us-gov', 'cn-', 'ca-', 'eu-', 'ap-'] | 2.644648 | 3 |
selfdrive/crash.py | darknight111/openpilot3 | 19 | 10599 | """Install exception handler for process crash."""
from selfdrive.swaglog import cloudlog
from selfdrive.version import version
import sentry_sdk
from sentry_sdk.integrations.threading import ThreadingIntegration
def capture_exception(*args, **kwargs) -> None:
cloudlog.error("crash", exc_info=kwargs.get('exc_info', 1))
try:
sentry_sdk.capture_exception(*args, **kwargs)
sentry_sdk.flush() # https://github.com/getsentry/sentry-python/issues/291
except Exception:
cloudlog.exception("sentry exception")
def bind_user(**kwargs) -> None:
sentry_sdk.set_user(kwargs)
def bind_extra(**kwargs) -> None:
for k, v in kwargs.items():
sentry_sdk.set_tag(k, v)
def init() -> None:
sentry_sdk.init("https://[email protected]/5861866",
default_integrations=False, integrations=[ThreadingIntegration(propagate_hub=True)],
release=version)
| """Install exception handler for process crash."""
from selfdrive.swaglog import cloudlog
from selfdrive.version import version
import sentry_sdk
from sentry_sdk.integrations.threading import ThreadingIntegration
def capture_exception(*args, **kwargs) -> None:
cloudlog.error("crash", exc_info=kwargs.get('exc_info', 1))
try:
sentry_sdk.capture_exception(*args, **kwargs)
sentry_sdk.flush() # https://github.com/getsentry/sentry-python/issues/291
except Exception:
cloudlog.exception("sentry exception")
def bind_user(**kwargs) -> None:
sentry_sdk.set_user(kwargs)
def bind_extra(**kwargs) -> None:
for k, v in kwargs.items():
sentry_sdk.set_tag(k, v)
def init() -> None:
sentry_sdk.init("https://[email protected]/5861866",
default_integrations=False, integrations=[ThreadingIntegration(propagate_hub=True)],
release=version)
| en | 0.776766 | Install exception handler for process crash. # https://github.com/getsentry/sentry-python/issues/291 | 2.036558 | 2 |