max_stars_repo_path (string, 4-286 chars) | max_stars_repo_name (string, 5-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.03M chars) | content_cleaned (string, 6-1.03M chars) | language (string, 111 classes) | language_score (float64, 0.03-1) | comments (string, 0-556k chars) | edu_score (float64, 0.32-5.03) | edu_int_score (int64, 0-5) |
---|---|---|---|---|---|---|---|---|---|---|
OLD/karma_module/text.py | alentoghostflame/StupidAlentoBot | 1 | 8800 | <gh_stars>1-10
ADDED_KARMA_TO_MEMBER = "Gave {} karma to {}, their karma is now at {}."
REMOVED_KARMA_FROM_MEMBER = "Removed {} karma from {}, their karma is now at {}."
LIST_KARMA_OWN = "You currently have {} karma."
LIST_KARMA_OBJECT = "\"{}\" currently has {} karma."
LIST_KARMA_MEMBER = "{} currently has {} karma."
KARMA_TOP_START = "Top karma in server:\n"
KARMA_TOP_FORMAT = "{}. {} \\| {}\n"
| ADDED_KARMA_TO_MEMBER = "Gave {} karma to {}, their karma is now at {}."
REMOVED_KARMA_FROM_MEMBER = "Removed {} karma from {}, their karma is now at {}."
LIST_KARMA_OWN = "You currently have {} karma."
LIST_KARMA_OBJECT = "\"{}\" currently has {} karma."
LIST_KARMA_MEMBER = "{} currently has {} karma."
KARMA_TOP_START = "Top karma in server:\n"
KARMA_TOP_FORMAT = "{}. {} \\| {}\n" | none | 1 | 1.450052 | 1 |
|
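The row above is a plain module of str.format templates; a minimal usage sketch follows, where the import path is assumed from the repo layout and the name and numbers are made up purely to show the placeholder order.

```python
from karma_module import text  # assumed import path based on the repo path above

# Hypothetical values, only to illustrate the placeholder order.
print(text.ADDED_KARMA_TO_MEMBER.format(5, "Alice", 42))
# -> Gave 5 karma to Alice, their karma is now at 42.
print(text.KARMA_TOP_START + text.KARMA_TOP_FORMAT.format(1, "Alice", 42), end="")
# -> Top karma in server:
#    1. Alice \| 42
```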
read_delphin_data.py | anssilaukkarinen/mry-cluster2 | 0 | 8801 | <filename>read_delphin_data.py
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 6 14:51:24 2021
@author: laukkara
This script is run first to fetch results data from university's network drive
"""
import os
import pickle
input_folder_for_Delphin_data = r'S:\91202_Rakfys_Mallinnus\RAMI\simulations'
output_folder = os.path.join(r'C:\Local\laukkara\Data\github\mry-cluster2\input')
output_pickle_file_name = 'S_RAMI.pickle'
## Preparations
if not os.path.exists(output_folder):
os.makedirs(output_folder)
output_pickle_file_path = os.path.join(output_folder,
output_pickle_file_name)
## Read in results data from pickle files
cases = {}
data = {}
cases = os.listdir(input_folder_for_Delphin_data)
cases.remove('olds')
cases.remove('RAMI_simulated_cases.xlsx')
data = {}
for case in cases:
print('Reading:', case)
fname = os.path.join(input_folder_for_Delphin_data, case, 'd.pickle')
with open(fname, 'rb') as f:
try:
df = pickle.load(f)
if df.shape[0] == 1200:
data[case] = df
else:
print('ERROR AT:', case)
        except Exception:
print('Error when reading case:', case)
print(data[cases[0]].columns)
with open(output_pickle_file_path, 'wb') as f:
pickle.dump(data, f)
| <filename>read_delphin_data.py
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 6 14:51:24 2021
@author: laukkara
This script is run first to fetch results data from university's network drive
"""
import os
import pickle
input_folder_for_Delphin_data = r'S:\91202_Rakfys_Mallinnus\RAMI\simulations'
output_folder = os.path.join(r'C:\Local\laukkara\Data\github\mry-cluster2\input')
output_pickle_file_name = 'S_RAMI.pickle'
## Preparations
if not os.path.exists(output_folder):
os.makedirs(output_folder)
output_pickle_file_path = os.path.join(output_folder,
output_pickle_file_name)
## Read in results data from pickle files
cases = {}
data = {}
cases = os.listdir(input_folder_for_Delphin_data)
cases.remove('olds')
cases.remove('RAMI_simulated_cases.xlsx')
data = {}
for case in cases:
print('Reading:', case)
fname = os.path.join(input_folder_for_Delphin_data, case, 'd.pickle')
with open(fname, 'rb') as f:
try:
df = pickle.load(f)
if df.shape[0] == 1200:
data[case] = df
else:
print('ERROR AT:', case)
        except Exception:
print('Error when reading case:', case)
print(data[cases[0]].columns)
with open(output_pickle_file_path, 'wb') as f:
pickle.dump(data, f)
| en | 0.819867 | # -*- coding: utf-8 -*- Created on Mon Dec 6 14:51:24 2021 @author: laukkara This script is run first to fetch results data from university's network drive ## Preparations ## Read in results data from pickle files | 2.664645 | 3 |
api/config.py | sumesh-aot/namex | 1 | 8802 | """Config for initializing the namex-api."""
import os
from dotenv import find_dotenv, load_dotenv
# this will load all the envars from a .env file located in the project root (api)
load_dotenv(find_dotenv())
CONFIGURATION = {
'development': 'config.DevConfig',
'testing': 'config.TestConfig',
'production': 'config.Config',
'default': 'config.Config'
}
class Config(object):
"""Base config (also production config)."""
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
SECRET_KEY = 'a secret'
SQLALCHEMY_TRACK_MODIFICATIONS = False
NRO_SERVICE_ACCOUNT = os.getenv('NRO_SERVICE_ACCOUNT', 'nro_service_account')
SOLR_BASE_URL = os.getenv('SOLR_BASE_URL', None)
SOLR_SYNONYMS_API_URL = os.getenv('SOLR_SYNONYMS_API_URL', None)
NRO_EXTRACTOR_URI = os.getenv('NRO_EXTRACTOR_URI', None)
AUTO_ANALYZE_URL = os.getenv('AUTO_ANALYZE_URL', None)
AUTO_ANALYZE_CONFIG = os.getenv('AUTO_ANALYZE_CONFIG', None)
REPORT_SVC_URL = os.getenv('REPORT_SVC_URL', None)
REPORT_TEMPLATE_PATH = os.getenv('REPORT_PATH', 'report-templates')
ALEMBIC_INI = 'migrations/alembic.ini'
# POSTGRESQL
DB_USER = os.getenv('DATABASE_USERNAME', '')
DB_PASSWORD = os.getenv('DATABASE_PASSWORD', '')
DB_NAME = os.getenv('DATABASE_NAME', '')
DB_HOST = os.getenv('DATABASE_HOST', '')
DB_PORT = os.getenv('DATABASE_PORT', '5432')
SQLALCHEMY_DATABASE_URI = 'postgresql://{user}:{password}@{host}:{port}/{name}'.format(
user=DB_USER,
password=<PASSWORD>,
host=DB_HOST,
port=int(DB_PORT),
name=DB_NAME
)
# ORACLE - LEGACY NRO NAMESDB
NRO_USER = os.getenv('NRO_USER', '')
NRO_SCHEMA = os.getenv('NRO_SCHEMA', None)
NRO_PASSWORD = os.getenv('NRO_PASSWORD', '')
NRO_DB_NAME = os.getenv('NRO_DB_NAME', '')
NRO_HOST = os.getenv('NRO_HOST', '')
NRO_PORT = int(os.getenv('NRO_PORT', '1521'))
# JWT_OIDC Settings
JWT_OIDC_WELL_KNOWN_CONFIG = os.getenv('JWT_OIDC_WELL_KNOWN_CONFIG')
JWT_OIDC_ALGORITHMS = os.getenv('JWT_OIDC_ALGORITHMS')
JWT_OIDC_JWKS_URI = os.getenv('JWT_OIDC_JWKS_URI')
JWT_OIDC_ISSUER = os.getenv('JWT_OIDC_ISSUER')
JWT_OIDC_AUDIENCE = os.getenv('JWT_OIDC_AUDIENCE')
JWT_OIDC_CLIENT_SECRET = os.getenv('JWT_OIDC_CLIENT_SECRET')
JWT_OIDC_CACHING_ENABLED = os.getenv('JWT_OIDC_CACHING_ENABLED')
JWT_OIDC_JWKS_CACHE_TIMEOUT = int(os.getenv('JWT_OIDC_JWKS_CACHE_TIMEOUT', '300'))
    TESTING = False
DEBUG = False
# You can disable NRO updates for Name Requests by setting the variable in your .env / OpenShift configuration
DISABLE_NAMEREQUEST_NRO_UPDATES = int(os.getenv('DISABLE_NAMEREQUEST_NRO_UPDATES', 0))
DISABLE_NAMEREQUEST_SOLR_UPDATES = int(os.getenv('DISABLE_NAMEREQUEST_SOLR_UPDATES', 0))
class DevConfig(Config):
"""Dev config used for development."""
    TESTING = False
DEBUG = True
# We can't run NRO locally unless you're provisioned, you can disable NRO updates for Name Requests by setting the variable in your .env
DISABLE_NAMEREQUEST_NRO_UPDATES = int(os.getenv('DISABLE_NAMEREQUEST_NRO_UPDATES', 0))
DISABLE_NAMEREQUEST_SOLR_UPDATES = int(os.getenv('DISABLE_NAMEREQUEST_SOLR_UPDATES', 0))
class TestConfig(Config):
"""Test config used for pytests."""
DEBUG = True
TESTING = True
# POSTGRESQL
DB_USER = os.getenv('DATABASE_TEST_USERNAME', '')
DB_PASSWORD = os.getenv('DATABASE_TEST_PASSWORD', '')
DB_NAME = os.getenv('DATABASE_TEST_NAME', '')
DB_HOST = os.getenv('DATABASE_TEST_HOST', '')
DB_PORT = os.getenv('DATABASE_TEST_PORT', '5432')
# Allows for NRO add / update bypass if necessary (for local development)
LOCAL_DEV_MODE = os.getenv('LOCAL_DEV_MODE', False)
# Set this in your .env to debug SQL Alchemy queries (for local development)
SQLALCHEMY_ECHO = 'debug' if os.getenv('DEBUG_SQL_QUERIES', False) else False
SQLALCHEMY_DATABASE_URI = 'postgresql://{user}:{password}@{host}:{port}/{name}'.format(
user=DB_USER,
password=<PASSWORD>,
host=DB_HOST,
port=int(DB_PORT),
name=DB_NAME
)
# We can't run NRO locally for running our tests
DISABLE_NAMEREQUEST_NRO_UPDATES = int(os.getenv('DISABLE_NAMEREQUEST_NRO_UPDATES', 1))
DISABLE_NAMEREQUEST_SOLR_UPDATES = int(os.getenv('DISABLE_NAMEREQUEST_SOLR_UPDATES', 0))
# JWT OIDC settings
# JWT_OIDC_TEST_MODE will set jwt_manager to use
JWT_OIDC_TEST_MODE = True
JWT_OIDC_TEST_AUDIENCE = 'example'
JWT_OIDC_TEST_ISSUER = 'https://example.localdomain/auth/realms/example'
JWT_OIDC_TEST_KEYS = {
'keys': [
{
'kid': 'flask-jwt-oidc-test-client',
'kty': 'RSA',
'alg': 'RS256',
'use': 'sig',
'n': '<KEY>', # noqa: E501
'e': 'AQAB'
}
]
}
JWT_OIDC_TEST_PRIVATE_KEY_JWKS = {
'keys': [
{
'kid': 'flask-jwt-oidc-test-client',
'kty': 'RSA',
'alg': 'RS256',
'use': 'sig',
'n': '<KEY>', # noqa: E501
'e': 'AQAB',
'd': '<KEY>', # noqa: E501
'p': '<KEY>',
'q': '<KEY>',
'dp': '<KEY>',
'dq': '<KEY>',
'qi': '<KEY>'
}
]
}
JWT_OIDC_TEST_PRIVATE_KEY_PEM = """
-----BEGIN RSA PRIVATE KEY-----
<KEY>
ddj7hucMsXWv05QUrCPoL6YUUz7Cgpz7ra24rpAmK5z7lsV+f3BEvXkrUQIDAQAB
AoGAC0G3QGI6OQ6tvbCNYGCqq043YI/8MiBl7C5dqbGZmx1ewdJBhMNJPStuckhs
kURaDwk4+8VBW9SlvcfSJJrnZhgFMjOYSSsBtPGBIMIdM5eSKbenCCjO8Tg0BUh/
xa3CHST1W4RQ5rFXadZ9AeNtaGcWj2acmXNO3DVETXAX3x0CQQD13LrBTEDR44ei
lQ/4TlCMPO5bytd1pAxHnrqgMnWovSIPSShAAH1feFugH7ZGu7RoBO7pYNb6N3ia
C1idc7yjAkEA6Nfc6c8meTRkVRAHCF24LB5GLfsjoMB0tOeEO9w9Ous1a4o+D24b
AePMUImAp3woFoNDRfWtlNktOqLel5PjewJBAN9kBoA5o6/Rl9zeqdsIdWFmv4DB
5lEqlEnC7HlAP+3oo3jWFO9KQqArQL1V8w2D4aCd0uJULiC9pCP7aTHvBhcCQQDb
W0mOp436T6ZaELBfbFNulNLOzLLi5YzNRPLppfG1SRNZjbIrvTIKVL4N/YxLvQbT
NrQw+2OdQACBJiEHsdZzAkBcsTk7frTH4yGx0VfHxXDPjfTj4wmD6gZIlcIr9lZg
4H8UZcVFN95vEKxJiLRjAmj6g273pu9kK4ymXNEjWWJn
-----END RSA PRIVATE KEY-----"""
| """Config for initializing the namex-api."""
import os
from dotenv import find_dotenv, load_dotenv
# this will load all the envars from a .env file located in the project root (api)
load_dotenv(find_dotenv())
CONFIGURATION = {
'development': 'config.DevConfig',
'testing': 'config.TestConfig',
'production': 'config.Config',
'default': 'config.Config'
}
class Config(object):
"""Base config (also production config)."""
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
SECRET_KEY = 'a secret'
SQLALCHEMY_TRACK_MODIFICATIONS = False
NRO_SERVICE_ACCOUNT = os.getenv('NRO_SERVICE_ACCOUNT', 'nro_service_account')
SOLR_BASE_URL = os.getenv('SOLR_BASE_URL', None)
SOLR_SYNONYMS_API_URL = os.getenv('SOLR_SYNONYMS_API_URL', None)
NRO_EXTRACTOR_URI = os.getenv('NRO_EXTRACTOR_URI', None)
AUTO_ANALYZE_URL = os.getenv('AUTO_ANALYZE_URL', None)
AUTO_ANALYZE_CONFIG = os.getenv('AUTO_ANALYZE_CONFIG', None)
REPORT_SVC_URL = os.getenv('REPORT_SVC_URL', None)
REPORT_TEMPLATE_PATH = os.getenv('REPORT_PATH', 'report-templates')
ALEMBIC_INI = 'migrations/alembic.ini'
# POSTGRESQL
DB_USER = os.getenv('DATABASE_USERNAME', '')
DB_PASSWORD = os.getenv('DATABASE_PASSWORD', '')
DB_NAME = os.getenv('DATABASE_NAME', '')
DB_HOST = os.getenv('DATABASE_HOST', '')
DB_PORT = os.getenv('DATABASE_PORT', '5432')
SQLALCHEMY_DATABASE_URI = 'postgresql://{user}:{password}@{host}:{port}/{name}'.format(
user=DB_USER,
password=<PASSWORD>,
host=DB_HOST,
port=int(DB_PORT),
name=DB_NAME
)
# ORACLE - LEGACY NRO NAMESDB
NRO_USER = os.getenv('NRO_USER', '')
NRO_SCHEMA = os.getenv('NRO_SCHEMA', None)
NRO_PASSWORD = os.getenv('NRO_PASSWORD', '')
NRO_DB_NAME = os.getenv('NRO_DB_NAME', '')
NRO_HOST = os.getenv('NRO_HOST', '')
NRO_PORT = int(os.getenv('NRO_PORT', '1521'))
# JWT_OIDC Settings
JWT_OIDC_WELL_KNOWN_CONFIG = os.getenv('JWT_OIDC_WELL_KNOWN_CONFIG')
JWT_OIDC_ALGORITHMS = os.getenv('JWT_OIDC_ALGORITHMS')
JWT_OIDC_JWKS_URI = os.getenv('JWT_OIDC_JWKS_URI')
JWT_OIDC_ISSUER = os.getenv('JWT_OIDC_ISSUER')
JWT_OIDC_AUDIENCE = os.getenv('JWT_OIDC_AUDIENCE')
JWT_OIDC_CLIENT_SECRET = os.getenv('JWT_OIDC_CLIENT_SECRET')
JWT_OIDC_CACHING_ENABLED = os.getenv('JWT_OIDC_CACHING_ENABLED')
JWT_OIDC_JWKS_CACHE_TIMEOUT = int(os.getenv('JWT_OIDC_JWKS_CACHE_TIMEOUT', '300'))
    TESTING = False
DEBUG = False
# You can disable NRO updates for Name Requests by setting the variable in your .env / OpenShift configuration
DISABLE_NAMEREQUEST_NRO_UPDATES = int(os.getenv('DISABLE_NAMEREQUEST_NRO_UPDATES', 0))
DISABLE_NAMEREQUEST_SOLR_UPDATES = int(os.getenv('DISABLE_NAMEREQUEST_SOLR_UPDATES', 0))
class DevConfig(Config):
"""Dev config used for development."""
    TESTING = False
DEBUG = True
# We can't run NRO locally unless you're provisioned, you can disable NRO updates for Name Requests by setting the variable in your .env
DISABLE_NAMEREQUEST_NRO_UPDATES = int(os.getenv('DISABLE_NAMEREQUEST_NRO_UPDATES', 0))
DISABLE_NAMEREQUEST_SOLR_UPDATES = int(os.getenv('DISABLE_NAMEREQUEST_SOLR_UPDATES', 0))
class TestConfig(Config):
"""Test config used for pytests."""
DEBUG = True
TESTING = True
# POSTGRESQL
DB_USER = os.getenv('DATABASE_TEST_USERNAME', '')
DB_PASSWORD = os.getenv('DATABASE_TEST_PASSWORD', '')
DB_NAME = os.getenv('DATABASE_TEST_NAME', '')
DB_HOST = os.getenv('DATABASE_TEST_HOST', '')
DB_PORT = os.getenv('DATABASE_TEST_PORT', '5432')
# Allows for NRO add / update bypass if necessary (for local development)
LOCAL_DEV_MODE = os.getenv('LOCAL_DEV_MODE', False)
# Set this in your .env to debug SQL Alchemy queries (for local development)
SQLALCHEMY_ECHO = 'debug' if os.getenv('DEBUG_SQL_QUERIES', False) else False
SQLALCHEMY_DATABASE_URI = 'postgresql://{user}:{password}@{host}:{port}/{name}'.format(
user=DB_USER,
password=<PASSWORD>,
host=DB_HOST,
port=int(DB_PORT),
name=DB_NAME
)
# We can't run NRO locally for running our tests
DISABLE_NAMEREQUEST_NRO_UPDATES = int(os.getenv('DISABLE_NAMEREQUEST_NRO_UPDATES', 1))
DISABLE_NAMEREQUEST_SOLR_UPDATES = int(os.getenv('DISABLE_NAMEREQUEST_SOLR_UPDATES', 0))
# JWT OIDC settings
# JWT_OIDC_TEST_MODE will set jwt_manager to use
JWT_OIDC_TEST_MODE = True
JWT_OIDC_TEST_AUDIENCE = 'example'
JWT_OIDC_TEST_ISSUER = 'https://example.localdomain/auth/realms/example'
JWT_OIDC_TEST_KEYS = {
'keys': [
{
'kid': 'flask-jwt-oidc-test-client',
'kty': 'RSA',
'alg': 'RS256',
'use': 'sig',
'n': '<KEY>', # noqa: E501
'e': 'AQAB'
}
]
}
JWT_OIDC_TEST_PRIVATE_KEY_JWKS = {
'keys': [
{
'kid': 'flask-jwt-oidc-test-client',
'kty': 'RSA',
'alg': 'RS256',
'use': 'sig',
'n': '<KEY>', # noqa: E501
'e': 'AQAB',
'd': '<KEY>', # noqa: E501
'p': '<KEY>',
'q': '<KEY>',
'dp': '<KEY>',
'dq': '<KEY>',
'qi': '<KEY>'
}
]
}
JWT_OIDC_TEST_PRIVATE_KEY_PEM = """
-----BEGIN RSA PRIVATE KEY-----
<KEY>
ddj7hucMsXWv05QUrCPoL6YUUz7Cgpz7ra24rpAmK5z7lsV+f3BEvXkrUQIDAQAB
AoGAC0G3QGI6OQ6tvbCNYGCqq043YI/8MiBl7C5dqbGZmx1ewdJBhMNJPStuckhs
kURaDwk4+8VBW9SlvcfSJJrnZhgFMjOYSSsBtPGBIMIdM5eSKbenCCjO8Tg0BUh/
xa3CHST1W4RQ5rFXadZ9AeNtaGcWj2acmXNO3DVETXAX3x0CQQD13LrBTEDR44ei
lQ/4TlCMPO5bytd1pAxHnrqgMnWovSIPSShAAH1feFugH7ZGu7RoBO7pYNb6N3ia
C1idc7yjAkEA6Nfc6c8meTRkVRAHCF24LB5GLfsjoMB0tOeEO9w9Ous1a4o+D24b
AePMUImAp3woFoNDRfWtlNktOqLel5PjewJBAN9kBoA5o6/Rl9zeqdsIdWFmv4DB
5lEqlEnC7HlAP+3oo3jWFO9KQqArQL1V8w2D4aCd0uJULiC9pCP7aTHvBhcCQQDb
W0mOp436T6ZaELBfbFNulNLOzLLi5YzNRPLppfG1SRNZjbIrvTIKVL4N/YxLvQbT
NrQw+2OdQACBJiEHsdZzAkBcsTk7frTH4yGx0VfHxXDPjfTj4wmD6gZIlcIr9lZg
4H8UZcVFN95vEKxJiLRjAmj6g273pu9kK4ymXNEjWWJn
-----END RSA PRIVATE KEY-----"""
| en | 0.5116 | Config for initializing the namex-api. # this will load all the envars from a .env file located in the project root (api) Base config (also production config). # POSTGRESQL # ORACLE - LEGACY NRO NAMESDB # JWT_OIDC Settings # You can disable NRO updates for Name Requests by setting the variable in your .env / OpenShift configuration Dev config used for development. # We can't run NRO locally unless you're provisioned, you can disable NRO updates for Name Requests by setting the variable in your .env Test config used for pytests. # POSTGRESQL # Allows for NRO add / update bypass if necessary (for local development) # Set this in your .env to debug SQL Alchemy queries (for local development) # We can't run NRO locally for running our tests # JWT OIDC settings # JWT_OIDC_TEST_MODE will set jwt_manager to use # noqa: E501 # noqa: E501 # noqa: E501 -----BEGIN RSA PRIVATE KEY----- <KEY> ddj7hucMsXWv05QUrCPoL6YUUz7Cgpz7ra24rpAmK5z7lsV+f3BEvXkrUQIDAQAB AoGAC0G3QGI6OQ6tvbCNYGCqq043YI/8MiBl7C5dqbGZmx1ewdJBhMNJPStuckhs kURaDwk4+8VBW9SlvcfSJJrnZhgFMjOYSSsBtPGBIMIdM5eSKbenCCjO8Tg0BUh/ xa3CHST1W4RQ5rFXadZ9AeNtaGcWj2acmXNO3DVETXAX3x0CQQD13LrBTEDR44ei lQ/4TlCMPO5bytd1pAxHnrqgMnWovSIPSShAAH1feFugH7ZGu7RoBO7pYNb6N3ia C1idc7yjAkEA6Nfc6c8meTRkVRAHCF24LB5GLfsjoMB0tOeEO9w9Ous1a4o+D24b AePMUImAp3woFoNDRfWtlNktOqLel5PjewJBAN9kBoA5o6/Rl9zeqdsIdWFmv4DB 5lEqlEnC7HlAP+3oo3jWFO9KQqArQL1V8w2D4aCd0uJULiC9pCP7aTHvBhcCQQDb W0mOp436T6ZaELBfbFNulNLOzLLi5YzNRPLppfG1SRNZjbIrvTIKVL4N/YxLvQbT NrQw+2OdQACBJiEHsdZzAkBcsTk7frTH4yGx0VfHxXDPjfTj4wmD6gZIlcIr9lZg 4H8UZcVFN95vEKxJiLRjAmj6g273pu9kK4ymXNEjWWJn -----END RSA PRIVATE KEY----- | 2.431407 | 2 |
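The CONFIGURATION mapping above holds dotted class paths, which is the form Flask's app.config.from_object() accepts; below is a sketch of how such a mapping is typically consumed. The create_app wrapper and FLASK_ENV lookup are illustrative assumptions, not taken from the namex-api sources.

```python
import os
from flask import Flask

from config import CONFIGURATION  # the mapping defined in the row above


def create_app(run_mode: str = os.getenv('FLASK_ENV', 'production')) -> Flask:
    app = Flask(__name__)
    # fall back to the 'default' entry for unknown run modes
    app.config.from_object(CONFIGURATION.get(run_mode, CONFIGURATION['default']))
    return app
```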
examples/pylab_examples/fancybox_demo2.py | pierre-haessig/matplotlib | 16 | 8803 | <reponame>pierre-haessig/matplotlib<gh_stars>10-100
import matplotlib.patches as mpatch
import matplotlib.pyplot as plt
styles = mpatch.BoxStyle.get_styles()
figheight = (len(styles)+.5)
fig1 = plt.figure(1, (4/1.5, figheight/1.5))
fontsize = 0.3 * 72
for i, (stylename, styleclass) in enumerate(styles.items()):
fig1.text(0.5, (float(len(styles)) - 0.5 - i)/figheight, stylename,
ha="center",
size=fontsize,
transform=fig1.transFigure,
bbox=dict(boxstyle=stylename, fc="w", ec="k"))
plt.draw()
plt.show()
| import matplotlib.patches as mpatch
import matplotlib.pyplot as plt
styles = mpatch.BoxStyle.get_styles()
figheight = (len(styles)+.5)
fig1 = plt.figure(1, (4/1.5, figheight/1.5))
fontsize = 0.3 * 72
for i, (stylename, styleclass) in enumerate(styles.items()):
fig1.text(0.5, (float(len(styles)) - 0.5 - i)/figheight, stylename,
ha="center",
size=fontsize,
transform=fig1.transFigure,
bbox=dict(boxstyle=stylename, fc="w", ec="k"))
plt.draw()
plt.show() | none | 1 | 2.422122 | 2 |
|
setup.py | sdu-cfei/modest-py | 37 | 8804 | from setuptools import setup
setup(
name='modestpy',
version='0.1',
description='FMI-compliant model identification package',
url='https://github.com/sdu-cfei/modest-py',
keywords='fmi fmu optimization model identification estimation',
author='<NAME>, Center for Energy Informatics SDU',
author_email='<EMAIL>, <EMAIL>',
license='BSD',
platforms=['Windows', 'Linux'],
packages=[
'modestpy',
'modestpy.estim',
'modestpy.estim.ga_parallel',
'modestpy.estim.ga',
'modestpy.estim.ps',
'modestpy.estim.scipy',
'modestpy.fmi',
'modestpy.utilities',
'modestpy.test'],
include_package_data=True,
install_requires=[
'fmpy[complete]',
'scipy',
'pandas',
'matplotlib',
'numpy',
'pyDOE',
'modestga'
],
classifiers=[
'Programming Language :: Python :: 3'
]
)
| from setuptools import setup
setup(
name='modestpy',
version='0.1',
description='FMI-compliant model identification package',
url='https://github.com/sdu-cfei/modest-py',
keywords='fmi fmu optimization model identification estimation',
author='<NAME>, Center for Energy Informatics SDU',
author_email='<EMAIL>, <EMAIL>',
license='BSD',
platforms=['Windows', 'Linux'],
packages=[
'modestpy',
'modestpy.estim',
'modestpy.estim.ga_parallel',
'modestpy.estim.ga',
'modestpy.estim.ps',
'modestpy.estim.scipy',
'modestpy.fmi',
'modestpy.utilities',
'modestpy.test'],
include_package_data=True,
install_requires=[
'fmpy[complete]',
'scipy',
'pandas',
'matplotlib',
'numpy',
'pyDOE',
'modestga'
],
classifiers=[
'Programming Language :: Python :: 3'
]
)
| none | 1 | 1.129361 | 1 |
|
gfworkflow/core.py | andersonbrands/gfworkflow | 0 | 8805 | <reponame>andersonbrands/gfworkflow
import re
import subprocess as sp
from typing import Union, List
from gfworkflow.exceptions import RunCommandException
def run(command: Union[str, List[str]]):
completed_process = sp.run(command, stdout=sp.PIPE, stderr=sp.PIPE, universal_newlines=True)
if completed_process.returncode:
raise RunCommandException(completed_process)
return completed_process
def init():
run('git flow init -d -f')
run('git config gitflow.prefix.versiontag v')
def bump_version(part: str):
run(f'bumpversion {part}')
def start_release(new_version: str):
run(f'git flow release start {new_version}')
def get_new_version(part: str):
output = run(f'bumpversion {part} --list -n --allow-dirty --no-configured-files').stdout
return re.compile(r'new_version=(\S+)').search(output).group(1)
def get_current_branch_name():
return run('git rev-parse --abbrev-ref HEAD').stdout.strip()
def finish_release(release_name):
run(f'git flow release finish -m " - " {release_name}')
| import re
import subprocess as sp
from typing import Union, List
from gfworkflow.exceptions import RunCommandException
def run(command: Union[str, List[str]]):
completed_process = sp.run(command, stdout=sp.PIPE, stderr=sp.PIPE, universal_newlines=True)
if completed_process.returncode:
raise RunCommandException(completed_process)
return completed_process
def init():
run('git flow init -d -f')
run('git config gitflow.prefix.versiontag v')
def bump_version(part: str):
run(f'bumpversion {part}')
def start_release(new_version: str):
run(f'git flow release start {new_version}')
def get_new_version(part: str):
output = run(f'bumpversion {part} --list -n --allow-dirty --no-configured-files').stdout
return re.compile(r'new_version=(\S+)').search(output).group(1)
def get_current_branch_name():
return run('git rev-parse --abbrev-ref HEAD').stdout.strip()
def finish_release(release_name):
run(f'git flow release finish -m " - " {release_name}') | none | 1 | 2.150712 | 2 |
|
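A usage sketch for the helpers in the row above, assuming the module is importable as gfworkflow.core and that git, git-flow and bumpversion are on the PATH; the call order illustrates one possible release cycle and is not prescribed by the module itself.

```python
from gfworkflow import core

core.init()                                    # git flow init + 'v' version-tag prefix
new_version = core.get_new_version('minor')    # dry-run bumpversion to compute the next version
core.start_release(new_version)
core.bump_version('minor')                     # actually bump the version files
core.finish_release(new_version)
print('released from', core.get_current_branch_name())
```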
tests/integration/lambdas/lambda_python3.py | jorges119/localstack | 31,928 | 8806 | <reponame>jorges119/localstack
# simple test function that uses python 3 features (e.g., f-strings)
# see https://github.com/localstack/localstack/issues/264
def handler(event, context):
# the following line is Python 3.6+ specific
msg = f"Successfully processed {event}" # noqa This code is Python 3.6+ only
return event
| # simple test function that uses python 3 features (e.g., f-strings)
# see https://github.com/localstack/localstack/issues/264
def handler(event, context):
# the following line is Python 3.6+ specific
msg = f"Successfully processed {event}" # noqa This code is Python 3.6+ only
return event | en | 0.85615 | # simple test function that uses python 3 features (e.g., f-strings) # see https://github.com/localstack/localstack/issues/264 # the following line is Python 3.6+ specific # noqa This code is Python 3.6+ only | 1.778962 | 2 |
import_off.py | etiennody/purchoice | 0 | 8807 | <reponame>etiennody/purchoice<gh_stars>0
#! usr/bin/python3
# code: utf-8
"""Download data from Open Food Facts API."""
import json
import requests
from src.purchoice.constants import CATEGORY_SELECTED
from src.purchoice.purchoice_database import PurchoiceDatabase
class ImportOff:
"""ImportOff class downloads data from Open Food Facts API."""
def __init__(self, db):
self.url = "https://fr.openfoodfacts.org//cgi/search.pl?"
self.db = db
def get_url_params(self, category):
"""get_urls_params helps to define more precisely
the request to Open Food Facts API.
Arguments:
category {string} -- a name of category.
Returns:
dictionnary -- contains parameters to complete
the request to Open Food Facts API.
"""
return {
"action": "process",
"tagtype_0": "categories",
"tag_contains_0": "contains",
"tag_0": category,
"sort_by": "unique_scans_n",
"page_size": 500,
"json": 1,
}
def get_off(self, category):
"""get_off method makes a request to the web page of Open Food Facts,
and load data in json if the return status code is successful.
Arguments:
category {string} -- a category name.
Returns:
dictionnary -- Deserialize an bytearray instance containing
a JSON document to a Python object as early as products.
"""
response = requests.get(self.url, params=self.get_url_params(category))
if response.status_code == 200:
return json.loads(response.content)["products"]
def import_by_category(self, category):
"""import_by_category method try to insert
products, categories, brands and stores data
for each product by category in the database.
Arguments:
category {string} -- a category name.
"""
products = self.get_off(category)
products = products if isinstance(products, list) else products.items()
print("Importation des données en cours. Patientez...")
for product in products:
try:
p = self.db.add_product(product)
for category in product.get("categories").split(","):
c = self.db.add_category(category)
p.categories.append(c)
for brand in product.get("brands").split(","):
b = self.db.add_brand(brand)
p.brands.append(b)
for store in product.get("stores").split(","):
s = self.db.add_store(store)
p.stores.append(s)
except Exception:
pass
if __name__ == "__main__":
db = PurchoiceDatabase()
db.truncate_tables()
import_off = ImportOff(db)
for category in CATEGORY_SELECTED:
import_off.import_by_category(category)
print("Merci d'avoir patienté. Vous pouvez lancer l'application !")
| #! usr/bin/python3
# code: utf-8
"""Download data from Open Food Facts API."""
import json
import requests
from src.purchoice.constants import CATEGORY_SELECTED
from src.purchoice.purchoice_database import PurchoiceDatabase
class ImportOff:
"""ImportOff class downloads data from Open Food Facts API."""
def __init__(self, db):
self.url = "https://fr.openfoodfacts.org//cgi/search.pl?"
self.db = db
def get_url_params(self, category):
"""get_urls_params helps to define more precisely
the request to Open Food Facts API.
Arguments:
category {string} -- a name of category.
Returns:
dictionnary -- contains parameters to complete
the request to Open Food Facts API.
"""
return {
"action": "process",
"tagtype_0": "categories",
"tag_contains_0": "contains",
"tag_0": category,
"sort_by": "unique_scans_n",
"page_size": 500,
"json": 1,
}
def get_off(self, category):
"""get_off method makes a request to the web page of Open Food Facts,
and load data in json if the return status code is successful.
Arguments:
category {string} -- a category name.
Returns:
dictionnary -- Deserialize an bytearray instance containing
a JSON document to a Python object as early as products.
"""
response = requests.get(self.url, params=self.get_url_params(category))
if response.status_code == 200:
return json.loads(response.content)["products"]
def import_by_category(self, category):
"""import_by_category method try to insert
products, categories, brands and stores data
for each product by category in the database.
Arguments:
category {string} -- a category name.
"""
products = self.get_off(category)
products = products if isinstance(products, list) else products.items()
print("Importation des données en cours. Patientez...")
for product in products:
try:
p = self.db.add_product(product)
for category in product.get("categories").split(","):
c = self.db.add_category(category)
p.categories.append(c)
for brand in product.get("brands").split(","):
b = self.db.add_brand(brand)
p.brands.append(b)
for store in product.get("stores").split(","):
s = self.db.add_store(store)
p.stores.append(s)
except Exception:
pass
if __name__ == "__main__":
db = PurchoiceDatabase()
db.truncate_tables()
import_off = ImportOff(db)
for category in CATEGORY_SELECTED:
import_off.import_by_category(category)
print("Merci d'avoir patienté. Vous pouvez lancer l'application !") | en | 0.69564 | #! usr/bin/python3 # code: utf-8 Download data from Open Food Facts API. ImportOff class downloads data from Open Food Facts API. get_urls_params helps to define more precisely the request to Open Food Facts API. Arguments: category {string} -- a name of category. Returns: dictionnary -- contains parameters to complete the request to Open Food Facts API. get_off method makes a request to the web page of Open Food Facts, and load data in json if the return status code is successful. Arguments: category {string} -- a category name. Returns: dictionnary -- Deserialize an bytearray instance containing a JSON document to a Python object as early as products. import_by_category method try to insert products, categories, brands and stores data for each product by category in the database. Arguments: category {string} -- a category name. | 3.322186 | 3 |
orio/module/loop/cfg.py | zhjp0/Orio | 0 | 8808 | '''
Created on April 26, 2015
@author: norris
'''
import ast, sys, os, traceback
from orio.main.util.globals import *
from orio.tool.graphlib import graph
from orio.module.loop import astvisitors
class CFGVertex(graph.Vertex):
'''A CFG vertex is a basic block.'''
def __init__(self, name, node=None):
try: graph.Vertex.__init__(self, name)
    except Exception as e: err("CFGVertex.__init__:" + str(e))
self.stmts = [node] # basic block, starting with leader node
pass
def append(self, node):
self.stmts.append(node)
def copy(self):
v = CFGVertex(self.name)
v.e = self.e
v.data = self.data
return v
def succ(self):
return self.out_v()
def pred(self):
return self.in_v()
def __str__(self):
return "<%s> " % self.name + str(self.stmts)
pass # End of CFG vertex class
class CFGEdge(graph.DirEdge):
def __init__(self, v1, v2, name=''):
if not name: name = Globals().incrementCounter()
graph.DirEdge.__init__(self, name, v1, v2)
pass
pass # End of CFGEdge class
class CFGGraph(graph.Graph):
def __init__(self, nodes, name='CFG'):
graph.Graph.__init__(self, name)
self.cfgVisitor = CFGVisitor(self)
self.cfgVisitor.visit(nodes)
if True:
self.display()
pass
def nodes(self):
return self.v
def pred(self, bb):
return self.v[bb.name].in_v()
def succ(self, bb):
return self.v[bb.name].out_v()
def display(self):
#sys.stdout.write(str(self))
self.genDOT()
def genDOT(self, fname=''):
buf = 'digraph CFG {\n'
for n,vertex in self.v.items():
label = '[label="%s%s...",shape=box]' % (n,str(vertex.stmts[0]).split('\n')[0])
buf += '\t%s %s;\n' % (n, label)
for edge in vertex.out_e:
for dv in edge.dest_v:
buf += '\t%s -> %s;\n' % (n, dv.name)
buf += '\n}\n'
if fname == '': fname = Globals().tempfilename + '.dot'
f=open(fname,'w')
f.write(buf)
f.close()
# print buf
return buf
pass # End of CFG Graph class
class CFGVisitor(astvisitors.ASTVisitor):
def __init__(self, graph):
astvisitors.ASTVisitor.__init__(self)
self.cfg = graph
v = CFGVertex('_TOP_')
self.cfg.add_v(v)
self.stack = [v]
self.lead = True
self.verbose = False
self.last = None
def display(self, node, msg=''):
if self.verbose:
sys.stdout.write("[%s] " % self.__class__.__name__ + node.__class__.__name__ + ': ' + msg+'\n')
def visit(self, nodes, params={}):
'''Invoke accept method for specified AST node'''
if not isinstance(nodes, (list, tuple)):
nodes = [nodes]
try:
for node in nodes:
if not node: continue
v = CFGVertex(node.id, node)
if isinstance(node, ast.ForStmt):
self.display(node)
# Children: header: node.init, node.test, node.iter; body: node.stmt
v = CFGVertex('ForLoop' + str(node.id), node)
self.cfg.add_v(v)
self.cfg.add_e(CFGEdge(self.stack.pop(),v))
self.stack.append(v)
self.lead = True
self.stack.append(v)
self.visit(node.stmt)
vbottom = CFGVertex('_JOIN_' + str(node.id))
self.cfg.add_v(vbottom)
self.cfg.add_e(CFGEdge(v,vbottom))
self.cfg.add_e(CFGEdge(self.stack.pop(),vbottom))
self.stack.append(vbottom)
self.lead = True
elif isinstance(node, ast.IfStmt):
self.display(node)
v = CFGVertex('IfStmt' + str(node.id) , node)
self.cfg.add_v(v)
self.cfg.add_e(CFGEdge(self.stack.pop(),v))
self.stack.append(v)
self.lead = True
self.visit(node.true_stmt)
truelast = self.stack.pop()
self.stack.append(v)
self.lead = True
self.visit(node.false_stmt)
falselast = self.stack.pop()
self.lead = True
vbottom = CFGVertex('_JOIN_' + str(node.id))
self.cfg.add_v(vbottom)
self.cfg.add_e(CFGEdge(truelast,vbottom))
self.cfg.add_e(CFGEdge(falselast,vbottom))
self.stack.append(vbottom)
elif isinstance(node, ast.CompStmt):
self.display(node)
self.visit(node.stmts)
# TODO: handle gotos
else:
# Add to previous basic block
if self.lead:
v = CFGVertex(node.id, node)
self.cfg.add_v(v)
self.cfg.add_e(CFGEdge(self.stack.pop(),v))
self.stack.append(v)
self.lead = False
else:
self.stack.pop()
self.stack.append(v)
self.stack[-1].append(node)
except Exception as ex:
err("[orio.module.loop.cfg.CFGVisitor.visit()] %s" % str(ex))
return
def getCFG(self):
return self.cfg
pass # end of class CFGVisitor
| '''
Created on April 26, 2015
@author: norris
'''
import ast, sys, os, traceback
from orio.main.util.globals import *
from orio.tool.graphlib import graph
from orio.module.loop import astvisitors
class CFGVertex(graph.Vertex):
'''A CFG vertex is a basic block.'''
def __init__(self, name, node=None):
try: graph.Vertex.__init__(self, name)
    except Exception as e: err("CFGVertex.__init__:" + str(e))
self.stmts = [node] # basic block, starting with leader node
pass
def append(self, node):
self.stmts.append(node)
def copy(self):
v = CFGVertex(self.name)
v.e = self.e
v.data = self.data
return v
def succ(self):
return self.out_v()
def pred(self):
return self.in_v()
def __str__(self):
return "<%s> " % self.name + str(self.stmts)
pass # End of CFG vertex class
class CFGEdge(graph.DirEdge):
def __init__(self, v1, v2, name=''):
if not name: name = Globals().incrementCounter()
graph.DirEdge.__init__(self, name, v1, v2)
pass
pass # End of CFGEdge class
class CFGGraph(graph.Graph):
def __init__(self, nodes, name='CFG'):
graph.Graph.__init__(self, name)
self.cfgVisitor = CFGVisitor(self)
self.cfgVisitor.visit(nodes)
if True:
self.display()
pass
def nodes(self):
return self.v
def pred(self, bb):
return self.v[bb.name].in_v()
def succ(self, bb):
return self.v[bb.name].out_v()
def display(self):
#sys.stdout.write(str(self))
self.genDOT()
def genDOT(self, fname=''):
buf = 'digraph CFG {\n'
for n,vertex in self.v.items():
label = '[label="%s%s...",shape=box]' % (n,str(vertex.stmts[0]).split('\n')[0])
buf += '\t%s %s;\n' % (n, label)
for edge in vertex.out_e:
for dv in edge.dest_v:
buf += '\t%s -> %s;\n' % (n, dv.name)
buf += '\n}\n'
if fname == '': fname = Globals().tempfilename + '.dot'
f=open(fname,'w')
f.write(buf)
f.close()
# print buf
return buf
pass # End of CFG Graph class
class CFGVisitor(astvisitors.ASTVisitor):
def __init__(self, graph):
astvisitors.ASTVisitor.__init__(self)
self.cfg = graph
v = CFGVertex('_TOP_')
self.cfg.add_v(v)
self.stack = [v]
self.lead = True
self.verbose = False
self.last = None
def display(self, node, msg=''):
if self.verbose:
sys.stdout.write("[%s] " % self.__class__.__name__ + node.__class__.__name__ + ': ' + msg+'\n')
def visit(self, nodes, params={}):
'''Invoke accept method for specified AST node'''
if not isinstance(nodes, (list, tuple)):
nodes = [nodes]
try:
for node in nodes:
if not node: continue
v = CFGVertex(node.id, node)
if isinstance(node, ast.ForStmt):
self.display(node)
# Children: header: node.init, node.test, node.iter; body: node.stmt
v = CFGVertex('ForLoop' + str(node.id), node)
self.cfg.add_v(v)
self.cfg.add_e(CFGEdge(self.stack.pop(),v))
self.stack.append(v)
self.lead = True
self.stack.append(v)
self.visit(node.stmt)
vbottom = CFGVertex('_JOIN_' + str(node.id))
self.cfg.add_v(vbottom)
self.cfg.add_e(CFGEdge(v,vbottom))
self.cfg.add_e(CFGEdge(self.stack.pop(),vbottom))
self.stack.append(vbottom)
self.lead = True
elif isinstance(node, ast.IfStmt):
self.display(node)
v = CFGVertex('IfStmt' + str(node.id) , node)
self.cfg.add_v(v)
self.cfg.add_e(CFGEdge(self.stack.pop(),v))
self.stack.append(v)
self.lead = True
self.visit(node.true_stmt)
truelast = self.stack.pop()
self.stack.append(v)
self.lead = True
self.visit(node.false_stmt)
falselast = self.stack.pop()
self.lead = True
vbottom = CFGVertex('_JOIN_' + str(node.id))
self.cfg.add_v(vbottom)
self.cfg.add_e(CFGEdge(truelast,vbottom))
self.cfg.add_e(CFGEdge(falselast,vbottom))
self.stack.append(vbottom)
elif isinstance(node, ast.CompStmt):
self.display(node)
self.visit(node.stmts)
# TODO: handle gotos
else:
# Add to previous basic block
if self.lead:
v = CFGVertex(node.id, node)
self.cfg.add_v(v)
self.cfg.add_e(CFGEdge(self.stack.pop(),v))
self.stack.append(v)
self.lead = False
else:
self.stack.pop()
self.stack.append(v)
self.stack[-1].append(node)
except Exception as ex:
err("[orio.module.loop.cfg.CFGVisitor.visit()] %s" % str(ex))
return
def getCFG(self):
return self.cfg
pass # end of class CFGVisitor
| en | 0.647864 | Created on April 26, 2015 @author: norris A CFG vertex is a basic block. # basic block, starting with leader node # End of CFG vertex class # End of CFGEdge class #sys.stdout.write(str(self)) # print buf # End of CFG Graph class Invoke accept method for specified AST node # Children: header: node.init, node.test, node.iter; body: node.stmt # TODO: handle gotos # Add to previous basic block # end of class CFGVisitor | 2.823057 | 3 |
cogs rework/server specified/on_message_delete.py | lubnc4261/House-Keeper | 0 | 8809 | import discord
from discord import Embed
from discord.ext import commands
# NOTE: the listener takes `self`, so it must live inside a Cog; the class name and
# bot attribute are illustrative additions, and the channel id placeholder is kept.
class MessageLogCog(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_message_delete(self, message):
        channel = self.bot.get_channel("xxxxxxxxxxxxxxxxxxxxx")  # placeholder: use the real log channel's integer id
        deleted = Embed(
            description=f"Message deleted in {message.channel.mention}", color=0x4040EC
        ).set_author(name=message.author, url=Embed.Empty, icon_url=message.author.avatar_url)
        deleted.add_field(name="Message", value=message.content)
        deleted.timestamp = message.created_at
        await channel.send(embed=deleted) | import discord
from discord import Embed
from discord.ext import commands
# NOTE: the listener takes `self`, so it must live inside a Cog; the class name and
# bot attribute are illustrative additions, and the channel id placeholder is kept.
class MessageLogCog(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_message_delete(self, message):
        channel = self.bot.get_channel("xxxxxxxxxxxxxxxxxxxxx")  # placeholder: use the real log channel's integer id
        deleted = Embed(
            description=f"Message deleted in {message.channel.mention}", color=0x4040EC
        ).set_author(name=message.author, url=Embed.Empty, icon_url=message.author.avatar_url)
        deleted.add_field(name="Message", value=message.content)
        deleted.timestamp = message.created_at
        await channel.send(embed=deleted) | none | 1 | 2.599362 | 3 |
|
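Assuming the MessageLogCog class in the row above (an illustrative name), a discord.py 1.x extension would normally expose a setup() hook so the cog can be loaded with bot.load_extension(); the module path in the comment is hypothetical.

```python
from discord.ext import commands


def setup(bot: commands.Bot):
    # called by bot.load_extension("cogs.on_message_delete") to register the listener cog
    bot.add_cog(MessageLogCog(bot))
```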
test/modules/md/md_env.py | icing/mod_md | 320 | 8810 | import copy
import inspect
import json
import logging
import pytest
import re
import os
import shutil
import subprocess
import time
from datetime import datetime, timedelta
from configparser import ConfigParser, ExtendedInterpolation
from typing import Dict, List, Optional
from pyhttpd.certs import CertificateSpec
from .md_cert_util import MDCertUtil
from pyhttpd.env import HttpdTestSetup, HttpdTestEnv
from pyhttpd.result import ExecResult
log = logging.getLogger(__name__)
class MDTestSetup(HttpdTestSetup):
def __init__(self, env: 'HttpdTestEnv'):
super().__init__(env=env)
def make(self):
super().make(add_modules=["proxy_connect", "md"])
if "pebble" == self.env.acme_server:
self._make_pebble_conf()
def _make_pebble_conf(self):
our_dir = os.path.dirname(inspect.getfile(MDTestSetup))
conf_src_dir = os.path.join(our_dir, 'pebble')
conf_dest_dir = os.path.join(self.env.gen_dir, 'pebble')
if not os.path.exists(conf_dest_dir):
os.makedirs(conf_dest_dir)
for name in os.listdir(conf_src_dir):
src_path = os.path.join(conf_src_dir, name)
m = re.match(r'(.+).template', name)
if m:
self._make_template(src_path, os.path.join(conf_dest_dir, m.group(1)))
elif os.path.isfile(src_path):
shutil.copy(src_path, os.path.join(conf_dest_dir, name))
class MDTestEnv(HttpdTestEnv):
MD_S_UNKNOWN = 0
MD_S_INCOMPLETE = 1
MD_S_COMPLETE = 2
MD_S_EXPIRED = 3
MD_S_ERROR = 4
EMPTY_JOUT = {'status': 0, 'output': []}
DOMAIN_SUFFIX = "%d.org" % time.time()
LOG_FMT_TIGHT = '%(levelname)s: %(message)s'
@classmethod
def get_acme_server(cls):
return os.environ['ACME'] if 'ACME' in os.environ else "pebble"
@classmethod
def has_acme_server(cls):
return cls.get_acme_server() != 'none'
@classmethod
def has_acme_eab(cls):
return cls.get_acme_server() == 'pebble'
@classmethod
def is_pebble(cls) -> bool:
return cls.get_acme_server() == 'pebble'
@classmethod
def lacks_ocsp(cls):
return cls.is_pebble()
def __init__(self, pytestconfig=None, setup_dirs=True):
super().__init__(pytestconfig=pytestconfig,
local_dir=os.path.dirname(inspect.getfile(MDTestEnv)),
interesting_modules=["md"])
self._acme_server = self.get_acme_server()
self._acme_tos = "accepted"
self._acme_ca_pemfile = os.path.join(self.gen_dir, "apache/acme-ca.pem")
if "pebble" == self._acme_server:
self._acme_url = "https://localhost:14000/dir"
self._acme_eab_url = "https://localhost:14001/dir"
elif "boulder" == self._acme_server:
self._acme_url = "http://localhost:4001/directory"
self._acme_eab_url = None
else:
raise Exception(f"unknown ACME server type: {self._acme_server}")
self._acme_server_down = False
self._acme_server_ok = False
self._a2md_bin = os.path.join(self.bin_dir, 'a2md')
self._default_domain = f"test1.{self.http_tld}"
self._store_dir = "./md"
self.set_store_dir_default()
self.add_cert_specs([
CertificateSpec(domains=[f"expired.{self._http_tld}"],
valid_from=timedelta(days=-100),
valid_to=timedelta(days=-10)),
CertificateSpec(domains=["localhost"], key_type='rsa2048'),
])
self.httpd_error_log.set_ignored_lognos([
#"AH10045", # mod_md complains that there is no vhost for an MDomain
"AH10105", # mod_md does not find a vhost with SSL enabled for an MDomain
"AH10085" # mod_ssl complains about fallback certificates
])
if self.lacks_ocsp():
self.httpd_error_log.set_ignored_patterns([
re.compile(r'.*certificate with serial \S+ has no OCSP responder URL.*'),
])
if setup_dirs:
self._setup = MDTestSetup(env=self)
self._setup.make()
self.issue_certs()
self.clear_store()
def set_store_dir_default(self):
dirpath = "md"
if self.httpd_is_at_least("2.5.0"):
dirpath = os.path.join("state", dirpath)
self.set_store_dir(dirpath)
def set_store_dir(self, dirpath):
self._store_dir = os.path.join(self.server_dir, dirpath)
if self.acme_url:
self.a2md_stdargs([self.a2md_bin, "-a", self.acme_url, "-d", self._store_dir, "-C", self.acme_ca_pemfile, "-j"])
self.a2md_rawargs([self.a2md_bin, "-a", self.acme_url, "-d", self._store_dir, "-C", self.acme_ca_pemfile])
def get_apxs_var(self, name: str) -> str:
p = subprocess.run([self._apxs, "-q", name], capture_output=True, text=True)
if p.returncode != 0:
return ""
return p.stdout.strip()
@property
def acme_server(self):
return self._acme_server
@property
def acme_url(self):
return self._acme_url
@property
def acme_tos(self):
return self._acme_tos
@property
def a2md_bin(self):
return self._a2md_bin
@property
def acme_ca_pemfile(self):
return self._acme_ca_pemfile
@property
def store_dir(self):
return self._store_dir
def get_request_domain(self, request):
return "%s-%s" % (re.sub(r'[_]', '-', request.node.originalname), MDTestEnv.DOMAIN_SUFFIX)
def get_method_domain(self, method):
return "%s-%s" % (re.sub(r'[_]', '-', method.__name__.lower()), MDTestEnv.DOMAIN_SUFFIX)
def get_module_domain(self, module):
return "%s-%s" % (re.sub(r'[_]', '-', module.__name__.lower()), MDTestEnv.DOMAIN_SUFFIX)
def get_class_domain(self, c):
return "%s-%s" % (re.sub(r'[_]', '-', c.__name__.lower()), MDTestEnv.DOMAIN_SUFFIX)
# --------- cmd execution ---------
_a2md_args = []
_a2md_args_raw = []
def a2md_stdargs(self, args):
self._a2md_args = [] + args
def a2md_rawargs(self, args):
self._a2md_args_raw = [] + args
def a2md(self, args, raw=False) -> ExecResult:
preargs = self._a2md_args
if raw:
preargs = self._a2md_args_raw
log.debug("running: {0} {1}".format(preargs, args))
return self.run(preargs + args)
def check_acme(self):
if self._acme_server_ok:
return True
if self._acme_server_down:
pytest.skip(msg="ACME server not running")
return False
if self.is_live(self.acme_url, timeout=timedelta(seconds=0.5)):
self._acme_server_ok = True
return True
else:
self._acme_server_down = True
pytest.fail(msg="ACME server not running", pytrace=False)
return False
def get_ca_pem_file(self, hostname: str) -> Optional[str]:
pem_file = super().get_ca_pem_file(hostname)
if pem_file is None:
pem_file = self.acme_ca_pemfile
return pem_file
# --------- access local store ---------
def purge_store(self):
log.debug("purge store dir: %s" % self._store_dir)
assert len(self._store_dir) > 1
if os.path.exists(self._store_dir):
shutil.rmtree(self._store_dir, ignore_errors=False)
os.makedirs(self._store_dir)
def clear_store(self):
log.debug("clear store dir: %s" % self._store_dir)
assert len(self._store_dir) > 1
if not os.path.exists(self._store_dir):
os.makedirs(self._store_dir)
for dirpath in ["challenges", "tmp", "archive", "domains", "accounts", "staging", "ocsp"]:
shutil.rmtree(os.path.join(self._store_dir, dirpath), ignore_errors=True)
def clear_ocsp_store(self):
assert len(self._store_dir) > 1
dirpath = os.path.join(self._store_dir, "ocsp")
log.debug("clear ocsp store dir: %s" % dir)
if os.path.exists(dirpath):
shutil.rmtree(dirpath, ignore_errors=True)
def authz_save(self, name, content):
dirpath = os.path.join(self._store_dir, 'staging', name)
os.makedirs(dirpath)
open(os.path.join(dirpath, 'authz.json'), "w").write(content)
def path_store_json(self):
return os.path.join(self._store_dir, 'md_store.json')
def path_account(self, acct):
return os.path.join(self._store_dir, 'accounts', acct, 'account.json')
def path_account_key(self, acct):
return os.path.join(self._store_dir, 'accounts', acct, 'account.pem')
def store_domains(self):
return os.path.join(self._store_dir, 'domains')
def store_archives(self):
return os.path.join(self._store_dir, 'archive')
def store_stagings(self):
return os.path.join(self._store_dir, 'staging')
def store_challenges(self):
return os.path.join(self._store_dir, 'challenges')
def store_domain_file(self, domain, filename):
return os.path.join(self.store_domains(), domain, filename)
def store_archived_file(self, domain, version, filename):
return os.path.join(self.store_archives(), "%s.%d" % (domain, version), filename)
def store_staged_file(self, domain, filename):
return os.path.join(self.store_stagings(), domain, filename)
def path_fallback_cert(self, domain):
return os.path.join(self._store_dir, 'domains', domain, 'fallback-pubcert.pem')
def path_job(self, domain):
return os.path.join(self._store_dir, 'staging', domain, 'job.json')
def replace_store(self, src):
shutil.rmtree(self._store_dir, ignore_errors=False)
shutil.copytree(src, self._store_dir)
def list_accounts(self):
return os.listdir(os.path.join(self._store_dir, 'accounts'))
def check_md(self, domain, md=None, state=-1, ca=None, protocol=None, agreement=None, contacts=None):
domains = None
if isinstance(domain, list):
domains = domain
domain = domains[0]
if md:
domain = md
path = self.store_domain_file(domain, 'md.json')
with open(path) as f:
md = json.load(f)
assert md
if domains:
assert md['domains'] == domains
if state >= 0:
assert md['state'] == state
if ca:
assert md['ca']['url'] == ca
if protocol:
assert md['ca']['proto'] == protocol
if agreement:
assert md['ca']['agreement'] == agreement
if contacts:
assert md['contacts'] == contacts
def pkey_fname(self, pkeyspec=None):
if pkeyspec and not re.match(r'^rsa( ?\d+)?$', pkeyspec.lower()):
return "privkey.{0}.pem".format(pkeyspec)
return 'privkey.pem'
def cert_fname(self, pkeyspec=None):
if pkeyspec and not re.match(r'^rsa( ?\d+)?$', pkeyspec.lower()):
return "pubcert.{0}.pem".format(pkeyspec)
return 'pubcert.pem'
def check_md_complete(self, domain, pkey=None):
md = self.get_md_status(domain)
assert md
assert 'state' in md, "md is unexpected: {0}".format(md)
assert md['state'] is MDTestEnv.MD_S_COMPLETE, "unexpected state: {0}".format(md['state'])
assert os.path.isfile(self.store_domain_file(domain, self.pkey_fname(pkey)))
assert os.path.isfile(self.store_domain_file(domain, self.cert_fname(pkey)))
def check_md_credentials(self, domain):
if isinstance(domain, list):
domains = domain
domain = domains[0]
else:
domains = [domain]
# check private key, validate certificate, etc
MDCertUtil.validate_privkey(self.store_domain_file(domain, 'privkey.pem'))
cert = MDCertUtil(self.store_domain_file(domain, 'pubcert.pem'))
cert.validate_cert_matches_priv_key(self.store_domain_file(domain, 'privkey.pem'))
# check SANs and CN
assert cert.get_cn() == domain
# compare lists twice in opposite directions: SAN may not respect ordering
san_list = list(cert.get_san_list())
assert len(san_list) == len(domains)
assert set(san_list).issubset(domains)
assert set(domains).issubset(san_list)
# check valid dates interval
not_before = cert.get_not_before()
not_after = cert.get_not_after()
assert not_before < datetime.now(not_before.tzinfo)
assert not_after > datetime.now(not_after.tzinfo)
# --------- check utilities ---------
def check_json_contains(self, actual, expected):
# write all expected key:value bindings to a copy of the actual data ...
# ... assert it stays unchanged
test_json = copy.deepcopy(actual)
test_json.update(expected)
assert actual == test_json
def check_file_access(self, path, exp_mask):
actual_mask = os.lstat(path).st_mode & 0o777
assert oct(actual_mask) == oct(exp_mask)
def check_dir_empty(self, path):
assert os.listdir(path) == []
def get_http_status(self, domain, path, use_https=True):
r = self.get_meta(domain, path, use_https, insecure=True)
return r.response['status']
def get_cert(self, domain, tls=None, ciphers=None):
return MDCertUtil.load_server_cert(self._httpd_addr, self.https_port,
domain, tls=tls, ciphers=ciphers)
def get_server_cert(self, domain, proto=None, ciphers=None):
args = [
"openssl", "s_client", "-status",
"-connect", "%s:%s" % (self._httpd_addr, self.https_port),
"-CAfile", self.acme_ca_pemfile,
"-servername", domain,
"-showcerts"
]
if proto is not None:
args.extend(["-{0}".format(proto)])
if ciphers is not None:
args.extend(["-cipher", ciphers])
r = self.run(args)
# noinspection PyBroadException
try:
return MDCertUtil.parse_pem_cert(r.stdout)
except:
return None
def verify_cert_key_lenghts(self, domain, pkeys):
for p in pkeys:
cert = self.get_server_cert(domain, proto="tls1_2", ciphers=p['ciphers'])
if 0 == p['keylen']:
assert cert is None
else:
assert cert, "no cert returned for cipher: {0}".format(p['ciphers'])
assert cert.get_key_length() == p['keylen'], "key length, expected {0}, got {1}".format(
p['keylen'], cert.get_key_length()
)
def get_meta(self, domain, path, use_https=True, insecure=False):
schema = "https" if use_https else "http"
port = self.https_port if use_https else self.http_port
r = self.curl_get(f"{schema}://{domain}:{port}{path}", insecure=insecure)
assert r.exit_code == 0
assert r.response
assert r.response['header']
return r
def get_content(self, domain, path, use_https=True):
schema = "https" if use_https else "http"
port = self.https_port if use_https else self.http_port
r = self.curl_get(f"{schema}://{domain}:{port}{path}")
assert r.exit_code == 0
return r.stdout
def get_json_content(self, domain, path, use_https=True, insecure=False,
debug_log=True):
schema = "https" if use_https else "http"
port = self.https_port if use_https else self.http_port
url = f"{schema}://{domain}:{port}{path}"
r = self.curl_get(url, insecure=insecure, debug_log=debug_log)
if r.exit_code != 0:
log.error(f"curl get on {url} returned {r.exit_code}"
f"\nstdout: {r.stdout}"
f"\nstderr: {r.stderr}")
assert r.exit_code == 0, r.stderr
return r.json
def get_certificate_status(self, domain) -> Dict:
return self.get_json_content(domain, "/.httpd/certificate-status", insecure=True)
def get_md_status(self, domain, via_domain=None, use_https=True, debug_log=False) -> Dict:
if via_domain is None:
via_domain = self._default_domain
return self.get_json_content(via_domain, f"/md-status/{domain}",
use_https=use_https, debug_log=debug_log)
def get_server_status(self, query="/", via_domain=None, use_https=True):
if via_domain is None:
via_domain = self._default_domain
return self.get_content(via_domain, "/server-status%s" % query, use_https=use_https)
def await_completion(self, names, must_renew=False, restart=True, timeout=60,
via_domain=None, use_https=True):
try_until = time.time() + timeout
renewals = {}
names = names.copy()
while len(names) > 0:
if time.time() >= try_until:
return False
for name in names:
mds = self.get_md_status(name, via_domain=via_domain, use_https=use_https)
if mds is None:
log.debug("not managed by md: %s" % name)
return False
if 'renewal' in mds:
renewal = mds['renewal']
renewals[name] = True
if 'finished' in renewal and renewal['finished'] is True:
if (not must_renew) or (name in renewals):
log.debug(f"domain cert was renewed: {name}")
names.remove(name)
if len(names) != 0:
time.sleep(0.1)
if restart:
time.sleep(0.1)
return self.apache_restart() == 0
return True
def is_renewing(self, name):
stat = self.get_certificate_status(name)
return 'renewal' in stat
def await_renewal(self, names, timeout=60):
try_until = time.time() + timeout
while len(names) > 0:
if time.time() >= try_until:
return False
for name in names:
md = self.get_md_status(name)
if md is None:
log.debug("not managed by md: %s" % name)
return False
if 'renewal' in md:
names.remove(name)
if len(names) != 0:
time.sleep(0.1)
return True
def await_error(self, domain, timeout=60, via_domain=None, use_https=True, errors=1):
try_until = time.time() + timeout
while True:
if time.time() >= try_until:
return False
md = self.get_md_status(domain, via_domain=via_domain, use_https=use_https)
if md:
if 'state' in md and md['state'] == MDTestEnv.MD_S_ERROR:
return md
if 'renewal' in md and 'errors' in md['renewal'] \
and md['renewal']['errors'] >= errors:
return md
time.sleep(0.1)
return None
def await_file(self, fpath, timeout=60):
try_until = time.time() + timeout
while True:
if time.time() >= try_until:
return False
if os.path.isfile(fpath):
return True
time.sleep(0.1)
def check_file_permissions(self, domain):
md = self.a2md(["list", domain]).json['output'][0]
assert md
acct = md['ca']['account']
assert acct
self.check_file_access(self.path_store_json(), 0o600)
# domains
self.check_file_access(self.store_domains(), 0o700)
self.check_file_access(os.path.join(self.store_domains(), domain), 0o700)
self.check_file_access(self.store_domain_file(domain, 'privkey.pem'), 0o600)
self.check_file_access(self.store_domain_file(domain, 'pubcert.pem'), 0o600)
self.check_file_access(self.store_domain_file(domain, 'md.json'), 0o600)
# archive
self.check_file_access(self.store_archived_file(domain, 1, 'md.json'), 0o600)
# accounts
self.check_file_access(os.path.join(self._store_dir, 'accounts'), 0o755)
self.check_file_access(os.path.join(self._store_dir, 'accounts', acct), 0o755)
self.check_file_access(self.path_account(acct), 0o644)
self.check_file_access(self.path_account_key(acct), 0o644)
# staging
self.check_file_access(self.store_stagings(), 0o755)
def get_ocsp_status(self, domain, proto=None, cipher=None, ca_file=None):
stat = {}
args = [
"openssl", "s_client", "-status",
"-connect", "%s:%s" % (self._httpd_addr, self.https_port),
"-CAfile", ca_file if ca_file else self.acme_ca_pemfile,
"-servername", domain,
"-showcerts"
]
if proto is not None:
args.extend(["-{0}".format(proto)])
if cipher is not None:
args.extend(["-cipher", cipher])
r = self.run(args, debug_log=False)
ocsp_regex = re.compile(r'OCSP response: +([^=\n]+)\n')
matches = ocsp_regex.finditer(r.stdout)
for m in matches:
if m.group(1) != "":
stat['ocsp'] = m.group(1)
if 'ocsp' not in stat:
ocsp_regex = re.compile(r'OCSP Response Status:\s*(.+)')
matches = ocsp_regex.finditer(r.stdout)
for m in matches:
if m.group(1) != "":
stat['ocsp'] = m.group(1)
verify_regex = re.compile(r'Verify return code:\s*(.+)')
matches = verify_regex.finditer(r.stdout)
for m in matches:
if m.group(1) != "":
stat['verify'] = m.group(1)
return stat
def await_ocsp_status(self, domain, timeout=10, ca_file=None):
try_until = time.time() + timeout
while True:
if time.time() >= try_until:
break
stat = self.get_ocsp_status(domain, ca_file=ca_file)
if 'ocsp' in stat and stat['ocsp'] != "no response sent":
return stat
time.sleep(0.1)
raise TimeoutError(f"ocsp respopnse not available: {domain}")
def create_self_signed_cert(self, name_list, valid_days, serial=1000, path=None):
dirpath = path
if not path:
dirpath = os.path.join(self.store_domains(), name_list[0])
return MDCertUtil.create_self_signed_cert(dirpath, name_list, valid_days, serial) | import copy
models/create_message_response.py | ajrice6713/bw-messaging-emulator | 0 | 8811 | <reponame>ajrice6713/bw-messaging-emulator
import datetime
import json
import random
import string
from typing import Dict
from sms_counter import SMSCounter
class CreateMessageResponse:
def __init__(self, request):
self.id = self.generate_id()
self.owner = request['from']
self.applicationId = request['applicationId']
self.time = str(datetime.datetime.utcnow().isoformat())
self.segmentCount = 1
self.direction = 'out'
if type(request['to']) is str:
self.to = [request['to']]
else:
self.to = request['to']
self.mfrom = request['from']
if 'media' in request:
self.media = request['media']
if 'text' in request:
self.text = request['text']
if 'tag' in request:
self.tag = request['tag']
if 'priority' in request:
self.priority = request['priority']
def calculate_segments(self, message) -> int:
count = SMSCounter.count(message)
return count['messages']
def generate_id(self) -> str:
pre = random.randint(1400000000000,1799999999999)
return str(pre) + ''.join(random.choice(string.ascii_lowercase) for x in range(16))
def to_json(self) -> str:
dict_response = {
'id': self.id,
'owner': self.owner,
'applicationId': self.applicationId,
'time': self.time,
'direction': self.direction,
'to': self.to,
'from': self.mfrom
}
if hasattr(self, 'media'): dict_response['media'] = self.media
if hasattr(self, 'text'):
dict_response['text'] = self.text
dict_response['segmentCount'] = self.calculate_segments(self.text)
if hasattr(self, 'tag'): dict_response['tag'] = self.tag
if hasattr(self, 'priority'): dict_response['priority'] = self.priority
return json.dumps(dict_response)
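# Hedged usage sketch (appended for illustration, not part of the original file):
# the request keys below mirror what __init__ reads; the phone numbers and
# applicationId are placeholder values.
if __name__ == "__main__":
    sample_request = {
        "from": "+15551110000",
        "to": "+15552220000",
        "applicationId": "emulator-app-id",
        "text": "hello from the emulator",
    }
    # to_json() recomputes segmentCount from the text via SMSCounter
    print(CreateMessageResponse(sample_request).to_json())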
|
python/ccxt/async_support/uex.py | victor95pc/ccxt | 1 | 8812 | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import ExchangeNotAvailable
class uex (Exchange):
def describe(self):
return self.deep_extend(super(uex, self).describe(), {
'id': 'uex',
'name': 'UEX',
'countries': ['SG', 'US'],
'version': 'v1.0.3',
'rateLimit': 1000,
'certified': False,
# new metainfo interface
'has': {
'CORS': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOrder': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchWithdrawals': True,
'withdraw': True,
},
'timeframes': {
'1m': '1',
'5m': '5',
'15m': '15',
'30m': '30',
'1h': '60',
'2h': '120',
'3h': '180',
'4h': '240',
'6h': '360',
'12h': '720',
'1d': '1440',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/43999923-051d9884-9e1f-11e8-965a-76948cb17678.jpg',
'api': 'https://open-api.uex.com/open/api',
'www': 'https://www.uex.com',
'doc': 'https://download.uex.com/doc/UEX-API-English-1.0.3.pdf',
'fees': 'https://www.uex.com/footer/ufees.html',
'referral': 'https://www.uex.com/signup.html?code=VAGQLL',
},
'api': {
'public': {
'get': [
'common/coins', # funding limits
'common/symbols',
'get_records', # ohlcvs
'get_ticker',
'get_trades',
'market_dept', # dept here is not a typo... they mean depth
],
},
'private': {
'get': [
'deposit_address_list',
'withdraw_address_list',
'deposit_history',
'withdraw_history',
'user/account',
'market', # an assoc array of market ids to corresponding prices traded most recently(prices of last trades per market)
'order_info',
'new_order', # a list of currently open orders
'all_order',
'all_trade',
],
'post': [
'create_order',
'cancel_order',
'create_withdraw',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.0010,
'taker': 0.0010,
},
},
'exceptions': {
# descriptions from ↓ exchange
# '0': 'no error', # succeed
'4': InsufficientFunds, # {"code":"4","msg":"余额不足:0E-16","data":null}
'5': InvalidOrder, # fail to order {"code":"5","msg":"Price fluctuates more than1000.0%","data":null}
'6': InvalidOrder, # the quantity value less than the minimum one {"code":"6","msg":"数量小于最小值:0.001","data":null}
'7': InvalidOrder, # the quantity value more than the maximum one {"code":"7","msg":"数量大于最大值:10000","data":null}
'8': InvalidOrder, # fail to cancel order
'9': ExchangeError, # transaction be frozen
'13': ExchangeError, # Sorry, the program made an error, please contact with the manager.
'19': InsufficientFunds, # Available balance is insufficient.
'22': OrderNotFound, # The order does not exist. {"code":"22","msg":"not exist order","data":null}
'23': InvalidOrder, # Lack of parameters of numbers of transaction
'24': InvalidOrder, # Lack of parameters of transaction price
'100001': ExchangeError, # System is abnormal
'100002': ExchangeNotAvailable, # Update System
'100004': ExchangeError, # {"code":"100004","msg":"request parameter illegal","data":null}
'100005': AuthenticationError, # {"code":"100005","msg":"request sign illegal","data":null}
'100007': PermissionDenied, # illegal IP
'110002': ExchangeError, # unknown currency code
'110003': AuthenticationError, # fund password error
'110004': AuthenticationError, # fund password error
'110005': InsufficientFunds, # Available balance is insufficient.
'110020': AuthenticationError, # Username does not exist.
'110023': AuthenticationError, # Phone number is registered.
'110024': AuthenticationError, # Email box is registered.
'110025': PermissionDenied, # Account is locked by background manager
'110032': PermissionDenied, # The user has no authority to do self operation.
'110033': ExchangeError, # fail to recharge
'110034': ExchangeError, # fail to withdraw
'-100': ExchangeError, # {"code":"-100","msg":"Your request path is not exist or you can try method GET/POST.","data":null}
'-1000': ExchangeNotAvailable, # {"msg":"System maintenancenot ","code":"-1000","data":null}
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'options': {
'createMarketBuyOrderRequiresPrice': True,
'limits': {
'BTC/USDT': {'amount': {'min': 0.001}, 'price': {'min': 0.01}},
'ETH/USDT': {'amount': {'min': 0.001}, 'price': {'min': 0.01}},
'BCH/USDT': {'amount': {'min': 0.001}, 'price': {'min': 0.01}},
'ETH/BTC': {'amount': {'min': 0.001}, 'price': {'min': 0.000001}},
'BCH/BTC': {'amount': {'min': 0.001}, 'price': {'min': 0.000001}},
'LEEK/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'CTXC/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'COSM/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'MANA/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'LBA/BTC': {'amount': {'min': 10}, 'price': {'min': 10}},
'OLT/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'DTA/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'KNT/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'REN/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'LBA/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'EXC/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'ZIL/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'RATING/ETH': {'amount': {'min': 100}, 'price': {'min': 100}},
'CENNZ/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'TTC/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
},
},
})
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
key = 'quote'
rate = market[takerOrMaker]
cost = float(self.cost_to_precision(symbol, amount * rate))
if side == 'sell':
cost *= price
else:
key = 'base'
return {
'type': takerOrMaker,
'currency': market[key],
'rate': rate,
'cost': float(self.currency_to_precision(market[key], cost)),
}
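    # Hedged worked example for calculate_fee above: with the 0.10% taker rate,
    # selling 2 ETH at 0.058 on ETH/BTC costs 2 * 0.0010 * 0.058 = 0.000116 in
    # the quote currency (BTC), while buying 2 ETH costs 2 * 0.0010 = 0.002 in
    # the base currency (ETH). Figures are illustrative only.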
async def fetch_markets(self, params={}):
response = await self.publicGetCommonSymbols()
#
# {code: "0",
# msg: "suc",
# data: [{ symbol: "btcusdt",
# count_coin: "usdt",
# amount_precision: 3,
# base_coin: "btc",
# price_precision: 2 },
# { symbol: "ethusdt",
# count_coin: "usdt",
# amount_precision: 3,
# base_coin: "eth",
# price_precision: 2 },
# { symbol: "ethbtc",
# count_coin: "btc",
# amount_precision: 3,
# base_coin: "eth",
# price_precision: 6 }]}
#
result = []
markets = response['data']
for i in range(0, len(markets)):
market = markets[i]
id = market['symbol']
baseId = market['base_coin']
quoteId = market['count_coin']
base = baseId.upper()
quote = quoteId.upper()
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
precision = {
'amount': market['amount_precision'],
'price': market['price_precision'],
}
active = True
defaultLimits = self.safe_value(self.options['limits'], symbol, {})
limits = self.deep_extend({
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
}, defaultLimits)
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': active,
'info': market,
'precision': precision,
'limits': limits,
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privateGetUserAccount(params)
#
# {code: "0",
# msg: "suc",
# data: {total_asset: "0.00000000",
# coin_list: [{ normal: "0.00000000",
# btcValuatin: "0.00000000",
# locked: "0.00000000",
# coin: "usdt" },
# { normal: "0.00000000",
# btcValuatin: "0.00000000",
# locked: "0.00000000",
# coin: "btc" },
# { normal: "0.00000000",
# btcValuatin: "0.00000000",
# locked: "0.00000000",
# coin: "eth" },
# { normal: "0.00000000",
# btcValuatin: "0.00000000",
# locked: "0.00000000",
# coin: "ren" }]}}
#
balances = response['data']['coin_list']
result = {'info': balances}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = balance['coin']
code = currencyId.upper()
if currencyId in self.currencies_by_id:
code = self.currencies_by_id[currencyId]['code']
else:
code = self.common_currency_code(code)
account = self.account()
free = float(balance['normal'])
used = float(balance['locked'])
total = self.sum(free, used)
account['free'] = free
account['used'] = used
account['total'] = total
result[code] = account
return self.parse_balance(result)
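    # Hedged sketch of the unified structure returned by parse_balance above,
    # with placeholder numbers:
    #
    #     {'info': [...],
    #      'BTC': {'free': 0.1, 'used': 0.0, 'total': 0.1},
    #      'free': {'BTC': 0.1}, 'used': {'BTC': 0.0}, 'total': {'BTC': 0.1}}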
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
response = await self.publicGetMarketDept(self.extend({
'symbol': self.market_id(symbol),
'type': 'step0', # step1, step2 from most detailed to least detailed
}, params))
#
# {code: "0",
# msg: "suc",
# data: {tick: {asks: [["0.05824200", 9.77],
# ["0.05830000", 7.81],
# ["0.05832900", 8.59],
# ["0.10000000", 0.001] ],
# bids: [["0.05780000", 8.25],
# ["0.05775000", 8.12],
# ["0.05773200", 8.57],
# ["0.00010000", 0.79] ],
# time: 1533412622463 }} }
#
timestamp = self.safe_integer(response['data']['tick'], 'time')
return self.parse_order_book(response['data']['tick'], timestamp)
def parse_ticker(self, ticker, market=None):
#
# {code: "0",
# msg: "suc",
# data: {symbol: "ETHBTC",
# high: 0.058426,
# vol: 19055.875,
# last: 0.058019,
# low: 0.055802,
# change: 0.03437271,
# buy: "0.05780000",
# sell: "0.05824200",
# time: 1533413083184} }
#
timestamp = self.safe_integer(ticker, 'time')
symbol = None
if market is None:
marketId = self.safe_string(ticker, 'symbol')
marketId = marketId.lower()
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if market is not None:
symbol = market['symbol']
last = self.safe_float(ticker, 'last')
change = self.safe_float(ticker, 'change')
percentage = change * 100
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'buy'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'sell'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': percentage,
'average': None,
'baseVolume': self.safe_float(ticker, 'vol'),
'quoteVolume': None,
'info': ticker,
}
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetGetTicker(self.extend({
'symbol': market['id'],
}, params))
#
# {code: "0",
# msg: "suc",
# data: {symbol: "ETHBTC",
# high: 0.058426,
# vol: 19055.875,
# last: 0.058019,
# low: 0.055802,
# change: 0.03437271,
# buy: "0.05780000",
# sell: "0.05824200",
# time: 1533413083184} }
#
return self.parse_ticker(response['data'], market)
def parse_trade(self, trade, market=None):
#
# public fetchTrades
#
# { amount: 0.88,
# create_time: 1533414358000,
# price: 0.058019,
# id: 406531,
# type: "sell" },
#
# private fetchMyTrades, fetchOrder, fetchOpenOrders, fetchClosedOrders
#
# { volume: "0.010",
# side: "SELL",
# feeCoin: "BTC",
# price: "0.05816200",
# fee: "0.00000029",
# ctime: 1533616674000,
# deal_price: "0.00058162",
# id: 415779,
# type: "卖出",
# bid_id: 3669539, # only in fetchMyTrades
# ask_id: 3669583, # only in fetchMyTrades
# }
#
timestamp = self.safe_integer_2(trade, 'create_time', 'ctime')
if timestamp is None:
timestring = self.safe_string(trade, 'created_at')
if timestring is not None:
timestamp = self.parse8601('2018-' + timestring + ':00Z')
side = self.safe_string_2(trade, 'side', 'type')
if side is not None:
side = side.lower()
id = self.safe_string(trade, 'id')
symbol = None
if market is not None:
symbol = market['symbol']
price = self.safe_float(trade, 'price')
amount = self.safe_float_2(trade, 'volume', 'amount')
cost = self.safe_float(trade, 'deal_price')
if cost is None:
if amount is not None:
if price is not None:
cost = amount * price
fee = None
feeCost = self.safe_float_2(trade, 'fee', 'deal_fee')
if feeCost is not None:
feeCurrency = self.safe_string(trade, 'feeCoin')
if feeCurrency is not None:
currencyId = feeCurrency.lower()
if currencyId in self.currencies_by_id:
feeCurrency = self.currencies_by_id[currencyId]['code']
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
orderIdField = 'ask_id' if (side == 'sell') else 'bid_id'
orderId = self.safe_string(trade, orderIdField)
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': None,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetGetTrades(self.extend({
'symbol': market['id'],
}, params))
#
# {code: "0",
# msg: "suc",
# data: [{ amount: 0.88,
# create_time: 1533414358000,
# price: 0.058019,
# id: 406531,
# type: "sell" },
# { amount: 4.88,
# create_time: 1533414331000,
# price: 0.058019,
# id: 406530,
# type: "buy" },
# { amount: 0.5,
# create_time: 1533414311000,
# price: 0.058019,
# id: 406529,
# type: "sell" }]}
#
return self.parse_trades(response['data'], market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1d', since=None, limit=None):
return [
ohlcv[0] * 1000, # timestamp
ohlcv[1], # open
ohlcv[2], # high
ohlcv[3], # low
ohlcv[4], # close
ohlcv[5], # volume
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'period': self.timeframes[timeframe], # in minutes
}
response = await self.publicGetGetRecords(self.extend(request, params))
#
# {code: '0',
# msg: 'suc',
# data:
# [[1533402420, 0.057833, 0.057833, 0.057833, 0.057833, 18.1],
# [1533402480, 0.057833, 0.057833, 0.057833, 0.057833, 29.88],
# [1533402540, 0.057833, 0.057833, 0.057833, 0.057833, 29.06] ]}
#
return self.parse_ohlcvs(response['data'], market, timeframe, since, limit)
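    # Hedged example of the unified output of fetch_ohlcv/parse_ohlcv above --
    # the second-resolution timestamps from the API are scaled to milliseconds:
    #
    #     await exchange.fetch_ohlcv('ETH/BTC', '1m', limit=2)
    #     # [[1533402420000, 0.057833, 0.057833, 0.057833, 0.057833, 18.1],
    #     #  [1533402480000, 0.057833, 0.057833, 0.057833, 0.057833, 29.88]]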
async def create_order(self, symbol, type, side, amount, price=None, params={}):
if type == 'market':
# for market buy it requires the amount of quote currency to spend
if side == 'buy':
if self.options['createMarketBuyOrderRequiresPrice']:
if price is None:
raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False to supply the cost in the amount argument(the exchange-specific behaviour)")
else:
amount = amount * price
await self.load_markets()
market = self.market(symbol)
orderType = '1' if (type == 'limit') else '2'
orderSide = side.upper()
amountToPrecision = self.amount_to_precision(symbol, amount)
request = {
'side': orderSide,
'type': orderType,
'symbol': market['id'],
'volume': amountToPrecision,
# An excerpt from their docs:
# side required Trading Direction
# type required pending order types,1:Limit-price Delegation 2:Market- price Delegation
# volume required
# Purchase Quantity(polysemy,multiplex field)
# type=1: Quantity of buying and selling
# type=2: Buying represents gross price, and selling represents total number
# Trading restriction user/me-user information
# price optional Delegation Price:type=2:self parameter is no use.
# fee_is_user_exchange_coin optional
# 0,when making transactions with all platform currencies,
# self parameter represents whether to use them to pay
# fees or not and 0 is no, 1 is yes.
}
priceToPrecision = None
if type == 'limit':
priceToPrecision = self.price_to_precision(symbol, price)
request['price'] = priceToPrecision
response = await self.privatePostCreateOrder(self.extend(request, params))
#
# {code: '0',
# msg: 'suc',
# data: {'order_id' : 34343} }
#
result = self.parse_order(response['data'], market)
return self.extend(result, {
'info': response,
'symbol': symbol,
'type': type,
'side': side,
'status': 'open',
'price': float(priceToPrecision),
'amount': float(amountToPrecision),
})
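    # Hedged usage sketch for create_order above (symbol, price and amount are
    # placeholders, not recommendations): a limit buy returns the id echoed by
    # the exchange plus the locally filled-in fields, e.g.
    #
    #     order = await exchange.create_order('ETH/BTC', 'limit', 'buy', 1.0, 0.057)
    #     # {'id': '34343', 'status': 'open', 'side': 'buy', ...}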
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'order_id': id,
'symbol': market['id'],
}
response = await self.privatePostCancelOrder(self.extend(request, params))
order = self.safe_value(response, 'data', {})
return self.extend(self.parse_order(order), {
'id': id,
'symbol': symbol,
'status': 'canceled',
})
def parse_order_status(self, status):
statuses = {
'0': 'open', # INIT(0,"primary order,untraded and not enter the market")
'1': 'open', # NEW_(1,"new order,untraded and enter the market ")
'2': 'closed', # FILLED(2,"complete deal")
'3': 'open', # PART_FILLED(3,"partial deal")
'4': 'canceled', # CANCELED(4,"already withdrawn")
            '5': 'canceled', # PENDING_CANCEL(5,"pending withdrawal")
'6': 'canceled', # EXPIRED(6,"abnormal orders")
}
if status in statuses:
return statuses[status]
return status
def parse_order(self, order, market=None):
#
# createOrder
#
# {"order_id":34343}
#
# fetchOrder, fetchOpenOrders, fetchClosedOrders
#
# { side: "BUY",
# total_price: "0.10000000",
# created_at: 1510993841000,
# avg_price: "0.10000000",
# countCoin: "btc",
# source: 1,
# type: 1,
# side_msg: "买入",
# volume: "1.000",
# price: "0.10000000",
# source_msg: "WEB",
# status_msg: "完全成交",
# deal_volume: "1.00000000",
# id: 424,
# remain_volume: "0.00000000",
# baseCoin: "eth",
# tradeList: [{ volume: "1.000",
# feeCoin: "YLB",
# price: "0.10000000",
# fee: "0.16431104",
# ctime: 1510996571195,
# deal_price: "0.10000000",
# id: 306,
# type: "买入" }],
# status: 2 }
#
# fetchOrder
#
# {trade_list: [{ volume: "0.010",
# feeCoin: "BTC",
# price: "0.05816200",
# fee: "0.00000029",
# ctime: 1533616674000,
# deal_price: "0.00058162",
# id: 415779,
# type: "卖出" }],
# order_info: { side: "SELL",
# total_price: "0.010",
# created_at: 1533616673000,
# avg_price: "0.05816200",
# countCoin: "btc",
# source: 3,
# type: 2,
# side_msg: "卖出",
# volume: "0.010",
# price: "0.00000000",
# source_msg: "API",
# status_msg: "完全成交",
# deal_volume: "0.01000000",
# id: 3669583,
# remain_volume: "0.00000000",
# baseCoin: "eth",
# tradeList: [{ volume: "0.010",
# feeCoin: "BTC",
# price: "0.05816200",
# fee: "0.00000029",
# ctime: 1533616674000,
# deal_price: "0.00058162",
# id: 415779,
# type: "卖出" }],
# status: 2 }}
#
side = self.safe_string(order, 'side')
if side is not None:
side = side.lower()
status = self.parse_order_status(self.safe_string(order, 'status'))
symbol = None
if market is None:
baseId = self.safe_string(order, 'baseCoin')
quoteId = self.safe_string(order, 'countCoin')
marketId = baseId + quoteId
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
else:
if (baseId is not None) and(quoteId is not None):
base = baseId.upper()
quote = quoteId.upper()
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
if market is not None:
symbol = market['symbol']
timestamp = self.safe_integer(order, 'created_at')
if timestamp is None:
timestring = self.safe_string(order, 'created_at')
if timestring is not None:
timestamp = self.parse8601('2018-' + timestring + ':00Z')
lastTradeTimestamp = None
fee = None
average = self.safe_float(order, 'avg_price')
price = self.safe_float(order, 'price')
if price == 0:
price = average
amount = self.safe_float(order, 'volume')
filled = self.safe_float(order, 'deal_volume')
remaining = self.safe_float(order, 'remain_volume')
cost = self.safe_float(order, 'total_price')
id = self.safe_string_2(order, 'id', 'order_id')
trades = None
tradeList = self.safe_value(order, 'tradeList', [])
feeCurrencies = {}
feeCost = None
for i in range(0, len(tradeList)):
trade = self.parse_trade(tradeList[i], market)
if feeCost is None:
feeCost = 0
feeCost = feeCost + trade['fee']['cost']
tradeFeeCurrency = trade['fee']['currency']
feeCurrencies[tradeFeeCurrency] = trade['fee']['cost']
if trades is None:
trades = []
lastTradeTimestamp = trade['timestamp']
trades.append(self.extend(trade, {
'order': id,
}))
if feeCost is not None:
feeCurrency = None
keys = list(feeCurrencies.keys())
numCurrencies = len(keys)
if numCurrencies == 1:
feeCurrency = keys[0]
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
result = {
'info': order,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': 'limit',
'side': side,
'price': price,
'cost': cost,
'average': average,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': trades,
}
return result
async def fetch_orders_with_method(self, method, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrdersWithMethod() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
# pageSize optional page size
# page optional page number
'symbol': market['id'],
}
if limit is not None:
request['pageSize'] = limit
response = await getattr(self, method)(self.extend(request, params))
#
# {code: "0",
# msg: "suc",
# data: { count: 1,
# orderList: [{ side: "SELL",
# total_price: "0.010",
# created_at: 1533616673000,
# avg_price: "0.05816200",
# countCoin: "btc",
# source: 3,
# type: 2,
# side_msg: "卖出",
# volume: "0.010",
# price: "0.00000000",
# source_msg: "API",
# status_msg: "完全成交",
# deal_volume: "0.01000000",
# id: 3669583,
# remain_volume: "0.00000000",
# baseCoin: "eth",
# tradeList: [{ volume: "0.010",
# feeCoin: "BTC",
# price: "0.05816200",
# fee: "0.00000029",
# ctime: 1533616674000,
# deal_price: "0.00058162",
# id: 415779,
# type: "卖出" }],
# status: 2 }]} }
#
# privateGetNewOrder returns resultList, privateGetAllOrder returns orderList
orders = self.safe_value_2(response['data'], 'orderList', 'resultList', [])
return self.parse_orders(orders, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_with_method('privateGetNewOrder', symbol, since, limit, params)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_with_method('privateGetAllOrder', symbol, since, limit, params)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'order_id': id,
'symbol': market['id'],
}
response = await self.privateGetOrderInfo(self.extend(request, params))
#
# {code: "0",
# msg: "suc",
# data: {trade_list: [{ volume: "0.010",
# feeCoin: "BTC",
# price: "0.05816200",
# fee: "0.00000029",
# ctime: 1533616674000,
# deal_price: "0.00058162",
# id: 415779,
# type: "卖出" }],
# order_info: { side: "SELL",
# total_price: "0.010",
# created_at: 1533616673000,
# avg_price: "0.05816200",
# countCoin: "btc",
# source: 3,
# type: 2,
# side_msg: "卖出",
# volume: "0.010",
# price: "0.00000000",
# source_msg: "API",
# status_msg: "完全成交",
# deal_volume: "0.01000000",
# id: 3669583,
# remain_volume: "0.00000000",
# baseCoin: "eth",
# tradeList: [{ volume: "0.010",
# feeCoin: "BTC",
# price: "0.05816200",
# fee: "0.00000029",
# ctime: 1533616674000,
# deal_price: "0.00058162",
# id: 415779,
# type: "卖出" }],
# status: 2 }} }
#
return self.parse_order(response['data']['order_info'], market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
# pageSize optional page size
# page optional page number
'symbol': market['id'],
}
if limit is not None:
request['pageSize'] = limit
response = await self.privateGetAllTrade(self.extend(request, params))
#
# {code: "0",
# msg: "suc",
# data: { count: 1,
# resultList: [{ volume: "0.010",
# side: "SELL",
# feeCoin: "BTC",
# price: "0.05816200",
# fee: "0.00000029",
# ctime: 1533616674000,
# deal_price: "0.00058162",
# id: 415779,
# type: "卖出",
# bid_id: 3669539,
# ask_id: 3669583 }]} }
#
trades = self.safe_value(response['data'], 'resultList', [])
return self.parse_trades(trades, market, since, limit)
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'coin': currency['id'],
}
# https://github.com/UEX-OpenAPI/API_Docs_en/wiki/Query-deposit-address-of-assigned-token
response = await self.privateGetDepositAddressList(self.extend(request, params))
#
# {
# "code": "0",
# "msg": "suc",
# "data": {
# "addressList": [
# {
# "address": "0x198803ef8e0df9e8812c0105421885e843e6d2e2",
# "tag": "",
# },
# ],
# },
# }
#
data = self.safe_value(response, 'data')
if data is None:
raise InvalidAddress(self.id + ' privateGetDepositAddressList() returned no data')
addressList = self.safe_value(data, 'addressList')
if addressList is None:
raise InvalidAddress(self.id + ' privateGetDepositAddressList() returned no address list')
numAddresses = len(addressList)
if numAddresses < 1:
raise InvalidAddress(self.id + ' privatePostDepositAddresses() returned no addresses')
firstAddress = addressList[0]
address = self.safe_string(firstAddress, 'address')
tag = self.safe_string(firstAddress, 'tag')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': response,
}
async def fetch_transactions_by_type(self, type, code=None, since=None, limit=None, params={}):
if code is None:
raise ArgumentsRequired(self.id + ' fetchWithdrawals requires a currency code argument')
currency = self.currency(code)
request = {
'coin': currency['id'],
}
if limit is not None:
request['pageSize'] = limit # default 10
transactionType = 'deposit' if (type == 'deposit') else 'withdraw' # instead of withdrawal...
method = 'privateGet' + self.capitalize(transactionType) + 'History'
# https://github.com/UEX-OpenAPI/API_Docs_en/wiki/Query-deposit-record-of-assigned-token
# https://github.com/UEX-OpenAPI/API_Docs_en/wiki/Query-withdraw-record-of-assigned-token
response = await getattr(self, method)(self.extend(request, params))
#
# {code: "0",
# msg: "suc",
# data: {depositList: [{ createdAt: 1533615955000,
# amount: "0.01",
# updateAt: 1533616311000,
# txid: "0x0922fde6ab8270fe6eb31cb5a37dc732d96dc8193f81cf46c4ab29fde…",
# tag: "",
# confirmations: 30,
# addressTo: "0x198803ef8e0df9e8812c0105421885e843e6d2e2",
# status: 1,
# coin: "ETH" }]} }
#
# {
# "code": "0",
# "msg": "suc",
# "data": {
# "withdrawList": [{
# "updateAt": 1540344965000,
# "createdAt": 1539311971000,
# "status": 0,
# "addressTo": "tz1d7DXJXU3AKWh77gSmpP7hWTeDYs8WF18q",
# "tag": "100128877",
# "id": 5,
# "txid": "",
# "fee": 0.0,
# "amount": "1",
# "symbol": "XTZ"
# }]
# }
# }
#
transactions = self.safe_value(response['data'], transactionType + 'List')
return self.parse_transactions_by_type(type, transactions, code, since, limit)
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
return await self.fetch_transactions_by_type('deposit', code, since, limit, params)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
return await self.fetch_transactions_by_type('withdrawal', code, since, limit, params)
def parse_transactions_by_type(self, type, transactions, code=None, since=None, limit=None):
result = []
for i in range(0, len(transactions)):
transaction = self.parse_transaction(self.extend({
'type': type,
}, transactions[i]))
result.append(transaction)
return self.filterByCurrencySinceLimit(result, code, since, limit)
def parse_transaction(self, transaction, currency=None):
#
# deposits
#
# { createdAt: 1533615955000,
# amount: "0.01",
# updateAt: 1533616311000,
# txid: "0x0922fde6ab8270fe6eb31cb5a37dc732d96dc8193f81cf46c4ab29fde…",
# tag: "",
# confirmations: 30,
# addressTo: "0x198803ef8e0df9e8812c0105421885e843e6d2e2",
# status: 1,
# coin: "ETH" }]} }
#
# withdrawals
#
# {
# "updateAt": 1540344965000,
# "createdAt": 1539311971000,
# "status": 0,
# "addressTo": "tz1d7DXJXU3AKWh77gSmpP7hWTeDYs8WF18q",
# "tag": "100128877",
# "id": 5,
# "txid": "",
# "fee": 0.0,
# "amount": "1",
# "symbol": "XTZ"
# }
#
id = self.safe_string(transaction, 'id')
txid = self.safe_string(transaction, 'txid')
timestamp = self.safe_integer(transaction, 'createdAt')
updated = self.safe_integer(transaction, 'updateAt')
code = None
currencyId = self.safe_string_2(transaction, 'symbol', 'coin')
currency = self.safe_value(self.currencies_by_id, currencyId)
if currency is not None:
code = currency['code']
else:
code = self.common_currency_code(currencyId)
address = self.safe_string(transaction, 'addressTo')
tag = self.safe_string(transaction, 'tag')
amount = self.safe_float(transaction, 'amount')
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
type = self.safe_string(transaction, 'type') # injected from the outside
feeCost = self.safe_float(transaction, 'fee')
if (type == 'deposit') and(feeCost is None):
feeCost = 0
return {
'info': transaction,
'id': id,
'currency': code,
'amount': amount,
'address': address,
'tag': tag,
'status': status,
'type': type,
'updated': updated,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': {
'currency': code,
'cost': feeCost,
},
}
def parse_transaction_status(self, status):
statuses = {
'0': 'pending', # unaudited
'1': 'ok', # audited
'2': 'failed', # audit failed
'3': 'pending', # "payment"
'4': 'failed', # payment failed
'5': 'ok',
'6': 'canceled',
}
return self.safe_string(statuses, status, status)
async def withdraw(self, code, amount, address, tag=None, params={}):
await self.load_markets()
fee = self.safe_float(params, 'fee')
if fee is None:
            raise ArgumentsRequired(self.id + ' requires a "fee" extra parameter in its last argument')
self.check_address(address)
currency = self.currency(code)
request = {
'coin': currency['id'],
'address': address, # only supports existing addresses in your withdraw address list
'amount': amount,
'fee': fee, # balance >= self.sum(amount, fee)
}
if tag is not None:
request['tag'] = tag
# https://github.com/UEX-OpenAPI/API_Docs_en/wiki/Withdraw
response = await self.privatePostCreateWithdraw(self.extend(request, params))
id = None
return {
'info': response,
'id': id,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.implode_params(path, params)
if api == 'public':
if params:
url += '?' + self.urlencode(params)
else:
self.check_required_credentials()
timestamp = str(self.seconds())
auth = ''
query = self.keysort(self.extend(params, {
'api_key': self.apiKey,
'time': timestamp,
}))
keys = list(query.keys())
for i in range(0, len(keys)):
key = keys[i]
auth += key
auth += str(query[key])
signature = self.hash(self.encode(auth + self.secret))
if query:
if method == 'GET':
url += '?' + self.urlencode(query) + '&sign=' + signature
else:
body = self.urlencode(query) + '&sign=' + signature
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
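    # Hedged illustration of the signing scheme implemented above: the request
    # parameters plus api_key and time are sorted by key, concatenated as
    # key-then-value, the secret is appended, and the whole string is hashed
    # (self.hash defaults to MD5 in ccxt). With placeholder credentials:
    #
    #     query = {'api_key': 'KEY', 'symbol': 'ethbtc', 'time': '1533616673'}
    #     auth = 'api_key' + 'KEY' + 'symbol' + 'ethbtc' + 'time' + '1533616673'
    #     sign = hashlib.md5((auth + 'SECRET').encode()).hexdigest()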
def handle_errors(self, httpCode, reason, url, method, headers, body, response):
if not isinstance(body, basestring):
return # fallback to default error handler
if len(body) < 2:
return # fallback to default error handler
if (body[0] == '{') or (body[0] == '['):
response = json.loads(body)
#
# {"code":"0","msg":"suc","data":{}}
#
code = self.safe_string(response, 'code')
# message = self.safe_string(response, 'msg')
feedback = self.id + ' ' + self.json(response)
exceptions = self.exceptions
if code != '0':
if code in exceptions:
raise exceptions[code](feedback)
else:
raise ExchangeError(feedback)
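# Hedged usage sketch (not part of the generated file): minimal async round trip
# with this class; credentials and the symbol are placeholders.
if __name__ == "__main__":
    import asyncio
    async def _demo():
        exchange = uex({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
        try:
            await exchange.load_markets()
            print(await exchange.fetch_ticker('ETH/BTC'))
        finally:
            await exchange.close()
    asyncio.get_event_loop().run_until_complete(_demo())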
# coin_list: [{ normal: "0.00000000",
# btcValuatin: "0.00000000",
# locked: "0.00000000",
# coin: "usdt" },
# { normal: "0.00000000",
# btcValuatin: "0.00000000",
# locked: "0.00000000",
# coin: "btc" },
# { normal: "0.00000000",
# btcValuatin: "0.00000000",
# locked: "0.00000000",
# coin: "eth" },
# { normal: "0.00000000",
# btcValuatin: "0.00000000",
# locked: "0.00000000",
# coin: "ren" }]}}
#
balances = response['data']['coin_list']
result = {'info': balances}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = balance['coin']
code = currencyId.upper()
if currencyId in self.currencies_by_id:
code = self.currencies_by_id[currencyId]['code']
else:
code = self.common_currency_code(code)
account = self.account()
free = float(balance['normal'])
used = float(balance['locked'])
total = self.sum(free, used)
account['free'] = free
account['used'] = used
account['total'] = total
result[code] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
response = await self.publicGetMarketDept(self.extend({
'symbol': self.market_id(symbol),
'type': 'step0', # step1, step2 from most detailed to least detailed
}, params))
#
# {code: "0",
# msg: "suc",
# data: {tick: {asks: [["0.05824200", 9.77],
# ["0.05830000", 7.81],
# ["0.05832900", 8.59],
# ["0.10000000", 0.001] ],
# bids: [["0.05780000", 8.25],
# ["0.05775000", 8.12],
# ["0.05773200", 8.57],
# ["0.00010000", 0.79] ],
# time: 1533412622463 }} }
#
timestamp = self.safe_integer(response['data']['tick'], 'time')
return self.parse_order_book(response['data']['tick'], timestamp)
def parse_ticker(self, ticker, market=None):
#
# {code: "0",
# msg: "suc",
# data: {symbol: "ETHBTC",
# high: 0.058426,
# vol: 19055.875,
# last: 0.058019,
# low: 0.055802,
# change: 0.03437271,
# buy: "0.05780000",
# sell: "0.05824200",
# time: 1533413083184} }
#
timestamp = self.safe_integer(ticker, 'time')
symbol = None
if market is None:
marketId = self.safe_string(ticker, 'symbol')
marketId = marketId.lower()
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if market is not None:
symbol = market['symbol']
last = self.safe_float(ticker, 'last')
change = self.safe_float(ticker, 'change')
percentage = change * 100
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'buy'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'sell'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': percentage,
'average': None,
'baseVolume': self.safe_float(ticker, 'vol'),
'quoteVolume': None,
'info': ticker,
}
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetGetTicker(self.extend({
'symbol': market['id'],
}, params))
#
# {code: "0",
# msg: "suc",
# data: {symbol: "ETHBTC",
# high: 0.058426,
# vol: 19055.875,
# last: 0.058019,
# low: 0.055802,
# change: 0.03437271,
# buy: "0.05780000",
# sell: "0.05824200",
# time: 1533413083184} }
#
return self.parse_ticker(response['data'], market)
def parse_trade(self, trade, market=None):
#
# public fetchTrades
#
# { amount: 0.88,
# create_time: 1533414358000,
# price: 0.058019,
# id: 406531,
# type: "sell" },
#
# private fetchMyTrades, fetchOrder, fetchOpenOrders, fetchClosedOrders
#
# { volume: "0.010",
# side: "SELL",
# feeCoin: "BTC",
# price: "0.05816200",
# fee: "0.00000029",
# ctime: 1533616674000,
# deal_price: "0.00058162",
# id: 415779,
# type: "卖出",
# bid_id: 3669539, # only in fetchMyTrades
# ask_id: 3669583, # only in fetchMyTrades
# }
#
timestamp = self.safe_integer_2(trade, 'create_time', 'ctime')
if timestamp is None:
timestring = self.safe_string(trade, 'created_at')
if timestring is not None:
timestamp = self.parse8601('2018-' + timestring + ':00Z')
side = self.safe_string_2(trade, 'side', 'type')
if side is not None:
side = side.lower()
id = self.safe_string(trade, 'id')
symbol = None
if market is not None:
symbol = market['symbol']
price = self.safe_float(trade, 'price')
amount = self.safe_float_2(trade, 'volume', 'amount')
cost = self.safe_float(trade, 'deal_price')
if cost is None:
if amount is not None:
if price is not None:
cost = amount * price
fee = None
feeCost = self.safe_float_2(trade, 'fee', 'deal_fee')
if feeCost is not None:
feeCurrency = self.safe_string(trade, 'feeCoin')
if feeCurrency is not None:
currencyId = feeCurrency.lower()
if currencyId in self.currencies_by_id:
feeCurrency = self.currencies_by_id[currencyId]['code']
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
orderIdField = 'ask_id' if (side == 'sell') else 'bid_id'
orderId = self.safe_string(trade, orderIdField)
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': None,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetGetTrades(self.extend({
'symbol': market['id'],
}, params))
#
# {code: "0",
# msg: "suc",
# data: [{ amount: 0.88,
# create_time: 1533414358000,
# price: 0.058019,
# id: 406531,
# type: "sell" },
# { amount: 4.88,
# create_time: 1533414331000,
# price: 0.058019,
# id: 406530,
# type: "buy" },
# { amount: 0.5,
# create_time: 1533414311000,
# price: 0.058019,
# id: 406529,
# type: "sell" }]}
#
return self.parse_trades(response['data'], market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1d', since=None, limit=None):
return [
ohlcv[0] * 1000, # timestamp
ohlcv[1], # open
ohlcv[2], # high
ohlcv[3], # low
ohlcv[4], # close
ohlcv[5], # volume
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'period': self.timeframes[timeframe], # in minutes
}
response = await self.publicGetGetRecords(self.extend(request, params))
#
# {code: '0',
# msg: 'suc',
# data:
# [[1533402420, 0.057833, 0.057833, 0.057833, 0.057833, 18.1],
# [1533402480, 0.057833, 0.057833, 0.057833, 0.057833, 29.88],
# [1533402540, 0.057833, 0.057833, 0.057833, 0.057833, 29.06] ]}
#
return self.parse_ohlcvs(response['data'], market, timeframe, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
if type == 'market':
# for market buy it requires the amount of quote currency to spend
if side == 'buy':
if self.options['createMarketBuyOrderRequiresPrice']:
if price is None:
raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False to supply the cost in the amount argument(the exchange-specific behaviour)")
else:
amount = amount * price
await self.load_markets()
market = self.market(symbol)
orderType = '1' if (type == 'limit') else '2'
orderSide = side.upper()
amountToPrecision = self.amount_to_precision(symbol, amount)
request = {
'side': orderSide,
'type': orderType,
'symbol': market['id'],
'volume': amountToPrecision,
# An excerpt from their docs:
# side required Trading Direction
# type required pending order types,1:Limit-price Delegation 2:Market- price Delegation
# volume required
# Purchase Quantity(polysemy,multiplex field)
# type=1: Quantity of buying and selling
# type=2: Buying represents gross price, and selling represents total number
# Trading restriction user/me-user information
# price optional Delegation Price:type=2:self parameter is no use.
# fee_is_user_exchange_coin optional
# 0,when making transactions with all platform currencies,
# self parameter represents whether to use them to pay
# fees or not and 0 is no, 1 is yes.
}
priceToPrecision = None
if type == 'limit':
priceToPrecision = self.price_to_precision(symbol, price)
request['price'] = priceToPrecision
response = await self.privatePostCreateOrder(self.extend(request, params))
#
# {code: '0',
# msg: 'suc',
# data: {'order_id' : 34343} }
#
result = self.parse_order(response['data'], market)
return self.extend(result, {
'info': response,
'symbol': symbol,
'type': type,
'side': side,
'status': 'open',
'price': float(priceToPrecision),
'amount': float(amountToPrecision),
})
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'order_id': id,
'symbol': market['id'],
}
response = await self.privatePostCancelOrder(self.extend(request, params))
order = self.safe_value(response, 'data', {})
return self.extend(self.parse_order(order), {
'id': id,
'symbol': symbol,
'status': 'canceled',
})
def parse_order_status(self, status):
statuses = {
'0': 'open', # INIT(0,"primary order,untraded and not enter the market")
'1': 'open', # NEW_(1,"new order,untraded and enter the market ")
'2': 'closed', # FILLED(2,"complete deal")
'3': 'open', # PART_FILLED(3,"partial deal")
'4': 'canceled', # CANCELED(4,"already withdrawn")
'5': 'canceled', # PENDING_CANCEL(5,"pending withdrawak")
'6': 'canceled', # EXPIRED(6,"abnormal orders")
}
if status in statuses:
return statuses[status]
return status
def parse_order(self, order, market=None):
#
# createOrder
#
# {"order_id":34343}
#
# fetchOrder, fetchOpenOrders, fetchClosedOrders
#
# { side: "BUY",
# total_price: "0.10000000",
# created_at: 1510993841000,
# avg_price: "0.10000000",
# countCoin: "btc",
# source: 1,
# type: 1,
# side_msg: "买入",
# volume: "1.000",
# price: "0.10000000",
# source_msg: "WEB",
# status_msg: "完全成交",
# deal_volume: "1.00000000",
# id: 424,
# remain_volume: "0.00000000",
# baseCoin: "eth",
# tradeList: [{ volume: "1.000",
# feeCoin: "YLB",
# price: "0.10000000",
# fee: "0.16431104",
# ctime: 1510996571195,
# deal_price: "0.10000000",
# id: 306,
# type: "买入" }],
# status: 2 }
#
# fetchOrder
#
# {trade_list: [{ volume: "0.010",
# feeCoin: "BTC",
# price: "0.05816200",
# fee: "0.00000029",
# ctime: 1533616674000,
# deal_price: "0.00058162",
# id: 415779,
# type: "卖出" }],
# order_info: { side: "SELL",
# total_price: "0.010",
# created_at: 1533616673000,
# avg_price: "0.05816200",
# countCoin: "btc",
# source: 3,
# type: 2,
# side_msg: "卖出",
# volume: "0.010",
# price: "0.00000000",
# source_msg: "API",
# status_msg: "完全成交",
# deal_volume: "0.01000000",
# id: 3669583,
# remain_volume: "0.00000000",
# baseCoin: "eth",
# tradeList: [{ volume: "0.010",
# feeCoin: "BTC",
# price: "0.05816200",
# fee: "0.00000029",
# ctime: 1533616674000,
# deal_price: "0.00058162",
# id: 415779,
# type: "卖出" }],
# status: 2 }}
#
side = self.safe_string(order, 'side')
if side is not None:
side = side.lower()
status = self.parse_order_status(self.safe_string(order, 'status'))
symbol = None
if market is None:
baseId = self.safe_string(order, 'baseCoin')
quoteId = self.safe_string(order, 'countCoin')
marketId = baseId + quoteId
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
else:
                if (baseId is not None) and (quoteId is not None):
base = baseId.upper()
quote = quoteId.upper()
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
if market is not None:
symbol = market['symbol']
timestamp = self.safe_integer(order, 'created_at')
if timestamp is None:
timestring = self.safe_string(order, 'created_at')
if timestring is not None:
timestamp = self.parse8601('2018-' + timestring + ':00Z')
lastTradeTimestamp = None
fee = None
average = self.safe_float(order, 'avg_price')
price = self.safe_float(order, 'price')
if price == 0:
price = average
amount = self.safe_float(order, 'volume')
filled = self.safe_float(order, 'deal_volume')
remaining = self.safe_float(order, 'remain_volume')
cost = self.safe_float(order, 'total_price')
id = self.safe_string_2(order, 'id', 'order_id')
trades = None
tradeList = self.safe_value(order, 'tradeList', [])
feeCurrencies = {}
feeCost = None
for i in range(0, len(tradeList)):
trade = self.parse_trade(tradeList[i], market)
if feeCost is None:
feeCost = 0
feeCost = feeCost + trade['fee']['cost']
tradeFeeCurrency = trade['fee']['currency']
feeCurrencies[tradeFeeCurrency] = trade['fee']['cost']
if trades is None:
trades = []
lastTradeTimestamp = trade['timestamp']
trades.append(self.extend(trade, {
'order': id,
}))
if feeCost is not None:
feeCurrency = None
keys = list(feeCurrencies.keys())
numCurrencies = len(keys)
if numCurrencies == 1:
feeCurrency = keys[0]
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
result = {
'info': order,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': 'limit',
'side': side,
'price': price,
'cost': cost,
'average': average,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': trades,
}
return result
async def fetch_orders_with_method(self, method, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrdersWithMethod() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
# pageSize optional page size
# page optional page number
'symbol': market['id'],
}
if limit is not None:
request['pageSize'] = limit
response = await getattr(self, method)(self.extend(request, params))
#
# {code: "0",
# msg: "suc",
# data: { count: 1,
# orderList: [{ side: "SELL",
# total_price: "0.010",
# created_at: 1533616673000,
# avg_price: "0.05816200",
# countCoin: "btc",
# source: 3,
# type: 2,
# side_msg: "卖出",
# volume: "0.010",
# price: "0.00000000",
# source_msg: "API",
# status_msg: "完全成交",
# deal_volume: "0.01000000",
# id: 3669583,
# remain_volume: "0.00000000",
# baseCoin: "eth",
# tradeList: [{ volume: "0.010",
# feeCoin: "BTC",
# price: "0.05816200",
# fee: "0.00000029",
# ctime: 1533616674000,
# deal_price: "0.00058162",
# id: 415779,
# type: "卖出" }],
# status: 2 }]} }
#
# privateGetNewOrder returns resultList, privateGetAllOrder returns orderList
orders = self.safe_value_2(response['data'], 'orderList', 'resultList', [])
return self.parse_orders(orders, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_with_method('privateGetNewOrder', symbol, since, limit, params)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_with_method('privateGetAllOrder', symbol, since, limit, params)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'order_id': id,
'symbol': market['id'],
}
response = await self.privateGetOrderInfo(self.extend(request, params))
#
# {code: "0",
# msg: "suc",
# data: {trade_list: [{ volume: "0.010",
# feeCoin: "BTC",
# price: "0.05816200",
# fee: "0.00000029",
# ctime: 1533616674000,
# deal_price: "0.00058162",
# id: 415779,
# type: "卖出" }],
# order_info: { side: "SELL",
# total_price: "0.010",
# created_at: 1533616673000,
# avg_price: "0.05816200",
# countCoin: "btc",
# source: 3,
# type: 2,
# side_msg: "卖出",
# volume: "0.010",
# price: "0.00000000",
# source_msg: "API",
# status_msg: "完全成交",
# deal_volume: "0.01000000",
# id: 3669583,
# remain_volume: "0.00000000",
# baseCoin: "eth",
# tradeList: [{ volume: "0.010",
# feeCoin: "BTC",
# price: "0.05816200",
# fee: "0.00000029",
# ctime: 1533616674000,
# deal_price: "0.00058162",
# id: 415779,
# type: "卖出" }],
# status: 2 }} }
#
return self.parse_order(response['data']['order_info'], market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
# pageSize optional page size
# page optional page number
'symbol': market['id'],
}
if limit is not None:
request['pageSize'] = limit
response = await self.privateGetAllTrade(self.extend(request, params))
#
# {code: "0",
# msg: "suc",
# data: { count: 1,
# resultList: [{ volume: "0.010",
# side: "SELL",
# feeCoin: "BTC",
# price: "0.05816200",
# fee: "0.00000029",
# ctime: 1533616674000,
# deal_price: "0.00058162",
# id: 415779,
# type: "卖出",
# bid_id: 3669539,
# ask_id: 3669583 }]} }
#
trades = self.safe_value(response['data'], 'resultList', [])
return self.parse_trades(trades, market, since, limit)
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'coin': currency['id'],
}
# https://github.com/UEX-OpenAPI/API_Docs_en/wiki/Query-deposit-address-of-assigned-token
response = await self.privateGetDepositAddressList(self.extend(request, params))
#
# {
# "code": "0",
# "msg": "suc",
# "data": {
# "addressList": [
# {
# "address": "0x198803ef8e0df9e8812c0105421885e843e6d2e2",
# "tag": "",
# },
# ],
# },
# }
#
data = self.safe_value(response, 'data')
if data is None:
raise InvalidAddress(self.id + ' privateGetDepositAddressList() returned no data')
addressList = self.safe_value(data, 'addressList')
if addressList is None:
raise InvalidAddress(self.id + ' privateGetDepositAddressList() returned no address list')
numAddresses = len(addressList)
if numAddresses < 1:
raise InvalidAddress(self.id + ' privatePostDepositAddresses() returned no addresses')
firstAddress = addressList[0]
address = self.safe_string(firstAddress, 'address')
tag = self.safe_string(firstAddress, 'tag')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': response,
}
async def fetch_transactions_by_type(self, type, code=None, since=None, limit=None, params={}):
if code is None:
raise ArgumentsRequired(self.id + ' fetchWithdrawals requires a currency code argument')
currency = self.currency(code)
request = {
'coin': currency['id'],
}
if limit is not None:
request['pageSize'] = limit # default 10
transactionType = 'deposit' if (type == 'deposit') else 'withdraw' # instead of withdrawal...
method = 'privateGet' + self.capitalize(transactionType) + 'History'
# https://github.com/UEX-OpenAPI/API_Docs_en/wiki/Query-deposit-record-of-assigned-token
# https://github.com/UEX-OpenAPI/API_Docs_en/wiki/Query-withdraw-record-of-assigned-token
response = await getattr(self, method)(self.extend(request, params))
#
# {code: "0",
# msg: "suc",
# data: {depositList: [{ createdAt: 1533615955000,
# amount: "0.01",
# updateAt: 1533616311000,
# txid: "0x0922fde6ab8270fe6eb31cb5a37dc732d96dc8193f81cf46c4ab29fde…",
# tag: "",
# confirmations: 30,
# addressTo: "0x198803ef8e0df9e8812c0105421885e843e6d2e2",
# status: 1,
# coin: "ETH" }]} }
#
# {
# "code": "0",
# "msg": "suc",
# "data": {
# "withdrawList": [{
# "updateAt": 1540344965000,
# "createdAt": 1539311971000,
# "status": 0,
# "addressTo": "tz1d7DXJXU3AKWh77gSmpP7hWTeDYs8WF18q",
# "tag": "100128877",
# "id": 5,
# "txid": "",
# "fee": 0.0,
# "amount": "1",
# "symbol": "XTZ"
# }]
# }
# }
#
transactions = self.safe_value(response['data'], transactionType + 'List')
return self.parse_transactions_by_type(type, transactions, code, since, limit)
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
return await self.fetch_transactions_by_type('deposit', code, since, limit, params)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
return await self.fetch_transactions_by_type('withdrawal', code, since, limit, params)
def parse_transactions_by_type(self, type, transactions, code=None, since=None, limit=None):
result = []
for i in range(0, len(transactions)):
transaction = self.parse_transaction(self.extend({
'type': type,
}, transactions[i]))
result.append(transaction)
return self.filterByCurrencySinceLimit(result, code, since, limit)
def parse_transaction(self, transaction, currency=None):
#
# deposits
#
# { createdAt: 1533615955000,
# amount: "0.01",
# updateAt: 1533616311000,
# txid: "0x0922fde6ab8270fe6eb31cb5a37dc732d96dc8193f81cf46c4ab29fde…",
# tag: "",
# confirmations: 30,
# addressTo: "0x198803ef8e0df9e8812c0105421885e843e6d2e2",
# status: 1,
# coin: "ETH" }]} }
#
# withdrawals
#
# {
# "updateAt": 1540344965000,
# "createdAt": 1539311971000,
# "status": 0,
# "addressTo": "tz1d7DXJXU3AKWh77gSmpP7hWTeDYs8WF18q",
# "tag": "100128877",
# "id": 5,
# "txid": "",
# "fee": 0.0,
# "amount": "1",
# "symbol": "XTZ"
# }
#
id = self.safe_string(transaction, 'id')
txid = self.safe_string(transaction, 'txid')
timestamp = self.safe_integer(transaction, 'createdAt')
updated = self.safe_integer(transaction, 'updateAt')
code = None
currencyId = self.safe_string_2(transaction, 'symbol', 'coin')
currency = self.safe_value(self.currencies_by_id, currencyId)
if currency is not None:
code = currency['code']
else:
code = self.common_currency_code(currencyId)
address = self.safe_string(transaction, 'addressTo')
tag = self.safe_string(transaction, 'tag')
amount = self.safe_float(transaction, 'amount')
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
type = self.safe_string(transaction, 'type') # injected from the outside
feeCost = self.safe_float(transaction, 'fee')
        if (type == 'deposit') and (feeCost is None):
feeCost = 0
return {
'info': transaction,
'id': id,
'currency': code,
'amount': amount,
'address': address,
'tag': tag,
'status': status,
'type': type,
'updated': updated,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': {
'currency': code,
'cost': feeCost,
},
}
def parse_transaction_status(self, status):
statuses = {
'0': 'pending', # unaudited
'1': 'ok', # audited
'2': 'failed', # audit failed
'3': 'pending', # "payment"
'4': 'failed', # payment failed
'5': 'ok',
'6': 'canceled',
}
return self.safe_string(statuses, status, status)
async def withdraw(self, code, amount, address, tag=None, params={}):
await self.load_markets()
fee = self.safe_float(params, 'fee')
if fee is None:
            raise ArgumentsRequired(self.id + ' requires a "fee" extra parameter in its last argument')
self.check_address(address)
currency = self.currency(code)
request = {
'coin': currency['id'],
'address': address, # only supports existing addresses in your withdraw address list
'amount': amount,
'fee': fee, # balance >= self.sum(amount, fee)
}
if tag is not None:
request['tag'] = tag
# https://github.com/UEX-OpenAPI/API_Docs_en/wiki/Withdraw
response = await self.privatePostCreateWithdraw(self.extend(request, params))
id = None
return {
'info': response,
'id': id,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.implode_params(path, params)
if api == 'public':
if params:
url += '?' + self.urlencode(params)
else:
self.check_required_credentials()
timestamp = str(self.seconds())
auth = ''
query = self.keysort(self.extend(params, {
'api_key': self.apiKey,
'time': timestamp,
}))
keys = list(query.keys())
for i in range(0, len(keys)):
key = keys[i]
auth += key
auth += str(query[key])
signature = self.hash(self.encode(auth + self.secret))
if query:
if method == 'GET':
url += '?' + self.urlencode(query) + '&sign=' + signature
else:
body = self.urlencode(query) + '&sign=' + signature
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response):
if not isinstance(body, basestring):
return # fallback to default error handler
if len(body) < 2:
return # fallback to default error handler
if (body[0] == '{') or (body[0] == '['):
response = json.loads(body)
#
# {"code":"0","msg":"suc","data":{}}
#
code = self.safe_string(response, 'code')
# message = self.safe_string(response, 'msg')
feedback = self.id + ' ' + self.json(response)
exceptions = self.exceptions
if code != '0':
if code in exceptions:
raise exceptions[code](feedback)
else:
raise ExchangeError(feedback)
| en | 0.539473 | # -*- coding: utf-8 -*- # PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN: # https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code # ----------------------------------------------------------------------------- # Python 3 # Python 2 # new metainfo interface # funding limits # ohlcvs # dept here is not a typo... they mean depth # an assoc array of market ids to corresponding prices traded most recently(prices of last trades per market) # a list of currently open orders # descriptions from ↓ exchange # '0': 'no error', # succeed # {"code":"4","msg":"余额不足:0E-16","data":null} # fail to order {"code":"5","msg":"Price fluctuates more than1000.0%","data":null} # the quantity value less than the minimum one {"code":"6","msg":"数量小于最小值:0.001","data":null} # the quantity value more than the maximum one {"code":"7","msg":"数量大于最大值:10000","data":null} # fail to cancel order # transaction be frozen # Sorry, the program made an error, please contact with the manager. # Available balance is insufficient. # The order does not exist. {"code":"22","msg":"not exist order","data":null} # Lack of parameters of numbers of transaction # Lack of parameters of transaction price # System is abnormal # Update System # {"code":"100004","msg":"request parameter illegal","data":null} # {"code":"100005","msg":"request sign illegal","data":null} # illegal IP # unknown currency code # fund password error # fund password error # Available balance is insufficient. # Username does not exist. # Phone number is registered. # Email box is registered. # Account is locked by background manager # The user has no authority to do self operation. # fail to recharge # fail to withdraw # {"code":"-100","msg":"Your request path is not exist or you can try method GET/POST.","data":null} # {"msg":"System maintenancenot ","code":"-1000","data":null} # # {code: "0", # msg: "suc", # data: [{ symbol: "btcusdt", # count_coin: "usdt", # amount_precision: 3, # base_coin: "btc", # price_precision: 2 }, # { symbol: "ethusdt", # count_coin: "usdt", # amount_precision: 3, # base_coin: "eth", # price_precision: 2 }, # { symbol: "ethbtc", # count_coin: "btc", # amount_precision: 3, # base_coin: "eth", # price_precision: 6 }]} # # # {code: "0", # msg: "suc", # data: {total_asset: "0.00000000", # coin_list: [{ normal: "0.00000000", # btcValuatin: "0.00000000", # locked: "0.00000000", # coin: "usdt" }, # { normal: "0.00000000", # btcValuatin: "0.00000000", # locked: "0.00000000", # coin: "btc" }, # { normal: "0.00000000", # btcValuatin: "0.00000000", # locked: "0.00000000", # coin: "eth" }, # { normal: "0.00000000", # btcValuatin: "0.00000000", # locked: "0.00000000", # coin: "ren" }]}} # # step1, step2 from most detailed to least detailed # # {code: "0", # msg: "suc", # data: {tick: {asks: [["0.05824200", 9.77], # ["0.05830000", 7.81], # ["0.05832900", 8.59], # ["0.10000000", 0.001] ], # bids: [["0.05780000", 8.25], # ["0.05775000", 8.12], # ["0.05773200", 8.57], # ["0.00010000", 0.79] ], # time: 1533412622463 }} } # # # {code: "0", # msg: "suc", # data: {symbol: "ETHBTC", # high: 0.058426, # vol: 19055.875, # last: 0.058019, # low: 0.055802, # change: 0.03437271, # buy: "0.05780000", # sell: "0.05824200", # time: 1533413083184} } # # # {code: "0", # msg: "suc", # data: {symbol: "ETHBTC", # high: 0.058426, # vol: 19055.875, # last: 0.058019, # low: 0.055802, # change: 0.03437271, # buy: "0.05780000", # sell: "0.05824200", # time: 1533413083184} } # # # public fetchTrades # # { amount: 0.88, 
# create_time: 1533414358000, # price: 0.058019, # id: 406531, # type: "sell" }, # # private fetchMyTrades, fetchOrder, fetchOpenOrders, fetchClosedOrders # # { volume: "0.010", # side: "SELL", # feeCoin: "BTC", # price: "0.05816200", # fee: "0.00000029", # ctime: 1533616674000, # deal_price: "0.00058162", # id: 415779, # type: "卖出", # bid_id: 3669539, # only in fetchMyTrades # ask_id: 3669583, # only in fetchMyTrades # } # # # {code: "0", # msg: "suc", # data: [{ amount: 0.88, # create_time: 1533414358000, # price: 0.058019, # id: 406531, # type: "sell" }, # { amount: 4.88, # create_time: 1533414331000, # price: 0.058019, # id: 406530, # type: "buy" }, # { amount: 0.5, # create_time: 1533414311000, # price: 0.058019, # id: 406529, # type: "sell" }]} # # timestamp # open # high # low # close # volume # in minutes # # {code: '0', # msg: 'suc', # data: # [[1533402420, 0.057833, 0.057833, 0.057833, 0.057833, 18.1], # [1533402480, 0.057833, 0.057833, 0.057833, 0.057833, 29.88], # [1533402540, 0.057833, 0.057833, 0.057833, 0.057833, 29.06] ]} # # for market buy it requires the amount of quote currency to spend # An excerpt from their docs: # side required Trading Direction # type required pending order types,1:Limit-price Delegation 2:Market- price Delegation # volume required # Purchase Quantity(polysemy,multiplex field) # type=1: Quantity of buying and selling # type=2: Buying represents gross price, and selling represents total number # Trading restriction user/me-user information # price optional Delegation Price:type=2:self parameter is no use. # fee_is_user_exchange_coin optional # 0,when making transactions with all platform currencies, # self parameter represents whether to use them to pay # fees or not and 0 is no, 1 is yes. # # {code: '0', # msg: 'suc', # data: {'order_id' : 34343} } # # INIT(0,"primary order,untraded and not enter the market") # NEW_(1,"new order,untraded and enter the market ") # FILLED(2,"complete deal") # PART_FILLED(3,"partial deal") # CANCELED(4,"already withdrawn") # PENDING_CANCEL(5,"pending withdrawak") # EXPIRED(6,"abnormal orders") # # createOrder # # {"order_id":34343} # # fetchOrder, fetchOpenOrders, fetchClosedOrders # # { side: "BUY", # total_price: "0.10000000", # created_at: 1510993841000, # avg_price: "0.10000000", # countCoin: "btc", # source: 1, # type: 1, # side_msg: "买入", # volume: "1.000", # price: "0.10000000", # source_msg: "WEB", # status_msg: "完全成交", # deal_volume: "1.00000000", # id: 424, # remain_volume: "0.00000000", # baseCoin: "eth", # tradeList: [{ volume: "1.000", # feeCoin: "YLB", # price: "0.10000000", # fee: "0.16431104", # ctime: 1510996571195, # deal_price: "0.10000000", # id: 306, # type: "买入" }], # status: 2 } # # fetchOrder # # {trade_list: [{ volume: "0.010", # feeCoin: "BTC", # price: "0.05816200", # fee: "0.00000029", # ctime: 1533616674000, # deal_price: "0.00058162", # id: 415779, # type: "卖出" }], # order_info: { side: "SELL", # total_price: "0.010", # created_at: 1533616673000, # avg_price: "0.05816200", # countCoin: "btc", # source: 3, # type: 2, # side_msg: "卖出", # volume: "0.010", # price: "0.00000000", # source_msg: "API", # status_msg: "完全成交", # deal_volume: "0.01000000", # id: 3669583, # remain_volume: "0.00000000", # baseCoin: "eth", # tradeList: [{ volume: "0.010", # feeCoin: "BTC", # price: "0.05816200", # fee: "0.00000029", # ctime: 1533616674000, # deal_price: "0.00058162", # id: 415779, # type: "卖出" }], # status: 2 }} # # pageSize optional page size # page optional page number # # {code: "0", # msg: "suc", # 
data: { count: 1, # orderList: [{ side: "SELL", # total_price: "0.010", # created_at: 1533616673000, # avg_price: "0.05816200", # countCoin: "btc", # source: 3, # type: 2, # side_msg: "卖出", # volume: "0.010", # price: "0.00000000", # source_msg: "API", # status_msg: "完全成交", # deal_volume: "0.01000000", # id: 3669583, # remain_volume: "0.00000000", # baseCoin: "eth", # tradeList: [{ volume: "0.010", # feeCoin: "BTC", # price: "0.05816200", # fee: "0.00000029", # ctime: 1533616674000, # deal_price: "0.00058162", # id: 415779, # type: "卖出" }], # status: 2 }]} } # # privateGetNewOrder returns resultList, privateGetAllOrder returns orderList # # {code: "0", # msg: "suc", # data: {trade_list: [{ volume: "0.010", # feeCoin: "BTC", # price: "0.05816200", # fee: "0.00000029", # ctime: 1533616674000, # deal_price: "0.00058162", # id: 415779, # type: "卖出" }], # order_info: { side: "SELL", # total_price: "0.010", # created_at: 1533616673000, # avg_price: "0.05816200", # countCoin: "btc", # source: 3, # type: 2, # side_msg: "卖出", # volume: "0.010", # price: "0.00000000", # source_msg: "API", # status_msg: "完全成交", # deal_volume: "0.01000000", # id: 3669583, # remain_volume: "0.00000000", # baseCoin: "eth", # tradeList: [{ volume: "0.010", # feeCoin: "BTC", # price: "0.05816200", # fee: "0.00000029", # ctime: 1533616674000, # deal_price: "0.00058162", # id: 415779, # type: "卖出" }], # status: 2 }} } # # pageSize optional page size # page optional page number # # {code: "0", # msg: "suc", # data: { count: 1, # resultList: [{ volume: "0.010", # side: "SELL", # feeCoin: "BTC", # price: "0.05816200", # fee: "0.00000029", # ctime: 1533616674000, # deal_price: "0.00058162", # id: 415779, # type: "卖出", # bid_id: 3669539, # ask_id: 3669583 }]} } # # https://github.com/UEX-OpenAPI/API_Docs_en/wiki/Query-deposit-address-of-assigned-token # # { # "code": "0", # "msg": "suc", # "data": { # "addressList": [ # { # "address": "0x198803ef8e0df9e8812c0105421885e843e6d2e2", # "tag": "", # }, # ], # }, # } # # default 10 # instead of withdrawal... 
# https://github.com/UEX-OpenAPI/API_Docs_en/wiki/Query-deposit-record-of-assigned-token # https://github.com/UEX-OpenAPI/API_Docs_en/wiki/Query-withdraw-record-of-assigned-token # # {code: "0", # msg: "suc", # data: {depositList: [{ createdAt: 1533615955000, # amount: "0.01", # updateAt: 1533616311000, # txid: "0x0922fde6ab8270fe6eb31cb5a37dc732d96dc8193f81cf46c4ab29fde…", # tag: "", # confirmations: 30, # addressTo: "0x198803ef8e0df9e8812c0105421885e843e6d2e2", # status: 1, # coin: "ETH" }]} } # # { # "code": "0", # "msg": "suc", # "data": { # "withdrawList": [{ # "updateAt": 1540344965000, # "createdAt": 1539311971000, # "status": 0, # "addressTo": "tz1d7DXJXU3AKWh77gSmpP7hWTeDYs8WF18q", # "tag": "100128877", # "id": 5, # "txid": "", # "fee": 0.0, # "amount": "1", # "symbol": "XTZ" # }] # } # } # # # deposits # # { createdAt: 1533615955000, # amount: "0.01", # updateAt: 1533616311000, # txid: "0x0922fde6ab8270fe6eb31cb5a37dc732d96dc8193f81cf46c4ab29fde…", # tag: "", # confirmations: 30, # addressTo: "0x198803ef8e0df9e8812c0105421885e843e6d2e2", # status: 1, # coin: "ETH" }]} } # # withdrawals # # { # "updateAt": 1540344965000, # "createdAt": 1539311971000, # "status": 0, # "addressTo": "tz1d7DXJXU3AKWh77gSmpP7hWTeDYs8WF18q", # "tag": "100128877", # "id": 5, # "txid": "", # "fee": 0.0, # "amount": "1", # "symbol": "XTZ" # } # # injected from the outside # unaudited # audited # audit failed # "payment" # payment failed # only supports existing addresses in your withdraw address list # balance >= self.sum(amount, fee) # https://github.com/UEX-OpenAPI/API_Docs_en/wiki/Withdraw # fallback to default error handler # fallback to default error handler # # {"code":"0","msg":"suc","data":{}} # # message = self.safe_string(response, 'msg') | 1.55805 | 2 |
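An illustrative driver for the async ccxt-style exchange class above (editor's sketch, not part of the dataset row). It assumes a ccxt async_support build that still ships the uex exchange; the credentials and the symbol are placeholders.
import asyncio
import ccxt.async_support as ccxt  # assumed import path for ccxt's async classes

async def main():
    exchange = ccxt.uex({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})  # placeholder credentials
    try:
        await exchange.load_markets()                     # populates markets via publicGetCommonSymbols
        ticker = await exchange.fetch_ticker('ETH/BTC')   # wraps publicGetGetTicker + parse_ticker
        print(ticker['last'])
    finally:
        await exchange.close()                            # release the underlying HTTP session

asyncio.get_event_loop().run_until_complete(main())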
Alpha & Beta/wootMath/decimalToBinaryFraction.py | Mdlkxzmcp/various_python | 0 | 8813 | def decimal_to_binary_fraction(x=0.5):
"""
Input: x, a float between 0 and 1
Returns binary representation of x
"""
p = 0
while ((2 ** p) * x) % 1 != 0:
# print('Remainder = ' + str((2**p)*x - int((2**p)*x)))
p += 1
num = int(x * (2 ** p))
result = ''
if num == 0:
result = '0'
while num > 0:
result = str(num % 2) + result
num //= 2
for i in range(p - len(result)):
result = '0' + result
result = result[0:-p] + '.' + result[-p:]
return result # If there is no integer p such that x*(2**p) is a whole number, then internal
# representation is always an approximation
# Suggest that testing equality of floats is not exact: Use abs(x-y) < some
# small number, rather than x == y
# Why does print(0.1) return 0.1, if not exact?
# Because Python designers set it up this way to automatically round
| def decimal_to_binary_fraction(x=0.5):
"""
Input: x, a float between 0 and 1
Returns binary representation of x
"""
p = 0
while ((2 ** p) * x) % 1 != 0:
# print('Remainder = ' + str((2**p)*x - int((2**p)*x)))
p += 1
num = int(x * (2 ** p))
result = ''
if num == 0:
result = '0'
while num > 0:
result = str(num % 2) + result
num //= 2
for i in range(p - len(result)):
result = '0' + result
result = result[0:-p] + '.' + result[-p:]
return result # If there is no integer p such that x*(2**p) is a whole number, then internal
# representation is always an approximation
# Suggest that testing equality of floats is not exact: Use abs(x-y) < some
# small number, rather than x == y
# Why does print(0.1) return 0.1, if not exact?
# Because Python designers set it up this way to automatically round
| en | 0.804337 | Input: x, a float between 0 and 1 Returns binary representation of x # print('Remainder = ' + str((2**p)*x - int((2**p)*x))) # If there is no integer p such that x*(2**p) is a whole number, then internal # representation is always an approximation # Suggest that testing equality of floats is not exact: Use abs(x-y) < some # small number, rather than x == y # Why does print(0.1) return 0.1, if not exact? # Because Python designers set it up this way to automatically round | 4.049032 | 4 |
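A quick check of decimal_to_binary_fraction above (editor's sketch; assumes the function is in scope, expected outputs shown in comments).
print(decimal_to_binary_fraction(0.5))    # '.1'
print(decimal_to_binary_fraction(0.375))  # '.011'  (0.375 = 1/4 + 1/8)
x = 0.1 + 0.2                             # 0.1 and 0.2 are stored as the nearest binary fractions
print(x == 0.3)                           # False, because of that rounding
print(abs(x - 0.3) < 1e-9)                # True -- the tolerance-based comparison suggested above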
composer/utils/run_directory.py | ajaysaini725/composer | 0 | 8814 | <reponame>ajaysaini725/composer
# Copyright 2021 MosaicML. All Rights Reserved.
import datetime
import logging
import os
import pathlib
import time
from composer.utils import dist
log = logging.getLogger(__name__)
_RUN_DIRECTORY_KEY = "COMPOSER_RUN_DIRECTORY"
_start_time_str = datetime.datetime.now().isoformat()
def get_node_run_directory() -> str:
"""Returns the run directory for the node. This folder is shared by all ranks on the node.
Returns:
str: The node run directory.
"""
node_run_directory = os.environ.get(_RUN_DIRECTORY_KEY, os.path.join("runs", _start_time_str))
if node_run_directory.endswith(os.path.sep):
        # chop off the trailing slash so os.path.basename would work as expected
node_run_directory = node_run_directory[:-1]
os.makedirs(node_run_directory, exist_ok=True)
return os.path.abspath(node_run_directory)
def get_run_directory() -> str:
"""Returns the run directory for the current rank.
Returns:
str: The run directory.
"""
run_dir = os.path.join(get_node_run_directory(), f"rank_{dist.get_global_rank()}")
os.makedirs(run_dir, exist_ok=True)
return run_dir
def get_modified_files(modified_since_timestamp: float, *, ignore_hidden: bool = True):
"""Returns a list of files (recursively) in the run directory that have been modified since
``modified_since_timestamp``.
Args:
modified_since_timestamp (float): Minimum last modified timestamp(in seconds since EPOCH)
of files to include.
ignore_hidden (bool, optional): Whether to ignore hidden files and folders (default: ``True``)
Returns:
List[str]: List of filepaths that have been modified since ``modified_since_timestamp``
"""
modified_files = []
run_directory = get_run_directory()
if run_directory is None:
raise RuntimeError("Run directory is not defined")
for root, dirs, files in os.walk(run_directory):
del dirs # unused
for file in files:
if ignore_hidden and any(x.startswith(".") for x in file.split(os.path.sep)):
# skip hidden files and folders
continue
filepath = os.path.join(root, file)
modified_time = os.path.getmtime(filepath)
if modified_time >= modified_since_timestamp:
modified_files.append(filepath)
return modified_files
def get_run_directory_timestamp() -> float:
"""Returns the current timestamp on the run directory filesystem.
Note that the disk time can differ from system time (e.g. when using
network filesystems).
Returns:
float: the current timestamp on the run directory filesystem.
"""
run_directory = get_run_directory()
if run_directory is None:
raise RuntimeError("Run directory is not defined")
python_time = time.time()
touch_file = (pathlib.Path(run_directory) / f".{python_time}")
touch_file.touch()
new_last_uploaded_timestamp = os.path.getmtime(str(touch_file))
os.remove(str(touch_file))
return new_last_uploaded_timestamp
| # Copyright 2021 MosaicML. All Rights Reserved.
import datetime
import logging
import os
import pathlib
import time
from composer.utils import dist
log = logging.getLogger(__name__)
_RUN_DIRECTORY_KEY = "COMPOSER_RUN_DIRECTORY"
_start_time_str = datetime.datetime.now().isoformat()
def get_node_run_directory() -> str:
"""Returns the run directory for the node. This folder is shared by all ranks on the node.
Returns:
str: The node run directory.
"""
node_run_directory = os.environ.get(_RUN_DIRECTORY_KEY, os.path.join("runs", _start_time_str))
if node_run_directory.endswith(os.path.sep):
        # chop off the trailing slash so os.path.basename would work as expected
node_run_directory = node_run_directory[:-1]
os.makedirs(node_run_directory, exist_ok=True)
return os.path.abspath(node_run_directory)
def get_run_directory() -> str:
"""Returns the run directory for the current rank.
Returns:
str: The run directory.
"""
run_dir = os.path.join(get_node_run_directory(), f"rank_{dist.get_global_rank()}")
os.makedirs(run_dir, exist_ok=True)
return run_dir
def get_modified_files(modified_since_timestamp: float, *, ignore_hidden: bool = True):
"""Returns a list of files (recursively) in the run directory that have been modified since
``modified_since_timestamp``.
Args:
modified_since_timestamp (float): Minimum last modified timestamp(in seconds since EPOCH)
of files to include.
ignore_hidden (bool, optional): Whether to ignore hidden files and folders (default: ``True``)
Returns:
List[str]: List of filepaths that have been modified since ``modified_since_timestamp``
"""
modified_files = []
run_directory = get_run_directory()
if run_directory is None:
raise RuntimeError("Run directory is not defined")
for root, dirs, files in os.walk(run_directory):
del dirs # unused
for file in files:
if ignore_hidden and any(x.startswith(".") for x in file.split(os.path.sep)):
# skip hidden files and folders
continue
filepath = os.path.join(root, file)
modified_time = os.path.getmtime(filepath)
if modified_time >= modified_since_timestamp:
modified_files.append(filepath)
return modified_files
def get_run_directory_timestamp() -> float:
"""Returns the current timestamp on the run directory filesystem.
Note that the disk time can differ from system time (e.g. when using
network filesystems).
Returns:
float: the current timestamp on the run directory filesystem.
"""
run_directory = get_run_directory()
if run_directory is None:
raise RuntimeError("Run directory is not defined")
python_time = time.time()
touch_file = (pathlib.Path(run_directory) / f".{python_time}")
touch_file.touch()
new_last_uploaded_timestamp = os.path.getmtime(str(touch_file))
os.remove(str(touch_file))
return new_last_uploaded_timestamp | en | 0.770862 | # Copyright 2021 MosaicML. All Rights Reserved. Returns the run directory for the node. This folder is shared by all ranks on the node. Returns: str: The node run directory. # chop off the training slash so os.path.basename would work as expected Returns the run directory for the current rank. Returns: str: The run directory. Returns a list of files (recursively) in the run directory that have been modified since ``modified_since_timestamp``. Args: modified_since_timestamp (float): Minimum last modified timestamp(in seconds since EPOCH) of files to include. ignore_hidden (bool, optional): Whether to ignore hidden files and folders (default: ``True``) Returns: List[str]: List of filepaths that have been modified since ``modified_since_timestamp`` # unused # skip hidden files and folders Returns the current timestamp on the run directory filesystem. Note that the disk time can differ from system time (e.g. when using network filesystems). Returns: float: the current timestamp on the run directory filesystem. | 2.359989 | 2 |
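One possible way to exercise the run-directory helpers above (editor's sketch; the directory and file names are hypothetical and the composer package is assumed to be installed).
import os
import pathlib
from composer.utils import run_directory as rd   # assumed import of the module above

os.environ["COMPOSER_RUN_DIRECTORY"] = "/tmp/composer_demo"       # hypothetical node run directory
baseline = rd.get_run_directory_timestamp()                       # timestamp as seen by that filesystem
artifact = pathlib.Path(rd.get_run_directory()) / "metrics.json"  # hypothetical artifact file
artifact.write_text("{}")
print(rd.get_modified_files(baseline))                            # should list the new metrics.json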
newsapp/migrations/0003_news.py | adi112100/newsapp | 0 | 8815 | # Generated by Django 3.0.8 on 2020-07-11 08:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('newsapp', '0002_auto_20200711_1124'),
]
operations = [
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField()),
('indian_news', models.TextField()),
('national_news', models.TextField()),
('international_news', models.TextField()),
('bollywood_news', models.TextField()),
('lifestyle_news', models.TextField()),
('sport_news', models.TextField()),
('business_news', models.TextField()),
('sharemarket_news', models.TextField()),
('corona_news', models.TextField()),
('space_news', models.TextField()),
('motivation_news', models.TextField()),
],
),
]
| # Generated by Django 3.0.8 on 2020-07-11 08:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('newsapp', '0002_auto_20200711_1124'),
]
operations = [
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField()),
('indian_news', models.TextField()),
('national_news', models.TextField()),
('international_news', models.TextField()),
('bollywood_news', models.TextField()),
('lifestyle_news', models.TextField()),
('sport_news', models.TextField()),
('business_news', models.TextField()),
('sharemarket_news', models.TextField()),
('corona_news', models.TextField()),
('space_news', models.TextField()),
('motivation_news', models.TextField()),
],
),
]
| en | 0.795622 | # Generated by Django 3.0.8 on 2020-07-11 08:10 | 1.753405 | 2 |
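The migration above would be applied like any other (editor's sketch; assumes a configured Django project containing the newsapp app).
from django.core.management import call_command
call_command("migrate", "newsapp")   # applies 0003_news after its 0002 dependency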
src/enum/__init__.py | NazarioJL/faker_enum | 5 | 8816 | # -*- coding: utf-8 -*-
from enum import Enum
from typing import TypeVar, Type, List, Iterable, cast
from faker.providers import BaseProvider
TEnum = TypeVar("TEnum", bound=Enum)
class EnumProvider(BaseProvider):
"""
A Provider for enums.
"""
def enum(self, enum_cls: Type[TEnum]) -> TEnum:
members: List[TEnum] = list(cast(Iterable[TEnum], enum_cls))
return self.random_element(members)
| # -*- coding: utf-8 -*-
from enum import Enum
from typing import TypeVar, Type, List, Iterable, cast
from faker.providers import BaseProvider
TEnum = TypeVar("TEnum", bound=Enum)
class EnumProvider(BaseProvider):
"""
A Provider for enums.
"""
def enum(self, enum_cls: Type[TEnum]) -> TEnum:
members: List[TEnum] = list(cast(Iterable[TEnum], enum_cls))
return self.random_element(members)
| en | 0.784669 | # -*- coding: utf-8 -*- A Provider for enums. | 2.961921 | 3 |
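Typical registration of the provider above with Faker (editor's sketch; the Color enum is a made-up example).
from enum import Enum
from faker import Faker

class Color(Enum):
    RED = 1
    GREEN = 2
    BLUE = 3

fake = Faker()
fake.add_provider(EnumProvider)   # the provider class defined above
print(fake.enum(Color))           # e.g. Color.GREEN, chosen uniformly at random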
tests/performance/bottle/simple_server.py | Varriount/sanic | 4,959 | 8817 | # Run with: gunicorn --workers=1 --worker-class=meinheld.gmeinheld.MeinheldWorker -b :8000 simple_server:app
import bottle
import ujson
from bottle import route, run
@route("/")
def index():
return ujson.dumps({"test": True})
app = bottle.default_app()
| # Run with: gunicorn --workers=1 --worker-class=meinheld.gmeinheld.MeinheldWorker -b :8000 simple_server:app
import bottle
import ujson
from bottle import route, run
@route("/")
def index():
return ujson.dumps({"test": True})
app = bottle.default_app()
| en | 0.512396 | # Run with: gunicorn --workers=1 --worker-class=meinheld.gmeinheld.MeinheldWorker -b :8000 simple_server:app | 1.734676 | 2 |
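For a quick local check without gunicorn/meinheld, the same app can also be served with bottle's built-in development server (editor's sketch extending the snippet above).
if __name__ == "__main__":
    run(app, host="127.0.0.1", port=8000)   # then: curl http://127.0.0.1:8000/ -> {"test":true}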
usuarios/views.py | alvarocneto/alura_django | 1 | 8818 | <filename>usuarios/views.py
from django.shortcuts import redirect
from django.shortcuts import render
from django.contrib.auth.models import User
from django.views.generic.base import View
from perfis.models import Perfil
from usuarios.forms import RegistrarUsuarioForm
class RegistrarUsuarioView(View):
template_name = 'registrar.html'
def get(self, request):
return render(request, self.template_name)
def post(self, request):
# preenche o from
form = RegistrarUsuarioForm(request.POST)
# verifica se eh valido
if form.is_valid():
dados_form = form.data
# cria o usuario
usuario = User.objects.create_user(dados_form['nome'],
dados_form['email'],
dados_form['senha'])
# cria o perfil
perfil = Perfil(nome=dados_form['nome'],
telefone=dados_form['telefone'],
nome_empresa=dados_form['nome_empresa'],
usuario=usuario)
# grava no banco
perfil.save()
# redireciona para index
return redirect('index')
# so chega aqui se nao for valido
# vamos devolver o form para mostrar o formulario preenchido
return render(request, self.template_name, {'form': form})
| <filename>usuarios/views.py
from django.shortcuts import redirect
from django.shortcuts import render
from django.contrib.auth.models import User
from django.views.generic.base import View
from perfis.models import Perfil
from usuarios.forms import RegistrarUsuarioForm
class RegistrarUsuarioView(View):
template_name = 'registrar.html'
def get(self, request):
return render(request, self.template_name)
def post(self, request):
# preenche o from
form = RegistrarUsuarioForm(request.POST)
# verifica se eh valido
if form.is_valid():
dados_form = form.data
# cria o usuario
usuario = User.objects.create_user(dados_form['nome'],
dados_form['email'],
dados_form['senha'])
# cria o perfil
perfil = Perfil(nome=dados_form['nome'],
telefone=dados_form['telefone'],
nome_empresa=dados_form['nome_empresa'],
usuario=usuario)
# grava no banco
perfil.save()
# redireciona para index
return redirect('index')
# so chega aqui se nao for valido
# vamos devolver o form para mostrar o formulario preenchido
return render(request, self.template_name, {'form': form})
| pt | 0.68944 | # preenche o from # verifica se eh valido # cria o usuario # cria o perfil # grava no banco # redireciona para index # so chega aqui se nao for valido # vamos devolver o form para mostrar o formulario preenchido | 2.095048 | 2 |
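To reach RegistrarUsuarioView it still needs a URL route; a minimal wiring could look like this (editor's sketch; the pattern and name are illustrative, and older Django versions would use url() from django.conf.urls instead of path()).
from django.urls import path
from usuarios.views import RegistrarUsuarioView

urlpatterns = [
    path('registrar/', RegistrarUsuarioView.as_view(), name='registrar'),
]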
antolib/AntoCommon.py | srsuper/BOT2020 | 1 | 8819 | ANTO_VER = '0.1.2'
| ANTO_VER = '0.1.2'
| none | 1 | 1.189043 | 1 |
|
cpc_fusion/pkgs/keys/main.py | CPChain/fusion | 5 | 8820 | from typing import (Any, Union, Type) # noqa: F401
from ..keys.datatypes import (
LazyBackend,
PublicKey,
PrivateKey,
Signature,
)
from eth_keys.exceptions import (
ValidationError,
)
from eth_keys.validation import (
validate_message_hash,
)
# These must be aliased due to a scoping issue in mypy
# https://github.com/python/mypy/issues/1775
_PublicKey = PublicKey
_PrivateKey = PrivateKey
_Signature = Signature
class KeyAPI(LazyBackend):
#
# datatype shortcuts
#
PublicKey = PublicKey # type: Type[_PublicKey]
PrivateKey = PrivateKey # type: Type[_PrivateKey]
Signature = Signature # type: Type[_Signature]
#
# Proxy method calls to the backends
#
def ecdsa_sign(self,
message_hash, # type: bytes
private_key # type: _PrivateKey
):
# type: (...) -> _Signature
validate_message_hash(message_hash)
if not isinstance(private_key, PrivateKey):
raise ValidationError(
"The `private_key` must be an instance of `eth_keys.datatypes.PrivateKey`"
)
signature = self.backend.ecdsa_sign(message_hash, private_key)
if not isinstance(signature, Signature):
raise ValidationError(
"Backend returned an invalid signature. Return value must be "
"an instance of `eth_keys.datatypes.Signature`"
)
return signature
def ecdsa_verify(self,
message_hash, # type: bytes
signature, # type: _Signature
public_key # type: _PublicKey
) -> bool:
if not isinstance(public_key, PublicKey):
raise ValidationError(
"The `public_key` must be an instance of `eth_keys.datatypes.PublicKey`"
)
return self.ecdsa_recover(message_hash, signature) == public_key
def ecdsa_recover(self,
message_hash, # type: bytes
signature # type: _Signature
):
# type: (...) -> _PublicKey
validate_message_hash(message_hash)
if not isinstance(signature, Signature):
raise ValidationError(
"The `signature` must be an instance of `eth_keys.datatypes.Signature`"
)
public_key = self.backend.ecdsa_recover(message_hash, signature)
if not isinstance(public_key, _PublicKey):
raise ValidationError(
"Backend returned an invalid public_key. Return value must be "
"an instance of `eth_keys.datatypes.PublicKey`"
)
return public_key
def private_key_to_public_key(self, private_key):
if not isinstance(private_key, PrivateKey):
raise ValidationError(
"The `private_key` must be an instance of `eth_keys.datatypes.PrivateKey`"
)
public_key = self.backend.private_key_to_public_key(private_key)
if not isinstance(public_key, PublicKey):
raise ValidationError(
"Backend returned an invalid public_key. Return value must be "
"an instance of `eth_keys.datatypes.PublicKey`"
)
return public_key
# This creates an easy to import backend which will lazily fetch whatever
# backend has been configured at runtime (as opposed to import or instantiation time).
lazy_key_api = KeyAPI(backend=None)
| from typing import (Any, Union, Type) # noqa: F401
from ..keys.datatypes import (
LazyBackend,
PublicKey,
PrivateKey,
Signature,
)
from eth_keys.exceptions import (
ValidationError,
)
from eth_keys.validation import (
validate_message_hash,
)
# These must be aliased due to a scoping issue in mypy
# https://github.com/python/mypy/issues/1775
_PublicKey = PublicKey
_PrivateKey = PrivateKey
_Signature = Signature
class KeyAPI(LazyBackend):
#
# datatype shortcuts
#
PublicKey = PublicKey # type: Type[_PublicKey]
PrivateKey = PrivateKey # type: Type[_PrivateKey]
Signature = Signature # type: Type[_Signature]
#
# Proxy method calls to the backends
#
def ecdsa_sign(self,
message_hash, # type: bytes
private_key # type: _PrivateKey
):
# type: (...) -> _Signature
validate_message_hash(message_hash)
if not isinstance(private_key, PrivateKey):
raise ValidationError(
"The `private_key` must be an instance of `eth_keys.datatypes.PrivateKey`"
)
signature = self.backend.ecdsa_sign(message_hash, private_key)
if not isinstance(signature, Signature):
raise ValidationError(
"Backend returned an invalid signature. Return value must be "
"an instance of `eth_keys.datatypes.Signature`"
)
return signature
def ecdsa_verify(self,
message_hash, # type: bytes
signature, # type: _Signature
public_key # type: _PublicKey
) -> bool:
if not isinstance(public_key, PublicKey):
raise ValidationError(
"The `public_key` must be an instance of `eth_keys.datatypes.PublicKey`"
)
return self.ecdsa_recover(message_hash, signature) == public_key
def ecdsa_recover(self,
message_hash, # type: bytes
signature # type: _Signature
):
# type: (...) -> _PublicKey
validate_message_hash(message_hash)
if not isinstance(signature, Signature):
raise ValidationError(
"The `signature` must be an instance of `eth_keys.datatypes.Signature`"
)
public_key = self.backend.ecdsa_recover(message_hash, signature)
if not isinstance(public_key, _PublicKey):
raise ValidationError(
"Backend returned an invalid public_key. Return value must be "
"an instance of `eth_keys.datatypes.PublicKey`"
)
return public_key
def private_key_to_public_key(self, private_key):
if not isinstance(private_key, PrivateKey):
raise ValidationError(
"The `private_key` must be an instance of `eth_keys.datatypes.PrivateKey`"
)
public_key = self.backend.private_key_to_public_key(private_key)
if not isinstance(public_key, PublicKey):
raise ValidationError(
"Backend returned an invalid public_key. Return value must be "
"an instance of `eth_keys.datatypes.PublicKey`"
)
return public_key
# This creates an easy to import backend which will lazily fetch whatever
# backend has been configured at runtime (as opposed to import or instantiation time).
lazy_key_api = KeyAPI(backend=None)
| en | 0.75043 | # noqa: F401 # These must be aliased due to a scoping issue in mypy # https://github.com/python/mypy/issues/1775 # # datatype shortcuts # # type: Type[_PublicKey] # type: Type[_PrivateKey] # type: Type[_Signature] # # Proxy method calls to the backends # # type: bytes # type: _PrivateKey # type: (...) -> _Signature # type: bytes # type: _Signature # type: _PublicKey # type: bytes # type: _Signature # type: (...) -> _PublicKey # This creates an easy to import backend which will lazily fetch whatever # backend has been configured at runtime (as opposed to import or instantiation time). | 2.41417 | 2 |
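A short usage sketch for the KeyAPI facade defined in the entry above. This is illustrative only: it assumes the vendored module is importable as cpc_fusion.pkgs.keys.main, that a default ECDSA backend can be resolved lazily at runtime (as in upstream eth_keys), and that the 32-byte key value is a throwaway placeholder.

from cpc_fusion.pkgs.keys.main import lazy_key_api as keys

priv = keys.PrivateKey(b"\x01" * 32)        # throwaway key, illustration only
msg_hash = b"\x00" * 32                     # any 32-byte message hash
sig = keys.ecdsa_sign(msg_hash, priv)       # proxied to the lazily resolved backend
pub = keys.private_key_to_public_key(priv)
assert keys.ecdsa_verify(msg_hash, sig, pub)
assert keys.ecdsa_recover(msg_hash, sig) == pub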
qiskit/pulse/transforms/canonicalization.py | gadial/qiskit-terra | 1 | 8821 | <reponame>gadial/qiskit-terra
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Basic rescheduling functions which take schedule or instructions and return new schedules."""
import warnings
from collections import defaultdict
from typing import List, Optional, Iterable, Union
import numpy as np
from qiskit.pulse import channels as chans, exceptions, instructions
from qiskit.pulse.exceptions import PulseError
from qiskit.pulse.exceptions import UnassignedDurationError
from qiskit.pulse.instruction_schedule_map import InstructionScheduleMap
from qiskit.pulse.instructions import directives
from qiskit.pulse.schedule import Schedule, ScheduleBlock, ScheduleComponent
def block_to_schedule(block: ScheduleBlock) -> Schedule:
"""Convert ``ScheduleBlock`` to ``Schedule``.
Args:
block: A ``ScheduleBlock`` to convert.
Returns:
Scheduled pulse program.
Raises:
UnassignedDurationError: When any instruction duration is not assigned.
"""
if not block.is_schedulable():
raise UnassignedDurationError(
'All instruction durations should be assigned before creating `Schedule`.'
'Please check `.parameters` to find unassigned parameter objects.')
schedule = Schedule(name=block.name, metadata=block.metadata)
for op_data in block.instructions:
if isinstance(op_data, ScheduleBlock):
context_schedule = block_to_schedule(op_data)
schedule.append(context_schedule, inplace=True)
else:
schedule.append(op_data, inplace=True)
# transform with defined policy
return block.alignment_context.align(schedule)
def compress_pulses(schedules: List[Schedule]) -> List[Schedule]:
"""Optimization pass to replace identical pulses.
Args:
schedules: Schedules to compress.
Returns:
Compressed schedules.
"""
existing_pulses = []
new_schedules = []
for schedule in schedules:
new_schedule = Schedule(name=schedule.name, metadata=schedule.metadata)
for time, inst in schedule.instructions:
if isinstance(inst, instructions.Play):
if inst.pulse in existing_pulses:
idx = existing_pulses.index(inst.pulse)
identical_pulse = existing_pulses[idx]
new_schedule.insert(time,
instructions.Play(identical_pulse,
inst.channel,
inst.name),
inplace=True)
else:
existing_pulses.append(inst.pulse)
new_schedule.insert(time, inst, inplace=True)
else:
new_schedule.insert(time, inst, inplace=True)
new_schedules.append(new_schedule)
return new_schedules
def flatten(program: Schedule) -> Schedule:
"""Flatten (inline) any called nodes into a Schedule tree with no nested children.
Args:
program: Pulse program to remove nested structure.
Returns:
Flatten pulse program.
Raises:
PulseError: When invalid data format is given.
"""
if isinstance(program, Schedule):
return Schedule(*program.instructions, name=program.name, metadata=program.metadata)
else:
raise PulseError(f'Invalid input program {program.__class__.__name__} is specified.')
def inline_subroutines(program: Union[Schedule, ScheduleBlock]) -> Union[Schedule, ScheduleBlock]:
"""Recursively remove call instructions and inline the respective subroutine instructions.
Assigned parameter values, which are stored in the parameter table, are also applied.
The subroutine is copied before the parameter assignment to avoid mutation problem.
Args:
program: A program which may contain the subroutine, i.e. ``Call`` instruction.
Returns:
A schedule without subroutine.
Raises:
PulseError: When input program is not valid data format.
"""
if isinstance(program, Schedule):
return _inline_schedule(program)
elif isinstance(program, ScheduleBlock):
return _inline_block(program)
else:
raise PulseError(f'Invalid program {program.__class__.__name__} is specified.')
def _inline_schedule(schedule: Schedule) -> Schedule:
"""A helper function to inline subroutine of schedule.
.. note:: If subroutine is ``ScheduleBlock`` it is converted into Schedule to get ``t0``.
"""
ret_schedule = Schedule(name=schedule.name,
metadata=schedule.metadata)
for t0, inst in schedule.instructions:
if isinstance(inst, instructions.Call):
# bind parameter
subroutine = inst.assigned_subroutine()
# convert into schedule if block is given
if isinstance(subroutine, ScheduleBlock):
subroutine = block_to_schedule(subroutine)
# recursively inline the program
inline_schedule = _inline_schedule(subroutine)
ret_schedule.insert(t0, inline_schedule, inplace=True)
else:
ret_schedule.insert(t0, inst, inplace=True)
return ret_schedule
def _inline_block(block: ScheduleBlock) -> ScheduleBlock:
"""A helper function to inline subroutine of schedule block.
.. note:: If subroutine is ``Schedule`` the function raises an error.
"""
ret_block = ScheduleBlock(alignment_context=block.alignment_context,
name=block.name,
metadata=block.metadata)
for inst in block.instructions:
if isinstance(inst, instructions.Call):
# bind parameter
subroutine = inst.assigned_subroutine()
if isinstance(subroutine, Schedule):
raise PulseError(f'A subroutine {subroutine.name} is a pulse Schedule. '
'This program cannot be inserted into ScheduleBlock because '
't0 associated with instruction will be lost.')
# recursively inline the program
inline_block = _inline_block(subroutine)
ret_block.append(inline_block, inplace=True)
else:
ret_block.append(inst, inplace=True)
return ret_block
def remove_directives(schedule: Schedule) -> Schedule:
"""Remove directives.
Args:
schedule: A schedule to remove compiler directives.
Returns:
A schedule without directives.
"""
return schedule.exclude(instruction_types=[directives.Directive])
def remove_trivial_barriers(schedule: Schedule) -> Schedule:
"""Remove trivial barriers with 0 or 1 channels.
Args:
schedule: A schedule to remove trivial barriers.
Returns:
schedule: A schedule without trivial barriers
"""
def filter_func(inst):
return (isinstance(inst[1], directives.RelativeBarrier) and
len(inst[1].channels) < 2)
return schedule.exclude(filter_func)
def align_measures(schedules: Iterable[ScheduleComponent],
inst_map: Optional[InstructionScheduleMap] = None,
cal_gate: str = 'u3',
max_calibration_duration: Optional[int] = None,
align_time: Optional[int] = None,
align_all: Optional[bool] = True,
) -> List[Schedule]:
"""Return new schedules where measurements occur at the same physical time.
This transformation will align the first :class:`qiskit.pulse.Acquire` on
every channel to occur at the same time.
Minimum measurement wait time (to allow for calibration pulses) is enforced
and may be set with ``max_calibration_duration``.
By default only instructions containing a :class:`~qiskit.pulse.AcquireChannel`
or :class:`~qiskit.pulse.MeasureChannel` will be shifted. If you wish to keep
the relative timing of all instructions in the schedule set ``align_all=True``.
This method assumes that ``MeasureChannel(i)`` and ``AcquireChannel(i)``
correspond to the same qubit and the acquire/play instructions
should be shifted together on these channels.
.. jupyter-kernel:: python3
:id: align_measures
.. jupyter-execute::
from qiskit import pulse
from qiskit.pulse import transforms
with pulse.build() as sched:
with pulse.align_sequential():
pulse.play(pulse.Constant(10, 0.5), pulse.DriveChannel(0))
pulse.play(pulse.Constant(10, 1.), pulse.MeasureChannel(0))
pulse.acquire(20, pulse.AcquireChannel(0), pulse.MemorySlot(0))
sched_shifted = sched << 20
aligned_sched, aligned_sched_shifted = transforms.align_measures([sched, sched_shifted])
assert aligned_sched == aligned_sched_shifted
If it is desired to only shift acquisition and measurement stimulus instructions
set the flag ``align_all=False``:
.. jupyter-execute::
aligned_sched, aligned_sched_shifted = transforms.align_measures(
[sched, sched_shifted],
align_all=False,
)
assert aligned_sched != aligned_sched_shifted
Args:
schedules: Collection of schedules to be aligned together
inst_map: Mapping of circuit operations to pulse schedules
cal_gate: The name of the gate to inspect for the calibration time
max_calibration_duration: If provided, inst_map and cal_gate will be ignored
align_time: If provided, this will be used as final align time.
align_all: Shift all instructions in the schedule such that they maintain
their relative alignment with the shifted acquisition instruction.
If ``False`` only the acquisition and measurement pulse instructions
will be shifted.
Returns:
The input list of schedules transformed to have their measurements aligned.
Raises:
PulseError: If the provided alignment time is negative.
"""
def get_first_acquire_times(schedules):
"""Return a list of first acquire times for each schedule."""
acquire_times = []
for schedule in schedules:
visited_channels = set()
qubit_first_acquire_times = defaultdict(lambda: None)
for time, inst in schedule.instructions:
if (isinstance(inst, instructions.Acquire) and
inst.channel not in visited_channels):
visited_channels.add(inst.channel)
qubit_first_acquire_times[inst.channel.index] = time
acquire_times.append(qubit_first_acquire_times)
return acquire_times
def get_max_calibration_duration(inst_map, cal_gate):
"""Return the time needed to allow for readout discrimination calibration pulses."""
# TODO (qiskit-terra #5472): fix behavior of this.
max_calibration_duration = 0
for qubits in inst_map.qubits_with_instruction(cal_gate):
cmd = inst_map.get(cal_gate, qubits, np.pi, 0, np.pi)
max_calibration_duration = max(cmd.duration, max_calibration_duration)
return max_calibration_duration
if align_time is not None and align_time < 0:
raise exceptions.PulseError("Align time cannot be negative.")
first_acquire_times = get_first_acquire_times(schedules)
# Extract the maximum acquire in every schedule across all acquires in the schedule.
# If there are no acquires in the schedule default to 0.
max_acquire_times = [max(0, *times.values()) for times in first_acquire_times]
if align_time is None:
if max_calibration_duration is None:
if inst_map:
max_calibration_duration = get_max_calibration_duration(inst_map, cal_gate)
else:
max_calibration_duration = 0
align_time = max(max_calibration_duration, *max_acquire_times)
# Shift acquires according to the new scheduled time
new_schedules = []
for sched_idx, schedule in enumerate(schedules):
new_schedule = Schedule(name=schedule.name, metadata=schedule.metadata)
stop_time = schedule.stop_time
if align_all:
if first_acquire_times[sched_idx]:
shift = align_time - max_acquire_times[sched_idx]
else:
shift = align_time - stop_time
else:
shift = 0
for time, inst in schedule.instructions:
measurement_channels = {
chan.index for chan in inst.channels if
isinstance(chan, (chans.MeasureChannel, chans.AcquireChannel))
}
if measurement_channels:
sched_first_acquire_times = first_acquire_times[sched_idx]
max_start_time = max(sched_first_acquire_times[chan]
for chan in measurement_channels if
chan in sched_first_acquire_times)
shift = align_time - max_start_time
if shift < 0:
warnings.warn(
"The provided alignment time is scheduling an acquire instruction "
"earlier than it was scheduled for in the original Schedule. "
"This may result in an instruction being scheduled before t=0 and "
"an error being raised."
)
new_schedule.insert(time+shift, inst, inplace=True)
new_schedules.append(new_schedule)
return new_schedules
def add_implicit_acquires(schedule: ScheduleComponent,
meas_map: List[List[int]]
) -> Schedule:
"""Return a new schedule with implicit acquires from the measurement mapping replaced by
explicit ones.
.. warning:: Since new acquires are being added, Memory Slots will be set to match the
qubit index. This may overwrite your specification.
Args:
schedule: Schedule to be aligned.
meas_map: List of lists of qubits that are measured together.
Returns:
A ``Schedule`` with the additional acquisition instructions.
"""
new_schedule = Schedule(name=schedule.name, metadata=schedule.metadata)
acquire_map = dict()
for time, inst in schedule.instructions:
if isinstance(inst, instructions.Acquire):
if inst.mem_slot and inst.mem_slot.index != inst.channel.index:
warnings.warn("One of your acquires was mapped to a memory slot which didn't match"
" the qubit index. I'm relabeling them to match.")
# Get the label of all qubits that are measured with the qubit(s) in this instruction
all_qubits = []
for sublist in meas_map:
if inst.channel.index in sublist:
all_qubits.extend(sublist)
# Replace the old acquire instruction by a new one explicitly acquiring all qubits in
# the measurement group.
for i in all_qubits:
explicit_inst = instructions.Acquire(inst.duration,
chans.AcquireChannel(i),
mem_slot=chans.MemorySlot(i),
kernel=inst.kernel,
discriminator=inst.discriminator)
if time not in acquire_map:
new_schedule.insert(time, explicit_inst, inplace=True)
acquire_map = {time: {i}}
elif i not in acquire_map[time]:
new_schedule.insert(time, explicit_inst, inplace=True)
acquire_map[time].add(i)
else:
new_schedule.insert(time, inst, inplace=True)
return new_schedule
| # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Basic rescheduling functions which take schedule or instructions and return new schedules."""
import warnings
from collections import defaultdict
from typing import List, Optional, Iterable, Union
import numpy as np
from qiskit.pulse import channels as chans, exceptions, instructions
from qiskit.pulse.exceptions import PulseError
from qiskit.pulse.exceptions import UnassignedDurationError
from qiskit.pulse.instruction_schedule_map import InstructionScheduleMap
from qiskit.pulse.instructions import directives
from qiskit.pulse.schedule import Schedule, ScheduleBlock, ScheduleComponent
def block_to_schedule(block: ScheduleBlock) -> Schedule:
"""Convert ``ScheduleBlock`` to ``Schedule``.
Args:
block: A ``ScheduleBlock`` to convert.
Returns:
Scheduled pulse program.
Raises:
UnassignedDurationError: When any instruction duration is not assigned.
"""
if not block.is_schedulable():
raise UnassignedDurationError(
'All instruction durations should be assigned before creating `Schedule`.'
'Please check `.parameters` to find unassigned parameter objects.')
schedule = Schedule(name=block.name, metadata=block.metadata)
for op_data in block.instructions:
if isinstance(op_data, ScheduleBlock):
context_schedule = block_to_schedule(op_data)
schedule.append(context_schedule, inplace=True)
else:
schedule.append(op_data, inplace=True)
# transform with defined policy
return block.alignment_context.align(schedule)
def compress_pulses(schedules: List[Schedule]) -> List[Schedule]:
"""Optimization pass to replace identical pulses.
Args:
schedules: Schedules to compress.
Returns:
Compressed schedules.
"""
existing_pulses = []
new_schedules = []
for schedule in schedules:
new_schedule = Schedule(name=schedule.name, metadata=schedule.metadata)
for time, inst in schedule.instructions:
if isinstance(inst, instructions.Play):
if inst.pulse in existing_pulses:
idx = existing_pulses.index(inst.pulse)
identical_pulse = existing_pulses[idx]
new_schedule.insert(time,
instructions.Play(identical_pulse,
inst.channel,
inst.name),
inplace=True)
else:
existing_pulses.append(inst.pulse)
new_schedule.insert(time, inst, inplace=True)
else:
new_schedule.insert(time, inst, inplace=True)
new_schedules.append(new_schedule)
return new_schedules
def flatten(program: Schedule) -> Schedule:
"""Flatten (inline) any called nodes into a Schedule tree with no nested children.
Args:
program: Pulse program to remove nested structure.
Returns:
Flatten pulse program.
Raises:
PulseError: When invalid data format is given.
"""
if isinstance(program, Schedule):
return Schedule(*program.instructions, name=program.name, metadata=program.metadata)
else:
raise PulseError(f'Invalid input program {program.__class__.__name__} is specified.')
def inline_subroutines(program: Union[Schedule, ScheduleBlock]) -> Union[Schedule, ScheduleBlock]:
"""Recursively remove call instructions and inline the respective subroutine instructions.
Assigned parameter values, which are stored in the parameter table, are also applied.
The subroutine is copied before the parameter assignment to avoid mutation problem.
Args:
program: A program which may contain the subroutine, i.e. ``Call`` instruction.
Returns:
A schedule without subroutine.
Raises:
PulseError: When input program is not valid data format.
"""
if isinstance(program, Schedule):
return _inline_schedule(program)
elif isinstance(program, ScheduleBlock):
return _inline_block(program)
else:
raise PulseError(f'Invalid program {program.__class__.__name__} is specified.')
def _inline_schedule(schedule: Schedule) -> Schedule:
"""A helper function to inline subroutine of schedule.
.. note:: If subroutine is ``ScheduleBlock`` it is converted into Schedule to get ``t0``.
"""
ret_schedule = Schedule(name=schedule.name,
metadata=schedule.metadata)
for t0, inst in schedule.instructions:
if isinstance(inst, instructions.Call):
# bind parameter
subroutine = inst.assigned_subroutine()
# convert into schedule if block is given
if isinstance(subroutine, ScheduleBlock):
subroutine = block_to_schedule(subroutine)
# recursively inline the program
inline_schedule = _inline_schedule(subroutine)
ret_schedule.insert(t0, inline_schedule, inplace=True)
else:
ret_schedule.insert(t0, inst, inplace=True)
return ret_schedule
def _inline_block(block: ScheduleBlock) -> ScheduleBlock:
"""A helper function to inline subroutine of schedule block.
.. note:: If subroutine is ``Schedule`` the function raises an error.
"""
ret_block = ScheduleBlock(alignment_context=block.alignment_context,
name=block.name,
metadata=block.metadata)
for inst in block.instructions:
if isinstance(inst, instructions.Call):
# bind parameter
subroutine = inst.assigned_subroutine()
if isinstance(subroutine, Schedule):
raise PulseError(f'A subroutine {subroutine.name} is a pulse Schedule. '
'This program cannot be inserted into ScheduleBlock because '
't0 associated with instruction will be lost.')
# recursively inline the program
inline_block = _inline_block(subroutine)
ret_block.append(inline_block, inplace=True)
else:
ret_block.append(inst, inplace=True)
return ret_block
def remove_directives(schedule: Schedule) -> Schedule:
"""Remove directives.
Args:
schedule: A schedule to remove compiler directives.
Returns:
A schedule without directives.
"""
return schedule.exclude(instruction_types=[directives.Directive])
def remove_trivial_barriers(schedule: Schedule) -> Schedule:
"""Remove trivial barriers with 0 or 1 channels.
Args:
schedule: A schedule to remove trivial barriers.
Returns:
schedule: A schedule without trivial barriers
"""
def filter_func(inst):
return (isinstance(inst[1], directives.RelativeBarrier) and
len(inst[1].channels) < 2)
return schedule.exclude(filter_func)
def align_measures(schedules: Iterable[ScheduleComponent],
inst_map: Optional[InstructionScheduleMap] = None,
cal_gate: str = 'u3',
max_calibration_duration: Optional[int] = None,
align_time: Optional[int] = None,
align_all: Optional[bool] = True,
) -> List[Schedule]:
"""Return new schedules where measurements occur at the same physical time.
This transformation will align the first :class:`qiskit.pulse.Acquire` on
every channel to occur at the same time.
Minimum measurement wait time (to allow for calibration pulses) is enforced
and may be set with ``max_calibration_duration``.
By default only instructions containing a :class:`~qiskit.pulse.AcquireChannel`
or :class:`~qiskit.pulse.MeasureChannel` will be shifted. If you wish to keep
the relative timing of all instructions in the schedule set ``align_all=True``.
This method assumes that ``MeasureChannel(i)`` and ``AcquireChannel(i)``
correspond to the same qubit and the acquire/play instructions
should be shifted together on these channels.
.. jupyter-kernel:: python3
:id: align_measures
.. jupyter-execute::
from qiskit import pulse
from qiskit.pulse import transforms
with pulse.build() as sched:
with pulse.align_sequential():
pulse.play(pulse.Constant(10, 0.5), pulse.DriveChannel(0))
pulse.play(pulse.Constant(10, 1.), pulse.MeasureChannel(0))
pulse.acquire(20, pulse.AcquireChannel(0), pulse.MemorySlot(0))
sched_shifted = sched << 20
aligned_sched, aligned_sched_shifted = transforms.align_measures([sched, sched_shifted])
assert aligned_sched == aligned_sched_shifted
If it is desired to only shift acquisition and measurement stimulus instructions
set the flag ``align_all=False``:
.. jupyter-execute::
aligned_sched, aligned_sched_shifted = transforms.align_measures(
[sched, sched_shifted],
align_all=False,
)
assert aligned_sched != aligned_sched_shifted
Args:
schedules: Collection of schedules to be aligned together
inst_map: Mapping of circuit operations to pulse schedules
cal_gate: The name of the gate to inspect for the calibration time
max_calibration_duration: If provided, inst_map and cal_gate will be ignored
align_time: If provided, this will be used as final align time.
align_all: Shift all instructions in the schedule such that they maintain
their relative alignment with the shifted acquisition instruction.
If ``False`` only the acquisition and measurement pulse instructions
will be shifted.
Returns:
The input list of schedules transformed to have their measurements aligned.
Raises:
PulseError: If the provided alignment time is negative.
"""
def get_first_acquire_times(schedules):
"""Return a list of first acquire times for each schedule."""
acquire_times = []
for schedule in schedules:
visited_channels = set()
qubit_first_acquire_times = defaultdict(lambda: None)
for time, inst in schedule.instructions:
if (isinstance(inst, instructions.Acquire) and
inst.channel not in visited_channels):
visited_channels.add(inst.channel)
qubit_first_acquire_times[inst.channel.index] = time
acquire_times.append(qubit_first_acquire_times)
return acquire_times
def get_max_calibration_duration(inst_map, cal_gate):
"""Return the time needed to allow for readout discrimination calibration pulses."""
# TODO (qiskit-terra #5472): fix behavior of this.
max_calibration_duration = 0
for qubits in inst_map.qubits_with_instruction(cal_gate):
cmd = inst_map.get(cal_gate, qubits, np.pi, 0, np.pi)
max_calibration_duration = max(cmd.duration, max_calibration_duration)
return max_calibration_duration
if align_time is not None and align_time < 0:
raise exceptions.PulseError("Align time cannot be negative.")
first_acquire_times = get_first_acquire_times(schedules)
# Extract the maximum acquire in every schedule across all acquires in the schedule.
# If there are no acquires in the schedule default to 0.
max_acquire_times = [max(0, *times.values()) for times in first_acquire_times]
if align_time is None:
if max_calibration_duration is None:
if inst_map:
max_calibration_duration = get_max_calibration_duration(inst_map, cal_gate)
else:
max_calibration_duration = 0
align_time = max(max_calibration_duration, *max_acquire_times)
# Shift acquires according to the new scheduled time
new_schedules = []
for sched_idx, schedule in enumerate(schedules):
new_schedule = Schedule(name=schedule.name, metadata=schedule.metadata)
stop_time = schedule.stop_time
if align_all:
if first_acquire_times[sched_idx]:
shift = align_time - max_acquire_times[sched_idx]
else:
shift = align_time - stop_time
else:
shift = 0
for time, inst in schedule.instructions:
measurement_channels = {
chan.index for chan in inst.channels if
isinstance(chan, (chans.MeasureChannel, chans.AcquireChannel))
}
if measurement_channels:
sched_first_acquire_times = first_acquire_times[sched_idx]
max_start_time = max(sched_first_acquire_times[chan]
for chan in measurement_channels if
chan in sched_first_acquire_times)
shift = align_time - max_start_time
if shift < 0:
warnings.warn(
"The provided alignment time is scheduling an acquire instruction "
"earlier than it was scheduled for in the original Schedule. "
"This may result in an instruction being scheduled before t=0 and "
"an error being raised."
)
new_schedule.insert(time+shift, inst, inplace=True)
new_schedules.append(new_schedule)
return new_schedules
def add_implicit_acquires(schedule: ScheduleComponent,
meas_map: List[List[int]]
) -> Schedule:
"""Return a new schedule with implicit acquires from the measurement mapping replaced by
explicit ones.
.. warning:: Since new acquires are being added, Memory Slots will be set to match the
qubit index. This may overwrite your specification.
Args:
schedule: Schedule to be aligned.
meas_map: List of lists of qubits that are measured together.
Returns:
A ``Schedule`` with the additional acquisition instructions.
"""
new_schedule = Schedule(name=schedule.name, metadata=schedule.metadata)
acquire_map = dict()
for time, inst in schedule.instructions:
if isinstance(inst, instructions.Acquire):
if inst.mem_slot and inst.mem_slot.index != inst.channel.index:
warnings.warn("One of your acquires was mapped to a memory slot which didn't match"
" the qubit index. I'm relabeling them to match.")
# Get the label of all qubits that are measured with the qubit(s) in this instruction
all_qubits = []
for sublist in meas_map:
if inst.channel.index in sublist:
all_qubits.extend(sublist)
# Replace the old acquire instruction by a new one explicitly acquiring all qubits in
# the measurement group.
for i in all_qubits:
explicit_inst = instructions.Acquire(inst.duration,
chans.AcquireChannel(i),
mem_slot=chans.MemorySlot(i),
kernel=inst.kernel,
discriminator=inst.discriminator)
if time not in acquire_map:
new_schedule.insert(time, explicit_inst, inplace=True)
acquire_map = {time: {i}}
elif i not in acquire_map[time]:
new_schedule.insert(time, explicit_inst, inplace=True)
acquire_map[time].add(i)
else:
new_schedule.insert(time, inst, inplace=True)
return new_schedule | en | 0.803862 | # This code is part of Qiskit. # # (C) Copyright IBM 2021. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. Basic rescheduling functions which take schedule or instructions and return new schedules. Convert ``ScheduleBlock`` to ``Schedule``. Args: block: A ``ScheduleBlock`` to convert. Returns: Scheduled pulse program. Raises: UnassignedDurationError: When any instruction duration is not assigned. # transform with defined policy Optimization pass to replace identical pulses. Args: schedules: Schedules to compress. Returns: Compressed schedules. Flatten (inline) any called nodes into a Schedule tree with no nested children. Args: program: Pulse program to remove nested structure. Returns: Flatten pulse program. Raises: PulseError: When invalid data format is given. Recursively remove call instructions and inline the respective subroutine instructions. Assigned parameter values, which are stored in the parameter table, are also applied. The subroutine is copied before the parameter assignment to avoid mutation problem. Args: program: A program which may contain the subroutine, i.e. ``Call`` instruction. Returns: A schedule without subroutine. Raises: PulseError: When input program is not valid data format. A helper function to inline subroutine of schedule. .. note:: If subroutine is ``ScheduleBlock`` it is converted into Schedule to get ``t0``. # bind parameter # convert into schedule if block is given # recursively inline the program A helper function to inline subroutine of schedule block. .. note:: If subroutine is ``Schedule`` the function raises an error. # bind parameter # recursively inline the program Remove directives. Args: schedule: A schedule to remove compiler directives. Returns: A schedule without directives. Remove trivial barriers with 0 or 1 channels. Args: schedule: A schedule to remove trivial barriers. Returns: schedule: A schedule without trivial barriers Return new schedules where measurements occur at the same physical time. This transformation will align the first :class:`qiskit.pulse.Acquire` on every channel to occur at the same time. Minimum measurement wait time (to allow for calibration pulses) is enforced and may be set with ``max_calibration_duration``. By default only instructions containing a :class:`~qiskit.pulse.AcquireChannel` or :class:`~qiskit.pulse.MeasureChannel` will be shifted. If you wish to keep the relative timing of all instructions in the schedule set ``align_all=True``. This method assumes that ``MeasureChannel(i)`` and ``AcquireChannel(i)`` correspond to the same qubit and the acquire/play instructions should be shifted together on these channels. .. jupyter-kernel:: python3 :id: align_measures .. 
jupyter-execute:: from qiskit import pulse from qiskit.pulse import transforms with pulse.build() as sched: with pulse.align_sequential(): pulse.play(pulse.Constant(10, 0.5), pulse.DriveChannel(0)) pulse.play(pulse.Constant(10, 1.), pulse.MeasureChannel(0)) pulse.acquire(20, pulse.AcquireChannel(0), pulse.MemorySlot(0)) sched_shifted = sched << 20 aligned_sched, aligned_sched_shifted = transforms.align_measures([sched, sched_shifted]) assert aligned_sched == aligned_sched_shifted If it is desired to only shift acquisition and measurement stimulus instructions set the flag ``align_all=False``: .. jupyter-execute:: aligned_sched, aligned_sched_shifted = transforms.align_measures( [sched, sched_shifted], align_all=False, ) assert aligned_sched != aligned_sched_shifted Args: schedules: Collection of schedules to be aligned together inst_map: Mapping of circuit operations to pulse schedules cal_gate: The name of the gate to inspect for the calibration time max_calibration_duration: If provided, inst_map and cal_gate will be ignored align_time: If provided, this will be used as final align time. align_all: Shift all instructions in the schedule such that they maintain their relative alignment with the shifted acquisition instruction. If ``False`` only the acquisition and measurement pulse instructions will be shifted. Returns: The input list of schedules transformed to have their measurements aligned. Raises: PulseError: If the provided alignment time is negative. Return a list of first acquire times for each schedule. Return the time needed to allow for readout discrimination calibration pulses. # TODO (qiskit-terra #5472): fix behavior of this. # Extract the maximum acquire in every schedule across all acquires in the schedule. # If there are no acquires in the schedule default to 0. # Shift acquires according to the new scheduled time Return a new schedule with implicit acquires from the measurement mapping replaced by explicit ones. .. warning:: Since new acquires are being added, Memory Slots will be set to match the qubit index. This may overwrite your specification. Args: schedule: Schedule to be aligned. meas_map: List of lists of qubits that are measured together. Returns: A ``Schedule`` with the additional acquisition instructions. # Get the label of all qubits that are measured with the qubit(s) in this instruction # Replace the old acquire instruction by a new one explicitly acquiring all qubits in # the measurement group. | 2.085211 | 2 |
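As a quick illustration of the canonicalization passes above, the sketch below builds a two-instruction Schedule by hand and runs it through compress_pulses; it assumes a qiskit-terra installation whose pulse API matches the 2021-era code in this entry.

from qiskit import pulse
from qiskit.pulse import transforms

play0 = pulse.Play(pulse.Constant(100, 0.2), pulse.DriveChannel(0))
play1 = pulse.Play(pulse.Constant(100, 0.2), pulse.DriveChannel(1))
sched = pulse.Schedule(name="demo").insert(0, play0).insert(0, play1)

compressed, = transforms.compress_pulses([sched])
plays = [inst for _, inst in compressed.instructions if isinstance(inst, pulse.Play)]
assert plays[0].pulse is plays[1].pulse   # identical pulses now share a single object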
tests/test_scraper.py | ananelson/oacensus | 0 | 8822 | from oacensus.scraper import Scraper
from oacensus.commands import defaults
class TestScraper(Scraper):
"""
Scraper for testing scraper methods.
"""
aliases = ['testscraper']
def scrape(self):
pass
def process(self):
pass
def test_hashcode():
scraper = Scraper.create_instance('testscraper', defaults)
assert len(scraper.hashcode()) == 32
def test_run():
scraper = Scraper.create_instance('testscraper', defaults)
scraper.run()
| from oacensus.scraper import Scraper
from oacensus.commands import defaults
class TestScraper(Scraper):
"""
Scraper for testing scraper methods.
"""
aliases = ['testscraper']
def scrape(self):
pass
def process(self):
pass
def test_hashcode():
scraper = Scraper.create_instance('testscraper', defaults)
assert len(scraper.hashcode()) == 32
def test_run():
scraper = Scraper.create_instance('testscraper', defaults)
scraper.run()
| en | 0.939762 | Scraper for testing scraper methods. | 2.593766 | 3 |
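One further check that could sit alongside the tests above, sketched here only: it assumes hashcode() is a deterministic function of the scraper alias and settings, which the entry itself does not state.

def test_hashcode_is_stable():
    # hypothetical extra test, not part of the original file
    first = Scraper.create_instance('testscraper', defaults)
    second = Scraper.create_instance('testscraper', defaults)
    assert first.hashcode() == second.hashcode()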
python/test-deco-1-1.py | li-ma/homework | 0 | 8823 | def deco1(func):
print("before myfunc() called.")
func()
print("after myfunc() called.")
def myfunc():
print("myfunc() called.")
deco1(myfunc)
| def deco1(func):
print("before myfunc() called.")
func()
print("after myfunc() called.")
def myfunc():
print("myfunc() called.")
deco1(myfunc)
| none | 1 | 2.595969 | 3 |
|
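The homework file above applies deco1 by calling it directly, and deco1 runs the target immediately rather than returning a wrapper. For contrast, a conventional decorator using the @ syntax might look like the sketch below (new illustrative code, not part of the original file).

import functools

def deco2(func):
    # returns a wrapper instead of calling func right away
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        print("before %s() called." % func.__name__)
        result = func(*args, **kwargs)
        print("after %s() called." % func.__name__)
        return result
    return wrapper

@deco2
def myfunc2():
    print("myfunc2() called.")

myfunc2()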
lib/jnpr/junos/transport/tty_netconf.py | mmoucka/py-junos-eznc | 0 | 8824 | <reponame>mmoucka/py-junos-eznc
import re
import time
from lxml import etree
import select
import socket
import logging
import sys
from lxml.builder import E
from lxml.etree import XMLSyntaxError
from datetime import datetime, timedelta
from ncclient.operations.rpc import RPCReply, RPCError
from ncclient.xml_ import to_ele
import six
from ncclient.transport.session import HelloHandler
class PY6:
NEW_LINE = six.b("\n")
EMPTY_STR = six.b("")
NETCONF_EOM = six.b("]]>]]>")
STARTS_WITH = six.b("<!--")
__all__ = ["xmlmode_netconf"]
_NETCONF_EOM = six.b("]]>]]>")
_xmlns = re.compile(six.b("xmlns=[^>]+"))
_xmlns_strip = lambda text: _xmlns.sub(PY6.EMPTY_STR, text)
_junosns = re.compile(six.b("junos:"))
_junosns_strip = lambda text: _junosns.sub(PY6.EMPTY_STR, text)
logger = logging.getLogger("jnpr.junos.tty_netconf")
# =========================================================================
# xmlmode_netconf
# =========================================================================
class tty_netconf(object):
"""
    Basic Junos XML API for bootstrapping through the TTY
"""
def __init__(self, tty):
self._tty = tty
self.hello = None
self._session_id = -1
# -------------------------------------------------------------------------
# NETCONF session open and close
# -------------------------------------------------------------------------
def open(self, at_shell):
""" start the XML API process and receive the 'hello' message """
nc_cmd = ("junoscript", "xml-mode")[at_shell]
self._tty.write(nc_cmd + " netconf need-trailer")
mark_start = datetime.now()
mark_end = mark_start + timedelta(seconds=15)
while datetime.now() < mark_end:
time.sleep(0.1)
line = self._tty.read()
if line.startswith(PY6.STARTS_WITH):
break
else:
# exceeded the while loop timeout
raise RuntimeError("Error: netconf not responding")
self.hello = self._receive()
self._session_id, _ = HelloHandler.parse(self.hello.decode("utf-8"))
def close(self, device_handler, force=False):
""" issue the XML API to close the session """
# if we do not have an open connection, then return now.
if force is False:
if self.hello is None:
return
self.rpc("close-session", device_handler)
# removed flush
# -------------------------------------------------------------------------
# MISC device commands
# -------------------------------------------------------------------------
def zeroize(self):
""" issue a reboot to the device """
cmd = E.command("request system zeroize")
try:
encode = None if sys.version < "3" else "unicode"
self.rpc(etree.tostring(cmd, encoding=encode))
except:
return False
return True
# -------------------------------------------------------------------------
# XML RPC command execution
# -------------------------------------------------------------------------
def rpc(self, cmd, device_handler):
"""
Write the XML cmd and return the response as XML object.
:cmd:
<str> of the XML command. if the :cmd: is not XML, then
this routine will perform the brackets; i.e. if given
'get-software-information', this routine will turn
it into '<get-software-information/>'
NOTES:
The return XML object is the first child element after
the <rpc-reply>. There is also no error-checking
performing by this routine.
"""
if not cmd.startswith("<"):
cmd = "<{}/>".format(cmd)
rpc = six.b("<rpc>{}</rpc>".format(cmd))
logger.info("Calling rpc: %s" % rpc)
self._tty.rawwrite(rpc)
rsp = self._receive()
rsp = rsp.decode("utf-8") if isinstance(rsp, bytes) else rsp
reply = RPCReply(rsp, device_handler, huge_tree=self._tty._huge_tree)
errors = reply.errors
if len(errors) > 1:
raise RPCError(to_ele(reply._raw), errs=errors)
elif len(errors) == 1:
raise reply.error
return reply
# -------------------------------------------------------------------------
# LOW-LEVEL I/O for reading back XML response
# -------------------------------------------------------------------------
def _receive(self):
# On windows select.select throws io.UnsupportedOperation: fileno
# so use read function for windows serial COM ports
if hasattr(self._tty, "port") and str(self._tty.port).startswith("COM"):
return self._receive_serial_win()
else:
return self._receive_serial()
def _receive_serial(self):
""" process the XML response into an XML object """
rxbuf = PY6.EMPTY_STR
line = PY6.EMPTY_STR
while True:
try:
rd, wt, err = select.select([self._tty._rx], [], [], 0.1)
except select.error as err:
raise err
except socket.error as err:
raise err
if rd:
line, lastline = rd[0].read_until(PY6.NETCONF_EOM, 0.1), line
if not line:
continue
if _NETCONF_EOM in line or _NETCONF_EOM in lastline + line:
rxbuf = rxbuf + line
break
else:
rxbuf = rxbuf + line
if _NETCONF_EOM in rxbuf:
break
return self._parse_buffer(rxbuf)
# -------------------------------------------------------------------------
# Read message from windows COM ports
# -------------------------------------------------------------------------
def _receive_serial_win(self):
""" process incoming data from windows port"""
rxbuf = PY6.EMPTY_STR
line = PY6.EMPTY_STR
while True:
line, lastline = self._tty.read().strip(), line
if not line:
continue
if _NETCONF_EOM in line or _NETCONF_EOM in lastline + line:
rxbuf = rxbuf + line
break
else:
rxbuf = rxbuf + line
if _NETCONF_EOM in rxbuf:
break
return self._parse_buffer(rxbuf)
def _parse_buffer(self, rxbuf):
rxbuf = rxbuf.splitlines()
if _NETCONF_EOM in rxbuf[-1]:
if rxbuf[-1] == _NETCONF_EOM:
rxbuf.pop()
else:
rxbuf[-1] = rxbuf[-1].split(_NETCONF_EOM)[0]
try:
rxbuf = [i.strip() for i in rxbuf if i.strip() != PY6.EMPTY_STR]
rcvd_data = PY6.NEW_LINE.join(rxbuf)
logger.debug("Received: \n%s" % rcvd_data)
parser = etree.XMLParser(
remove_blank_text=True, huge_tree=self._tty._huge_tree
)
try:
etree.XML(rcvd_data, parser)
except XMLSyntaxError:
if _NETCONF_EOM in rcvd_data:
rcvd_data = rcvd_data[: rcvd_data.index(_NETCONF_EOM)]
etree.XML(rcvd_data) # just to recheck
else:
parser = etree.XMLParser(recover=True)
rcvd_data = etree.tostring(etree.XML(rcvd_data, parser=parser))
return rcvd_data
except:
if "</xnm:error>" in rxbuf:
for x in rxbuf:
if "<message>" in x:
return etree.XML(
"<error-in-receive>" + x + "</error-in-receive>"
)
else:
return etree.XML("<error-in-receive/>")
| import re
import time
from lxml import etree
import select
import socket
import logging
import sys
from lxml.builder import E
from lxml.etree import XMLSyntaxError
from datetime import datetime, timedelta
from ncclient.operations.rpc import RPCReply, RPCError
from ncclient.xml_ import to_ele
import six
from ncclient.transport.session import HelloHandler
class PY6:
NEW_LINE = six.b("\n")
EMPTY_STR = six.b("")
NETCONF_EOM = six.b("]]>]]>")
STARTS_WITH = six.b("<!--")
__all__ = ["xmlmode_netconf"]
_NETCONF_EOM = six.b("]]>]]>")
_xmlns = re.compile(six.b("xmlns=[^>]+"))
_xmlns_strip = lambda text: _xmlns.sub(PY6.EMPTY_STR, text)
_junosns = re.compile(six.b("junos:"))
_junosns_strip = lambda text: _junosns.sub(PY6.EMPTY_STR, text)
logger = logging.getLogger("jnpr.junos.tty_netconf")
# =========================================================================
# xmlmode_netconf
# =========================================================================
class tty_netconf(object):
"""
    Basic Junos XML API for bootstrapping through the TTY
"""
def __init__(self, tty):
self._tty = tty
self.hello = None
self._session_id = -1
# -------------------------------------------------------------------------
# NETCONF session open and close
# -------------------------------------------------------------------------
def open(self, at_shell):
""" start the XML API process and receive the 'hello' message """
nc_cmd = ("junoscript", "xml-mode")[at_shell]
self._tty.write(nc_cmd + " netconf need-trailer")
mark_start = datetime.now()
mark_end = mark_start + timedelta(seconds=15)
while datetime.now() < mark_end:
time.sleep(0.1)
line = self._tty.read()
if line.startswith(PY6.STARTS_WITH):
break
else:
# exceeded the while loop timeout
raise RuntimeError("Error: netconf not responding")
self.hello = self._receive()
self._session_id, _ = HelloHandler.parse(self.hello.decode("utf-8"))
def close(self, device_handler, force=False):
""" issue the XML API to close the session """
# if we do not have an open connection, then return now.
if force is False:
if self.hello is None:
return
self.rpc("close-session", device_handler)
# removed flush
# -------------------------------------------------------------------------
# MISC device commands
# -------------------------------------------------------------------------
def zeroize(self):
""" issue a reboot to the device """
cmd = E.command("request system zeroize")
try:
encode = None if sys.version < "3" else "unicode"
self.rpc(etree.tostring(cmd, encoding=encode))
except:
return False
return True
# -------------------------------------------------------------------------
# XML RPC command execution
# -------------------------------------------------------------------------
def rpc(self, cmd, device_handler):
"""
Write the XML cmd and return the response as XML object.
:cmd:
<str> of the XML command. if the :cmd: is not XML, then
this routine will perform the brackets; i.e. if given
'get-software-information', this routine will turn
it into '<get-software-information/>'
NOTES:
The return XML object is the first child element after
the <rpc-reply>. There is also no error-checking
performing by this routine.
"""
if not cmd.startswith("<"):
cmd = "<{}/>".format(cmd)
rpc = six.b("<rpc>{}</rpc>".format(cmd))
logger.info("Calling rpc: %s" % rpc)
self._tty.rawwrite(rpc)
rsp = self._receive()
rsp = rsp.decode("utf-8") if isinstance(rsp, bytes) else rsp
reply = RPCReply(rsp, device_handler, huge_tree=self._tty._huge_tree)
errors = reply.errors
if len(errors) > 1:
raise RPCError(to_ele(reply._raw), errs=errors)
elif len(errors) == 1:
raise reply.error
return reply
# -------------------------------------------------------------------------
# LOW-LEVEL I/O for reading back XML response
# -------------------------------------------------------------------------
def _receive(self):
# On windows select.select throws io.UnsupportedOperation: fileno
# so use read function for windows serial COM ports
if hasattr(self._tty, "port") and str(self._tty.port).startswith("COM"):
return self._receive_serial_win()
else:
return self._receive_serial()
def _receive_serial(self):
""" process the XML response into an XML object """
rxbuf = PY6.EMPTY_STR
line = PY6.EMPTY_STR
while True:
try:
rd, wt, err = select.select([self._tty._rx], [], [], 0.1)
except select.error as err:
raise err
except socket.error as err:
raise err
if rd:
line, lastline = rd[0].read_until(PY6.NETCONF_EOM, 0.1), line
if not line:
continue
if _NETCONF_EOM in line or _NETCONF_EOM in lastline + line:
rxbuf = rxbuf + line
break
else:
rxbuf = rxbuf + line
if _NETCONF_EOM in rxbuf:
break
return self._parse_buffer(rxbuf)
# -------------------------------------------------------------------------
# Read message from windows COM ports
# -------------------------------------------------------------------------
def _receive_serial_win(self):
""" process incoming data from windows port"""
rxbuf = PY6.EMPTY_STR
line = PY6.EMPTY_STR
while True:
line, lastline = self._tty.read().strip(), line
if not line:
continue
if _NETCONF_EOM in line or _NETCONF_EOM in lastline + line:
rxbuf = rxbuf + line
break
else:
rxbuf = rxbuf + line
if _NETCONF_EOM in rxbuf:
break
return self._parse_buffer(rxbuf)
def _parse_buffer(self, rxbuf):
rxbuf = rxbuf.splitlines()
if _NETCONF_EOM in rxbuf[-1]:
if rxbuf[-1] == _NETCONF_EOM:
rxbuf.pop()
else:
rxbuf[-1] = rxbuf[-1].split(_NETCONF_EOM)[0]
try:
rxbuf = [i.strip() for i in rxbuf if i.strip() != PY6.EMPTY_STR]
rcvd_data = PY6.NEW_LINE.join(rxbuf)
logger.debug("Received: \n%s" % rcvd_data)
parser = etree.XMLParser(
remove_blank_text=True, huge_tree=self._tty._huge_tree
)
try:
etree.XML(rcvd_data, parser)
except XMLSyntaxError:
if _NETCONF_EOM in rcvd_data:
rcvd_data = rcvd_data[: rcvd_data.index(_NETCONF_EOM)]
etree.XML(rcvd_data) # just to recheck
else:
parser = etree.XMLParser(recover=True)
rcvd_data = etree.tostring(etree.XML(rcvd_data, parser=parser))
return rcvd_data
except:
if "</xnm:error>" in rxbuf:
for x in rxbuf:
if "<message>" in x:
return etree.XML(
"<error-in-receive>" + x + "</error-in-receive>"
)
else:
return etree.XML("<error-in-receive/>") | en | 0.524567 | # ========================================================================= # xmlmode_netconf # ========================================================================= Basic Junos XML API for bootstraping through the TTY # ------------------------------------------------------------------------- # NETCONF session open and close # ------------------------------------------------------------------------- start the XML API process and receive the 'hello' message # exceeded the while loop timeout issue the XML API to close the session # if we do not have an open connection, then return now. # removed flush # ------------------------------------------------------------------------- # MISC device commands # ------------------------------------------------------------------------- issue a reboot to the device # ------------------------------------------------------------------------- # XML RPC command execution # ------------------------------------------------------------------------- Write the XML cmd and return the response as XML object. :cmd: <str> of the XML command. if the :cmd: is not XML, then this routine will perform the brackets; i.e. if given 'get-software-information', this routine will turn it into '<get-software-information/>' NOTES: The return XML object is the first child element after the <rpc-reply>. There is also no error-checking performing by this routine. # ------------------------------------------------------------------------- # LOW-LEVEL I/O for reading back XML response # ------------------------------------------------------------------------- # On windows select.select throws io.UnsupportedOperation: fileno # so use read function for windows serial COM ports process the XML response into an XML object # ------------------------------------------------------------------------- # Read message from windows COM ports # ------------------------------------------------------------------------- process incoming data from windows port # just to recheck | 2.224512 | 2 |
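The bracket-wrapping rule described in the rpc() docstring above can be shown in isolation; the helper name wrap_rpc below is invented for illustration and simply mirrors the first statements of tty_netconf.rpc.

import six

def wrap_rpc(cmd):
    # mirrors the command wrapping performed by tty_netconf.rpc() above
    if not cmd.startswith("<"):
        cmd = "<{}/>".format(cmd)
    return six.b("<rpc>{}</rpc>".format(cmd))

assert wrap_rpc("get-software-information") == b"<rpc><get-software-information/></rpc>"
assert wrap_rpc("<get-chassis-inventory/>") == b"<rpc><get-chassis-inventory/></rpc>"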
test/_test_client.py | eydam-prototyping/mp_modbus | 2 | 8825 | from pymodbus.client.sync import ModbusTcpClient as ModbusClient
import logging
FORMAT = ('%(asctime)-15s %(threadName)-15s '
'%(levelname)-8s %(module)-15s:%(lineno)-8s %(message)s')
logging.basicConfig(format=FORMAT)
log = logging.getLogger()
log.setLevel(logging.DEBUG)
client = ModbusClient('192.168.178.61', port=502)
client.connect()
f = client.read_holding_registers(305,1)
print(f.registers) | from pymodbus.client.sync import ModbusTcpClient as ModbusClient
import logging
FORMAT = ('%(asctime)-15s %(threadName)-15s '
'%(levelname)-8s %(module)-15s:%(lineno)-8s %(message)s')
logging.basicConfig(format=FORMAT)
log = logging.getLogger()
log.setLevel(logging.DEBUG)
client = ModbusClient('192.168.178.61', port=502)
client.connect()
f = client.read_holding_registers(305,1)
print(f.registers) | none | 1 | 2.134649 | 2 |
|
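The Modbus check above leaves the TCP connection open and assumes the read succeeds. A slightly more defensive variant using the same pymodbus 2.x synchronous API is sketched below; the host address and register number are carried over from the original, and isError() is assumed to be available as in pymodbus 2.x.

from pymodbus.client.sync import ModbusTcpClient as ModbusClient

client = ModbusClient('192.168.178.61', port=502)
if client.connect():
    result = client.read_holding_registers(305, 1)
    if result.isError():
        print('read failed:', result)
    else:
        print(result.registers)
    client.close()
else:
    print('could not connect to 192.168.178.61:502')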
tests/selenium/test_about/test_about_page.py | technolotrix/tests | 0 | 8826 | import unittest
from selenium import webdriver
import page
class AboutPage(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.get("http://nicolesmith.nyc")
#self.driver.get("http://127.0.0.1:4747/about")
self.about_page = page.AboutPage(self.driver)
######## HEADER STUFF ########
def test_title_on_about_page(self):
assert self.about_page.is_title_matches(), "about page title doesn't match"
def test_click_get_quote(self):
assert self.about_page.click_quote_button(), "link to contact page is broken"
def test_click_home_button(self):
assert self.about_page.click_home_button(), "home button does not go to homepage"
@unittest.skip("Needs fixing.")
def test_click_about_link(self):
assert self.about_page.click_projects_link(), "about link does not go to about page"
@unittest.skip("Needs fixing.")
def test_click_projects_link(self):
assert self.about_page.click_projects_link(), "projects link does not go to projects page"
@unittest.skip("Needs fixing.")
def test_click_services_link(self):
assert self.about_page.click_projects_link(), "services link does not go to services page"
######## PAGE SPECIFIC STUFF ########
    def test_click_resume(self):
        assert self.about_page.click_resume(), "link to resume is broken"
    def test_click_resumator(self):
        assert self.about_page.click_resumator(), "link to resumator is broken"
    def test_click_contact_me(self):
        assert self.about_page.click_contact_me(), "link to contact me page is broken in FAQ"
    def test_click_html5up_backlink(self):
        assert self.about_page.click_html5up_backlink(), "backlink to html5up in FAQ is broken"
######## FOOTER STUFF ########
def test_click_github(self):
assert self.about_page.click_github_button(), "link to github is broken"
def test_click_linkedin(self):
assert self.about_page.click_linkedin_button(), "link to linkedin is broken"
def test_click_gplus(self):
assert self.about_page.click_gplus_button(), "link to google plus is broken"
def test_click_twitter(self):
assert self.about_page.click_twitter_button(), "link to twitter is broken"
def test_click_html5up(self):
assert self.about_page.click_html5up_link(), "link to html5up template owner is broken"
def test_copyright_on_about_page(self):
assert self.about_page.is_copyright_matches(), "about page has wrong copyright"
def tearDown(self):
self.driver.close()
if __name__ == "__main__":
unittest.main() | import unittest
from selenium import webdriver
import page
class AboutPage(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.get("http://nicolesmith.nyc")
#self.driver.get("http://127.0.0.1:4747/about")
self.about_page = page.AboutPage(self.driver)
######## HEADER STUFF ########
def test_title_on_about_page(self):
assert self.about_page.is_title_matches(), "about page title doesn't match"
def test_click_get_quote(self):
assert self.about_page.click_quote_button(), "link to contact page is broken"
def test_click_home_button(self):
assert self.about_page.click_home_button(), "home button does not go to homepage"
@unittest.skip("Needs fixing.")
def test_click_about_link(self):
assert self.about_page.click_projects_link(), "about link does not go to about page"
@unittest.skip("Needs fixing.")
def test_click_projects_link(self):
assert self.about_page.click_projects_link(), "projects link does not go to projects page"
@unittest.skip("Needs fixing.")
def test_click_services_link(self):
assert self.about_page.click_projects_link(), "services link does not go to services page"
######## PAGE SPECIFIC STUFF ########
    def test_click_resume(self):
        assert self.about_page.click_resume(), "link to resume is broken"
    def test_click_resumator(self):
        assert self.about_page.click_resumator(), "link to resumator is broken"
    def test_click_contact_me(self):
        assert self.about_page.click_contact_me(), "link to contact me page is broken in FAQ"
    def test_click_html5up_backlink(self):
        assert self.about_page.click_html5up_backlink(), "backlink to html5up in FAQ is broken"
######## FOOTER STUFF ########
def test_click_github(self):
assert self.about_page.click_github_button(), "link to github is broken"
def test_click_linkedin(self):
assert self.about_page.click_linkedin_button(), "link to linkedin is broken"
def test_click_gplus(self):
assert self.about_page.click_gplus_button(), "link to google plus is broken"
def test_click_twitter(self):
assert self.about_page.click_twitter_button(), "link to twitter is broken"
def test_click_html5up(self):
assert self.about_page.click_html5up_link(), "link to html5up template owner is broken"
def test_copyright_on_about_page(self):
assert self.about_page.is_copyright_matches(), "about page has wrong copyright"
def tearDown(self):
self.driver.close()
if __name__ == "__main__":
unittest.main() | de | 0.331163 | #self.driver.get("http://127.0.0.1:4747/about") ######## HEADER STUFF ######## ######## PAGE SPECIFIC STUFF ######## ######## FOOTER STUFF ######## | 3.13721 | 3 |
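The tests above depend on a page.AboutPage page object that is not part of this entry. A hypothetical sketch of what such a class might look like follows; every locator and method body is invented for illustration, and only the method names come from the tests themselves.

class AboutPage(object):
    # hypothetical page object for the `page` module; locators are placeholders
    def __init__(self, driver):
        self.driver = driver
    def is_title_matches(self):
        return "About" in self.driver.title
    def click_home_button(self):
        self.driver.find_element_by_id("home").click()
        return "nicolesmith.nyc" in self.driver.current_url
    def click_quote_button(self):
        self.driver.find_element_by_link_text("Get a Quote").click()
        return "/contact" in self.driver.current_url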
sdk/python/lib/test/langhost/future_input/__main__.py | pcen/pulumi | 12,004 | 8827 | # Copyright 2016-2018, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from pulumi import CustomResource, Output, Input
async def read_a_file_or_something():
await asyncio.sleep(0)
return "here's a file"
def assert_eq(l, r):
assert l == r
class FileResource(CustomResource):
contents: Output[str]
def __init__(self, name: str, file_contents: Input[str]) -> None:
CustomResource.__init__(self, "test:index:FileResource", name, {
"contents": file_contents
})
# read_a_file_or_something returns a coroutine when called, which needs to be scheduled
# and awaited in order to yield a value.
file_res = FileResource("file", read_a_file_or_something())
file_res.contents.apply(lambda c: assert_eq(c, "here's a file"))
| # Copyright 2016-2018, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from pulumi import CustomResource, Output, Input
async def read_a_file_or_something():
await asyncio.sleep(0)
return "here's a file"
def assert_eq(l, r):
assert l == r
class FileResource(CustomResource):
contents: Output[str]
def __init__(self, name: str, file_contents: Input[str]) -> None:
CustomResource.__init__(self, "test:index:FileResource", name, {
"contents": file_contents
})
# read_a_file_or_something returns a coroutine when called, which needs to be scheduled
# and awaited in order to yield a value.
file_res = FileResource("file", read_a_file_or_something())
file_res.contents.apply(lambda c: assert_eq(c, "here's a file"))
| en | 0.860297 | # Copyright 2016-2018, Pulumi Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # read_a_file_or_something returns a coroutine when called, which needs to be scheduled # and awaited in order to yield a value. | 2.686109 | 3 |
src/dewloosh/geom/cells/h8.py | dewloosh/dewloosh-geom | 2 | 8828 | <filename>src/dewloosh/geom/cells/h8.py
# -*- coding: utf-8 -*-
from dewloosh.geom.polyhedron import HexaHedron
from dewloosh.math.numint import GaussPoints as Gauss
from dewloosh.geom.utils import cells_coords
from numba import njit, prange
import numpy as np
from numpy import ndarray
__cache = True
@njit(nogil=True, cache=__cache)
def monoms_H8(pcoord: np.ndarray):
r, s, t = pcoord
return np.array([1, r, s, t, r*s, r*t, s*t, r*s*t])
@njit(nogil=True, cache=__cache)
def shp_H8(pcoord):
r, s, t = pcoord
return np.array([-0.125*r*s*t + 0.125*r*s + 0.125*r*t - 0.125*r +
0.125*s*t - 0.125*s - 0.125*t + 0.125,
0.125*r*s*t - 0.125*r*s - 0.125*r*t + 0.125*r +
0.125*s*t - 0.125*s - 0.125*t + 0.125,
-0.125*r*s*t + 0.125*r*s - 0.125*r*t + 0.125*r -
0.125*s*t + 0.125*s - 0.125*t + 0.125,
0.125*r*s*t - 0.125*r*s + 0.125*r*t - 0.125*r -
0.125*s*t + 0.125*s - 0.125*t + 0.125,
0.125*r*s*t + 0.125*r*s - 0.125*r*t - 0.125*r -
0.125*s*t - 0.125*s + 0.125*t + 0.125,
-0.125*r*s*t - 0.125*r*s + 0.125*r*t + 0.125*r -
0.125*s*t - 0.125*s + 0.125*t + 0.125,
0.125*r*s*t + 0.125*r*s + 0.125*r*t + 0.125*r +
0.125*s*t + 0.125*s + 0.125*t + 0.125,
-0.125*r*s*t - 0.125*r*s - 0.125*r*t - 0.125*r +
0.125*s*t + 0.125*s + 0.125*t + 0.125]
)
@njit(nogil=True, parallel=True, cache=__cache)
def shape_function_matrix_H8(pcoord: np.ndarray):
eye = np.eye(3, dtype=pcoord.dtype)
shp = shp_H8(pcoord)
res = np.zeros((3, 24), dtype=pcoord.dtype)
for i in prange(8):
res[:, i*3: (i+1) * 3] = eye*shp[i]
return res
@njit(nogil=True, cache=__cache)
def dshp_H8(pcoord):
r, s, t = pcoord
return np.array(
[[-0.125*s*t + 0.125*s + 0.125*t - 0.125,
-0.125*r*t + 0.125*r + 0.125*t - 0.125,
-0.125*r*s + 0.125*r + 0.125*s - 0.125],
[0.125*s*t - 0.125*s - 0.125*t + 0.125,
0.125*r*t - 0.125*r + 0.125*t - 0.125,
0.125*r*s - 0.125*r + 0.125*s - 0.125],
[-0.125*s*t + 0.125*s - 0.125*t + 0.125,
-0.125*r*t + 0.125*r - 0.125*t + 0.125,
-0.125*r*s - 0.125*r - 0.125*s - 0.125],
[0.125*s*t - 0.125*s + 0.125*t - 0.125,
0.125*r*t - 0.125*r - 0.125*t + 0.125,
0.125*r*s + 0.125*r - 0.125*s - 0.125],
[0.125*s*t + 0.125*s - 0.125*t - 0.125,
0.125*r*t + 0.125*r - 0.125*t - 0.125,
0.125*r*s - 0.125*r - 0.125*s + 0.125],
[-0.125*s*t - 0.125*s + 0.125*t + 0.125,
-0.125*r*t - 0.125*r - 0.125*t - 0.125,
-0.125*r*s + 0.125*r - 0.125*s + 0.125],
[0.125*s*t + 0.125*s + 0.125*t + 0.125,
0.125*r*t + 0.125*r + 0.125*t + 0.125,
0.125*r*s + 0.125*r + 0.125*s + 0.125],
[-0.125*s*t - 0.125*s - 0.125*t - 0.125,
-0.125*r*t - 0.125*r + 0.125*t + 0.125,
-0.125*r*s - 0.125*r + 0.125*s + 0.125]]
)
@njit(nogil=True, parallel=True, cache=__cache)
def dshp_H8_bulk(pcoords: ndarray):
nP = pcoords.shape[0]
res = np.zeros((nP, 8, 3), dtype=pcoords.dtype)
for iP in prange(nP):
res[iP] = dshp_H8(pcoords[iP])
return res
@njit(nogil=True, parallel=True, fastmath=True, cache=__cache)
def volumes_H8(ecoords: np.ndarray, qpos: np.ndarray,
qweight: np.ndarray):
nE = ecoords.shape[0]
volumes = np.zeros(nE, dtype=ecoords.dtype)
nQ = len(qweight)
for iQ in range(nQ):
dshp = dshp_H8(qpos[iQ])
for i in prange(nE):
jac = ecoords[i].T @ dshp
djac = np.linalg.det(jac)
volumes[i] += qweight[iQ] * djac
return volumes
class H8(HexaHedron):
"""
8-node isoparametric hexahedron.
top
7--6
| |
4--5
bottom
3--2
| |
0--1
"""
@classmethod
def lcoords(cls, *args, **kwargs):
return np.array([[-1., -1., -1],
[1., -1., -1.],
[1., 1., -1.],
[-1., 1., -1.],
[-1., -1., 1.],
[1., -1., 1.],
[1., 1., 1.],
[-1., 1., 1.]])
@classmethod
def lcenter(cls, *args, **kwargs):
return np.array([0., 0., 0.])
def shape_function_derivatives(self, coords=None, *args, **kwargs):
coords = self.pointdata.x.to_numpy() if coords is None else coords
if len(coords.shape) == 2:
return dshp_H8_bulk(coords)
else:
return dshp_H8(coords)
def volumes(self, coords=None, topo=None):
coords = self.pointdata.x.to_numpy() if coords is None else coords
topo = self.nodes.to_numpy() if topo is None else topo
ecoords = cells_coords(coords, topo)
qpos, qweight = Gauss(2, 2, 2)
return volumes_H8(ecoords, qpos, qweight)
| <filename>src/dewloosh/geom/cells/h8.py
# -*- coding: utf-8 -*-
from dewloosh.geom.polyhedron import HexaHedron
from dewloosh.math.numint import GaussPoints as Gauss
from dewloosh.geom.utils import cells_coords
from numba import njit, prange
import numpy as np
from numpy import ndarray
__cache = True
@njit(nogil=True, cache=__cache)
def monoms_H8(pcoord: np.ndarray):
r, s, t = pcoord
return np.array([1, r, s, t, r*s, r*t, s*t, r*s*t])
@njit(nogil=True, cache=__cache)
def shp_H8(pcoord):
r, s, t = pcoord
return np.array([-0.125*r*s*t + 0.125*r*s + 0.125*r*t - 0.125*r +
0.125*s*t - 0.125*s - 0.125*t + 0.125,
0.125*r*s*t - 0.125*r*s - 0.125*r*t + 0.125*r +
0.125*s*t - 0.125*s - 0.125*t + 0.125,
-0.125*r*s*t + 0.125*r*s - 0.125*r*t + 0.125*r -
0.125*s*t + 0.125*s - 0.125*t + 0.125,
0.125*r*s*t - 0.125*r*s + 0.125*r*t - 0.125*r -
0.125*s*t + 0.125*s - 0.125*t + 0.125,
0.125*r*s*t + 0.125*r*s - 0.125*r*t - 0.125*r -
0.125*s*t - 0.125*s + 0.125*t + 0.125,
-0.125*r*s*t - 0.125*r*s + 0.125*r*t + 0.125*r -
0.125*s*t - 0.125*s + 0.125*t + 0.125,
0.125*r*s*t + 0.125*r*s + 0.125*r*t + 0.125*r +
0.125*s*t + 0.125*s + 0.125*t + 0.125,
-0.125*r*s*t - 0.125*r*s - 0.125*r*t - 0.125*r +
0.125*s*t + 0.125*s + 0.125*t + 0.125]
)
@njit(nogil=True, parallel=True, cache=__cache)
def shape_function_matrix_H8(pcoord: np.ndarray):
eye = np.eye(3, dtype=pcoord.dtype)
shp = shp_H8(pcoord)
res = np.zeros((3, 24), dtype=pcoord.dtype)
for i in prange(8):
res[:, i*3: (i+1) * 3] = eye*shp[i]
return res
@njit(nogil=True, cache=__cache)
def dshp_H8(pcoord):
r, s, t = pcoord
return np.array(
[[-0.125*s*t + 0.125*s + 0.125*t - 0.125,
-0.125*r*t + 0.125*r + 0.125*t - 0.125,
-0.125*r*s + 0.125*r + 0.125*s - 0.125],
[0.125*s*t - 0.125*s - 0.125*t + 0.125,
0.125*r*t - 0.125*r + 0.125*t - 0.125,
0.125*r*s - 0.125*r + 0.125*s - 0.125],
[-0.125*s*t + 0.125*s - 0.125*t + 0.125,
-0.125*r*t + 0.125*r - 0.125*t + 0.125,
-0.125*r*s - 0.125*r - 0.125*s - 0.125],
[0.125*s*t - 0.125*s + 0.125*t - 0.125,
0.125*r*t - 0.125*r - 0.125*t + 0.125,
0.125*r*s + 0.125*r - 0.125*s - 0.125],
[0.125*s*t + 0.125*s - 0.125*t - 0.125,
0.125*r*t + 0.125*r - 0.125*t - 0.125,
0.125*r*s - 0.125*r - 0.125*s + 0.125],
[-0.125*s*t - 0.125*s + 0.125*t + 0.125,
-0.125*r*t - 0.125*r - 0.125*t - 0.125,
-0.125*r*s + 0.125*r - 0.125*s + 0.125],
[0.125*s*t + 0.125*s + 0.125*t + 0.125,
0.125*r*t + 0.125*r + 0.125*t + 0.125,
0.125*r*s + 0.125*r + 0.125*s + 0.125],
[-0.125*s*t - 0.125*s - 0.125*t - 0.125,
-0.125*r*t - 0.125*r + 0.125*t + 0.125,
-0.125*r*s - 0.125*r + 0.125*s + 0.125]]
)
@njit(nogil=True, parallel=True, cache=__cache)
def dshp_H8_bulk(pcoords: ndarray):
nP = pcoords.shape[0]
res = np.zeros((nP, 8, 3), dtype=pcoords.dtype)
for iP in prange(nP):
res[iP] = dshp_H8(pcoords[iP])
return res
@njit(nogil=True, parallel=True, fastmath=True, cache=__cache)
def volumes_H8(ecoords: np.ndarray, qpos: np.ndarray,
qweight: np.ndarray):
nE = ecoords.shape[0]
volumes = np.zeros(nE, dtype=ecoords.dtype)
nQ = len(qweight)
for iQ in range(nQ):
dshp = dshp_H8(qpos[iQ])
for i in prange(nE):
jac = ecoords[i].T @ dshp
djac = np.linalg.det(jac)
volumes[i] += qweight[iQ] * djac
return volumes
class H8(HexaHedron):
"""
8-node isoparametric hexahedron.
top
7--6
| |
4--5
bottom
3--2
| |
0--1
"""
@classmethod
def lcoords(cls, *args, **kwargs):
return np.array([[-1., -1., -1],
[1., -1., -1.],
[1., 1., -1.],
[-1., 1., -1.],
[-1., -1., 1.],
[1., -1., 1.],
[1., 1., 1.],
[-1., 1., 1.]])
@classmethod
def lcenter(cls, *args, **kwargs):
return np.array([0., 0., 0.])
def shape_function_derivatives(self, coords=None, *args, **kwargs):
coords = self.pointdata.x.to_numpy() if coords is None else coords
if len(coords.shape) == 2:
return dshp_H8_bulk(coords)
else:
return dshp_H8(coords)
def volumes(self, coords=None, topo=None):
coords = self.pointdata.x.to_numpy() if coords is None else coords
topo = self.nodes.to_numpy() if topo is None else topo
ecoords = cells_coords(coords, topo)
qpos, qweight = Gauss(2, 2, 2)
return volumes_H8(ecoords, qpos, qweight)
| en | 0.221092 | # -*- coding: utf-8 -*- 8-node isoparametric hexahedron. top 7--6 | | 4--5 bottom 3--2 | | 0--1 | 2.189699 | 2 |
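A quick check of the free functions defined in the entry above, assuming the dewloosh packages and numba are installed: for a single element shaped as the unit cube, with nodes ordered as in H8.lcoords, the 2x2x2 Gauss rule should integrate the volume to 1.

import numpy as np
from dewloosh.math.numint import GaussPoints as Gauss

unit_cube = np.array([
    [0., 0., 0.], [1., 0., 0.], [1., 1., 0.], [0., 1., 0.],
    [0., 0., 1.], [1., 0., 1.], [1., 1., 1.], [0., 1., 1.],
])
ecoords = unit_cube[None, :, :]            # one element, shape (1, 8, 3)
qpos, qweight = Gauss(2, 2, 2)
print(volumes_H8(ecoords, qpos, qweight))  # expected: [1.]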
shopping_cart_test/shoppingcart2.py | Simbadeveloper/studious-octo-waddle.io | 0 | 8829 | <reponame>Simbadeveloper/studious-octo-waddle.io
class ShoppingCart(object):
def __init__(self):
self.total = 0
self.items = dict()
def add_item(self, item_name, quantity, price):
if item_name != None and quantity >= 1:
self.items.update({item_name: quantity})
if quantity and price >= 1:
self.total += (quantity * price)
def remove_item(self, item_name, quantity, price):
if item_name in self.items:
if quantity < self.items[item_name] and quantity > 0:
self.items[item_name] -= quantity
self.total -= price*quantity
def checkout(self, cash_paid):
balance = 0
if cash_paid < self.total:
return "Cash paid not enough"
balance = cash_paid - self.total
return balance
class Shop(ShoppingCart):
def __init__(self):
self.quantity = 100
def remove_item(self):
self.quantity -= 1
| class ShoppingCart(object):
def __init__(self):
self.total = 0
self.items = dict()
def add_item(self, item_name, quantity, price):
if item_name != None and quantity >= 1:
self.items.update({item_name: quantity})
if quantity and price >= 1:
self.total += (quantity * price)
def remove_item(self, item_name, quantity, price):
if item_name in self.items:
if quantity < self.items[item_name] and quantity > 0:
self.items[item_name] -= quantity
self.total -= price*quantity
def checkout(self, cash_paid):
balance = 0
if cash_paid < self.total:
return "Cash paid not enough"
balance = cash_paid - self.total
return balance
class Shop(ShoppingCart):
def __init__(self):
self.quantity = 100
def remove_item(self):
self.quantity -= 1 | none | 1 | 3.699316 | 4 |
|
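A short usage sketch of the ShoppingCart class above, with made-up item names and prices:

cart = ShoppingCart()
cart.add_item("apple", 3, 2)      # 3 apples at 2 each -> total 6
cart.add_item("bread", 1, 4)      # total 10
cart.remove_item("apple", 1, 2)   # total 8
print(cart.total)                 # 8
print(cart.checkout(5))           # "Cash paid not enough"
print(cart.checkout(10))          # 2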
tests/models/pr_test_data.py | heaven00/github-contribution-leaderboard | 0 | 8830 | import copy
import json
from ghcl.models.pull_request import PullRequest
class PRData:
def __init__(self, data: dict = None):
if data is None:
with open('./tests/models/empty_pr_data.json') as file:
self._data = json.load(file)
else:
self._data = data
def with_pr_url(self, url: str = 'some-url'):
data = copy.deepcopy(self._data)
data['issues_data']['pull_request']['html_url'] = url
return PRData(data)
def with_label(self, label_to_add: str = None):
data = copy.deepcopy(self._data)
if label_to_add is None:
label_number = len(data["issues_data"]["labels"]) + 1
label_to_add = f'label-{label_number}'
data['issues_data']['labels'].append({'name': label_to_add})
return PRData(data)
def with_created_at(self, created_at: str = '2014-04-24T16:34:47Z'):
data = copy.deepcopy(self._data)
data['issues_data']['created_at'] = created_at
return PRData(data)
def with_owner(self, owner: str = 'owner_user_id'):
data = copy.deepcopy(self._data)
data['pr_data']['base']['repo']['owner']['login'] = owner
return PRData(data)
def with_pr_raised_by(self, pr_raised_by: str = 'pr_raised_by_user_id'):
data = copy.deepcopy(self._data)
data['pr_data']['head']['user']['login'] = pr_raised_by
return PRData(data)
def with_merged(self, merged=False):
data = copy.deepcopy(self._data)
data['pr_data']['merged'] = merged
return PRData(data)
def with_state(self, state='some_state'):
data = copy.deepcopy(self._data)
data['issues_data']['state'] = state
return PRData(data)
def with_defaults(self):
return PRData(self._data).with_pr_url()\
.with_label()\
.with_label()\
.with_created_at()\
.with_owner()\
.with_pr_raised_by()\
.with_merged()\
.with_state()
def as_pull_request(self):
return PullRequest(**self._data)
| import copy
import json
from ghcl.models.pull_request import PullRequest
class PRData:
def __init__(self, data: dict = None):
if data is None:
with open('./tests/models/empty_pr_data.json') as file:
self._data = json.load(file)
else:
self._data = data
def with_pr_url(self, url: str = 'some-url'):
data = copy.deepcopy(self._data)
data['issues_data']['pull_request']['html_url'] = url
return PRData(data)
def with_label(self, label_to_add: str = None):
data = copy.deepcopy(self._data)
if label_to_add is None:
label_number = len(data["issues_data"]["labels"]) + 1
label_to_add = f'label-{label_number}'
data['issues_data']['labels'].append({'name': label_to_add})
return PRData(data)
def with_created_at(self, created_at: str = '2014-04-24T16:34:47Z'):
data = copy.deepcopy(self._data)
data['issues_data']['created_at'] = created_at
return PRData(data)
def with_owner(self, owner: str = 'owner_user_id'):
data = copy.deepcopy(self._data)
data['pr_data']['base']['repo']['owner']['login'] = owner
return PRData(data)
def with_pr_raised_by(self, pr_raised_by: str = 'pr_raised_by_user_id'):
data = copy.deepcopy(self._data)
data['pr_data']['head']['user']['login'] = pr_raised_by
return PRData(data)
def with_merged(self, merged=False):
data = copy.deepcopy(self._data)
data['pr_data']['merged'] = merged
return PRData(data)
def with_state(self, state='some_state'):
data = copy.deepcopy(self._data)
data['issues_data']['state'] = state
return PRData(data)
def with_defaults(self):
return PRData(self._data).with_pr_url()\
.with_label()\
.with_label()\
.with_created_at()\
.with_owner()\
.with_pr_raised_by()\
.with_merged()\
.with_state()
def as_pull_request(self):
return PullRequest(**self._data)
| none | 1 | 2.664785 | 3 |
|
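One way the PRData builder above could be used inside a test; this is a sketch that relies on the empty_pr_data.json fixture and the PullRequest model referenced by the entry, neither of which is shown here:

pr = (PRData()
      .with_defaults()
      .with_pr_url("https://github.com/org/repo/pull/42")   # made-up URL
      .with_label("hacktoberfest")
      .with_merged(True)
      .as_pull_request())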
Validation/EventGenerator/python/BasicGenParticleValidation_cfi.py | PKUfudawei/cmssw | 2 | 8831 | <gh_stars>1-10
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
basicGenParticleValidation = DQMEDAnalyzer('BasicGenParticleValidation',
hepmcCollection = cms.InputTag("generatorSmeared"),
genparticleCollection = cms.InputTag("genParticles",""),
genjetsCollection = cms.InputTag("ak4GenJets",""),
matchingPrecision = cms.double(0.001),
verbosity = cms.untracked.uint32(0),
UseWeightFromHepMC = cms.bool(True),
signalParticlesOnly = cms.bool(False)
)
basicGenParticleValidationHiMix = basicGenParticleValidation.clone(signalParticlesOnly = True)
| import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
basicGenParticleValidation = DQMEDAnalyzer('BasicGenParticleValidation',
hepmcCollection = cms.InputTag("generatorSmeared"),
genparticleCollection = cms.InputTag("genParticles",""),
genjetsCollection = cms.InputTag("ak4GenJets",""),
matchingPrecision = cms.double(0.001),
verbosity = cms.untracked.uint32(0),
UseWeightFromHepMC = cms.bool(True),
signalParticlesOnly = cms.bool(False)
)
basicGenParticleValidationHiMix = basicGenParticleValidation.clone(signalParticlesOnly = True) | none | 1 | 1.377898 | 1 |
|
k_values_graph.py | leobouts/Skyline_top_k_queries | 0 | 8832 | <filename>k_values_graph.py
from a_top_k import *
from b_top_k import *
import time
def main():
# test the generator for the top-k input
# starting time
values_k = [1, 2, 5, 10, 20, 50, 100]
times_topk_join_a = []
times_topk_join_b = []
number_of_valid_lines_a = []
number_of_valid_lines_b = []
for k in values_k:
number_of_valid_lines = []
top_k_a_generator = generate_top_join_a(number_of_valid_lines)
start_time_a = time.time()
for i in range(k):
next(top_k_a_generator)
number_of_valid_lines_a.append(len(number_of_valid_lines))
top_k_time_a = time.time() - start_time_a
times_topk_join_a.append(top_k_time_a)
number_of_valid_lines = []
top_k_b_generator = generate_top_join_b(number_of_valid_lines)
start_time_b = time.time()
for i in range(k):
next(top_k_b_generator)
number_of_valid_lines_b.append(len(number_of_valid_lines))
top_k_time_b = time.time() - start_time_b
times_topk_join_b.append(top_k_time_b)
print(times_topk_join_a)
print(times_topk_join_b)
print(number_of_valid_lines_a)
print(number_of_valid_lines_b)
if __name__ == "__main__":
main()
| <filename>k_values_graph.py
from a_top_k import *
from b_top_k import *
import time
def main():
# test the generator for the top-k input
# starting time
values_k = [1, 2, 5, 10, 20, 50, 100]
times_topk_join_a = []
times_topk_join_b = []
number_of_valid_lines_a = []
number_of_valid_lines_b = []
for k in values_k:
number_of_valid_lines = []
top_k_a_generator = generate_top_join_a(number_of_valid_lines)
start_time_a = time.time()
for i in range(k):
next(top_k_a_generator)
number_of_valid_lines_a.append(len(number_of_valid_lines))
top_k_time_a = time.time() - start_time_a
times_topk_join_a.append(top_k_time_a)
number_of_valid_lines = []
top_k_b_generator = generate_top_join_b(number_of_valid_lines)
start_time_b = time.time()
for i in range(k):
next(top_k_b_generator)
number_of_valid_lines_b.append(len(number_of_valid_lines))
top_k_time_b = time.time() - start_time_b
times_topk_join_b.append(top_k_time_b)
print(times_topk_join_a)
print(times_topk_join_b)
print(number_of_valid_lines_a)
print(number_of_valid_lines_b)
if __name__ == "__main__":
main()
| en | 0.598651 | # test the generator for the top-k input # starting time | 2.951339 | 3 |
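The file name suggests the timings are meant to be plotted, but main() above only prints them. A possible follow-up, assuming matplotlib is available, is to feed the printed lists to a small helper such as:

import matplotlib.pyplot as plt

def plot_topk_times(values_k, times_a, times_b):
    plt.plot(values_k, times_a, marker="o", label="top-k join a")
    plt.plot(values_k, times_b, marker="s", label="top-k join b")
    plt.xlabel("k")
    plt.ylabel("time (s)")
    plt.legend()
    plt.show()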
AppPkg/Applications/Python/Python-2.7.2/Lib/lib2to3/fixes/fix_methodattrs.py | CEOALT1/RefindPlusUDK | 2,757 | 8833 | <reponame>CEOALT1/RefindPlusUDK<gh_stars>1000+
"""Fix bound method attributes (method.im_? -> method.__?__).
"""
# Author: <NAME>
# Local imports
from .. import fixer_base
from ..fixer_util import Name
MAP = {
"im_func" : "__func__",
"im_self" : "__self__",
"im_class" : "__self__.__class__"
}
class FixMethodattrs(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* >
"""
def transform(self, node, results):
attr = results["attr"][0]
new = unicode(MAP[attr.value])
attr.replace(Name(new, prefix=attr.prefix))
| """Fix bound method attributes (method.im_? -> method.__?__).
"""
# Author: <NAME>
# Local imports
from .. import fixer_base
from ..fixer_util import Name
MAP = {
"im_func" : "__func__",
"im_self" : "__self__",
"im_class" : "__self__.__class__"
}
class FixMethodattrs(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* >
"""
def transform(self, node, results):
attr = results["attr"][0]
new = unicode(MAP[attr.value])
attr.replace(Name(new, prefix=attr.prefix)) | en | 0.333115 | Fix bound method attributes (method.im_? -> method.__?__). # Author: <NAME> # Local imports power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* > | 2.31207 | 2 |
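The fixer above rewrites Python 2 bound-method attributes according to MAP. A quick way to see the transformation with the standard lib2to3 machinery (a sketch, run against a plain CPython install rather than this EDK2 tree):

from lib2to3.refactor import RefactoringTool

tool = RefactoringTool(["lib2to3.fixes.fix_methodattrs"])
print(tool.refactor_string("m.im_func\nm.im_self\nm.im_class\n", "<example>"))
# m.__func__
# m.__self__
# m.__self__.__class__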
models/TextCNN/cnn2d.py | Renovamen/Text-Classification | 72 | 8834 | <reponame>Renovamen/Text-Classification<filename>models/TextCNN/cnn2d.py<gh_stars>10-100
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List
class TextCNN2D(nn.Module):
"""
Implementation of 2D version of TextCNN proposed in paper [1].
`Here <https://github.com/yoonkim/CNN_sentence>`_ is the official
implementation of TextCNN.
Parameters
----------
n_classes : int
Number of classes
vocab_size : int
Number of words in the vocabulary
embeddings : torch.Tensor
Word embedding weights
emb_size : int
Size of word embeddings
fine_tune : bool
Allow fine-tuning of embedding layer? (only makes sense when using
pre-trained embeddings)
n_kernels : int
Number of kernels
kernel_sizes : List[int]
Size of each kernel
dropout : float
Dropout
n_channels : int
Number of channels (1 / 2)
References
----------
1. "`Convolutional Neural Networks for Sentence Classification. \
<https://www.aclweb.org/anthology/D14-1181.pdf>`_" <NAME>. EMNLP 2014.
"""
def __init__(
self,
n_classes: int,
vocab_size: int,
embeddings: torch.Tensor,
emb_size: int,
fine_tune: bool,
n_kernels: int,
kernel_sizes: List[int],
dropout: float,
n_channels = 1
) -> None:
super(TextCNN2D, self).__init__()
# embedding layer
self.embedding1 = nn.Embedding(vocab_size, emb_size)
self.set_embeddings(embeddings, 1, fine_tune)
if n_channels == 2:
# multichannel: a static channel and a non-static channel
# which means embedding2 is frozen
self.embedding2 = nn.Embedding(vocab_size, emb_size)
self.set_embeddings(embeddings, 2, False)
else:
self.embedding2 = None
# 2d conv layer
self.convs = nn.ModuleList([
nn.Conv2d(
in_channels = n_channels,
out_channels = n_kernels,
kernel_size = (size, emb_size)
)
for size in kernel_sizes
])
self.fc = nn.Linear(len(kernel_sizes) * n_kernels, n_classes)
self.dropout = nn.Dropout(dropout)
self.relu = nn.ReLU()
def set_embeddings(
self,
embeddings: torch.Tensor,
layer_id: int = 1,
fine_tune: bool = True
) -> None:
"""
Set weights for embedding layer
Parameters
----------
embeddings : torch.Tensor
Word embeddings
layer_id : int
Embedding layer 1 or 2 (when adopting multichannel architecture)
fine_tune : bool, optional, default=True
Allow fine-tuning of embedding layer? (only makes sense when using
pre-trained embeddings)
"""
if embeddings is None:
# initialize embedding layer with the uniform distribution
if layer_id == 1:
self.embedding1.weight.data.uniform_(-0.1, 0.1)
else:
self.embedding2.weight.data.uniform_(-0.1, 0.1)
else:
# initialize embedding layer with pre-trained embeddings
if layer_id == 1:
self.embedding1.weight = nn.Parameter(embeddings, requires_grad = fine_tune)
else:
self.embedding2.weight = nn.Parameter(embeddings, requires_grad = fine_tune)
def forward(self, text: torch.Tensor, words_per_sentence: torch.Tensor) -> torch.Tensor:
"""
Parameters
----------
text : torch.Tensor (batch_size, word_pad_len)
Input data
words_per_sentence : torch.Tensor (batch_size)
Sentence lengths
Returns
-------
scores : torch.Tensor (batch_size, n_classes)
Class scores
"""
# word embedding
embeddings = self.embedding1(text).unsqueeze(1) # (batch_size, 1, word_pad_len, emb_size)
# multichannel
if self.embedding2:
embeddings2 = self.embedding2(text).unsqueeze(1) # (batch_size, 1, word_pad_len, emb_size)
embeddings = torch.cat((embeddings, embeddings2), dim = 1) # (batch_size, 2, word_pad_len, emb_size)
# conv
conved = [self.relu(conv(embeddings)).squeeze(3) for conv in self.convs] # [(batch size, n_kernels, word_pad_len - kernel_sizes[n] + 1)]
# pooling
pooled = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in conved] # [(batch size, n_kernels)]
# flatten
flattened = self.dropout(torch.cat(pooled, dim = 1)) # (batch size, n_kernels * len(kernel_sizes))
scores = self.fc(flattened) # (batch size, n_classes)
return scores
| import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List
class TextCNN2D(nn.Module):
"""
Implementation of 2D version of TextCNN proposed in paper [1].
`Here <https://github.com/yoonkim/CNN_sentence>`_ is the official
implementation of TextCNN.
Parameters
----------
n_classes : int
Number of classes
vocab_size : int
Number of words in the vocabulary
embeddings : torch.Tensor
Word embedding weights
emb_size : int
Size of word embeddings
fine_tune : bool
Allow fine-tuning of embedding layer? (only makes sense when using
pre-trained embeddings)
n_kernels : int
Number of kernels
kernel_sizes : List[int]
Size of each kernel
dropout : float
Dropout
n_channels : int
Number of channels (1 / 2)
References
----------
1. "`Convolutional Neural Networks for Sentence Classification. \
<https://www.aclweb.org/anthology/D14-1181.pdf>`_" <NAME>. EMNLP 2014.
"""
def __init__(
self,
n_classes: int,
vocab_size: int,
embeddings: torch.Tensor,
emb_size: int,
fine_tune: bool,
n_kernels: int,
kernel_sizes: List[int],
dropout: float,
n_channels = 1
) -> None:
super(TextCNN2D, self).__init__()
# embedding layer
self.embedding1 = nn.Embedding(vocab_size, emb_size)
self.set_embeddings(embeddings, 1, fine_tune)
if n_channels == 2:
# multichannel: a static channel and a non-static channel
# which means embedding2 is frozen
self.embedding2 = nn.Embedding(vocab_size, emb_size)
self.set_embeddings(embeddings, 2, False)
else:
self.embedding2 = None
# 2d conv layer
self.convs = nn.ModuleList([
nn.Conv2d(
in_channels = n_channels,
out_channels = n_kernels,
kernel_size = (size, emb_size)
)
for size in kernel_sizes
])
self.fc = nn.Linear(len(kernel_sizes) * n_kernels, n_classes)
self.dropout = nn.Dropout(dropout)
self.relu = nn.ReLU()
def set_embeddings(
self,
embeddings: torch.Tensor,
layer_id: int = 1,
fine_tune: bool = True
) -> None:
"""
Set weights for embedding layer
Parameters
----------
embeddings : torch.Tensor
Word embeddings
layer_id : int
Embedding layer 1 or 2 (when adopting multichannel architecture)
fine_tune : bool, optional, default=True
Allow fine-tuning of embedding layer? (only makes sense when using
pre-trained embeddings)
"""
if embeddings is None:
# initialize embedding layer with the uniform distribution
if layer_id == 1:
self.embedding1.weight.data.uniform_(-0.1, 0.1)
else:
self.embedding2.weight.data.uniform_(-0.1, 0.1)
else:
# initialize embedding layer with pre-trained embeddings
if layer_id == 1:
self.embedding1.weight = nn.Parameter(embeddings, requires_grad = fine_tune)
else:
self.embedding2.weight = nn.Parameter(embeddings, requires_grad = fine_tune)
def forward(self, text: torch.Tensor, words_per_sentence: torch.Tensor) -> torch.Tensor:
"""
Parameters
----------
text : torch.Tensor (batch_size, word_pad_len)
Input data
words_per_sentence : torch.Tensor (batch_size)
Sentence lengths
Returns
-------
scores : torch.Tensor (batch_size, n_classes)
Class scores
"""
# word embedding
embeddings = self.embedding1(text).unsqueeze(1) # (batch_size, 1, word_pad_len, emb_size)
# multichannel
if self.embedding2:
embeddings2 = self.embedding2(text).unsqueeze(1) # (batch_size, 1, word_pad_len, emb_size)
embeddings = torch.cat((embeddings, embeddings2), dim = 1) # (batch_size, 2, word_pad_len, emb_size)
# conv
conved = [self.relu(conv(embeddings)).squeeze(3) for conv in self.convs] # [(batch size, n_kernels, word_pad_len - kernel_sizes[n] + 1)]
# pooling
pooled = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in conved] # [(batch size, n_kernels)]
# flatten
flattened = self.dropout(torch.cat(pooled, dim = 1)) # (batch size, n_kernels * len(kernel_sizes))
scores = self.fc(flattened) # (batch size, n_classes)
return scores | en | 0.607538 | Implementation of 2D version of TextCNN proposed in paper [1]. `Here <https://github.com/yoonkim/CNN_sentence>`_ is the official implementation of TextCNN. Parameters ---------- n_classes : int Number of classes vocab_size : int Number of words in the vocabulary embeddings : torch.Tensor Word embedding weights emb_size : int Size of word embeddings fine_tune : bool Allow fine-tuning of embedding layer? (only makes sense when using pre-trained embeddings) n_kernels : int Number of kernels kernel_sizes : List[int] Size of each kernel dropout : float Dropout n_channels : int Number of channels (1 / 2) References ---------- 1. "`Convolutional Neural Networks for Sentence Classification. \ <https://www.aclweb.org/anthology/D14-1181.pdf>`_" <NAME>. EMNLP 2014. # embedding layer # multichannel: a static channel and a non-static channel # which means embedding2 is frozen # 2d conv layer Set weights for embedding layer Parameters ---------- embeddings : torch.Tensor Word embeddings layer_id : int Embedding layer 1 or 2 (when adopting multichannel architecture) fine_tune : bool, optional, default=True Allow fine-tuning of embedding layer? (only makes sense when using pre-trained embeddings) # initialize embedding layer with the uniform distribution # initialize embedding layer with pre-trained embeddings Parameters ---------- text : torch.Tensor (batch_size, word_pad_len) Input data words_per_sentence : torch.Tensor (batch_size) Sentence lengths Returns ------- scores : torch.Tensor (batch_size, n_classes) Class scores # word embedding # (batch_size, 1, word_pad_len, emb_size) # multichannel # (batch_size, 1, word_pad_len, emb_size) # (batch_size, 2, word_pad_len, emb_size) # conv # [(batch size, n_kernels, word_pad_len - kernel_sizes[n] + 1)] # pooling # [(batch size, n_kernels)] # flatten # (batch size, n_kernels * len(kernel_sizes)) # (batch size, n_classes) | 3.489628 | 3 |
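A minimal smoke test for the TextCNN2D module above; it assumes PyTorch is installed and the class is importable, and the vocabulary size, padded length and hyper-parameters are made up:

import torch

model = TextCNN2D(
    n_classes=4, vocab_size=1000, embeddings=None, emb_size=128,
    fine_tune=True, n_kernels=100, kernel_sizes=[3, 4, 5], dropout=0.5,
)
text = torch.randint(0, 1000, (8, 50))   # (batch_size, word_pad_len)
lengths = torch.full((8,), 50)           # accepted by forward() but not used
print(model(text, lengths).shape)        # torch.Size([8, 4])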
LEVEL2/다리를지나는트럭/solution.py | seunghwanly/CODING-TEST | 0 | 8835 | def solution(bridge_length, weight, truck_weights):
answer = 0
# { weight, time }
wait = truck_weights[:]
bridge = []
passed = 0
currWeight = 0
while True:
if passed == len(truck_weights) and len(wait) == 0: return answer
answer += 1
# sth needs to be passed
if bridge:
if bridge[0]['t'] + bridge_length == answer:
front = bridge.pop(0)
currWeight -= front['w']
passed += 1
# add new truck
if wait:
if currWeight + wait[0] <= weight:
bridge.append({ 'w' : wait[0], 't' : answer })
currWeight += wait[0]
wait.pop(0)
# print(solution(2, 10, [7, 4, 5, 6]))
print(solution(100, 100, [10]))
| def solution(bridge_length, weight, truck_weights):
answer = 0
# { weight, time }
wait = truck_weights[:]
bridge = []
passed = 0
currWeight = 0
while True:
if passed == len(truck_weights) and len(wait) == 0: return answer
answer += 1
# sth needs to be passed
if bridge:
if bridge[0]['t'] + bridge_length == answer:
front = bridge.pop(0)
currWeight -= front['w']
passed += 1
# add new truck
if wait:
if currWeight + wait[0] <= weight:
bridge.append({ 'w' : wait[0], 't' : answer })
currWeight += wait[0]
wait.pop(0)
# print(solution(2, 10, [7, 4, 5, 6]))
print(solution(100, 100, [10]))
| en | 0.808966 | # { weight, time } # sth needs to be passed # add new truck # print(solution(2, 10, [7, 4, 5, 6])) | 3.582555 | 4 |
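For reference, the two calls that appear in the entry (one of them commented out) evaluate to 8 and 101 with this implementation, which matches the commonly cited expected outputs for this bridge-crossing problem:

assert solution(2, 10, [7, 4, 5, 6]) == 8
assert solution(100, 100, [10]) == 101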
heat/tests/convergence/framework/testutils.py | maestro-hybrid-cloud/heat | 0 | 8836 | <gh_stars>0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_log import log as logging
from heat.tests.convergence.framework import reality
from heat.tests.convergence.framework import scenario_template
LOG = logging.getLogger(__name__)
def verify(test, reality, tmpl):
for name in tmpl.resources:
rsrc_count = len(reality.resources_by_logical_name(name))
test.assertEqual(1, rsrc_count,
'Found %d copies of resource "%s"' % (rsrc_count,
name))
all_rsrcs = reality.all_resources()
for name, defn in tmpl.resources.items():
phys_rsrc = reality.resources_by_logical_name(name)[0]
for prop_name, prop_def in defn.properties.items():
real_value = reality.resource_properties(phys_rsrc, prop_name)
if isinstance(prop_def, scenario_template.GetAtt):
targs = reality.resources_by_logical_name(prop_def.target_name)
att_value = targs[0].properties_data[prop_def.attr]
test.assertEqual(att_value, real_value)
elif isinstance(prop_def, scenario_template.GetRes):
targs = reality.resources_by_logical_name(prop_def.target_name)
test.assertEqual(targs[0].nova_instance, real_value)
else:
test.assertEqual(prop_def, real_value)
test.assertEqual(len(defn.properties), len(phys_rsrc.properties_data))
test.assertEqual(len(tmpl.resources), len(all_rsrcs))
def scenario_globals(procs, testcase):
return {
'test': testcase,
'reality': reality.reality,
'verify': functools.partial(verify,
testcase,
reality.reality),
'Template': scenario_template.Template,
'RsrcDef': scenario_template.RsrcDef,
'GetRes': scenario_template.GetRes,
'GetAtt': scenario_template.GetAtt,
'engine': procs.engine,
'worker': procs.worker,
}
| #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_log import log as logging
from heat.tests.convergence.framework import reality
from heat.tests.convergence.framework import scenario_template
LOG = logging.getLogger(__name__)
def verify(test, reality, tmpl):
for name in tmpl.resources:
rsrc_count = len(reality.resources_by_logical_name(name))
test.assertEqual(1, rsrc_count,
'Found %d copies of resource "%s"' % (rsrc_count,
name))
all_rsrcs = reality.all_resources()
for name, defn in tmpl.resources.items():
phys_rsrc = reality.resources_by_logical_name(name)[0]
for prop_name, prop_def in defn.properties.items():
real_value = reality.resource_properties(phys_rsrc, prop_name)
if isinstance(prop_def, scenario_template.GetAtt):
targs = reality.resources_by_logical_name(prop_def.target_name)
att_value = targs[0].properties_data[prop_def.attr]
test.assertEqual(att_value, real_value)
elif isinstance(prop_def, scenario_template.GetRes):
targs = reality.resources_by_logical_name(prop_def.target_name)
test.assertEqual(targs[0].nova_instance, real_value)
else:
test.assertEqual(prop_def, real_value)
test.assertEqual(len(defn.properties), len(phys_rsrc.properties_data))
test.assertEqual(len(tmpl.resources), len(all_rsrcs))
def scenario_globals(procs, testcase):
return {
'test': testcase,
'reality': reality.reality,
'verify': functools.partial(verify,
testcase,
reality.reality),
'Template': scenario_template.Template,
'RsrcDef': scenario_template.RsrcDef,
'GetRes': scenario_template.GetRes,
'GetAtt': scenario_template.GetAtt,
'engine': procs.engine,
'worker': procs.worker,
} | en | 0.859194 | # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. | 1.964841 | 2 |
device_geometry.py | AstroShen/fpga21-scaled-tech | 2 | 8837 | """Holds the device gemoetry parameters (Table 5), taken from Wu et al.,
>> A Predictive 3-D Source/Drain Resistance Compact Model and the Impact on 7 nm and Scaled FinFets<<, 2020, with interpolation for 4nm. 16nm is taken from PTM HP.
"""
node_names = [16, 7, 5, 4, 3]
GP = [64, 56, 48, 44, 41]
FP = [40, 30, 28, 24, 22]
GL = [20, 18, 16, 15, 14]
FH = [26, 35, 45, 50, 55]
FW = [12, 6.5, 6, 5.5, 5.5]
vdd = [0.85, 0.75, 0.7, 0.65, 0.65]
| """Holds the device gemoetry parameters (Table 5), taken from Wu et al.,
>> A Predictive 3-D Source/Drain Resistance Compact Model and the Impact on 7 nm and Scaled FinFets<<, 2020, with interpolation for 4nm. 16nm is taken from PTM HP.
"""
node_names = [16, 7, 5, 4, 3]
GP = [64, 56, 48, 44, 41]
FP = [40, 30, 28, 24, 22]
GL = [20, 18, 16, 15, 14]
FH = [26, 35, 45, 50, 55]
FW = [12, 6.5, 6, 5.5, 5.5]
vdd = [0.85, 0.75, 0.7, 0.65, 0.65]
| en | 0.782484 | Holds the device gemoetry parameters (Table 5), taken from Wu et al., >> A Predictive 3-D Source/Drain Resistance Compact Model and the Impact on 7 nm and Scaled FinFets<<, 2020, with interpolation for 4nm. 16nm is taken from PTM HP. | 1.095154 | 1 |
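The lists above are parallel arrays indexed by technology node. A small illustrative helper that groups the parameters per node:

def params_for_node(node):
    i = node_names.index(node)
    return {"GP": GP[i], "FP": FP[i], "GL": GL[i],
            "FH": FH[i], "FW": FW[i], "vdd": vdd[i]}

print(params_for_node(7))   # {'GP': 56, 'FP': 30, 'GL': 18, 'FH': 35, 'FW': 6.5, 'vdd': 0.75}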
nova/tests/servicegroup/test_zk_driver.py | vmthunder/nova | 7 | 8838 | # Copyright (c) AT&T 2012-2013 <NAME> <<EMAIL>>
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test the ZooKeeper driver for servicegroup.
You need to install ZooKeeper locally and related dependencies
to run the test. It's unclear how to install python-zookeeper lib
in venv so you might have to run the test without it.
To set up in Ubuntu 12.04:
$ sudo apt-get install zookeeper zookeeperd python-zookeeper
$ sudo pip install evzookeeper
$ nosetests nova.tests.servicegroup.test_zk_driver
"""
import eventlet
from nova import servicegroup
from nova import test
class ZKServiceGroupTestCase(test.NoDBTestCase):
def setUp(self):
super(ZKServiceGroupTestCase, self).setUp()
servicegroup.API._driver = None
from nova.servicegroup.drivers import zk
self.flags(servicegroup_driver='zk')
self.flags(address='localhost:2181', group="zookeeper")
try:
zk.ZooKeeperDriver()
except ImportError:
self.skipTest("Unable to test due to lack of ZooKeeper")
def test_join_leave(self):
self.servicegroup_api = servicegroup.API()
service_id = {'topic': 'unittest', 'host': 'serviceA'}
self.servicegroup_api.join(service_id['host'], service_id['topic'])
self.assertTrue(self.servicegroup_api.service_is_up(service_id))
self.servicegroup_api.leave(service_id['host'], service_id['topic'])
# make sure zookeeper is updated and watcher is triggered
eventlet.sleep(1)
self.assertFalse(self.servicegroup_api.service_is_up(service_id))
def test_stop(self):
self.servicegroup_api = servicegroup.API()
service_id = {'topic': 'unittest', 'host': 'serviceA'}
pulse = self.servicegroup_api.join(service_id['host'],
service_id['topic'], None)
self.assertTrue(self.servicegroup_api.service_is_up(service_id))
pulse.stop()
eventlet.sleep(1)
self.assertFalse(self.servicegroup_api.service_is_up(service_id))
| # Copyright (c) AT&T 2012-2013 <NAME> <<EMAIL>>
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test the ZooKeeper driver for servicegroup.
You need to install ZooKeeper locally and related dependencies
to run the test. It's unclear how to install python-zookeeper lib
in venv so you might have to run the test without it.
To set up in Ubuntu 12.04:
$ sudo apt-get install zookeeper zookeeperd python-zookeeper
$ sudo pip install evzookeeper
$ nosetests nova.tests.servicegroup.test_zk_driver
"""
import eventlet
from nova import servicegroup
from nova import test
class ZKServiceGroupTestCase(test.NoDBTestCase):
def setUp(self):
super(ZKServiceGroupTestCase, self).setUp()
servicegroup.API._driver = None
from nova.servicegroup.drivers import zk
self.flags(servicegroup_driver='zk')
self.flags(address='localhost:2181', group="zookeeper")
try:
zk.ZooKeeperDriver()
except ImportError:
self.skipTest("Unable to test due to lack of ZooKeeper")
def test_join_leave(self):
self.servicegroup_api = servicegroup.API()
service_id = {'topic': 'unittest', 'host': 'serviceA'}
self.servicegroup_api.join(service_id['host'], service_id['topic'])
self.assertTrue(self.servicegroup_api.service_is_up(service_id))
self.servicegroup_api.leave(service_id['host'], service_id['topic'])
# make sure zookeeper is updated and watcher is triggered
eventlet.sleep(1)
self.assertFalse(self.servicegroup_api.service_is_up(service_id))
def test_stop(self):
self.servicegroup_api = servicegroup.API()
service_id = {'topic': 'unittest', 'host': 'serviceA'}
pulse = self.servicegroup_api.join(service_id['host'],
service_id['topic'], None)
self.assertTrue(self.servicegroup_api.service_is_up(service_id))
pulse.stop()
eventlet.sleep(1)
self.assertFalse(self.servicegroup_api.service_is_up(service_id))
| en | 0.825054 | # Copyright (c) AT&T 2012-2013 <NAME> <<EMAIL>> # Copyright 2012 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Test the ZooKeeper driver for servicegroup. You need to install ZooKeeper locally and related dependencies to run the test. It's unclear how to install python-zookeeper lib in venv so you might have to run the test without it. To set up in Ubuntu 12.04: $ sudo apt-get install zookeeper zookeeperd python-zookeeper $ sudo pip install evzookeeper $ nosetests nova.tests.servicegroup.test_zk_driver # make sure zookeeper is updated and watcher is triggered | 1.843293 | 2 |
tests/test_misc.py | lordmauve/chopsticks | 171 | 8839 | """Tests for miscellaneous properties, such as debuggability."""
import time
from chopsticks.tunnel import Docker
from chopsticks.group import Group
def test_tunnel_repr():
"""Tunnels have a usable repr."""
tun = Docker('py36', image='python:3.6')
assert repr(tun) == "Docker('py36')"
def test_group_repr():
"""Groups have a usable repr."""
grp = Group([
Docker('py35', image='python:3.5'),
Docker('py36', image='python:3.6')
])
assert repr(grp) == "Group([Docker('py35'), Docker('py36')])"
def test_group_reuse():
"""We can re-use a group."""
grp = Group([
Docker('py35', image='python:3.5'),
Docker('py36', image='python:3.6')
])
with grp:
grp.call(time.time)
grp.call(time.time)
| """Tests for miscellaneous properties, such as debuggability."""
import time
from chopsticks.tunnel import Docker
from chopsticks.group import Group
def test_tunnel_repr():
"""Tunnels have a usable repr."""
tun = Docker('py36', image='python:3.6')
assert repr(tun) == "Docker('py36')"
def test_group_repr():
"""Groups have a usable repr."""
grp = Group([
Docker('py35', image='python:3.5'),
Docker('py36', image='python:3.6')
])
assert repr(grp) == "Group([Docker('py35'), Docker('py36')])"
def test_group_reuse():
"""We can re-use a group."""
grp = Group([
Docker('py35', image='python:3.5'),
Docker('py36', image='python:3.6')
])
with grp:
grp.call(time.time)
grp.call(time.time)
| en | 0.96398 | Tests for miscellaneous properties, such as debuggability. Tunnels have a usable repr. Groups have a usable repr. We can re-use a group. | 2.678924 | 3 |
Evaluation/PostProcesing.py | AnnonymousRacoon/Quantum-Random-Walks-to-Solve-Diffusion | 0 | 8840 | <reponame>AnnonymousRacoon/Quantum-Random-Walks-to-Solve-Diffusion<gh_stars>0
import pandas as pd
import re
import glob
def rebuild_counts_from_csv(path,n_dims, shots):
df = pd.read_csv(path)
return rebuild_counts_from_dataframe(dataframe=df, n_dims=n_dims, shots=shots)
def rebuild_counts_from_dataframe(dataframe,n_dims,shots):
dimension_counts = {}
for dimension in range(n_dims):
dimension_counts[dimension] = []
pde = list(dataframe.probability_density)
for idx, density in enumerate(pde):
n_counts = int(density*shots)
for _ in range(n_counts):
# print(dataframe["dimension_0"][idx])
for dimension in range(n_dims):
dimension_key = "dimension_{}".format(dimension)
#
dimension_counts[dimension]+=[dataframe[dimension_key][idx]]
# print(dimension_counts)
rebuilt_dict = {}
for dimension in range(n_dims):
rebuilt_dict[f"d{dimension}"] = dimension_counts[dimension]
return rebuilt_dict
def rebuild_counts_from_dictionary(dictionary:dict, n_dims, shots):
dataframe = pd.DataFrame(dictionary)
return rebuild_counts_from_dataframe(dataframe=dataframe, n_dims=n_dims, shots=shots)
def get_stats_from_counts_dict(results_dict:dict):
dataframe = pd.DataFrame(results_dict)
return get_stats_from_counts_dataframe(dataframe)
def get_stats_from_counts_dataframe(counts_dataframe: pd.DataFrame)-> dict:
results_dict = {}
results_dict["corr"] = counts_dataframe.corr()
results_dict["cov"] = counts_dataframe.cov()
results_dict["mean"] = counts_dataframe.mean()
results_dict['var'] = counts_dataframe.var()
return results_dict
def get_n_steps_from_filepath(filepath)-> int:
filename = filepath.split('/')[-1]
return int(re.findall(r"\d+_steps",filename)[0].split('_')[0])
def get_n_shots_from_path(path)-> int:
experiment_dir_name = path.split('/')[-1]
nshots = int(re.findall(r"\d+shots",experiment_dir_name)[0].split('s')[0])
return nshots
def get_n_dims_from_path(path)-> int:
experiment_dir_name = path.split('/')[-1]
ndims = int(re.findall(r"\d+D_",experiment_dir_name)[0].split('D')[0])
return ndims
def extract_mean_variance_vs_nsteps(directory_path: str,dimension = 0):
nshots = get_n_shots_from_path(directory_path)
ndims = get_n_dims_from_path(directory_path)
assert dimension < ndims, "queried dimension exceeds experiment space"
files = glob.glob(directory_path+'/*/data/**.csv')
files.sort(key = get_n_steps_from_filepath)
n_steps = []
variance = []
mean = []
for filepath in files:
filename = filepath.split('/')[-1]
nsteps = int(re.findall(r"\d+_steps",filename)[0].split('_')[0])
rebuilt_dict = rebuild_counts_from_csv(filepath,n_dims=ndims,shots=nshots)
stats = get_stats_from_counts_dict(rebuilt_dict)
variance.append(stats['var'][dimension])
mean.append(stats['mean'][dimension])
n_steps.append(nsteps)
return n_steps, variance, mean
| import pandas as pd
import re
import glob
def rebuild_counts_from_csv(path,n_dims, shots):
df = pd.read_csv(path)
return rebuild_counts_from_dataframe(dataframe=df, n_dims=n_dims, shots=shots)
def rebuild_counts_from_dataframe(dataframe,n_dims,shots):
dimension_counts = {}
for dimension in range(n_dims):
dimension_counts[dimension] = []
pde = list(dataframe.probability_density)
for idx, density in enumerate(pde):
n_counts = int(density*shots)
for _ in range(n_counts):
# print(dataframe["dimension_0"][idx])
for dimension in range(n_dims):
dimension_key = "dimension_{}".format(dimension)
#
dimension_counts[dimension]+=[dataframe[dimension_key][idx]]
# print(dimension_counts)
rebuilt_dict = {}
for dimension in range(n_dims):
rebuilt_dict[f"d{dimension}"] = dimension_counts[dimension]
return rebuilt_dict
def rebuild_counts_from_dictionary(dictionary:dict, n_dims, shots):
dataframe = pd.DataFrame(dictionary)
return rebuild_counts_from_dataframe(dataframe=dataframe, n_dims=n_dims, shots=shots)
def get_stats_from_counts_dict(results_dict:dict):
dataframe = pd.DataFrame(results_dict)
return get_stats_from_counts_dataframe(dataframe)
def get_stats_from_counts_dataframe(counts_dataframe: pd.DataFrame)-> dict:
results_dict = {}
results_dict["corr"] = counts_dataframe.corr()
results_dict["cov"] = counts_dataframe.cov()
results_dict["mean"] = counts_dataframe.mean()
results_dict['var'] = counts_dataframe.var()
return results_dict
def get_n_steps_from_filepath(filepath)-> int:
filename = filepath.split('/')[-1]
return int(re.findall(r"\d+_steps",filename)[0].split('_')[0])
def get_n_shots_from_path(path)-> int:
experiment_dir_name = path.split('/')[-1]
nshots = int(re.findall(r"\d+shots",experiment_dir_name)[0].split('s')[0])
return nshots
def get_n_dims_from_path(path)-> int:
experiment_dir_name = path.split('/')[-1]
ndims = int(re.findall(r"\d+D_",experiment_dir_name)[0].split('D')[0])
return ndims
def extract_mean_variance_vs_nsteps(directory_path: str,dimension = 0):
nshots = get_n_shots_from_path(directory_path)
ndims = get_n_dims_from_path(directory_path)
assert dimension < ndims, "queried dimension exceeds experiment space"
files = glob.glob(directory_path+'/*/data/**.csv')
files.sort(key = get_n_steps_from_filepath)
n_steps = []
variance = []
mean = []
for filepath in files:
filename = filepath.split('/')[-1]
nsteps = int(re.findall(r"\d+_steps",filename)[0].split('_')[0])
rebuilt_dict = rebuild_counts_from_csv(filepath,n_dims=ndims,shots=nshots)
stats = get_stats_from_counts_dict(rebuilt_dict)
variance.append(stats['var'][dimension])
mean.append(stats['mean'][dimension])
n_steps.append(nsteps)
return n_steps, variance, mean | en | 0.305122 | # print(dataframe["dimension_0"][idx]) # # print(dimension_counts) | 2.795626 | 3 |
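A self-contained sketch of the helpers above on a made-up one-dimensional distribution (two bins, eight shots), with no CSV files involved:

toy = {"dimension_0": [0, 1], "probability_density": [0.25, 0.75]}
counts = rebuild_counts_from_dictionary(toy, n_dims=1, shots=8)
# counts == {"d0": [0, 0, 1, 1, 1, 1, 1, 1]}
stats = get_stats_from_counts_dict(counts)
print(stats["mean"]["d0"], stats["var"]["d0"])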
app/wirecard/tasks.py | michel-rodrigues/viggio_backend | 0 | 8841 | <gh_stars>0
from sentry_sdk import capture_exception
from dateutil.parser import parse
from project_configuration.celery import app
from orders.models import Charge
from request_shoutout.domain.models import Charge as DomainCharge
from .models import WirecardTransactionData
CROSS_SYSTEMS_STATUS_MAPPING = {
'WAITING': DomainCharge.PROCESSING,
'IN_ANALYSIS': DomainCharge.PROCESSING,
'PRE_AUTHORIZED': DomainCharge.PRE_AUTHORIZED,
'AUTHORIZED': DomainCharge.PAID,
'CANCELLED': DomainCharge.CANCELLED,
'REFUNDED': DomainCharge.CANCELLED,
'REVERSED': DomainCharge.CANCELLED,
'SETTLED': DomainCharge.PAID,
}
def _update_status(wirecard_status, wirecard_payment_hash):
(
Charge.objects
.filter(order__third_party_transaction__wirecard_payment_hash=wirecard_payment_hash)
.update(status=CROSS_SYSTEMS_STATUS_MAPPING[wirecard_status])
)
def _update_payment_event_timestamp(wirecard_transaction, payment_event_timestamp):
wirecard_transaction.payment_event_last_timestamp = payment_event_timestamp
wirecard_transaction.save()
def _is_a_delayied_notification(payment_event_timestamp, wirecard_transaction):
if wirecard_transaction.payment_event_last_timestamp:
return payment_event_timestamp < wirecard_transaction.payment_event_last_timestamp
return False
@app.task
def update_payment_status(notification):
payment_event_timestamp = parse(notification['resource']['payment']['updatedAt'])
payment_status = notification['resource']['payment']['status']
wirecard_payment_hash = notification['resource']['payment']['id']
try:
wirecard_transaction = (
WirecardTransactionData.objects.get(wirecard_payment_hash=wirecard_payment_hash)
)
# This exception has been raised a few times; since we do not know whether it is caused by failures
# in the Wirecard sandbox, we avoid breaking the application and just send the exception to Sentry
except WirecardTransactionData.DoesNotExist:
capture_exception()
else:
if not _is_a_delayied_notification(payment_event_timestamp, wirecard_transaction):
_update_status(payment_status, wirecard_payment_hash)
_update_payment_event_timestamp(wirecard_transaction, payment_event_timestamp)
| from sentry_sdk import capture_exception
from dateutil.parser import parse
from project_configuration.celery import app
from orders.models import Charge
from request_shoutout.domain.models import Charge as DomainCharge
from .models import WirecardTransactionData
CROSS_SYSTEMS_STATUS_MAPPING = {
'WAITING': DomainCharge.PROCESSING,
'IN_ANALYSIS': DomainCharge.PROCESSING,
'PRE_AUTHORIZED': DomainCharge.PRE_AUTHORIZED,
'AUTHORIZED': DomainCharge.PAID,
'CANCELLED': DomainCharge.CANCELLED,
'REFUNDED': DomainCharge.CANCELLED,
'REVERSED': DomainCharge.CANCELLED,
'SETTLED': DomainCharge.PAID,
}
def _update_status(wirecard_status, wirecard_payment_hash):
(
Charge.objects
.filter(order__third_party_transaction__wirecard_payment_hash=wirecard_payment_hash)
.update(status=CROSS_SYSTEMS_STATUS_MAPPING[wirecard_status])
)
def _update_payment_event_timestamp(wirecard_transaction, payment_event_timestamp):
wirecard_transaction.payment_event_last_timestamp = payment_event_timestamp
wirecard_transaction.save()
def _is_a_delayied_notification(payment_event_timestamp, wirecard_transaction):
if wirecard_transaction.payment_event_last_timestamp:
return payment_event_timestamp < wirecard_transaction.payment_event_last_timestamp
return False
@app.task
def update_payment_status(notification):
payment_event_timestamp = parse(notification['resource']['payment']['updatedAt'])
payment_status = notification['resource']['payment']['status']
wirecard_payment_hash = notification['resource']['payment']['id']
try:
wirecard_transaction = (
WirecardTransactionData.objects.get(wirecard_payment_hash=wirecard_payment_hash)
)
# This exception has been raised a few times; since we do not know whether it is caused by failures
# in the Wirecard sandbox, we avoid breaking the application and just send the exception to Sentry
except WirecardTransactionData.DoesNotExist:
capture_exception()
else:
if not _is_a_delayied_notification(payment_event_timestamp, wirecard_transaction):
_update_status(payment_status, wirecard_payment_hash)
_update_payment_event_timestamp(wirecard_transaction, payment_event_timestamp) | pt | 0.999087 | # Algumas vezes tem subido essa exceção, como não sabemos se é devido à falhas na sandbox # da wirecard, estamos evitando quebrar a aplicação e enviando a exceção para o sentry | 1.889436 | 2 |
py/multiple_dispatch_example.py | coalpha/coalpha.github.io | 0 | 8842 | <filename>py/multiple_dispatch_example.py
from typing import *
from multiple_dispatch import multiple_dispatch
@overload
@multiple_dispatch
def add(a: Literal[4, 6, 8], b):
raise TypeError("No adding 2, 4, 6, or 8!")
@overload
@multiple_dispatch
def add(a: int, b: str):
return f"int + str = {a} + {b}"
@overload
@multiple_dispatch
def add(a: int, b: int):
return a + b
@multiple_dispatch
def add(a, b):
return f"Any + Any = {a} + {b}"
print(add(2, "hello"))
| <filename>py/multiple_dispatch_example.py
from typing import *
from multiple_dispatch import multiple_dispatch
@overload
@multiple_dispatch
def add(a: Literal[4, 6, 8], b):
raise TypeError("No adding 2, 4, 6, or 8!")
@overload
@multiple_dispatch
def add(a: int, b: str):
return f"int + str = {a} + {b}"
@overload
@multiple_dispatch
def add(a: int, b: int):
return a + b
@multiple_dispatch
def add(a, b):
return f"Any + Any = {a} + {b}"
print(add(2, "hello"))
| none | 1 | 3.509457 | 4 |
|
dygraph/alexnet/network.py | Sunyingbin/models | 0 | 8843 | <gh_stars>0
"""
Build AlexNet with the PaddlePaddle dynamic graph (dygraph) API
"""
import paddle.fluid as fluid
import numpy as np
class Conv2D(fluid.dygraph.Layer):
def __init__(self,
name_scope,
num_channels,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=1,
act=None,
use_cudnn=False,
param_attr=None,
bias_attr=None):
super(Conv2D, self).__init__(name_scope)
self._conv2d = fluid.dygraph.Conv2D(
num_channels=num_channels,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
param_attr=param_attr,
bias_attr=bias_attr,
act=act,
use_cudnn=use_cudnn)
def forward(self, inputs):
x = self._conv2d(inputs)
return x
class Conv2DPool(fluid.dygraph.Layer):
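    """A convolution followed by a pooling layer, exposed as a single dygraph Layer."""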
def __init__(self,
name_scope,
num_channels,
num_filters,
filter_size,
pool_size,
pool_stride,
pool_padding=0,
pool_type='max',
global_pooling=False,
conv_stride=1,
conv_padding=0,
conv_dilation=1,
conv_groups=1,
act=None,
use_cudnn=False,
param_attr=None,
bias_attr=None):
super(Conv2DPool, self).__init__(name_scope)
self._conv2d = fluid.dygraph.Conv2D(
num_channels=num_channels,
num_filters=num_filters,
filter_size=filter_size,
stride=conv_stride,
padding=conv_padding,
dilation=conv_dilation,
groups=conv_groups,
param_attr=param_attr,
bias_attr=bias_attr,
act=act,
use_cudnn=use_cudnn)
self._pool2d = fluid.dygraph.Pool2D(
pool_size=pool_size,
pool_type=pool_type,
pool_stride=pool_stride,
pool_padding=pool_padding,
global_pooling=global_pooling,
use_cudnn=use_cudnn)
def forward(self, inputs):
x = self._conv2d(inputs)
x = self._pool2d(x)
return x
class AlexNet(fluid.dygraph.Layer):
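    """AlexNet: five convolutional stages followed by three fully connected layers with dropout."""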
def __init__(self, name_scope, class_dim):
super(AlexNet, self).__init__(name_scope)
self.conv_pool_1 = Conv2DPool(self.full_name(), 3, 64, 11, 3, 2, conv_stride=4, conv_padding=2, act='relu')
self.conv_pool_2 = Conv2DPool(self.full_name(), 64, 192, 5, 3, 2, conv_stride=1, conv_padding=2, act='relu')
self.conv_3 = Conv2D(self.full_name(), 192, 384, 3, 1, 1, act='relu')
self.conv_4 = Conv2D(self.full_name(), 384, 256, 3, 1, 1, act='relu')
self.conv_pool_5 = Conv2DPool(self.full_name(), 256, 256, 3, 3, 2, conv_stride=1, conv_padding=1, act='relu')
self.fc6 = fluid.dygraph.FC(self.full_name(), 9216, 4096, act='relu')
self.fc7 = fluid.dygraph.FC(self.full_name(), 4096, 4096, act='relu')
self.fc8 = fluid.dygraph.FC(self.full_name(), 4096, class_dim, act='softmax')
def forward(self, inputs, label=None):
out = self.conv_pool_1(inputs)
out = self.conv_pool_2(out)
out = self.conv_3(out)
out = self.conv_4(out)
out = self.conv_pool_5(out)
out = self.fc6(out)
out = fluid.layers.dropout(out, 0.5)
out = self.fc7(out)
out = fluid.layers.dropout(out, 0.5)
out = self.fc8(out)
if label is not None:
acc = fluid.layers.accuracy(input=out, label=label)
return out, acc
else:
return out
if __name__ == '__main__':
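    # Quick smoke test: push a batch of two zero-filled 224x224 RGB images through the network.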
with fluid.dygraph.guard():
alexnet = AlexNet('alex-net', 3)
img = np.zeros([2, 3, 224, 224]).astype('float32')
img = fluid.dygraph.to_variable(img)
outs = alexnet(img).numpy()
print(outs)
| """
Build AlexNet with the PaddlePaddle dynamic graph (dygraph) API
"""
import paddle.fluid as fluid
import numpy as np
class Conv2D(fluid.dygraph.Layer):
def __init__(self,
name_scope,
num_channels,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=1,
act=None,
use_cudnn=False,
param_attr=None,
bias_attr=None):
super(Conv2D, self).__init__(name_scope)
self._conv2d = fluid.dygraph.Conv2D(
num_channels=num_channels,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
param_attr=param_attr,
bias_attr=bias_attr,
act=act,
use_cudnn=use_cudnn)
def forward(self, inputs):
x = self._conv2d(inputs)
return x
class Conv2DPool(fluid.dygraph.Layer):
def __init__(self,
name_scope,
num_channels,
num_filters,
filter_size,
pool_size,
pool_stride,
pool_padding=0,
pool_type='max',
global_pooling=False,
conv_stride=1,
conv_padding=0,
conv_dilation=1,
conv_groups=1,
act=None,
use_cudnn=False,
param_attr=None,
bias_attr=None):
super(Conv2DPool, self).__init__(name_scope)
self._conv2d = fluid.dygraph.Conv2D(
num_channels=num_channels,
num_filters=num_filters,
filter_size=filter_size,
stride=conv_stride,
padding=conv_padding,
dilation=conv_dilation,
groups=conv_groups,
param_attr=param_attr,
bias_attr=bias_attr,
act=act,
use_cudnn=use_cudnn)
self._pool2d = fluid.dygraph.Pool2D(
pool_size=pool_size,
pool_type=pool_type,
pool_stride=pool_stride,
pool_padding=pool_padding,
global_pooling=global_pooling,
use_cudnn=use_cudnn)
def forward(self, inputs):
x = self._conv2d(inputs)
x = self._pool2d(x)
return x
class AlexNet(fluid.dygraph.Layer):
def __init__(self, name_scope, class_dim):
super(AlexNet, self).__init__(name_scope)
self.conv_pool_1 = Conv2DPool(self.full_name(), 3, 64, 11, 3, 2, conv_stride=4, conv_padding=2, act='relu')
self.conv_pool_2 = Conv2DPool(self.full_name(), 64, 192, 5, 3, 2, conv_stride=1, conv_padding=2, act='relu')
self.conv_3 = Conv2D(self.full_name(), 192, 384, 3, 1, 1, act='relu')
self.conv_4 = Conv2D(self.full_name(), 384, 256, 3, 1, 1, act='relu')
self.conv_pool_5 = Conv2DPool(self.full_name(), 256, 256, 3, 3, 2, conv_stride=1, conv_padding=1, act='relu')
self.fc6 = fluid.dygraph.FC(self.full_name(), 9216, 4096, act='relu')
self.fc7 = fluid.dygraph.FC(self.full_name(), 4096, 4096, act='relu')
self.fc8 = fluid.dygraph.FC(self.full_name(), 4096, class_dim, act='softmax')
def forward(self, inputs, label=None):
out = self.conv_pool_1(inputs)
out = self.conv_pool_2(out)
out = self.conv_3(out)
out = self.conv_4(out)
out = self.conv_pool_5(out)
out = self.fc6(out)
out = fluid.layers.dropout(out, 0.5)
out = self.fc7(out)
out = fluid.layers.dropout(out, 0.5)
out = self.fc8(out)
if label is not None:
acc = fluid.layers.accuracy(input=out, label=label)
return out, acc
else:
return out
if __name__ == '__main__':
with fluid.dygraph.guard():
alexnet = AlexNet('alex-net', 3)
img = np.zeros([2, 3, 224, 224]).astype('float32')
img = fluid.dygraph.to_variable(img)
outs = alexnet(img).numpy()
print(outs) | ja | 0.393774 | 动态图构建 AlexNet | 2.973459 | 3 |
turtlegameproject/turtlegame.py | Ayon134/code_for_Kids | 0 | 8844 | import turtle
import random
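# A simple two-player dice race: each turtle advances 20 steps per die pip,
# and the first turtle to reach its circle on the right side of the screen wins.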
p1=turtle.Turtle()
p1.color("green")
p1.shape("turtle")
p1.penup()
p1.goto(-200,100)
p2=p1.clone()
p2.color("blue")
p2.penup()
p2.goto(-200,-100)
p1.goto(300,60)
p1.pendown()
p1.circle(40)
p1.penup()
p1.goto(-200,100)
p2.goto(300,-140)
p2.pendown()
p2.circle(40)
p2.penup()
p2.goto(-200,-100)
die=[1,2,3,4,5,6]
i=1
while(i <= 20):
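    # One loop pass is one round of the race; i is never incremented below, so in practice
    # the game only ends when one of the turtles reaches the finish position.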
if p1.pos() >= (300,100):
print("p1 wins")
break
elif p2.pos() >= (300,-100):
print("p2 wins")
break
else:
p1_turn=input("press enter to start")
die_out=random.choice(die)
print("you get", die_out)
print("the number of steps:", 20*die_out)
p1.forward(20*die_out)
p2_turn=input("press enter to challenge")
d=random.choice(die)
print("you get",d)
print("the number os steps:",20*d)
p2.forward(20*d) | import turtle
import random
p1=turtle.Turtle()
p1.color("green")
p1.shape("turtle")
p1.penup()
p1.goto(-200,100)
p2=p1.clone()
p2.color("blue")
p2.penup()
p2.goto(-200,-100)
p1.goto(300,60)
p1.pendown()
p1.circle(40)
p1.penup()
p1.goto(-200,100)
p2.goto(300,-140)
p2.pendown()
p2.circle(40)
p2.penup()
p2.goto(-200,-100)
die=[1,2,3,4,5,6]
i=1
while(i <= 20):
if p1.pos() >= (300,100):
print("p1 wins")
break
elif p2.pos() >= (300,-100):
print("p2 wins")
break
else:
p1_turn=input("press enter to start")
die_out=random.choice(die)
print("you get", die_out)
print("the number of steps:", 20*die_out)
p1.forward(20*die_out)
p2_turn=input("press enter to challenge")
d=random.choice(die)
print("you get",d)
print("the number os steps:",20*d)
p2.forward(20*d) | none | 1 | 3.884433 | 4 |
|
hivwholeseq/sequencing/check_pipeline.py | neherlab/hivwholeseq | 3 | 8845 | #!/usr/bin/env python
# vim: fdm=marker
'''
author: <NAME>
date: 15/06/14
content: Check the status of the pipeline for one or more sequencing samples.
'''
# Modules
import os
import sys
from itertools import izip
import argparse
from Bio import SeqIO
from hivwholeseq.utils.generic import getchar
from hivwholeseq.sequencing.samples import SampleSeq, load_sequencing_run
from hivwholeseq.patients.patients import load_samples_sequenced as lssp
from hivwholeseq.patients.patients import SamplePat
from hivwholeseq.sequencing.samples import load_samples_sequenced as lss
from hivwholeseq.utils.mapping import get_number_reads
from hivwholeseq.cluster.fork_cluster import fork_check_pipeline as fork_self
# Globals
len_fr = 8
len_msg = 6
spacing_fragments = 4
# Functions
def check_status(sample, step, detail=1):
'''Check for a sample a certain step of the pipeline at a certain detail'''
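    # detail=1: only check that the expected output files exist.
    # detail=3: additionally report read counts (or consensus length).
    # detail=2: behave like detail=3 for 'filtered'/'consensus', like detail=1 otherwise.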
if detail == 1:
if step == 'premapped':
return [os.path.isfile(sample.get_premapped_filename())]
elif step == 'divided':
return [(fr, os.path.isfile(sample.get_divided_filename(fr)))
for fr in sample.regions_complete]
elif step == 'consensus':
return [(fr, os.path.isfile(sample.get_consensus_filename(fr)))
for fr in sample.regions_generic]
elif step == 'mapped':
return [(fr, os.path.isfile(sample.get_mapped_filename(fr, filtered=False)))
for fr in sample.regions_generic]
elif step == 'filtered':
return [(fr, os.path.isfile(sample.get_mapped_filename(fr, filtered=True)))
for fr in sample.regions_generic]
elif step == 'mapped_initial':
return [(fr, os.path.isfile(sample.get_mapped_to_initial_filename(fr)))
for fr in sample.regions_generic]
elif step == 'mapped_filtered':
# Check whether the mapped filtered is older than the mapped_initial
from hivwholeseq.utils.generic import modification_date
out = []
for fr in sample.regions_generic:
fn_mi = sample.get_mapped_to_initial_filename(fr)
fn_mf = sample.get_mapped_filtered_filename(fr)
if not os.path.isfile(fn_mf):
out.append((fr, False))
continue
if not os.path.isfile(fn_mi):
out.append((fr, True))
continue
md_mi = modification_date(fn_mi)
md_mf = modification_date(fn_mf)
if md_mf < md_mi:
out.append((fr, 'OLD'))
else:
out.append((fr, True))
return out
elif detail == 2:
if step in ('filtered', 'consensus'):
return check_status(sample, step, detail=3)
else:
return check_status(sample, step, detail=1)
elif detail == 3:
if step == 'premapped':
if os.path.isfile(sample.get_premapped_filename()):
return [get_number_reads(sample.get_premapped_filename())]
else:
return [False]
elif step == 'divided':
stati = []
for fr in sample.regions_complete:
fn = sample.get_divided_filename(fr)
if os.path.isfile(fn):
status = (fr, get_number_reads(fn))
else:
status = (fr, False)
stati.append(status)
return stati
elif step == 'consensus':
stati = []
for fr in sample.regions_generic:
fn = sample.get_consensus_filename(fr)
if os.path.isfile(fn):
status = (fr, len(SeqIO.read(fn, 'fasta')))
else:
status = (fr, False)
stati.append(status)
return stati
elif step == 'mapped':
stati = []
for fr in sample.regions_generic:
fn = sample.get_mapped_filename(fr, filtered=False)
if os.path.isfile(fn):
status = (fr, get_number_reads(fn))
else:
status = (fr, False)
stati.append(status)
return stati
elif step == 'filtered':
stati = []
for fr in sample.regions_generic:
fn = sample.get_mapped_filename(fr, filtered=True)
if os.path.isfile(fn):
status = (fr, get_number_reads(fn))
else:
status = (fr, False)
stati.append(status)
return stati
# TODO: add mapped_to_initial and downstream
elif step in ('mapped_initial', 'mapped_filtered'):
return check_status(sample, step, detail=1)
def print_info(name, status, detail=1):
'''Print info on these files'''
print '{:<20s}'.format(name+':'),
if name.lower() in ['premapped']:
status = status[0]
if status == True:
print 'OK'
elif status == False:
print 'MISS'
else:
print str(status)
else:
stati = list(status)
msg = []
for (fr, status) in stati:
ms = ('{:<'+str(len_fr)+'s}').format(fr+':')
if status == True:
msg.append(ms+('{:>'+str(len_msg)+'}').format('OK'))
elif status == False:
msg.append(ms+('{:>'+str(len_msg)+'}').format('MISS'))
else:
msg.append(ms+('{:>'+str(len_msg)+'}').format(str(status)))
print (' ' * spacing_fragments).join(msg)
# Script
if __name__ == '__main__':
# Parse input args
parser = argparse.ArgumentParser(description='Check sequencing run for missing parts of the analysis',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--runs', required=True, nargs='+',
help='Seq runs to analyze (e.g. Tue28, test_tiny)')
parser.add_argument('--adaIDs', nargs='+',
help='Adapter IDs to analyze (e.g. TS2)')
parser.add_argument('--nopatients', action='store_false', dest='use_pats',
help='Include non-patient samples (e.g. reference strains)')
parser.add_argument('--interactive', action='store_true',
help='Interactive mode')
parser.add_argument('--detail', type=int, default=1,
help='Include details on number of reads, length of consensus')
parser.add_argument('--submit', action='store_true',
help='Execute the script in parallel on the cluster')
args = parser.parse_args()
seq_runs = args.runs
adaIDs = args.adaIDs
use_pats = args.use_pats
use_interactive = args.interactive
detail = args.detail
submit = args.submit
if submit:
fork_self(seq_runs, adaIDs=adaIDs,
pats=use_pats,
detail=detail)
sys.exit()
samples_pat = lssp(include_wrong=True)
samples = lss()
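    # Restrict the sequencing-sample table to the requested runs (and adapters, if given).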
samples = samples.loc[samples['seq run'].isin(seq_runs)]
if adaIDs is not None:
samples = samples.loc[samples.adapter.isin(adaIDs)]
if len(seq_runs) >= 2:
samples.sort(columns=['patient sample', 'seq run'], inplace=True)
for isa, (samplename, sample) in enumerate(samples.iterrows()):
sample = SampleSeq(sample)
print sample.name, 'seq:', sample['seq run'], sample.adapter,
if sample['patient sample'] == 'nan':
print 'not a patient sample',
if use_pats:
print '(skip)'
continue
else:
print ''
else:
sample_pat = samples_pat.loc[sample['patient sample']]
print 'patient: '+sample_pat.patient
steps = ['premapped', 'divided', 'consensus', 'mapped', 'filtered',
'mapped_initial', 'mapped_filtered']
for step in steps:
status = check_status(sample, step, detail=detail)
print_info(step.capitalize(), status, detail=detail)
if (isa != len(samples) - 1):
print ''
if use_interactive and (isa != len(samples) - 1):
print 'Press q to exit',
sys.stdout.flush()
ch = getchar()
if ch.lower() in ['q']:
print 'stopped'
break
else:
sys.stdout.write("\x1b[1A")
print ''
| #!/usr/bin/env python
# vim: fdm=marker
'''
author: <NAME>
date: 15/06/14
content: Check the status of the pipeline for one or more sequencing samples.
'''
# Modules
import os
import sys
from itertools import izip
import argparse
from Bio import SeqIO
from hivwholeseq.utils.generic import getchar
from hivwholeseq.sequencing.samples import SampleSeq, load_sequencing_run
from hivwholeseq.patients.patients import load_samples_sequenced as lssp
from hivwholeseq.patients.patients import SamplePat
from hivwholeseq.sequencing.samples import load_samples_sequenced as lss
from hivwholeseq.utils.mapping import get_number_reads
from hivwholeseq.cluster.fork_cluster import fork_check_pipeline as fork_self
# Globals
len_fr = 8
len_msg = 6
spacing_fragments = 4
# Functions
def check_status(sample, step, detail=1):
'''Check for a sample a certain step of the pipeline at a certain detail'''
if detail == 1:
if step == 'premapped':
return [os.path.isfile(sample.get_premapped_filename())]
elif step == 'divided':
return [(fr, os.path.isfile(sample.get_divided_filename(fr)))
for fr in sample.regions_complete]
elif step == 'consensus':
return [(fr, os.path.isfile(sample.get_consensus_filename(fr)))
for fr in sample.regions_generic]
elif step == 'mapped':
return [(fr, os.path.isfile(sample.get_mapped_filename(fr, filtered=False)))
for fr in sample.regions_generic]
elif step == 'filtered':
return [(fr, os.path.isfile(sample.get_mapped_filename(fr, filtered=True)))
for fr in sample.regions_generic]
elif step == 'mapped_initial':
return [(fr, os.path.isfile(sample.get_mapped_to_initial_filename(fr)))
for fr in sample.regions_generic]
elif step == 'mapped_filtered':
# Check whether the mapped filtered is older than the mapped_initial
from hivwholeseq.utils.generic import modification_date
out = []
for fr in sample.regions_generic:
fn_mi = sample.get_mapped_to_initial_filename(fr)
fn_mf = sample.get_mapped_filtered_filename(fr)
if not os.path.isfile(fn_mf):
out.append((fr, False))
continue
if not os.path.isfile(fn_mi):
out.append((fr, True))
continue
md_mi = modification_date(fn_mi)
md_mf = modification_date(fn_mf)
if md_mf < md_mi:
out.append((fr, 'OLD'))
else:
out.append((fr, True))
return out
elif detail == 2:
if step in ('filtered', 'consensus'):
return check_status(sample, step, detail=3)
else:
return check_status(sample, step, detail=1)
elif detail == 3:
if step == 'premapped':
if os.path.isfile(sample.get_premapped_filename()):
return [get_number_reads(sample.get_premapped_filename())]
else:
return [False]
elif step == 'divided':
stati = []
for fr in sample.regions_complete:
fn = sample.get_divided_filename(fr)
if os.path.isfile(fn):
status = (fr, get_number_reads(fn))
else:
status = (fr, False)
stati.append(status)
return stati
elif step == 'consensus':
stati = []
for fr in sample.regions_generic:
fn = sample.get_consensus_filename(fr)
if os.path.isfile(fn):
status = (fr, len(SeqIO.read(fn, 'fasta')))
else:
status = (fr, False)
stati.append(status)
return stati
elif step == 'mapped':
stati = []
for fr in sample.regions_generic:
fn = sample.get_mapped_filename(fr, filtered=False)
if os.path.isfile(fn):
status = (fr, get_number_reads(fn))
else:
status = (fr, False)
stati.append(status)
return stati
elif step == 'filtered':
stati = []
for fr in sample.regions_generic:
fn = sample.get_mapped_filename(fr, filtered=True)
if os.path.isfile(fn):
status = (fr, get_number_reads(fn))
else:
status = (fr, False)
stati.append(status)
return stati
# TODO: add mapped_to_initial and downstream
elif step in ('mapped_initial', 'mapped_filtered'):
return check_status(sample, step, detail=1)
def print_info(name, status, detail=1):
'''Print info on these files'''
print '{:<20s}'.format(name+':'),
if name.lower() in ['premapped']:
status = status[0]
if status == True:
print 'OK'
elif status == False:
print 'MISS'
else:
print str(status)
else:
stati = list(status)
msg = []
for (fr, status) in stati:
ms = ('{:<'+str(len_fr)+'s}').format(fr+':')
if status == True:
msg.append(ms+('{:>'+str(len_msg)+'}').format('OK'))
elif status == False:
msg.append(ms+('{:>'+str(len_msg)+'}').format('MISS'))
else:
msg.append(ms+('{:>'+str(len_msg)+'}').format(str(status)))
print (' ' * spacing_fragments).join(msg)
# Script
if __name__ == '__main__':
# Parse input args
parser = argparse.ArgumentParser(description='Check sequencing run for missing parts of the analysis',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--runs', required=True, nargs='+',
help='Seq runs to analyze (e.g. Tue28, test_tiny)')
parser.add_argument('--adaIDs', nargs='+',
help='Adapter IDs to analyze (e.g. TS2)')
parser.add_argument('--nopatients', action='store_false', dest='use_pats',
help='Include non-patient samples (e.g. reference strains)')
parser.add_argument('--interactive', action='store_true',
help='Interactive mode')
parser.add_argument('--detail', type=int, default=1,
help='Include details on number of reads, length of consensus')
parser.add_argument('--submit', action='store_true',
help='Execute the script in parallel on the cluster')
args = parser.parse_args()
seq_runs = args.runs
adaIDs = args.adaIDs
use_pats = args.use_pats
use_interactive = args.interactive
detail = args.detail
submit = args.submit
if submit:
fork_self(seq_runs, adaIDs=adaIDs,
pats=use_pats,
detail=detail)
sys.exit()
samples_pat = lssp(include_wrong=True)
samples = lss()
samples = samples.loc[samples['seq run'].isin(seq_runs)]
if adaIDs is not None:
samples = samples.loc[samples.adapter.isin(adaIDs)]
if len(seq_runs) >= 2:
samples.sort(columns=['patient sample', 'seq run'], inplace=True)
for isa, (samplename, sample) in enumerate(samples.iterrows()):
sample = SampleSeq(sample)
print sample.name, 'seq:', sample['seq run'], sample.adapter,
if sample['patient sample'] == 'nan':
print 'not a patient sample',
if use_pats:
print '(skip)'
continue
else:
print ''
else:
sample_pat = samples_pat.loc[sample['patient sample']]
print 'patient: '+sample_pat.patient
steps = ['premapped', 'divided', 'consensus', 'mapped', 'filtered',
'mapped_initial', 'mapped_filtered']
for step in steps:
status = check_status(sample, step, detail=detail)
print_info(step.capitalize(), status, detail=detail)
if (isa != len(samples) - 1):
print ''
if use_interactive and (isa != len(samples) - 1):
print 'Press q to exit',
sys.stdout.flush()
ch = getchar()
if ch.lower() in ['q']:
print 'stopped'
break
else:
sys.stdout.write("\x1b[1A")
print ''
| en | 0.672106 | #!/usr/bin/env python # vim: fdm=marker author: <NAME> date: 15/06/14 content: Check the status of the pipeline for one or more sequencing samples. # Modules # Globals # Functions Check for a sample a certain step of the pipeline at a certain detail # Check whether the mapped filtered is older than the mapped_initial # TODO: add mapped_to_initial and downstream Print info on these files # Script # Parse input args | 2.410825 | 2 |
app.py | thliang01/nba-s | 0 | 8846 | <gh_stars>0
import streamlit as st
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# --------------------------------------------------------------
# Import and clean data
game_details = pd.read_csv('games_details.csv')
# print(game_details.head(5))
game_details.drop(['GAME_ID', 'TEAM_ID', 'PLAYER_ID', 'START_POSITION',
'COMMENT', 'TEAM_ABBREVIATION'], axis=1, inplace=True)
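# Derive free throws missed (FTL = FTA - FTM) and drop rows with missing stats.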
game_details['FTL'] = game_details['FTA'] - game_details['FTM']
game_details = game_details.dropna()
# game_details.shape
# game_details.info()
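# 'MIN' values look like '35:20'; keep only the minutes part (the first two characters).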
game_details['MIN'] = game_details['MIN'].str.strip(':').str[0:2]
df = game_details.copy()
if st.checkbox('Show dataframe'):
st.write("Players Game Details")
st.dataframe(df.head(10))
# --------------------------------------------------------------
st.write("Top 20 Players in the NBA")
top_activities = df.groupby(by='PLAYER_NAME')['PTS'].sum().sort_values(ascending=False).head(20).reset_index()
plt.figure(figsize=(15, 10))
plt.xlabel('POINTS', fontsize=15)
plt.ylabel('PLAYER_NAME', fontsize=15)
plt.title('Top 20 Players in the NBA League', fontsize=20)
ax = sns.barplot(x=top_activities['PTS'], y=top_activities['PLAYER_NAME'])
for i, (value, name) in enumerate(zip(top_activities['PTS'], top_activities['PLAYER_NAME'])):
ax.text(value, i - .05, f'{value:,.0f}', size=10, ha='left', va='center')
ax.set(xlabel='POINTS', ylabel='PLAYER_NAME')
st.pyplot(plt)
player = st.multiselect(
"Choose Player", df['PLAYER_NAME']
)
st.write("""
# My first app
Hello *world!*
""")
x = st.slider("Select a number")
st.write("You selected:", x)
| import streamlit as st
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# --------------------------------------------------------------
# Import and clean data
game_details = pd.read_csv('games_details.csv')
# print(game_details.head(5))
game_details.drop(['GAME_ID', 'TEAM_ID', 'PLAYER_ID', 'START_POSITION',
'COMMENT', 'TEAM_ABBREVIATION'], axis=1, inplace=True)
game_details['FTL'] = game_details['FTA'] - game_details['FTM']
game_details = game_details.dropna()
# game_details.shape
# game_details.info()
game_details['MIN'] = game_details['MIN'].str.strip(':').str[0:2]
df = game_details.copy()
if st.checkbox('Show dataframe'):
st.write("Players Game Details")
st.dataframe(df.head(10))
# --------------------------------------------------------------
st.write("Top 20 Players in the NBA")
top_activities = df.groupby(by='PLAYER_NAME')['PTS'].sum().sort_values(ascending=False).head(20).reset_index()
plt.figure(figsize=(15, 10))
plt.xlabel('POINTS', fontsize=15)
plt.ylabel('PLAYER_NAME', fontsize=15)
plt.title('Top 20 Players in the NBA League', fontsize=20)
ax = sns.barplot(x=top_activities['PTS'], y=top_activities['PLAYER_NAME'])
for i, (value, name) in enumerate(zip(top_activities['PTS'], top_activities['PLAYER_NAME'])):
ax.text(value, i - .05, f'{value:,.0f}', size=10, ha='left', va='center')
ax.set(xlabel='POINTS', ylabel='PLAYER_NAME')
st.pyplot(plt)
player = st.multiselect(
"Choose Player", df['PLAYER_NAME']
)
st.write("""
# My first app
Hello *world!*
""")
x = st.slider("Select a number")
st.write("You selected:", x) | en | 0.343465 | # -------------------------------------------------------------- # Import and clean data # print(game_details.head(5)) # game_details.shape # game_details.info() # -------------------------------------------------------------- # My first app Hello *world!* | 3.370561 | 3 |
python/craftassist/voxel_models/geoscorer/geoscorer_util.py | kepolol/craftassist | 0 | 8847 | <gh_stars>0
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import numpy as np
import random
from datetime import datetime
import sys
import argparse
import torch
import os
from inspect import currentframe, getframeinfo
GEOSCORER_DIR = os.path.dirname(os.path.realpath(__file__))
CRAFTASSIST_DIR = os.path.join(GEOSCORER_DIR, "../")
sys.path.append(CRAFTASSIST_DIR)
from shapes import get_bounds
def pretty_log(log_string):
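    """Print a timestamped log line prefixed with the calling file name and line number."""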
cf = currentframe().f_back
filename = getframeinfo(cf).filename.split("/")[-1]
print(
"{} {}:{} {}".format(
datetime.now().strftime("%m/%d/%Y %H:%M:%S"), filename, cf.f_lineno, log_string
)
)
sys.stdout.flush()
## Train Fxns ##
def get_base_train_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", type=int, default=1, help="0 for cpu")
parser.add_argument("--batchsize", type=int, default=64, help="batchsize")
parser.add_argument("--dataset", default="shapes", help="shapes/segments/both")
parser.add_argument(
"--epochsize", type=int, default=1000, help="number of examples in an epoch"
)
parser.add_argument("--nepoch", type=int, default=1000, help="number of epochs")
parser.add_argument("--context_sidelength", type=int, default=32, help="size of cube")
parser.add_argument("--hidden_dim", type=int, default=64, help="size of hidden dim")
parser.add_argument("--num_layers", type=int, default=3, help="num layers")
parser.add_argument(
"--blockid_embedding_dim", type=int, default=8, help="size of blockid embedding"
)
parser.add_argument(
"--num_words", type=int, default=256, help="number of words for the blockid embeds"
)
parser.add_argument("--lr", type=float, default=0.1, help="step size for net")
parser.add_argument(
"--optim", type=str, default="adagrad", help="optim type to use (adagrad|sgd|adam)"
)
parser.add_argument("--momentum", type=float, default=0.0, help="momentum")
parser.add_argument("--checkpoint", default="", help="where to save model")
parser.add_argument("--num_workers", type=int, default=4, help="number of dataloader workers")
return parser
def add_dataset_flags(parser):
parser.add_argument(
"--dataset_ratios", type=str, default="shape:1.0", help="comma separated name:prob"
)
parser.add_argument("--useid", type=bool, default=False, help="use blockid")
parser.add_argument("--fixed_cube_size", type=int, default=None, help="fixed_cube_size")
parser.add_argument("--fixed_center", type=bool, default=False, help="fixed_center")
parser.add_argument(
"--min_seg_size", type=int, default=6, help="min seg size for seg data type"
)
parser.add_argument(
"--use_saved_data",
type=bool,
default=False,
help="use preparsed data for this min_seg_size",
)
def add_directional_flags(parser):
parser.add_argument("--spatial_embedding_dim", type=int, default=8, help="size of spatial emb")
parser.add_argument("--output_embedding_dim", type=int, default=8, help="size of output emb")
parser.add_argument(
"--seg_direction_net", type=bool, default=False, help="use segdirnet module"
)
parser.add_argument(
"--seg_use_viewer_pos", type=bool, default=False, help="use viewer pos in seg"
)
parser.add_argument(
"--seg_use_viewer_look", type=bool, default=False, help="use viewer look in seg"
)
parser.add_argument(
"--seg_use_direction", type=bool, default=False, help="use direction in seg"
)
parser.add_argument("--num_seg_dir_layers", type=int, default=3, help="num segdir net layers")
parser.add_argument(
"--cont_use_direction", type=bool, default=False, help="use direction in context"
)
parser.add_argument(
"--cont_use_xyz_from_viewer_look",
type=bool,
default=False,
help="use xyz position relative to viewer look in context emb",
)
def get_dataloader(dataset, opts, collate_fxn):
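    """Wrap the dataset in a shuffling DataLoader whose workers get NumPy seeds derived from the torch seed."""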
def init_fn(wid):
np.random.seed(torch.initial_seed() % (2 ** 32))
return torch.utils.data.DataLoader(
dataset,
batch_size=opts["batchsize"],
shuffle=True,
pin_memory=True,
drop_last=True,
num_workers=opts["num_workers"],
worker_init_fn=init_fn,
collate_fn=collate_fxn,
)
def to_cuda(list_modules):
for m in list_modules:
m.cuda()
def multitensor_collate_fxn(x):
"""
Takes a list of BATCHSIZE lists of tensors of length D.
Returns a list of length D of batched tensors.
"""
num_tensors_to_batch = len(x[0])
regroup_tensors = [[] for i in range(num_tensors_to_batch)]
for t_list in x:
for i, t in enumerate(t_list):
regroup_tensors[i].append(t.unsqueeze(0))
batched_tensors = [torch.cat(tl) for tl in regroup_tensors]
return batched_tensors
## 3D Utils ##
def get_side_lengths(bounds):
"""
Bounds should be a list of [min_x, max_x, min_y, max_y, min_z, max_z].
Returns a list of the side lengths.
"""
return [x + 1 for x in (bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4])]
def coord_to_index(coord, sl):
"""
Takes a 3D coordinate in a cube and the cube side length.
Returns index in flattened 3D array.
"""
return coord[0] * sl * sl + coord[1] * sl + coord[2]
def index_to_coord(index, sl):
"""
Takes an index into a flattened 3D array and its side length.
Returns the coordinate in the cube.
"""
coord = []
two_d_slice_size = sl * sl
coord.append(index // two_d_slice_size)
remaining = index % two_d_slice_size
coord.append(remaining // sl)
coord.append(remaining % sl)
return coord
def shift_subsegment_corner(S):
"""
Takes a segment, described as a list of tuples of the form:
((x, y, z), (block_id, ?))
Returns the segment in the same form, shifted to the origin, and the shift vec
"""
bounds = get_bounds(S)
shift_zero_vec = [-bounds[0], -bounds[2], -bounds[4]]
new_S = []
for s in S:
new_S.append((tuple([sum(x) for x in zip(s[0], shift_zero_vec)]), s[1]))
return new_S, shift_zero_vec
def subset_and_scale_3d(init_array, mins, maxs, scale=1):
return scale * init_array[mins[0] : maxs[0], mins[1] : maxs[1], mins[2] : maxs[2]]
def combine_seg_context(seg, context, seg_shift, seg_mult=1):
completed_context = context.clone()
# Calculate the region to copy over, sometimes the segment
# falls outside the range of the context bounding box
c_mins = [int(i) for i in seg_shift]
c_maxs = [int(min(ss + 8, 32)) for ss in seg_shift]
s_mins = [0 for i in range(3)]
# If the edge of the segment goes past the edge of the context (ss + 8 > 32),
# remove the extra from the segment.
s_maxs = [int(8 - max(0, (ss + 8) - 32)) for ss in seg_shift]
seg_to_add = subset_and_scale_3d(seg, s_mins, s_maxs, seg_mult)
context_subset = subset_and_scale_3d(completed_context, c_mins, c_maxs, 1)
completed_context[c_mins[0] : c_maxs[0], c_mins[1] : c_maxs[1], c_mins[2] : c_maxs[2]] = (
seg_to_add + context_subset
)
return completed_context
def get_vector(start, end):
return end - start
def get_random_viewer_info(sl):
viewer_pos = torch.tensor(random_int_triple(0, sl - 1))
viewer_look = torch.tensor(random_int_triple(0, sl - 1))
if viewer_pos.eq(viewer_look).sum() == viewer_pos.size(0):
if viewer_look[0] < sl + 1:
viewer_look[0] += 1
else:
viewer_look[0] -= 1
return viewer_pos, viewer_look
def b_greater_than_a(a, b):
if a == b:
return 0
return 1 if b > a else -1
def shift_block(b, s):
return tuple((tuple((b[0][0] + s[0], b[0][1] + s[1], b[0][2] + s[2])), b[1]))
def rotate_block(b, c, r):
""" rotates the block b around the point c by 90*r degrees
in the xz plane. r should be 1 or -1."""
# TODO add a reflection
c = np.array(c)
p = np.add(b[0], -c)
x = p[0]
z = p[2]
if r == -1:
p[0] = z
p[2] = -x
else:
p[0] = -z
p[2] = x
return (tuple(p + c), b[1])
def random_int_triple(minval, maxval):
t = [
random.randint(minval, maxval),
random.randint(minval, maxval),
random.randint(minval, maxval),
]
return t
def check_inrange(x, minval, maxval):
"""inclusive check"""
return all([v >= minval for v in x]) and all([v <= maxval for v in x])
def normalize(batched_vector):
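    """Row-wise L2 normalization; rows with zero norm are left unchanged (their norm is treated as 1)."""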
vec = batched_vector.double()
norm = torch.norm(vec, dim=1)
# Set norm to 1 if it's 0
norm = norm + norm.eq(0).double()
expanded_norm = norm.unsqueeze(1).expand(-1, vec.size()[1])
return torch.div(vec, expanded_norm)
def get_rotation_matrix(viewer_pos, viewer_look):
# VP, VL: N x 3, VP_to_VL: N x 3
vp_to_vl = get_vector(viewer_pos, viewer_look)[:, :2]
nlook_vec = normalize(vp_to_vl)
nly = nlook_vec[:, 1]
    # Nlx necessary to correct for the range of arccos
nlx = nlook_vec[:, 0]
nlx = nlx.gt(0).double() - nlx.lt(0).double() - nlx.eq(0).double()
# Take care of nans created by raising 0 to a power
# and then masking the sin theta to 0 as intended
base = 1 - nly * nly
nan_mask = torch.isnan(torch.pow(base, 0.5)).double()
base = base + nan_mask
sin_theta = nlx * nan_mask.eq(0).double() * torch.pow(base, 0.5)
nly = nly.unsqueeze(1)
sin_theta = sin_theta.unsqueeze(1)
rm_pt1 = torch.cat([nly, sin_theta], 1).unsqueeze(1)
rm_pt2 = torch.cat([-sin_theta, nly], 1).unsqueeze(1)
rm = torch.cat([rm_pt1, rm_pt2], 1)
return rm
def rotate_x_y(coord, rotation_matrix):
return torch.mm(coord.unsqueeze(0), rotation_matrix).squeeze(0)
def float_equals(a, b, epsilon):
return True if abs(a - b) < epsilon else False
def get_argmax_list(vals, epsilon, minlist=False, maxlen=None):
mult = -1 if minlist else 1
max_ind = []
for i, v in enumerate(vals):
if not max_ind or float_equals(max_ind[0][1], v, epsilon):
if maxlen and len(max_ind) == maxlen:
continue
max_ind.append((i, v))
elif mult * (v - max_ind[0][1]) > 0:
max_ind = [(i, v)]
return max_ind
def get_firstmax(vals, epsilon, minlist=False):
return get_argmax_list(vals, epsilon, minlist, 1)[0]
# N -> batch size in training
# D -> num target coord per element
# Viewer pos, viewer_look are N x 3 tensors
# Batched target coords is a N x D x 3 tensor
# Output is a N x D x 3 tensor
def get_xyz_viewer_look_coords_batched(viewer_pos, viewer_look, batched_target_coords):
# First verify the sizing and unsqueeze if necessary
btc_sizes = batched_target_coords.size()
vp_sizes = viewer_pos.size()
vl_sizes = viewer_look.size()
if len(btc_sizes) > 3 or len(vp_sizes) > 2 or len(vl_sizes) > 2:
raise Exception("One input has too many dimensions")
if btc_sizes[-1] != 3 or vp_sizes[-1] != 3 or vl_sizes[-1] != 3:
raise Exception("The last dimension of all inputs should be size 3")
if len(btc_sizes) < 3:
for i in range(3 - len(btc_sizes)):
batched_target_coords = batched_target_coords.unsqueeze(0)
if len(vp_sizes) == 1:
viewer_pos = viewer_pos.unsqueeze(0)
if len(vl_sizes) == 1:
viewer_look = viewer_look.unsqueeze(0)
n = batched_target_coords.size()[0]
d = batched_target_coords.size()[1]
# Handle xy and z separately
# XY = N X D x 2
xy = batched_target_coords[:, :, 0:2].double()
# Z = N x D x 1
z = batched_target_coords[:, :, 2].unsqueeze(2).double()
## XY
# Shift such that viewer pos is the origin
# VPXY, VLXY: N x 2
vpxy = viewer_pos.double()[:, 0:2]
vlxy = viewer_look.double()[:, 0:2]
vpxy_to_vlxy = vlxy - vpxy
# VPXY to XY: N x D x 2
vpxy_to_xy = xy - vpxy.unsqueeze(1).expand(n, d, -1)
# Rotate them around the viewer position such that a normalized
# viewer look vector would be (0, 1)
# Rotation_matrix: N x 2 x 2
rotation_matrix = get_rotation_matrix(viewer_pos, viewer_look)
# N x 1 x 2 mm N x 2 x 2 ==> N x 1 x 2 ==> N x 2
r_vpxy_to_vlxy = torch.bmm(vpxy_to_vlxy.unsqueeze(1), rotation_matrix).unsqueeze(1)
# RM: N x 2 x 2 ==> N x D x 2 x 2
expanded_rm = rotation_matrix.unsqueeze(1).expand(n, d, 2, 2).contiguous().view(-1, 2, 2)
# N x D x 2 ==> N*D x 1 x 2 mm N*D x 2 x 2 ==> N*D x 1 x 2 ==> N x D x 2
reshape_vpxy_to_xy = vpxy_to_xy.contiguous().view(-1, 1, 2)
r_vpxy_to_xy = torch.bmm(reshape_vpxy_to_xy, expanded_rm).contiguous().view(n, d, 2)
# N x D x 2
# Get the xy position in this rotated coord system with rvl as the origin
rvl_to_rxy = r_vpxy_to_xy - r_vpxy_to_vlxy.squeeze(1).expand(n, d, 2)
## Z
# VLZ = N x 1
vlz = viewer_look.double()[:, 2]
# Z = N x D x 1
diffz = z - vlz.view(-1, 1, 1).expand(n, d, -1)
## Combine
# rvl_to_rxy: N x D x 2, diffz: N x D x 1
new_xyz = torch.cat([rvl_to_rxy, diffz], 2)
return new_xyz
def get_dir_dist(viewer_pos, viewer_look, batched_target_coords):
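    """Return the per-axis sign and absolute distance of each target coord in the viewer-look frame."""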
if len(batched_target_coords.size()) == 1:
batched_target_coords = batched_target_coords.unsqueeze(0)
xyz = get_xyz_viewer_look_coords_batched(viewer_pos, viewer_look, batched_target_coords)
dist = xyz.abs()
direction = xyz.gt(0).double() - xyz.lt(0).double()
return direction, dist
def get_sampled_direction_vec(viewer_pos, viewer_look, target_coord):
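    """Sample one axis with probability proportional to its distance; encode the chosen axis and sign as a 5-element tensor."""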
directions, dists = get_dir_dist(viewer_pos, viewer_look, target_coord)
dists = dists.squeeze()
directions = directions.squeeze()
ndists = dists / sum(dists)
dim = np.random.choice(3, p=ndists)
direction = directions[dim].item()
dim_l = [(0 if i == dim else 1) for i in range(3)]
dir_l = [0, 1] if direction == -1 else [1, 0]
return torch.tensor(dim_l + dir_l, dtype=torch.long)
def get_max_direction_vec(viewer_pos, viewer_look, target_coord):
directions, dists = get_dir_dist(viewer_pos, viewer_look, target_coord)
dists = dists.squeeze()
directions = directions.squeeze()
ndists = dists / sum(dists)
dim = np.argmax(ndists)
direction = directions[dim].item()
dim_l = [(0 if i == dim else 1) for i in range(3)]
dir_l = [0, 1] if direction == -1 else [1, 0]
return torch.tensor(dim_l + dir_l, dtype=torch.long)
# outputs a dense voxel rep (np array) from a sparse one.
# size should be a tuple of (H, W, D) for the desired voxel representation
# useid=True puts the block id into the voxel representation,
# otherwise put a 1
def densify(blocks, size, center=(0, 0, 0), useid=False):
V = np.zeros((size[0], size[1], size[2]), dtype="int32")
offsets = (size[0] // 2 - center[0], size[1] // 2 - center[1], size[2] // 2 - center[2])
for b in blocks:
x = b[0][0] + offsets[0]
y = b[0][1] + offsets[1]
z = b[0][2] + offsets[2]
if x >= 0 and y >= 0 and z >= 0 and x < size[0] and y < size[1] and z < size[2]:
if type(b[1]) is int:
V[x, y, z] = b[1]
else:
V[x, y, z] = b[1][0]
if not useid:
V[V > 0] = 1
return V, offsets
def center_of_mass(S, seg=None):
seg = seg or [True for i in S]
if len(S[0]) == 2:
m = list(np.round(np.mean([S[i][0] for i in range(len(S)) if seg[i]], axis=0)))
else:
m = list(np.round(np.mean([S[i] for i in range(len(S)) if seg[i]], axis=0)))
return [int(i) for i in m]
def check_l1_dist(a, b, d):
return abs(b[0] - a[0]) <= d[0] and abs(b[1] - a[1]) <= d[1] and abs(b[2] - a[2]) <= d[2]
def sparsify_segment(seg, context):
seg_sparse = []
for i, use in enumerate(seg):
if use:
seg_sparse.append(context[i])
return seg_sparse
def get_dense_array_from_sl(sparse_shape, sl, useid):
center = [sl // 2, sl // 2, sl // 2]
    # densify() already returns (array, offsets); unpack it directly instead of wrapping the
    # tuple in np.asarray, which fails on newer NumPy for ragged sequences.
    shape_dense, _ = densify(sparse_shape, [sl, sl, sl], center=center, useid=useid)
return shape_dense
def convert_sparse_context_seg_to_example(
context_sparse, seg_sparse, c_sl, s_sl, useid, vis=False
):
context_dense = get_dense_array_from_sl(context_sparse, c_sl, useid)
seg_dense_uncentered = get_dense_array_from_sl(seg_sparse, c_sl, useid)
# For visualization
if vis:
context_dense = context_dense + seg_dense_uncentered
else:
context_dense = context_dense - seg_dense_uncentered
shifted_seg_sparse, shift_vec = shift_subsegment_corner(seg_sparse)
seg_dense_centered = get_dense_array_from_sl(shifted_seg_sparse, s_sl, useid)
target_coord = [-x for x in shift_vec]
target_index = coord_to_index(target_coord, c_sl)
return [
torch.from_numpy(context_dense),
torch.from_numpy(seg_dense_centered),
torch.tensor([target_index]),
]
############################################################################
# For these "S" is a list of blocks in ((x,y,z),(id, meta)) format
# the segment is a list of the same length as S with either True or False
# at each entry marking whether that block is in the segment
# each outputs a list of blocks in ((x,y,z),(id, meta)) format
def shift_negative_vec(S, segment, vec, args):
N = []
for s in range(len(segment)):
if not segment[s]:
new_coords = tuple(np.add(S[s][0], vec))
N.append([new_coords, S[s][1]])
else:
if "seg_id" in args:
N.append([S[s][0], (args["seg_id"], S[s][1][1])])
else:
N.append(S[s])
return N
def shift_negative(S, segment, args):
shift_max = args["shift_max"]
"""takes the blocks not in the sgement and shifts them randomly"""
shift_vec = random_int_triple(-shift_max, shift_max)
return shift_negative_vec(S, segment, shift_vec, args)
def rotate_negative(S, segment, args):
c = center_of_mass(S, seg=segment)
r = random.choice([1, -1])
return [rotate_block(S[i], c, r) if segment[i] else S[i] for i in range(len(S))]
def replace_negative(S, segment, args):
data = args["data"]
oseg, oS = data.get_positive()
c_pos = center_of_mass(S, seg=segment)
c_neg = center_of_mass(oS, seg=oseg)
offset = np.add(c_pos, -np.array(c_neg))
N = [S[i] for i in range(len(S)) if not segment[i]]
return N + [shift_block(oS[i], offset) for i in range(len(oS)) if oseg[i]]
class NegativeSampler:
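    """Build negative (corrupted) examples from a positive segment by randomly shifting,
    rotating, or replacing it (probabilities given by ntype_probs)."""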
def __init__(self, dataloader, shift_max=10, ntype_probs=[0.6, 0.2, 0.2]):
# self.data_prob = [x['prob'] for x in dataloaders.values()]
# self.dataloaders = [x['data'] for x in dataloaders.values()]
self.dataloader = dataloader
self.shift_max = shift_max
self.ntype_probs = ntype_probs
self.negative_samplers = [shift_negative, rotate_negative, replace_negative]
def build_negative(self, S, segment):
negative_fn = np.random.choice(self.negative_samplers, p=self.ntype_probs)
return negative_fn(S, segment, {"shift_max": self.shift_max, "data": self.dataloader})
| """
Copyright (c) Facebook, Inc. and its affiliates.
"""
import numpy as np
import random
from datetime import datetime
import sys
import argparse
import torch
import os
from inspect import currentframe, getframeinfo
GEOSCORER_DIR = os.path.dirname(os.path.realpath(__file__))
CRAFTASSIST_DIR = os.path.join(GEOSCORER_DIR, "../")
sys.path.append(CRAFTASSIST_DIR)
from shapes import get_bounds
def pretty_log(log_string):
cf = currentframe().f_back
filename = getframeinfo(cf).filename.split("/")[-1]
print(
"{} {}:{} {}".format(
datetime.now().strftime("%m/%d/%Y %H:%M:%S"), filename, cf.f_lineno, log_string
)
)
sys.stdout.flush()
## Train Fxns ##
def get_base_train_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", type=int, default=1, help="0 for cpu")
parser.add_argument("--batchsize", type=int, default=64, help="batchsize")
parser.add_argument("--dataset", default="shapes", help="shapes/segments/both")
parser.add_argument(
"--epochsize", type=int, default=1000, help="number of examples in an epoch"
)
parser.add_argument("--nepoch", type=int, default=1000, help="number of epochs")
parser.add_argument("--context_sidelength", type=int, default=32, help="size of cube")
parser.add_argument("--hidden_dim", type=int, default=64, help="size of hidden dim")
parser.add_argument("--num_layers", type=int, default=3, help="num layers")
parser.add_argument(
"--blockid_embedding_dim", type=int, default=8, help="size of blockid embedding"
)
parser.add_argument(
"--num_words", type=int, default=256, help="number of words for the blockid embeds"
)
parser.add_argument("--lr", type=float, default=0.1, help="step size for net")
parser.add_argument(
"--optim", type=str, default="adagrad", help="optim type to use (adagrad|sgd|adam)"
)
parser.add_argument("--momentum", type=float, default=0.0, help="momentum")
parser.add_argument("--checkpoint", default="", help="where to save model")
parser.add_argument("--num_workers", type=int, default=4, help="number of dataloader workers")
return parser
def add_dataset_flags(parser):
parser.add_argument(
"--dataset_ratios", type=str, default="shape:1.0", help="comma separated name:prob"
)
parser.add_argument("--useid", type=bool, default=False, help="use blockid")
parser.add_argument("--fixed_cube_size", type=int, default=None, help="fixed_cube_size")
parser.add_argument("--fixed_center", type=bool, default=False, help="fixed_center")
parser.add_argument(
"--min_seg_size", type=int, default=6, help="min seg size for seg data type"
)
parser.add_argument(
"--use_saved_data",
type=bool,
default=False,
help="use preparsed data for this min_seg_size",
)
def add_directional_flags(parser):
parser.add_argument("--spatial_embedding_dim", type=int, default=8, help="size of spatial emb")
parser.add_argument("--output_embedding_dim", type=int, default=8, help="size of output emb")
parser.add_argument(
"--seg_direction_net", type=bool, default=False, help="use segdirnet module"
)
parser.add_argument(
"--seg_use_viewer_pos", type=bool, default=False, help="use viewer pos in seg"
)
parser.add_argument(
"--seg_use_viewer_look", type=bool, default=False, help="use viewer look in seg"
)
parser.add_argument(
"--seg_use_direction", type=bool, default=False, help="use direction in seg"
)
parser.add_argument("--num_seg_dir_layers", type=int, default=3, help="num segdir net layers")
parser.add_argument(
"--cont_use_direction", type=bool, default=False, help="use direction in context"
)
parser.add_argument(
"--cont_use_xyz_from_viewer_look",
type=bool,
default=False,
help="use xyz position relative to viewer look in context emb",
)
def get_dataloader(dataset, opts, collate_fxn):
def init_fn(wid):
np.random.seed(torch.initial_seed() % (2 ** 32))
return torch.utils.data.DataLoader(
dataset,
batch_size=opts["batchsize"],
shuffle=True,
pin_memory=True,
drop_last=True,
num_workers=opts["num_workers"],
worker_init_fn=init_fn,
collate_fn=collate_fxn,
)
def to_cuda(list_modules):
for m in list_modules:
m.cuda()
def multitensor_collate_fxn(x):
"""
Takes a list of BATCHSIZE lists of tensors of length D.
Returns a list of length D of batched tensors.
"""
num_tensors_to_batch = len(x[0])
regroup_tensors = [[] for i in range(num_tensors_to_batch)]
for t_list in x:
for i, t in enumerate(t_list):
regroup_tensors[i].append(t.unsqueeze(0))
batched_tensors = [torch.cat(tl) for tl in regroup_tensors]
return batched_tensors
## 3D Utils ##
def get_side_lengths(bounds):
"""
Bounds should be a list of [min_x, max_x, min_y, max_y, min_z, max_z].
Returns a list of the side lengths.
"""
return [x + 1 for x in (bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4])]
def coord_to_index(coord, sl):
"""
Takes a 3D coordinate in a cube and the cube side length.
Returns index in flattened 3D array.
"""
return coord[0] * sl * sl + coord[1] * sl + coord[2]
def index_to_coord(index, sl):
"""
Takes an index into a flattened 3D array and its side length.
Returns the coordinate in the cube.
"""
coord = []
two_d_slice_size = sl * sl
coord.append(index // two_d_slice_size)
remaining = index % two_d_slice_size
coord.append(remaining // sl)
coord.append(remaining % sl)
return coord
def shift_subsegment_corner(S):
"""
Takes a segment, described as a list of tuples of the form:
((x, y, z), (block_id, ?))
Returns the segment in the same form, shifted to the origin, and the shift vec
"""
bounds = get_bounds(S)
shift_zero_vec = [-bounds[0], -bounds[2], -bounds[4]]
new_S = []
for s in S:
new_S.append((tuple([sum(x) for x in zip(s[0], shift_zero_vec)]), s[1]))
return new_S, shift_zero_vec
def subset_and_scale_3d(init_array, mins, maxs, scale=1):
return scale * init_array[mins[0] : maxs[0], mins[1] : maxs[1], mins[2] : maxs[2]]
def combine_seg_context(seg, context, seg_shift, seg_mult=1):
completed_context = context.clone()
# Calculate the region to copy over, sometimes the segment
# falls outside the range of the context bounding box
c_mins = [int(i) for i in seg_shift]
c_maxs = [int(min(ss + 8, 32)) for ss in seg_shift]
s_mins = [0 for i in range(3)]
# If the edge of the segment goes past the edge of the context (ss + 8 > 32),
# remove the extra from the segment.
s_maxs = [int(8 - max(0, (ss + 8) - 32)) for ss in seg_shift]
seg_to_add = subset_and_scale_3d(seg, s_mins, s_maxs, seg_mult)
context_subset = subset_and_scale_3d(completed_context, c_mins, c_maxs, 1)
completed_context[c_mins[0] : c_maxs[0], c_mins[1] : c_maxs[1], c_mins[2] : c_maxs[2]] = (
seg_to_add + context_subset
)
return completed_context
def get_vector(start, end):
return end - start
def get_random_viewer_info(sl):
viewer_pos = torch.tensor(random_int_triple(0, sl - 1))
viewer_look = torch.tensor(random_int_triple(0, sl - 1))
if viewer_pos.eq(viewer_look).sum() == viewer_pos.size(0):
if viewer_look[0] < sl + 1:
viewer_look[0] += 1
else:
viewer_look[0] -= 1
return viewer_pos, viewer_look
def b_greater_than_a(a, b):
if a == b:
return 0
return 1 if b > a else -1
def shift_block(b, s):
return tuple((tuple((b[0][0] + s[0], b[0][1] + s[1], b[0][2] + s[2])), b[1]))
def rotate_block(b, c, r):
""" rotates the block b around the point c by 90*r degrees
in the xz plane. r should be 1 or -1."""
# TODO add a reflection
c = np.array(c)
p = np.add(b[0], -c)
x = p[0]
z = p[2]
if r == -1:
p[0] = z
p[2] = -x
else:
p[0] = -z
p[2] = x
return (tuple(p + c), b[1])
def random_int_triple(minval, maxval):
t = [
random.randint(minval, maxval),
random.randint(minval, maxval),
random.randint(minval, maxval),
]
return t
def check_inrange(x, minval, maxval):
"""inclusive check"""
return all([v >= minval for v in x]) and all([v <= maxval for v in x])
def normalize(batched_vector):
vec = batched_vector.double()
norm = torch.norm(vec, dim=1)
# Set norm to 1 if it's 0
norm = norm + norm.eq(0).double()
expanded_norm = norm.unsqueeze(1).expand(-1, vec.size()[1])
return torch.div(vec, expanded_norm)
def get_rotation_matrix(viewer_pos, viewer_look):
# VP, VL: N x 3, VP_to_VL: N x 3
vp_to_vl = get_vector(viewer_pos, viewer_look)[:, :2]
nlook_vec = normalize(vp_to_vl)
nly = nlook_vec[:, 1]
    # Nlx necessary to correct for the range of arccos
nlx = nlook_vec[:, 0]
nlx = nlx.gt(0).double() - nlx.lt(0).double() - nlx.eq(0).double()
# Take care of nans created by raising 0 to a power
# and then masking the sin theta to 0 as intended
base = 1 - nly * nly
nan_mask = torch.isnan(torch.pow(base, 0.5)).double()
base = base + nan_mask
sin_theta = nlx * nan_mask.eq(0).double() * torch.pow(base, 0.5)
nly = nly.unsqueeze(1)
sin_theta = sin_theta.unsqueeze(1)
rm_pt1 = torch.cat([nly, sin_theta], 1).unsqueeze(1)
rm_pt2 = torch.cat([-sin_theta, nly], 1).unsqueeze(1)
rm = torch.cat([rm_pt1, rm_pt2], 1)
return rm
def rotate_x_y(coord, rotation_matrix):
return torch.mm(coord.unsqueeze(0), rotation_matrix).squeeze(0)
def float_equals(a, b, epsilon):
return True if abs(a - b) < epsilon else False
def get_argmax_list(vals, epsilon, minlist=False, maxlen=None):
mult = -1 if minlist else 1
max_ind = []
for i, v in enumerate(vals):
if not max_ind or float_equals(max_ind[0][1], v, epsilon):
if maxlen and len(max_ind) == maxlen:
continue
max_ind.append((i, v))
elif mult * (v - max_ind[0][1]) > 0:
max_ind = [(i, v)]
return max_ind
def get_firstmax(vals, epsilon, minlist=False):
return get_argmax_list(vals, epsilon, minlist, 1)[0]
# N -> batch size in training
# D -> num target coord per element
# Viewer pos, viewer_look are N x 3 tensors
# Batched target coords is a N x D x 3 tensor
# Output is a N x D x 3 tensor
def get_xyz_viewer_look_coords_batched(viewer_pos, viewer_look, batched_target_coords):
# First verify the sizing and unsqueeze if necessary
btc_sizes = batched_target_coords.size()
vp_sizes = viewer_pos.size()
vl_sizes = viewer_look.size()
if len(btc_sizes) > 3 or len(vp_sizes) > 2 or len(vl_sizes) > 2:
raise Exception("One input has too many dimensions")
if btc_sizes[-1] != 3 or vp_sizes[-1] != 3 or vl_sizes[-1] != 3:
raise Exception("The last dimension of all inputs should be size 3")
if len(btc_sizes) < 3:
for i in range(3 - len(btc_sizes)):
batched_target_coords = batched_target_coords.unsqueeze(0)
if len(vp_sizes) == 1:
viewer_pos = viewer_pos.unsqueeze(0)
if len(vl_sizes) == 1:
viewer_look = viewer_look.unsqueeze(0)
n = batched_target_coords.size()[0]
d = batched_target_coords.size()[1]
# Handle xy and z separately
# XY = N X D x 2
xy = batched_target_coords[:, :, 0:2].double()
# Z = N x D x 1
z = batched_target_coords[:, :, 2].unsqueeze(2).double()
## XY
# Shift such that viewer pos is the origin
# VPXY, VLXY: N x 2
vpxy = viewer_pos.double()[:, 0:2]
vlxy = viewer_look.double()[:, 0:2]
vpxy_to_vlxy = vlxy - vpxy
# VPXY to XY: N x D x 2
vpxy_to_xy = xy - vpxy.unsqueeze(1).expand(n, d, -1)
# Rotate them around the viewer position such that a normalized
# viewer look vector would be (0, 1)
# Rotation_matrix: N x 2 x 2
rotation_matrix = get_rotation_matrix(viewer_pos, viewer_look)
# N x 1 x 2 mm N x 2 x 2 ==> N x 1 x 2 ==> N x 2
r_vpxy_to_vlxy = torch.bmm(vpxy_to_vlxy.unsqueeze(1), rotation_matrix).unsqueeze(1)
# RM: N x 2 x 2 ==> N x D x 2 x 2
expanded_rm = rotation_matrix.unsqueeze(1).expand(n, d, 2, 2).contiguous().view(-1, 2, 2)
# N x D x 2 ==> N*D x 1 x 2 mm N*D x 2 x 2 ==> N*D x 1 x 2 ==> N x D x 2
reshape_vpxy_to_xy = vpxy_to_xy.contiguous().view(-1, 1, 2)
r_vpxy_to_xy = torch.bmm(reshape_vpxy_to_xy, expanded_rm).contiguous().view(n, d, 2)
# N x D x 2
# Get the xy position in this rotated coord system with rvl as the origin
rvl_to_rxy = r_vpxy_to_xy - r_vpxy_to_vlxy.squeeze(1).expand(n, d, 2)
## Z
# VLZ = N x 1
vlz = viewer_look.double()[:, 2]
# Z = N x D x 1
diffz = z - vlz.view(-1, 1, 1).expand(n, d, -1)
## Combine
# rvl_to_rxy: N x D x 2, diffz: N x D x 1
new_xyz = torch.cat([rvl_to_rxy, diffz], 2)
return new_xyz
def get_dir_dist(viewer_pos, viewer_look, batched_target_coords):
if len(batched_target_coords.size()) == 1:
batched_target_coords = batched_target_coords.unsqueeze(0)
xyz = get_xyz_viewer_look_coords_batched(viewer_pos, viewer_look, batched_target_coords)
dist = xyz.abs()
direction = xyz.gt(0).double() - xyz.lt(0).double()
return direction, dist
def get_sampled_direction_vec(viewer_pos, viewer_look, target_coord):
directions, dists = get_dir_dist(viewer_pos, viewer_look, target_coord)
dists = dists.squeeze()
directions = directions.squeeze()
ndists = dists / sum(dists)
dim = np.random.choice(3, p=ndists)
direction = directions[dim].item()
dim_l = [(0 if i == dim else 1) for i in range(3)]
dir_l = [0, 1] if direction == -1 else [1, 0]
return torch.tensor(dim_l + dir_l, dtype=torch.long)
def get_max_direction_vec(viewer_pos, viewer_look, target_coord):
directions, dists = get_dir_dist(viewer_pos, viewer_look, target_coord)
dists = dists.squeeze()
directions = directions.squeeze()
ndists = dists / sum(dists)
dim = np.argmax(ndists)
direction = directions[dim].item()
dim_l = [(0 if i == dim else 1) for i in range(3)]
dir_l = [0, 1] if direction == -1 else [1, 0]
return torch.tensor(dim_l + dir_l, dtype=torch.long)
# outputs a dense voxel rep (np array) from a sparse one.
# size should be a tuple of (H, W, D) for the desired voxel representation
# useid=True puts the block id into the voxel representation,
# otherwise put a 1
def densify(blocks, size, center=(0, 0, 0), useid=False):
V = np.zeros((size[0], size[1], size[2]), dtype="int32")
offsets = (size[0] // 2 - center[0], size[1] // 2 - center[1], size[2] // 2 - center[2])
for b in blocks:
x = b[0][0] + offsets[0]
y = b[0][1] + offsets[1]
z = b[0][2] + offsets[2]
if x >= 0 and y >= 0 and z >= 0 and x < size[0] and y < size[1] and z < size[2]:
if type(b[1]) is int:
V[x, y, z] = b[1]
else:
V[x, y, z] = b[1][0]
if not useid:
V[V > 0] = 1
return V, offsets
def center_of_mass(S, seg=None):
seg = seg or [True for i in S]
if len(S[0]) == 2:
m = list(np.round(np.mean([S[i][0] for i in range(len(S)) if seg[i]], axis=0)))
else:
m = list(np.round(np.mean([S[i] for i in range(len(S)) if seg[i]], axis=0)))
return [int(i) for i in m]
def check_l1_dist(a, b, d):
return abs(b[0] - a[0]) <= d[0] and abs(b[1] - a[1]) <= d[1] and abs(b[2] - a[2]) <= d[2]
def sparsify_segment(seg, context):
seg_sparse = []
for i, use in enumerate(seg):
if use:
seg_sparse.append(context[i])
return seg_sparse
def get_dense_array_from_sl(sparse_shape, sl, useid):
center = [sl // 2, sl // 2, sl // 2]
    # densify() already returns (array, offsets); unpack it directly instead of wrapping the
    # tuple in np.asarray, which fails on newer NumPy for ragged sequences.
    shape_dense, _ = densify(sparse_shape, [sl, sl, sl], center=center, useid=useid)
return shape_dense
def convert_sparse_context_seg_to_example(
context_sparse, seg_sparse, c_sl, s_sl, useid, vis=False
):
context_dense = get_dense_array_from_sl(context_sparse, c_sl, useid)
seg_dense_uncentered = get_dense_array_from_sl(seg_sparse, c_sl, useid)
# For visualization
if vis:
context_dense = context_dense + seg_dense_uncentered
else:
context_dense = context_dense - seg_dense_uncentered
shifted_seg_sparse, shift_vec = shift_subsegment_corner(seg_sparse)
seg_dense_centered = get_dense_array_from_sl(shifted_seg_sparse, s_sl, useid)
target_coord = [-x for x in shift_vec]
target_index = coord_to_index(target_coord, c_sl)
return [
torch.from_numpy(context_dense),
torch.from_numpy(seg_dense_centered),
torch.tensor([target_index]),
]
############################################################################
# For these "S" is a list of blocks in ((x,y,z),(id, meta)) format
# the segment is a list of the same length as S with either True or False
# at each entry marking whether that block is in the segment
# each outputs a list of blocks in ((x,y,z),(id, meta)) format
def shift_negative_vec(S, segment, vec, args):
N = []
for s in range(len(segment)):
if not segment[s]:
new_coords = tuple(np.add(S[s][0], vec))
N.append([new_coords, S[s][1]])
else:
if "seg_id" in args:
N.append([S[s][0], (args["seg_id"], S[s][1][1])])
else:
N.append(S[s])
return N
def shift_negative(S, segment, args):
shift_max = args["shift_max"]
"""takes the blocks not in the sgement and shifts them randomly"""
shift_vec = random_int_triple(-shift_max, shift_max)
return shift_negative_vec(S, segment, shift_vec, args)
def rotate_negative(S, segment, args):
c = center_of_mass(S, seg=segment)
r = random.choice([1, -1])
return [rotate_block(S[i], c, r) if segment[i] else S[i] for i in range(len(S))]
def replace_negative(S, segment, args):
data = args["data"]
oseg, oS = data.get_positive()
c_pos = center_of_mass(S, seg=segment)
c_neg = center_of_mass(oS, seg=oseg)
offset = np.add(c_pos, -np.array(c_neg))
N = [S[i] for i in range(len(S)) if not segment[i]]
return N + [shift_block(oS[i], offset) for i in range(len(oS)) if oseg[i]]
class NegativeSampler:
def __init__(self, dataloader, shift_max=10, ntype_probs=[0.6, 0.2, 0.2]):
# self.data_prob = [x['prob'] for x in dataloaders.values()]
# self.dataloaders = [x['data'] for x in dataloaders.values()]
self.dataloader = dataloader
self.shift_max = shift_max
self.ntype_probs = ntype_probs
self.negative_samplers = [shift_negative, rotate_negative, replace_negative]
def build_negative(self, S, segment):
negative_fn = np.random.choice(self.negative_samplers, p=self.ntype_probs)
        return negative_fn(S, segment, {"shift_max": self.shift_max, "data": self.dataloader})
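# Example (assumed usage; `train_loader` is hypothetical): draw one corrupted "negative" of a segmented shape.
#   sampler = NegativeSampler(train_loader, shift_max=10, ntype_probs=[0.6, 0.2, 0.2])
#   negative_blocks = sampler.build_negative(S, segment)   # S: block list, segment: per-block bool mask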
CryptoAttacks/tests/Block/test_gcm.py | akbarszcz/CryptoAttacks | 54 | 8848
#!/usr/bin/python
from __future__ import absolute_import, division, print_function
import subprocess
from builtins import bytes, range
from os.path import abspath, dirname
from os.path import join as join_path
from random import randint
from CryptoAttacks.Block.gcm import *
from CryptoAttacks.Utils import log
def test_polynomials():
print("Test polynomials")
Pmod = GF_2k_generator(128, [128,7,2,1,0])
P = Pmod(0b10011010101100110100100110011101100110010111111000111011101000000110110100010101000101100100111100011001010100100110100111011000)
Q = Pmod(0b01111010101010110111000011011100010011101111000001010000011000010000111010001111100001111010110001001000011101000011111110010101)
print(P.to_bits(), bin(P.to_int()), P)
print(Q.to_bits(), bin(Q.to_int()), Q)
w = P*Q
print(w.to_bits(), bin(w.to_int()), w)
assert Q.coefficients == Pmod(Q.coefficients).coefficients
assert Q.coefficients == Pmod(Q.to_int()).coefficients
assert Q.coefficients == Pmod(Q.to_bytes()).coefficients
print('')
Pmod = GF_2k_generator(10, [11,7,2,1,0])
c1 = Pmod(1)
c2 = Pmod(0)
c3 = Pmod(0)
c4 = Pmod(0)
polynomial1 = Polynomial_128([c1,c2,c3,c4])
c1 = Pmod(1236)
c2 = Pmod(0)
c3 = Pmod(0)
c4 = Pmod(0)
polynomial2 = Polynomial_128([c1,c2,c3,c4])
print(polynomial1)
print(polynomial2)
print("+", polynomial1 + polynomial2)
print("*", polynomial1 * polynomial2)
q = polynomial1 / polynomial2
r = polynomial1 % polynomial2
print("/", q)
print("%", r)
print('')
print(polynomial1)
print(polynomial2*q + r)
print('')
def test_gcm():
print("Test GCM")
plaintext = bytes(b'hn9YA(F BW&B (W&&W(RT&WEF f7*WB FTgsdc')
additional = bytes(b'j gej8g0SRYH8s 8s9yf sgd78taDS* GASyd ')
key = bytes(b'<KEY>')
nonce = bytes(b'a drO*1@((js')
ciphertext, tag = gcm_encrypt(plaintext, additional, key, nonce)
assert gcm_verify(tag, ciphertext, additional, key, nonce)
blocks = aes_bytes_to_poly_blocks(ciphertext, additional)
ciphertext2, additional2 = poly_blocks_to_aes_bytes(blocks)
assert ciphertext == ciphertext2
assert additional == additional2
def polynomial_factors_product(factorization):
"""factorization: [(poly1, power), (poly2, power)]"""
result = factorization[0][0].one_element()
for f, f_degree in factorization:
result *= f**f_degree
return result
def test_factor():
print("Test factor")
Pmod = GF_2k_generator(9, [9,7,2,1,0])
c1 = Pmod(31)
c2 = Pmod(0)
c3 = Pmod(0)
c4 = Pmod(3)
polynomial1 = Polynomial_128([c1,c2,c3,c4])
c1 = Pmod(237)
c2 = Pmod(1)
c3 = Pmod(0)
c4 = Pmod(10)
polynomial2 = Polynomial_128([c1,c2,c3,c4])
polynomial = polynomial1 * polynomial2
print(polynomial1)
print(polynomial2)
print(polynomial)
print(polynomial.monic())
print('')
factorization = factor_polynomial(polynomial)
print(factorization)
result = polynomial.one_element()
for f, f_degree in factorization:
result *= f**f_degree
print(result)
print('')
assert polynomial_factors_product(factorization) == polynomial.monic()
def test_repeated_nonce():
print("Test Key-Recovery Attack on GCM with Repeated Nonces")
for _ in range(3):
nonce = random_bytes(12)
key = random_bytes(16)
h = bytes(AES.new(key, AES.MODE_ECB).encrypt(bytes(b'\x00'*16)))
h = aes_polynomial(h)
ciphertexts_additionals_tags = []
for _ in range(4):
plaintext = random_bytes(randint(0, 50))
additional = random_bytes(randint(0, 50))
ciphertext, tag = gcm_encrypt(plaintext, additional, key, nonce)
ciphertexts_additionals_tags.append((ciphertext, additional, tag))
valid_ciphertext, valid_additional, valid_tag = ciphertexts_additionals_tags[0]
auth_key_candidates = recover_key_repated_nonce(ciphertexts_additionals_tags)
assert h.to_bytes() in auth_key_candidates
# try found auth key candidates
correct_auth_key_found = False
for auth_key in auth_key_candidates:
forged_ciphertext = random_bytes(randint(0, 10))
forged_additional = random_bytes(randint(0, 10))
forged_tag = gcm_forge_tag(ciphertext=forged_ciphertext, additional=forged_additional, auth_key=auth_key,
valid_ciphertext=valid_ciphertext, valid_additional=valid_additional, valid_tag=valid_tag)
if gcm_verify(forged_tag, forged_ciphertext, forged_additional, key, nonce):
correct_auth_key_found = True
break
assert correct_auth_key_found
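# Why this works (background note): reusing a GCM nonce reuses both the GHASH key H and the tag mask
# E_K(nonce || counter0). Each tag is a polynomial in H over GF(2^128) evaluated on the AAD and ciphertext
# blocks, so two (ciphertext, additional, tag) triples under one nonce give a known polynomial whose roots
# include H; factoring it (as in test_factor above) yields a short list of candidate auth keys to verify.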
def run():
log.level = 'debug'
test_polynomials()
test_gcm()
test_factor()
test_repeated_nonce()
if __name__ == "__main__":
run()
python_clean_architecture/use_cases/orderdata_use_case.py | jfsolarte/python_clean_architecture | 0 | 8849
from python_clean_architecture.shared import use_case as uc
from python_clean_architecture.shared import response_object as res
class OrderDataGetUseCase(uc.UseCase):
def __init__(self, repo):
self.repo = repo
def execute(self, request_object):
#if not request_object:
#return res.ResponseFailure.build_from_invalid_request_object(request_object)
storage_rooms = self.repo.order(items=request_object.items)
return res.ResponseSuccess(storage_rooms)
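# Example (assumed wiring; `memrepo` and `request_object` are hypothetical): the use case only needs a
# repository exposing .order(items=...) and a request object exposing .items.
#   use_case = OrderDataGetUseCase(repo=memrepo)
#   response = use_case.execute(request_object)
#   response.value   # -> whatever repo.order() returned, wrapped in ResponseSuccess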
owscapable/swe/common.py | b-cube/OwsCapable | 1 | 8850
from __future__ import (absolute_import, division, print_function)
from owscapable.util import nspath_eval
from owscapable.namespaces import Namespaces
from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime
from dateutil import parser
from datetime import timedelta
from owscapable.etree import etree
def get_namespaces():
ns = Namespaces()
return ns.get_namespaces(["swe20", "xlink"])
namespaces = get_namespaces()
def nspv(path):
return nspath_eval(path, namespaces)
def make_pair(string, cast=None):
if string is None:
return None
string = string.split(" ")
if cast is not None:
try:
string = map(lambda x: cast(x), string)
except:
print("Could not cast pair to correct type. Setting to an empty tuple!")
string = ""
return tuple(string)
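# Example: make_pair("0.5 2.5", float) -> (0.5, 2.5); make_pair("1 10", int) -> (1, 10); make_pair(None) -> None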
def get_uom(element):
uom = testXMLAttribute(element, "code")
if uom is None:
uom = testXMLAttribute(element, nspv("xlink:href"))
return uom
def get_boolean(value):
if value is None:
return None
if value is True or value.lower() in ["yes","true"]:
return True
elif value is False or value.lower() in ["no","false"]:
return False
else:
return None
def get_int(value):
try:
return int(value)
except:
return None
def get_float(value):
try:
return float(value)
except:
return None
# Use real lists (not map iterators) so membership tests keep working on Python 3
AnyScalar = [nspv(x) for x in ["swe20:Boolean", "swe20:Count", "swe20:Quantity", "swe20:Time", "swe20:Category", "swe20:Text"]]
AnyNumerical = [nspv(x) for x in ["swe20:Count", "swe20:Quantity", "swe20:Time"]]
AnyRange = [nspv(x) for x in ["swe20:QuantityRange", "swe20:TimeRange", "swe20:CountRange", "swe20:CategoryRange"]]
class NamedObject(object):
def __init__(self, element):
# No call to super(), the type object will process that.
self.name = testXMLAttribute(element, "name")
try:
self.content = eval(element[-1].tag.split("}")[-1])(element[-1])
except IndexError:
self.content = None
except BaseException:
raise
    # Revert to the content if the attribute does not exist
def __getattr__(self, name):
return getattr(self.content, name)
class AbstractSWE(object):
def __init__(self, element):
# Attributes
self.id = testXMLAttribute(element,"id") # string, optional
# Elements
self.extention = [] # anyType, min=0, max=X
class AbstractSWEIdentifiable(AbstractSWE):
def __init__(self, element):
super(AbstractSWEIdentifiable, self).__init__(element)
# Elements
self.identifier = testXMLValue(element.find(nspv("swe20:identifier"))) # anyURI, min=0
self.label = testXMLValue(element.find(nspv("swe20:label"))) # string, min=0
self.description = testXMLValue(element.find(nspv("swe20:description"))) # string, min=0
class AbstractDataComponent(AbstractSWEIdentifiable):
def __init__(self, element):
super(AbstractDataComponent, self).__init__(element)
# Attributes
self.definition = testXMLAttribute(element,"definition") # anyURI, required
self.updatable = get_boolean(testXMLAttribute(element,"updatable")) # boolean, optional
self.optional = get_boolean(testXMLAttribute(element,"optional")) or False # boolean, default=False
class AbstractSimpleComponent(AbstractDataComponent):
def __init__(self, element):
super(AbstractSimpleComponent, self).__init__(element)
# Attributes
self.referenceFrame = testXMLAttribute(element,"referenceFrame") # anyURI, optional
self.axisID = testXMLAttribute(element,"axisID") # string, optional
# Elements
        quality_elements = [e.find('*') for e in element.findall(nspv("swe20:quality"))]
        self.quality = [q for q in (Quality(e) for e in quality_elements if e is not None) if q is not None]
try:
self.nilValues = NilValues(element.find(nspv("swe20:nilValues")))
except:
self.nilValues = None
class Quality(object):
def __new__(cls, element):
t = element.tag.split("}")[-1]
if t == "Quantity":
return Quantity(element)
elif t == "QuantityRange":
return QuantityRange(element)
elif t == "Category":
return Category(element)
elif t == "Text":
return Text(element)
else:
return None
class NilValues(AbstractSWE):
def __init__(self, element):
super(NilValues, self).__init__(element)
        self.nilValue = [nilValue(x) for x in element.findall(nspv("swe20:nilValue"))]   # string, min=0, max=X
class nilValue(object):
def __init__(self, element):
self.reason = testXMLAttribute(element, "reason")
self.value = testXMLValue(element)
class AllowedTokens(AbstractSWE):
def __init__(self, element):
super(AllowedTokens, self).__init__(element)
        self.value = [v for v in (testXMLValue(x) for x in element.findall(nspv("swe20:value"))) if v]   # string, min=0, max=X
self.pattern = testXMLValue(element.find(nspv("swe20:pattern"))) # string (Unicode Technical Standard #18, Version 13), min=0
class AllowedValues(AbstractSWE):
def __init__(self, element):
super(AllowedValues, self).__init__(element)
        self.value = [v for v in (get_float(testXMLValue(x)) for x in element.findall(nspv("swe20:value"))) if v is not None]
        self.interval = [p for p in (make_pair(testXMLValue(x)) for x in element.findall(nspv("swe20:interval"))) if p]
self.significantFigures = get_int(testXMLValue(element.find(nspv("swe20:significantFigures")))) # integer, min=0
class AllowedTimes(AbstractSWE):
def __init__(self, element):
super(AllowedTimes, self).__init__(element)
        self.value = [v for v in (testXMLValue(x) for x in element.findall(nspv("swe20:value"))) if v]
        self.interval = [p for p in (make_pair(testXMLValue(x)) for x in element.findall(nspv("swe20:interval"))) if p]
self.significantFigures = get_int(testXMLValue(element.find(nspv("swe20:significantFigures")))) # integer, min=0
class Boolean(AbstractSimpleComponent):
def __init__(self, element):
super(Boolean, self).__init__(element)
# Elements
"""
6.2.1 Boolean
        A Boolean representation of a property can take only two values that should be "true/false" or "yes/no".
"""
        self.value = get_boolean(testXMLValue(element.find(nspv("swe20:value"))))   # boolean, min=0, max=1 (stored on the instance, not a local)
class Text(AbstractSimpleComponent):
def __init__(self, element):
super(Text, self).__init__(element)
# Elements
"""
Req 6. A textual representation shall at least consist of a character string.
"""
self.value = testXMLValue(element.find(nspv("swe20:value"))) # string, min=0, max=1
try:
self.constraint = AllowedTokens(element.find(nspv("swe20:constraint/swe20:AllowedTokens"))) # AllowedTokens, min=0, max=1
except:
self.constraint = None
class Category(AbstractSimpleComponent):
def __init__(self, element):
super(Category, self).__init__(element)
# Elements
self.codeSpace = testXMLAttribute(element.find(nspv("swe20:codeSpace")), nspv("xlink:href")) # Reference, min=0, max=1
self.value = testXMLValue(element.find(nspv("swe20:value"))) # string, min=0, max=1
try:
self.constraint = AllowedTokens(element.find(nspv("swe20:constraint/swe20:AllowedTokens"))) # AllowedTokens, min=0, max=1
except:
self.constraint = None
class CategoryRange(Category):
def __init__(self, element):
super(CategoryRange, self).__init__(element)
# Elements
value = testXMLValue(element.find(nspv("swe20:value")))
self.values = make_pair(value) if value is not None else None
class Count(AbstractSimpleComponent):
def __init__(self, element):
super(Count, self).__init__(element)
# Elements
self.value = get_int(testXMLValue(element.find(nspv("swe20:value")))) # integer, min=0, max=1
try:
self.constraint = AllowedValues(element.find(nspv("swe20:constraint/swe20:AllowedValues"))) # AllowedValues, min=0, max=1
except:
self.constraint = None
class CountRange(Count):
def __init__(self, element):
super(CountRange, self).__init__(element)
# Elements
value = testXMLValue(element.find(nspv("swe20:value")))
self.value = make_pair(value,int) if value is not None else None
class Quantity(AbstractSimpleComponent):
def __init__(self, element):
super(Quantity, self).__init__(element)
# Elements
self.uom = get_uom(element.find(nspv("swe20:uom")))
self.value = get_float(testXMLValue(element.find(nspv("swe20:value")))) # double, min=0, max=1
try:
self.constraint = AllowedValues(element.find(nspv("swe20:constraint/swe20:AllowedValues"))) # AllowedValues, min=0, max=1
except:
self.constraint = None
class QuantityRange(Quantity):
def __init__(self, element):
super(QuantityRange, self).__init__(element)
# Elements
value = testXMLValue(element.find(nspv("swe20:value")))
self.value = make_pair(value,float) if value is not None else None
def get_time(value, referenceTime, uom):
try:
value = parser.parse(value)
except (AttributeError, ValueError): # Most likely an integer/float using a referenceTime
try:
if uom.lower() == "s":
value = referenceTime + timedelta(seconds=float(value))
elif uom.lower() == "min":
value = referenceTime + timedelta(minutes=float(value))
elif uom.lower() == "h":
value = referenceTime + timedelta(hours=float(value))
elif uom.lower() == "d":
value = referenceTime + timedelta(days=float(value))
except (AttributeError, ValueError):
pass
except OverflowError: # Too many numbers (> 10) or INF/-INF
if value.lower() == "inf":
value = InfiniteDateTime()
elif value.lower() == "-inf":
value = NegativeInfiniteDateTime()
return value
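# Example: get_time("2009-06-01T00:00:00Z", None, None) parses the ISO timestamp directly; values that
# dateutil cannot parse are treated as numeric offsets from referenceTime in the given unit (s/min/h/d).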
class Time(AbstractSimpleComponent):
def __init__(self, element):
super(Time, self).__init__(element)
# Elements
self.uom = get_uom(element.find(nspv("swe20:uom")))
try:
self.constraint = AllowedTimes(element.find(nspv("swe20:constraint/swe20:AllowedTimes"))) # AllowedTimes, min=0, max=1
except:
self.constraint = None
# Attributes
self.localFrame = testXMLAttribute(element,"localFrame") # anyURI, optional
try:
self.referenceTime = parser.parse(testXMLAttribute(element,"referenceTime")) # dateTime, optional
except (AttributeError, ValueError):
self.referenceTime = None
value = testXMLValue(element.find(nspv("swe20:value"))) # TimePosition, min=0, max=1
self.value = get_time(value, self.referenceTime, self.uom)
class TimeRange(AbstractSimpleComponent):
def __init__(self, element):
super(TimeRange, self).__init__(element)
# Elements
self.uom = get_uom(element.find(nspv("swe20:uom")))
try:
self.constraint = AllowedTimes(element.find(nspv("swe20:constraint/swe20:AllowedTimes"))) # AllowedTimes, min=0, max=1
except:
self.constraint = None
# Attributes
self.localFrame = testXMLAttribute(element,"localFrame") # anyURI, optional
try:
self.referenceTime = parser.parse(testXMLAttribute(element,"referenceTime")) # dateTime, optional
except (AttributeError, ValueError):
self.referenceTime = None
values = make_pair(testXMLValue(element.find(nspv("swe20:value")))) # TimePosition, min=0, max=1
self.value = [get_time(t, self.referenceTime, self.uom) for t in values]
class DataRecord(AbstractDataComponent):
def __init__(self, element):
super(DataRecord, self).__init__(element)
# Elements
self.field = [Field(x) for x in element.findall(nspv("swe20:field"))]
def get_by_name(self, name):
return next((x for x in self.field if x.name == name), None)
class Field(NamedObject):
def __init__(self, element):
super(Field, self).__init__(element)
class Vector(AbstractDataComponent):
def __init__(self, element):
super(Vector, self).__init__(element)
# Elements
self.coordinate = [Coordinate(x) for x in element.findall(nspv("swe20:coordinate"))]
# Attributes
self.referenceFrame = testXMLAttribute(element,"referenceFrame") # anyURI, required
self.localFrame = testXMLAttribute(element,"localFrame") # anyURI, optional
def get_by_name(self, name):
return next((x for x in self.coordinate if x.name == name), None)
class Coordinate(NamedObject):
def __init__(self, element):
super(Coordinate, self).__init__(element)
#if element[-1].tag not in AnyNumerical:
# print "Coordinate does not appear to be an AnyNumerical member"
class DataChoice(AbstractDataComponent):
def __init__(self, element):
super(DataChoice, self).__init__(element)
self.item = [Item(x) for x in element.findall(nspv("swe20:item"))]
def get_by_name(self, name):
return next((x for x in self.item if x.name == name), None)
class Item(NamedObject):
def __init__(self, element):
super(Item, self).__init__(element)
class DataArray(AbstractDataComponent):
def __init__(self, element):
super(DataArray, self).__init__(element)
self.elementCount = element.find(nspv("swe20:elementCount/swe20:Count")) # required
self.elementType = ElementType(element.find(nspv("swe20:elementType"))) # required
self.values = testXMLValue(element.find(nspv("swe20:values")))
try:
self.encoding = AbstractEncoding(element.find(nspv("swe20:encoding")))
except:
self.encoding = None
class Matrix(AbstractDataComponent):
def __init__(self, element):
super(Matrix, self).__init__(element)
self.elementCount = element.find(nspv("swe20:elementCount/swe20:Count")) # required
self.elementType = ElementType(element.find(nspv("swe20:elementType"))) # required
self.encoding = AbstractEncoding(element.find(nspv("swe20:encoding")))
self.values = testXMLValue(element.find(nspv("swe20:values")))
self.referenceFrame = testXMLAttribute(element, "referenceFrame") # anyURI, required
self.localFrame = testXMLAttribute(element, "localFrame") # anyURI, optional
class DataStream(AbstractSWEIdentifiable):
def __init__(self, element):
super(DataStream, self).__init__(element)
self.elementCount = element.find(nspv("swe20:elementCount/swe20:Count")) # optional
self.elementType = ElementType(element.find(nspv("swe20:elementType"))) # optional
self.encoding = AbstractEncoding(element.find(nspv("swe20:encoding")))
self.values = testXMLValue(element.find(nspv("swe20:values")))
class ElementType(NamedObject):
def __init__(self, element):
super(ElementType, self).__init__(element)
class AbstractEncoding(object):
def __new__(cls, element):
t = element[-1].tag.split("}")[-1]
if t == "TextEncoding":
return super(AbstractEncoding, cls).__new__(TextEncoding, element)
elif t == "XMLEncoding":
return super(AbstractEncoding, cls).__new__(XMLEncoding, element)
elif t == "BinaryEncoding":
return super(AbstractEncoding, cls).__new__(BinaryEncoding, element)
class TextEncoding(AbstractEncoding):
def __init__(self, element):
self.tokenSeparator = testXMLAttribute(element[-1], "tokenSeparator") # string, required
self.blockSeparator = testXMLAttribute(element[-1], "blockSeparator") # string, required
self.decimalSeparator = testXMLAttribute(element[-1], "decimalSeparator") or "." # string, optional, default="."
        collapse = get_boolean(testXMLAttribute(element[-1], "collapseWhiteSpaces"))   # boolean, optional
        self.collapseWhiteSpaces = True if collapse is None else collapse             # default=True only when the attribute is absent
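# Example (hedged sketch): parsing an encoding wrapper that holds a swe:TextEncoding element.
#   xml = ('<swe:encoding xmlns:swe="http://www.opengis.net/swe/2.0">'
#          '<swe:TextEncoding tokenSeparator="," blockSeparator="@@"/></swe:encoding>')
#   enc = AbstractEncoding(etree.fromstring(xml))
#   enc.tokenSeparator == "," and enc.blockSeparator == "@@"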
class XMLEncoding(AbstractEncoding):
def __init__(self, element):
raise NotImplementedError
class BinaryEncoding(AbstractEncoding):
def __init__(self, element):
raise NotImplementedError
| <filename>owscapable/swe/common.py
main_fed.py | gao969/scaffold-dgc-clustering | 0 | 8851
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import copy
import numpy as np
from torchvision import datasets, transforms
import torch
import os
import torch.distributed as dist
from utils.sampling import mnist_iid, mnist_noniid, cifar_iid
from utils.options import args_parser
from models.Update import LocalUpdate
from models.Update import LocalUpdateF
from models.Nets import MLP, CNNMnist, CNNCifar
from models.Fed import FedAvg
from models.test import test_img
from torch.multiprocessing import Process
from deep_gradient_compression import DGC
import json
# __name__ is a built-in variable: when this file (main_fed.py) is run directly it defaults to "__main__",
# but when another .py file imports main_fed.py, __name__ inside main_fed.py becomes the module name instead.
if __name__ == '__main__':
# parse args
args = args_parser()
args.device = torch.device('cuda:{}'.format(args.gpu))
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
rank = 0
device_id = rank
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
dist.init_process_group(backend='gloo', rank=rank, world_size=args.world_size)
# if torch.cuda.is_available() and args.gpu != -1 else 'cpu'
# load dataset and split users
if args.dataset == 'mnist':
        # ToTensor() scales pixels into (0, 1); Normalize() applies (data - 0.1307) / 0.3081, spreading values to roughly (-1, 1)
trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
if trans_mnist is not None:
print(1)
print(trans_mnist)
        # MNIST: 60,000 training images and 10,000 test images
dataset_train = datasets.MNIST('../data/mnist/', train=True, download=True, transform=trans_mnist)
dataset_test = datasets.MNIST('../data/mnist/', train=False, download=True, transform=trans_mnist)
# sample users
        # IID or non-IID split of the data across users
if args.iid:
dict_users = mnist_iid(dataset_train, args.num_users)
else:
dict_users = mnist_noniid(dataset_train, args.num_users)
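        # For reference (hedged sketch of utils/sampling.mnist_iid in FedAvg-style repos): each of the
        # num_users clients gets an equal-sized random subset of indices without replacement, e.g.
        #   num_items = len(dataset) // num_users
        #   dict_users[i] = set(np.random.choice(all_idxs, num_items, replace=False))
        # mnist_noniid instead sorts by label and deals out label-skewed shards.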
elif args.dataset == 'cifar':
trans_cifar = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
dataset_train = datasets.CIFAR10('../data/cifar', train=True, download=True, transform=trans_cifar)
dataset_test = datasets.CIFAR10('../data/cifar', train=False, download=True, transform=trans_cifar)
if args.iid:
dict_users = cifar_iid(dataset_train, args.num_users)
else:
exit('Error: only consider IID setting in CIFAR10')
else:
exit('Error: unrecognized dataset')
img_size = dataset_train[0][0].shape
# print('df ',img_size) [1,28,28]
# build model
# print(args.model)
if args.model == 'cnn' and args.dataset == 'cifar':
net_glob = CNNCifar(args=args).to(args.device)
elif args.model == 'cnn' and args.dataset == 'mnist':
net_glob = CNNMnist(args=args).to(args.device)
elif args.model == 'mlp':
len_in = 1
for x in img_size:
            # print('x value', x)
len_in *= x
net_glob = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
# add
control_global = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
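        # control_global is the SCAFFOLD server control variate c, kept with the same architecture as the global model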
else:
exit('Error: unrecognized model')
    # put the global model in training mode
net_glob.train()
print(net_glob)
    control_weights = control_global.state_dict()
# copy weights
    # initialise the global model weights
w_glob = net_glob.state_dict()
c_glob = copy.deepcopy(net_glob.state_dict())
# print(w_glob)
# training
loss_train = []
accuracy = []
cv_loss, cv_acc = [], []
val_loss_pre, counter = 0, 0
net_best = None
best_loss = None
val_acc_list, net_list = [], []
count = 0, 0
test_acc_list = []
if args.all_clients:
print("Aggregation over all clients")
w_locals = [w_glob for i in range(args.num_users)]
# add
else:
        # initialise the per-client local control variates from the global control weights
c_local = [MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device) for i in
range(args.num_users)]
for net in c_local:
net.load_state_dict(control_weights)
delta_c = copy.deepcopy(net_glob.state_dict())
# delta_x = copy.deepcopy(net_glob.state_dict())
# with open("test.txt", "w") as f:
# for i in range(0, len(c_local)):
# for k,v in c_local[i].state_dict().items():
# f.write(f"{k},{v}\n".format(k,v))
# with open("test.txt", "a") as f:
# for i in range(0, len(c_local)):
# for k, v in w_locals[i].items():
# f.write(f"{k},{v}\n".format(k, v))
    # add: initialise the accumulated deltas
# print("why?")
for iter in range(args.epochs):
        # reset the control-variate deltas at the start of each round
for i in delta_c:
delta_c[i] = 0.0
# for i in delta_x:
# delta_x[i] = 0.0
loss_locals = []
if not args.all_clients:
w_locals = []
m = max(int(args.frac * args.num_users), 1)
        # randomly sample m "lucky" clients for this round
idxs_users = np.random.choice(range(args.num_users), m, replace=False)
for idx in idxs_users:
            # local update with momentum SGD
local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
            w, loss, local_delta_c, local_delta, control_local_w = local.train(
                net=copy.deepcopy(net_glob).to(args.device), control_local=c_local[idx],
                control_global=control_global, rank=rank, device_id=device_id, size=args.world_size)
# add
if iter != 0:
c_local[idx].load_state_dict(control_local_w)
if args.all_clients:
w_locals[idx] = copy.deepcopy(w)
else:
w_locals.append(copy.deepcopy(w))
# add
loss_locals.append(copy.deepcopy(loss))
# add
for i in delta_c:
if iter != 0:
delta_c[i] += w[i]
else:
delta_c[i] += local_delta_c[i]
# delta_x[i] += local_delta[i]
# add
# update the delta C
for i in delta_c:
delta_c[i] /= m
# delta_x[i] /= m
# update global weights
w_glob = FedAvg(w_locals)
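        # For reference (hedged sketch of models/Fed.FedAvg in FedAvg-style repos): an element-wise average
        # of the collected client state_dicts, e.g.
        #   w_avg = copy.deepcopy(w[0])
        #   for k in w_avg.keys():
        #       for i in range(1, len(w)):
        #           w_avg[k] += w[i][k]
        #       w_avg[k] = torch.div(w_avg[k], len(w))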
        # add: update the global control variate c and global weights w
# w_glob = net_glob.state_dict()
control_global_w = control_global.state_dict()
for i in control_global_w:
if iter !=0:
# w_glob[i] = delta_x[i]
# else:
# w_glob[i] += delta_x[i]
control_global_w[i] += (m / args.num_users) * delta_c[i]
# copy weight to net_glob
net_glob.load_state_dict(w_glob)
# add
control_global.load_state_dict(control_global_w)
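        # SCAFFOLD bookkeeping (background note): after K local steps with learning rate lr each client i forms
        #   c_i^+ = c_i - c + (x - y_i) / (K * lr)
        # and the server updates its control variate with the sampled fraction |S|/N of clients,
        #   c <- c + (|S| / N) * sum_i (c_i^+ - c_i)
        # which is what the (m / args.num_users) * delta_c[i] term above implements.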
# print loss
loss_avg = sum(loss_locals) / len(loss_locals)
print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg))
loss_train.append(loss_avg)
# acc_train, loss_train = test_img(net_glob, dataset_train, args)
acc_test, loss_test = test_img(net_glob, dataset_test, args)
accuracy.append(acc_test)
# add
for c in range(args.num_users):
local_model = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
torch.cuda.empty_cache()
# net_glob.eval()
# print("Training accuracy: {:.2f}".format(acc_train))
# print("Testing accuracy: {:.2f}".format(acc_test))
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
# Fedavg
# build model
if args.model == 'cnn' and args.dataset == 'cifar':
net_globF = CNNCifar(args=args).to(args.device)
elif args.model == 'cnn' and args.dataset == 'mnist':
net_globF = CNNMnist(args=args).to(args.device)
elif args.model == 'mlp':
len_in = 1
for x in img_size:
len_in *= x
net_globF = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
else:
exit('Error: unrecognized model')
print(net_globF)
net_globF.train()
# copy weights
w_globF = net_globF.state_dict()
# training
loss_trainF = []
accuracyF = []
cv_loss, cv_acc = [], []
val_loss_pre, counter = 0, 0
net_best = None
best_loss = None
val_acc_list, net_list = [], []
if args.all_clients:
print("Aggregation over all clients")
w_localsF = [w_globF for i in range(args.num_users)]
for iter in range(args.epochs):
loss_locals = []
if not args.all_clients:
w_localsF = []
m = max(int(args.frac * args.num_users), 1)
idxs_users = np.random.choice(range(args.num_users), m, replace=False)
for idx in idxs_users:
localF = LocalUpdateF(args=args, dataset=dataset_train, idxs=dict_users[idx])
w, loss = localF.train(net=copy.deepcopy(net_globF).to(args.device))
if args.all_clients:
w_localsF[idx] = copy.deepcopy(w)
else:
w_localsF.append(copy.deepcopy(w))
loss_locals.append(copy.deepcopy(loss))
# update global weights
w_globF = FedAvg(w_localsF)
# copy weight to net_globF
net_globF.load_state_dict(w_globF)
# print loss
loss_avgF = sum(loss_locals) / len(loss_locals)
print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avgF))
loss_trainF.append(loss_avgF)
acc_test, loss_test = test_img(net_globF, dataset_test, args)
accuracyF.append(acc_test)
# plot loss curve
plt.figure()
print(loss_train, loss_trainF)
plt.plot(range(len(loss_train)), loss_train, label='Scaffold', zorder=2)
plt.plot(range(len(loss_trainF)), loss_trainF, 'r', label='FedAvg',zorder=1)
plt.ylabel('train_loss')
plt.xlabel('epochs')
plt.legend(loc='best')
plt.savefig('./save/fed_{}_{}_{}_{}_iid{}.png'.format(args.dataset, args.model, args.epochs, 'train_loss', args.iid))
# testing
net_glob.eval()
acc_train, loss_train = test_img(net_glob, dataset_train, args)
acc_test, loss_test = test_img(net_glob, dataset_test, args)
print("Training accuracy: {:.2f}".format(acc_train))
print("Testing accuracy: {:.2f}".format(acc_test))
# plot loss curve
plt.figure()
# plt.plot((np.arange(1, len(accuracy)), 1), accuracy, 'r')
plt.plot(range(len(accuracy)), accuracy, label='Scaffold', zorder=2)
plt.plot(range(len(accuracyF)), accuracyF, 'r', label='FedAvg', zorder=1)
plt.ylabel('test_acc')
plt.xlabel('epochs')
plt.legend(loc='best')
plt.savefig('./save/fed_{}_{}_{}_{}_iid{}.png'.format(args.dataset, args.model, args.epochs, 'acc_test', args.iid))
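    # Example invocation (hedged; the exact flags are defined in utils/options.py):
    #   python main_fed.py --dataset mnist --model mlp --epochs 50 --frac 0.1 --num_users 100 --gpu 0 --iid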
# -*- coding: utf-8 -*-
# Python version: 3.6
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import copy
import numpy as np
from torchvision import datasets, transforms
import torch
import os
import torch.distributed as dist
from utils.sampling import mnist_iid, mnist_noniid, cifar_iid
from utils.options import args_parser
from models.Update import LocalUpdate
from models.Update import LocalUpdateF
from models.Nets import MLP, CNNMnist, CNNCifar
from models.Fed import FedAvg
from models.test import test_img
from torch.multiprocessing import Process
from deep_gradient_compression import DGC
import json
# __name__是内置的变量,在执行当前文件(main_fed.py)时,默认值为__main__
# 但是如果其他.py文件import当前文件(main_fed.py)时,在其他文件中执行main_fed.py中的__name__,此时main_fed.py中的__name__默认值为文件名main_fed.py
if __name__ == '__main__':
# parse args
args = args_parser()
args.device = torch.device('cuda:{}'.format(args.gpu))
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
rank = 0
device_id = rank
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
dist.init_process_group(backend='gloo', rank=rank, world_size=args.world_size)
# if torch.cuda.is_available() and args.gpu != -1 else 'cpu'
# load dataset and split users
if args.dataset == 'mnist':
# ToTensor():归一数据到(0,1),Normalize():(date-0.1307)/0.3081,将数据分布到(-1, 1)
trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
if trans_mnist is not None:
print(1)
print(trans_mnist)
# 测试(60000)和训练集(10000)
dataset_train = datasets.MNIST('../data/mnist/', train=True, download=True, transform=trans_mnist)
dataset_test = datasets.MNIST('../data/mnist/', train=False, download=True, transform=trans_mnist)
# sample users
# Noniid数据
if args.iid:
dict_users = mnist_iid(dataset_train, args.num_users)
else:
dict_users = mnist_noniid(dataset_train, args.num_users)
elif args.dataset == 'cifar':
trans_cifar = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
dataset_train = datasets.CIFAR10('../data/cifar', train=True, download=True, transform=trans_cifar)
dataset_test = datasets.CIFAR10('../data/cifar', train=False, download=True, transform=trans_cifar)
if args.iid:
dict_users = cifar_iid(dataset_train, args.num_users)
else:
exit('Error: only consider IID setting in CIFAR10')
else:
exit('Error: unrecognized dataset')
img_size = dataset_train[0][0].shape
# print('df ',img_size) [1,28,28]
# build model
# print(args.model)
if args.model == 'cnn' and args.dataset == 'cifar':
net_glob = CNNCifar(args=args).to(args.device)
elif args.model == 'cnn' and args.dataset == 'mnist':
net_glob = CNNMnist(args=args).to(args.device)
elif args.model == 'mlp':
len_in = 1
for x in img_size:
            # print('x value', x)
len_in *= x
net_glob = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
# add
control_global = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
else:
exit('Error: unrecognized model')
    # set the model to training mode
net_glob.train()
print(net_glob)
control_weights =control_global.state_dict()
# copy weights
    # initialize the global weights
w_glob = net_glob.state_dict()
c_glob = copy.deepcopy(net_glob.state_dict())
# print(w_glob)
# training
loss_train = []
accuracy = []
cv_loss, cv_acc = [], []
val_loss_pre, counter = 0, 0
net_best = None
best_loss = None
val_acc_list, net_list = [], []
    count = 0
test_acc_list = []
if args.all_clients:
print("Aggregation over all clients")
w_locals = [w_glob for i in range(args.num_users)]
# add
else:
        # initialize each client's local control variate (one network per client)
c_local = [MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device) for i in
range(args.num_users)]
for net in c_local:
net.load_state_dict(control_weights)
delta_c = copy.deepcopy(net_glob.state_dict())
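    # delta_c reuses the state-dict structure to accumulate, per parameter tensor, the quantities averaged over the sampled clients each round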
# delta_x = copy.deepcopy(net_glob.state_dict())
# with open("test.txt", "w") as f:
# for i in range(0, len(c_local)):
# for k,v in c_local[i].state_dict().items():
# f.write(f"{k},{v}\n".format(k,v))
# with open("test.txt", "a") as f:
# for i in range(0, len(c_local)):
# for k, v in w_locals[i].items():
# f.write(f"{k},{v}\n".format(k, v))
    # add: initialize the accumulated deltas
# print("why?")
for iter in range(args.epochs):
        # zero the accumulated control-variate deltas at the start of each round
for i in delta_c:
delta_c[i] = 0.0
# for i in delta_x:
# delta_x[i] = 0.0
loss_locals = []
if not args.all_clients:
w_locals = []
m = max(int(args.frac * args.num_users), 1)
        # randomly sample m clients for this round
idxs_users = np.random.choice(range(args.num_users), m, replace=False)
for idx in idxs_users:
            # local update: SGD with momentum
local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
w, loss, local_delta_c, local_delta, control_local_w= local.train(net=copy.deepcopy(net_glob).to(args.device), control_local
= c_local[idx], control_global=control_global, rank=rank, device_id=device_id, size=args.world_size)
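            # unpacked by name: w (updated local weights), loss (average local loss), local_delta_c (control-variate delta), local_delta (weight delta), control_local_w (client's new control state)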
# add
if iter != 0:
c_local[idx].load_state_dict(control_local_w)
if args.all_clients:
w_locals[idx] = copy.deepcopy(w)
else:
w_locals.append(copy.deepcopy(w))
# add
loss_locals.append(copy.deepcopy(loss))
# add
for i in delta_c:
if iter != 0:
delta_c[i] += w[i]
else:
delta_c[i] += local_delta_c[i]
# delta_x[i] += local_delta[i]
# add
# update the delta C
for i in delta_c:
delta_c[i] /= m
# delta_x[i] /= m
# update global weights
w_glob = FedAvg(w_locals)
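        # FedAvg aggregates the collected client weights (element-wise average of the state dicts)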
        # add: update the global control variate c and the global weights w
# w_glob = net_glob.state_dict()
control_global_w = control_global.state_dict()
for i in control_global_w:
if iter !=0:
# w_glob[i] = delta_x[i]
# else:
# w_glob[i] += delta_x[i]
control_global_w[i] += (m / args.num_users) * delta_c[i]
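                # SCAFFOLD-style server update: shift the global control variate by the sampled fraction (m / num_users) times the averaged delta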
# copy weight to net_glob
net_glob.load_state_dict(w_glob)
# add
control_global.load_state_dict(control_global_w)
# print loss
loss_avg = sum(loss_locals) / len(loss_locals)
print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg))
loss_train.append(loss_avg)
# acc_train, loss_train = test_img(net_glob, dataset_train, args)
acc_test, loss_test = test_img(net_glob, dataset_test, args)
accuracy.append(acc_test)
# add
for c in range(args.num_users):
local_model = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
torch.cuda.empty_cache()
# net_glob.eval()
# print("Training accuracy: {:.2f}".format(acc_train))
# print("Testing accuracy: {:.2f}".format(acc_test))
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
# Fedavg
# build model
if args.model == 'cnn' and args.dataset == 'cifar':
net_globF = CNNCifar(args=args).to(args.device)
elif args.model == 'cnn' and args.dataset == 'mnist':
net_globF = CNNMnist(args=args).to(args.device)
elif args.model == 'mlp':
len_in = 1
for x in img_size:
len_in *= x
net_globF = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
else:
exit('Error: unrecognized model')
print(net_globF)
net_globF.train()
# copy weights
w_globF = net_globF.state_dict()
# training
loss_trainF = []
accuracyF = []
cv_loss, cv_acc = [], []
val_loss_pre, counter = 0, 0
net_best = None
best_loss = None
val_acc_list, net_list = [], []
if args.all_clients:
print("Aggregation over all clients")
w_localsF = [w_globF for i in range(args.num_users)]
for iter in range(args.epochs):
loss_locals = []
if not args.all_clients:
w_localsF = []
m = max(int(args.frac * args.num_users), 1)
idxs_users = np.random.choice(range(args.num_users), m, replace=False)
for idx in idxs_users:
localF = LocalUpdateF(args=args, dataset=dataset_train, idxs=dict_users[idx])
w, loss = localF.train(net=copy.deepcopy(net_globF).to(args.device))
if args.all_clients:
w_localsF[idx] = copy.deepcopy(w)
else:
w_localsF.append(copy.deepcopy(w))
loss_locals.append(copy.deepcopy(loss))
# update global weights
w_globF = FedAvg(w_localsF)
# copy weight to net_globF
net_globF.load_state_dict(w_globF)
# print loss
loss_avgF = sum(loss_locals) / len(loss_locals)
print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avgF))
loss_trainF.append(loss_avgF)
acc_test, loss_test = test_img(net_globF, dataset_test, args)
accuracyF.append(acc_test)
# plot loss curve
plt.figure()
print(loss_train, loss_trainF)
plt.plot(range(len(loss_train)), loss_train, label='Scaffold', zorder=2)
plt.plot(range(len(loss_trainF)), loss_trainF, 'r', label='FedAvg',zorder=1)
plt.ylabel('train_loss')
plt.xlabel('epochs')
plt.legend(loc='best')
plt.savefig('./save/fed_{}_{}_{}_{}_iid{}.png'.format(args.dataset, args.model, args.epochs, 'train_loss', args.iid))
# testing
net_glob.eval()
acc_train, loss_train = test_img(net_glob, dataset_train, args)
acc_test, loss_test = test_img(net_glob, dataset_test, args)
print("Training accuracy: {:.2f}".format(acc_train))
print("Testing accuracy: {:.2f}".format(acc_test))
# plot loss curve
plt.figure()
# plt.plot((np.arange(1, len(accuracy)), 1), accuracy, 'r')
plt.plot(range(len(accuracy)), accuracy, label='Scaffold', zorder=2)
plt.plot(range(len(accuracyF)), accuracyF, 'r', label='FedAvg', zorder=1)
plt.ylabel('test_acc')
plt.xlabel('epochs')
plt.legend(loc='best')
plt.savefig('./save/fed_{}_{}_{}_{}_iid{}.png'.format(args.dataset, args.model, args.epochs, 'acc_test', args.iid))
| de | 0.184808 | #!/usr/bin/env python # -*- coding: utf-8 -*- # Python version: 3.6 # __name__是内置的变量,在执行当前文件(main_fed.py)时,默认值为__main__ # 但是如果其他.py文件import当前文件(main_fed.py)时,在其他文件中执行main_fed.py中的__name__,此时main_fed.py中的__name__默认值为文件名main_fed.py # parse args # if torch.cuda.is_available() and args.gpu != -1 else 'cpu' # load dataset and split users # ToTensor():归一数据到(0,1),Normalize():(date-0.1307)/0.3081,将数据分布到(-1, 1) # 测试(60000)和训练集(10000) # sample users # Noniid数据 # print('df ',img_size) [1,28,28] # build model # print(args.model) # print('x取值',x) # add # 设置为训练模型 # copy weights # 初始化全局权重 # print(w_glob) # training # add # 初始化本地权重 # delta_x = copy.deepcopy(net_glob.state_dict()) # with open("test.txt", "w") as f: # for i in range(0, len(c_local)): # for k,v in c_local[i].state_dict().items(): # f.write(f"{k},{v}\n".format(k,v)) # with open("test.txt", "a") as f: # for i in range(0, len(c_local)): # for k, v in w_locals[i].items(): # f.write(f"{k},{v}\n".format(k, v)) # add 初始化变化量 # print("why?") # 初始换控制变量 # for i in delta_x: # delta_x[i] = 0.0 # 每次随机十位幸运观众 # momentum法SGD # add # add # add # delta_x[i] += local_delta[i] # add # update the delta C # delta_x[i] /= m # update global weights # add 更新全局c,w # w_glob = net_glob.state_dict() # w_glob[i] = delta_x[i] # else: # w_glob[i] += delta_x[i] # copy weight to net_glob # add # print loss # acc_train, loss_train = test_img(net_glob, dataset_train, args) # add # net_glob.eval() # print("Training accuracy: {:.2f}".format(acc_train)) # print("Testing accuracy: {:.2f}".format(acc_test)) ####################################################################################################################### ####################################################################################################################### ####################################################################################################################### ####################################################################################################################### # Fedavg # build model # copy weights # training # update global weights # copy weight to net_globF # print loss # plot loss curve # testing # plot loss curve # plt.plot((np.arange(1, len(accuracy)), 1), accuracy, 'r') | 2.147085 | 2 |
b_lambda_layer_common_test/integration/infrastructure/function_with_unit_tests.py | gkazla/B.LambdaLayerCommon | 0 | 8852 | from aws_cdk.aws_lambda import Function, Code, Runtime
from aws_cdk.core import Stack, Duration
from b_aws_testing_framework.tools.cdk_testing.testing_stack import TestingStack
from b_cfn_lambda_layer.package_version import PackageVersion
from b_lambda_layer_common.layer import Layer
from b_lambda_layer_common_test.unit import root
class FunctionWithUnitTests(Function):
"""
Function that lets us run unit tests inside lambda function. We want to run unit
tests both locally and remotely.
"""
def __init__(self, scope: Stack):
super().__init__(
scope=scope,
id=f'{TestingStack.global_prefix()}FunctionWithUnitTests',
code=Code.from_asset(root),
handler='handler.handler',
runtime=Runtime.PYTHON_3_8,
timeout=Duration.minutes(5),
memory_size=512,
layers=[
Layer(
scope=scope,
name=f'{TestingStack.global_prefix()}TestingLayerWithUnitTests',
dependencies={
# These dependencies are required for running unit tests inside lambda functions.
# Pytest is used for running actual unit tests.
'pytest': PackageVersion.from_string_version('6.2.5'),
# Pook is used for HTTP mocking, therefore it is also needed here.
'pook': PackageVersion.from_string_version('1.0.1'),
# Not sure about this dependency. Lambda runtime throws errors if its missing.
'aws-cdk.core': PackageVersion.from_string_version('1.99.0'),
# This dependency should be installed with 'pook' since it depends on 'jsonschema' which depends on this.
# For some reason it doesn't.
# Tests would fail with import error otherwise.
'importlib-resources': PackageVersion.from_string_version('5.4.0')
}
)
]
)
| from aws_cdk.aws_lambda import Function, Code, Runtime
from aws_cdk.core import Stack, Duration
from b_aws_testing_framework.tools.cdk_testing.testing_stack import TestingStack
from b_cfn_lambda_layer.package_version import PackageVersion
from b_lambda_layer_common.layer import Layer
from b_lambda_layer_common_test.unit import root
class FunctionWithUnitTests(Function):
"""
Function that lets us run unit tests inside lambda function. We want to run unit
tests both locally and remotely.
"""
def __init__(self, scope: Stack):
super().__init__(
scope=scope,
id=f'{TestingStack.global_prefix()}FunctionWithUnitTests',
code=Code.from_asset(root),
handler='handler.handler',
runtime=Runtime.PYTHON_3_8,
timeout=Duration.minutes(5),
memory_size=512,
layers=[
Layer(
scope=scope,
name=f'{TestingStack.global_prefix()}TestingLayerWithUnitTests',
dependencies={
# These dependencies are required for running unit tests inside lambda functions.
# Pytest is used for running actual unit tests.
'pytest': PackageVersion.from_string_version('6.2.5'),
# Pook is used for HTTP mocking, therefore it is also needed here.
'pook': PackageVersion.from_string_version('1.0.1'),
# Not sure about this dependency. Lambda runtime throws errors if its missing.
'aws-cdk.core': PackageVersion.from_string_version('1.99.0'),
# This dependency should be installed with 'pook' since it depends on 'jsonschema' which depends on this.
# For some reason it doesn't.
# Tests would fail with import error otherwise.
'importlib-resources': PackageVersion.from_string_version('5.4.0')
}
)
]
)
| en | 0.938411 | Function that lets us run unit tests inside lambda function. We want to run unit tests both locally and remotely. # These dependencies are required for running unit tests inside lambda functions. # Pytest is used for running actual unit tests. # Pook is used for HTTP mocking, therefore it is also needed here. # Not sure about this dependency. Lambda runtime throws errors if its missing. # This dependency should be installed with 'pook' since it depends on 'jsonschema' which depends on this. # For some reason it doesn't. # Tests would fail with import error otherwise. | 2.196634 | 2 |
tests/metarl/tf/baselines/test_baselines.py | neurips2020submission11699/metarl | 2 | 8853 | <reponame>neurips2020submission11699/metarl
"""
This script creates a test that fails when
metarl.tf.baselines failed to initialize.
"""
import tensorflow as tf
from metarl.envs import MetaRLEnv
from metarl.tf.baselines import ContinuousMLPBaseline
from metarl.tf.baselines import GaussianMLPBaseline
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv
class TestTfBaselines(TfGraphTestCase):
def test_baseline(self):
"""Test the baseline initialization."""
box_env = MetaRLEnv(DummyBoxEnv())
deterministic_mlp_baseline = ContinuousMLPBaseline(env_spec=box_env)
gaussian_mlp_baseline = GaussianMLPBaseline(env_spec=box_env)
self.sess.run(tf.compat.v1.global_variables_initializer())
deterministic_mlp_baseline.get_param_values()
gaussian_mlp_baseline.get_param_values()
box_env.close()
| """
This script creates a test that fails when
metarl.tf.baselines failed to initialize.
"""
import tensorflow as tf
from metarl.envs import MetaRLEnv
from metarl.tf.baselines import ContinuousMLPBaseline
from metarl.tf.baselines import GaussianMLPBaseline
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv
class TestTfBaselines(TfGraphTestCase):
def test_baseline(self):
"""Test the baseline initialization."""
box_env = MetaRLEnv(DummyBoxEnv())
deterministic_mlp_baseline = ContinuousMLPBaseline(env_spec=box_env)
gaussian_mlp_baseline = GaussianMLPBaseline(env_spec=box_env)
self.sess.run(tf.compat.v1.global_variables_initializer())
deterministic_mlp_baseline.get_param_values()
gaussian_mlp_baseline.get_param_values()
box_env.close() | en | 0.585414 | This script creates a test that fails when metarl.tf.baselines failed to initialize. Test the baseline initialization. | 2.415685 | 2 |
api/files/api/app/monthly_report.py | trackit/trackit-legacy | 2 | 8854 | <reponame>trackit/trackit-legacy
import jinja2
import json
from send_email import send_email
from app.models import User, MyResourcesAWS, db
from app.es.awsdetailedlineitem import AWSDetailedLineitem
from sqlalchemy import desc
import subprocess
import datetime
from flask import render_template
def monthly_html_template():
template_dir = '/usr/trackit/templates'
loader = jinja2.FileSystemLoader(template_dir)
env = jinja2.Environment(loader=loader)
template = env.get_template('emailPDFreport.html')
now = datetime.datetime.now()
try:
users = User.query.all()
for user in users:
if user.report_last_emailed_at == None:
user.report_last_emailed_at = datetime.datetime.utcnow()
db.session.add(user)
db.session.commit()
last_emailed_days = (now - user.report_last_emailed_at).days
if last_emailed_days >= 30:
for key in user.aws_keys:
date = "{} {}".format(now.strftime("%B"), now.year)
pretty_key = user.get_aws_key(key.key).pretty + ' ' + key.key
monthly_cost = AWSDetailedLineitem.get_monthly_cost_by_product(key.get_aws_user_id())
estimation_hour, estimation_month = get_estimation(user, key)
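                    # get_estimation returns the estimated hourly cost and its 720-hour (~30-day) projection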
total = sum(float(i.get("cost")) for i in monthly_cost['products'])
email_template = template.render(email=user.email, date=date, key=pretty_key, products=monthly_cost['products'], total=total, hourly_cost=estimation_hour, monthly_cost=estimation_month)
if user.email.endswith("msolution.io"):
send_email(user.email, 'Trackit monthly report', email_template.encode('utf-8').strip(), True)
user.report_last_emailed_at = datetime.datetime.utcnow()
db.session.add(user)
db.session.commit()
    except Exception as e:
print("ERROR " + str(e))
def get_estimation(user, key):
estimation = MyResourcesAWS.query.filter(MyResourcesAWS.key == key.key).order_by(desc(MyResourcesAWS.date)).first()
estimation = [] if not estimation else estimation.json()
cost = sum(estimation_cost(e) for e in estimation)
return cost, cost*720
def estimation_cost(estimation):
return sum(item['cost'] for item in estimation['prices'] if item['name'] == 'aws')
| import jinja2
import json
from send_email import send_email
from app.models import User, MyResourcesAWS, db
from app.es.awsdetailedlineitem import AWSDetailedLineitem
from sqlalchemy import desc
import subprocess
import datetime
from flask import render_template
def monthly_html_template():
template_dir = '/usr/trackit/templates'
loader = jinja2.FileSystemLoader(template_dir)
env = jinja2.Environment(loader=loader)
template = env.get_template('emailPDFreport.html')
now = datetime.datetime.now()
try:
users = User.query.all()
for user in users:
if user.report_last_emailed_at == None:
user.report_last_emailed_at = datetime.datetime.utcnow()
db.session.add(user)
db.session.commit()
last_emailed_days = (now - user.report_last_emailed_at).days
if last_emailed_days >= 30:
for key in user.aws_keys:
date = "{} {}".format(now.strftime("%B"), now.year)
pretty_key = user.get_aws_key(key.key).pretty + ' ' + key.key
monthly_cost = AWSDetailedLineitem.get_monthly_cost_by_product(key.get_aws_user_id())
estimation_hour, estimation_month = get_estimation(user, key)
total = sum(float(i.get("cost")) for i in monthly_cost['products'])
email_template = template.render(email=user.email, date=date, key=pretty_key, products=monthly_cost['products'], total=total, hourly_cost=estimation_hour, monthly_cost=estimation_month)
if user.email.endswith("msolution.io"):
send_email(user.email, 'Trackit monthly report', email_template.encode('utf-8').strip(), True)
user.report_last_emailed_at = datetime.datetime.utcnow()
db.session.add(user)
db.session.commit()
    except Exception as e:
print("ERROR " + str(e))
def get_estimation(user, key):
estimation = MyResourcesAWS.query.filter(MyResourcesAWS.key == key.key).order_by(desc(MyResourcesAWS.date)).first()
estimation = [] if not estimation else estimation.json()
cost = sum(estimation_cost(e) for e in estimation)
return cost, cost*720
def estimation_cost(estimation):
return sum(item['cost'] for item in estimation['prices'] if item['name'] == 'aws') | none | 1 | 2.375761 | 2 |
|
slow_tests/boot_test.py | rdturnermtl/mlpaper | 9 | 8855 | # <NAME> (<EMAIL>)
from __future__ import division, print_function
from builtins import range
import numpy as np
import scipy.stats as ss
import mlpaper.constants as cc
import mlpaper.mlpaper as bt
import mlpaper.perf_curves as pc
from mlpaper.classification import DEFAULT_NGRID, curve_boot
from mlpaper.test_constants import FPR
from mlpaper.util import area, interp1d
_FPR = FPR / 3.0 # Divide by number of test funcs
def fail_check_stat(fail, runs, expect_p_fail, fpr):
pvals_2side = [ss.binom_test(ff, runs, expect_p_fail) for ff in fail]
pvals_1side = [ss.binom_test(ff, runs, expect_p_fail, alternative="greater") for ff in fail]
# Note that we are not going multiple comparison correction between the
# two sided and one sided tests.
print(fail)
print(pvals_2side)
assert np.min(pvals_2side) >= fpr / len(pvals_2side)
print(pvals_1side)
assert np.min(pvals_1side) >= fpr / len(pvals_1side)
def test_boot(runs=100):
N = 201
confidence = 0.95
# Drawing more seeds than we need to be safe
seeds = np.nditer(np.random.randint(low=0, high=int(1e6), size=runs * 5))
def run_trial(y_true, y_score, y_score_ref, true_curve, curve_f, seed, x_grid=None):
epsilon = 1e-6
curve, _ = curve_f(y_true, y_score[:, 1])
auc, = area(*curve)
curve, _ = curve_f(y_true, y_score_ref[:, 1])
auc_ref, = area(*curve)
true_value, = area(*true_curve)
np.random.seed(seed)
(auc_, EB, pval), curve = curve_boot(
y_true, y_score, ref=true_value, curve_f=curve_f, confidence=confidence, x_grid=x_grid
)
true_curve_grid, = interp1d(curve[cc.XGRID].values, *true_curve)
assert auc_ == auc
fail_EB = np.abs(auc - true_value) > EB
# Could also test distn with 1-sided KS test but this easier for now
fail_P = pval < 1.0 - confidence
fail_curve = (true_curve_grid < curve[cc.LB].values - epsilon) | (
curve[cc.UB].values + epsilon < true_curve_grid
)
assert (x_grid is None) or np.all(curve[cc.XGRID].values == x_grid)
np.random.seed(seed)
(auc_, EB_, pval), curve_ = curve_boot(
y_true, y_score, ref=y_score_ref, curve_f=curve_f, confidence=confidence, pairwise_CI=False, x_grid=x_grid
)
assert auc_ == auc
assert EB_ == EB
# Could also test distn with 1-sided KS test but this easier for now
fail_P2 = pval < 1.0 - confidence
assert np.all(curve_.values == curve.values)
np.random.seed(seed)
(auc_, EB, pval_), curve_ = curve_boot(
y_true, y_score, ref=y_score_ref, curve_f=curve_f, confidence=confidence, pairwise_CI=True, x_grid=x_grid
)
assert auc_ == auc
fail_EB2 = np.abs(auc - auc_ref) > EB
# Could also test distn with 1-sided KS test but this easier for now
assert pval_ == pval
assert np.all(curve_.values == curve.values)
return fail_EB, fail_P, fail_EB2, fail_P2, fail_curve
fail = [0] * 12
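    # failure counters: (EB, p-value, paired EB, paired p-value) for each of the ROC, recall-precision, and PRG curves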
fail_curve_roc = np.zeros(DEFAULT_NGRID, dtype=int)
fail_curve_ap = np.zeros(DEFAULT_NGRID, dtype=int)
fail_curve_prg = np.zeros(DEFAULT_NGRID, dtype=int)
for ii in range(runs):
mu = np.random.randn(2)
S = np.random.randn(2, 2)
S = np.dot(S, S.T)
# Coverage, esp at edges, is worse for imbalanced data. See issue #20.
p = 0.5
x_grid = np.linspace(0.0, 0.99, DEFAULT_NGRID)
true_curve = (np.array([[0.0, 1.0]]), np.array([[0.0, 1.0]]), pc.LINEAR)
y_true = np.random.rand(N) <= p
y_score = np.random.multivariate_normal(mu, S, size=N)
if np.random.randn() <= 0.5: # resample to test dupes
idx = np.random.choice(N, size=N, replace=True)
y_score = y_score[idx, :]
y_score, y_score_ref = y_score.T
y_score = np.stack((np.zeros(N), y_score), axis=1)
y_score_ref = np.stack((np.zeros(N), y_score_ref), axis=1)
# Coverage doesn't hold at edges, hence [0.05, 0.95]. See issue #20.
x_grid = np.linspace(0.05, 0.95, DEFAULT_NGRID)
fail_EB, fail_P, fail_EB2, fail_P2, fail_curve = run_trial(
y_true, y_score, y_score_ref, true_curve, pc.roc_curve, next(seeds), x_grid
)
fail[0] += fail_EB
fail[1] += fail_P
fail[2] += fail_EB2
fail[3] += fail_P2
fail_curve_roc += fail_curve
true_curve = (np.array([[0.0, 1.0]]), np.array([[p, p]]), pc.PREV)
fail_EB, fail_P, fail_EB2, fail_P2, fail_curve = run_trial(
y_true, y_score, y_score_ref, true_curve, pc.recall_precision_curve, next(seeds), x_grid
)
fail[4] += fail_EB
fail[5] += fail_P
fail[6] += fail_EB2
fail[7] += fail_P2
fail_curve_ap += fail_curve
x_grid = np.linspace(0.0, 0.99, DEFAULT_NGRID)
true_curve = (np.array([[0.0, 1.0]]), np.array([[0.0, 0.0]]), pc.PREV)
fail_EB, fail_P, fail_EB2, fail_P2, fail_curve = run_trial(
y_true, y_score, y_score_ref, true_curve, pc.prg_curve, next(seeds), x_grid
)
fail[8] += fail_EB
fail[9] += fail_P
fail[10] += fail_EB2
fail[11] += fail_P2
fail_curve_prg += fail_curve
sub_FPR = _FPR / 4.0
expect_p_fail = 1.0 - confidence
fail_check_stat(fail, runs, expect_p_fail, sub_FPR)
print("ROC curve")
fail_check_stat(fail_curve_roc, runs, expect_p_fail, sub_FPR)
print("RP curve")
fail_check_stat(fail_curve_ap, runs, expect_p_fail, sub_FPR)
print("PRG curve")
fail_check_stat(fail_curve_prg, runs, expect_p_fail, sub_FPR)
def test_boot_mean(runs=100):
N = 201
confidence = 0.95
fail = 0
for ii in range(runs):
mu = np.random.randn()
S = np.abs(np.random.randn())
x = mu + S * np.random.randn(N)
mu_est = np.mean(x)
EB = bt.boot_EB(x, confidence=0.95)
fail += np.abs(mu - mu_est) > EB
expect_p_fail = 1.0 - confidence
print("boot mean")
fail_check_stat([fail], runs, expect_p_fail, _FPR)
def test_boot_EB_and_test(runs=100):
"""Arguably this should do out to its own file since it tests bt core."""
mu = np.random.randn()
stdev = np.abs(np.random.randn())
N = 201
confidence = 0.95
def run_trial(x, true_value):
_, _, CI = bt._boot_EB_and_test(x, confidence=confidence, return_CI=True)
LB, UB = CI
fail_CI = (true_value < LB) or (UB < true_value)
_, pval, CI = bt._boot_EB_and_test(x - true_value, confidence=confidence, return_CI=True)
LB, UB = CI
fail_CI2 = (0 < LB) or (UB < 0)
fail_P = pval < 1.0 - confidence
return fail_CI, fail_CI2, fail_P
fail = [0] * 3
for ii in range(runs):
x = mu + stdev * np.random.randn(N)
fail_CI, fail_CI2, fail_P = run_trial(x, mu)
fail[0] += fail_CI
fail[1] += fail_CI2
fail[2] += fail_P
expect_p_fail = 1.0 - confidence
print("boot mean and test")
fail_check_stat(fail, runs, expect_p_fail, _FPR)
if __name__ == "__main__":
np.random.seed(56467)
test_boot()
test_boot_mean()
test_boot_EB_and_test()
print("passed")
| # <NAME> (<EMAIL>)
from __future__ import division, print_function
from builtins import range
import numpy as np
import scipy.stats as ss
import mlpaper.constants as cc
import mlpaper.mlpaper as bt
import mlpaper.perf_curves as pc
from mlpaper.classification import DEFAULT_NGRID, curve_boot
from mlpaper.test_constants import FPR
from mlpaper.util import area, interp1d
_FPR = FPR / 3.0 # Divide by number of test funcs
def fail_check_stat(fail, runs, expect_p_fail, fpr):
pvals_2side = [ss.binom_test(ff, runs, expect_p_fail) for ff in fail]
pvals_1side = [ss.binom_test(ff, runs, expect_p_fail, alternative="greater") for ff in fail]
# Note that we are not going multiple comparison correction between the
# two sided and one sided tests.
print(fail)
print(pvals_2side)
assert np.min(pvals_2side) >= fpr / len(pvals_2side)
print(pvals_1side)
assert np.min(pvals_1side) >= fpr / len(pvals_1side)
def test_boot(runs=100):
N = 201
confidence = 0.95
# Drawing more seeds than we need to be safe
seeds = np.nditer(np.random.randint(low=0, high=int(1e6), size=runs * 5))
def run_trial(y_true, y_score, y_score_ref, true_curve, curve_f, seed, x_grid=None):
epsilon = 1e-6
curve, _ = curve_f(y_true, y_score[:, 1])
auc, = area(*curve)
curve, _ = curve_f(y_true, y_score_ref[:, 1])
auc_ref, = area(*curve)
true_value, = area(*true_curve)
np.random.seed(seed)
(auc_, EB, pval), curve = curve_boot(
y_true, y_score, ref=true_value, curve_f=curve_f, confidence=confidence, x_grid=x_grid
)
true_curve_grid, = interp1d(curve[cc.XGRID].values, *true_curve)
assert auc_ == auc
fail_EB = np.abs(auc - true_value) > EB
# Could also test distn with 1-sided KS test but this easier for now
fail_P = pval < 1.0 - confidence
fail_curve = (true_curve_grid < curve[cc.LB].values - epsilon) | (
curve[cc.UB].values + epsilon < true_curve_grid
)
assert (x_grid is None) or np.all(curve[cc.XGRID].values == x_grid)
np.random.seed(seed)
(auc_, EB_, pval), curve_ = curve_boot(
y_true, y_score, ref=y_score_ref, curve_f=curve_f, confidence=confidence, pairwise_CI=False, x_grid=x_grid
)
assert auc_ == auc
assert EB_ == EB
# Could also test distn with 1-sided KS test but this easier for now
fail_P2 = pval < 1.0 - confidence
assert np.all(curve_.values == curve.values)
np.random.seed(seed)
(auc_, EB, pval_), curve_ = curve_boot(
y_true, y_score, ref=y_score_ref, curve_f=curve_f, confidence=confidence, pairwise_CI=True, x_grid=x_grid
)
assert auc_ == auc
fail_EB2 = np.abs(auc - auc_ref) > EB
# Could also test distn with 1-sided KS test but this easier for now
assert pval_ == pval
assert np.all(curve_.values == curve.values)
return fail_EB, fail_P, fail_EB2, fail_P2, fail_curve
fail = [0] * 12
fail_curve_roc = np.zeros(DEFAULT_NGRID, dtype=int)
fail_curve_ap = np.zeros(DEFAULT_NGRID, dtype=int)
fail_curve_prg = np.zeros(DEFAULT_NGRID, dtype=int)
for ii in range(runs):
mu = np.random.randn(2)
S = np.random.randn(2, 2)
S = np.dot(S, S.T)
# Coverage, esp at edges, is worse for imbalanced data. See issue #20.
p = 0.5
x_grid = np.linspace(0.0, 0.99, DEFAULT_NGRID)
true_curve = (np.array([[0.0, 1.0]]), np.array([[0.0, 1.0]]), pc.LINEAR)
y_true = np.random.rand(N) <= p
y_score = np.random.multivariate_normal(mu, S, size=N)
if np.random.randn() <= 0.5: # resample to test dupes
idx = np.random.choice(N, size=N, replace=True)
y_score = y_score[idx, :]
y_score, y_score_ref = y_score.T
y_score = np.stack((np.zeros(N), y_score), axis=1)
y_score_ref = np.stack((np.zeros(N), y_score_ref), axis=1)
# Coverage doesn't hold at edges, hence [0.05, 0.95]. See issue #20.
x_grid = np.linspace(0.05, 0.95, DEFAULT_NGRID)
fail_EB, fail_P, fail_EB2, fail_P2, fail_curve = run_trial(
y_true, y_score, y_score_ref, true_curve, pc.roc_curve, next(seeds), x_grid
)
fail[0] += fail_EB
fail[1] += fail_P
fail[2] += fail_EB2
fail[3] += fail_P2
fail_curve_roc += fail_curve
true_curve = (np.array([[0.0, 1.0]]), np.array([[p, p]]), pc.PREV)
fail_EB, fail_P, fail_EB2, fail_P2, fail_curve = run_trial(
y_true, y_score, y_score_ref, true_curve, pc.recall_precision_curve, next(seeds), x_grid
)
fail[4] += fail_EB
fail[5] += fail_P
fail[6] += fail_EB2
fail[7] += fail_P2
fail_curve_ap += fail_curve
x_grid = np.linspace(0.0, 0.99, DEFAULT_NGRID)
true_curve = (np.array([[0.0, 1.0]]), np.array([[0.0, 0.0]]), pc.PREV)
fail_EB, fail_P, fail_EB2, fail_P2, fail_curve = run_trial(
y_true, y_score, y_score_ref, true_curve, pc.prg_curve, next(seeds), x_grid
)
fail[8] += fail_EB
fail[9] += fail_P
fail[10] += fail_EB2
fail[11] += fail_P2
fail_curve_prg += fail_curve
sub_FPR = _FPR / 4.0
expect_p_fail = 1.0 - confidence
fail_check_stat(fail, runs, expect_p_fail, sub_FPR)
print("ROC curve")
fail_check_stat(fail_curve_roc, runs, expect_p_fail, sub_FPR)
print("RP curve")
fail_check_stat(fail_curve_ap, runs, expect_p_fail, sub_FPR)
print("PRG curve")
fail_check_stat(fail_curve_prg, runs, expect_p_fail, sub_FPR)
def test_boot_mean(runs=100):
N = 201
confidence = 0.95
fail = 0
for ii in range(runs):
mu = np.random.randn()
S = np.abs(np.random.randn())
x = mu + S * np.random.randn(N)
mu_est = np.mean(x)
EB = bt.boot_EB(x, confidence=0.95)
fail += np.abs(mu - mu_est) > EB
expect_p_fail = 1.0 - confidence
print("boot mean")
fail_check_stat([fail], runs, expect_p_fail, _FPR)
def test_boot_EB_and_test(runs=100):
"""Arguably this should do out to its own file since it tests bt core."""
mu = np.random.randn()
stdev = np.abs(np.random.randn())
N = 201
confidence = 0.95
def run_trial(x, true_value):
_, _, CI = bt._boot_EB_and_test(x, confidence=confidence, return_CI=True)
LB, UB = CI
fail_CI = (true_value < LB) or (UB < true_value)
_, pval, CI = bt._boot_EB_and_test(x - true_value, confidence=confidence, return_CI=True)
LB, UB = CI
fail_CI2 = (0 < LB) or (UB < 0)
fail_P = pval < 1.0 - confidence
return fail_CI, fail_CI2, fail_P
fail = [0] * 3
for ii in range(runs):
x = mu + stdev * np.random.randn(N)
fail_CI, fail_CI2, fail_P = run_trial(x, mu)
fail[0] += fail_CI
fail[1] += fail_CI2
fail[2] += fail_P
expect_p_fail = 1.0 - confidence
print("boot mean and test")
fail_check_stat(fail, runs, expect_p_fail, _FPR)
if __name__ == "__main__":
np.random.seed(56467)
test_boot()
test_boot_mean()
test_boot_EB_and_test()
print("passed")
| en | 0.939979 | # <NAME> (<EMAIL>) # Divide by number of test funcs # Note that we are not going multiple comparison correction between the # two sided and one sided tests. # Drawing more seeds than we need to be safe # Could also test distn with 1-sided KS test but this easier for now # Could also test distn with 1-sided KS test but this easier for now # Could also test distn with 1-sided KS test but this easier for now # Coverage, esp at edges, is worse for imbalanced data. See issue #20. # resample to test dupes # Coverage doesn't hold at edges, hence [0.05, 0.95]. See issue #20. Arguably this should do out to its own file since it tests bt core. | 2.1556 | 2 |
TTBenchmark/check_benchmark.py | yuqil725/benchmark_lib | 0 | 8856 | def check_difference():
pass
def update_benchmark():
pass
| def check_difference():
pass
def update_benchmark():
pass
| none | 1 | 0.855307 | 1 |
|
core/test/test_timeseries_study.py | ajmal017/amp | 0 | 8857 | from typing import Any, Dict
import numpy as np
import pandas as pd
import core.artificial_signal_generators as sig_gen
import core.statistics as stats
import core.timeseries_study as tss
import helpers.unit_test as hut
class TestTimeSeriesDailyStudy(hut.TestCase):
def test_usual_case(self) -> None:
idx = pd.date_range("2018-12-31", "2019-01-31")
vals = np.random.randn(len(idx))
ts = pd.Series(vals, index=idx)
tsds = tss.TimeSeriesDailyStudy(ts)
tsds.execute()
class TestTimeSeriesMinutelyStudy(hut.TestCase):
def test_usual_case(self) -> None:
idx = pd.date_range("2018-12-31", "2019-01-31", freq="5T")
vals = np.random.randn(len(idx))
ts = pd.Series(vals, index=idx)
tsms = tss.TimeSeriesMinutelyStudy(ts, freq_name="5 minutes")
tsms.execute()
class TestMapDictToDataframeTest1(hut.TestCase):
def test1(self) -> None:
stat_funcs = {
"norm_": stats.apply_normality_test,
"adf_": stats.apply_adf_test,
"kpss_": stats.apply_kpss_test,
}
result_dict = self._get_dict_of_series(1)
actual = tss.map_dict_to_dataframe(
dict_=result_dict, functions=stat_funcs
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
stat_funcs = {
"norm_": stats.apply_normality_test,
"adf_": stats.apply_adf_test,
"kpss_": stats.apply_kpss_test,
}
result_dict = self._get_dict_of_series(1)
actual = tss.map_dict_to_dataframe(
dict_=result_dict,
functions=stat_funcs,
add_prefix=False,
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
stat_funcs = {
"norm_": stats.apply_normality_test,
"adf_": stats.apply_adf_test,
"kpss_": stats.apply_kpss_test,
}
result_dict = self._get_dict_of_series(1)
actual = tss.map_dict_to_dataframe(
dict_=result_dict,
functions=stat_funcs,
progress_bar=False,
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
@staticmethod
def _get_series(seed: int) -> pd.Series:
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = sig_gen.ArmaProcess(arparams, maparams)
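        # deterministic synthetic ARMA(2, 2) series (fixed seed) so the stored test output stays reproducible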
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed
)
return series
def _get_dict_of_series(self, seed: int) -> Dict[Any, pd.Series]:
n_items = 15
test_keys = ["test_key_" + str(x) for x in range(n_items)]
result_dict = {key: self._get_series(seed) for key in test_keys}
return result_dict
| from typing import Any, Dict
import numpy as np
import pandas as pd
import core.artificial_signal_generators as sig_gen
import core.statistics as stats
import core.timeseries_study as tss
import helpers.unit_test as hut
class TestTimeSeriesDailyStudy(hut.TestCase):
def test_usual_case(self) -> None:
idx = pd.date_range("2018-12-31", "2019-01-31")
vals = np.random.randn(len(idx))
ts = pd.Series(vals, index=idx)
tsds = tss.TimeSeriesDailyStudy(ts)
tsds.execute()
class TestTimeSeriesMinutelyStudy(hut.TestCase):
def test_usual_case(self) -> None:
idx = pd.date_range("2018-12-31", "2019-01-31", freq="5T")
vals = np.random.randn(len(idx))
ts = pd.Series(vals, index=idx)
tsms = tss.TimeSeriesMinutelyStudy(ts, freq_name="5 minutes")
tsms.execute()
class TestMapDictToDataframeTest1(hut.TestCase):
def test1(self) -> None:
stat_funcs = {
"norm_": stats.apply_normality_test,
"adf_": stats.apply_adf_test,
"kpss_": stats.apply_kpss_test,
}
result_dict = self._get_dict_of_series(1)
actual = tss.map_dict_to_dataframe(
dict_=result_dict, functions=stat_funcs
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
stat_funcs = {
"norm_": stats.apply_normality_test,
"adf_": stats.apply_adf_test,
"kpss_": stats.apply_kpss_test,
}
result_dict = self._get_dict_of_series(1)
actual = tss.map_dict_to_dataframe(
dict_=result_dict,
functions=stat_funcs,
add_prefix=False,
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
stat_funcs = {
"norm_": stats.apply_normality_test,
"adf_": stats.apply_adf_test,
"kpss_": stats.apply_kpss_test,
}
result_dict = self._get_dict_of_series(1)
actual = tss.map_dict_to_dataframe(
dict_=result_dict,
functions=stat_funcs,
progress_bar=False,
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
@staticmethod
def _get_series(seed: int) -> pd.Series:
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = sig_gen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed
)
return series
def _get_dict_of_series(self, seed: int) -> Dict[Any, pd.Series]:
n_items = 15
test_keys = ["test_key_" + str(x) for x in range(n_items)]
result_dict = {key: self._get_series(seed) for key in test_keys}
return result_dict
| none | 1 | 2.42283 | 2 |
|
util.py | takat0m0/infoGAN | 0 | 8858 | <filename>util.py
#! -*- coding:utf-8 -*-
import os
import sys
import cv2
import numpy as np
def _resizing(img):
#return cv2.resize(img, (256, 256))
return cv2.resize(img, (32, 32))
def _reg(img):
return img/127.5 - 1.0
def _re_reg(img):
return (img + 1.0) * 127.5
def get_figs(target_dir):
ret = []
for file_name in os.listdir(target_dir):
target_file = os.path.join(target_dir, file_name)
img = cv2.imread(target_file, 0)
ret.append(_reg(_resizing(img)))
return np.asarray(ret, dtype = np.float32)
def dump_figs(figs, dump_dir):
for i, fig in enumerate(figs):
target_file = os.path.join(dump_dir, '{}.jpg'.format(i))
cv2.imwrite(target_file, _re_reg(fig))
| <filename>util.py
#! -*- coding:utf-8 -*-
import os
import sys
import cv2
import numpy as np
def _resizing(img):
#return cv2.resize(img, (256, 256))
return cv2.resize(img, (32, 32))
def _reg(img):
return img/127.5 - 1.0
def _re_reg(img):
return (img + 1.0) * 127.5
def get_figs(target_dir):
ret = []
for file_name in os.listdir(target_dir):
target_file = os.path.join(target_dir, file_name)
img = cv2.imread(target_file, 0)
ret.append(_reg(_resizing(img)))
return np.asarray(ret, dtype = np.float32)
def dump_figs(figs, dump_dir):
for i, fig in enumerate(figs):
target_file = os.path.join(dump_dir, '{}.jpg'.format(i))
cv2.imwrite(target_file, _re_reg(fig))
| en | 0.362879 | #! -*- coding:utf-8 -*- #return cv2.resize(img, (256, 256)) | 2.793318 | 3 |
myhoodApp/migrations/0002_healthfacilities_hospital_image.py | MutuaFranklin/MyHood | 0 | 8859 | <reponame>MutuaFranklin/MyHood
# Generated by Django 3.2.7 on 2021-09-23 20:01
import cloudinary.models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('myhoodApp', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='healthfacilities',
name='hospital_image',
field=cloudinary.models.CloudinaryField(blank=True, max_length=255, verbose_name='Hospital Image'),
),
]
| # Generated by Django 3.2.7 on 2021-09-23 20:01
import cloudinary.models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('myhoodApp', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='healthfacilities',
name='hospital_image',
field=cloudinary.models.CloudinaryField(blank=True, max_length=255, verbose_name='Hospital Image'),
),
] | en | 0.815358 | # Generated by Django 3.2.7 on 2021-09-23 20:01 | 1.851888 | 2 |
forecasting_algorithms/Multiple_Timeseries/VAR/var.py | ans682/SafePredict_and_Forecasting | 1 | 8860 | # VAR example
from statsmodels.tsa.vector_ar.var_model import VAR
from random import random
# contrived dataset with dependency
data = list()
for i in range(100):
v1 = i + random()
v2 = v1 + random()
row = [v1, v2]
data.append(row)
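# v2 tracks v1 plus noise, giving the two series the cross-dependence a VAR model is meant to capture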
# fit model
model = VAR(data)
model_fit = model.fit()
# make prediction
yhat = model_fit.forecast(model_fit.y, steps=1)
print(yhat)
| # VAR example
from statsmodels.tsa.vector_ar.var_model import VAR
from random import random
# contrived dataset with dependency
data = list()
for i in range(100):
v1 = i + random()
v2 = v1 + random()
row = [v1, v2]
data.append(row)
# fit model
model = VAR(data)
model_fit = model.fit()
# make prediction
yhat = model_fit.forecast(model_fit.y, steps=1)
print(yhat)
| en | 0.798805 | # VAR example # contrived dataset with dependency # fit model # make prediction | 2.979384 | 3 |
candidate-scrape.py | jonykarki/hamroscraper | 2 | 8861 | <reponame>jonykarki/hamroscraper<filename>candidate-scrape.py
import json
import urllib.request
import MySQLdb
db = MySQLdb.connect(host="localhost", # your host, usually localhost
user="root", # your username
passwd="", # your password
db="election")
cur = db.cursor()
# user_agent for sending headers with the request
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
# header
headers={'User-Agent':user_agent,}
district = input("Enter the Name of the district: ")
url = "http://election.ujyaaloonline.com/api/candidates?district=" + district
request = urllib.request.Request(url, None, headers)
response = urllib.request.urlopen(request)
source = response.read()
# print(source)
data = json.loads(source)
#print(data['candidates']['2']['400'][0]['cName'])
election_area = data['election_areas']
# get all the possible election-areas from the district
# data needed for the database
'''
resultno :> autoincrement
constituencyname :>
stateno :> Remove the column?
districtno :>
candidate :>
gender :> Remove the column???
votes :> set to zero for now
'''
i = 0
j = 0
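# per the counters below, candidates['1'] holds provincial-election candidates and candidates['2'] federal-election candidates, keyed by election-area id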
for key, value in election_area.items():
area_key = key
district_name = data['district_slug']
try:
for item in data["candidates"]['1'][area_key]:
print(item['aName'])
print(item["cName"])
i = i + 1
except:
for item in data["candidates"]['2'][area_key]:
constituencyname = item['aName'].encode('utf-8')
candidatename = item["cName"].encode('utf-8')
sql = "INSERT INTO `test` (`id`, `candidatename`, `constituencyname`) VALUES (NULL, %s, %s)"
cur.execute(sql, (candidatename, constituencyname))
db.commit()
print('INSERTED ' + item["cName"] + " into the database")
j = j + 1
print(data['district_slug'] + " has " + str(i) + " candidates in provincial election")
print(data['district_slug'] + " has " + str(j) + " candidates in federal election")
print("Total: " + str(i + j) + " candidates added to the database")
| import json
import urllib.request
import MySQLdb
db = MySQLdb.connect(host="localhost", # your host, usually localhost
user="root", # your username
passwd="", # your password
db="election")
cur = db.cursor()
# user_agent for sending headers with the request
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
# header
headers={'User-Agent':user_agent,}
district = input("Enter the Name of the district: ")
url = "http://election.ujyaaloonline.com/api/candidates?district=" + district
request = urllib.request.Request(url, None, headers)
response = urllib.request.urlopen(request)
source = response.read()
# print(source)
data = json.loads(source)
#print(data['candidates']['2']['400'][0]['cName'])
election_area = data['election_areas']
# get all the possible election-areas from the district
# data needed for the database
'''
resultno :> autoincrement
constituencyname :>
stateno :> Remove the column?
districtno :>
candidate :>
gender :> Remove the column???
votes :> set to zero for now
'''
i = 0
j = 0
for key, value in election_area.items():
area_key = key
district_name = data['district_slug']
try:
for item in data["candidates"]['1'][area_key]:
print(item['aName'])
print(item["cName"])
i = i + 1
except:
for item in data["candidates"]['2'][area_key]:
constituencyname = item['aName'].encode('utf-8')
candidatename = item["cName"].encode('utf-8')
sql = "INSERT INTO `test` (`id`, `candidatename`, `constituencyname`) VALUES (NULL, %s, %s)"
cur.execute(sql, (candidatename, constituencyname))
db.commit()
print('INSERTED ' + item["cName"] + " into the database")
j = j + 1
print(data['district_slug'] + " has " + str(i) + " candidates in provincial election")
print(data['district_slug'] + " has " + str(j) + " candidates in federal election")
print("Total: " + str(i + j) + " candidates added to the database") | en | 0.675548 | # your host, usually localhost # your username # your password # user_agent for sending headers with the request # header # print(source) #print(data['candidates']['2']['400'][0]['cName']) # get all the possible election-areas from the district # data needed for the database resultno :> autoincrement constituencyname :> stateno :> Remove the column? districtno :> candidate :> gender :> Remove the column??? votes :> set to zero for now | 3.259907 | 3 |
sangita/hindi/lemmatizer.py | ashiscs/sangita | 36 | 8862 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 9 23:28:21 2017
@author: samriddhi
"""
import re
import sangita.hindi.tokenizer as tok
import sangita.hindi.corpora.lemmata as lt
def numericLemmatizer(instr):
lst = type([1,2,3])
tup = type(("Hello", "Hi"))
string = type("Hello")
num_match = re.compile(r'([०१२३४५६७८९]+[\.\,]*)+[०१२३४५६७८९]+|([-+]*\d+[\.\,]*)+\d+|([०१२३४५६७८९]+|\d+)')
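    # matches Devanagari or ASCII digit sequences, optionally signed (+/-) and with '.' or ',' separators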
if(type(instr) == lst):
for index,item in enumerate(instr):
if(type(item) == tup):
if num_match.search(str(item[0])):
instr[index] = (instr[index][1], instr[index][1])
else:
if num_match.search(str(item)):
instr[index] = (instr[index], instr[index][1])
else:
if(type(instr) == string):
instr = tok.tokenize(instr)
numericLemmatizer(instr)
else:
print("not supported")
return(instr)
def defaultLemmatizer(instr):
lst = type([1,2,3])
tup = type(("Hello", "Hi"))
string = type("Hello")
if(type(instr) == lst):
for index,item in enumerate(instr):
if(type(item) != tup):
instr[index] = (instr[index], instr[index])
else:
if(type(instr) == string):
instr = tok.tokenize(instr)
defaultLemmatizer(instr)
else:
print("not supported")
return(instr)
def lookupLemmatizer(instr):
lst = type([1,2,3])
tup = type(("Hello", "Hi"))
string = type("Hello")
lemmatalist = lt.drawlist()
words = []
lemma = []
for item in lemmatalist:
words.append(item.split("\t")[0])
lemma.append(item.split("\t")[1])
tokens = set(words)
if(type(instr) == lst):
for index,item in enumerate(instr):
if(type(item) == tup):
if item in tokens:
tag = lemma[words.index(item)]
instr[index] = (instr[index][1],tag)
else:
if(type(item) != tup):
if item in tokens:
tag = lemma[words.index(item)]
instr[index] = (instr[index], tag)
else:
if(type(instr) == string):
instr = tok.tokenize(instr)
lookupLemmatizer(instr)
else:
print("not supported")
return(instr)
def Lemmatizer(instr):
instr = lookupLemmatizer(instr)
instr = numericLemmatizer(instr)
instr = defaultLemmatizer(instr)
return(instr)
if __name__ == '__main__':
input_str = 'पुंछ में हुई मुठभेड़ के बारे में एक सरकारी अधिकारी ने बताया कि १३वीं सिख लाईट इनफेंट्री द्वारा लश्कर-ए - ताइबा गुट के आतंकियों को नियंत्रण-रेखा पर चुनौती देने पर मुठभेड़ रात ११.४५ बजे शुरू हुई।'
print(lookupLemmatizer(input_str))
print(numericLemmatizer(input_str))
print(defaultLemmatizer(input_str))
print(Lemmatizer(input_str))
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 9 23:28:21 2017
@author: samriddhi
"""
import re
import sangita.hindi.tokenizer as tok
import sangita.hindi.corpora.lemmata as lt
def numericLemmatizer(instr):
lst = type([1,2,3])
tup = type(("Hello", "Hi"))
string = type("Hello")
num_match = re.compile(r'([०१२३४५६७८९]+[\.\,]*)+[०१२३४५६७८९]+|([-+]*\d+[\.\,]*)+\d+|([०१२३४५६७८९]+|\d+)')
if(type(instr) == lst):
for index,item in enumerate(instr):
if(type(item) == tup):
if num_match.search(str(item[0])):
instr[index] = (instr[index][1], instr[index][1])
else:
if num_match.search(str(item)):
instr[index] = (instr[index], instr[index][1])
else:
if(type(instr) == string):
instr = tok.tokenize(instr)
numericLemmatizer(instr)
else:
print("not supported")
return(instr)
def defaultLemmatizer(instr):
lst = type([1,2,3])
tup = type(("Hello", "Hi"))
string = type("Hello")
if(type(instr) == lst):
for index,item in enumerate(instr):
if(type(item) != tup):
instr[index] = (instr[index], instr[index])
else:
if(type(instr) == string):
instr = tok.tokenize(instr)
defaultLemmatizer(instr)
else:
print("not supported")
return(instr)
def lookupLemmatizer(instr):
lst = type([1,2,3])
tup = type(("Hello", "Hi"))
string = type("Hello")
lemmatalist = lt.drawlist()
words = []
lemma = []
for item in lemmatalist:
words.append(item.split("\t")[0])
lemma.append(item.split("\t")[1])
tokens = set(words)
if(type(instr) == lst):
for index,item in enumerate(instr):
if(type(item) == tup):
if item in tokens:
tag = lemma[words.index(item)]
instr[index] = (instr[index][1],tag)
else:
if(type(item) != tup):
if item in tokens:
tag = lemma[words.index(item)]
instr[index] = (instr[index], tag)
else:
if(type(instr) == string):
instr = tok.tokenize(instr)
lookupLemmatizer(instr)
else:
print("not supported")
return(instr)
def Lemmatizer(instr):
instr = lookupLemmatizer(instr)
instr = numericLemmatizer(instr)
instr = defaultLemmatizer(instr)
return(instr)
if __name__ == '__main__':
input_str = 'पुंछ में हुई मुठभेड़ के बारे में एक सरकारी अधिकारी ने बताया कि १३वीं सिख लाईट इनफेंट्री द्वारा लश्कर-ए - ताइबा गुट के आतंकियों को नियंत्रण-रेखा पर चुनौती देने पर मुठभेड़ रात ११.४५ बजे शुरू हुई।'
print(lookupLemmatizer(input_str))
print(numericLemmatizer(input_str))
print(defaultLemmatizer(input_str))
print(Lemmatizer(input_str))
| en | 0.575727 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Fri Jun 9 23:28:21 2017 @author: samriddhi | 3.42794 | 3 |
gslib/tests/test_stet_util.py | ttobisawa/gsutil | 0 | 8863 | <reponame>ttobisawa/gsutil<filename>gslib/tests/test_stet_util.py
# -*- coding: utf-8 -*-
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for stet_util.py."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import shutil
from gslib import storage_url
from gslib.tests import testcase
from gslib.tests import util
from gslib.tests.util import unittest
from gslib.utils import execution_util
from gslib.utils import stet_util
import mock
class TestStetUtil(testcase.GsUtilUnitTestCase):
"""Test STET utils."""
@mock.patch.object(execution_util, 'ExecuteExternalCommand')
def test_stet_upload_uses_binary_and_config_from_boto(
self, mock_execute_external_command):
fake_config_path = self.CreateTempFile()
mock_execute_external_command.return_value = ('stdout', 'stderr')
mock_logger = mock.Mock()
source_url = storage_url.StorageUrlFromString('in')
destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')
with util.SetBotoConfigForTest([
('GSUtil', 'stet_binary_path', 'fake_binary_path'),
('GSUtil', 'stet_config_path', fake_config_path),
]):
out_file_url = stet_util.encrypt_upload(source_url, destination_url,
mock_logger)
self.assertEqual(out_file_url,
storage_url.StorageUrlFromString('in_.stet_tmp'))
mock_execute_external_command.assert_called_once_with([
'fake_binary_path',
'encrypt',
'--config-file={}'.format(fake_config_path),
'--blob-id=gs://bucket/obj',
'in',
'in_.stet_tmp',
])
mock_logger.debug.assert_called_once_with('stderr')
@mock.patch.object(execution_util, 'ExecuteExternalCommand')
def test_stet_upload_runs_with_binary_from_path_with_correct_settings(
self, mock_execute_external_command):
fake_config_path = self.CreateTempFile()
temporary_path_directory = self.CreateTempDir()
fake_stet_binary_path = self.CreateTempFile(tmpdir=temporary_path_directory,
file_name='stet')
previous_path = os.getenv('PATH')
os.environ['PATH'] += os.path.pathsep + temporary_path_directory
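    # put the temp dir on PATH so the binary lookup can discover the fake 'stet' executable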
mock_execute_external_command.return_value = ('stdout', 'stderr')
mock_logger = mock.Mock()
source_url = storage_url.StorageUrlFromString('in')
destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')
with util.SetBotoConfigForTest([
('GSUtil', 'stet_binary_path', None),
('GSUtil', 'stet_config_path', fake_config_path),
]):
out_file_url = stet_util.encrypt_upload(source_url, destination_url,
mock_logger)
self.assertEqual(out_file_url,
storage_url.StorageUrlFromString('in_.stet_tmp'))
mock_execute_external_command.assert_called_once_with([
fake_stet_binary_path,
'encrypt',
'--config-file={}'.format(fake_config_path),
'--blob-id=gs://bucket/obj',
'in',
'in_.stet_tmp',
])
mock_logger.debug.assert_called_once_with('stderr')
os.environ['PATH'] = previous_path
@mock.patch.object(execution_util, 'ExecuteExternalCommand')
def test_stet_upload_uses_config_from_default_path_with_correct_settings(
self, mock_execute_external_command):
mock_execute_external_command.return_value = ('stdout', 'stderr')
mock_logger = mock.Mock()
source_url = storage_url.StorageUrlFromString('in')
destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')
with util.SetBotoConfigForTest([
('GSUtil', 'stet_binary_path', 'fake_binary_path'),
('GSUtil', 'stet_config_path', None),
]):
with mock.patch.object(os.path,
'exists',
new=mock.Mock(return_value=True)):
out_file_url = stet_util.encrypt_upload(source_url, destination_url,
mock_logger)
self.assertEqual(out_file_url,
storage_url.StorageUrlFromString('in_.stet_tmp'))
mock_execute_external_command.assert_called_once_with([
'fake_binary_path',
'encrypt',
'--config-file={}'.format(
os.path.expanduser(stet_util.DEFAULT_STET_CONFIG_PATH)),
'--blob-id=gs://bucket/obj',
'in',
'in_.stet_tmp',
])
mock_logger.debug.assert_called_once_with('stderr')
@mock.patch.object(shutil, 'move')
@mock.patch.object(execution_util, 'ExecuteExternalCommand')
def test_stet_download_runs_binary_and_replaces_temp_file(
self, mock_execute_external_command, mock_move):
fake_config_path = self.CreateTempFile()
mock_execute_external_command.return_value = ('stdout', 'stderr')
mock_logger = mock.Mock()
source_url = storage_url.StorageUrlFromString('gs://bucket/obj')
destination_url = storage_url.StorageUrlFromString('out')
with util.SetBotoConfigForTest([
('GSUtil', 'stet_binary_path', 'fake_binary_path'),
('GSUtil', 'stet_config_path', fake_config_path),
]):
stet_util.decrypt_download(source_url, destination_url, mock_logger)
mock_execute_external_command.assert_called_once_with([
'fake_binary_path', 'decrypt',
'--config-file={}'.format(fake_config_path),
'--blob-id=gs://bucket/obj', 'out', 'out_.stet_tmp'
])
mock_logger.debug.assert_called_once_with('stderr')
mock_move.assert_called_once_with('out_.stet_tmp', 'out')
@mock.patch.object(stet_util,
'_get_stet_binary_from_path',
new=mock.Mock(return_value=None))
def test_stet_util_errors_if_no_binary(self):
fake_config_path = self.CreateTempFile()
source_url = storage_url.StorageUrlFromString('in')
destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')
with util.SetBotoConfigForTest([
('GSUtil', 'stet_binary_path', None),
('GSUtil', 'stet_config_path', fake_config_path),
]):
with self.assertRaises(KeyError):
stet_util.encrypt_upload(source_url, destination_url, None)
def test_stet_util_errors_if_no_config(self):
source_url = storage_url.StorageUrlFromString('in')
destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')
with util.SetBotoConfigForTest([
('GSUtil', 'stet_binary_path', 'fake_binary_path'),
('GSUtil', 'stet_config_path', None),
]):
with mock.patch.object(os.path,
'exists',
new=mock.Mock(return_value=False)):
with self.assertRaises(KeyError):
stet_util.encrypt_upload(source_url, destination_url, None)
@mock.patch.object(os.path, 'expanduser', autospec=True)
@mock.patch.object(execution_util,
'ExecuteExternalCommand',
new=mock.Mock(return_value=('stdout', 'stderr')))
def test_stet_util_expands_home_directory_symbol(self, mock_expanduser):
fake_config_path = self.CreateTempFile()
source_url = storage_url.StorageUrlFromString('in')
destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')
with util.SetBotoConfigForTest([
('GSUtil', 'stet_binary_path', 'fake_binary_path'),
('GSUtil', 'stet_config_path', fake_config_path),
]):
stet_util.encrypt_upload(source_url, destination_url, mock.Mock())
mock_expanduser.assert_has_calls(
[mock.call('fake_binary_path'),
mock.call(fake_config_path)])
| # -*- coding: utf-8 -*-
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for stet_util.py."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import shutil
from gslib import storage_url
from gslib.tests import testcase
from gslib.tests import util
from gslib.tests.util import unittest
from gslib.utils import execution_util
from gslib.utils import stet_util
import mock
class TestStetUtil(testcase.GsUtilUnitTestCase):
"""Test STET utils."""
@mock.patch.object(execution_util, 'ExecuteExternalCommand')
def test_stet_upload_uses_binary_and_config_from_boto(
self, mock_execute_external_command):
fake_config_path = self.CreateTempFile()
mock_execute_external_command.return_value = ('stdout', 'stderr')
mock_logger = mock.Mock()
source_url = storage_url.StorageUrlFromString('in')
destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')
with util.SetBotoConfigForTest([
('GSUtil', 'stet_binary_path', 'fake_binary_path'),
('GSUtil', 'stet_config_path', fake_config_path),
]):
out_file_url = stet_util.encrypt_upload(source_url, destination_url,
mock_logger)
self.assertEqual(out_file_url,
storage_url.StorageUrlFromString('in_.stet_tmp'))
mock_execute_external_command.assert_called_once_with([
'fake_binary_path',
'encrypt',
'--config-file={}'.format(fake_config_path),
'--blob-id=gs://bucket/obj',
'in',
'in_.stet_tmp',
])
mock_logger.debug.assert_called_once_with('stderr')
@mock.patch.object(execution_util, 'ExecuteExternalCommand')
def test_stet_upload_runs_with_binary_from_path_with_correct_settings(
self, mock_execute_external_command):
fake_config_path = self.CreateTempFile()
temporary_path_directory = self.CreateTempDir()
fake_stet_binary_path = self.CreateTempFile(tmpdir=temporary_path_directory,
file_name='stet')
previous_path = os.getenv('PATH')
os.environ['PATH'] += os.path.pathsep + temporary_path_directory
mock_execute_external_command.return_value = ('stdout', 'stderr')
mock_logger = mock.Mock()
source_url = storage_url.StorageUrlFromString('in')
destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')
with util.SetBotoConfigForTest([
('GSUtil', 'stet_binary_path', None),
('GSUtil', 'stet_config_path', fake_config_path),
]):
out_file_url = stet_util.encrypt_upload(source_url, destination_url,
mock_logger)
self.assertEqual(out_file_url,
storage_url.StorageUrlFromString('in_.stet_tmp'))
mock_execute_external_command.assert_called_once_with([
fake_stet_binary_path,
'encrypt',
'--config-file={}'.format(fake_config_path),
'--blob-id=gs://bucket/obj',
'in',
'in_.stet_tmp',
])
mock_logger.debug.assert_called_once_with('stderr')
os.environ['PATH'] = previous_path
@mock.patch.object(execution_util, 'ExecuteExternalCommand')
def test_stet_upload_uses_config_from_default_path_with_correct_settings(
self, mock_execute_external_command):
mock_execute_external_command.return_value = ('stdout', 'stderr')
mock_logger = mock.Mock()
source_url = storage_url.StorageUrlFromString('in')
destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')
with util.SetBotoConfigForTest([
('GSUtil', 'stet_binary_path', 'fake_binary_path'),
('GSUtil', 'stet_config_path', None),
]):
with mock.patch.object(os.path,
'exists',
new=mock.Mock(return_value=True)):
out_file_url = stet_util.encrypt_upload(source_url, destination_url,
mock_logger)
self.assertEqual(out_file_url,
storage_url.StorageUrlFromString('in_.stet_tmp'))
mock_execute_external_command.assert_called_once_with([
'fake_binary_path',
'encrypt',
'--config-file={}'.format(
os.path.expanduser(stet_util.DEFAULT_STET_CONFIG_PATH)),
'--blob-id=gs://bucket/obj',
'in',
'in_.stet_tmp',
])
mock_logger.debug.assert_called_once_with('stderr')
@mock.patch.object(shutil, 'move')
@mock.patch.object(execution_util, 'ExecuteExternalCommand')
def test_stet_download_runs_binary_and_replaces_temp_file(
self, mock_execute_external_command, mock_move):
fake_config_path = self.CreateTempFile()
mock_execute_external_command.return_value = ('stdout', 'stderr')
mock_logger = mock.Mock()
source_url = storage_url.StorageUrlFromString('gs://bucket/obj')
destination_url = storage_url.StorageUrlFromString('out')
with util.SetBotoConfigForTest([
('GSUtil', 'stet_binary_path', 'fake_binary_path'),
('GSUtil', 'stet_config_path', fake_config_path),
]):
stet_util.decrypt_download(source_url, destination_url, mock_logger)
mock_execute_external_command.assert_called_once_with([
'fake_binary_path', 'decrypt',
'--config-file={}'.format(fake_config_path),
'--blob-id=gs://bucket/obj', 'out', 'out_.stet_tmp'
])
mock_logger.debug.assert_called_once_with('stderr')
mock_move.assert_called_once_with('out_.stet_tmp', 'out')
@mock.patch.object(stet_util,
'_get_stet_binary_from_path',
new=mock.Mock(return_value=None))
def test_stet_util_errors_if_no_binary(self):
fake_config_path = self.CreateTempFile()
source_url = storage_url.StorageUrlFromString('in')
destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')
with util.SetBotoConfigForTest([
('GSUtil', 'stet_binary_path', None),
('GSUtil', 'stet_config_path', fake_config_path),
]):
with self.assertRaises(KeyError):
stet_util.encrypt_upload(source_url, destination_url, None)
def test_stet_util_errors_if_no_config(self):
source_url = storage_url.StorageUrlFromString('in')
destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')
with util.SetBotoConfigForTest([
('GSUtil', 'stet_binary_path', 'fake_binary_path'),
('GSUtil', 'stet_config_path', None),
]):
with mock.patch.object(os.path,
'exists',
new=mock.Mock(return_value=False)):
with self.assertRaises(KeyError):
stet_util.encrypt_upload(source_url, destination_url, None)
@mock.patch.object(os.path, 'expanduser', autospec=True)
@mock.patch.object(execution_util,
'ExecuteExternalCommand',
new=mock.Mock(return_value=('stdout', 'stderr')))
def test_stet_util_expands_home_directory_symbol(self, mock_expanduser):
fake_config_path = self.CreateTempFile()
source_url = storage_url.StorageUrlFromString('in')
destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')
with util.SetBotoConfigForTest([
('GSUtil', 'stet_binary_path', 'fake_binary_path'),
('GSUtil', 'stet_config_path', fake_config_path),
]):
stet_util.encrypt_upload(source_url, destination_url, mock.Mock())
mock_expanduser.assert_has_calls(
[mock.call('fake_binary_path'),
mock.call(fake_config_path)]) | en | 0.834239 | # -*- coding: utf-8 -*- # Copyright 2021 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests for stet_util.py. Test STET utils. | 1.95908 | 2 |
markdown_editing/tests/test_extension.py | makyo/markdown-editing | 0 | 8864 | <reponame>makyo/markdown-editing<gh_stars>0
from markdown import markdown
from unittest import TestCase
from markdown_editing.extension import EditingExtension
class TestExtension(TestCase):
def test_substitution(self):
source = '~{out with the old}{in with the new}'
expected = '<p><span class="substitution"><del>out with the old</del><ins>in with the new</ins></span></p>'
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
# Only need to test this once.
html = markdown(source, extensions=['markdown_editing'])
self.assertEqual(html, expected)
def test_addition(self):
source = 'foo +{bar} baz +{qux}(yap)'
expected = '<p>foo <ins class="addition">bar</ins> baz <ins class="addition">qux<q class="comment">yap</q></ins></p>'
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_deletion(self):
source = 'foo -{bar} baz -{qux}(yap)'
expected = '<p>foo <del class="deletion">bar</del> baz <del class="deletion">qux<q class="comment">yap</q></del></p>'
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_selected(self):
source = 'foo ?{bar}(qux) baz'
expected = '<p>foo <mark class="selected">bar<q class="comment">qux</q></mark> baz</p>'
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_comments(self):
self.maxDiff = None
source = """
* Substitution: ~{out with the old}{in with the new}
* With comment: ~{out with the old}{in with the new}(is what I always say)
* With attribution: ~{out with the old}{in with the new}(is what I always say (Makyo))
* With date: ~{out with the old}{in with the new}(is what I always say (Makyo 2020-04-21))
* Comment thread: +{Foxes}(More foxes are always good)!{SGTM}
* Comment with attribution: !{SGTM}(Makyo 2020-04-22)
""".strip()
expected = """
<ul>
<li>Substitution: <span class="substitution"><del>out with the old</del><ins>in with the new</ins></span></li>
<li>With comment: <span class="substitution"><del>out with the old</del><ins>in with the new</ins><q class="comment">is what I always say</q></span></li>
<li>With attribution: <span class="substitution"><del>out with the old</del><ins>in with the new</ins><q class="comment">is what I always say<span class="attribution">Makyo</span></q></span></li>
<li>With date: <span class="substitution"><del>out with the old</del><ins>in with the new</ins><q class="comment">is what I always say<span class="attribution">Makyo</span><span class="date">2020-04-21</span></q></span></li>
<li>Comment thread: <ins class="addition">Foxes<q class="comment">More foxes are always good</q></ins><q class="comment">SGTM</q></li>
<li>Comment with attribution: <q class="comment">SGTM<span class="attribution">Makyo</span><span class="date">2020-04-22</span></q></li>
</ul>
""".strip()
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_level(self):
source = """
```
?{Some text}(bad wolf)
```
?{Some text}(bad wolf)
> ?{Some text}(good doggy)
""".strip()
expected = """
<p><code>?{Some text}(bad wolf)</code></p>
<pre><code>?{Some text}(bad wolf)
</code></pre>
<blockquote>
<p><mark class="selected">Some text<q class="comment">good doggy</q></mark></p>
</blockquote>
""".strip()
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_nesting(self):
source = """
?{The only currently working form of nesting}(But what if...!{NO})
""".strip()
expected = """
<p><mark class="selected">The only currently working form of nesting<q class="comment">But what if...<q class="comment">NO</q></q></mark></p>
""".strip()
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_mixed(self):
source = """
+{some *fancy* new stuff}(With a **fancy** comment)
""".strip()
expected = """
<p><ins class="addition">some <em>fancy</em> new stuff<q class="comment">With a <strong>fancy</strong> comment</q></ins></p>
""".strip()
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
| from markdown import markdown
from unittest import TestCase
from markdown_editing.extension import EditingExtension
class TestExtension(TestCase):
def test_substitution(self):
source = '~{out with the old}{in with the new}'
expected = '<p><span class="substitution"><del>out with the old</del><ins>in with the new</ins></span></p>'
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
# Only need to test this once.
html = markdown(source, extensions=['markdown_editing'])
self.assertEqual(html, expected)
def test_addition(self):
source = 'foo +{bar} baz +{qux}(yap)'
expected = '<p>foo <ins class="addition">bar</ins> baz <ins class="addition">qux<q class="comment">yap</q></ins></p>'
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_deletion(self):
source = 'foo -{bar} baz -{qux}(yap)'
expected = '<p>foo <del class="deletion">bar</del> baz <del class="deletion">qux<q class="comment">yap</q></del></p>'
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_selected(self):
source = 'foo ?{bar}(qux) baz'
expected = '<p>foo <mark class="selected">bar<q class="comment">qux</q></mark> baz</p>'
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_comments(self):
self.maxDiff = None
source = """
* Substitution: ~{out with the old}{in with the new}
* With comment: ~{out with the old}{in with the new}(is what I always say)
* With attribution: ~{out with the old}{in with the new}(is what I always say (Makyo))
* With date: ~{out with the old}{in with the new}(is what I always say (Makyo 2020-04-21))
* Comment thread: +{Foxes}(More foxes are always good)!{SGTM}
* Comment with attribution: !{SGTM}(Makyo 2020-04-22)
""".strip()
expected = """
<ul>
<li>Substitution: <span class="substitution"><del>out with the old</del><ins>in with the new</ins></span></li>
<li>With comment: <span class="substitution"><del>out with the old</del><ins>in with the new</ins><q class="comment">is what I always say</q></span></li>
<li>With attribution: <span class="substitution"><del>out with the old</del><ins>in with the new</ins><q class="comment">is what I always say<span class="attribution">Makyo</span></q></span></li>
<li>With date: <span class="substitution"><del>out with the old</del><ins>in with the new</ins><q class="comment">is what I always say<span class="attribution">Makyo</span><span class="date">2020-04-21</span></q></span></li>
<li>Comment thread: <ins class="addition">Foxes<q class="comment">More foxes are always good</q></ins><q class="comment">SGTM</q></li>
<li>Comment with attribution: <q class="comment">SGTM<span class="attribution">Makyo</span><span class="date">2020-04-22</span></q></li>
</ul>
""".strip()
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_level(self):
source = """
```
?{Some text}(bad wolf)
```
?{Some text}(bad wolf)
> ?{Some text}(good doggy)
""".strip()
expected = """
<p><code>?{Some text}(bad wolf)</code></p>
<pre><code>?{Some text}(bad wolf)
</code></pre>
<blockquote>
<p><mark class="selected">Some text<q class="comment">good doggy</q></mark></p>
</blockquote>
""".strip()
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_nesting(self):
source = """
?{The only currently working form of nesting}(But what if...!{NO})
""".strip()
expected = """
<p><mark class="selected">The only currently working form of nesting<q class="comment">But what if...<q class="comment">NO</q></q></mark></p>
""".strip()
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_mixed(self):
source = """
+{some *fancy* new stuff}(With a **fancy** comment)
""".strip()
expected = """
<p><ins class="addition">some <em>fancy</em> new stuff<q class="comment">With a <strong>fancy</strong> comment</q></ins></p>
""".strip()
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected) | en | 0.601168 | # Only need to test this once. * Substitution: ~{out with the old}{in with the new} * With comment: ~{out with the old}{in with the new}(is what I always say) * With attribution: ~{out with the old}{in with the new}(is what I always say (Makyo)) * With date: ~{out with the old}{in with the new}(is what I always say (Makyo 2020-04-21)) * Comment thread: +{Foxes}(More foxes are always good)!{SGTM} * Comment with attribution: !{SGTM}(Makyo 2020-04-22) <ul> <li>Substitution: <span class="substitution"><del>out with the old</del><ins>in with the new</ins></span></li> <li>With comment: <span class="substitution"><del>out with the old</del><ins>in with the new</ins><q class="comment">is what I always say</q></span></li> <li>With attribution: <span class="substitution"><del>out with the old</del><ins>in with the new</ins><q class="comment">is what I always say<span class="attribution">Makyo</span></q></span></li> <li>With date: <span class="substitution"><del>out with the old</del><ins>in with the new</ins><q class="comment">is what I always say<span class="attribution">Makyo</span><span class="date">2020-04-21</span></q></span></li> <li>Comment thread: <ins class="addition">Foxes<q class="comment">More foxes are always good</q></ins><q class="comment">SGTM</q></li> <li>Comment with attribution: <q class="comment">SGTM<span class="attribution">Makyo</span><span class="date">2020-04-22</span></q></li> </ul> ``` ?{Some text}(bad wolf) ``` ?{Some text}(bad wolf) > ?{Some text}(good doggy) <p><code>?{Some text}(bad wolf)</code></p> <pre><code>?{Some text}(bad wolf) </code></pre> <blockquote> <p><mark class="selected">Some text<q class="comment">good doggy</q></mark></p> </blockquote> ?{The only currently working form of nesting}(But what if...!{NO}) <p><mark class="selected">The only currently working form of nesting<q class="comment">But what if...<q class="comment">NO</q></q></mark></p> +{some *fancy* new stuff}(With a **fancy** comment) <p><ins class="addition">some <em>fancy</em> new stuff<q class="comment">With a <strong>fancy</strong> comment</q></ins></p> | 2.776137 | 3 |
apps/siren/test_handlers.py | thomasyi17/diana2 | 15 | 8865 | """
SIREN/DIANA basic functionality testing framework
Requires env vars:
- GMAIL_USER
- GMAIL_APP_PASSWORD
- GMAIL_BASE_NAME -- ie, abc -> <EMAIL>
These env vars are set to default:
- ORTHANC_PASSWORD
- SPLUNK_PASSWORD
- SPLUNK_HEC_TOKEN
TODO: Move stuff to archive after collected
TODO: Write data into daily folder or something from mi-share ingress
TODO: Suppress dicom-simplify missing (series) creation time
"""
import os
import time
import logging
import shutil
import io
import tempfile
from pathlib import Path
from pprint import pformat
from contextlib import redirect_stdout
from multiprocessing import Process
from datetime import datetime, timedelta
from interruptingcow import timeout
from crud.manager import EndpointManager
from crud.abc import Watcher, Trigger
from crud.endpoints import Splunk
from wuphf.endpoints import SmtpMessenger
from diana.apis import Orthanc, ObservableOrthanc, DcmDir, ObservableDcmDir
from diana.dixel import Dixel, ShamDixel
from diana.utils.dicom import DicomLevel as DLv, DicomEventType as DEv
from wuphf.cli.string_descs import *
from diana.utils import unpack_data
from crud.utils import deserialize_dict
from diana.utils.gateways import suppress_urllib_debug
from diana.utils.endpoint.watcher import suppress_watcher_debug
from handlers import handle_upload_dir, handle_upload_zip, handle_notify_study, \
handle_file_arrived, start_watcher, tagged_studies
from trial_dispatcher import TrialDispatcher as Dispatcher
LOCAL_SERVICES = False # Set False to use UMich services
USE_GMAIL = True # Set False to use UMich smtp
DO_DIR_UPLOAD = False
CHECK_SPLUNK = False # Set False to skip long wait for dixel to index
CHECK_WATCH_STUDIES= False # Set False to skip long wait for orthanc watcher
EMAIL_DRYRUN = False # Set False to send live emails
# CONFIG
_services = "@services.yaml"
_subscriptions = "@subscriptions.yaml"
os.environ["SPLUNK_INDEX"] = "testing"
SMTP_MESSENGER_NAME = "smtp_server"
if LOCAL_SERVICES:
    # Set everything back to default
os.environ["UMICH_HOST"] = "localhost" # For testing
del os.environ["ORTHANC_USER"]
del os.environ["ORTHANC_PASSWORD"]
del os.environ["SPLUNK_USER"]
del os.environ["SPLUNK_PASSWORD"]
if USE_GMAIL:
SMTP_MESSENGER_NAME = "gmail:"
test_email_addr1 = "<EMAIL>"
#test_email_addr1 = "<EMAIL>"
#test_email_addr1 = os.environ.get("TEST_EMAIL_ADDR1")
# os.environ["TEST_GMAIL_BASE"] = test_email_addr1.split("@")[0]
anon_salt = "Test+Test+Test"
fkey = b'o-KzB3u1a_Vlb8Ji1CdyfTFpZ2FvdsPK4yQCRzFCcss='
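# Note (added for clarity): fkey appears to be a Fernet-style symmetric key used by
# pack_fields()/unpack_data() to encrypt and decrypt the "signature" metadata attached
# to anonymized studies; it is a test fixture only, not a production secret.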
msg_t = """to: {{ recipient.email }}\nfrom: {{ from_addr }}\nsubject: Test Message\n\nThis is the message text: "{{ item.msg_text }}"\n"""
notify_msg_t = "@./notify.txt.j2"
# TESTING CONfIG
test_sample_zip = os.path.abspath("../../tests/resources/dcm_zip/test.zip")
test_sample_file = os.path.abspath("../../tests/resources/dcm/IM2263")
test_sample_dir = os.path.expanduser("~/data/test") # Need to dl separately
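# Illustrative sketch (not part of the original suite): one way to fail fast when the
# credentials listed in the module docstring are missing. The env var names mirror the
# ones documented above; the helper itself is an assumption, not existing project API.
def _check_required_env():
    required = ["GMAIL_USER", "GMAIL_APP_PASSWORD", "GMAIL_BASE_NAME"]
    missing = [name for name in required if not os.environ.get(name)]
    if missing:
        raise RuntimeError("Missing required env vars: " + ", ".join(missing))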
# TESTS
def test_upload_one(orth: Orthanc, dixel: Dixel):
print("Testing can upload")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
orth.put(dixel)
assert (len(orth.studies()) > 0)
assert (orth.exists(dixel))
print("Passed!")
return True
def test_anonymize_one(orth: Orthanc, dixel: Dixel):
print("Testing can anonymize, tag, and untag")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
orth.put(dixel)
anon = ShamDixel.from_dixel(dixel, salt=anon_salt)
afile = orth.anonymize(anon, replacement_map=anon.orthanc_sham_map())
anon.file = afile
orth.put(anon)
orth.putm(anon.sham_parent_oid(DLv.STUDIES),
level=DLv.STUDIES,
key="signature",
value=anon.pack_fields(fkey))
assert (len(orth.studies()) == 2)
orth.delete(dixel)
assert (len(orth.studies()) == 1)
oid = orth.studies()[0]
test = orth.get(oid)
assert( test.tags["PatientName"] == anon.meta["ShamName"] )
enc = orth.getm(test, key="signature")
tags = unpack_data(enc, fkey)
assert( tags["PatientName"] in dixel.tags["PatientName"] )
print("Passed!")
return True
def test_index_one( splunk: Splunk, dixel: Dixel, check_exists=CHECK_SPLUNK ):
print("Testing can index")
splunk.put(dixel, index=os.environ.get("SPLUNK_INDEX"))
if check_exists:
print("Waiting for 1 min to index")
time.sleep(60)
time_range = [
datetime.now()-timedelta(minutes=2),
datetime.now()
]
r = splunk.find("search index=testing", time_range=time_range)
logging.debug(r)
assert( len(r) > 0 )
print("Passed")
return True
def test_email_messenger( messenger: SmtpMessenger, dryrun=EMAIL_DRYRUN ):
print("Testing can email from template")
outgoing = "The quick brown fox jumped over the lazy dog"
data = {"item": {"msg_text": outgoing},
"recipient": {"email": test_email_addr1}}
msg = messenger.get(data, target=test_email_addr1)
assert( test_email_addr1 in msg )
assert( outgoing in msg )
if not dryrun:
messenger.send(data, target=test_email_addr1)
print("Passed!")
return True
def test_distribute( subscriptions, messenger: SmtpMessenger ):
print("Testing can dispatch")
ch, subs = deserialize_dict(subscriptions)
dispatch = Dispatcher(channel_tags=ch)
dispatch.add_subscribers(subs)
messenger.set_msg_t(notify_msg_t)
dispatch.email_messenger = messenger
logging.debug(pformat(dispatch.subscribers))
data = {"tags": {"AccessionNumber": "ABC123",
"PatientName": "DOE^JOHN^S"},
"meta": {"signature":
{"trial": "hobit",
"site": "duke"}
}
}
sent = dispatch.put(data, dryrun=EMAIL_DRYRUN)
data["meta"]["signature"]["site"] = "detroit"
sent += dispatch.put(data, dryrun=EMAIL_DRYRUN)
print(sent)
msgs = [x['msg'] for x in sent]
msgs = "\n".join(msgs)
# logging.debug(pformat(msgs))
assert( "SIREN/HOBIT" in msgs )
    assert( "<EMAIL>" in msgs )
assert( 'subject jacket for "DOE^JOHN^S"' in msgs )
print("Passed!")
return True
def test_upload_dir_handler(dcm_dir: DcmDir, orth: Orthanc):
print("Testing can upload dir w handler")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
handle_upload_dir(dcm_dir, orth, fkey, anon_salt=anon_salt)
assert (len(orth.instances()) > 20)
print("Passed!")
return True
def test_upload_zip_handler(zip_file, orth: Orthanc):
print("Testing can upload zip w handler")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
handle_upload_zip(DcmDir(), zip_file, orth, fkey, anon_salt=anon_salt)
assert (len(orth.instances()) > 1)
print("Passed!")
return True
def test_file_arrived_handler(dcm_file, zip_file, orth: Orthanc):
print("Testing can handle file arrived")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
watch_path = tempfile.mkdtemp()
site_path = os.path.join(watch_path, "my_trial", "my_site")
os.makedirs(site_path)
shutil.copy(zip_file, site_path)
data = {"fn": os.path.join( site_path, Path(zip_file).name )}
handle_file_arrived(data, DcmDir(path=watch_path), orth,
fkey=fkey, anon_salt=anon_salt, signature_meta_key="signature")
assert (len(orth.instances()) > 1)
oid = orth.studies()[0]
data = orth.getm(oid, key="signature")
clear = unpack_data(data, fkey)
print(pformat(clear))
assert(clear["trial"] == "my_trial")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
shutil.copy(dcm_file, site_path)
data = {"fn": os.path.join(site_path, Path(dcm_file).name)}
handle_file_arrived(data, DcmDir(path=watch_path), orth,
fkey=fkey, anon_salt=anon_salt, signature_meta_key="signature")
assert (len(orth.instances()) == 1)
time.sleep(1.0)
oid = orth.studies()[0]
data = orth.getm(oid, key="signature")
clear = unpack_data(data, fkey)
print(pformat(clear))
assert(clear["trial"] == "my_trial")
orth.clear()
assert (len(orth.studies()) == 0)
shutil.rmtree(watch_path, ignore_errors=True)
print("Passed!")
return True
def test_notify_handler(dixel, orth: Orthanc,
subscriptions, messenger: SmtpMessenger,
indexer: Splunk, dryrun=EMAIL_DRYRUN):
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
orth.put(dixel)
dixel.meta["trial"] = "hobit"
dixel.meta["site"] = "testing"
orth.putm(dixel.parent_oid(DLv.STUDIES),
level=DLv.STUDIES,
key="signature",
value=dixel.pack_fields(fkey, fields=["trial", "site"]))
ch, subs = deserialize_dict(subscriptions)
dispatch = Dispatcher(
channel_tags=ch
)
dispatch.add_subscribers(subs)
messenger.set_msg_t(notify_msg_t)
dispatch.email_messenger = messenger
data = {"oid": dixel.parent_oid(DLv.STUDIES)}
handle_notify_study(data, source=orth,
dispatcher=dispatch, dryrun=dryrun,
                        indexer=indexer, index_name=os.environ.get("SPLUNK_INDEX"),
fkey=fkey)
print("Passed!")
return True
def test_watch_orthanc(test_dixel, orth: ObservableOrthanc):
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
watcher = Watcher()
trigger0 = Trigger(
evtype=DEv.INSTANCE_ADDED,
source=orth,
action=orth.say)
watcher.add_trigger(trigger0)
trigger1 = Trigger(
evtype=DEv.STUDY_ADDED,
source=orth,
action=orth.say)
watcher.add_trigger(trigger1)
def runner():
"""Pause to start watcher and then copy sample file to incoming"""
time.sleep(1.0)
orth.put(test_dixel)
p = Process(target=runner)
p.start()
f = io.StringIO()
print("Starting watcher")
with redirect_stdout(f):
print("In capture")
try:
with timeout(5): # Give it a little time to say the instance
watcher.run()
except RuntimeError:
print("Stopping watcher")
finally:
watcher.stop()
out = f.getvalue()
print("Watcher output:")
print(out)
if dixel.oid() in out:
print("Passed!")
return True
def test_watch_dir(test_file):
watch_path = tempfile.mkdtemp()
site_path = os.path.join(watch_path, "my_trial", "my_site")
os.makedirs(site_path)
dcm_dir = ObservableDcmDir(path=watch_path)
watcher = Watcher()
trigger = Trigger(
evtype=DEv.FILE_ADDED,
source=dcm_dir,
action=dcm_dir.say)
watcher.add_trigger(trigger)
def runner():
"""Pause to start watcher and then copy sample file to incoming"""
time.sleep(1.0)
shutil.copy(test_file, site_path)
p = Process(target=runner)
p.start()
f = io.StringIO()
print("Starting watcher")
with redirect_stdout(f):
print("In capture")
try:
with timeout(5): # Give it a little time to say the filename
watcher.run()
except RuntimeError:
print("Stopping watcher")
finally:
watcher.stop()
out = f.getvalue()
print("Watcher output:")
print(out)
shutil.rmtree(watch_path, ignore_errors=True)
from pathlib import Path
if Path(test_file).name in out:
print("Passed!")
return True
def test_siren_receiver(test_file, orth: Orthanc,
subscriptions, messenger: SmtpMessenger,
indexer: Splunk, dryrun=EMAIL_DRYRUN):
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
ch, subs = deserialize_dict(subscriptions)
dispatch = Dispatcher(
channel_tags=ch
)
dispatch.add_subscribers(subs)
messenger.set_msg_t(notify_msg_t)
dispatch.email_messenger = messenger
watch_path = tempfile.mkdtemp()
site_path = os.path.join(watch_path, "hobit", "testing")
os.makedirs(site_path)
incoming = ObservableDcmDir(path=watch_path)
def runner():
"""Pause to start watcher and then copy sample file to incoming/trial/site"""
time.sleep(1.0)
shutil.copy(test_file, site_path)
p = Process(target=runner)
p.start()
f = io.StringIO()
print("Starting SIREN Receiver")
with redirect_stdout(f):
print("In capture")
try:
with timeout(90): # Give it a little time for the study to settle
watcher = start_watcher(
incoming,
orth,
fkey=fkey,
anon_salt=anon_salt,
dispatcher=dispatch,
dryrun=dryrun,
indexer=indexer,
index_name=os.environ.get("SPLUNK_INDEX")
)
except RuntimeError:
print("Stopping watcher subprocess")
out = f.getvalue()
print("SIREN Reciever output:")
print(out)
shutil.rmtree(watch_path, ignore_errors=True)
return True
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
suppress_urllib_debug()
suppress_watcher_debug()
# Create service endpoints
services = EndpointManager(serialized_ep_descs=_services)
print(pformat(services.ep_descs))
orth: ObservableOrthanc = services.get("hobit")
orth.polling_interval = 2.0
messenger: SmtpMessenger = services.get(SMTP_MESSENGER_NAME)
messenger.msg_t = msg_t
splunk: Splunk = services.get("splunk")
dcm_dir = DcmDir(path=test_sample_dir)
# Load a dixel
dixel = dcm_dir.get("HOBIT1172/IM0", file=True)
# assert( dixel )
# assert( dixel.file )
#
# # Verify that all endpoints are online
# assert( orth.check() )
# assert( messenger.check() )
# assert( splunk.check() )
#
# # Verify basic capabilities:
# # - upload
# # - anonymize
# # - index
# # - message
# # - distribute
#
# assert( test_upload_one(orth, dixel) )
# assert( test_anonymize_one(orth, dixel) )
# assert( test_index_one(splunk, dixel) )
assert( test_email_messenger(messenger) )
# assert( test_distribute(_subscriptions, messenger) )
exit()
# Verify observer daemons:
# - watch dir
# - watch orth
assert( test_watch_dir(test_sample_file) )
assert( test_watch_orthanc(dixel, orth) )
# Verify handlers:
# - directory
# - zip
# - file
# - notify
if DO_DIR_UPLOAD:
assert( test_upload_dir_handler(dcm_dir, orth) )
assert( test_upload_zip_handler(test_sample_zip, orth) )
assert( test_file_arrived_handler(test_sample_file, test_sample_zip, orth) )
assert( test_notify_handler(dixel, orth, _subscriptions, messenger, splunk) )
# Verify watcher pipeline
# - run watcher
assert( test_siren_receiver(test_sample_file, orth, _subscriptions, messenger, splunk) )
| """
SIREN/DIANA basic functionality testing framework
Requires env vars:
- GMAIL_USER
- GMAIL_APP_PASSWORD
- GMAIL_BASE_NAME -- ie, abc -> <EMAIL>
These env vars are set to default:
- ORTHANC_PASSWORD
- SPLUNK_PASSWORD
- SPLUNK_HEC_TOKEN
TODO: Move stuff to archive after collected
TODO: Write data into daily folder or something from mi-share ingress
TODO: Suppress dicom-simplify missing (series) creation time
"""
import os
import time
import logging
import shutil
import io
import tempfile
from pathlib import Path
from pprint import pformat
from contextlib import redirect_stdout
from multiprocessing import Process
from datetime import datetime, timedelta
from interruptingcow import timeout
from crud.manager import EndpointManager
from crud.abc import Watcher, Trigger
from crud.endpoints import Splunk
from wuphf.endpoints import SmtpMessenger
from diana.apis import Orthanc, ObservableOrthanc, DcmDir, ObservableDcmDir
from diana.dixel import Dixel, ShamDixel
from diana.utils.dicom import DicomLevel as DLv, DicomEventType as DEv
from wuphf.cli.string_descs import *
from diana.utils import unpack_data
from crud.utils import deserialize_dict
from diana.utils.gateways import suppress_urllib_debug
from diana.utils.endpoint.watcher import suppress_watcher_debug
from handlers import handle_upload_dir, handle_upload_zip, handle_notify_study, \
handle_file_arrived, start_watcher, tagged_studies
from trial_dispatcher import TrialDispatcher as Dispatcher
LOCAL_SERVICES = False # Set False to use UMich services
USE_GMAIL = True # Set False to use UMich smtp
DO_DIR_UPLOAD = False
CHECK_SPLUNK = False # Set False to skip long wait for dixel to index
CHECK_WATCH_STUDIES= False # Set False to skip long wait for orthanc watcher
EMAIL_DRYRUN = False # Set False to send live emails
# CONFIG
_services = "@services.yaml"
_subscriptions = "@subscriptions.yaml"
os.environ["SPLUNK_INDEX"] = "testing"
SMTP_MESSENGER_NAME = "smtp_server"
if LOCAL_SERVICES:
    # Set everything back to default
os.environ["UMICH_HOST"] = "localhost" # For testing
del os.environ["ORTHANC_USER"]
del os.environ["ORTHANC_PASSWORD"]
del os.environ["SPLUNK_USER"]
del os.environ["SPLUNK_PASSWORD"]
if USE_GMAIL:
SMTP_MESSENGER_NAME = "gmail:"
test_email_addr1 = "<EMAIL>"
#test_email_addr1 = "<EMAIL>"
#test_email_addr1 = os.environ.get("TEST_EMAIL_ADDR1")
# os.environ["TEST_GMAIL_BASE"] = test_email_addr1.split("@")[0]
anon_salt = "Test+Test+Test"
fkey = b'o-KzB3u1a_Vlb8Ji1CdyfTFpZ2FvdsPK4yQCRzFCcss='
msg_t = """to: {{ recipient.email }}\nfrom: {{ from_addr }}\nsubject: Test Message\n\nThis is the message text: "{{ item.msg_text }}"\n"""
notify_msg_t = "@./notify.txt.j2"
# TESTING CONfIG
test_sample_zip = os.path.abspath("../../tests/resources/dcm_zip/test.zip")
test_sample_file = os.path.abspath("../../tests/resources/dcm/IM2263")
test_sample_dir = os.path.expanduser("~/data/test") # Need to dl separately
# TESTS
def test_upload_one(orth: Orthanc, dixel: Dixel):
print("Testing can upload")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
orth.put(dixel)
assert (len(orth.studies()) > 0)
assert (orth.exists(dixel))
print("Passed!")
return True
def test_anonymize_one(orth: Orthanc, dixel: Dixel):
print("Testing can anonymize, tag, and untag")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
orth.put(dixel)
anon = ShamDixel.from_dixel(dixel, salt=anon_salt)
afile = orth.anonymize(anon, replacement_map=anon.orthanc_sham_map())
anon.file = afile
orth.put(anon)
orth.putm(anon.sham_parent_oid(DLv.STUDIES),
level=DLv.STUDIES,
key="signature",
value=anon.pack_fields(fkey))
assert (len(orth.studies()) == 2)
orth.delete(dixel)
assert (len(orth.studies()) == 1)
oid = orth.studies()[0]
test = orth.get(oid)
assert( test.tags["PatientName"] == anon.meta["ShamName"] )
enc = orth.getm(test, key="signature")
tags = unpack_data(enc, fkey)
assert( tags["PatientName"] in dixel.tags["PatientName"] )
print("Passed!")
return True
def test_index_one( splunk: Splunk, dixel: Dixel, check_exists=CHECK_SPLUNK ):
print("Testing can index")
splunk.put(dixel, index=os.environ.get("SPLUNK_INDEX"))
if check_exists:
print("Waiting for 1 min to index")
time.sleep(60)
time_range = [
datetime.now()-timedelta(minutes=2),
datetime.now()
]
r = splunk.find("search index=testing", time_range=time_range)
logging.debug(r)
assert( len(r) > 0 )
print("Passed")
return True
def test_email_messenger( messenger: SmtpMessenger, dryrun=EMAIL_DRYRUN ):
print("Testing can email from template")
outgoing = "The quick brown fox jumped over the lazy dog"
data = {"item": {"msg_text": outgoing},
"recipient": {"email": test_email_addr1}}
msg = messenger.get(data, target=test_email_addr1)
assert( test_email_addr1 in msg )
assert( outgoing in msg )
if not dryrun:
messenger.send(data, target=test_email_addr1)
print("Passed!")
return True
def test_distribute( subscriptions, messenger: SmtpMessenger ):
print("Testing can dispatch")
ch, subs = deserialize_dict(subscriptions)
dispatch = Dispatcher(channel_tags=ch)
dispatch.add_subscribers(subs)
messenger.set_msg_t(notify_msg_t)
dispatch.email_messenger = messenger
logging.debug(pformat(dispatch.subscribers))
data = {"tags": {"AccessionNumber": "ABC123",
"PatientName": "DOE^JOHN^S"},
"meta": {"signature":
{"trial": "hobit",
"site": "duke"}
}
}
sent = dispatch.put(data, dryrun=EMAIL_DRYRUN)
data["meta"]["signature"]["site"] = "detroit"
sent += dispatch.put(data, dryrun=EMAIL_DRYRUN)
print(sent)
msgs = [x['msg'] for x in sent]
msgs = "\n".join(msgs)
# logging.debug(pformat(msgs))
assert( "SIREN/HOBIT" in msgs )
    assert( "<EMAIL>" in msgs )
assert( 'subject jacket for "DOE^JOHN^S"' in msgs )
print("Passed!")
return True
def test_upload_dir_handler(dcm_dir: DcmDir, orth: Orthanc):
print("Testing can upload dir w handler")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
handle_upload_dir(dcm_dir, orth, fkey, anon_salt=anon_salt)
assert (len(orth.instances()) > 20)
print("Passed!")
return True
def test_upload_zip_handler(zip_file, orth: Orthanc):
print("Testing can upload zip w handler")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
handle_upload_zip(DcmDir(), zip_file, orth, fkey, anon_salt=anon_salt)
assert (len(orth.instances()) > 1)
print("Passed!")
return True
def test_file_arrived_handler(dcm_file, zip_file, orth: Orthanc):
print("Testing can handle file arrived")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
watch_path = tempfile.mkdtemp()
site_path = os.path.join(watch_path, "my_trial", "my_site")
os.makedirs(site_path)
shutil.copy(zip_file, site_path)
data = {"fn": os.path.join( site_path, Path(zip_file).name )}
handle_file_arrived(data, DcmDir(path=watch_path), orth,
fkey=fkey, anon_salt=anon_salt, signature_meta_key="signature")
assert (len(orth.instances()) > 1)
oid = orth.studies()[0]
data = orth.getm(oid, key="signature")
clear = unpack_data(data, fkey)
print(pformat(clear))
assert(clear["trial"] == "my_trial")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
shutil.copy(dcm_file, site_path)
data = {"fn": os.path.join(site_path, Path(dcm_file).name)}
handle_file_arrived(data, DcmDir(path=watch_path), orth,
fkey=fkey, anon_salt=anon_salt, signature_meta_key="signature")
assert (len(orth.instances()) == 1)
time.sleep(1.0)
oid = orth.studies()[0]
data = orth.getm(oid, key="signature")
clear = unpack_data(data, fkey)
print(pformat(clear))
assert(clear["trial"] == "my_trial")
orth.clear()
assert (len(orth.studies()) == 0)
shutil.rmtree(watch_path, ignore_errors=True)
print("Passed!")
return True
def test_notify_handler(dixel, orth: Orthanc,
subscriptions, messenger: SmtpMessenger,
indexer: Splunk, dryrun=EMAIL_DRYRUN):
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
orth.put(dixel)
dixel.meta["trial"] = "hobit"
dixel.meta["site"] = "testing"
orth.putm(dixel.parent_oid(DLv.STUDIES),
level=DLv.STUDIES,
key="signature",
value=dixel.pack_fields(fkey, fields=["trial", "site"]))
ch, subs = deserialize_dict(subscriptions)
dispatch = Dispatcher(
channel_tags=ch
)
dispatch.add_subscribers(subs)
messenger.set_msg_t(notify_msg_t)
dispatch.email_messenger = messenger
data = {"oid": dixel.parent_oid(DLv.STUDIES)}
handle_notify_study(data, source=orth,
dispatcher=dispatch, dryrun=dryrun,
                        indexer=indexer, index_name=os.environ.get("SPLUNK_INDEX"),
fkey=fkey)
print("Passed!")
return True
def test_watch_orthanc(test_dixel, orth: ObservableOrthanc):
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
watcher = Watcher()
trigger0 = Trigger(
evtype=DEv.INSTANCE_ADDED,
source=orth,
action=orth.say)
watcher.add_trigger(trigger0)
trigger1 = Trigger(
evtype=DEv.STUDY_ADDED,
source=orth,
action=orth.say)
watcher.add_trigger(trigger1)
def runner():
"""Pause to start watcher and then copy sample file to incoming"""
time.sleep(1.0)
orth.put(test_dixel)
p = Process(target=runner)
p.start()
f = io.StringIO()
print("Starting watcher")
with redirect_stdout(f):
print("In capture")
try:
with timeout(5): # Give it a little time to say the instance
watcher.run()
except RuntimeError:
print("Stopping watcher")
finally:
watcher.stop()
out = f.getvalue()
print("Watcher output:")
print(out)
if dixel.oid() in out:
print("Passed!")
return True
def test_watch_dir(test_file):
watch_path = tempfile.mkdtemp()
site_path = os.path.join(watch_path, "my_trial", "my_site")
os.makedirs(site_path)
dcm_dir = ObservableDcmDir(path=watch_path)
watcher = Watcher()
trigger = Trigger(
evtype=DEv.FILE_ADDED,
source=dcm_dir,
action=dcm_dir.say)
watcher.add_trigger(trigger)
def runner():
"""Pause to start watcher and then copy sample file to incoming"""
time.sleep(1.0)
shutil.copy(test_file, site_path)
p = Process(target=runner)
p.start()
f = io.StringIO()
print("Starting watcher")
with redirect_stdout(f):
print("In capture")
try:
with timeout(5): # Give it a little time to say the filename
watcher.run()
except RuntimeError:
print("Stopping watcher")
finally:
watcher.stop()
out = f.getvalue()
print("Watcher output:")
print(out)
shutil.rmtree(watch_path, ignore_errors=True)
from pathlib import Path
if Path(test_file).name in out:
print("Passed!")
return True
def test_siren_receiver(test_file, orth: Orthanc,
subscriptions, messenger: SmtpMessenger,
indexer: Splunk, dryrun=EMAIL_DRYRUN):
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
ch, subs = deserialize_dict(subscriptions)
dispatch = Dispatcher(
channel_tags=ch
)
dispatch.add_subscribers(subs)
messenger.set_msg_t(notify_msg_t)
dispatch.email_messenger = messenger
watch_path = tempfile.mkdtemp()
site_path = os.path.join(watch_path, "hobit", "testing")
os.makedirs(site_path)
incoming = ObservableDcmDir(path=watch_path)
def runner():
"""Pause to start watcher and then copy sample file to incoming/trial/site"""
time.sleep(1.0)
shutil.copy(test_file, site_path)
p = Process(target=runner)
p.start()
f = io.StringIO()
print("Starting SIREN Receiver")
with redirect_stdout(f):
print("In capture")
try:
with timeout(90): # Give it a little time for the study to settle
watcher = start_watcher(
incoming,
orth,
fkey=fkey,
anon_salt=anon_salt,
dispatcher=dispatch,
dryrun=dryrun,
indexer=indexer,
index_name=os.environ.get("SPLUNK_INDEX")
)
except RuntimeError:
print("Stopping watcher subprocess")
out = f.getvalue()
print("SIREN Reciever output:")
print(out)
shutil.rmtree(watch_path, ignore_errors=True)
return True
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
suppress_urllib_debug()
suppress_watcher_debug()
# Create service endpoints
services = EndpointManager(serialized_ep_descs=_services)
print(pformat(services.ep_descs))
orth: ObservableOrthanc = services.get("hobit")
orth.polling_interval = 2.0
messenger: SmtpMessenger = services.get(SMTP_MESSENGER_NAME)
messenger.msg_t = msg_t
splunk: Splunk = services.get("splunk")
dcm_dir = DcmDir(path=test_sample_dir)
# Load a dixel
dixel = dcm_dir.get("HOBIT1172/IM0", file=True)
# assert( dixel )
# assert( dixel.file )
#
# # Verify that all endpoints are online
# assert( orth.check() )
# assert( messenger.check() )
# assert( splunk.check() )
#
# # Verify basic capabilities:
# # - upload
# # - anonymize
# # - index
# # - message
# # - distribute
#
# assert( test_upload_one(orth, dixel) )
# assert( test_anonymize_one(orth, dixel) )
# assert( test_index_one(splunk, dixel) )
assert( test_email_messenger(messenger) )
# assert( test_distribute(_subscriptions, messenger) )
exit()
# Verify observer daemons:
# - watch dir
# - watch orth
assert( test_watch_dir(test_sample_file) )
assert( test_watch_orthanc(dixel, orth) )
# Verify handlers:
# - directory
# - zip
# - file
# - notify
if DO_DIR_UPLOAD:
assert( test_upload_dir_handler(dcm_dir, orth) )
assert( test_upload_zip_handler(test_sample_zip, orth) )
assert( test_file_arrived_handler(test_sample_file, test_sample_zip, orth) )
assert( test_notify_handler(dixel, orth, _subscriptions, messenger, splunk) )
# Verify watcher pipeline
# - run watcher
assert( test_siren_receiver(test_sample_file, orth, _subscriptions, messenger, splunk) )
| en | 0.576554 | SIREN/DIANA basic functionality testing framework Requires env vars: - GMAIL_USER - GMAIL_APP_PASSWORD - GMAIL_BASE_NAME -- ie, abc -> <EMAIL> These env vars are set to default: - ORTHANC_PASSWORD - SPLUNK_PASSWORD - SPLUNK_HEC_TOKEN TODO: Move stuff to archive after collected TODO: Write data into daily folder or something from mi-share ingress TODO: Suppress dicom-simplify missing (series) creation time # Set False to use UMich services # Set False to use UMich smtp # Set False to skip long wait for dixel to index # Set False to skip long wait for orthanc watcher # Set False to send live emails # CONFIG # Set everythin back to default # For testing #test_email_addr1 = "<EMAIL>" #test_email_addr1 = os.environ.get("TEST_EMAIL_ADDR1") # os.environ["TEST_GMAIL_BASE"] = test_email_addr1.split("@")[0] to: {{ recipient.email }}\nfrom: {{ from_addr }}\nsubject: Test Message\n\nThis is the message text: "{{ item.msg_text }}"\n # TESTING CONfIG # Need to dl separately # TESTS # logging.debug(pformat(msgs)) Pause to start watcher and then copy sample file to incoming # Give it a little time to say the instance Pause to start watcher and then copy sample file to incoming # Give it a little time to say the filename Pause to start watcher and then copy sample file to incoming/trial/site # Give it a little time for the study to settle # Create service endpoints # Load a dixel # assert( dixel ) # assert( dixel.file ) # # # Verify that all endpoints are online # assert( orth.check() ) # assert( messenger.check() ) # assert( splunk.check() ) # # # Verify basic capabilities: # # - upload # # - anonymize # # - index # # - message # # - distribute # # assert( test_upload_one(orth, dixel) ) # assert( test_anonymize_one(orth, dixel) ) # assert( test_index_one(splunk, dixel) ) # assert( test_distribute(_subscriptions, messenger) ) # Verify observer daemons: # - watch dir # - watch orth # Verify handlers: # - directory # - zip # - file # - notify # Verify watcher pipeline # - run watcher | 1.641462 | 2 |
deptree.py | jeking3/boost-deptree | 0 | 8866 | <gh_stars>0
#
# Copyright (c) 2019 <NAME>
#
# Use, modification, and distribution are subject to the
# Boost Software License, Version 1.0. (See accompanying file
# LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt)
#
import json
import networkx
import re
from pathlib import Path
class BoostDependencyTree(object):
"""
Generates a PlantUML dependency tree to visualize the dependencies.
One of the benefits of generating a visual graph is that cycles become
immediately evident.
"""
EDGES = {
2: "-->",
1: "..>"
}
STRENGTHS = {
"include": 2,
"src": 2,
"test": 1,
"tests": 1
}
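    # Added note: STRENGTHS classifies where an #include was found -- headers pulled in
    # from include/ or src/ count as strong dependencies (weight 2, rendered with the
    # solid "-->" arrow in EDGES), while test-only includes count as weak dependencies
    # (weight 1, rendered with the dotted "..>" arrow).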
def __init__(self, root: Path, out: Path):
"""
Arguments:
root: path to BOOST_ROOT
out: path to output file
"""
self.exp = re.compile(r"^\s*#\s*include\s*[<\"](?P<header>[^>\"]+)[>\"]\s*$")
self.graph = networkx.DiGraph()
self.headers = {} # key: header include path; value: repo key
self.repos = {} # key: repo key; value: repo path
self.out = out
self.root = root
self.libs = self.root / "libs"
with (self.libs / "config" / "include" / "boost" / "version.hpp").open() as fp:
vlines = fp.readlines()
for vline in vlines:
if "BOOST_LIB_VERSION" in vline:
#define BOOST_LIB_VERSION "1_71"
tokens = vline.split(" ")
self.boost_version = tokens[2].strip()[1:-1].replace("_", ".")
def load(self):
self.collect()
self.analyze()
def collect(self):
"""
Locate every .hpp and .h file and associate it with a repository.
"""
metas = self.libs.glob("**/libraries.json")
for meta in metas:
with meta.open() as fp:
metadata = json.loads(fp.read())
repodir = meta.parent.parent
metadata = metadata[0] if isinstance(metadata, list) else metadata # for boost/core
repokey = metadata["key"]
repoinc = repodir / "include"
if repoinc.is_dir(): # libs/geometry/index has no include but looks like a repo?
self.graph.add_node(repokey)
self.repos[repokey] = repodir
headers = repoinc.glob("**/*.h??")
for header in headers:
# print(str(header))
incpath = header.relative_to(repoinc)
assert incpath not in self.headers,\
f"{incpath} in {repokey} already in header map from "\
f"{self.headers[incpath]} - duplicate header paths!"
self.headers[str(incpath)] = repokey
def analyze(self):
"""
Find every include statement and create a graph of dependencies.
"""
for repokey, repodir in self.repos.items():
for ext in ["c", "cpp", "h", "hpp", "ipp"]:
files = repodir.glob("**/*." + ext)
for code in files:
inside = code.relative_to(repodir).parts[0]
if inside not in self.STRENGTHS.keys():
continue
weight = self.STRENGTHS[inside]
with code.open() as fp:
try:
#print(str(code))
source = fp.readlines()
except UnicodeDecodeError:
continue
for line in source:
match = self.exp.search(line)
if match:
include = match.group("header")
if include in self.headers:
deprepo = self.headers[include]
if repokey != deprepo: # avoid self-references
data = self.graph.get_edge_data(repokey, deprepo, {"weight": 0})
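                                    # Added note: keep only the strongest edge between two repos --
                                    # if an existing weaker (test-only) edge is found, drop it and
                                    # re-add the edge with the higher weight.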
if data["weight"] > 0 and data["weight"] < weight:
self.graph.remove_edge(repokey, deprepo)
data["weight"] = 0
if data["weight"] == 0:
self.graph.add_edge(repokey, deprepo, weight=weight)
def report_cycles(self):
with self.out.open("w") as fp:
fp.write("@startuml\n")
fp.write("\n")
fp.write(f"title Boost {self.boost_version} Direct Dependency Cycles\n")
fp.write("footer Generated by boost-deptree (C) 2019 <NAME> III\n")
fp.write("\n")
for edge in self.graph.edges:
fwdweight = self.graph.get_edge_data(edge[0], edge[1])["weight"]
if fwdweight > 1:
if self.graph.get_edge_data(edge[1], edge[0], {"weight": 0})["weight"] > 1:
fp.write(f"['{edge[0]}'] --> ['{edge[1]}']\n")
fp.write("\n")
fp.write("@enduml\n")
def report_dependencies_from(self, repokey):
with self.out.open("w") as fp:
fp.write("@startuml\n")
fp.write("\n")
fp.write(f"title Boost {self.boost_version} dependencies of {repokey}\n")
fp.write("footer Generated by boost-deptree (C) 2019 <NAME> III\n")
fp.write("\n")
for edge in self.graph.edges:
if edge[0] == repokey:
fwdweight = self.graph.get_edge_data(edge[0], edge[1])["weight"]
fp.write(f"['{edge[0]}'] {self.EDGES[fwdweight]} ['{edge[1]}']\n")
fp.write("\n")
fp.write("@enduml\n")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Generate PlantUML dependency tree.')
parser.add_argument('root', type=str, help='Boost root directory.')
parser.add_argument('out', type=str, help='Output filename.')
require_one = parser.add_mutually_exclusive_group(required=True)
require_one.add_argument('--cycles', action='store_true', help='Show direct repository dependency cycles.')
require_one.add_argument('--from', help='Show dependencies from a given repository.')
args = parser.parse_args()
root = Path(args.root)
assert root.is_dir(), "root is not a directory"
out = Path(args.out)
tree = BoostDependencyTree(root, out)
tree.load()
if args.cycles:
tree.report_cycles()
else:
tree.report_dependencies_from(args.__dict__["from"])
| #
# Copyright (c) 2019 <NAME>
#
# Use, modification, and distribution are subject to the
# Boost Software License, Version 1.0. (See accompanying file
# LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt)
#
import json
import networkx
import re
from pathlib import Path
class BoostDependencyTree(object):
"""
Generates a PlantUML dependency tree to visualize the dependencies.
One of the benefits of generating a visual graph is that cycles become
immediately evident.
"""
EDGES = {
2: "-->",
1: "..>"
}
STRENGTHS = {
"include": 2,
"src": 2,
"test": 1,
"tests": 1
}
def __init__(self, root: Path, out: Path):
"""
Arguments:
root: path to BOOST_ROOT
out: path to output file
"""
self.exp = re.compile(r"^\s*#\s*include\s*[<\"](?P<header>[^>\"]+)[>\"]\s*$")
self.graph = networkx.DiGraph()
self.headers = {} # key: header include path; value: repo key
self.repos = {} # key: repo key; value: repo path
self.out = out
self.root = root
self.libs = self.root / "libs"
with (self.libs / "config" / "include" / "boost" / "version.hpp").open() as fp:
vlines = fp.readlines()
for vline in vlines:
if "BOOST_LIB_VERSION" in vline:
#define BOOST_LIB_VERSION "1_71"
tokens = vline.split(" ")
self.boost_version = tokens[2].strip()[1:-1].replace("_", ".")
def load(self):
self.collect()
self.analyze()
def collect(self):
"""
Locate every .hpp and .h file and associate it with a repository.
"""
metas = self.libs.glob("**/libraries.json")
for meta in metas:
with meta.open() as fp:
metadata = json.loads(fp.read())
repodir = meta.parent.parent
metadata = metadata[0] if isinstance(metadata, list) else metadata # for boost/core
repokey = metadata["key"]
repoinc = repodir / "include"
if repoinc.is_dir(): # libs/geometry/index has no include but looks like a repo?
self.graph.add_node(repokey)
self.repos[repokey] = repodir
headers = repoinc.glob("**/*.h??")
for header in headers:
# print(str(header))
incpath = header.relative_to(repoinc)
assert incpath not in self.headers,\
f"{incpath} in {repokey} already in header map from "\
f"{self.headers[incpath]} - duplicate header paths!"
self.headers[str(incpath)] = repokey
def analyze(self):
"""
Find every include statement and create a graph of dependencies.
"""
for repokey, repodir in self.repos.items():
for ext in ["c", "cpp", "h", "hpp", "ipp"]:
files = repodir.glob("**/*." + ext)
for code in files:
inside = code.relative_to(repodir).parts[0]
if inside not in self.STRENGTHS.keys():
continue
weight = self.STRENGTHS[inside]
with code.open() as fp:
try:
#print(str(code))
source = fp.readlines()
except UnicodeDecodeError:
continue
for line in source:
match = self.exp.search(line)
if match:
include = match.group("header")
if include in self.headers:
deprepo = self.headers[include]
if repokey != deprepo: # avoid self-references
data = self.graph.get_edge_data(repokey, deprepo, {"weight": 0})
if data["weight"] > 0 and data["weight"] < weight:
self.graph.remove_edge(repokey, deprepo)
data["weight"] = 0
if data["weight"] == 0:
self.graph.add_edge(repokey, deprepo, weight=weight)
def report_cycles(self):
with self.out.open("w") as fp:
fp.write("@startuml\n")
fp.write("\n")
fp.write(f"title Boost {self.boost_version} Direct Dependency Cycles\n")
fp.write("footer Generated by boost-deptree (C) 2019 <NAME> III\n")
fp.write("\n")
for edge in self.graph.edges:
fwdweight = self.graph.get_edge_data(edge[0], edge[1])["weight"]
if fwdweight > 1:
if self.graph.get_edge_data(edge[1], edge[0], {"weight": 0})["weight"] > 1:
fp.write(f"['{edge[0]}'] --> ['{edge[1]}']\n")
fp.write("\n")
fp.write("@enduml\n")
def report_dependencies_from(self, repokey):
with self.out.open("w") as fp:
fp.write("@startuml\n")
fp.write("\n")
fp.write(f"title Boost {self.boost_version} dependencies of {repokey}\n")
fp.write("footer Generated by boost-deptree (C) 2019 <NAME> III\n")
fp.write("\n")
for edge in self.graph.edges:
if edge[0] == repokey:
fwdweight = self.graph.get_edge_data(edge[0], edge[1])["weight"]
fp.write(f"['{edge[0]}'] {self.EDGES[fwdweight]} ['{edge[1]}']\n")
fp.write("\n")
fp.write("@enduml\n")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Generate PlantUML dependency tree.')
parser.add_argument('root', type=str, help='Boost root directory.')
parser.add_argument('out', type=str, help='Output filename.')
require_one = parser.add_mutually_exclusive_group(required=True)
require_one.add_argument('--cycles', action='store_true', help='Show direct repository dependency cycles.')
require_one.add_argument('--from', help='Show dependencies from a given repository.')
args = parser.parse_args()
root = Path(args.root)
assert root.is_dir(), "root is not a directory"
out = Path(args.out)
tree = BoostDependencyTree(root, out)
tree.load()
if args.cycles:
tree.report_cycles()
else:
tree.report_dependencies_from(args.__dict__["from"]) | en | 0.810133 | # # Copyright (c) 2019 <NAME> # # Use, modification, and distribution are subject to the # Boost Software License, Version 1.0. (See accompanying file # LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt) # Generates a PlantUML dependency tree to visualize the dependencies. One of the benefits of generating a visual graph is that cycles become immediately evident. Arguments: root: path to BOOST_ROOT out: path to output file #\s*include\s*[<\"](?P<header>[^>\"]+)[>\"]\s*$") # key: header include path; value: repo key # key: repo key; value: repo path #define BOOST_LIB_VERSION "1_71" Locate every .hpp and .h file and associate it with a repository. # for boost/core # libs/geometry/index has no include but looks like a repo? # print(str(header)) Find every include statement and create a graph of dependencies. #print(str(code)) # avoid self-references | 2.234292 | 2 |
uberbackend.py | adiHusky/uber_backend | 0 | 8867
from flask import Flask, flash, request, jsonify, render_template, redirect, url_for, g, session, send_from_directory, abort
from flask_cors import CORS
# from flask import status
from datetime import date, datetime, timedelta
from calendar import monthrange
from dateutil.parser import parse
import pytz
import os
import sys
import time
import uuid
import json
import random
import string
import pathlib
import io
from uuid import UUID
from bson.objectid import ObjectId
# straight mongo access
from pymongo import MongoClient
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
sentry_sdk.init(
dsn="https://[email protected]/5685529",
integrations=[FlaskIntegration()],
# Set traces_sample_rate to 1.0 to capture 100%
# of transactions for performance monitoring.
# We recommend adjusting this value in production.
traces_sample_rate=1.0,
# By default the SDK will try to use the SENTRY_RELEASE
# environment variable, or infer a git commit
# SHA as release, however you may want to set
# something more human-readable.
# release="[email protected]",
)
class InvalidUsage(Exception):
status_code = 400
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
# mongo
# mongo_client = MongoClient('mongodb://localhost:27017/')
mongo_client = MongoClient(
"mongodb+srv://Mahitha-Maddi:<EMAIL>/test")
app = Flask(__name__)
# CORS(app)
CORS(app, resources={r"/*": {"origins": "*"}})
basedir = os.path.abspath(os.path.dirname(__file__))
# Here are my datasets
bookings = dict()
################
# Apply to mongo
################
def atlas_connect():
# Node
# const MongoClient = require('mongodb').MongoClient;
# const uri = "mongodb+srv://admin:<password><EMAIL>/myFirstDatabase?retryWrites=true&w=majority";
# const client = new MongoClient(uri, { useNewUrlParser: true, useUnifiedTopology: true });
# client.connect(err => {
# const collection = client.db("test").collection("devices");
# // perform actions on the collection object
# client.close();
# });
# Python
    client = MongoClient(
        "mongodb+srv://Mahitha-Maddi:<EMAIL>%<EMAIL>/test")
db = client.test
# database access layer
def insert_one(r):
start_time = datetime.now()
with mongo_client:
# start_time_db = datetime.now()
db = mongo_client['Uber']
# microseconds_caching_db = (datetime.now() - start_time_db).microseconds
# print("*** It took " + str(microseconds_caching_db) + " microseconds to cache mongo handle.")
print("...insert_one() to mongo: ", r)
try:
mongo_collection = db['bookings']
result = mongo_collection.insert_one(r)
print("inserted _ids: ", result.inserted_id)
except Exception as e:
print(e)
microseconds_doing_mongo_work = (datetime.now() - start_time).microseconds
print("*** It took " + str(microseconds_doing_mongo_work) +
" microseconds to insert_one.")
def tryexcept(requesto, key, default):
lhs = None
try:
lhs = requesto.json[key]
# except Exception as e:
except:
lhs = default
return lhs
def ssm():
now = datetime.now()
midnight = now.replace(hour=0, minute=0, second=0, microsecond=0)
return str((now - midnight).seconds)
@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
# endpoint to check Availability
@app.route("/checkAvailability", methods=["POST"])
def check_availability():
source = request.json['source']
destination = request.json['destination']
date = request.json['date']
with mongo_client:
#raise InvalidUsage('This view is gone', status_code=410)
db = mongo_client['Uber']
mongo_collection = db['available']
print(source)
myquery = {"source": {"$regex": str(source)}, "destination": {
"$regex": str(destination)}, "date": {"$regex": str(date)}}
cursor = dict()
cursor = mongo_collection.find(myquery, {"_id": 0})
records = list(cursor)
howmany = len(records)
print('found ' + str(howmany) + ' bookings!')
sorted_records = sorted(records, key=lambda t: t['source'])
print(type(sorted_records))
return jsonify(sorted_records)
# endpoint to create new Booking
@app.route("/book", methods=["POST"])
def book_bus():
source = request.json['source']
destination = request.json['destination']
date = request.json['date']
startTime = request.json['startTime']
endTime = request.json['endTime']
user = request.json['user']
busnumber = request.json['busnumber']
booking = dict(user=user, source=source, destination=destination, busnumber=busnumber,
date=date, startTime=startTime, endTime=endTime, bookeddate=datetime.now(
).strftime("%Y-%m-%d %H:%M:%S"),
_id=str(ObjectId()))
insert_one(booking)
return jsonify(booking)
@app.route("/bookings-results", methods=["GET"])
def get_bookings_results():
global bookings
with mongo_client:
db = mongo_client['Uber']
mongo_collection = db['bookings']
cursor = mongo_collection.find({})
records = list(cursor)
howmany = len(records)
print('found ' + str(howmany) + ' bookings!')
sorted_records = sorted(records, key=lambda t: t['source'])
return jsonify(sorted_records)
##################
# Apply from mongo
##################
def applyRecordLevelUpdates():
return None
def applyCollectionLevelUpdates():
global bookings
with mongo_client:
db = mongo_client['Uber']
mongo_collection = db['available']
cursor = mongo_collection.find({})
records = list(cursor)
# bookings[0] = records[0]
howmany = len(records)
print('found ' + str(howmany) + ' bookings!')
sorted_records = sorted(records, key=lambda t: t['source'])
# return json.dumps({"results": sorted_records })
for booking in sorted_records:
bookings[booking['_id']] = booking
@app.route("/")
def home():
return """Welcome to Uber backend!<br/>"""
##################
# ADMINISTRATION #
##################
# This runs once before the first single request
# Used to bootstrap our collections
@app.before_first_request
def before_first_request_func():
applyCollectionLevelUpdates()
# This runs once before any request
@app.before_request
def before_request_func():
applyRecordLevelUpdates()
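# Example (added for illustration, not part of the original service): a minimal
# client-side sketch of the availability and booking endpoints defined above.
# It assumes the app is running locally on port 5000 and that `requests` is
# installed; every field value, including the bus number, is hypothetical.
def _example_client():
    import requests
    base = "http://localhost:5000"
    query = {"source": "Boston", "destination": "New York", "date": "2021-06-01"}
    available = requests.post(base + "/checkAvailability", json=query).json()
    print("available:", available)
    booking = dict(query, user="alice", busnumber="MA-1234",
                   startTime="09:00", endTime="13:30")
    print("booked:", requests.post(base + "/book", json=booking).json())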
############################
# INFO on containerization #
############################
# To containerize a flask app:
# https://pythonise.com/series/learning-flask/building-a-flask-app-with-docker-compose
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
sppas/sppas/src/models/acm/htkscripts.py | mirfan899/MTTS | 0 | 8868
"""
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
src.models.acm.htkscripts.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import os
import os.path
import logging
# ---------------------------------------------------------------------------
class sppasHtkScripts(object):
"""HTK-ASCII scripts reader/writer.
:organization: Laboratoire Parole et Langage, Aix-en-Provence, France
:license: GPL, v3
:copyright: Copyright (C) 2011-2018 <NAME>
:author: <NAME>
:contact: <EMAIL>
This class is able to write all scripts of the VoxForge tutorial.
They are used to train acoustic models thanks to the HTK toolbox.
For details, refer to: http://www.voxforge.org/
"""
def __init__(self):
"""Create a sppasHtkScripts instance."""
self.configfile = ""
self.globalfile = ""
self.mkphones0file = ""
self.mkphones1file = ""
self.mktrifile = ""
self.maketriphonesfile = ""
self.silfile = ""
# -----------------------------------------------------------------------
def write_all(self, dirname):
"""Write all scripts at once.
Write scripts with their default name, in the given directory.
:param dirname: (str) a directory name (existing or to be created).
"""
if os.path.exists(dirname) is False:
os.mkdir(dirname)
self.write_global_ded(os.path.join(dirname, "global.ded"))
self.write_mkphones0_led(os.path.join(dirname, "mkphones0.led"))
self.write_mkphones1_led(os.path.join(dirname, "mkphones1.led"))
self.write_mktri_led(os.path.join(dirname, "mktri.led"))
self.write_maketriphones_ded(os.path.join(dirname, "maketriphones.ded"))
self.write_sil_hed(os.path.join(dirname, "sil.hed"))
# -----------------------------------------------------------------------
def write_global_ded(self, filename):
"""Write the htk script `global.ded`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("AS sp\n")
fp.write("RS cmu\n")
fp.write("MP sil sil sp\n")
fp.write("\n")
fp.close()
self.globalfile = filename
# -----------------------------------------------------------------------
def write_mkphones0_led(self, filename):
"""Write the htk script `mkphones0.led`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("EX\n")
fp.write("IS sil sil\n")
fp.write("DE sp\n")
fp.write("\n")
fp.close()
self.mkphones0file = filename
# -----------------------------------------------------------------------
def write_mkphones1_led(self, filename):
"""Write the htk script `mkphones1.led`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("EX\n")
fp.write("IS sil sil\n")
fp.write("\n")
fp.close()
self.mkphones1file = filename
# -----------------------------------------------------------------------
def write_mktri_led(self, filename):
"""Write the htk script `mktri.led`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("WB sp\n")
fp.write("WB sil\n")
fp.write("TC\n")
fp.write("\n")
fp.close()
self.mktrifile = filename
# -----------------------------------------------------------------------
def write_maketriphones_ded(self, filename):
"""Write the htk script `maketriphones.ded`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("AS sp\n")
fp.write("MP sil sil sp\n")
fp.write("TC\n")
fp.write("\n")
fp.close()
self.maketriphonesfile = filename
# -----------------------------------------------------------------------
def write_sil_hed(self, filename):
"""Write the htk script `sil.hed`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("AT 2 4 0.2 {sil.transP}\n")
fp.write("AT 4 2 0.2 {sil.transP}\n")
fp.write("AT 1 3 0.3 {sp.transP}\n")
fp.write("TI silst {sil.state[3],sp.state[2]}\n")
fp.write("\n")
fp.close()
self.silfile = filename
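# Example (added for illustration, not part of SPPAS): writing the whole set of
# scripts into a hypothetical working directory, using only the API shown above.
def _example_write_scripts():
    scripts = sppasHtkScripts()
    scripts.write_all("scripts-workdir")  # directory is created if it does not exist
    print(scripts.globalfile, scripts.mktrifile, scripts.silfile)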
| """
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
src.models.acm.htkscripts.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import os
import os.path
import logging
# ---------------------------------------------------------------------------
class sppasHtkScripts(object):
"""HTK-ASCII scripts reader/writer.
:organization: Laboratoire Parole et Langage, Aix-en-Provence, France
:license: GPL, v3
:copyright: Copyright (C) 2011-2018 <NAME>
:author: <NAME>
:contact: <EMAIL>
This class is able to write all scripts of the VoxForge tutorial.
They are used to train acoustic models thanks to the HTK toolbox.
For details, refer to: http://www.voxforge.org/
"""
def __init__(self):
"""Create a sppasHtkScripts instance."""
self.configfile = ""
self.globalfile = ""
self.mkphones0file = ""
self.mkphones1file = ""
self.mktrifile = ""
self.maketriphonesfile = ""
self.silfile = ""
# -----------------------------------------------------------------------
def write_all(self, dirname):
"""Write all scripts at once.
Write scripts with their default name, in the given directory.
:param dirname: (str) a directory name (existing or to be created).
"""
if os.path.exists(dirname) is False:
os.mkdir(dirname)
self.write_global_ded(os.path.join(dirname, "global.ded"))
self.write_mkphones0_led(os.path.join(dirname, "mkphones0.led"))
self.write_mkphones1_led(os.path.join(dirname, "mkphones1.led"))
self.write_mktri_led(os.path.join(dirname, "mktri.led"))
self.write_maketriphones_ded(os.path.join(dirname, "maketriphones.ded"))
self.write_sil_hed(os.path.join(dirname, "sil.hed"))
# -----------------------------------------------------------------------
def write_global_ded(self, filename):
"""Write the htk script `global.ded`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("AS sp\n")
fp.write("RS cmu\n")
fp.write("MP sil sil sp\n")
fp.write("\n")
fp.close()
self.globalfile = filename
# -----------------------------------------------------------------------
def write_mkphones0_led(self, filename):
"""Write the htk script `mkphones0.led`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("EX\n")
fp.write("IS sil sil\n")
fp.write("DE sp\n")
fp.write("\n")
fp.close()
self.mkphones0file = filename
# -----------------------------------------------------------------------
def write_mkphones1_led(self, filename):
"""Write the htk script `mkphones1.led`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("EX\n")
fp.write("IS sil sil\n")
fp.write("\n")
fp.close()
self.mkphones1file = filename
# -----------------------------------------------------------------------
def write_mktri_led(self, filename):
"""Write the htk script `mktri.led`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("WB sp\n")
fp.write("WB sil\n")
fp.write("TC\n")
fp.write("\n")
fp.close()
self.mktrifile = filename
# -----------------------------------------------------------------------
def write_maketriphones_ded(self, filename):
"""Write the htk script `maketriphones.ded`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("AS sp\n")
fp.write("MP sil sil sp\n")
fp.write("TC\n")
fp.write("\n")
fp.close()
self.maketriphonesfile = filename
# -----------------------------------------------------------------------
def write_sil_hed(self, filename):
"""Write the htk script `sil.hed`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("AT 2 4 0.2 {sil.transP}\n")
fp.write("AT 4 2 0.2 {sil.transP}\n")
fp.write("AT 1 3 0.3 {sp.transP}\n")
fp.write("TI silst {sil.state[3],sp.state[2]}\n")
fp.write("\n")
fp.close()
self.silfile = filename | en | 0.580576 | .. --------------------------------------------------------------------- ___ __ __ __ ___ / | \ | \ | \ / the automatic \__ |__/ |__/ |___| \__ annotation and \ | | | | \ analysis ___/ | | | | ___/ of speech http://www.sppas.org/ Use of this software is governed by the GNU Public License, version 3. SPPAS is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. SPPAS is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with SPPAS. If not, see <http://www.gnu.org/licenses/>. This banner notice must not be removed. --------------------------------------------------------------------- src.models.acm.htkscripts.py ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # --------------------------------------------------------------------------- HTK-ASCII scripts reader/writer. :organization: Laboratoire Parole et Langage, Aix-en-Provence, France :license: GPL, v3 :copyright: Copyright (C) 2011-2018 <NAME> :author: <NAME> :contact: <EMAIL> This class is able to write all scripts of the VoxForge tutorial. They are used to train acoustic models thanks to the HTK toolbox. For details, refer to: http://www.voxforge.org/ Create a sppasHtkScripts instance. # ----------------------------------------------------------------------- Write all scripts at once. Write scripts with their default name, in the given directory. :param dirname: (str) a directory name (existing or to be created). # ----------------------------------------------------------------------- Write the htk script `global.ded`. :param filename: (str) Name of the script file. # ----------------------------------------------------------------------- Write the htk script `mkphones0.led`. :param filename: (str) Name of the script file. # ----------------------------------------------------------------------- Write the htk script `mkphones1.led`. :param filename: (str) Name of the script file. # ----------------------------------------------------------------------- Write the htk script `mktri.led`. :param filename: (str) Name of the script file. # ----------------------------------------------------------------------- Write the htk script `maketriphones.ded`. :param filename: (str) Name of the script file. # ----------------------------------------------------------------------- Write the htk script `sil.hed`. :param filename: (str) Name of the script file. | 1.467779 | 1 |
icekit/plugins/map/tests.py | ic-labs/django-icekit | 52 | 8869
from mock import patch
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.contrib.auth import get_user_model
from django.core import exceptions
from django_dynamic_fixture import G
from django_webtest import WebTest
from icekit.models import Layout
from icekit.page_types.layout_page.models import LayoutPage
from icekit.utils import fluent_contents
from . import models
User = get_user_model()
class MapItemTestCase(WebTest):
def setUp(self):
self.embed_code = '''
<iframe
src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3312.0476344648832!2d151.19845715159963!3d-33.88842702741586!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x6b12b1d842ee9aa9%3A0xb0a19ac433ef0be8!2sThe+Interaction+Consortium!5e0!3m2!1sen!2sau!4v1496201264670"
width="600"
height="450"
frameborder="0"
style="border:0"
allowfullscreen
></iframe>
'''
self.cleaned_embed_code = '<iframe allowfullscreen="" frameborder="0" src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3312.0476344648832!2d151.19845715159963!3d-33.88842702741586!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x6b12b1d842ee9aa9%3A0xb0a19ac433ef0be8!2sThe+Interaction+Consortium!5e0!3m2!1sen!2sau!4v1496201264670" style="border: 0;"></iframe>'
self.layout_1 = G(
Layout,
template_name='icekit/layouts/default.html',
)
self.layout_1.content_types.add(
ContentType.objects.get_for_model(LayoutPage))
self.layout_1.save()
self.staff_1 = User.objects.create(
email='<EMAIL>',
is_staff=True,
is_active=True,
is_superuser=True,
)
self.page_1 = LayoutPage()
self.page_1.title = 'Test Page'
self.page_1.slug = 'test-page'
self.page_1.parent_site = Site.objects.first()
self.page_1.layout = self.layout_1
self.page_1.author = self.staff_1
self.page_1.status = LayoutPage.PUBLISHED
self.page_1.save()
self.map_1 = fluent_contents.create_content_instance(
models.MapItem,
self.page_1,
_embed_code=self.embed_code,
)
self.map_item = models.MapItem(
parent_type=ContentType.objects.get_for_model(type(self.page_1)),
parent_id=self.page_1.id,
placeholder=self.page_1.get_placeholder_by_slot('main')[0],
_embed_code=self.embed_code,
)
self.page_1.publish()
def test_map_renders(self):
response = self.app.get(self.page_1.get_published().get_absolute_url())
response.mustcontain(self.cleaned_embed_code)
def test_cleaned_embed_code(self):
self.assertEqual(self.map_1._cleaned_embed_code.strip(), self.cleaned_embed_code)
example/example.py | saravanabalagi/imshowtools | 4 | 8870
from imshowtools import imshow
import cv2
if __name__ == '__main__':
image_lenna = cv2.imread("lenna.png")
imshow(image_lenna, mode='BGR', window_title="LennaWindow", title="Lenna")
image_lenna_bgr = cv2.imread("lenna_bgr.png")
imshow(image_lenna, image_lenna_bgr, mode=['BGR', 'RGB'], title=['lenna_rgb', 'lenna_bgr'])
imshow(*[image_lenna for _ in range(12)], title=["Lenna" for _ in range(12)], window_title="LennaWindow")
imshow(*[image_lenna for _ in range(30)], title="Lenna", padding=(1, 1, 0, (0, 0, 0.8, 0.8)))
terminalone/models/concept.py | amehta1/t1-python | 24 | 8871
# -*- coding: utf-8 -*-
"""Provides concept object."""
from __future__ import absolute_import
from .. import t1types
from ..entity import Entity
class Concept(Entity):
"""Concept entity."""
collection = 'concepts'
resource = 'concept'
_relations = {
'advertiser',
}
_pull = {
'advertiser_id': int,
'created_on': t1types.strpt,
'id': int,
'name': None,
'status': t1types.int_to_bool,
'updated_on': t1types.strpt,
'version': int,
}
_push = _pull.copy()
_push.update({
'status': int,
})
def __init__(self, session, properties=None, **kwargs):
super(Concept, self).__init__(session, properties, **kwargs)
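# Example (added for illustration, not part of the library): constructing a
# Concept entity. The `session` object and every property value here are
# hypothetical, and whether properties may be passed this way is determined by
# the Entity base class rather than anything shown in this file.
def _example_concept(session):
    return Concept(session, properties={
        "name": "Spring Campaign",
        "advertiser_id": 12345,
        "status": True,
    })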
videofeed.py | dmeklund/asyncdemo | 0 | 8872
"""
Mock up a video feed pipeline
"""
import asyncio
import logging
import sys
import cv2
logging.basicConfig(format="[%(thread)-5d]%(asctime)s: %(message)s")
logger = logging.getLogger('async')
logger.setLevel(logging.INFO)
async def process_video(filename):
cap = cv2.VideoCapture(filename)
tasks = list()
frame_ind = 0
while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break  # stop scheduling work once the stream ends
        tasks.append(asyncio.ensure_future(process_frame(frame, frame_ind)))
        frame_ind += 1
        await asyncio.sleep(0)
    await asyncio.gather(*tasks)
async def process_frame(frame, frame_ind):
logger.info("Processing frame {}".format(frame_ind))
await asyncio.sleep(20.0)
logger.info("Finished processing frame {}".format(frame_ind))
def main():
loop = asyncio.get_event_loop()
loop.run_until_complete(process_video(sys.argv[1]))
logger.info("Completed")
if __name__ == '__main__':
main()
| """
Mock up a video feed pipeline
"""
import asyncio
import logging
import sys
import cv2
logging.basicConfig(format="[%(thread)-5d]%(asctime)s: %(message)s")
logger = logging.getLogger('async')
logger.setLevel(logging.INFO)
async def process_video(filename):
cap = cv2.VideoCapture(filename)
tasks = list()
frame_ind = 0
while cap.isOpened():
ret, frame = cap.read()
tasks.append(asyncio.ensure_future(process_frame(frame, frame_ind)))
frame_ind += 1
await asyncio.sleep(0)
await asyncio.gather(tasks)
async def process_frame(frame, frame_ind):
logger.info("Processing frame {}".format(frame_ind))
await asyncio.sleep(20.0)
logger.info("Finished processing frame {}".format(frame_ind))
def main():
loop = asyncio.get_event_loop()
loop.run_until_complete(process_video(sys.argv[1]))
logger.info("Completed")
if __name__ == '__main__':
main()
| en | 0.765273 | Mock up a video feed pipeline | 3.135046 | 3 |
parsers/read_lspci_and_glxinfo.py | mikeus9908/peracotta | 3 | 8873
#!/usr/bin/python3
"""
Read "lspci -v" and "glxinfo" outputs
"""
import re
from dataclasses import dataclass
from InputFileNotFoundError import InputFileNotFoundError
@dataclass
class VideoCard:
type = "graphics-card"
manufacturer_brand = ""
reseller_brand = ""
internal_name = ""
model = ""
capacity = -1 # bytes
warning = ""
def parse_lspci_output(gpu: VideoCard, lspci_path: str, interactive: bool = False):
try:
with open(lspci_path, "r") as f:
lspci_output = f.read()
except FileNotFoundError:
raise InputFileNotFoundError(lspci_path)
lspci_sections = lspci_output.split("\n\n")
for section in lspci_sections:
if "VGA compatible controller" in section:
first_line = section.splitlines()[0].split(": ", 1)[
1
] # removes "VGA compatible controller:"
second_line = section.splitlines()[1]
part_between_square_brackets = None
try:
# take the first string between [] from the first line
part_between_square_brackets = first_line.split("[")[1].split("]")[0]
except IndexError:
# there may not be an argument in between []
pass
if "Subsystem:" in second_line:
# The model or model family is often repeated here, but removing it automatically is complicated
gpu.reseller_brand = (
second_line.split("Subsystem: ")[1].split("[", 1)[0].strip()
)
gpu.reseller_brand = gpu.reseller_brand.replace(
"Integrated Graphics Controller", ""
)
# -----------------------------------------------------------------
# AMD/ATI
# -----------------------------------------------------------------
if part_between_square_brackets is not None and (
"AMD" in part_between_square_brackets
or "ATI" in part_between_square_brackets
):
gpu.manufacturer_brand = part_between_square_brackets
# take second string between []
gpu.model = first_line.split("[")[2].split("]")[0]
if "controller" in gpu.model:
gpu.model = section.splitlines()[1].split(" ")[-1]
# -----------------------------------------------------------------
# Nvidia
# -----------------------------------------------------------------
elif "NVIDIA" in first_line.upper():
gpu.manufacturer_brand = "Nvidia"
gpu.model = part_between_square_brackets
if gpu.reseller_brand != "":
pieces = gpu.reseller_brand.rsplit(" ", 1)
gpu.reseller_brand = pieces[0]
gpu.internal_name = pieces[1]
# -----------------------------------------------------------------
# Intel
# -----------------------------------------------------------------
elif "INTEL" in first_line.upper():
gpu.manufacturer_brand = "Intel"
if "Integrated Graphics" in first_line:
tmp_model = first_line.split("Intel Corporation ")[1].split(
" Integrated Graphics"
)[0]
# if there are no numbers, e.g. "Core Processor", tmp_model is not a model number
if not re.search("\\d+", tmp_model):
tmp_model = ""
elif "HD Graphics" in first_line:
tmp_model = (
first_line.split("Intel Corporation ")[1]
.split("(", 1)[0]
.strip()
)
elif "[" in first_line and "]" in first_line:
tmp_model = first_line.split("[")[1].split("]")[0]
else:
tmp_model = ""
if tmp_model != "":
gpu.model = tmp_model
else:
gpu.model = ""
# -----------------------------------------------------------------
# VIA
# -----------------------------------------------------------------
elif first_line.startswith("VIA"):
gpu.manufacturer_brand = "VIA"
gpu.model = part_between_square_brackets
tmp_model = first_line.split("[")[0]
i = 0
for i, char in enumerate("VIA Technologies, Inc. "):
if tmp_model[i] != char:
break
gpu.internal_name = tmp_model[i:].strip()
# -----------------------------------------------------------------
# SiS
# -----------------------------------------------------------------
elif part_between_square_brackets == "SiS":
# May be written somewhere else on other models, but we have so few SiS cards that it's difficult to
# find more examples. Also, they haven't made any video card in the last 15 years or so.
gpu.manufacturer_brand = part_between_square_brackets
if gpu.reseller_brand.lower() == "silicon integrated systems":
gpu.reseller_brand = "SiS"
gpu.model = first_line.split("]", 1)[1]
# These may be useful for non-integrated cards, however the example ones are all integrated
if " PCIE" in gpu.model:
gpu.model = gpu.model.split(" PCIE", 1)[0].strip()
elif " PCI/AGP" in gpu.model:
gpu.model = gpu.model.split(" PCI/AGP", 1)[0].strip()
if gpu.model in gpu.reseller_brand:
gpu.reseller_brand = gpu.reseller_brand.split(gpu.model, 1)[
0
].strip()
else:
gpu.manufacturer_brand = None
error = (
"I couldn't find the Video Card brand. The model was set to 'None' and is to be edited "
"logging into the TARALLO afterwards. The information you're looking for should be in the "
f"following 2 lines:\n{first_line}\n{second_line}\n"
)
if interactive:
print(error)
gpu.warning += error
if gpu.model is None:
error = (
"I couldn't find the Integrated Graphics model. The model was set to 'None' and is to be "
"edited logging into the TARALLO afterwards. The information you're looking for should be in "
f"the following 2 lines:\n{first_line}\n{second_line}\n"
)
if interactive:
print(error)
gpu.warning += error
else:
# Try to remove duplicate information
gpu.reseller_brand = gpu.reseller_brand.replace(gpu.model, "").strip()
if gpu.internal_name is not None:
# Same
gpu.reseller_brand = gpu.reseller_brand.replace(
gpu.internal_name, ""
).strip()
break
def parse_glxinfo_output(gpu: VideoCard, glxinfo_path: str):
try:
with open(glxinfo_path, "r") as f:
glxinfo_output = f.read()
except FileNotFoundError:
raise InputFileNotFoundError(glxinfo_path)
for i, line in enumerate(glxinfo_output.splitlines()):
# this line comes before the "Dedicated video memory" line
# this basically saves a default value if the dedicated memory line cannot be found
if "Video memory" in line:
try:
tmp_vid_mem = int(line.split(" ")[6].split(" ")[0][:-2])
tmp_vid_mem_multiplier = line[-2:]
except ValueError:
exit(-1)
return # To stop complaints from PyCharm
gpu.capacity = convert_video_memory_size(
tmp_vid_mem, tmp_vid_mem_multiplier
)
if "Dedicated video memory" in line:
try:
tmp_vram = int(line.split(" ")[7].split(" ")[0])
tmp_vram_multiplier = line[-2:]
except ValueError:
exit(-1)
return
capacity = convert_video_memory_size(tmp_vram, tmp_vram_multiplier)
if capacity < 0:
gpu.warning = "Could not find dedicated video memory"
if gpu.capacity < 0:
gpu.warning += ". The value cannot be trusted."
else:
gpu.capacity = capacity
break
if gpu.capacity > 0:
# Round to the next power of 2
# this may be different from human readable capacity...
rounded = 2 ** (gpu.capacity - 1).bit_length()
one_and_half = int(rounded / 2 * 1.5)
# Accounts for 3 GB VRAM cards and similar
# Yes they do exist, try to remove this part and watch tests fail (and the card was manually verified to be 3 GB)
if one_and_half >= gpu.capacity:
gpu.capacity = one_and_half
else:
gpu.capacity = rounded
def convert_video_memory_size(capacity, units_of_measure):
if units_of_measure == "GB":
capacity *= 1024 * 1024 * 1024
elif units_of_measure == "MB":
capacity *= 1024 * 1024
elif units_of_measure.upper() == "KB":
capacity *= 1024
else:
capacity = -1
return capacity
def read_lspci_and_glxinfo(
has_dedicated: bool, lspci_path: str, glxinfo_path: str, interactive: bool = False
):
gpu = VideoCard()
if has_dedicated:
parse_lspci_output(gpu, lspci_path, interactive)
parse_glxinfo_output(gpu, glxinfo_path)
else: # integrated_in_mobo or integrated_in_cpu
parse_lspci_output(gpu, lspci_path, interactive)
# don't parse glxinfo because the VRAM is part of the RAM and varies
gpu.capacity = None
# print("The VRAM capacity could not be detected. "
# "Please try looking for it on the Video Card or on the Internet. "
# "The capacity value defaulted to 'None'. "
# "For an integrated GPU, the VRAM may also be shared with the system RAM, so an empty value is acceptable.")
result = {
"type": "graphics-card",
"brand": gpu.reseller_brand.strip(),
"model": gpu.model.strip(),
"internal-name": gpu.internal_name.strip(),
"capacity-byte": gpu.capacity,
"working": "yes", # Indeed it is working
}
if gpu.manufacturer_brand is not None and gpu.reseller_brand is not None:
if gpu.manufacturer_brand.lower() != gpu.reseller_brand.lower():
result["brand-manufacturer"] = gpu.manufacturer_brand
return result
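# Example (added for illustration): using the parser as a library instead of the
# CLI block below. The two paths are hypothetical capture files produced
# beforehand by redirecting `lspci -v` and `glxinfo` output to text files.
def _example_parse():
    gpu = read_lspci_and_glxinfo(True, "lspci.txt", "glxinfo.txt")
    print(gpu["brand"], gpu["model"], gpu["capacity-byte"])
    return gpu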
if __name__ == "__main__":
import argparse
import json
parser = argparse.ArgumentParser(description="Parse lspci/glxinfo output")
parser.add_argument("lspci", type=str, nargs=1, help="path to lspci output")
parser.add_argument("glxinfo", type=str, nargs=1, help="path to glxinfo output")
parser.add_argument(
"-d",
"--dedicated",
action="store_true",
default=False,
help="computer has dedicated GPU",
)
args = parser.parse_args()
try:
print(
json.dumps(
read_lspci_and_glxinfo(args.dedicated, args.lspci[0], args.glxinfo[0]),
indent=2,
)
)
except InputFileNotFoundError as e:
print(str(e))
exit(1)
upload.py | snymainn/tools- | 0 | 8874 | <reponame>snymainn/tools-<gh_stars>0
#!/usr/bin/python
import sys
from loglib import SNYLogger
import ftplib
import argparse
import re
import os
import calendar
import time
def read_skipfile(infile, log):
skiplines = list()
skipfile = open(infile, 'r')
for line in skipfile:
newline = line.rstrip('\r\n')
linelength = len(newline)
if linelength>0:
log.debug("Adding "+newline+" to skiplines")
tmpobjects = re.compile(newline)
skiplines.append(tmpobjects)
skipfile.close()
return skiplines
#GET LOCAL FILELIST
def get_local_files(localpath,log):
locallist = list()
os.chdir(localpath)
log.debug("*** GETTING LOCAL FILELIST ***")
for name in os.listdir("."):
if (not name.startswith('.')):
statinfo = os.stat(name)
if (statinfo.st_mode>=32768):
entrytype = "file"
else:
entrytype = "dir"
size = statinfo.st_size
date = statinfo.st_mtime
log.debug("Date:"+str(int(date))+" type:"+entrytype+", name:"+name+" size:"+str(size))
locallist.append({'name':name,'type':entrytype,'modify':int(date),'size':size})
return locallist
#
# login to ftp server
#
def ftp_login(args, log):
ftp = ftplib.FTP()
port = 21
ftp.connect(args.host, port)
try:
log.debug("Logging in...")
ftp.login(args.user, args.password)
log.debug(ftp.getwelcome())
except ftplib.error_perm, resp:
log.logprint(str(resp))
except:
log.logprint("Login section failed..")
return ftp
#
# get remote files
#
def get_remote_files(ftp, remotepath, args, log):
# LIST CONTENTS
contents = list()
dirlist = list()
log.debug("*** GET REMOTE FILELIST ***")
try:
ftp.cwd(remotepath)
# Entry point
ftp.retrlines('MLSD', contents.append)
for line in contents:
# log.debug(line)
entry = line.split(";")
size = "0" #Set this because directories does not report size
for item in entry:
cell = item.split("=")
if (cell[0]=="modify"):
date = cell[1]
modify=calendar.timegm(time.strptime(str(date), "%Y%m%d%H%M%S"))
#for loops/if checks are not blocks in python, i.e. no need to predefine modify
if (cell[0]=="type"):
entrytype=cell[1]
if (cell[0]=="size"):
size = cell[1]
if (len(cell[0])>0) and cell[0].startswith(' '):
#If string does not contain =, cell[1] will not be defined
#and first entry in cell[0] string will be whitespace
name = cell[0].lstrip()
log.debug("Date:"+str(modify)+" type:"+entrytype+" Name:"+name+" size:"+size)
if (entrytype=='file' or entrytype=='dir'): #Do not include current and parent dir entries
dirlist.append({'name':name,'type':entrytype,'modify':int(modify),'size':size})
except ftplib.error_perm, resp:
log.logprint(str(resp))
exit(1)
return dirlist
def touch(fname):
try:
os.utime(fname, None)
except:
log.logprint("Updating mtime failed, "+fname+" does not exist")
def sync_files(ftp, args, skiplines, localpath, remotepath, log):
locallist = get_local_files(localpath,log)
remotelist = get_remote_files(ftp, remotepath, args, log)
#Create dictionaries for easy lookup
localdict = {}
index = 0
for lfile in locallist:
localdict[lfile['name']]=index
index+=1
remotedict = {}
index = 0
for rfile in remotelist:
remotedict[rfile['name']]=index
index+=1
# Traverse local filelist and
# check if local file is present on remote
for lfile in locallist:
#Check if file is present in skipfile
#If present in skipfile, skip to next file in locallist
skiptonext = False
for p in skiplines:
m=p.match(lfile['name'])
if (m):
#log.logprint(lfile['name']+" match "+m.group()+", thus present in skipfile "+args.skipfile)
log.logprint("Skipping: "+lfile['name'])
skiptonext = True
break
if skiptonext: continue
#
#Check if remote has the local file
#if present remote, type file and modify time is older than local file, set upload flag
#
        upload = False #Start as False; it is switched to True below when the file is missing
                        #on the remote or when the local copy is newer, i.e. everything is
                        #uploaded except remote files that are already up to date
if lfile['name'] in remotedict:
rfile = remotelist[remotedict[lfile['name']]] #Get fileinfo from remotelist using index
if lfile['type']=="file":
log.debug(lfile['name']+" is present remote : "+rfile['name'])
if (lfile['modify']>rfile['modify']):
log.debug("Local file is newer by "+str(lfile['modify']-rfile['modify'])+" seconds, try to upload...")
upload = True
elif lfile['type']=="dir":
log.debug(lfile['name']+" is present remote and is directory: "+rfile['name'])
sync_files(ftp, args, skiplines, lfile['name'], rfile['name'], log)
elif lfile['type']=="dir":
log.debug(lfile['name']+" is NOT present remote and is directory: ")
try:
ftp.mkd(lfile['name'])
log.logprint("CREATED DIR : "+lfile['name'])
sync_files(ftp, args, skiplines, lfile['name'], lfile['name'], log)
except ftplib.all_errors, resp:
log.logprint("ERROR: Failed to create directory "+lfile['name']+" - "+str(resp))
elif lfile['type']=="file":
log.debug(lfile['name']+" is NOT present remote and is file")
upload = True
#Handle upload flag
if (upload and lfile['type']=="file"):
try:
touch(lfile['name']) #Touch local file to set modify time to approx the same as the remote will get
ftp.storbinary('STOR '+lfile['name'], open(lfile['name'], 'rb'))
log.logprint("UPLOADED : "+lfile['name'])
except ftplib.all_errors, resp:
log.logprint("ERROR: Failed to upload "+lfile['name']+" - "+str(resp))
#Make sure locally deleted items are deleted remotely
for rfile in remotelist:
if rfile['name'] not in localdict:
if rfile['type']=="file":
#Remote file is not present locally=>Delete it
try:
ftp.delete(rfile['name'])
log.logprint("DELETED: "+rfile['name'])
except ftplib.all_errors, resp:
log.logprint("ERROR: Failed to delete "+rfile['name']+" - "+str(resp))
elif rfile['type']=="dir":
log.debug("Remote dir "+rfile['name']+" not present locally, delete it recursively")
                #Remote dir is not present locally, descend and recursively delete everything
#TODO: recursive_delete(ftp, rfile['name'])
delete_recursive(ftp, args, rfile['name'], log)
ftp.cwd("..")
os.chdir("..")
def delete_recursive(ftp, args, remotepath, log):
remotelist = get_remote_files(ftp, remotepath, args, log)
#Make sure locally deleted items are deleted remotely
for rfile in remotelist:
if rfile['type']=="file":
try:
ftp.delete(rfile['name'])
log.logprint("DELETED: "+rfile['name'])
except ftplib.all_errors, resp:
log.logprint("ERROR: Failed to delete "+rfile['name']+" - "+str(resp))
elif rfile['type']=="dir":
log.debug("Remote dir "+rfile['name']+" not present locally, delete it recursively")
delete_recursive(ftp, args, rfile['name'], log)
ftp.cwd("..")
try:
ftp.rmd(remotepath)
log.logprint("DELETED DIR: "+remotepath)
except ftplib.all_errors, resp:
log.logprint("ERROR: Failed to delete directory "+remotepath+" - "+str(resp))
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--host", help="ftp hostname", required=True)
parser.add_argument("-u", "--user", help="username on ftp server", required=True)
parser.add_argument("-p", "--password", help="password", required=True)
parser.add_argument("-d", "--debug",
help="print debug to terminal, default 0, use multiple times to increase verbosity, i.e. -d -d",
action="count")
parser.add_argument("-b", "--basedir", help="Toplevel directory on ftp server, default www")
parser.add_argument("-t", "--path", help="Local toplevel directory, default ., i.e. current dir")
parser.add_argument("-s", "--skipfile", help="Do not upload files in <skipfile>, default name upload.skip")
parser.set_defaults(debug=0)
parser.set_defaults(skipfile="upload.skip")
parser.set_defaults(basedir="www")
parser.set_defaults(path=".")
args = parser.parse_args()
log = SNYLogger(basename="upload", size_limit=10, no_logfiles=2, stdout=args.debug)
skiplines = read_skipfile(args.skipfile, log)
ftp = ftp_login(args, log)
sync_files(ftp, args, skiplines, args.path, args.basedir, log)
ftp.quit()
| #!/usr/bin/python
import sys
from loglib import SNYLogger
import ftplib
import argparse
import re
import os
import calendar
import time
def read_skipfile(infile, log):
skiplines = list()
skipfile = open(infile, 'r')
for line in skipfile:
newline = line.rstrip('\r\n')
linelength = len(newline)
if linelength>0:
log.debug("Adding "+newline+" to skiplines")
tmpobjects = re.compile(newline)
skiplines.append(tmpobjects)
skipfile.close()
return skiplines
#GET LOCAL FILELIST
def get_local_files(localpath,log):
locallist = list()
os.chdir(localpath)
log.debug("*** GETTING LOCAL FILELIST ***")
for name in os.listdir("."):
if (not name.startswith('.')):
statinfo = os.stat(name)
if (statinfo.st_mode>=32768):
entrytype = "file"
else:
entrytype = "dir"
size = statinfo.st_size
date = statinfo.st_mtime
log.debug("Date:"+str(int(date))+" type:"+entrytype+", name:"+name+" size:"+str(size))
locallist.append({'name':name,'type':entrytype,'modify':int(date),'size':size})
return locallist
#
# login to ftp server
#
def ftp_login(args, log):
ftp = ftplib.FTP()
port = 21
ftp.connect(args.host, port)
try:
log.debug("Logging in...")
ftp.login(args.user, args.password)
log.debug(ftp.getwelcome())
except ftplib.error_perm, resp:
log.logprint(str(resp))
except:
log.logprint("Login section failed..")
return ftp
#
# get remote files
#
def get_remote_files(ftp, remotepath, args, log):
# LIST CONTENTS
contents = list()
dirlist = list()
log.debug("*** GET REMOTE FILELIST ***")
try:
ftp.cwd(remotepath)
# Entry point
ftp.retrlines('MLSD', contents.append)
for line in contents:
# log.debug(line)
entry = line.split(";")
size = "0" #Set this because directories does not report size
for item in entry:
cell = item.split("=")
if (cell[0]=="modify"):
date = cell[1]
modify=calendar.timegm(time.strptime(str(date), "%Y%m%d%H%M%S"))
#for loops/if checks are not blocks in python, i.e. no need to predefine modify
if (cell[0]=="type"):
entrytype=cell[1]
if (cell[0]=="size"):
size = cell[1]
if (len(cell[0])>0) and cell[0].startswith(' '):
#If string does not contain =, cell[1] will not be defined
#and first entry in cell[0] string will be whitespace
name = cell[0].lstrip()
log.debug("Date:"+str(modify)+" type:"+entrytype+" Name:"+name+" size:"+size)
if (entrytype=='file' or entrytype=='dir'): #Do not include current and parent dir entries
dirlist.append({'name':name,'type':entrytype,'modify':int(modify),'size':size})
except ftplib.error_perm, resp:
log.logprint(str(resp))
exit(1)
return dirlist
def touch(fname):
try:
os.utime(fname, None)
except:
log.logprint("Updating mtime failed, "+fname+" does not exist")
def sync_files(ftp, args, skiplines, localpath, remotepath, log):
locallist = get_local_files(localpath,log)
remotelist = get_remote_files(ftp, remotepath, args, log)
#Create dictionaries for easy lookup
localdict = {}
index = 0
for lfile in locallist:
localdict[lfile['name']]=index
index+=1
remotedict = {}
index = 0
for rfile in remotelist:
remotedict[rfile['name']]=index
index+=1
# Traverse local filelist and
# check if local file is present on remote
for lfile in locallist:
#Check if file is present in skipfile
#If present in skipfile, skip to next file in locallist
skiptonext = False
for p in skiplines:
m=p.match(lfile['name'])
if (m):
#log.logprint(lfile['name']+" match "+m.group()+", thus present in skipfile "+args.skipfile)
log.logprint("Skipping: "+lfile['name'])
skiptonext = True
break
if skiptonext: continue
#
#Check if remote has the local file
#if present remote, type file and modify time is older than local file, set upload flag
#
        upload = False #Start as False; it is switched to True below when the file is missing
                        #on the remote or when the local copy is newer, i.e. everything is
                        #uploaded except remote files that are already up to date
if lfile['name'] in remotedict:
rfile = remotelist[remotedict[lfile['name']]] #Get fileinfo from remotelist using index
if lfile['type']=="file":
log.debug(lfile['name']+" is present remote : "+rfile['name'])
if (lfile['modify']>rfile['modify']):
log.debug("Local file is newer by "+str(lfile['modify']-rfile['modify'])+" seconds, try to upload...")
upload = True
elif lfile['type']=="dir":
log.debug(lfile['name']+" is present remote and is directory: "+rfile['name'])
sync_files(ftp, args, skiplines, lfile['name'], rfile['name'], log)
elif lfile['type']=="dir":
log.debug(lfile['name']+" is NOT present remote and is directory: ")
try:
ftp.mkd(lfile['name'])
log.logprint("CREATED DIR : "+lfile['name'])
sync_files(ftp, args, skiplines, lfile['name'], lfile['name'], log)
except ftplib.all_errors, resp:
log.logprint("ERROR: Failed to create directory "+lfile['name']+" - "+str(resp))
elif lfile['type']=="file":
log.debug(lfile['name']+" is NOT present remote and is file")
upload = True
#Handle upload flag
if (upload and lfile['type']=="file"):
try:
touch(lfile['name']) #Touch local file to set modify time to approx the same as the remote will get
ftp.storbinary('STOR '+lfile['name'], open(lfile['name'], 'rb'))
log.logprint("UPLOADED : "+lfile['name'])
except ftplib.all_errors, resp:
log.logprint("ERROR: Failed to upload "+lfile['name']+" - "+str(resp))
#Make sure locally deleted items are deleted remotely
for rfile in remotelist:
if rfile['name'] not in localdict:
if rfile['type']=="file":
#Remote file is not present locally=>Delete it
try:
ftp.delete(rfile['name'])
log.logprint("DELETED: "+rfile['name'])
except ftplib.all_errors, resp:
log.logprint("ERROR: Failed to delete "+rfile['name']+" - "+str(resp))
elif rfile['type']=="dir":
log.debug("Remote dir "+rfile['name']+" not present locally, delete it recursively")
                #Remote dir is not present locally, descend and recursively delete everything
#TODO: recursive_delete(ftp, rfile['name'])
delete_recursive(ftp, args, rfile['name'], log)
ftp.cwd("..")
os.chdir("..")
def delete_recursive(ftp, args, remotepath, log):
remotelist = get_remote_files(ftp, remotepath, args, log)
#Make sure locally deleted items are deleted remotely
for rfile in remotelist:
if rfile['type']=="file":
try:
ftp.delete(rfile['name'])
log.logprint("DELETED: "+rfile['name'])
except ftplib.all_errors, resp:
log.logprint("ERROR: Failed to delete "+rfile['name']+" - "+str(resp))
elif rfile['type']=="dir":
log.debug("Remote dir "+rfile['name']+" not present locally, delete it recursively")
delete_recursive(ftp, args, rfile['name'], log)
ftp.cwd("..")
try:
ftp.rmd(remotepath)
log.logprint("DELETED DIR: "+remotepath)
except ftplib.all_errors, resp:
log.logprint("ERROR: Failed to delete directory "+remotepath+" - "+str(resp))
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--host", help="ftp hostname", required=True)
parser.add_argument("-u", "--user", help="username on ftp server", required=True)
parser.add_argument("-p", "--password", help="password", required=True)
parser.add_argument("-d", "--debug",
help="print debug to terminal, default 0, use multiple times to increase verbosity, i.e. -d -d",
action="count")
parser.add_argument("-b", "--basedir", help="Toplevel directory on ftp server, default www")
parser.add_argument("-t", "--path", help="Local toplevel directory, default ., i.e. current dir")
parser.add_argument("-s", "--skipfile", help="Do not upload files in <skipfile>, default name upload.skip")
parser.set_defaults(debug=0)
parser.set_defaults(skipfile="upload.skip")
parser.set_defaults(basedir="www")
parser.set_defaults(path=".")
args = parser.parse_args()
log = SNYLogger(basename="upload", size_limit=10, no_logfiles=2, stdout=args.debug)
skiplines = read_skipfile(args.skipfile, log)
ftp = ftp_login(args, log)
sync_files(ftp, args, skiplines, args.path, args.basedir, log)
ftp.quit() | en | 0.88191 | #!/usr/bin/python #GET LOCAL FILELIST # # login to ftp server # # # get remote files # # LIST CONTENTS # Entry point # log.debug(line) #Set this because directories does not report size #for loops/if checks are not blocks in python, i.e. no need to predefine modify #If string does not contain =, cell[1] will not be defined #and first entry in cell[0] string will be whitespace #Do not include current and parent dir entries #Create dictionaries for easy lookup # Traverse local filelist and # check if local file is present on remote #Check if file is present in skipfile #If present in skipfile, skip to next file in locallist #log.logprint(lfile['name']+" match "+m.group()+", thus present in skipfile "+args.skipfile) # #Check if remote has the local file #if present remote, type file and modify time is older than local file, set upload flag # #Set to True here instead of False since this will handle the case where #remote does not exist, i.e. always upload except when remote is present #and up to date #Get fileinfo from remotelist using index #Handle upload flag #Touch local file to set modify time to approx the same as the remote will get #Make sure locally deleted items are deleted remotely #Remote file is not present locally=>Delete it #Remote dir is not present locally, decend and recursively delete everything #TODO: recursive_delete(ftp, rfile['name']) #Make sure locally deleted items are deleted remotely | 2.776386 | 3 |
chapter2/intogen-arrays/src/mrna/mrna_comb_gene_classif.py | chris-zen/phd-thesis | 1 | 8875 | #!/usr/bin/env python
"""
Classify oncodrive gene results and prepare for combination
* Configuration parameters:
- The ones required by intogen.data.entity.EntityManagerFactory
* Input:
- oncodrive_ids: The mrna.oncodrive_genes to process
* Output:
- combinations: The mrna.combination prepared to be calculated
* Entities:
- mrna.oncodrive_genes
- mrna.combination
"""
import uuid
import json
from wok.task import Task
from wok.element import DataElement
from intogen.data.entity.server import EntityServer
from intogen.data.entity import types
def run(task):
# Initialization
task.check_conf(["entities"])
conf = task.conf
log = task.logger()
task.check_in_ports(["oncodrive_ids"])
task.check_out_ports(["combinations"])
oncodrive_port = task.ports["oncodrive_ids"]
combination_port = task.ports["combinations"]
es = EntityServer(conf["entities"])
em = es.manager()
log.info("Indexing available combination results ...")
comb_results_index = em.group_ids(
["icdo_topography", "icdo_morphology", "id_type"],
types.MRNA_COMBINATION, unique = True)
ENSEMBL_GENE = "ensembl:gene"
classif = {}
log.info("Classifying oncodrive results ...")
for oid in oncodrive_port:
o = em.find(oid, types.MRNA_ONCODRIVE_GENES)
if o is None:
log.error("{0} not found: {1}".format(types.MRNA_ONCODRIVE_GENES, oid))
continue
okey = (o["study_id"], o["platform_id"], o["icdo_topography"], o["icdo_morphology"])
key = (o["icdo_topography"], o["icdo_morphology"], ENSEMBL_GENE)
log.debug("Oncodrive results ({0}) [{1}] classified into ({2}) ...".format(", ".join(okey), oid, ", ".join(key)))
if key in classif:
classif[key] += [o]
else:
classif[key] = [o]
log.info("Preparing combinations ...")
for key in sorted(classif):
if key in comb_results_index:
cid = comb_results_index[key][0]
c = em.find(cid, types.MRNA_COMBINATION)
if c is None:
log.error("{0} not found: {1}".format(types.MRNA_COMBINATION, cid))
return
else:
c = DataElement(key_sep = "/")
c["id"] = cid = str(uuid.uuid4())
c["icdo_topography"] = key[0]
c["icdo_morphology"] = key[1]
c["id_type"] = ENSEMBL_GENE
olist = classif[key]
log.info("({0}) [{1}] --> {2} results".format(", ".join(key), cid, len(olist)))
ids = c.create_list()
flist = c.create_list()
for o in olist:
ids += [o["id"]]
flist += [o["results_file"]]
c["source"] = src = c.create_element()
src["type"] = types.MRNA_ONCODRIVE_GENES
src["ids"] = ids
c["files"] = flist
combination_port.write(json.dumps(c.to_native()))
em.close()
if __name__ == "__main__":
Task(run).start()
| #!/usr/bin/env python
"""
Classify oncodrive gene results and prepare for combination
* Configuration parameters:
- The ones required by intogen.data.entity.EntityManagerFactory
* Input:
- oncodrive_ids: The mrna.oncodrive_genes to process
* Output:
- combinations: The mrna.combination prepared to be calculated
* Entities:
- mrna.oncodrive_genes
- mrna.combination
"""
import uuid
import json
from wok.task import Task
from wok.element import DataElement
from intogen.data.entity.server import EntityServer
from intogen.data.entity import types
def run(task):
# Initialization
task.check_conf(["entities"])
conf = task.conf
log = task.logger()
task.check_in_ports(["oncodrive_ids"])
task.check_out_ports(["combinations"])
oncodrive_port = task.ports["oncodrive_ids"]
combination_port = task.ports["combinations"]
es = EntityServer(conf["entities"])
em = es.manager()
log.info("Indexing available combination results ...")
comb_results_index = em.group_ids(
["icdo_topography", "icdo_morphology", "id_type"],
types.MRNA_COMBINATION, unique = True)
ENSEMBL_GENE = "ensembl:gene"
classif = {}
log.info("Classifying oncodrive results ...")
for oid in oncodrive_port:
o = em.find(oid, types.MRNA_ONCODRIVE_GENES)
if o is None:
log.error("{0} not found: {1}".format(types.MRNA_ONCODRIVE_GENES, oid))
continue
okey = (o["study_id"], o["platform_id"], o["icdo_topography"], o["icdo_morphology"])
key = (o["icdo_topography"], o["icdo_morphology"], ENSEMBL_GENE)
log.debug("Oncodrive results ({0}) [{1}] classified into ({2}) ...".format(", ".join(okey), oid, ", ".join(key)))
if key in classif:
classif[key] += [o]
else:
classif[key] = [o]
log.info("Preparing combinations ...")
for key in sorted(classif):
if key in comb_results_index:
cid = comb_results_index[key][0]
c = em.find(cid, types.MRNA_COMBINATION)
if c is None:
log.error("{0} not found: {1}".format(types.MRNA_COMBINATION, cid))
return
else:
c = DataElement(key_sep = "/")
c["id"] = cid = str(uuid.uuid4())
c["icdo_topography"] = key[0]
c["icdo_morphology"] = key[1]
c["id_type"] = ENSEMBL_GENE
olist = classif[key]
log.info("({0}) [{1}] --> {2} results".format(", ".join(key), cid, len(olist)))
ids = c.create_list()
flist = c.create_list()
for o in olist:
ids += [o["id"]]
flist += [o["results_file"]]
c["source"] = src = c.create_element()
src["type"] = types.MRNA_ONCODRIVE_GENES
src["ids"] = ids
c["files"] = flist
combination_port.write(json.dumps(c.to_native()))
em.close()
if __name__ == "__main__":
Task(run).start()
| en | 0.595995 | #!/usr/bin/env python Classify oncodrive gene results and prepare for combination * Configuration parameters: - The ones required by intogen.data.entity.EntityManagerFactory * Input: - oncodrive_ids: The mrna.oncodrive_genes to process * Output: - combinations: The mrna.combination prepared to be calculated * Entities: - mrna.oncodrive_genes - mrna.combination # Initialization | 2.292009 | 2 |
src/FunctionApps/DevOps/tests/test_get_ip.py | CDCgov/prime-public-health-data-infrastructure | 3 | 8876 | def test_get_ip_placeholder():
"""placeholder so pytest does not fail"""
pass
| def test_get_ip_placeholder():
"""placeholder so pytest does not fail"""
pass
| en | 0.838032 | placeholder so pytest does not fail | 1.20992 | 1 |
data/models/svm_benchmark.py | Laurenhut/Machine_Learning_Final | 0 | 8877 | <gh_stars>0
#!/usr/bin/env python
from sklearn import svm
from sklearn.model_selection import cross_val_score
import numpy as np
import csv_io
def main():
training, target = csv_io.read_data("../Data/train.csv")
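    # the first column of each row is dropped below (assumed here to be an id column rather than a feature)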
training = [x[1:] for x in training]
target = [float(x) for x in target]
test, throwaway = csv_io.read_data("../Data/test.csv")
test = [x[1:] for x in test]
svc = svm.SVC(kernel='poly', degree=2)
    scores = cross_val_score(svc, training, target, cv=10)
print np.mean(scores)
# svc.fit(training, target)
# predicted_probs = svc.predict_proba(test)
# predicted_probs = [[min(max(x,0.001),0.999) for x in y]
# for y in predicted_probs]
# predicted_probs = [["%f" % x for x in y] for y in predicted_probs]
# csv_io.write_delimited_file("../Submissions/svm_benchmark.csv",
# predicted_probs)
if __name__=="__main__":
    main()
| #!/usr/bin/env python
from sklearn import svm
from sklearn.model_selection import cross_val_score
import numpy as np
import csv_io
def main():
training, target = csv_io.read_data("../Data/train.csv")
training = [x[1:] for x in training]
target = [float(x) for x in target]
test, throwaway = csv_io.read_data("../Data/test.csv")
test = [x[1:] for x in test]
svc = svm.SVC(kernel='poly', degree=2)
    scores = cross_val_score(svc, training, target, cv=10)
print np.mean(scores)
# svc.fit(training, target)
# predicted_probs = svc.predict_proba(test)
# predicted_probs = [[min(max(x,0.001),0.999) for x in y]
# for y in predicted_probs]
# predicted_probs = [["%f" % x for x in y] for y in predicted_probs]
# csv_io.write_delimited_file("../Submissions/svm_benchmark.csv",
# predicted_probs)
if __name__=="__main__":
main() | en | 0.583503 | #!/usr/bin/env python # svc.fit(training, target) # predicted_probs = svc.predict_proba(test) # predicted_probs = [[min(max(x,0.001),0.999) for x in y] # for y in predicted_probs] # predicted_probs = [["%f" % x for x in y] for y in predicted_probs] # csv_io.write_delimited_file("../Submissions/svm_benchmark.csv", # predicted_probs) | 2.931732 | 3 |
configs/utils/config_generator.py | user-wu/SOD_eval_metrics | 0 | 8878 | <reponame>user-wu/SOD_eval_metrics<gh_stars>0
# -*- coding: utf-8 -*-
from matplotlib import colors
# max = 148
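# Iterator over sorted matplotlib named colours, used to hand each plotted method a distinct line colour.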
_COLOR_Genarator = iter(
sorted(
[
color
for name, color in colors.cnames.items()
if name not in ["red", "white"] or not name.startswith("light") or "gray" in name
]
)
)
def curve_info_generator():
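    # Returns a closure that builds each method's curve settings (paths, label, width, colour);
    # line styles alternate between solid and dashed, and the method named "Ours" is always drawn in solid red.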
line_style_flag = True
def _template_generator(
method_info: dict, method_name: str, line_color: str = None, line_width: int = 3
) -> dict:
nonlocal line_style_flag
template_info = dict(
path_dict=method_info,
curve_setting=dict(
line_style="-" if line_style_flag else "--",
line_label=method_name,
line_width=line_width,
),
)
print(method_name)
if method_name == "Ours":
template_info["curve_setting"]["line_color"] = 'red'
template_info["curve_setting"]["line_style"] = '-'
# line_style_flag = not line_style_flag
else:
if line_color is not None:
template_info["curve_setting"]["line_color"] = line_color
else:
template_info["curve_setting"]["line_color"] = next(_COLOR_Genarator)
line_style_flag = not line_style_flag
return template_info
return _template_generator
def simple_info_generator():
def _template_generator(method_info: dict, method_name: str) -> dict:
template_info = dict(path_dict=method_info, label=method_name)
return template_info
return _template_generator
| # -*- coding: utf-8 -*-
from matplotlib import colors
# max = 148
_COLOR_Genarator = iter(
sorted(
[
color
for name, color in colors.cnames.items()
if name not in ["red", "white"] or not name.startswith("light") or "gray" in name
]
)
)
def curve_info_generator():
line_style_flag = True
def _template_generator(
method_info: dict, method_name: str, line_color: str = None, line_width: int = 3
) -> dict:
nonlocal line_style_flag
template_info = dict(
path_dict=method_info,
curve_setting=dict(
line_style="-" if line_style_flag else "--",
line_label=method_name,
line_width=line_width,
),
)
print(method_name)
if method_name == "Ours":
template_info["curve_setting"]["line_color"] = 'red'
template_info["curve_setting"]["line_style"] = '-'
# line_style_flag = not line_style_flag
else:
if line_color is not None:
template_info["curve_setting"]["line_color"] = line_color
else:
template_info["curve_setting"]["line_color"] = next(_COLOR_Genarator)
line_style_flag = not line_style_flag
return template_info
return _template_generator
def simple_info_generator():
def _template_generator(method_info: dict, method_name: str) -> dict:
template_info = dict(path_dict=method_info, label=method_name)
return template_info
return _template_generator | en | 0.59024 | # -*- coding: utf-8 -*- # max = 148 # line_style_flag = not line_style_flag | 2.372807 | 2 |
core/sms_service.py | kartik1000/jcc-registration-portal | 0 | 8879 | <reponame>kartik1000/jcc-registration-portal<filename>core/sms_service.py<gh_stars>0
from urllib.parse import urlencode
from decouple import config
import hashlib
import requests
BASE = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
auth_key = config('AUTH_KEY')
url = 'http://sms.globehost.com/api/sendhttp.php?'
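# Convert a non-negative integer to a base-62 string using the alphabet above
# (digits, lowercase, uppercase); used below to shorten an md5 digest.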
def encode_base(num, array=BASE):
if(num == 0):
return array[0]
retarr = []
base = len(array)
while num:
num, res = divmod(num, base)
retarr.append(array[res])
retarr.reverse()
return ''.join(retarr)[:6]
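# Derive a short 6-character id from an arbitrary string: md5-hash it, read the digest as an
# integer and re-encode it in base 62, e.g. encode_base(61) == 'Z' and encode_base(62) == '10'.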
def generate(alphanum):
short = (hashlib.md5(alphanum.encode())).hexdigest()
short = int(short, 16)
short = encode_base(short)
return short
def send_message(team_name, team_id, contact):
message = 'Your unique team ID for Junior Code Cracker 2k18 is ' + \
team_id + '.Kindly take note and submit this at the event.'
data = {
'authkey': auth_key,
'mobiles': contact,
'message': message,
'sender': 'GNULUG',
'route': '4',
}
data_encoded = urlencode(data)
r = requests.get(url + data_encoded)
print('Message Sent Successfully !!')
return r.status_code
| from urllib.parse import urlencode
from decouple import config
import hashlib
import requests
BASE = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
auth_key = config('AUTH_KEY')
url = 'http://sms.globehost.com/api/sendhttp.php?'
def encode_base(num, array=BASE):
if(num == 0):
return array[0]
retarr = []
base = len(array)
while num:
num, res = divmod(num, base)
retarr.append(array[res])
retarr.reverse()
return ''.join(retarr)[:6]
def generate(alphanum):
short = (hashlib.md5(alphanum.encode())).hexdigest()
short = int(short, 16)
short = encode_base(short)
return short
def send_message(team_name, team_id, contact):
message = 'Your unique team ID for Junior Code Cracker 2k18 is ' + \
team_id + '.Kindly take note and submit this at the event.'
data = {
'authkey': auth_key,
'mobiles': contact,
'message': message,
'sender': 'GNULUG',
'route': '4',
}
data_encoded = urlencode(data)
r = requests.get(url + data_encoded)
print('Message Sent Successfully !!')
return r.status_code | none | 1 | 2.571455 | 3 |
|
scripts/fetch_images.py | Protagonistss/sanic-for-v3 | 0 | 8880 | <reponame>Protagonistss/sanic-for-v3
import sys
import os
sys.path.append(os.pardir)
import random
import time
import requests
from contextlib import closing
from help import utils
from threading import Thread
def get_train_set_path(path: str):
create_path = utils.join_root_path(path)
return create_path
def create_train_set_dir(path='auth-set'):
create_path = get_train_set_path(path)
is_existed = os.path.exists(create_path)
if not is_existed:
os.mkdir(create_path)
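# File names are a 4-character random prefix plus a timestamp suffix, which keeps the downloaded samples unique.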
def gen_image_name(char_pool):
prefix = ''
for i in range(4):
prefix += random.choice(char_pool)
suffix = str(time.time()).replace('.', '')
return "{}_{}".format(prefix, suffix)
def gen_image_all_url(path):
rule = '0123456789'
return '{}/{}.png'.format(path, gen_image_name(rule))
def get_image(url, count=20000, path='auth-set'):
create_train_set_dir(path)
for loop in range(count):
response = requests.get(url, verify=False, stream=True)
with closing(response) as response:
with open(gen_image_all_url(get_train_set_path(path)), 'wb') as f:
for i in response.iter_content(chunk_size=512):
f.write(i)
print('第{}张图片保存成功'.format(loop + 1))
def main():
get_image('https://gray.930pm.cn/home.php/Login/verify_c', path='auth-set')
if __name__ == '__main__':
t1 = Thread(target=main)
t2 = Thread(target=main)
t3 = Thread(target=main)
t4 = Thread(target=main)
t1.start()
t2.start()
t3.start()
t4.start()
| import sys
import os
sys.path.append(os.pardir)
import random
import time
import requests
from contextlib import closing
from help import utils
from threading import Thread
def get_train_set_path(path: str):
create_path = utils.join_root_path(path)
return create_path
def create_train_set_dir(path='auth-set'):
create_path = get_train_set_path(path)
is_existed = os.path.exists(create_path)
if not is_existed:
os.mkdir(create_path)
def gen_image_name(char_pool):
prefix = ''
for i in range(4):
prefix += random.choice(char_pool)
suffix = str(time.time()).replace('.', '')
return "{}_{}".format(prefix, suffix)
def gen_image_all_url(path):
rule = '0123456789'
return '{}/{}.png'.format(path, gen_image_name(rule))
def get_image(url, count=20000, path='auth-set'):
create_train_set_dir(path)
for loop in range(count):
response = requests.get(url, verify=False, stream=True)
with closing(response) as response:
with open(gen_image_all_url(get_train_set_path(path)), 'wb') as f:
for i in response.iter_content(chunk_size=512):
f.write(i)
print('第{}张图片保存成功'.format(loop + 1))
def main():
get_image('https://gray.930pm.cn/home.php/Login/verify_c', path='auth-set')
if __name__ == '__main__':
t1 = Thread(target=main)
t2 = Thread(target=main)
t3 = Thread(target=main)
t4 = Thread(target=main)
t1.start()
t2.start()
t3.start()
t4.start() | none | 1 | 2.531138 | 3 |
|
GeneralStats/example.py | haoruilee/statslibrary | 58 | 8881 | import GeneralStats as gs
import numpy as np
from scipy.stats import skew
from scipy.stats import kurtosistest
import pandas as pd
if __name__ == "__main__":
gen=gs.GeneralStats()
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
print("data = ", data)
print("data1 = ", data1)
res=gen.average(data,rowvar=True)
res1=gen.average(data1,rowvar=True)
print("data平均值 = ",res)
print("data1平均值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.median(data,rowvar=True)
res1=gen.median(data1,rowvar=True)
print("data中位值 = ",res)
print("data1中位值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.mode(data,rowvar=True)
res1=gen.mode(data1,rowvar=True)
print("data众数值 = ",res)
print("data1众数值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
    res=gen.quantile(data,0.5,rowvar=True,interpolation='lower') #if the element count is even, the 0.5 quantile with interpolation='midpoint' equals the median
    res1=gen.quantile(data1,0.5,rowvar=True,interpolation='lower') #if the element count is odd, the 0.5 quantile with interpolation='lower' equals the median
print("data 0.5分位数值 = ",res)
print("data1 0.5分位数值 = ",res1)
res=gen.quantile(data,0.25,rowvar=True,interpolation='lower')
res1=gen.quantile(data1,0.25,rowvar=True,interpolation='lower')
print("data 0.25分位数值s = ",res)
print("data1 0.25分位数值 = ",res1)
res=gen.quantile(data,0.75,rowvar=True,interpolation='lower')
res1=gen.quantile(data1,0.75,rowvar=True,interpolation='lower')
print("data 0.75分位数值 = ",res)
print("data1 0.75分位数值 = ",res1)
res=gen.quantile(data,1.0,rowvar=True,interpolation='lower')
res1=gen.quantile(data1,1.0,rowvar=True,interpolation='lower')
print("data 1.0分位数值 = ",res)
print("data1 1.0分位数值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.range(data,rowvar=True)
res1=gen.range(data1,rowvar=True)
print("data极差 = ",res)
print("data1极差 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.variance(data,rowvar=True)
res1=gen.variance(data1,rowvar=True)
print("data方差 = ",res)
print("data1方差 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.standard_dev(data,rowvar=True)
res1=gen.standard_dev(data1,rowvar=True)
print("data标准差 = ",res)
print("data1标准差 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.skewness(data,rowvar=True)
res1=gen.skewness(data1,rowvar=True)
print("data偏度 = ",res)
print("data1偏度 = ",res1)
res=np.array([skew(data[0]),skew(data[1]),skew(data[2]),skew(data[3])])
print("使用scipy skew方法验证的data偏度 = ",res)
res1=np.array(skew(data1))
print("使用scipy skew方法验证的data1偏度 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([53, 61, 49, 66, 78, 47])
res=gen.kurtosis(data,rowvar=True)
res1=gen.kurtosis(data1,rowvar=True)
print("data峰度 = ",res)
print("data1峰度 = ",res1)
data_0=pd.Series(data[0])
data_1=pd.Series(data[1])
data_2=pd.Series(data[2])
data_3=pd.Series(data[3])
print("使用pandas kurt方法验证的data峰度 = ",[data_0.kurt(),data_1.kurt(),data_2.kurt(),data_3.kurt()])
data1=pd.Series(data1)
print("使用pandas kurt方法验证的data1峰度 = ",data1.kurt())
| import GeneralStats as gs
import numpy as np
from scipy.stats import skew
from scipy.stats import kurtosistest
import pandas as pd
if __name__ == "__main__":
gen=gs.GeneralStats()
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
print("data = ", data)
print("data1 = ", data1)
res=gen.average(data,rowvar=True)
res1=gen.average(data1,rowvar=True)
print("data平均值 = ",res)
print("data1平均值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.median(data,rowvar=True)
res1=gen.median(data1,rowvar=True)
print("data中位值 = ",res)
print("data1中位值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.mode(data,rowvar=True)
res1=gen.mode(data1,rowvar=True)
print("data众数值 = ",res)
print("data1众数值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
    res=gen.quantile(data,0.5,rowvar=True,interpolation='lower') #if the element count is even, the 0.5 quantile with interpolation='midpoint' equals the median
    res1=gen.quantile(data1,0.5,rowvar=True,interpolation='lower') #if the element count is odd, the 0.5 quantile with interpolation='lower' equals the median
print("data 0.5分位数值 = ",res)
print("data1 0.5分位数值 = ",res1)
res=gen.quantile(data,0.25,rowvar=True,interpolation='lower')
res1=gen.quantile(data1,0.25,rowvar=True,interpolation='lower')
print("data 0.25分位数值s = ",res)
print("data1 0.25分位数值 = ",res1)
res=gen.quantile(data,0.75,rowvar=True,interpolation='lower')
res1=gen.quantile(data1,0.75,rowvar=True,interpolation='lower')
print("data 0.75分位数值 = ",res)
print("data1 0.75分位数值 = ",res1)
res=gen.quantile(data,1.0,rowvar=True,interpolation='lower')
res1=gen.quantile(data1,1.0,rowvar=True,interpolation='lower')
print("data 1.0分位数值 = ",res)
print("data1 1.0分位数值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.range(data,rowvar=True)
res1=gen.range(data1,rowvar=True)
print("data极差 = ",res)
print("data1极差 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.variance(data,rowvar=True)
res1=gen.variance(data1,rowvar=True)
print("data方差 = ",res)
print("data1方差 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.standard_dev(data,rowvar=True)
res1=gen.standard_dev(data1,rowvar=True)
print("data标准差 = ",res)
print("data1标准差 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.skewness(data,rowvar=True)
res1=gen.skewness(data1,rowvar=True)
print("data偏度 = ",res)
print("data1偏度 = ",res1)
res=np.array([skew(data[0]),skew(data[1]),skew(data[2]),skew(data[3])])
print("使用scipy skew方法验证的data偏度 = ",res)
res1=np.array(skew(data1))
print("使用scipy skew方法验证的data1偏度 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([53, 61, 49, 66, 78, 47])
res=gen.kurtosis(data,rowvar=True)
res1=gen.kurtosis(data1,rowvar=True)
print("data峰度 = ",res)
print("data1峰度 = ",res1)
data_0=pd.Series(data[0])
data_1=pd.Series(data[1])
data_2=pd.Series(data[2])
data_3=pd.Series(data[3])
print("使用pandas kurt方法验证的data峰度 = ",[data_0.kurt(),data_1.kurt(),data_2.kurt(),data_3.kurt()])
data1=pd.Series(data1)
print("使用pandas kurt方法验证的data1峰度 = ",data1.kurt())
| zh | 0.761161 | #若元素个数为偶数,则模式为'midpoint'的0.5分位数值等价于中位数 #若元素个数为奇数,则模式为'lower'的0.5分位数值等价于中位数 | 2.691244 | 3 |
bootstrap.py | tqchen/yarn-ec2 | 35 | 8882 | <gh_stars>10-100
#!/usr/bin/env python
# encoding: utf-8
"""
script to install everything necessary
for working on a bare linux machine,
installing only the minimum dependencies
"""
import sys
import os
import logging
import subprocess
import xml.etree.ElementTree as ElementTree
import xml.dom.minidom as minidom
import socket
import time
import pwd
###---------------------------------------------------##
# Configuration Section, will be modified by script #
###---------------------------------------------------##
node_apt_packages = [
'emacs',
'git',
'g++',
'make',
'python-numpy',
'libprotobuf-dev',
'libcurl4-openssl-dev']
# master only packages
master_apt_packages = [
'protobuf-compiler']
# List of r packages to be installed in master
master_r_packages = [
'r-base-dev',
'r-base',
'r-cran-statmod',
'r-cran-RCurl',
'r-cran-rjson'
]
# download link of hadoop.
hadoop_url = 'http://apache.claz.org/hadoop/common/hadoop-2.8.0/hadoop-2.8.0.tar.gz'
hadoop_dir = 'hadoop-2.8.0'
# customized installation script.
# See optional installation scripts for options.
def custom_master_install():
#install_spark()
#install_r()
pass
# customized installation script for all nodes.
def custom_all_nodes_install():
install_gcc()
pass
###---------------------------------------------------##
# Automatically set by script #
###---------------------------------------------------##
USER_NAME = 'ubuntu'
# setup variables
MASTER = os.getenv('MY_MASTER_DNS', '')
# node type the type of current node
NODE_TYPE = os.getenv('MY_NODE_TYPE', 'm3.xlarge')
NODE_VMEM = int(os.getenv('MY_NODE_VMEM', str(1024*15)))
NODE_VCPU = int(os.getenv('MY_NODE_VCPU', '4'))
AWS_ID = os.getenv('AWS_ACCESS_KEY_ID', 'undefined')
AWS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY', 'undefined')
JAVA_HOME = os.getenv('JAVA_HOME')
HADOOP_HOME = os.getenv('HADOOP_HOME')
DISK_LIST = [('xvd' + chr(ord('b') + i)) for i in range(10)]
ENVIRON = os.environ.copy()
###--------------------------------##
# Optional installation scripts. #
###--------------------------------##
def install_r():
if master_r_packages:
sudo("apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E084DAB9")
sudo("echo deb https://cran.r-project.org/bin/linux/ubuntu trusty/ >>/etc/apt/sources.list")
sudo('apt-get -y update')
sudo('apt-get -y install %s' % (' '.join(master_r_packages)))
def install_spark():
run('wget https://www.apache.org/dist/spark/spark-2.1.1/spark-2.1.1-bin-hadoop2.7.tgz')
run('tar xf spark-2.1.1-bin-hadoop2.7.tgz')
run('rm -rf spark-2.1.1-bin-hadoop2.7.tgz')
with open('.bashrc', 'a') as fo:
fo.write('\nexport PATH=${PATH}:spark-2.1.1-bin-hadoop2.7\n')
def install_xgboost():
run('git clone --recursive https://github.com/dmlc/xgboost')
run('cd xgboost; cp make/config.mk .; echo USE_S3=1 >> config.mk; make -j4')
### Script section ###
def run(cmd):
try:
print cmd
logging.info(cmd)
proc = subprocess.Popen(cmd, shell=True, env = ENVIRON,
stdout=subprocess.PIPE, stderr = subprocess.PIPE)
out, err = proc.communicate()
retcode = proc.poll()
if retcode != 0:
logging.error('Command %s returns %d' % (cmd,retcode))
logging.error(out)
logging.error(err)
else:
print out
except Exception as e:
print(str(e))
logging.error('Exception running: %s' % cmd)
logging.error(str(e))
pass
def sudo(cmd):
run('sudo %s' % cmd)
### Installation helpers ###
def install_packages(pkgs):
sudo('apt-get -y update')
sudo('apt-get -y install %s' % (' '.join(pkgs)))
# install g++4.9, needed for regex match.
def install_gcc():
sudo('add-apt-repository -y ppa:ubuntu-toolchain-r/test')
sudo('apt-get -y update')
sudo('apt-get -y install g++-4.9')
def install_java():
"""
install java and setup environment variables
    Returns environment variables that need to be exported
"""
if not os.path.exists('jdk1.8.0_131'):
run('wget --no-check-certificate --no-cookies'\
' --header \"Cookie: oraclelicense=accept-securebackup-cookie\"'\
' http://download.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/jdk-8u131-linux-x64.tar.gz')
run('tar xf jdk-8u131-linux-x64.tar.gz')
run('rm -f jdk-8u131-linux-x64.tar.gz')
global JAVA_HOME
if JAVA_HOME is None:
JAVA_HOME = os.path.abspath('jdk1.8.0_131')
return [('JAVA_HOME', JAVA_HOME)]
def install_hadoop(is_master):
def update_site(fname, rmap):
"""
update the site script
"""
try:
tree = ElementTree.parse(fname)
root = tree.getroot()
except Exception:
cfg = ElementTree.Element("configuration")
tree = ElementTree.ElementTree(cfg)
root = tree.getroot()
rset = set()
for prop in root.getiterator('property'):
prop = dict((p.tag, p) for p in prop)
name = prop['name'].text.strip()
if name in rmap:
prop['value'].text = str(rmap[name])
rset.add(name)
for name, text in rmap.iteritems():
if name in rset:
continue
prop = ElementTree.SubElement(root, 'property')
ElementTree.SubElement(prop, 'name').text = name
ElementTree.SubElement(prop, 'value').text = str(text)
rough_string = ElementTree.tostring(root, 'utf-8')
reparsed = minidom.parseString(rough_string)
pretty = reparsed.toprettyxml(indent='\t')
fo = open(fname, 'w')
fo.write(pretty)
fo.close()
def setup_hadoop_site(master, hadoop_dir, hdfs_dir, vcpu, vmem):
"""
        setup the hadoop site files given the parameters
        Parameters
        ----------
        master: the dns name of the master node
        hadoop_dir: the directories used for hadoop temp files
        hdfs_dir: the directories used for hdfs data
        vcpu: the number of cpus the current machine has
        vmem: the amount of memory (MB) the current machine has
"""
if vmem < 4 * 1024:
reserved_ram = 256
elif vmem < 8 * 1024:
reserved_ram = 1 * 1024
elif vmem < 24 * 1024 :
reserved_ram = 2 * 1024
elif vmem < 48 * 1024:
reserved_ram = 2 * 1024
elif vmem < 64 * 1024:
reserved_ram = 6 * 1024
else:
reserved_ram = 8 * 1024
ram_per_container = (vmem - reserved_ram) / vcpu
if is_master:
vcpu = vcpu - 2
tmp_dir = hadoop_dir[0]
core_site = {
'fs.defaultFS': 'hdfs://%s:9000/' % master,
'fs.s3n.impl': 'org.apache.hadoop.fs.s3native.NativeS3FileSystem',
'hadoop.tmp.dir': tmp_dir
}
if AWS_ID != 'undefined':
core_site['fs.s3n.awsAccessKeyId'] = AWS_ID
core_site['fs.s3n.awsSecretAccessKey'] = AWS_KEY
update_site('%s/etc/hadoop/core-site.xml' % HADOOP_HOME, core_site)
hdfs_site = {
'dfs.data.dir': ','.join(['%s/data' % d for d in hdfs_dir]),
'dfs.permissions': 'false',
'dfs.replication': '1'
}
update_site('%s/etc/hadoop/hdfs-site.xml' % HADOOP_HOME, hdfs_site)
yarn_site = {
'yarn.resourcemanager.resource-tracker.address': '%s:8025' % master,
'yarn.resourcemanager.scheduler.address': '%s:8030' % master,
'yarn.resourcemanager.address': '%s:8032' % master,
'yarn.scheduler.minimum-allocation-mb': 512,
'yarn.scheduler.maximum-allocation-mb': 640000,
'yarn.scheduler.minimum-allocation-vcores': 1,
'yarn.scheduler.maximum-allocation-vcores': 32,
'yarn.nodemanager.resource.memory-mb': vcpu * ram_per_container,
'yarn.nodemanager.resource.cpu-vcores': vcpu,
'yarn.log-aggregation-enable': 'true',
'yarn.nodemanager.vmem-check-enabled': 'false',
'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
'yarn.nodemanager.aux-services.mapreduce.shuffle.class': 'org.apache.hadoop.mapred.ShuffleHandler',
'yarn.nodemanager.remote-app-log-dir': os.path.join(tmp_dir, 'logs'),
'yarn.nodemanager.log-dirs': os.path.join(tmp_dir, 'userlogs'),
'yarn.nodemanager.local-dirs': ','.join(['%s/yarn/nm-local-dir' % d for d in hadoop_dir])
}
update_site('%s/etc/hadoop/yarn-site.xml' % HADOOP_HOME, yarn_site)
mapred_site = {
'mapreduce.application.classpath' : ':'.join(['$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*',
'$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*',
'$HADOOP_MAPRED_HOME/share/hadoop/tools/lib/*']),
'yarn.app.mapreduce.am.resource.mb': 2 * ram_per_container,
'yarn.app.mapreduce.am.command-opts': '-Xmx%dm' % int(0.8 * 2 * ram_per_container),
'mapreduce.framework.name': 'yarn',
'mapreduce.map.cpu.vcores': 1,
'mapreduce.map.memory.mb': ram_per_container,
'mapreduce.map.java.opts': '-Xmx%dm' % int(0.8 * ram_per_container),
'mapreduce.reduce.cpu.vcores': 1,
'mapreduce.reduce.memory.mb': 2 * ram_per_container,
'mapreduce.reduce.java.opts': '-Xmx%dm' % int(0.8 * ram_per_container)
}
update_site('%s/etc/hadoop/mapred-site.xml' % HADOOP_HOME, mapred_site)
capacity_site = {
'yarn.scheduler.capacity.resource-calculator': 'org.apache.hadoop.yarn.util.resource.DominantResourceCalculator'
}
update_site('%s/etc/hadoop/capacity-scheduler.xml' % HADOOP_HOME, capacity_site)
fo = open('%s/etc/hadoop/hadoop-env.sh' % HADOOP_HOME, 'w')
fo.write('export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$HADOOP_PREFIX/share/hadoop/tools/lib/*\n')
fo.write('export HADOOP_LOG_DIR=%s/log\n' % tmp_dir)
fo.write('export YARN_LOG_DIR=%s/log\n' % tmp_dir)
fo.write('export JAVA_HOME=\"%s\"\n' % JAVA_HOME)
fo.close()
fo = open('%s/etc/hadoop/slaves' % HADOOP_HOME, 'w')
fo.write(master + '\n')
fo.close()
def run_install():
if not os.path.exists('hadoop-2.8.0'):
run('wget %s' % hadoop_url)
run('tar xf hadoop-2.8.0.tar.gz')
run('rm -f hadoop-2.8.0.tar.gz')
global HADOOP_HOME
if HADOOP_HOME is None:
HADOOP_HOME = os.path.abspath('hadoop-2.8.0')
env = [('HADOOP_HOME', HADOOP_HOME)]
env += [('HADOOP_PREFIX', HADOOP_HOME)]
env += [('HADOOP_MAPRED_HOME', HADOOP_HOME)]
env += [('HADOOP_COMMON_HOME', HADOOP_HOME)]
env += [('HADOOP_HDFS_HOME', HADOOP_HOME)]
env += [('YARN_HOME', HADOOP_HOME)]
env += [('YARN_CONF_DIR', '%s/etc/hadoop' % HADOOP_HOME)]
env += [('HADOOP_CONF_DIR', '%s/etc/hadoop' % HADOOP_HOME)]
disks = ['/disk/%s' % d for d in DISK_LIST if os.path.exists('/dev/%s' % d)]
setup_hadoop_site(MASTER,
['%s/hadoop' % d for d in disks],
['%s/hadoop/dfs' % d for d in disks],
NODE_VCPU, NODE_VMEM)
return env
return run_install()
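# Refresh ~/.ssh/known_hosts for every host in the hadoop slaves file (plus localhost)
# so the start/stop scripts can ssh between nodes without interactive host-key prompts.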
def regsshkey(fname):
for dns in (open(fname).readlines() + ['localhost', '0.0.0.0']):
try:
run('ssh-keygen -R %s' % dns.strip())
except:
pass
run('ssh-keyscan %s >> ~/.ssh/known_hosts' % dns.strip())
# main script to install all dependencies
def install_main(is_master):
if is_master:
install_packages(master_apt_packages + node_apt_packages)
else:
install_packages(node_apt_packages)
env = []
env += install_java()
env += install_hadoop(is_master)
path = ['$HADOOP_HOME/bin', '$HADOOP_HOME/sbin', '$JAVA_HOME/bin']
env += [('LD_LIBRARY_PATH', '$HADOOP_HOME/native/lib')]
env += [('LD_LIBRARY_PATH', '${LD_LIBRARY_PATH}:$HADOOP_HDFS_HOME/lib/native:$JAVA_HOME/jre/lib/amd64/server')]
env += [('LD_LIBRARY_PATH', '${LD_LIBRARY_PATH}:/usr/local/lib')]
env += [('LIBHDFS_OPTS', '--Xmx128m')]
env += [('MY_MASTER_DNS', MASTER)]
env += [('MY_NODE_TYPE', NODE_TYPE)]
env += [('MY_NODE_VMEM', str(NODE_VMEM))]
env += [('MY_NODE_VCPU', str(NODE_VCPU))]
if AWS_ID != 'undefined':
env += [('AWS_ACCESS_KEY_ID', AWS_ID)]
if AWS_KEY != 'undefined':
env += [('AWS_SECRET_ACCESS_KEY', AWS_KEY)]
# setup environments
fo = open('.hadoop_env', 'w')
for k, v in env:
fo.write('export %s=%s\n' % (k,v))
ENVIRON[k] = v
fo.write('export PATH=$PATH:%s\n' % (':'.join(path)))
fo.write('export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib\n')
fo.close()
for l in open('.bashrc'):
if l.find('.hadoop_env') != -1:
return
run('echo source ~/.hadoop_env >> ~/.bashrc')
# allow ssh, if they already share the key.
key_setup = """
[ -f ~/.ssh/id_rsa ] ||
(ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa &&
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys)
"""
run(key_setup)
regsshkey('%s/etc/hadoop/slaves' % HADOOP_HOME)
    # end of installation.
# Make and run the startup script (format the data disks, start the Hadoop daemons)
def make_startup_script(is_master):
assert JAVA_HOME is not None
assert HADOOP_HOME is not None
assert NODE_VCPU is not None
assert NODE_VMEM is not None
disks = []
cmds = []
if is_master:
cmds.append('$HADOOP_HOME/sbin/stop-all.sh')
for d in DISK_LIST:
if os.path.exists('/dev/%s' % d):
cmds.append('sudo umount /dev/%s' % d)
cmds.append('sudo mkfs -t ext4 /dev/%s' % d)
cmds.append('sudo mkdir -p /disk/%s' % d)
cmds.append('sudo mount /dev/%s /disk/%s' % (d, d))
disks.append('/disk/%s' % d)
for d in disks:
cmds.append('sudo mkdir -p %s/hadoop' %d)
cmds.append('sudo chown ubuntu:ubuntu %s/hadoop' % d)
cmds.append('sudo mkdir -p %s/tmp' %d)
cmds.append('sudo chown ubuntu:ubuntu %s/tmp' % d)
cmds.append('rm -rf %s/hadoop/dfs' % d)
cmds.append('mkdir %s/hadoop/dfs' % d)
cmds.append('mkdir %s/hadoop/dfs/name' % d)
cmds.append('mkdir %s/hadoop/dfs/data' % d)
# run command
if is_master:
cmds.append('$HADOOP_HOME/bin/hadoop namenode -format')
cmds.append('$HADOOP_HOME/sbin/start-all.sh')
else:
cmds.append('export HADOOP_LIBEXEC_DIR=$HADOOP_HOME/libexec &&'\
' $HADOOP_HOME/sbin/yarn-daemon.sh --config $HADOOP_HOME/etc/hadoop start nodemanager')
with open('startup.sh', 'w') as fo:
fo.write('#!/bin/bash\n')
fo.write('set -v\n')
fo.write('\n'.join(cmds))
run('chmod +x startup.sh')
run('./startup.sh')
def main():
global MASTER
logging.basicConfig(filename = 'bootstrap.log', level = logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
if MASTER == '':
is_master = True
MASTER = socket.getfqdn()
logging.info('assuming master is myself as %s' % MASTER)
else:
is_master = socket.getfqdn() == MASTER
tstart = time.time()
install_main(is_master)
tmid = time.time()
logging.info('installation finishes in %g secs' % (tmid - tstart))
make_startup_script(is_master)
ENVIRON['HADOOP_HOME'] = HADOOP_HOME
ENVIRON['JAVA_HOME'] = JAVA_HOME
tend = time.time()
if is_master:
custom_master_install()
custom_all_nodes_install()
logging.info('boostrap finishes in %g secs' % (tend - tmid))
logging.info('all finishes in %g secs' % (tend - tstart))
if __name__ == '__main__':
pw_record = pwd.getpwnam(USER_NAME)
user_name = pw_record.pw_name
user_home_dir = pw_record.pw_dir
user_uid = pw_record.pw_uid
user_gid = pw_record.pw_gid
env = os.environ.copy()
cwd = user_home_dir
ENVIRON['HOME'] = user_home_dir
os.setgid(user_gid)
os.setuid(user_uid)
os.chdir(user_home_dir)
main()
| #!/usr/bin/env python
# encoding: utf-8
"""
script to install everything necessary
for working on a bare linux machine,
installing only the minimum dependencies
"""
import sys
import os
import logging
import subprocess
import xml.etree.ElementTree as ElementTree
import xml.dom.minidom as minidom
import socket
import time
import pwd
###---------------------------------------------------##
# Configuration Section, will be modified by script #
###---------------------------------------------------##
node_apt_packages = [
'emacs',
'git',
'g++',
'make',
'python-numpy',
'libprotobuf-dev',
'libcurl4-openssl-dev']
# master only packages
master_apt_packages = [
'protobuf-compiler']
# List of r packages to be installed in master
master_r_packages = [
'r-base-dev',
'r-base',
'r-cran-statmod',
'r-cran-RCurl',
'r-cran-rjson'
]
# download link of hadoop.
hadoop_url = 'http://apache.claz.org/hadoop/common/hadoop-2.8.0/hadoop-2.8.0.tar.gz'
hadoop_dir = 'hadoop-2.8.0'
# customized installation script.
# See optional installation scripts for options.
def custom_master_install():
#install_spark()
#install_r()
pass
# customized installation script for all nodes.
def custom_all_nodes_install():
install_gcc()
pass
###---------------------------------------------------##
# Automatically set by script #
###---------------------------------------------------##
USER_NAME = 'ubuntu'
# setup variables
MASTER = os.getenv('MY_MASTER_DNS', '')
# node type the type of current node
NODE_TYPE = os.getenv('MY_NODE_TYPE', 'm3.xlarge')
NODE_VMEM = int(os.getenv('MY_NODE_VMEM', str(1024*15)))
NODE_VCPU = int(os.getenv('MY_NODE_VCPU', '4'))
AWS_ID = os.getenv('AWS_ACCESS_KEY_ID', 'undefined')
AWS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY', 'undefined')
JAVA_HOME = os.getenv('JAVA_HOME')
HADOOP_HOME = os.getenv('HADOOP_HOME')
DISK_LIST = [('xvd' + chr(ord('b') + i)) for i in range(10)]
ENVIRON = os.environ.copy()
###--------------------------------##
# Optional installation scripts. #
###--------------------------------##
def install_r():
if master_r_packages:
sudo("apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E084DAB9")
sudo("echo deb https://cran.r-project.org/bin/linux/ubuntu trusty/ >>/etc/apt/sources.list")
sudo('apt-get -y update')
sudo('apt-get -y install %s' % (' '.join(master_r_packages)))
def install_spark():
run('wget https://www.apache.org/dist/spark/spark-2.1.1/spark-2.1.1-bin-hadoop2.7.tgz')
run('tar xf spark-2.1.1-bin-hadoop2.7.tgz')
run('rm -rf spark-2.1.1-bin-hadoop2.7.tgz')
with open('.bashrc', 'a') as fo:
fo.write('\nexport PATH=${PATH}:spark-2.1.1-bin-hadoop2.7\n')
def install_xgboost():
run('git clone --recursive https://github.com/dmlc/xgboost')
run('cd xgboost; cp make/config.mk .; echo USE_S3=1 >> config.mk; make -j4')
### Script section ###
def run(cmd):
try:
print cmd
logging.info(cmd)
proc = subprocess.Popen(cmd, shell=True, env = ENVIRON,
stdout=subprocess.PIPE, stderr = subprocess.PIPE)
out, err = proc.communicate()
retcode = proc.poll()
if retcode != 0:
logging.error('Command %s returns %d' % (cmd,retcode))
logging.error(out)
logging.error(err)
else:
print out
except Exception as e:
print(str(e))
logging.error('Exception running: %s' % cmd)
logging.error(str(e))
pass
def sudo(cmd):
run('sudo %s' % cmd)
### Installation helpers ###
def install_packages(pkgs):
sudo('apt-get -y update')
sudo('apt-get -y install %s' % (' '.join(pkgs)))
# install g++4.9, needed for regex match.
def install_gcc():
sudo('add-apt-repository -y ppa:ubuntu-toolchain-r/test')
sudo('apt-get -y update')
sudo('apt-get -y install g++-4.9')
def install_java():
"""
install java and setup environment variables
    Returns environment variables that need to be exported
"""
if not os.path.exists('jdk1.8.0_131'):
run('wget --no-check-certificate --no-cookies'\
' --header \"Cookie: oraclelicense=accept-securebackup-cookie\"'\
' http://download.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/jdk-8u131-linux-x64.tar.gz')
run('tar xf jdk-8u131-linux-x64.tar.gz')
run('rm -f jdk-8u131-linux-x64.tar.gz')
global JAVA_HOME
if JAVA_HOME is None:
JAVA_HOME = os.path.abspath('jdk1.8.0_131')
return [('JAVA_HOME', JAVA_HOME)]
def install_hadoop(is_master):
def update_site(fname, rmap):
"""
update the site script
"""
try:
tree = ElementTree.parse(fname)
root = tree.getroot()
except Exception:
cfg = ElementTree.Element("configuration")
tree = ElementTree.ElementTree(cfg)
root = tree.getroot()
rset = set()
for prop in root.getiterator('property'):
prop = dict((p.tag, p) for p in prop)
name = prop['name'].text.strip()
if name in rmap:
prop['value'].text = str(rmap[name])
rset.add(name)
for name, text in rmap.iteritems():
if name in rset:
continue
prop = ElementTree.SubElement(root, 'property')
ElementTree.SubElement(prop, 'name').text = name
ElementTree.SubElement(prop, 'value').text = str(text)
rough_string = ElementTree.tostring(root, 'utf-8')
reparsed = minidom.parseString(rough_string)
pretty = reparsed.toprettyxml(indent='\t')
fo = open(fname, 'w')
fo.write(pretty)
fo.close()
def setup_hadoop_site(master, hadoop_dir, hdfs_dir, vcpu, vmem):
"""
setup hadoop side given the parameters
Parameters
----------
master: the dns to master uri
hadoop_dir: the directory to store temp files
hdfs_dir: the directories for hdfs
vcpu: the number of cpus current machine have
vmem: the memory(MB) current machine have
"""
if vmem < 4 * 1024:
reserved_ram = 256
elif vmem < 8 * 1024:
reserved_ram = 1 * 1024
elif vmem < 24 * 1024 :
reserved_ram = 2 * 1024
elif vmem < 48 * 1024:
reserved_ram = 2 * 1024
elif vmem < 64 * 1024:
reserved_ram = 6 * 1024
else:
reserved_ram = 8 * 1024
ram_per_container = (vmem - reserved_ram) / vcpu
if is_master:
vcpu = vcpu - 2
tmp_dir = hadoop_dir[0]
core_site = {
'fs.defaultFS': 'hdfs://%s:9000/' % master,
'fs.s3n.impl': 'org.apache.hadoop.fs.s3native.NativeS3FileSystem',
'hadoop.tmp.dir': tmp_dir
}
if AWS_ID != 'undefined':
core_site['fs.s3n.awsAccessKeyId'] = AWS_ID
core_site['fs.s3n.awsSecretAccessKey'] = AWS_KEY
update_site('%s/etc/hadoop/core-site.xml' % HADOOP_HOME, core_site)
hdfs_site = {
'dfs.data.dir': ','.join(['%s/data' % d for d in hdfs_dir]),
'dfs.permissions': 'false',
'dfs.replication': '1'
}
update_site('%s/etc/hadoop/hdfs-site.xml' % HADOOP_HOME, hdfs_site)
yarn_site = {
'yarn.resourcemanager.resource-tracker.address': '%s:8025' % master,
'yarn.resourcemanager.scheduler.address': '%s:8030' % master,
'yarn.resourcemanager.address': '%s:8032' % master,
'yarn.scheduler.minimum-allocation-mb': 512,
'yarn.scheduler.maximum-allocation-mb': 640000,
'yarn.scheduler.minimum-allocation-vcores': 1,
'yarn.scheduler.maximum-allocation-vcores': 32,
'yarn.nodemanager.resource.memory-mb': vcpu * ram_per_container,
'yarn.nodemanager.resource.cpu-vcores': vcpu,
'yarn.log-aggregation-enable': 'true',
'yarn.nodemanager.vmem-check-enabled': 'false',
'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
'yarn.nodemanager.aux-services.mapreduce.shuffle.class': 'org.apache.hadoop.mapred.ShuffleHandler',
'yarn.nodemanager.remote-app-log-dir': os.path.join(tmp_dir, 'logs'),
'yarn.nodemanager.log-dirs': os.path.join(tmp_dir, 'userlogs'),
'yarn.nodemanager.local-dirs': ','.join(['%s/yarn/nm-local-dir' % d for d in hadoop_dir])
}
update_site('%s/etc/hadoop/yarn-site.xml' % HADOOP_HOME, yarn_site)
mapred_site = {
'mapreduce.application.classpath' : ':'.join(['$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*',
'$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*',
'$HADOOP_MAPRED_HOME/share/hadoop/tools/lib/*']),
'yarn.app.mapreduce.am.resource.mb': 2 * ram_per_container,
'yarn.app.mapreduce.am.command-opts': '-Xmx%dm' % int(0.8 * 2 * ram_per_container),
'mapreduce.framework.name': 'yarn',
'mapreduce.map.cpu.vcores': 1,
'mapreduce.map.memory.mb': ram_per_container,
'mapreduce.map.java.opts': '-Xmx%dm' % int(0.8 * ram_per_container),
'mapreduce.reduce.cpu.vcores': 1,
'mapreduce.reduce.memory.mb': 2 * ram_per_container,
'mapreduce.reduce.java.opts': '-Xmx%dm' % int(0.8 * ram_per_container)
}
update_site('%s/etc/hadoop/mapred-site.xml' % HADOOP_HOME, mapred_site)
capacity_site = {
'yarn.scheduler.capacity.resource-calculator': 'org.apache.hadoop.yarn.util.resource.DominantResourceCalculator'
}
update_site('%s/etc/hadoop/capacity-scheduler.xml' % HADOOP_HOME, capacity_site)
fo = open('%s/etc/hadoop/hadoop-env.sh' % HADOOP_HOME, 'w')
fo.write('export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$HADOOP_PREFIX/share/hadoop/tools/lib/*\n')
fo.write('export HADOOP_LOG_DIR=%s/log\n' % tmp_dir)
fo.write('export YARN_LOG_DIR=%s/log\n' % tmp_dir)
fo.write('export JAVA_HOME=\"%s\"\n' % JAVA_HOME)
fo.close()
fo = open('%s/etc/hadoop/slaves' % HADOOP_HOME, 'w')
fo.write(master + '\n')
fo.close()
def run_install():
if not os.path.exists('hadoop-2.8.0'):
run('wget %s' % hadoop_url)
run('tar xf hadoop-2.8.0.tar.gz')
run('rm -f hadoop-2.8.0.tar.gz')
global HADOOP_HOME
if HADOOP_HOME is None:
HADOOP_HOME = os.path.abspath('hadoop-2.8.0')
env = [('HADOOP_HOME', HADOOP_HOME)]
env += [('HADOOP_PREFIX', HADOOP_HOME)]
env += [('HADOOP_MAPRED_HOME', HADOOP_HOME)]
env += [('HADOOP_COMMON_HOME', HADOOP_HOME)]
env += [('HADOOP_HDFS_HOME', HADOOP_HOME)]
env += [('YARN_HOME', HADOOP_HOME)]
env += [('YARN_CONF_DIR', '%s/etc/hadoop' % HADOOP_HOME)]
env += [('HADOOP_CONF_DIR', '%s/etc/hadoop' % HADOOP_HOME)]
disks = ['/disk/%s' % d for d in DISK_LIST if os.path.exists('/dev/%s' % d)]
setup_hadoop_site(MASTER,
['%s/hadoop' % d for d in disks],
['%s/hadoop/dfs' % d for d in disks],
NODE_VCPU, NODE_VMEM)
return env
return run_install()
def regsshkey(fname):
for dns in (open(fname).readlines() + ['localhost', '0.0.0.0']):
try:
run('ssh-keygen -R %s' % dns.strip())
except:
pass
run('ssh-keyscan %s >> ~/.ssh/known_hosts' % dns.strip())
# main script to install all dependencies
def install_main(is_master):
if is_master:
install_packages(master_apt_packages + node_apt_packages)
else:
install_packages(node_apt_packages)
env = []
env += install_java()
env += install_hadoop(is_master)
path = ['$HADOOP_HOME/bin', '$HADOOP_HOME/sbin', '$JAVA_HOME/bin']
env += [('LD_LIBRARY_PATH', '$HADOOP_HOME/native/lib')]
env += [('LD_LIBRARY_PATH', '${LD_LIBRARY_PATH}:$HADOOP_HDFS_HOME/lib/native:$JAVA_HOME/jre/lib/amd64/server')]
env += [('LD_LIBRARY_PATH', '${LD_LIBRARY_PATH}:/usr/local/lib')]
env += [('LIBHDFS_OPTS', '--Xmx128m')]
env += [('MY_MASTER_DNS', MASTER)]
env += [('MY_NODE_TYPE', NODE_TYPE)]
env += [('MY_NODE_VMEM', str(NODE_VMEM))]
env += [('MY_NODE_VCPU', str(NODE_VCPU))]
if AWS_ID != 'undefined':
env += [('AWS_ACCESS_KEY_ID', AWS_ID)]
if AWS_KEY != 'undefined':
env += [('AWS_SECRET_ACCESS_KEY', AWS_KEY)]
# setup environments
fo = open('.hadoop_env', 'w')
for k, v in env:
fo.write('export %s=%s\n' % (k,v))
ENVIRON[k] = v
fo.write('export PATH=$PATH:%s\n' % (':'.join(path)))
fo.write('export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib\n')
fo.close()
for l in open('.bashrc'):
if l.find('.hadoop_env') != -1:
return
run('echo source ~/.hadoop_env >> ~/.bashrc')
# allow ssh, if they already share the key.
key_setup = """
[ -f ~/.ssh/id_rsa ] ||
(ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa &&
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys)
"""
run(key_setup)
regsshkey('%s/etc/hadoop/slaves' % HADOOP_HOME)
# end of instalation.
# Make startup script for bulding
def make_startup_script(is_master):
assert JAVA_HOME is not None
assert HADOOP_HOME is not None
assert NODE_VCPU is not None
assert NODE_VMEM is not None
disks = []
cmds = []
if is_master:
cmds.append('$HADOOP_HOME/sbin/stop-all.sh')
for d in DISK_LIST:
if os.path.exists('/dev/%s' % d):
cmds.append('sudo umount /dev/%s' % d)
cmds.append('sudo mkfs -t ext4 /dev/%s' % d)
cmds.append('sudo mkdir -p /disk/%s' % d)
cmds.append('sudo mount /dev/%s /disk/%s' % (d, d))
disks.append('/disk/%s' % d)
for d in disks:
cmds.append('sudo mkdir -p %s/hadoop' %d)
cmds.append('sudo chown ubuntu:ubuntu %s/hadoop' % d)
cmds.append('sudo mkdir -p %s/tmp' %d)
cmds.append('sudo chown ubuntu:ubuntu %s/tmp' % d)
cmds.append('rm -rf %s/hadoop/dfs' % d)
cmds.append('mkdir %s/hadoop/dfs' % d)
cmds.append('mkdir %s/hadoop/dfs/name' % d)
cmds.append('mkdir %s/hadoop/dfs/data' % d)
# run command
if is_master:
cmds.append('$HADOOP_HOME/bin/hadoop namenode -format')
cmds.append('$HADOOP_HOME/sbin/start-all.sh')
else:
cmds.append('export HADOOP_LIBEXEC_DIR=$HADOOP_HOME/libexec &&'\
' $HADOOP_HOME/sbin/yarn-daemon.sh --config $HADOOP_HOME/etc/hadoop start nodemanager')
with open('startup.sh', 'w') as fo:
fo.write('#!/bin/bash\n')
fo.write('set -v\n')
fo.write('\n'.join(cmds))
run('chmod +x startup.sh')
run('./startup.sh')
def main():
global MASTER
logging.basicConfig(filename = 'bootstrap.log', level = logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
if MASTER == '':
is_master = True
MASTER = socket.getfqdn()
logging.info('assuming master is myself as %s' % MASTER)
else:
is_master = socket.getfqdn() == MASTER
tstart = time.time()
install_main(is_master)
tmid = time.time()
logging.info('installation finishes in %g secs' % (tmid - tstart))
make_startup_script(is_master)
ENVIRON['HADOOP_HOME'] = HADOOP_HOME
ENVIRON['JAVA_HOME'] = JAVA_HOME
tend = time.time()
if is_master:
custom_master_install()
custom_all_nodes_install()
logging.info('boostrap finishes in %g secs' % (tend - tmid))
logging.info('all finishes in %g secs' % (tend - tstart))
if __name__ == '__main__':
pw_record = pwd.getpwnam(USER_NAME)
user_name = pw_record.pw_name
user_home_dir = pw_record.pw_dir
user_uid = pw_record.pw_uid
user_gid = pw_record.pw_gid
env = os.environ.copy()
cwd = user_home_dir
ENVIRON['HOME'] = user_home_dir
os.setgid(user_gid)
os.setuid(user_uid)
os.chdir(user_home_dir)
main() | en | 0.55248 | #!/usr/bin/env python # encoding: utf-8 script to install all the necessary things for working on a linux machine with nothing Installing minimum dependencies ###---------------------------------------------------## # Configuration Section, will be modified by script # ###---------------------------------------------------## # master only packages # List of r packages to be installed in master # download link of hadoop. # customized installation script. # See optional installation scripts for options. #install_spark() #install_r() # customized installation script for all nodes. ###---------------------------------------------------## # Automatically set by script # ###---------------------------------------------------## # setup variables # node type the type of current node ###--------------------------------## # Optional installation scripts. # ###--------------------------------## ### Script section ### ### Installation helpers ### # install g++4.9, needed for regex match. install java and setup environment variables Returns environment variables that needs to be exported update the site script setup hadoop side given the parameters Parameters ---------- master: the dns to master uri hadoop_dir: the directory to store temp files hdfs_dir: the directories for hdfs vcpu: the number of cpus current machine have vmem: the memory(MB) current machine have # main script to install all dependencies # setup environments # allow ssh, if they already share the key. [ -f ~/.ssh/id_rsa ] || (ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa && cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys) # end of instalation. # Make startup script for bulding # run command | 1.88631 | 2 |
intro/matplotlib/examples/plot_good.py | zmoon/scipy-lecture-notes | 2,538 | 8883 | """
A simple, good-looking plot
===========================
Demoing some simple features of matplotlib
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(5, 4), dpi=72)
axes = fig.add_axes([0.01, 0.01, .98, 0.98])
X = np.linspace(0, 2, 200)
Y = np.sin(2*np.pi*X)
plt.plot(X, Y, lw=2)
plt.ylim(-1.1, 1.1)
plt.grid()
plt.show()
| """
A simple, good-looking plot
===========================
Demoing some simple features of matplotlib
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(5, 4), dpi=72)
axes = fig.add_axes([0.01, 0.01, .98, 0.98])
X = np.linspace(0, 2, 200)
Y = np.sin(2*np.pi*X)
plt.plot(X, Y, lw=2)
plt.ylim(-1.1, 1.1)
plt.grid()
plt.show()
| en | 0.741695 | A simple, good-looking plot =========================== Demoing some simple features of matplotlib | 3.696145 | 4 |
pfio/_context.py | HiroakiMikami/pfio | 24 | 8884 | import os
import re
from typing import Tuple
from pfio._typing import Union
from pfio.container import Container
from pfio.io import IO, create_fs_handler
class FileSystemDriverList(object):
def __init__(self):
# TODO(tianqi): dynamically create this list
# as well as the patterns upon loading the pfio module.
self.scheme_list = ["hdfs", "posix"]
self.posix_pattern = re.compile(r"file:\/\/(?P<path>.+)")
self.hdfs_pattern = re.compile(r"(?P<path>hdfs:\/\/.+)")
self.pattern_list = {"hdfs": self.hdfs_pattern,
"posix": self.posix_pattern, }
def _determine_fs_type(self, path: str) -> Tuple[str, str, bool]:
if None is not path:
for fs_type, pattern in self.pattern_list.items():
ret = pattern.match(path)
if ret:
return (fs_type, ret.groupdict()["path"], True)
return ("posix", path, False)
def format_path(self, fs: IO, path: str) -> Tuple[str, bool]:
fs_type = fs.type
if fs_type in self.pattern_list.keys():
pattern = self.pattern_list[fs_type]
ret = pattern.match(path)
if ret:
return (ret.groupdict()["path"], True)
else:
return (path, False)
else:
return (path, False)
def get_handler_from_path(self, path: str) -> Tuple[IO, str, bool]:
(fs_type, actual_path, is_URI) = self._determine_fs_type(path)
handler = create_fs_handler(fs_type)
return (handler, actual_path, is_URI)
def get_handler_for_root(self,
uri_or_handler_name: str) -> Tuple[IO, str, bool]:
if uri_or_handler_name in self.pattern_list.keys():
return (create_fs_handler(uri_or_handler_name), "", False)
else:
(new_handler, actual_path, is_URI) = self.get_handler_from_path(
uri_or_handler_name)
new_handler.root = actual_path
return (new_handler, actual_path, is_URI)
def is_supported_scheme(self, scheme: str) -> bool:
return scheme in self.scheme_list
class DefaultContext(object):
def __init__(self):
self._fs_handler_list = FileSystemDriverList()
self._root = ""
self._default_context = \
self._fs_handler_list.get_handler_for_root("posix")[0]
def set_root(self, uri_or_handler: Union[str, IO]) -> None:
# TODO(check) if root is directory
if isinstance(uri_or_handler, IO):
handler = uri_or_handler
self._root = ""
else:
(handler, self._root, is_URI) = \
self.get_handler_by_name(uri_or_handler)
assert handler is not None
if self._root:
if not handler.isdir(self._root):
raise RuntimeError("the URI does not point to a directory")
self._default_context = handler
def get_handler(self, path: str = "") -> Tuple[IO, str]:
(handler, formatted_path,
is_URI) = self._fs_handler_list.get_handler_from_path(path)
if not is_URI:
actual_path = os.path.join(self._root, formatted_path)
return (self._default_context, actual_path)
else:
return (handler, formatted_path)
def open_as_container(self, path: str) -> Container:
(handler, formatted_path,
is_URI) = self._fs_handler_list.get_handler_from_path(path)
if not is_URI:
actual_path = os.path.join(self._root, formatted_path)
handler = self._default_context
else:
actual_path = formatted_path
self._root = ""
return handler.open_as_container(actual_path)
def get_handler_by_name(self, path: str) -> Tuple[IO, str, bool]:
return self._fs_handler_list.get_handler_for_root(path)
def get_root_dir(self) -> str:
return self._root
def is_supported_scheme(self, scheme: str) -> bool:
return self._fs_handler_list.is_supported_scheme(scheme)
| import os
import re
from typing import Tuple
from pfio._typing import Union
from pfio.container import Container
from pfio.io import IO, create_fs_handler
class FileSystemDriverList(object):
def __init__(self):
# TODO(tianqi): dynamically create this list
# as well as the patterns upon loading the pfio module.
self.scheme_list = ["hdfs", "posix"]
self.posix_pattern = re.compile(r"file:\/\/(?P<path>.+)")
self.hdfs_pattern = re.compile(r"(?P<path>hdfs:\/\/.+)")
self.pattern_list = {"hdfs": self.hdfs_pattern,
"posix": self.posix_pattern, }
def _determine_fs_type(self, path: str) -> Tuple[str, str, bool]:
if None is not path:
for fs_type, pattern in self.pattern_list.items():
ret = pattern.match(path)
if ret:
return (fs_type, ret.groupdict()["path"], True)
return ("posix", path, False)
def format_path(self, fs: IO, path: str) -> Tuple[str, bool]:
fs_type = fs.type
if fs_type in self.pattern_list.keys():
pattern = self.pattern_list[fs_type]
ret = pattern.match(path)
if ret:
return (ret.groupdict()["path"], True)
else:
return (path, False)
else:
return (path, False)
def get_handler_from_path(self, path: str) -> Tuple[IO, str, bool]:
(fs_type, actual_path, is_URI) = self._determine_fs_type(path)
handler = create_fs_handler(fs_type)
return (handler, actual_path, is_URI)
def get_handler_for_root(self,
uri_or_handler_name: str) -> Tuple[IO, str, bool]:
if uri_or_handler_name in self.pattern_list.keys():
return (create_fs_handler(uri_or_handler_name), "", False)
else:
(new_handler, actual_path, is_URI) = self.get_handler_from_path(
uri_or_handler_name)
new_handler.root = actual_path
return (new_handler, actual_path, is_URI)
def is_supported_scheme(self, scheme: str) -> bool:
return scheme in self.scheme_list
class DefaultContext(object):
def __init__(self):
self._fs_handler_list = FileSystemDriverList()
self._root = ""
self._default_context = \
self._fs_handler_list.get_handler_for_root("posix")[0]
def set_root(self, uri_or_handler: Union[str, IO]) -> None:
# TODO(check) if root is directory
if isinstance(uri_or_handler, IO):
handler = uri_or_handler
self._root = ""
else:
(handler, self._root, is_URI) = \
self.get_handler_by_name(uri_or_handler)
assert handler is not None
if self._root:
if not handler.isdir(self._root):
raise RuntimeError("the URI does not point to a directory")
self._default_context = handler
def get_handler(self, path: str = "") -> Tuple[IO, str]:
(handler, formatted_path,
is_URI) = self._fs_handler_list.get_handler_from_path(path)
if not is_URI:
actual_path = os.path.join(self._root, formatted_path)
return (self._default_context, actual_path)
else:
return (handler, formatted_path)
def open_as_container(self, path: str) -> Container:
(handler, formatted_path,
is_URI) = self._fs_handler_list.get_handler_from_path(path)
if not is_URI:
actual_path = os.path.join(self._root, formatted_path)
handler = self._default_context
else:
actual_path = formatted_path
self._root = ""
return handler.open_as_container(actual_path)
def get_handler_by_name(self, path: str) -> Tuple[IO, str, bool]:
return self._fs_handler_list.get_handler_for_root(path)
def get_root_dir(self) -> str:
return self._root
def is_supported_scheme(self, scheme: str) -> bool:
return self._fs_handler_list.is_supported_scheme(scheme)
| en | 0.753563 | # TODO(tianqi): dynamically create this list # as well as the patterns upon loading the pfio module. # TODO(check) if root is directory | 2.348954 | 2 |
parser/fase2/team19/Analisis_Ascendente/Instrucciones/PLPGSQL/Ifpl.py | Josue-Zea/tytus | 35 | 8885 | <filename>parser/fase2/team19/Analisis_Ascendente/Instrucciones/PLPGSQL/Ifpl.py
import Analisis_Ascendente.Instrucciones.PLPGSQL.EjecutarFuncion as EjecutarFuncion
from Analisis_Ascendente.Instrucciones.PLPGSQL.plasignacion import Plasignacion
from Analisis_Ascendente.Instrucciones.instruccion import Instruccion
from Analisis_Ascendente.Instrucciones.Create.createTable import CreateTable
from Analisis_Ascendente.Instrucciones.Create.createDatabase import CreateReplace
from Analisis_Ascendente.Instrucciones.Select.select import Select
from Analisis_Ascendente.Instrucciones.Use_Data_Base.useDB import Use
from Analisis_Ascendente.Instrucciones.Select.select1 import selectTime
import Analisis_Ascendente.Instrucciones.Insert.insert as insert_import
from Analisis_Ascendente.Instrucciones.Select.Select2 import Selectp3
from Analisis_Ascendente.Instrucciones.Select import selectInst
from Analisis_Ascendente.Instrucciones.Expresiones.Expresion import Expresion
from Analisis_Ascendente.Instrucciones.Drop.drop import Drop
from Analisis_Ascendente.Instrucciones.Alter.alterDatabase import AlterDatabase
from Analisis_Ascendente.Instrucciones.Alter.alterTable import AlterTable
from Analisis_Ascendente.Instrucciones.Update.Update import Update
from Analisis_Ascendente.Instrucciones.Delete.delete import Delete
from Analisis_Ascendente.Instrucciones.Select import SelectDist
from Analisis_Ascendente.Instrucciones.Type.type import CreateType
#----------------------------------Imports FASE2--------------------------
from Analisis_Ascendente.Instrucciones.Index.Index import Index
from Analisis_Ascendente.Instrucciones.PLPGSQL.createFunction import CreateFunction
from Analisis_Ascendente.Instrucciones.Index.DropIndex import DropIndex
from Analisis_Ascendente.Instrucciones.Index.AlterIndex import AlterIndex
from Analisis_Ascendente.Instrucciones.PLPGSQL.DropProcedure import DropProcedure
from Analisis_Ascendente.Instrucciones.PLPGSQL.CreateProcedure import CreateProcedure
from Analisis_Ascendente.Instrucciones.PLPGSQL.CasePL import CasePL
from Analisis_Ascendente.Instrucciones.PLPGSQL.plCall import plCall
from Analisis_Ascendente.Instrucciones.PLPGSQL.dropFunction import DropFunction
import C3D.GeneradorEtiquetas as GeneradorEtiquetas
import C3D.GeneradorTemporales as GeneradorTemporales
import Analisis_Ascendente.reportes.Reportes as Reportes
class Ifpl(Instruccion):
''' #1 If
#2 If elif else
#3 If else '''
def __init__(self, caso,e_if,s_if,elif_s,s_else, fila, columna):
self.caso = caso
self.e_if = e_if
self.s_if = s_if
self.elif_s = elif_s
self.s_else = s_else
self.fila = fila
self.columna = columna
def ejecutar(self,tsglobal,ts, consola, exceptions):
try:
if self.caso == 1:
resultado = Expresion.Resolver(self.e_if, ts, consola, exceptions)
if resultado == True:
for x in range(0, len(self.s_if)):
self.procesar_instrucciones(self.s_if[x],ts,consola,exceptions,tsglobal)
else:
pass
elif self.caso == 2:
print('hola')
else:
resultado = Expresion.Resolver(self.e_if, ts, consola, exceptions)
if resultado == True:
for x in range(0, len(self.s_if)):
self.procesar_instrucciones(self.s_if[x], ts, consola, exceptions,tsglobal)
else:
for x in range(0, len(self.s_else)):
self.procesar_instrucciones(self.s_else[x],ts,consola,exceptions,tsglobal)
except:
consola.append("XX000 : internal_error")
def procesar_instrucciones(self,instr,ts,consola,exceptions,tsglobal):
if isinstance(instr, CreateReplace):
CreateReplace.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, Select):
if instr.caso == 1:
consola.append('caso 1')
selectTime.ejecutar(instr, ts, consola, exceptions, True)
elif instr.caso == 2:
consola.append('caso 2')
variable = SelectDist.Select_Dist()
SelectDist.Select_Dist.ejecutar(variable, instr, ts, consola, exceptions)
elif instr.caso == 3:
consola.append('caso 3')
variable = selectInst.Select_inst()
selectInst.Select_inst.ejecutar(variable, instr, ts, consola, exceptions)
elif instr.caso == 4:
consola.append('caso 4')
Selectp3.ejecutar(instr, ts, consola, exceptions, True)
elif instr.caso == 6:
consola.append('caso 6')
elif isinstance(instr, CreateTable):
CreateTable.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, Use):
Use.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, insert_import.InsertInto):
insert_import.InsertInto.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute un insert")
elif isinstance(instr, Drop):
Drop.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute drop")
elif isinstance(instr, AlterDatabase):
AlterDatabase.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute alter database")
elif isinstance(instr, AlterTable):
AlterTable.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute alter table")
elif isinstance(instr, Delete):
Delete.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute delete")
elif isinstance(instr, Update):
Update.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, CreateType):
CreateType.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, Index):
Index.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute Index")
elif isinstance(instr, CreateFunction):
CreateFunction.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, DropFunction):
DropFunction.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, DropIndex):
DropIndex.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, AlterIndex):
AlterIndex.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, DropProcedure):
DropProcedure.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, CreateProcedure):
CreateProcedure.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, CasePL):
CasePL.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, plCall):
plCall.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, Plasignacion):
EjecutarFuncion.ejecutarPlasignacionIf(instr,ts,consola,exceptions,tsglobal)
elif isinstance(instr, Ifpl):
instr.ejecutar(tsglobal,ts,consola,exceptions)
else:
return
def getC3D(self, lista_optimizaciones_C3D):
etiqueta_if = GeneradorEtiquetas.nueva_etiqueta()
etiqueta_else = GeneradorEtiquetas.nueva_etiqueta()
etiqueta_salida = GeneradorEtiquetas.nueva_etiqueta()
e_if = self.e_if.getC3D(lista_optimizaciones_C3D)
noOptimizado = '''if %s: goto .%s <br>
goto .%s<br>
label .%s<br>
<instrucciones><br>
label .%s''' % (e_if['tmp'], etiqueta_if, etiqueta_else, etiqueta_if, etiqueta_else)
optimizado = '''if not %s: goto .%s <br>
<instrucciones><br>
label .%s''' % (e_if['tmp'], etiqueta_else, etiqueta_else)
optimizacion1 = Reportes.ListaOptimizacion(noOptimizado, optimizado, Reportes.TipoOptimizacion.REGLA3)
lista_optimizaciones_C3D.append(optimizacion1)
sentencias_if = ''
for sentencias in self.s_if:
sentencias_if += sentencias.getC3D(lista_optimizaciones_C3D)
c3d = '''
%s
if not %s: goto .%s
%s
goto .%s
''' % (e_if['code'], e_if['tmp'], etiqueta_else, sentencias_if, etiqueta_salida)
if self.s_else is not None:
sentencias_else = ''
for sentencias in self.s_else:
sentencias_else += sentencias.getC3D(lista_optimizaciones_C3D)
c3d += ''' label .%s
%s
label .%s''' % (etiqueta_else, sentencias_else, etiqueta_salida)
else:
c3d += ''' label .%s
label .%s
''' % (etiqueta_else, etiqueta_salida)
return c3d
def get_quemado(self):
sententias_if = ''
for sentencia in self.s_if:
sententias_if += sentencia.get_quemado() + ';\n'
quemado = ''' if %s then
%s
''' % (self.e_if.get_quemado(), sententias_if)
if self.s_else is not None:
sentencias_else = ''
for sentencia in self.s_else:
sentencias_else += sentencia.get_quemado() + ';\n'
quemado += '''ELSE
%s
''' % sentencias_else
quemado += ' end if'
return quemado
| <filename>parser/fase2/team19/Analisis_Ascendente/Instrucciones/PLPGSQL/Ifpl.py
import Analisis_Ascendente.Instrucciones.PLPGSQL.EjecutarFuncion as EjecutarFuncion
from Analisis_Ascendente.Instrucciones.PLPGSQL.plasignacion import Plasignacion
from Analisis_Ascendente.Instrucciones.instruccion import Instruccion
from Analisis_Ascendente.Instrucciones.Create.createTable import CreateTable
from Analisis_Ascendente.Instrucciones.Create.createDatabase import CreateReplace
from Analisis_Ascendente.Instrucciones.Select.select import Select
from Analisis_Ascendente.Instrucciones.Use_Data_Base.useDB import Use
from Analisis_Ascendente.Instrucciones.Select.select1 import selectTime
import Analisis_Ascendente.Instrucciones.Insert.insert as insert_import
from Analisis_Ascendente.Instrucciones.Select.Select2 import Selectp3
from Analisis_Ascendente.Instrucciones.Select import selectInst
from Analisis_Ascendente.Instrucciones.Expresiones.Expresion import Expresion
from Analisis_Ascendente.Instrucciones.Drop.drop import Drop
from Analisis_Ascendente.Instrucciones.Alter.alterDatabase import AlterDatabase
from Analisis_Ascendente.Instrucciones.Alter.alterTable import AlterTable
from Analisis_Ascendente.Instrucciones.Update.Update import Update
from Analisis_Ascendente.Instrucciones.Delete.delete import Delete
from Analisis_Ascendente.Instrucciones.Select import SelectDist
from Analisis_Ascendente.Instrucciones.Type.type import CreateType
#----------------------------------Imports FASE2--------------------------
from Analisis_Ascendente.Instrucciones.Index.Index import Index
from Analisis_Ascendente.Instrucciones.PLPGSQL.createFunction import CreateFunction
from Analisis_Ascendente.Instrucciones.Index.DropIndex import DropIndex
from Analisis_Ascendente.Instrucciones.Index.AlterIndex import AlterIndex
from Analisis_Ascendente.Instrucciones.PLPGSQL.DropProcedure import DropProcedure
from Analisis_Ascendente.Instrucciones.PLPGSQL.CreateProcedure import CreateProcedure
from Analisis_Ascendente.Instrucciones.PLPGSQL.CasePL import CasePL
from Analisis_Ascendente.Instrucciones.PLPGSQL.plCall import plCall
from Analisis_Ascendente.Instrucciones.PLPGSQL.dropFunction import DropFunction
import C3D.GeneradorEtiquetas as GeneradorEtiquetas
import C3D.GeneradorTemporales as GeneradorTemporales
import Analisis_Ascendente.reportes.Reportes as Reportes
class Ifpl(Instruccion):
''' #1 If
#2 If elif else
#3 If else '''
def __init__(self, caso,e_if,s_if,elif_s,s_else, fila, columna):
self.caso = caso
self.e_if = e_if
self.s_if = s_if
self.elif_s = elif_s
self.s_else = s_else
self.fila = fila
self.columna = columna
def ejecutar(self,tsglobal,ts, consola, exceptions):
try:
if self.caso == 1:
resultado = Expresion.Resolver(self.e_if, ts, consola, exceptions)
if resultado == True:
for x in range(0, len(self.s_if)):
self.procesar_instrucciones(self.s_if[x],ts,consola,exceptions,tsglobal)
else:
pass
elif self.caso == 2:
print('hola')
else:
resultado = Expresion.Resolver(self.e_if, ts, consola, exceptions)
if resultado == True:
for x in range(0, len(self.s_if)):
self.procesar_instrucciones(self.s_if[x], ts, consola, exceptions,tsglobal)
else:
for x in range(0, len(self.s_else)):
self.procesar_instrucciones(self.s_else[x],ts,consola,exceptions,tsglobal)
except:
consola.append("XX000 : internal_error")
def procesar_instrucciones(self,instr,ts,consola,exceptions,tsglobal):
if isinstance(instr, CreateReplace):
CreateReplace.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, Select):
if instr.caso == 1:
consola.append('caso 1')
selectTime.ejecutar(instr, ts, consola, exceptions, True)
elif instr.caso == 2:
consola.append('caso 2')
variable = SelectDist.Select_Dist()
SelectDist.Select_Dist.ejecutar(variable, instr, ts, consola, exceptions)
elif instr.caso == 3:
consola.append('caso 3')
variable = selectInst.Select_inst()
selectInst.Select_inst.ejecutar(variable, instr, ts, consola, exceptions)
elif instr.caso == 4:
consola.append('caso 4')
Selectp3.ejecutar(instr, ts, consola, exceptions, True)
elif instr.caso == 6:
consola.append('caso 6')
elif isinstance(instr, CreateTable):
CreateTable.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, Use):
Use.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, insert_import.InsertInto):
insert_import.InsertInto.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute un insert")
elif isinstance(instr, Drop):
Drop.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute drop")
elif isinstance(instr, AlterDatabase):
AlterDatabase.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute alter database")
elif isinstance(instr, AlterTable):
AlterTable.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute alter table")
elif isinstance(instr, Delete):
Delete.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute delete")
elif isinstance(instr, Update):
Update.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, CreateType):
CreateType.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, Index):
Index.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute Index")
elif isinstance(instr, CreateFunction):
CreateFunction.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, DropFunction):
DropFunction.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, DropIndex):
DropIndex.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, AlterIndex):
AlterIndex.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, DropProcedure):
DropProcedure.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, CreateProcedure):
CreateProcedure.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, CasePL):
CasePL.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, plCall):
plCall.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, Plasignacion):
EjecutarFuncion.ejecutarPlasignacionIf(instr,ts,consola,exceptions,tsglobal)
elif isinstance(instr, Ifpl):
instr.ejecutar(tsglobal,ts,consola,exceptions)
else:
return
def getC3D(self, lista_optimizaciones_C3D):
etiqueta_if = GeneradorEtiquetas.nueva_etiqueta()
etiqueta_else = GeneradorEtiquetas.nueva_etiqueta()
etiqueta_salida = GeneradorEtiquetas.nueva_etiqueta()
e_if = self.e_if.getC3D(lista_optimizaciones_C3D)
noOptimizado = '''if %s: goto .%s <br>
goto .%s<br>
label .%s<br>
<instrucciones><br>
label .%s''' % (e_if['tmp'], etiqueta_if, etiqueta_else, etiqueta_if, etiqueta_else)
optimizado = '''if not %s: goto .%s <br>
<instrucciones><br>
label .%s''' % (e_if['tmp'], etiqueta_else, etiqueta_else)
optimizacion1 = Reportes.ListaOptimizacion(noOptimizado, optimizado, Reportes.TipoOptimizacion.REGLA3)
lista_optimizaciones_C3D.append(optimizacion1)
sentencias_if = ''
for sentencias in self.s_if:
sentencias_if += sentencias.getC3D(lista_optimizaciones_C3D)
c3d = '''
%s
if not %s: goto .%s
%s
goto .%s
''' % (e_if['code'], e_if['tmp'], etiqueta_else, sentencias_if, etiqueta_salida)
if self.s_else is not None:
sentencias_else = ''
for sentencias in self.s_else:
sentencias_else += sentencias.getC3D(lista_optimizaciones_C3D)
c3d += ''' label .%s
%s
label .%s''' % (etiqueta_else, sentencias_else, etiqueta_salida)
else:
c3d += ''' label .%s
label .%s
''' % (etiqueta_else, etiqueta_salida)
return c3d
def get_quemado(self):
sententias_if = ''
for sentencia in self.s_if:
sententias_if += sentencia.get_quemado() + ';\n'
quemado = ''' if %s then
%s
''' % (self.e_if.get_quemado(), sententias_if)
if self.s_else is not None:
sentencias_else = ''
for sentencia in self.s_else:
sentencias_else += sentencia.get_quemado() + ';\n'
quemado += '''ELSE
%s
''' % sentencias_else
quemado += ' end if'
return quemado
| es | 0.166141 | #----------------------------------Imports FASE2-------------------------- #1 If
#2 If elif else
#3 If else # print("Ejecute un insert") # print("Ejecute drop") # print("Ejecute alter database") # print("Ejecute alter table") # print("Ejecute delete") # print("Ejecute Index") if %s: goto .%s <br>
goto .%s<br>
label .%s<br>
<instrucciones><br>
label .%s if not %s: goto .%s <br>
<instrucciones><br>
label .%s %s
if not %s: goto .%s
%s
goto .%s label .%s
%s
label .%s label .%s
label .%s if %s then
%s ELSE
%s | 1.420263 | 1 |
epages_client/dataobjects/enum_fetch_operator.py | vilkasgroup/epages_client | 3 | 8886 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
class FetchOperator(object):
'''Defines values for fetch operators'''
ADD = 1
REMOVE = 2
REPLACE = 3
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
class FetchOperator(object):
'''Defines values for fetch operators'''
ADD = 1
REMOVE = 2
REPLACE = 3
| en | 0.54046 | # -*- coding: utf-8 -*- Defines values for fetch operators | 1.767618 | 2 |
pyhwpscan/hwp_scan.py | orca-eaa5a/dokkaebi_scanner | 0 | 8887 | from threading import current_thread
from jsbeautifier.javascript.beautifier import remove_redundant_indentation
from pyparser.oleparser import OleParser
from pyparser.hwp_parser import HwpParser
from scan.init_scan import init_hwp5_scan
from scan.bindata_scanner import BinData_Scanner
from scan.jscript_scanner import JS_Scanner
from scan.paratext_scanner import ParaText_Scanner
import zipfile
import os
import sys
import platform
from common.errors import *
from utils.dumphex import print_hexdump
js_scanner = None
bindata_scanner = None
paratext_scanner = None
_platform = None
binary_info = {
"type": "",
"p": None
}
def cmd_handler(cmdline):
global binary_info
global js_scanner
global bindata_scanner
global paratext_scanner
global _platform
ty = binary_info["type"]
parser = binary_info["p"]
s_cmd = cmdline.split(" ")
cmd = s_cmd[0]
arg = s_cmd[1:]
if "windows" in _platform:
os.system('cls')
else:
os.system('clear')
print(">> "+cmdline)
if cmd == "help":
print("> tree")
print(" Print the structure of target Binary")
print("> dump [binary_name] [directory]")
print(" Dump OLE or Zipped Binary at specific direcotry (default is current direcotry)")
print("> show-hex [binary_name]")
print(" Print hexcidecimal view of specific OLE or Zipped Binary")
print("> scan")
print(" re-scanning the target file")
print("> exit")
print(" quit command liner")
return 1
elif cmd == "clear":
if "windows" in _platform:
os.system('cls')
else:
os.system('clear')
return 0
elif cmd == "tree":
if ty == "hwp":
parser.ole_container.print_dir_entry_all()
else:
for file in parser.filelist:
print(file.filename)
return 0
elif cmd == "dump":
if len(arg) > 1:
binary_name, target_dir = arg[0], arg[1]
else:
binary_name, target_dir = arg[0], None
if not target_dir:
target_dir = os.getcwd()
if ty == "hwp":
stream = parser.ole_container.get_dir_entry_by_name(binary_name).get_decompressed_stream()
else:
targ = ""
for file in parser.filelist:
fname = file.filename.split("/")[-1]
if fname == binary_name:
targ = file.filename
break
if not targ:
print("no file exist")
return 0
stream = parser.read(targ)
with open(target_dir+"/"+binary_name, "wb") as f:
f.write(stream)
print("dump succeed..")
return 1
elif cmd == "show-hex":
binary_name = arg[0]
if ty == "hwp":
stream = parser.ole_container.get_dir_entry_by_name(binary_name).get_decompressed_stream()
else:
stream = parser.read(binary_name)
print_hexdump(stream)
return 1
elif cmd == "scan":
if ty == "hwp":
bindata_scanner.scan()
js_scanner.scan()
else:
paratext_scanner.scan()
return 1
elif cmd == "exit":
return -1
else:
print("unknown command..")
return 0
print()
class HWPScanner:
def __init__(self) -> None:
self.__platform__ = platform.platform()
self.hwpx_flag = False
self.ole_parser = OleParser()
self.hwp_parser = None
pass
def parse_hwpdoc(self, file_name):
self.file_name = file_name
self.ole_parser.read_ole_binary(file_name)
try:
self.ole_parser.parse()
self.hwp_parser = HwpParser(self.ole_parser)
self.hwp_parser.parse()
if not init_hwp5_scan(self.hwp_parser.hwp_header):
exit(-1)
except:
self.hwpx_docs = zipfile.ZipFile(self.file_name, "r")
self.hwpx_flag = True
pass
'''
def parse_hwpdoc(self):
try:
self.hwp_parser = HwpParser(self.ole_parser)
self.hwp_parser.parse()
if not init_hwp5_scan(self.hwp_parser.hwp_header):
exit(-1)
except:
self.hwpx_docs = zipfile.ZipFile(self.file_name, "r")
self.hwpx_flag = True
pass
'''
def setup_scanner(self):
if not self.hwpx_flag:
self.js_scanner = JS_Scanner(self.hwp_parser)
self.bindata_scanner = BinData_Scanner(self.hwp_parser)
else:
self.paratext_scanner = ParaText_Scanner(self.hwpx_docs)
def get_file_structure(self):
strt = {}
if not self.hwpx_flag:
self.ole_parser.get_dir_entry_all(strt, entry_id=0, depth=0)
else:
for _file in self.hwpx_docs.filelist:
_path = os.path.split( _file.filename)
if _path[0] not in strt:
# root
if _path[0]:
strt[_path[0]] = {}
else:
strt[_path[1]] = _file.file_size
continue
cur_strt = strt[_path[0]]
for path in _path:
if path not in strt:
if path == _path[-1]:
cur_strt[path] = _file.file_size
else:
cur_strt[path] = {}
cur_strt = cur_strt[path]
else:
cur_strt = strt[path]
return strt
def scan(self):
scan_result = ""
if not self.hwpx_flag:
scan_result += self.js_scanner.scan()
scan_result += self.bindata_scanner.scan()
else:
scan_result += self.paratext_scanner.scan()
return scan_result | from threading import current_thread
from jsbeautifier.javascript.beautifier import remove_redundant_indentation
from pyparser.oleparser import OleParser
from pyparser.hwp_parser import HwpParser
from scan.init_scan import init_hwp5_scan
from scan.bindata_scanner import BinData_Scanner
from scan.jscript_scanner import JS_Scanner
from scan.paratext_scanner import ParaText_Scanner
import zipfile
import os
import sys
import platform
from common.errors import *
from utils.dumphex import print_hexdump
js_scanner = None
bindata_scanner = None
paratext_scanner = None
_platform = None
binary_info = {
"type": "",
"p": None
}
def cmd_handler(cmdline):
global binary_info
global js_scanner
global bindata_scanner
global paratext_scanner
global _platform
ty = binary_info["type"]
parser = binary_info["p"]
s_cmd = cmdline.split(" ")
cmd = s_cmd[0]
arg = s_cmd[1:]
if "windows" in _platform:
os.system('cls')
else:
os.system('clear')
print(">> "+cmdline)
if cmd == "help":
print("> tree")
print(" Print the structure of target Binary")
print("> dump [binary_name] [directory]")
print(" Dump OLE or Zipped Binary at specific direcotry (default is current direcotry)")
print("> show-hex [binary_name]")
print(" Print hexcidecimal view of specific OLE or Zipped Binary")
print("> scan")
print(" re-scanning the target file")
print("> exit")
print(" quit command liner")
return 1
elif cmd == "clear":
if "windows" in _platform:
os.system('cls')
else:
os.system('clear')
return 0
elif cmd == "tree":
if ty == "hwp":
parser.ole_container.print_dir_entry_all()
else:
for file in parser.filelist:
print(file.filename)
return 0
elif cmd == "dump":
if len(arg) > 1:
binary_name, target_dir = arg[0], arg[1]
else:
binary_name, target_dir = arg[0], None
if not target_dir:
target_dir = os.getcwd()
if ty == "hwp":
stream = parser.ole_container.get_dir_entry_by_name(binary_name).get_decompressed_stream()
else:
targ = ""
for file in parser.filelist:
fname = file.filename.split("/")[-1]
if fname == binary_name:
targ = file.filename
break
if not targ:
print("no file exist")
return 0
stream = parser.read(targ)
with open(target_dir+"/"+binary_name, "wb") as f:
f.write(stream)
print("dump succeed..")
return 1
elif cmd == "show-hex":
binary_name = arg[0]
if ty == "hwp":
stream = parser.ole_container.get_dir_entry_by_name(binary_name).get_decompressed_stream()
else:
stream = parser.read(binary_name)
print_hexdump(stream)
return 1
elif cmd == "scan":
if ty == "hwp":
bindata_scanner.scan()
js_scanner.scan()
else:
paratext_scanner.scan()
return 1
elif cmd == "exit":
return -1
else:
print("unknown command..")
return 0
print()
class HWPScanner:
def __init__(self) -> None:
self.__platform__ = platform.platform()
self.hwpx_flag = False
self.ole_parser = OleParser()
self.hwp_parser = None
pass
def parse_hwpdoc(self, file_name):
self.file_name = file_name
self.ole_parser.read_ole_binary(file_name)
try:
self.ole_parser.parse()
self.hwp_parser = HwpParser(self.ole_parser)
self.hwp_parser.parse()
if not init_hwp5_scan(self.hwp_parser.hwp_header):
exit(-1)
except:
self.hwpx_docs = zipfile.ZipFile(self.file_name, "r")
self.hwpx_flag = True
pass
'''
def parse_hwpdoc(self):
try:
self.hwp_parser = HwpParser(self.ole_parser)
self.hwp_parser.parse()
if not init_hwp5_scan(self.hwp_parser.hwp_header):
exit(-1)
except:
self.hwpx_docs = zipfile.ZipFile(self.file_name, "r")
self.hwpx_flag = True
pass
'''
def setup_scanner(self):
if not self.hwpx_flag:
self.js_scanner = JS_Scanner(self.hwp_parser)
self.bindata_scanner = BinData_Scanner(self.hwp_parser)
else:
self.paratext_scanner = ParaText_Scanner(self.hwpx_docs)
def get_file_structure(self):
strt = {}
if not self.hwpx_flag:
self.ole_parser.get_dir_entry_all(strt, entry_id=0, depth=0)
else:
for _file in self.hwpx_docs.filelist:
_path = os.path.split( _file.filename)
if _path[0] not in strt:
# root
if _path[0]:
strt[_path[0]] = {}
else:
strt[_path[1]] = _file.file_size
continue
cur_strt = strt[_path[0]]
for path in _path:
if path not in strt:
if path == _path[-1]:
cur_strt[path] = _file.file_size
else:
cur_strt[path] = {}
cur_strt = cur_strt[path]
else:
cur_strt = strt[path]
return strt
def scan(self):
scan_result = ""
if not self.hwpx_flag:
scan_result += self.js_scanner.scan()
scan_result += self.bindata_scanner.scan()
else:
scan_result += self.paratext_scanner.scan()
return scan_result | en | 0.126041 | def parse_hwpdoc(self): try: self.hwp_parser = HwpParser(self.ole_parser) self.hwp_parser.parse() if not init_hwp5_scan(self.hwp_parser.hwp_header): exit(-1) except: self.hwpx_docs = zipfile.ZipFile(self.file_name, "r") self.hwpx_flag = True pass # root | 2.393603 | 2 |
tests/core/test_plugins.py | franalgaba/nile | 0 | 8888 | """
Tests for plugins in core module.
Only unit tests for now.
"""
from unittest.mock import patch
import click
from nile.core.plugins import get_installed_plugins, load_plugins, skip_click_exit
def test_skip_click_exit():
def dummy_method(a, b):
return a + b
dummy_result = dummy_method(1, 2)
decorated = skip_click_exit(dummy_method)
decorated_result = decorated(1, 2)
assert callable(decorated)
assert dummy_result == decorated_result
def testget_installed_plugins():
class Dummy:
value = "nile.core.plugins.get_installed_plugins"
name = "get_installed_plugins"
with patch("nile.core.plugins.entry_points", return_value=[Dummy()]):
installed_plugins = get_installed_plugins()
assert "get_installed_plugins" in installed_plugins
def test_load_plugins():
@click.group()
def cli():
"""Nile CLI group."""
pass
def dummy():
print("dummy_result")
with patch(
"nile.core.plugins.get_installed_plugins", return_value={"dummy": dummy}
):
app = load_plugins(cli)
assert callable(app)
| """
Tests for plugins in core module.
Only unit tests for now.
"""
from unittest.mock import patch
import click
from nile.core.plugins import get_installed_plugins, load_plugins, skip_click_exit
def test_skip_click_exit():
def dummy_method(a, b):
return a + b
dummy_result = dummy_method(1, 2)
decorated = skip_click_exit(dummy_method)
decorated_result = decorated(1, 2)
assert callable(decorated)
assert dummy_result == decorated_result
def testget_installed_plugins():
class Dummy:
value = "nile.core.plugins.get_installed_plugins"
name = "get_installed_plugins"
with patch("nile.core.plugins.entry_points", return_value=[Dummy()]):
installed_plugins = get_installed_plugins()
assert "get_installed_plugins" in installed_plugins
def test_load_plugins():
@click.group()
def cli():
"""Nile CLI group."""
pass
def dummy():
print("dummy_result")
with patch(
"nile.core.plugins.get_installed_plugins", return_value={"dummy": dummy}
):
app = load_plugins(cli)
assert callable(app)
| en | 0.857956 | Tests for plugins in core module. Only unit tests for now. Nile CLI group. | 2.422254 | 2 |
commands/source.py | Open-Source-eUdeC/UdeCursos-bot | 3 | 8889 | async def source(update, context):
source_code = "https://github.com/Open-Source-eUdeC/UdeCursos-bot"
await context.bot.send_message(
chat_id=update.effective_chat.id,
text=(
"*UdeCursos bot v2.0*\n\n"
f"Código fuente: [GitHub]({source_code})"
),
parse_mode="Markdown"
)
| async def source(update, context):
source_code = "https://github.com/Open-Source-eUdeC/UdeCursos-bot"
await context.bot.send_message(
chat_id=update.effective_chat.id,
text=(
"*UdeCursos bot v2.0*\n\n"
f"Código fuente: [GitHub]({source_code})"
),
parse_mode="Markdown"
)
| none | 1 | 1.735592 | 2 |
|
history/tests.py | MPIB/Lagerregal | 24 | 8890 | from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.test.client import Client
from model_mommy import mommy
from devices.models import Device
from users.models import Lageruser
class HistoryTests(TestCase):
def setUp(self):
self.client = Client()
self.admin = Lageruser.objects.create_superuser('test', '<EMAIL>', "test")
self.client.login(username="test", password="<PASSWORD>")
def test_global_view(self):
response = self.client.get('/history/global/')
self.assertEqual(response.status_code, 200)
def test_list_view(self):
content_type = ContentType.objects.get(model='device')
device = mommy.make(Device)
response = self.client.get('/history/%i/%i/' % (content_type.pk, device.pk))
self.assertEqual(response.status_code, 200)
def test_detail_view(self):
device = mommy.make(Device)
response = self.client.post('/devices/%i/edit/' % device.pk, data={
'name': 'test',
'creator': self.admin.pk,
})
self.assertEqual(response.status_code, 302)
response = self.client.get('/history/version/1/')
self.assertEqual(response.status_code, 200)
| from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.test.client import Client
from model_mommy import mommy
from devices.models import Device
from users.models import Lageruser
class HistoryTests(TestCase):
def setUp(self):
self.client = Client()
self.admin = Lageruser.objects.create_superuser('test', '<EMAIL>', "test")
self.client.login(username="test", password="<PASSWORD>")
def test_global_view(self):
response = self.client.get('/history/global/')
self.assertEqual(response.status_code, 200)
def test_list_view(self):
content_type = ContentType.objects.get(model='device')
device = mommy.make(Device)
response = self.client.get('/history/%i/%i/' % (content_type.pk, device.pk))
self.assertEqual(response.status_code, 200)
def test_detail_view(self):
device = mommy.make(Device)
response = self.client.post('/devices/%i/edit/' % device.pk, data={
'name': 'test',
'creator': self.admin.pk,
})
self.assertEqual(response.status_code, 302)
response = self.client.get('/history/version/1/')
self.assertEqual(response.status_code, 200)
| none | 1 | 2.257052 | 2 |
|
django_git_info/management/commands/get_git_info.py | spapas/django-git | 1 | 8891 | # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from django_git_info import get_git_info
class Command(BaseCommand):
help = 'Gets git info'
#@transaction.commit_manually
def handle(self, *args, **options):
info = get_git_info()
for key in info.keys():
print '{0}={1}'.format(key, info[key]) | # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from django_git_info import get_git_info
class Command(BaseCommand):
help = 'Gets git info'
#@transaction.commit_manually
def handle(self, *args, **options):
info = get_git_info()
for key in info.keys():
print '{0}={1}'.format(key, info[key]) | en | 0.684803 | # -*- coding: utf-8 -*- #@transaction.commit_manually | 2.179939 | 2 |
mevis/_internal/conversion.py | robert-haas/mevis | 2 | 8892 | <filename>mevis/_internal/conversion.py
from collections.abc import Callable as _Callable
import networkx as _nx
from opencog.type_constructors import AtomSpace as _AtomSpace
from .args import check_arg as _check_arg
def convert(data, graph_annotated=True, graph_directed=True,
node_label=None, node_color=None, node_opacity=None, node_size=None, node_shape=None,
node_border_color=None, node_border_size=None,
node_label_color=None, node_label_size=None, node_hover=None, node_click=None,
node_image=None, node_properties=None,
edge_label=None, edge_color=None, edge_opacity=None, edge_size=None,
edge_label_color=None, edge_label_size=None, edge_hover=None, edge_click=None):
"""Convert an Atomspace or list of Atoms to a NetworkX graph with annotations.
Several arguments accept a Callable.
- In case of node annotations, the Callable gets an Atom as input,
which the node represents in the graph.
The Callable needs to return one of the other types accepted by the argument,
e.g. ``str`` or ``int``/``float``.
- In case of edge annotations, the Callable gets two Atoms as input,
which the edge connects in the graph.
The Callable needs to return one of the other types accepted by the argument,
e.g. ``str`` or ``int``/``float``.
Several arguments accept a color, which can be in following formats:
- Name: ``"black"``, ``"red"``, ``"green"``, ...
- Color code
- 6 digit hex RGB code: ``"#05ac05"``
- 3 digit hex RGB code: ``"#0a0"`` (equivalent to ``"#00aa00"``)
Parameters
----------
data : Atomspace, list of Atoms
Input that gets converted to a graph.
graph_annotated : bool
If ``False``, no annotations are added to the graph. This could be used for
converting large AtomSpaces quickly to graphs that use less RAM and can
be exported to smaller files (e.g. also compressed as gml.gz) for inspection
with other tools.
graph_directed : bool
If ``True``, a NetworkX DiGraph is created. If ``False``, a NetworkX Graph is created.
node_label : str, Callable
Set a label for each node, which is shown as text below it.
node_color : str, Callable
Set a color for each node, which becomes the fill color of its shape.
node_opacity : float between 0.0 and 1.0
Set an opacity for each node, which becomes the opacity of its shape.
Caution: This is only supported by d3.
node_size : int, float, Callable
Set a size for each node, which becomes the height and width of its shape.
node_shape : str, Callable
Set a shape for each node, which is some geometrical form that has the
node coordinates in its center.
Possible values: ``"circle"``, ``"rectangle"``, ``"hexagon"``
node_border_color : str, Callable
Set a border color for each node, which influences the border drawn around its shape.
node_border_size : int, float, Callable
Set a border size for each node, which influences the border drawn around its shape.
node_label_color : str, Callable
Set a label color for each node, which determines the font color
of the text below the node.
node_label_size : int, float, Callable
Set a label size for each node, which determines the font size
of the text below the node.
node_hover : str, Callable
Set a hover text for each node, which shows up besides the mouse cursor
when hovering over a node.
node_click : str, Callable
Set a click text for each node, which shows up in a div element below the plot
when clicking on a node and can easily be copied and pasted.
node_image : str, Callable
Set an image for each node, which appears within its shape.
Possible values:
- URL pointing to an image
- Data URL encoding the image
node_properties : str, dict, Callable
Set additional properties for each node, which may not immediately be translated
into a visual element, but can be chosen in the data selection menu in the
interactive HTML visualizations to map them on some plot element.
These properties also appear when exporting a graph to a file in a format
such as GML and may be recognized by external visualization tools.
Note that a Callable needs to return a dict in this case, and each key becomes
a property, which is equivalent to the other properties such as node_size and
node_color.
Special cases:
- ``node_properties="tv"`` is a shortcut for using a function that returns
``{"mean": atom.tv.mean, "confidence": atom.tv.confidence}``
- Keys ``"x"``, ``"y"`` and ``"z"`` properties are translated into node coordinates.
Examples:
- ``dict(x=0.0)``: This fixes the x coordinate of each node to 0.0, so that the
JavaScript layout algorithm does not influence it, but the nodes remain
free to move in the y and z directions.
- ``lambda atom: dict(x=2.0) if atom.is_node() else None``:
This fixes the x coordinate of each Atom of type Node to 2.0
but allows each Atom of type Link to move freely.
- ``lambda atom: dict(y=-len(atom.out)*100) if atom.is_link() else dict(y=0)``
This fixes the y coordinates of Atoms at different heights. Atoms of type Node
are put at the bottom and Atoms of type Link are ordered by the number of their
outgoing edges. The results is a hierarchical visualization that has some
similarity with the "dot" layout.
- ``lambda atom: dict(x=-100) if atom.is_node() else dict(x=100)``:
This fixes the x coordinate of Node Atoms at -100 and of Link Atoms at 100.
The results is a visualization with two lines of nodes that has some
similarity with the "bipartite" layout.
edge_label : str, Callable
Set a label for each edge, which becomes the text plotted in the middle of the edge.
edge_color : str, Callable
Set a color for each edge, which becomes the color of the line representing the edge.
edge_opacity : int, float, Callable
Set an opacity for each edge, which allows to make it transparent to some degree.
edge_size : int, float, Callable
Set a size for each edge, which becomes the width of the line representing the edge.
edge_label_color : str, Callable
Set a color for each edge label, which becomes the color of the text in the midpoint
of the edge.
edge_label_size : int, float, Callable
Set a size for each edge label, which becomes the size of the text in the midpoint
of the edge.
edge_hover : str, Callable
edge_click : str, Callable
Returns
-------
graph : NetworkX Graph or DiGraph
Whether an undirected or directed graph is created depends on the argument "directed".
"""
# Argument processing
_check_arg(data, 'data', (list, _AtomSpace))
_check_arg(graph_annotated, 'graph_annotated', bool)
_check_arg(graph_directed, 'graph_directed', bool)
_check_arg(node_label, 'node_label', (str, _Callable), allow_none=True)
_check_arg(node_color, 'node_color', (str, _Callable), allow_none=True)
_check_arg(node_opacity, 'node_opacity', (int, float, _Callable), allow_none=True)
_check_arg(node_size, 'node_size', (int, float, _Callable), allow_none=True)
_check_arg(node_shape, 'node_shape', (str, _Callable), allow_none=True)
_check_arg(node_border_color, 'node_border_color', (str, _Callable), allow_none=True)
_check_arg(node_border_size, 'node_border_size', (int, float, _Callable), allow_none=True)
_check_arg(node_label_color, 'node_label_color', (str, _Callable), allow_none=True)
_check_arg(node_label_size, 'node_label_size', (int, float, _Callable), allow_none=True)
_check_arg(node_hover, 'node_hover', (str, _Callable), allow_none=True)
_check_arg(node_click, 'node_click', (str, _Callable), allow_none=True)
_check_arg(node_image, 'node_image', (str, _Callable), allow_none=True)
_check_arg(node_properties, 'node_properties', (str, dict, _Callable), allow_none=True)
_check_arg(edge_label, 'edge_label', (str, _Callable), allow_none=True)
_check_arg(edge_color, 'edge_color', (str, _Callable), allow_none=True)
_check_arg(edge_opacity, 'edge_opacity', (int, float, _Callable), allow_none=True)
_check_arg(edge_size, 'edge_size', (int, float, _Callable), allow_none=True)
_check_arg(edge_label_color, 'edge_label_color', (str, _Callable), allow_none=True)
_check_arg(edge_label_size, 'edge_label_size', (int, float, _Callable), allow_none=True)
_check_arg(edge_hover, 'edge_hover', (str, _Callable), allow_none=True)
_check_arg(edge_click, 'edge_click', (str, _Callable), allow_none=True)
# Prepare annotation functions
if graph_annotated:
node_ann = prepare_node_func(
node_label, node_color, node_opacity, node_size, node_shape, node_border_color,
node_border_size, node_label_color, node_label_size, node_hover, node_click,
node_image, node_properties)
edge_ann = prepare_edge_func(
edge_label, edge_color, edge_opacity, edge_size,
edge_label_color, edge_label_size, edge_hover, edge_click)
else:
empty = dict()
def node_ann(atom):
return empty
def edge_ann(atom1, atom2):
return empty
# Create the NetworkX graph
graph = _nx.DiGraph() if graph_directed else _nx.Graph()
# 0) Set graph annotations
graph.graph['node_click'] = '$hover' # node_click will by default show content of node_hover
# 1) Add vertices and their annotations
for atom in data:
graph.add_node(to_uid(atom), **node_ann(atom))
# 2) Add edges and their annotations (separate step to exclude edges to filtered vertices)
for atom in data:
uid = to_uid(atom)
if atom.is_link():
# for all that is incoming to the Atom
for atom2 in atom.incoming:
uid2 = to_uid(atom2)
if uid2 in graph.nodes:
graph.add_edge(uid2, uid, **edge_ann(atom2, atom))
# for all that is outgoing of the Atom
for atom2 in atom.out:
uid2 = to_uid(atom2)
if uid2 in graph.nodes:
graph.add_edge(uid, uid2, **edge_ann(atom, atom2))
return graph
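# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A minimal, hedged example of how convert() might be called on an existing AtomSpace;
# the "atomspace" variable and the lambdas below are assumptions for illustration only.
#
#     graph = convert(atomspace,
#                     node_color=lambda atom: "green" if atom.is_node() else None,
#                     node_size=15,
#                     edge_label=lambda atom1, atom2: "out")
#     print(graph.number_of_nodes(), graph.number_of_edges())
#
# The returned object is a plain NetworkX (Di)Graph, so it can be inspected or exported
# with the usual NetworkX APIs.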
def prepare_node_func(node_label, node_color, node_opacity, node_size, node_shape,
node_border_color, node_border_size, node_label_color, node_label_size,
node_hover, node_click, node_image, node_properties):
"""Prepare a function that calculates all annotations for a node representing an Atom."""
# individual node annotation functions
node_label = use_node_def_or_str(node_label, node_label_default)
node_color = use_node_def_or_str(node_color, node_color_default)
node_opacity = use_node_def_or_num(node_opacity, node_opacity_default)
node_size = use_node_def_or_num(node_size, node_size_default)
node_shape = use_node_def_or_str(node_shape, node_shape_default)
node_border_color = use_node_def_or_str(node_border_color, node_border_color_default)
node_border_size = use_node_def_or_num(node_border_size, node_border_size_default)
node_label_color = use_node_def_or_str(node_label_color, node_label_color_default)
node_label_size = use_node_def_or_num(node_label_size, node_label_size_default)
node_hover = use_node_def_or_str(node_hover, node_hover_default)
node_click = use_node_def_or_str(node_click, node_click_default)
node_image = use_node_def_or_str(node_image, node_image_default)
# special case: additional user-defined node properties by a function that returns a dict
if node_properties is None:
node_properties = node_properties_default
elif isinstance(node_properties, dict):
val = node_properties
def node_properties(atom):
return val
elif node_properties == 'tv':
node_properties = node_properties_tv
# combined node annotation function: calls each of the individual ones
name_func = (
('label', node_label),
('color', node_color),
('opacity', node_opacity),
('size', node_size),
('shape', node_shape),
('border_color', node_border_color),
('border_size', node_border_size),
('label_color', node_label_color),
('label_size', node_label_size),
('hover', node_hover),
('click', node_click),
('image', node_image),
)
def func(atom):
data = {}
for n, f in name_func:
val = f(atom)
if val is not None:
data[n] = val
try:
data.update(node_properties(atom))
except Exception:
pass
return data
return func
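# --- Editor's sketch (illustration only): output of the combined annotation function ---
# With all defaults, the function returned above yields for a Node-type Atom roughly:
#
#     {"label": 'ConceptNode "cat"', "color": "red", "shape": "rectangle",
#      "hover": "<output of atom.short_string()>"}
#
# Keys whose individual annotation function returned None are simply omitted;
# "ConceptNode" and "cat" are assumed example values, not taken from the original code.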
def prepare_edge_func(edge_label, edge_color, edge_opacity, edge_size, edge_label_color,
edge_label_size, edge_hover, edge_click):
"""Prepare a function that calculates all annotations for an edge between Atoms."""
# individual edge annotation functions
edge_label = use_edge_def_or_str(edge_label, edge_label_default)
edge_color = use_edge_def_or_str(edge_color, edge_color_default)
edge_opacity = use_edge_def_or_num(edge_opacity, edge_opacity_default)
edge_size = use_edge_def_or_num(edge_size, edge_size_default)
edge_label_color = use_edge_def_or_str(edge_label_color, edge_label_color_default)
edge_label_size = use_edge_def_or_num(edge_label_size, edge_label_size_default)
edge_hover = use_edge_def_or_str(edge_hover, edge_hover_default)
edge_click = use_edge_def_or_str(edge_click, edge_click_default)
# combined edge annotation function: calls each of the individual ones
name_func = (
('label', edge_label),
('color', edge_color),
('opacity', edge_opacity),
('size', edge_size),
('label_color', edge_label_color),
('label_size', edge_label_size),
('hover', edge_hover),
('click', edge_click),
)
def func(atom1, atom2):
data = {}
for n, f in name_func:
val = f(atom1, atom2)
if val is not None:
data[n] = val
return data
return func
def use_node_def_or_str(given_value, default_func):
"""Transform a value of type (None, str, Callable) to a node annotation function."""
# Default: use pre-defined function from this module
if given_value is None:
func = default_func
# Transform: value to function that returns the value
elif isinstance(given_value, str):
given_value = str(given_value)
def func(atom):
return given_value
# Passthrough: value itself is a function
else:
func = given_value
return func
def use_node_def_or_num(given_value, default_func):
"""Transform a value of type (None, int, float, Callable) to a node annotation function."""
# Default: use pre-defined function from this module
if given_value is None:
func = default_func
# Transform: value to function that returns the value
elif isinstance(given_value, (int, float)):
given_value = float(given_value)
def func(atom):
return given_value
# Passthrough: value itself is a function
else:
func = given_value
return func
def use_edge_def_or_str(given_value, default_func):
"""Transform a value of type (None, str, Callable) to an edge annotation function."""
# Default: use pre-defined function from this module
if given_value is None:
func = default_func
# Transform: value to function that returns the value
elif isinstance(given_value, str):
given_value = str(given_value)
def func(atom1, atom2):
return given_value
# Passthrough: value itself is a function
else:
func = given_value
return func
def use_edge_def_or_num(given_value, default_func):
"""Transform a value of type (None, int, float, Callable) to an edge annotation function."""
# Default: use pre-defined function from this module
if given_value is None:
func = default_func
# Transform: value to function that returns the value
elif isinstance(given_value, (int, float)):
given_value = float(given_value)
def func(atom1, atom2):
return given_value
# Passthrough: value itself is a function
else:
func = given_value
return func
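# --- Editor's sketch (illustration only): behavior of the normalizer helpers above ---
# Each use_*_def_or_* helper turns the user-supplied argument into a function:
#
#     f = use_node_def_or_str(None, node_color_default)    # -> node_color_default itself
#     f = use_node_def_or_str("blue", node_color_default)  # -> f(atom) == "blue" for every atom
#     f = use_node_def_or_num(12, node_size_default)       # -> f(atom) == 12.0 (coerced to float)
#     f = use_node_def_or_str(lambda atom: atom.type_name,
#                             node_color_default)          # -> the callable is passed through unchanged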
def to_uid(atom):
"""Return a unique identifier for an Atom."""
return atom.id_string()
# Default functions for node annotations
# - "return None" means that the attribute and value won't be included
# in the output data, so that defaults of the JS library are used and files get smaller
# - A return of a value in some cases and None in other cases means that the
# default value of the JS library is used in None cases and again files get smaller
def node_label_default(atom):
# None => no node labels
return '{} "{}"'.format(atom.type_name, atom.name) if atom.is_node() else atom.type_name
def node_color_default(atom):
# None => black
return 'red' if atom.is_node() else None
def node_opacity_default(atom):
# None => 1.0
return None
def node_size_default(atom):
# None => 10
return None
def node_shape_default(atom):
# None => circle
return 'rectangle' if atom.is_node() else None
def node_border_color_default(atom):
# None => black
return None
def node_border_size_default(atom):
# None => 0.0
return None
def node_label_color_default(atom):
# None => black
return None
def node_label_size_default(atom):
# None => 12.0
return None
def node_hover_default(atom):
# None => no hover text
return atom.short_string()
def node_click_default(atom):
# None => no click text (in addition to always shown "Node: <id>" in header)
return None
def node_image_default(atom):
# None => no image inside node
return None
def node_properties_default(atom):
# None => no extra node annotations
return None
def node_properties_tv(atom):
return dict(mean=atom.tv.mean, confidence=atom.tv.confidence)
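# --- Editor's sketch (illustration only): effect of node_properties="tv" ---
# With node_properties="tv", prepare_node_func() selects node_properties_tv above, so each
# node's attribute dict gets the Atom's TruthValue merged in:
#
#     node_properties_tv(atom)  # -> {"mean": atom.tv.mean, "confidence": atom.tv.confidence}
#     # e.g. {"mean": 0.8, "confidence": 0.9} (assumed values), which func() then update()s into the node data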
# Default functions for edge annotations
def edge_label_default(atom1, atom2):
# None => no edge label
return None
def edge_color_default(atom1, atom2):
# None => black
return None if atom1.is_link() and atom2.is_link() else 'red'
def edge_opacity_default(atom1, atom2):
# None => 1.0
return None
def edge_size_default(atom1, atom2):
# None => 1.0
return None
def edge_label_color_default(atom1, atom2):
# None => black
return None
def edge_label_size_default(atom1, atom2):
# None => 8.0
return None
def edge_hover_default(atom1, atom2):
# None => no hover text
return None
def edge_click_default(atom1, atom2):
# None => no click text (in addition to always shown "Edge: <id>" in header)
return None
| <filename>mevis/_internal/conversion.py
from collections.abc import Callable as _Callable
import networkx as _nx
from opencog.type_constructors import AtomSpace as _AtomSpace
from .args import check_arg as _check_arg
def convert(data, graph_annotated=True, graph_directed=True,
node_label=None, node_color=None, node_opacity=None, node_size=None, node_shape=None,
node_border_color=None, node_border_size=None,
node_label_color=None, node_label_size=None, node_hover=None, node_click=None,
node_image=None, node_properties=None,
edge_label=None, edge_color=None, edge_opacity=None, edge_size=None,
edge_label_color=None, edge_label_size=None, edge_hover=None, edge_click=None):
"""Convert an Atomspace or list of Atoms to a NetworkX graph with annotations.
Several arguments accept a Callable.
- In case of node annotations, the Callable gets an Atom as input,
which the node represents in the graph.
The Callable needs to return one of the other types accepted by the argument,
e.g. ``str`` or ``int``/``float``.
- In case of edge annotations, the Callable gets two Atoms as input,
which the edge connects in the graph.
The Callable needs to return one of the other types accepted by the argument,
e.g. ``str`` or ``int``/``float``.
Several arguments accept a color, which can be in following formats:
- Name: ``"black"``, ``"red"``, ``"green"``, ...
- Color code
- 6 digit hex RGB code: ``"#05ac05"``
- 3 digit hex RGB code: ``"#0a0"`` (equivalent to ``"#00aa00"``)
Parameters
----------
data : Atomspace, list of Atoms
Input that gets converted to a graph.
graph_annotated : bool
If ``False``, no annotations are added to the graph. This could be used for
converting large AtomSpaces quickly to graphs that use less RAM and can
be exported to smaller files (e.g. also compressed as gml.gz) for inspection
with other tools.
graph_directed : bool
If ``True``, a NetworkX DiGraph is created. If ``False``, a NetworkX Graph is created.
node_label : str, Callable
Set a label for each node, which is shown as text below it.
node_color : str, Callable
Set a color for each node, which becomes the fill color of its shape.
node_opacity : float between 0.0 and 1.0
Set an opacity for each node, which becomes the opacity of its shape.
Caution: This is only supported by d3.
node_size : int, float, Callable
Set a size for each node, which becomes the height and width of its shape.
node_shape : str, Callable
Set a shape for each node, which is some geometrical form that has the
node coordinates in its center.
Possible values: ``"circle"``, ``"rectangle"``, ``"hexagon"``
node_border_color : str, Callable
Set a border color for each node, which influences the border drawn around its shape.
node_border_size : int, float, Callable
Set a border size for each node, which influences the border drawn around its shape.
node_label_color : str, Callable
Set a label color for each node, which determines the font color
of the text below the node.
node_label_size : int, float, Callable
Set a label size for each node, which determines the font size
of the text below the node.
node_hover : str, Callable
Set a hover text for each node, which shows up beside the mouse cursor
when hovering over a node.
node_click : str, Callable
Set a click text for each node, which shows up in a div element below the plot
when clicking on a node and can easily be copied and pasted.
node_image : str, Callable
Set an image for each node, which appears within its shape.
Possible values:
- URL pointing to an image
- Data URL encoding the image
node_properties : str, dict, Callable
Set additional properties for each node, which may not immediately be translated
into a visual element, but can be chosen in the data selection menu in the
interactive HTML visualizations to map them on some plot element.
These properties also appear when exporting a graph to a file in a format
such as GML and may be recognized by external visualization tools.
Note that a Callable needs to return a dict in this case, and each key becomes
a property, which is equivalent to the other properties such as node_size and
node_color.
Special cases:
- ``node_properties="tv"`` is a shortcut for using a function that returns
``{"mean": atom.tv.mean, "confidence": atom.tv.confidence}``
- The keys ``"x"``, ``"y"`` and ``"z"`` are translated into node coordinates.
Examples:
- ``dict(x=0.0)``: This fixes the x coordinate of each node to 0.0, so that the
JavaScript layout algorithm does not influence it, but the nodes remain
free to move in the y and z directions.
- ``lambda atom: dict(x=2.0) if atom.is_node() else None``:
This fixes the x coordinate of each Atom of type Node to 2.0
but allows each Atom of type Link to move freely.
- ``lambda atom: dict(y=-len(atom.out)*100) if atom.is_link() else dict(y=0)``
This fixes the y coordinates of Atoms at different heights. Atoms of type Node
are put at the bottom and Atoms of type Link are ordered by the number of their
outgoing edges. The result is a hierarchical visualization that has some
similarity with the "dot" layout.
- ``lambda atom: dict(x=-100) if atom.is_node() else dict(x=100)``:
This fixes the x coordinate of Node Atoms at -100 and of Link Atoms at 100.
The result is a visualization with two lines of nodes that has some
similarity with the "bipartite" layout.
edge_label : str, Callable
Set a label for each edge, which becomes the text plotted in the middle of the edge.
edge_color : str, Callable
Set a color for each edge, which becomes the color of the line representing the edge.
edge_opacity : int, float, Callable
Set an opacity for each edge, which allows making it transparent to some degree.
edge_size : int, float, Callable
Set a size for each edge, which becomes the width of the line representing the edge.
edge_label_color : str, Callable
Set a color for each edge label, which becomes the color of the text in the midpoint
of the edge.
edge_label_size : int, float, Callable
Set a size for each edge label, which becomes the size of the text in the midpoint
of the edge.
edge_hover : str, Callable
Set a hover text for each edge, which shows up beside the mouse cursor
when hovering over an edge.
edge_click : str, Callable
Set a click text for each edge, which shows up in a div element below the plot
when clicking on an edge.
Returns
-------
graph : NetworkX Graph or DiGraph
Whether an undirected or directed graph is created depends on the argument "directed".
"""
# Argument processing
_check_arg(data, 'data', (list, _AtomSpace))
_check_arg(graph_annotated, 'graph_annotated', bool)
_check_arg(graph_directed, 'graph_directed', bool)
_check_arg(node_label, 'node_label', (str, _Callable), allow_none=True)
_check_arg(node_color, 'node_color', (str, _Callable), allow_none=True)
_check_arg(node_opacity, 'node_opacity', (int, float, _Callable), allow_none=True)
_check_arg(node_size, 'node_size', (int, float, _Callable), allow_none=True)
_check_arg(node_shape, 'node_shape', (str, _Callable), allow_none=True)
_check_arg(node_border_color, 'node_border_color', (str, _Callable), allow_none=True)
_check_arg(node_border_size, 'node_border_size', (int, float, _Callable), allow_none=True)
_check_arg(node_label_color, 'node_label_color', (str, _Callable), allow_none=True)
_check_arg(node_label_size, 'node_label_size', (int, float, _Callable), allow_none=True)
_check_arg(node_hover, 'node_hover', (str, _Callable), allow_none=True)
_check_arg(node_click, 'node_click', (str, _Callable), allow_none=True)
_check_arg(node_image, 'node_image', (str, _Callable), allow_none=True)
_check_arg(node_properties, 'node_properties', (str, dict, _Callable), allow_none=True)
_check_arg(edge_label, 'edge_label', (str, _Callable), allow_none=True)
_check_arg(edge_color, 'edge_color', (str, _Callable), allow_none=True)
_check_arg(edge_opacity, 'edge_opacity', (int, float, _Callable), allow_none=True)
_check_arg(edge_size, 'edge_size', (int, float, _Callable), allow_none=True)
_check_arg(edge_label_color, 'edge_label_color', (str, _Callable), allow_none=True)
_check_arg(edge_label_size, 'edge_label_size', (int, float, _Callable), allow_none=True)
_check_arg(edge_hover, 'edge_hover', (str, _Callable), allow_none=True)
_check_arg(edge_click, 'edge_click', (str, _Callable), allow_none=True)
# Prepare annotation functions
if graph_annotated:
node_ann = prepare_node_func(
node_label, node_color, node_opacity, node_size, node_shape, node_border_color,
node_border_size, node_label_color, node_label_size, node_hover, node_click,
node_image, node_properties)
edge_ann = prepare_edge_func(
edge_label, edge_color, edge_opacity, edge_size,
edge_label_color, edge_label_size, edge_hover, edge_click)
else:
empty = dict()
def node_ann(atom):
return empty
def edge_ann(atom1, atom2):
return empty
# Create the NetworkX graph
graph = _nx.DiGraph() if graph_directed else _nx.Graph()
# 0) Set graph annotations
graph.graph['node_click'] = '$hover' # node_click will by default show content of node_hover
# 1) Add vertices and their annotations
for atom in data:
graph.add_node(to_uid(atom), **node_ann(atom))
# 2) Add edges and their annotations (separate step to exclude edges to filtered vertices)
for atom in data:
uid = to_uid(atom)
if atom.is_link():
# for all that is incoming to the Atom
for atom2 in atom.incoming:
uid2 = to_uid(atom2)
if uid2 in graph.nodes:
graph.add_edge(uid2, uid, **edge_ann(atom2, atom))
# for all that is outgoing of the Atom
for atom2 in atom.out:
uid2 = to_uid(atom2)
if uid2 in graph.nodes:
graph.add_edge(uid, uid2, **edge_ann(atom, atom2))
return graph
def prepare_node_func(node_label, node_color, node_opacity, node_size, node_shape,
node_border_color, node_border_size, node_label_color, node_label_size,
node_hover, node_click, node_image, node_properties):
"""Prepare a function that calculates all annotations for a node representing an Atom."""
# individual node annotation functions
node_label = use_node_def_or_str(node_label, node_label_default)
node_color = use_node_def_or_str(node_color, node_color_default)
node_opacity = use_node_def_or_num(node_opacity, node_opacity_default)
node_size = use_node_def_or_num(node_size, node_size_default)
node_shape = use_node_def_or_str(node_shape, node_shape_default)
node_border_color = use_node_def_or_str(node_border_color, node_border_color_default)
node_border_size = use_node_def_or_num(node_border_size, node_border_size_default)
node_label_color = use_node_def_or_str(node_label_color, node_label_color_default)
node_label_size = use_node_def_or_num(node_label_size, node_label_size_default)
node_hover = use_node_def_or_str(node_hover, node_hover_default)
node_click = use_node_def_or_str(node_click, node_click_default)
node_image = use_node_def_or_str(node_image, node_image_default)
# special case: additional user-defined node properties by a function that returns a dict
if node_properties is None:
node_properties = node_properties_default
elif isinstance(node_properties, dict):
val = node_properties
def node_properties(atom):
return val
elif node_properties == 'tv':
node_properties = node_properties_tv
# combined node annotation function: calls each of the individual ones
name_func = (
('label', node_label),
('color', node_color),
('opacity', node_opacity),
('size', node_size),
('shape', node_shape),
('border_color', node_border_color),
('border_size', node_border_size),
('label_color', node_label_color),
('label_size', node_label_size),
('hover', node_hover),
('click', node_click),
('image', node_image),
)
def func(atom):
data = {}
for n, f in name_func:
val = f(atom)
if val is not None:
data[n] = val
try:
data.update(node_properties(atom))
except Exception:
pass
return data
return func
def prepare_edge_func(edge_label, edge_color, edge_opacity, edge_size, edge_label_color,
edge_label_size, edge_hover, edge_click):
"""Prepare a function that calculates all annotations for an edge between Atoms."""
# individual edge annotation functions
edge_label = use_edge_def_or_str(edge_label, edge_label_default)
edge_color = use_edge_def_or_str(edge_color, edge_color_default)
edge_opacity = use_edge_def_or_num(edge_opacity, edge_opacity_default)
edge_size = use_edge_def_or_num(edge_size, edge_size_default)
edge_label_color = use_edge_def_or_str(edge_label_color, edge_label_color_default)
edge_label_size = use_edge_def_or_num(edge_label_size, edge_label_size_default)
edge_hover = use_edge_def_or_str(edge_hover, edge_hover_default)
edge_click = use_edge_def_or_str(edge_click, edge_click_default)
# combined edge annotation function: calls each of the individual ones
name_func = (
('label', edge_label),
('color', edge_color),
('opacity', edge_opacity),
('size', edge_size),
('label_color', edge_label_color),
('label_size', edge_label_size),
('hover', edge_hover),
('click', edge_click),
)
def func(atom1, atom2):
data = {}
for n, f in name_func:
val = f(atom1, atom2)
if val is not None:
data[n] = val
return data
return func
def use_node_def_or_str(given_value, default_func):
"""Transform a value of type (None, str, Callable) to a node annotation function."""
# Default: use pre-defined function from this module
if given_value is None:
func = default_func
# Transform: value to function that returns the value
elif isinstance(given_value, str):
given_value = str(given_value)
def func(atom):
return given_value
# Passthrough: value itself is a function
else:
func = given_value
return func
def use_node_def_or_num(given_value, default_func):
"""Transform a value of type (None, int, float, Callable) to a node annotation function."""
# Default: use pre-defined function from this module
if given_value is None:
func = default_func
# Transform: value to function that returns the value
elif isinstance(given_value, (int, float)):
given_value = float(given_value)
def func(atom):
return given_value
# Passthrough: value itself is a function
else:
func = given_value
return func
def use_edge_def_or_str(given_value, default_func):
"""Transform a value of type (None, str, Callable) to an edge annotation function."""
# Default: use pre-defined function from this module
if given_value is None:
func = default_func
# Transform: value to function that returns the value
elif isinstance(given_value, str):
given_value = str(given_value)
def func(atom1, atom2):
return given_value
# Passthrough: value itself is a function
else:
func = given_value
return func
def use_edge_def_or_num(given_value, default_func):
"""Transform a value of type (None, int, float, Callable) to an edge annotation function."""
# Default: use pre-defined function from this module
if given_value is None:
func = default_func
# Transform: value to function that returns the value
elif isinstance(given_value, (int, float)):
given_value = float(given_value)
def func(atom1, atom2):
return given_value
# Passthrough: value itself is a function
else:
func = given_value
return func
def to_uid(atom):
"""Return a unique identifier for an Atom."""
return atom.id_string()
# Default functions for node annotations
# - "return None" means that the attribute and value won't be included
# in the output data, so that defaults of the JS library are used and files get smaller
# - A return of a value in some cases and None in other cases means that the
# default value of the JS library is used in None cases and again files get smaller
def node_label_default(atom):
# None => no node labels
return '{} "{}"'.format(atom.type_name, atom.name) if atom.is_node() else atom.type_name
def node_color_default(atom):
# None => black
return 'red' if atom.is_node() else None
def node_opacity_default(atom):
# None => 1.0
return None
def node_size_default(atom):
# None => 10
return None
def node_shape_default(atom):
# None => circle
return 'rectangle' if atom.is_node() else None
def node_border_color_default(atom):
# None => black
return None
def node_border_size_default(atom):
# None => 0.0
return None
def node_label_color_default(atom):
# None => black
return None
def node_label_size_default(atom):
# None => 12.0
return None
def node_hover_default(atom):
# None => no hover text
return atom.short_string()
def node_click_default(atom):
# None => no click text (in addition to always shown "Node: <id>" in header)
return None
def node_image_default(atom):
# None => no image inside node
return None
def node_properties_default(atom):
# None => no extra node annotations
return None
def node_properties_tv(atom):
return dict(mean=atom.tv.mean, confidence=atom.tv.confidence)
# Default functions for edge annotations
def edge_label_default(atom1, atom2):
# None => no edge label
return None
def edge_color_default(atom1, atom2):
# None => black
return None if atom1.is_link() and atom2.is_link() else 'red'
def edge_opacity_default(atom1, atom2):
# None => 1.0
return None
def edge_size_default(atom1, atom2):
# None => 1.0
return None
def edge_label_color_default(atom1, atom2):
# None => black
return None
def edge_label_size_default(atom1, atom2):
# None => 8.0
return None
def edge_hover_default(atom1, atom2):
# None => no hover text
return None
def edge_click_default(atom1, atom2):
# None => no click text (in addition to always shown "Edge: <id>" in header)
return None
| en | 0.819813 | Convert an Atomspace or list of Atoms to a NetworkX graph with annotations. Several arguments accept a Callable. - In case of node annotations, the Callable gets an Atom as input, which the node represents in the graph. The Callable needs to return one of the other types accepted by the argument, e.g. ``str`` or ``int``/``float``. - In case of edge annotations, the Callable gets two Atoms as input, which the edge connects in the graph. The Callable needs to return one of the other types accepted by the argument, e.g. ``str`` or ``int``/``float``. Several arguments accept a color, which can be in following formats: - Name: ``"black"``, ``"red"``, ``"green"``, ... - Color code - 6 digit hex RGB code: ``"#05ac05"`` - 3 digit hex RGB code: ``"#0a0"`` (equivalent to ``"#00aa00"``) Parameters ---------- data : Atomspace, list of Atoms Input that gets converted to a graph. graph_annotated : bool If ``False``, no annotations are added to the graph. This could be used for converting large AtomSpaces quickly to graphs that use less RAM and can be exported to smaller files (e.g. also compressed as gml.gz) for inspection with other tools. graph_directed : bool If ``True``, a NetworkX DiGraph is created. If ``False``, a NetworkX Graph is created. node_label : str, Callable Set a label for each node, which is shown as text below it. node_color : str, Callable Set a color for each node, which becomes the fill color of its shape. node_opacity : float between 0.0 and 1.0 Set an opacity for each node, which becomes the opacity of its shape. Caution: This is only supported by d3. node_size : int, float, Callable Set a size for each node, which becomes the height and width of its shape. node_shape : str, Callable Set a shape for each node, which is some geometrical form that has the node coordinates in its center. Possible values: ``"circle"``, ``"rectangle"``, ``"hexagon"`` node_border_color : str, Callable Set a border color for each node, which influences the border drawn around its shape. node_border_size : int, float, Callable Set a border size for each node, which influences the border drawn around its shape. node_label_color : str, Callable Set a label color for each node, which determines the font color of the text below the node. node_label_size : int, float, Callable Set a label size for each node, which determines the font size of the text below the node. node_hover : str, Callable Set a hover text for each node, which shows up besides the mouse cursor when hovering over a node. node_click : str, Callable Set a click text for each node, which shows up in a div element below the plot when clicking on a node and can easily be copied and pasted. node_image : str, Callable Set an image for each node, which appears within its shape. Possible values: - URL pointing to an image - Data URL encoding the image node_properties : str, dict, Callable Set additional properties for each node, which may not immediately be translated into a visual element, but can be chosen in the data selection menu in the interactive HTML visualizations to map them on some plot element. These properties also appear when exporting a graph to a file in a format such as GML and may be recognized by external visualization tools. Note that a Callable needs to return a dict in this case, and each key becomes a property, which is equivalent to the other properties such as node_size and node_color. 
Special cases: - ``node_properties="tv"`` is a shortcut for using a function that returns ``{"mean": atom.tv.mean, "confidence": atom.tv.confidence}`` - Keys ``"x"``, ``"y"`` and ``"z"`` properties are translated into node coordinates. Examples: - ``dict(x=0.0)``: This fixes the x coordinate of each node to 0.0, so that the JavaScript layout algorithm does not influence it, but the nodes remain free to move in the y and z directions. - ``lambda atom: dict(x=2.0) if atom.is_node() else None``: This fixes the x coordinate of each Atom of type Node to 2.0 but allows each Atom of type Link to move freely. - ``lambda atom: dict(y=-len(atom.out)*100) if atom.is_link() else dict(y=0)`` This fixes the y coordinates of Atoms at different heights. Atoms of type Node are put at the bottom and Atoms of type Link are ordered by the number of their outgoing edges. The results is a hierarchical visualization that has some similarity with the "dot" layout. - ``lambda atom: dict(x=-100) if atom.is_node() else dict(x=100)``: This fixes the x coordinate of Node Atoms at -100 and of Link Atoms at 100. The results is a visualization with two lines of nodes that has some similarity with the "bipartite" layout. edge_label : str, Callable Set a label for each edge, which becomes the text plotted in the middle of the edge. edge_color : str, Callable Set a color for each edge, which becomes the color of the line representing the edge. edge_opacity : int, float, Callable Set an opacity for each edge, which allows to make it transparent to some degree. edge_size : int, float, Callable Set a size for each edge, which becomes the width of the line representing the edge. edge_label_color : str, Callable Set a color for each edge label, which becomes the color of the text in the midpoint of the edge. edge_label_size : int, float, Callable Set a size for each edge label, which becomes the size of the text in the midpoint of the edge. edge_hover : str, Callable edge_click : str, Callable Returns ------- graph : NetworkX Graph or DiGraph Whether an undirected or directed graph is created depends on the argument "directed". # Argument processing # Prepare annoation functions # Create the NetworkX graph # 0) Set graph annotations # node_click will by default show content of node_hover # 1) Add vertices and their annotations # 2) Add edges and their annotations (separate step to exclude edges to filtered vertices) # for all that is incoming to the Atom # for all that is outgoing of the Atom Prepare a function that calculates all annoations for a node representing an Atom. # individual node annotation functions # special case: additional user-defined node properties by a function that returns a dict # combined node annotation function: calls each of the individual ones Prepare a function that calculates all annoations for an edge between Atoms. # individual edge annotation functions # combined edge annotation function: calls each of the individual ones Transform a value of type (None, str, Callable) to a node annotation function. # Default: use pre-defined function from this module # Transform: value to function that returns the value # Passthrough: value itself is a function Transform a value of type (None, int, float, Callable) to a node annotation function. # Default: use pre-defined function from this module # Transform: value to function that returns the value # Passthrough: value itself is a function Transform a value of type (None, str, Callable) to an edge annotation function. 
# Default: use pre-defined function from this module # Transform: value to function that returns the value # Passthrough: value itself is a function Transform a value of type (None, int, float, Callable) to an edge annotation function. # Default: use pre-defined function from this module # Transform: value to function that returns the value # Passthrough: value itself is a function Return a unique identifier for an Atom. # Default functions for node annotations # - "return None" means that the attribute and value won't be included # to the output data, so that defaults of the JS library are used and files get smaller # - A return of a value in some cases and None in other cases means that the # default value of the JS library is used in None cases and again files get smaller # None => no node labels # None => black # None => 1.0 # None => 10 # None => circle # None => black # None => 0.0 # None => black # None => 12.0 # None => no hover text # None => no click text (in addition to always shown "Node: <id>" in header) # None => no image inside node # None => no extra node annotations # Default functions for edge annotations # None => no edge label # None => black # None => 1.0 # None => 1.0 # None => black # None => 8.0 # None => no hover text # None => no click text (in addition to always shown "Edge: <id>" in header) | 3.062774 | 3 |
testData/completion/classMethodCls.py | seandstewart/typical-pycharm-plugin | 0 | 8893 | <filename>testData/completion/classMethodCls.py
from builtins import *
from pydantic import BaseModel
class A(BaseModel):
abc: str
@classmethod
def test(cls):
return cls.<caret>
| <filename>testData/completion/classMethodCls.py
from builtins import *
from pydantic import BaseModel
class A(BaseModel):
abc: str
@classmethod
def test(cls):
return cls.<caret>
| none | 1 | 2.073626 | 2 |
|
watcher_metering/tests/agent/test_agent.py | b-com/watcher-metering | 2 | 8894 | <filename>watcher_metering/tests/agent/test_agent.py<gh_stars>1-10
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from collections import OrderedDict
import os
import types
from mock import MagicMock
from mock import Mock
from mock import patch
from mock import PropertyMock
import msgpack
import operator
from oslo_config import cfg
from oslotest.base import BaseTestCase
from stevedore.driver import DriverManager
from stevedore.extension import Extension
from watcher_metering.agent.agent import Agent
from watcher_metering.agent.measurement import Measurement
from watcher_metering.tests.agent.agent_fixtures import ConfFixture
from watcher_metering.tests.agent.agent_fixtures import DummyMetricPuller
from watcher_metering.tests.agent.agent_fixtures import FakeMetricPuller
class TestAgent(BaseTestCase):
# patches to be applied for each test in this test suite
patches = []
def setUp(self):
super(TestAgent, self).setUp()
self.conf = cfg.ConfigOpts()
# To load the drivers without using the config file
self.useFixture(ConfFixture(self.conf))
def _fake_parse(self, args=[]):
return cfg.ConfigOpts._parse_cli_opts(self, [])
_fake_parse_method = types.MethodType(_fake_parse, self.conf)
self.conf._parse_cli_opts = _fake_parse_method
# First dependency to be returned
self.dummy_driver_manager = DriverManager.make_test_instance(
extension=Extension(
name=DummyMetricPuller.get_name(),
entry_point='fake.entry.point',
plugin=DummyMetricPuller,
obj=None,
),
namespace='TESTING',
)
# 2nd dependency to be returned
self.fake_driver_manager = DriverManager.make_test_instance(
extension=Extension(
name=FakeMetricPuller.get_name(),
entry_point='fake.entry.point',
plugin=FakeMetricPuller,
obj=None,
),
namespace='TESTING',
)
self.defaults_drivers = {
DummyMetricPuller.get_name(): self.dummy_driver_manager,
FakeMetricPuller.get_name(): self.fake_driver_manager,
}
def _fake_loader(name, **kw):
return self.defaults_drivers[name]
# Patches the agent socket
self.m_agent_socket = MagicMock(autospec=True)
self.patches.extend([
# Deactivates the nanomsg socket
patch(
"watcher_metering.agent.agent.nanomsg.Socket",
new=self.m_agent_socket,
),
# Sets the test namespace to 'TESTING'
patch.object(
Agent,
"namespace",
PropertyMock(return_value='TESTING'),
),
# Patches the driver manager to return our test drivers
# instead of the real ones
patch(
"watcher_metering.load.loader.DriverManager",
MagicMock(side_effect=_fake_loader),
),
])
# Applies all of our patches before each test
for _patch in self.patches:
_patch.start()
self.agent = Agent(
conf=self.conf,
driver_names=self.conf.agent.driver_names,
use_nanoconfig_service=False,
publisher_endpoint="fake",
nanoconfig_service_endpoint="",
nanoconfig_update_endpoint="",
nanoconfig_profile="nanoconfig://test_profile"
)
# Default ticking is set to 0 to reduce test execution time
self.agent.TICK_INTERVAL = 0
def tearDown(self):
super(TestAgent, self).tearDown()
# The drivers are stored at the class level so we need to clear
# it after each test
self.agent.drivers.clear()
for _patch in self.patches:
_patch.stop()
def test_register_driver(self):
expected_driver1_key = "metrics_driver.dummy_data.puller.dummy"
expected_driver2_key = "metrics_driver.fake_data.puller.fake"
self.agent.register_drivers()
self.assertEqual(
sorted(self.agent.drivers.keys()),
[expected_driver1_key, expected_driver2_key]
)
sorted_drivers = OrderedDict(
sorted(self.agent.drivers.items(), key=operator.itemgetter(0))
)
self.assertEqual(len(sorted_drivers), 2)
driver1 = self.agent.drivers[expected_driver1_key]
driver2 = self.agent.drivers[expected_driver2_key]
self.assertEqual(driver1.title, "metrics_driver.dummy")
self.assertEqual(driver1.probe_id, "data.puller.dummy")
self.assertEqual(driver1.interval, 0.01)
self.assertEqual(driver2.title, "metrics_driver.fake")
self.assertEqual(driver2.probe_id, "data.puller.fake")
self.assertEqual(driver2.interval, 0.01)
self.assertIn(self.agent, driver1._observers)
self.assertIn(self.agent, driver2._observers)
def test_unregister_driver(self):
driver_key = "metrics_driver.dummy_data.puller.dummy"
self.agent.register_drivers()
self.agent.unregister_driver(driver_key)
# Initially there are 2 drivers => 2 - 1 == 1
self.assertEqual(len(self.agent.drivers), 1)
@patch.object(Measurement, "as_dict")
def test_send_measurements(self, m_as_dict):
self.agent.register_drivers()
measurement_dict = OrderedDict(
name="dummy.data.puller",
unit="",
type_="",
value=13.37,
resource_id="test_hostname",
host="test_hostname",
timestamp="2015-08-04T15:15:45.703542",
)
m_as_dict.return_value = measurement_dict
measurement = Measurement(**measurement_dict)
for driver in self.agent.drivers.values():
driver.send_measurements([measurement])
break # only the first one
expected_encoded_msg = msgpack.dumps(measurement_dict)
self.m_agent_socket.return_value.send.assert_called_once_with(
expected_encoded_msg
)
@patch.object(DummyMetricPuller, "is_alive")
@patch.object(DummyMetricPuller, "start")
@patch("watcher_metering.agent.manager.MetricManager.lock")
def test_check_drivers_alive(self, m_lock, m_start, m_is_alive):
m_lock.acquire = Mock(return_value=True) # Emulates a thread behavior
m_lock.release = Mock(return_value=True) # Emulates a thread behavior
m_is_alive.return_value = True # Emulates a thread that is running
m_start.return_value = None
self.agent.register_drivers()
self.agent.check_drivers_alive()
self.assertTrue(m_is_alive.called)
self.assertFalse(m_start.called)
@patch.object(DummyMetricPuller, "is_alive")
@patch.object(DummyMetricPuller, "start")
@patch("watcher_metering.agent.manager.MetricManager.lock")
def test_check_drivers_alive_with_driver_stopped(self, m_lock, m_start,
m_is_alive):
m_lock.acquire = Mock(return_value=True) # Emulates a thread behavior
m_lock.release = Mock(return_value=True) # Emulates a thread behavior
m_is_alive.side_effect = [False, True]
m_start.side_effect = [RuntimeError, True, True] # Fails once
self.agent.register_drivers()
# should re-run the driver
self.agent.check_drivers_alive()
self.assertEqual(m_is_alive.call_count, 1)
self.assertEqual(m_start.call_count, 2)
@patch.object(os._Environ, "__setitem__")
@patch("watcher_metering.agent.agent.os.environ.get")
def test_setup_nanoconfig_valid_using_default(self, m_env_getter,
m_env_setter):
# Override default where it is set to False
m_env_getter.side_effect = ["FAKE_NN_CONFIG_SERVICE",
"FAKE_NN_CONFIG_UPDATES"]
self.agent.use_nanoconfig_service = True
self.agent.nanoconfig_service_endpoint = ""
self.agent.nanoconfig_update_endpoint = ""
self.agent.set_nanoconfig_endpoints()
self.assertEqual(m_env_getter.call_count, 2)
m_env_getter.assert_any_call("NN_CONFIG_SERVICE") # First call
m_env_getter.assert_called_with("NN_CONFIG_UPDATES") # Last call
self.assertEqual(m_env_setter.call_count, 0)
self.assertEqual(self.agent.nanoconfig_service_endpoint,
"FAKE_NN_CONFIG_SERVICE")
self.assertEqual(self.agent.nanoconfig_update_endpoint,
"FAKE_NN_CONFIG_UPDATES")
@patch.object(os._Environ, "__setitem__")
@patch("watcher_metering.agent.agent.os.environ.get")
def test_setup_nanoconfig_valid_custom_values(self, m_env_getter,
m_env_setter):
# Override default where it is set to False
m_env_getter.side_effect = ["FAKE_NN_CONFIG_SERVICE",
"FAKE_NN_CONFIG_UPDATES"]
self.agent.use_nanoconfig_service = True
self.agent.nanoconfig_service_endpoint = "CUSTOM_NN_CONFIG_SERVICE"
self.agent.nanoconfig_update_endpoint = "CUSTOM_NN_CONFIG_UPDATES"
self.agent.set_nanoconfig_endpoints()
self.assertEqual(m_env_getter.call_count, 2)
m_env_getter.assert_any_call("NN_CONFIG_SERVICE")
m_env_getter.assert_called_with("NN_CONFIG_UPDATES")
m_env_setter.assert_any_call("NN_CONFIG_SERVICE",
"CUSTOM_NN_CONFIG_SERVICE")
m_env_setter.assert_called_with("NN_CONFIG_UPDATES",
"CUSTOM_NN_CONFIG_UPDATES")
self.assertEqual(self.agent.nanoconfig_service_endpoint,
"CUSTOM_NN_CONFIG_SERVICE")
self.assertEqual(self.agent.nanoconfig_update_endpoint,
"CUSTOM_NN_CONFIG_UPDATES")
@patch.object(os._Environ, "__setitem__")
@patch("watcher_metering.agent.agent.os.environ.get")
def test_setup_nanoconfig_invalid_service(self, m_env_getter,
m_env_setter):
# Override default where it is set to False
m_env_getter.return_value = "" # Emulates empty ENV vars
self.agent.use_nanoconfig_service = True
self.agent.nanoconfig_service_endpoint = ""
self.agent.nanoconfig_update_endpoint = "CUSTOM_NN_CONFIG_UPDATES"
self.assertRaises(ValueError, self.agent.set_nanoconfig_endpoints)
m_env_getter.assert_called_once_with("NN_CONFIG_SERVICE")
self.assertEqual(m_env_setter.call_count, 0)
@patch.object(os._Environ, "__setitem__")
@patch("watcher_metering.agent.agent.os.environ.get")
def test_setup_nanoconfig_invalid_update(self, m_env_getter, m_env_setter):
# Override default where it is set to False
m_env_getter.return_value = "" # Emulates empty ENV vars
self.agent.use_nanoconfig_service = True
self.agent.nanoconfig_service_endpoint = "CUSTOM_NN_CONFIG_SERVICE"
self.agent.nanoconfig_update_endpoint = ""
self.assertRaises(ValueError, self.agent.set_nanoconfig_endpoints)
m_env_getter.assert_any_call("NN_CONFIG_SERVICE")
m_env_getter.assert_called_with("NN_CONFIG_UPDATES")
m_env_setter.assert_called_once_with("NN_CONFIG_SERVICE",
"CUSTOM_NN_CONFIG_SERVICE")
@patch.object(Agent, 'check_drivers_alive', MagicMock())
@patch("watcher_metering.agent.manager."
"MetricManager.terminated",
new_callable=PropertyMock)
def test_run_agent(self, m_terminated):
# Patches the guard/exit condition of the thread periodic event loop
# -> 1st time = False (carry on) and 2nd = True (Should terminate)
m_terminated.side_effect = [False, True]
self.agent.run()
self.assertEqual(m_terminated.call_count, 2)
@patch.object(DummyMetricPuller, 'send_measurements', MagicMock())
def test_stop_agent(self):
self.agent.register_drivers()
self.agent.start()
self.agent.join(timeout=.01)
self.agent.stop()
self.assertEqual(len(self.agent.drivers.values()), 2)
self.assertTrue(
all([driver.terminated for driver in self.agent.drivers.values()])
)
self.assertTrue(self.agent.terminated)
self.assertFalse(self.agent.is_alive())
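# --- Editor's note (sketch): running this test module ---
# These oslotest/BaseTestCase tests are normally driven by the project's own test runner;
# the exact command depends on the repository's tox/testr configuration, which is not shown
# here. A plain invocation that usually works for unittest-compatible suites (assumption):
#
#     python -m pytest watcher_metering/tests/agent/test_agent.py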
| <filename>watcher_metering/tests/agent/test_agent.py<gh_stars>1-10
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from collections import OrderedDict
import os
import types
from mock import MagicMock
from mock import Mock
from mock import patch
from mock import PropertyMock
import msgpack
import operator
from oslo_config import cfg
from oslotest.base import BaseTestCase
from stevedore.driver import DriverManager
from stevedore.extension import Extension
from watcher_metering.agent.agent import Agent
from watcher_metering.agent.measurement import Measurement
from watcher_metering.tests.agent.agent_fixtures import ConfFixture
from watcher_metering.tests.agent.agent_fixtures import DummyMetricPuller
from watcher_metering.tests.agent.agent_fixtures import FakeMetricPuller
class TestAgent(BaseTestCase):
# patches to be applied for each test in this test suite
patches = []
def setUp(self):
super(TestAgent, self).setUp()
self.conf = cfg.ConfigOpts()
# To load the drivers without using the config file
self.useFixture(ConfFixture(self.conf))
def _fake_parse(self, args=[]):
return cfg.ConfigOpts._parse_cli_opts(self, [])
_fake_parse_method = types.MethodType(_fake_parse, self.conf)
self.conf._parse_cli_opts = _fake_parse_method
# First dependency to be returned
self.dummy_driver_manager = DriverManager.make_test_instance(
extension=Extension(
name=DummyMetricPuller.get_name(),
entry_point='fake.entry.point',
plugin=DummyMetricPuller,
obj=None,
),
namespace='TESTING',
)
# 2nd dependency to be returned
self.fake_driver_manager = DriverManager.make_test_instance(
extension=Extension(
name=FakeMetricPuller.get_name(),
entry_point='fake.entry.point',
plugin=FakeMetricPuller,
obj=None,
),
namespace='TESTING',
)
self.defaults_drivers = {
DummyMetricPuller.get_name(): self.dummy_driver_manager,
FakeMetricPuller.get_name(): self.fake_driver_manager,
}
def _fake_loader(name, **kw):
return self.defaults_drivers[name]
# Patches the agent socket
self.m_agent_socket = MagicMock(autospec=True)
self.patches.extend([
# Deactivates the nanomsg socket
patch(
"watcher_metering.agent.agent.nanomsg.Socket",
new=self.m_agent_socket,
),
# Sets the test namespace to 'TESTING'
patch.object(
Agent,
"namespace",
PropertyMock(return_value='TESTING'),
),
# Patches the driver manager to return our test drivers
# instead of the real ones
patch(
"watcher_metering.load.loader.DriverManager",
MagicMock(side_effect=_fake_loader),
),
])
# Applies all of our patches before each test
for _patch in self.patches:
_patch.start()
self.agent = Agent(
conf=self.conf,
driver_names=self.conf.agent.driver_names,
use_nanoconfig_service=False,
publisher_endpoint="fake",
nanoconfig_service_endpoint="",
nanoconfig_update_endpoint="",
nanoconfig_profile="nanoconfig://test_profile"
)
# Default ticking is set to 0 to reduce test execution time
self.agent.TICK_INTERVAL = 0
def tearDown(self):
super(TestAgent, self).tearDown()
# The drivers are stored at the class level so we need to clear
# it after each test
self.agent.drivers.clear()
for _patch in self.patches:
_patch.stop()
def test_register_driver(self):
expected_driver1_key = "metrics_driver.dummy_data.puller.dummy"
expected_driver2_key = "metrics_driver.fake_data.puller.fake"
self.agent.register_drivers()
self.assertEqual(
sorted(self.agent.drivers.keys()),
[expected_driver1_key, expected_driver2_key]
)
sorted_drivers = OrderedDict(
sorted(self.agent.drivers.items(), key=operator.itemgetter(0))
)
self.assertEqual(len(sorted_drivers), 2)
driver1 = self.agent.drivers[expected_driver1_key]
driver2 = self.agent.drivers[expected_driver2_key]
self.assertEqual(driver1.title, "metrics_driver.dummy")
self.assertEqual(driver1.probe_id, "data.puller.dummy")
self.assertEqual(driver1.interval, 0.01)
self.assertEqual(driver2.title, "metrics_driver.fake")
self.assertEqual(driver2.probe_id, "data.puller.fake")
self.assertEqual(driver2.interval, 0.01)
self.assertIn(self.agent, driver1._observers)
self.assertIn(self.agent, driver2._observers)
def test_unregister_driver(self):
driver_key = "metrics_driver.dummy_data.puller.dummy"
self.agent.register_drivers()
self.agent.unregister_driver(driver_key)
# Initially there are 2 drivers => 2 - 1 == 1
self.assertEqual(len(self.agent.drivers), 1)
@patch.object(Measurement, "as_dict")
def test_send_measurements(self, m_as_dict):
self.agent.register_drivers()
measurement_dict = OrderedDict(
name="dummy.data.puller",
unit="",
type_="",
value=13.37,
resource_id="test_hostname",
host="test_hostname",
timestamp="2015-08-04T15:15:45.703542",
)
m_as_dict.return_value = measurement_dict
measurement = Measurement(**measurement_dict)
for driver in self.agent.drivers.values():
driver.send_measurements([measurement])
break # only the first one
expected_encoded_msg = msgpack.dumps(measurement_dict)
self.m_agent_socket.return_value.send.assert_called_once_with(
expected_encoded_msg
)
@patch.object(DummyMetricPuller, "is_alive")
@patch.object(DummyMetricPuller, "start")
@patch("watcher_metering.agent.manager.MetricManager.lock")
def test_check_drivers_alive(self, m_lock, m_start, m_is_alive):
m_lock.acquire = Mock(return_value=True) # Emulates a thread behavior
m_lock.release = Mock(return_value=True) # Emulates a thread behavior
m_is_alive.return_value = True # Emulates a thread that is running
m_start.return_value = None
self.agent.register_drivers()
self.agent.check_drivers_alive()
self.assertTrue(m_is_alive.called)
self.assertFalse(m_start.called)
@patch.object(DummyMetricPuller, "is_alive")
@patch.object(DummyMetricPuller, "start")
@patch("watcher_metering.agent.manager.MetricManager.lock")
def test_check_drivers_alive_with_driver_stopped(self, m_lock, m_start,
m_is_alive):
m_lock.acquire = Mock(return_value=True) # Emulates a thread behavior
m_lock.release = Mock(return_value=True) # Emulates a thread behavior
m_is_alive.side_effect = [False, True]
m_start.side_effect = [RuntimeError, True, True] # Fails once
self.agent.register_drivers()
# should re-run the driver
self.agent.check_drivers_alive()
self.assertEqual(m_is_alive.call_count, 1)
self.assertEqual(m_start.call_count, 2)
@patch.object(os._Environ, "__setitem__")
@patch("watcher_metering.agent.agent.os.environ.get")
def test_setup_nanoconfig_valid_using_default(self, m_env_getter,
m_env_setter):
# Override default where it is set to False
m_env_getter.side_effect = ["FAKE_NN_CONFIG_SERVICE",
"FAKE_NN_CONFIG_UPDATES"]
self.agent.use_nanoconfig_service = True
self.agent.nanoconfig_service_endpoint = ""
self.agent.nanoconfig_update_endpoint = ""
self.agent.set_nanoconfig_endpoints()
self.assertEqual(m_env_getter.call_count, 2)
m_env_getter.assert_any_call("NN_CONFIG_SERVICE") # First call
m_env_getter.assert_called_with("NN_CONFIG_UPDATES") # Last call
self.assertEqual(m_env_setter.call_count, 0)
self.assertEqual(self.agent.nanoconfig_service_endpoint,
"FAKE_NN_CONFIG_SERVICE")
self.assertEqual(self.agent.nanoconfig_update_endpoint,
"FAKE_NN_CONFIG_UPDATES")
@patch.object(os._Environ, "__setitem__")
@patch("watcher_metering.agent.agent.os.environ.get")
def test_setup_nanoconfig_valid_custom_values(self, m_env_getter,
m_env_setter):
# Override default where it is set to False
m_env_getter.side_effect = ["FAKE_NN_CONFIG_SERVICE",
"FAKE_NN_CONFIG_UPDATES"]
self.agent.use_nanoconfig_service = True
self.agent.nanoconfig_service_endpoint = "CUSTOM_NN_CONFIG_SERVICE"
self.agent.nanoconfig_update_endpoint = "CUSTOM_NN_CONFIG_UPDATES"
self.agent.set_nanoconfig_endpoints()
self.assertEqual(m_env_getter.call_count, 2)
m_env_getter.assert_any_call("NN_CONFIG_SERVICE")
m_env_getter.assert_called_with("NN_CONFIG_UPDATES")
m_env_setter.assert_any_call("NN_CONFIG_SERVICE",
"CUSTOM_NN_CONFIG_SERVICE")
m_env_setter.assert_called_with("NN_CONFIG_UPDATES",
"CUSTOM_NN_CONFIG_UPDATES")
self.assertEqual(self.agent.nanoconfig_service_endpoint,
"CUSTOM_NN_CONFIG_SERVICE")
self.assertEqual(self.agent.nanoconfig_update_endpoint,
"CUSTOM_NN_CONFIG_UPDATES")
@patch.object(os._Environ, "__setitem__")
@patch("watcher_metering.agent.agent.os.environ.get")
def test_setup_nanoconfig_invalid_service(self, m_env_getter,
m_env_setter):
# Override default where it is set to False
m_env_getter.return_value = "" # Emulates empty ENV vars
self.agent.use_nanoconfig_service = True
self.agent.nanoconfig_service_endpoint = ""
self.agent.nanoconfig_update_endpoint = "CUSTOM_NN_CONFIG_UPDATES"
self.assertRaises(ValueError, self.agent.set_nanoconfig_endpoints)
m_env_getter.assert_called_once_with("NN_CONFIG_SERVICE")
self.assertEqual(m_env_setter.call_count, 0)
@patch.object(os._Environ, "__setitem__")
@patch("watcher_metering.agent.agent.os.environ.get")
def test_setup_nanoconfig_invalid_update(self, m_env_getter, m_env_setter):
# Override default where it is set to False
m_env_getter.return_value = "" # Emulates empty ENV vars
self.agent.use_nanoconfig_service = True
self.agent.nanoconfig_service_endpoint = "CUSTOM_NN_CONFIG_SERVICE"
self.agent.nanoconfig_update_endpoint = ""
self.assertRaises(ValueError, self.agent.set_nanoconfig_endpoints)
m_env_getter.assert_any_call("NN_CONFIG_SERVICE")
m_env_getter.assert_called_with("NN_CONFIG_UPDATES")
m_env_setter.assert_called_once_with("NN_CONFIG_SERVICE",
"CUSTOM_NN_CONFIG_SERVICE")
@patch.object(Agent, 'check_drivers_alive', MagicMock())
@patch("watcher_metering.agent.manager."
"MetricManager.terminated",
new_callable=PropertyMock)
def test_run_agent(self, m_terminated):
# Patches the guard/exit condition of the thread periodic event loop
# -> 1st time = False (carry on) and 2nd = True (Should terminate)
m_terminated.side_effect = [False, True]
self.agent.run()
self.assertEqual(m_terminated.call_count, 2)
@patch.object(DummyMetricPuller, 'send_measurements', MagicMock())
def test_stop_agent(self):
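        # Start the manager thread for real, then stop it and verify that the
        # agent and every registered driver thread report terminated.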
self.agent.register_drivers()
self.agent.start()
self.agent.join(timeout=.01)
self.agent.stop()
self.assertEqual(len(self.agent.drivers.values()), 2)
self.assertTrue(
all([driver.terminated for driver in self.agent.drivers.values()])
)
self.assertTrue(self.agent.terminated)
self.assertFalse(self.agent.is_alive())
mmtbx/bulk_solvent/mosaic.py | ndevenish/cctbx_project | 0 | 8895 | from __future__ import absolute_import, division, print_function
from cctbx.array_family import flex
from scitbx import matrix
import math
from libtbx import adopt_init_args
import scitbx.lbfgs
from mmtbx.bulk_solvent import kbu_refinery
from cctbx import maptbx
import mmtbx.masks
import boost_adaptbx.boost.python as bp
asu_map_ext = bp.import_ext("cctbx_asymmetric_map_ext")
from libtbx import group_args
from mmtbx import bulk_solvent
from mmtbx.ncs import tncs
from collections import OrderedDict
import mmtbx.f_model
import sys
from libtbx.test_utils import approx_equal
from mmtbx import masks
from cctbx.masks import vdw_radii_from_xray_structure
ext = bp.import_ext("mmtbx_masks_ext")
mosaic_ext = bp.import_ext("mmtbx_mosaic_ext")
APPLY_SCALE_K1_TO_FOBS = False
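# Simple symmetric moving average with window half-width n. It is used further
# below (refinery.update_F) to smooth per-resolution-bin k_mask estimates before
# deciding whether a given mask region is worth keeping.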
def moving_average(x, n):
r = []
for i, xi in enumerate(x):
s = 0
cntr = 0
for j in range(max(0,i-n), min(i+n+1, len(x))):
s+=x[j]
cntr+=1
s = s/cntr
r.append(s)
return r
# Utilities used by algorithm 2 ------------------------------------------------
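# minimizer wraps plain scitbx LBFGS; minimizer2 drives the kbu_refinery LBFGS
# runner and can additionally pass diagonal curvature estimates (as 1/curvature)
# back to the optimizer.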
class minimizer(object):
def __init__(self, max_iterations, calculator):
adopt_init_args(self, locals())
self.x = self.calculator.x
self.cntr=0
exception_handling_params = scitbx.lbfgs.exception_handling_parameters(
ignore_line_search_failed_step_at_lower_bound=True,
)
self.minimizer = scitbx.lbfgs.run(
target_evaluator=self,
exception_handling_params=exception_handling_params,
termination_params=scitbx.lbfgs.termination_parameters(
max_iterations=max_iterations))
def compute_functional_and_gradients(self):
self.cntr+=1
self.calculator.update_target_and_grads(x=self.x)
t = self.calculator.target()
g = self.calculator.gradients()
#print "step: %4d"%self.cntr, "target:", t, "params:", \
# " ".join(["%10.6f"%i for i in self.x]), math.log(t)
return t,g
class minimizer2(object):
def __init__(self, calculator, min_iterations=0, max_iterations=2000):
adopt_init_args(self, locals())
self.x = self.calculator.x
self.n = self.x.size()
self.cntr=0
def run(self, use_curvatures=0):
self.minimizer = kbu_refinery.lbfgs_run(
target_evaluator=self,
min_iterations=self.min_iterations,
max_iterations=self.max_iterations,
use_curvatures=use_curvatures)
self(requests_f_and_g=True, requests_diag=False)
return self
def __call__(self, requests_f_and_g, requests_diag):
self.cntr+=1
self.calculator.update_target_and_grads(x=self.x)
if (not requests_f_and_g and not requests_diag):
requests_f_and_g = True
requests_diag = True
if (requests_f_and_g):
self.f = self.calculator.target()
self.g = self.calculator.gradients()
self.d = None
if (requests_diag):
self.d = self.calculator.curvatures()
#assert self.d.all_ne(0)
if(self.d.all_eq(0)): self.d=None
else:
self.d = 1 / self.d
#print "step: %4d"%self.cntr, "target:", self.f, "params:", \
# " ".join(["%10.6f"%i for i in self.x]) #, math.log(self.f)
return self.x, self.f, self.g, self.d
class tg(object):
def __init__(self, x, i_obs, F, use_curvatures):
self.x = x
self.i_obs = i_obs
self.F = F
self.t = None
self.g = None
self.d = None
    # Needed to do sums from small to large to prevent precision loss
s = flex.sort_permutation(self.i_obs.data())
self.i_obs = self.i_obs.select(s)
self.F = [f.select(s) for f in self.F]
#
self.sum_i_obs = flex.sum(self.i_obs.data()) # needed for Python version
self.use_curvatures=use_curvatures
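    # The C++ helper computes the least-squares target between the observed
    # intensities and the model intensity sum_{n,m} k_n*k_m*Re(F_n*conj(F_m)),
    # together with its gradients with respect to the scales k (see the
    # commented-out Python reference implementation below).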
self.tgo = mosaic_ext.alg2_tg(
F = [f.data() for f in self.F],
i_obs = self.i_obs.data())
self.update_target_and_grads(x=x)
def update(self, x):
self.update_target_and_grads(x = x)
def update_target_and_grads(self, x):
self.x = x
self.tgo.update(self.x)
self.t = self.tgo.target()
self.g = self.tgo.gradient()
#
# Reference implementation in Python
# s = 1 #180/math.pi
# i_model = flex.double(self.i_obs.data().size(),0)
# for n, kn in enumerate(self.x):
# for m, km in enumerate(self.x):
# tmp = self.F[n].data()*flex.conj(self.F[m].data())
# i_model += kn*km*flex.real(tmp)
# #pn = self.F[n].phases().data()*s
# #pm = self.F[m].phases().data()*s
# #Fn = flex.abs(self.F[n].data())
# #Fm = flex.abs(self.F[m].data())
# #i_model += kn*km*Fn*Fm*flex.cos(pn-pm)
# diff = i_model - self.i_obs.data()
# #print (flex.min(diff), flex.max(diff))
# t = flex.sum(diff*diff)/4
# #
# g = flex.double()
# for j in range(len(self.F)):
# tmp = flex.double(self.i_obs.data().size(),0)
# for m, km in enumerate(self.x):
# tmp += km * flex.real( self.F[j].data()*flex.conj(self.F[m].data()) )
# #pj = self.F[j].phases().data()*s
# #pm = self.F[m].phases().data()*s
# #Fj = flex.abs(self.F[j].data())
# #Fm = flex.abs(self.F[m].data())
# #tmp += km * Fj*Fm*flex.cos(pj-pm)
# g.append(flex.sum(diff*tmp))
# self.t = t/self.sum_i_obs
# self.g = g/self.sum_i_obs
# #print (self.t,t1)
# #print (list(self.g))
# #print (list(g1))
# #print ()
# #assert approx_equal(self.t, t1, 5)
# #assert approx_equal(self.g, g1, 1.e-6)
#
if self.use_curvatures:
d = flex.double()
for j in range(len(self.F)):
tmp1 = flex.double(self.i_obs.data().size(),0)
tmp2 = flex.double(self.i_obs.data().size(),0)
for m, km in enumerate(self.x):
zz = flex.real( self.F[j].data()*flex.conj(self.F[m].data()) )
tmp1 += km * zz
tmp2 += zz
#pj = self.F[j].phases().data()*s
#pm = self.F[m].phases().data()*s
#Fj = flex.abs(self.F[j].data())
#Fm = flex.abs(self.F[m].data())
#tmp += km * Fj*Fm*flex.cos(pj-pm)
d.append(flex.sum(tmp1*tmp1 + tmp2))
self.d=d
def target(self): return self.t
def gradients(self): return self.g
def gradient(self): return self.gradients()
def curvatures(self): return self.d/self.sum_i_obs
#-------------------------------------------------------------------------------
def write_map_file(crystal_symmetry, map_data, file_name):
from iotbx import mrcfile
mrcfile.write_ccp4_map(
file_name = file_name,
unit_cell = crystal_symmetry.unit_cell(),
space_group = crystal_symmetry.space_group(),
map_data = map_data,
labels = flex.std_string([""]))
class refinery(object):
def __init__(self, fmodel, fv, alg, anomaly=True, log = sys.stdout):
assert alg in ["alg0", "alg2", "alg4", None]
self.log = log
self.f_obs = fmodel.f_obs()
self.r_free_flags = fmodel.r_free_flags()
k_mask_overall = fmodel.k_masks()[0]
self.bin_selections = fmodel.bin_selections
#
k_total = fmodel.k_total()
self.f_calc = fmodel.f_model()
self.F = [self.f_calc.deep_copy()] + fv.keys()
#
n_zones_start = len(self.F)
r4_start = fmodel.r_work4()
for it in range(5):
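      # Up to 5 macro-cycles: re-estimate per-bin k_mask values for every region,
      # rebuild f_bulk and the f_model manager, and stop early once r_work4 no
      # longer changes (or once only the largest mask is left).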
#
if(it>0):
r4 = self.fmodel.r_work4()
print(r4_start, r4, abs(round(r4-r4_start,4)))
if(abs(round(r4-r4_start,4))<1.e-4):
break
r4_start = r4
#if(it>0 and n_zones_start == len(self.F)): break
#
#if it>0:
# self.F = [self.fmodel.f_model().deep_copy()] + self.F[1:]
self._print("cycle: %2d"%it)
self._print(" volumes: "+" ".join([str(fv[f]) for f in self.F[1:]]))
f_obs = self.f_obs.deep_copy()
if it==0: k_total = fmodel.k_total()
else: k_total = self.fmodel.k_total()
i_obs = f_obs.customized_copy(data = f_obs.data()*f_obs.data())
K_MASKS = OrderedDict()
self.bin_selections = self.f_obs.log_binning(
n_reflections_in_lowest_resolution_bin = 100*len(self.F))
for i_bin, sel in enumerate(self.bin_selections):
d_max, d_min = f_obs.select(sel).d_max_min()
if d_max<3: continue
bin = " bin %2d: %5.2f-%-5.2f: "%(i_bin, d_max, d_min)
F = [f.select(sel) for f in self.F]
k_total_sel = k_total.select(sel)
F_scaled = [F[0].deep_copy()]+[f.customized_copy(data=f.data()*k_total_sel) for f in F[1:]]
#
# XXX WHY NOT THIS INSTEAD (INVESTIGATE LATER)?
#F_scaled = [f.customized_copy(data=f.data()*k_total_sel) for f in F]
#r00=bulk_solvent.r_factor(f_obs.select(sel).data()*k_total_sel, F[0].data()*k_total_sel)
# algorithm_0
if(alg=="alg0"):
k_masks = algorithm_0(
f_obs = f_obs.select(sel),
F = F_scaled,
kt=k_total_sel)
#fd = flex.complex_double(F[0].data().size())
#for i,f in enumerate(F):
# fd = fd + f.data()*k_masks[i]
#r0=bulk_solvent.r_factor(f_obs.select(sel).data()*k_total_sel, fd*k_total_sel)
# algorithm_4
if(alg=="alg4"):
if it==0: phase_source = fmodel.f_model().select(sel)
else: phase_source = self.fmodel.f_model().select(sel)
k_masks = algorithm_4(
f_obs = self.f_obs.select(sel),
F = F_scaled,
auto_converge_eps = 0.0001,
phase_source = phase_source)
#fd = flex.complex_double(F[0].data().size())
#for i,f in enumerate(F):
# fd = fd + f.data()*k_masks[i]
#r4=bulk_solvent.r_factor(f_obs.select(sel).data()*k_total_sel, fd*k_total_sel)
# algorithm_2
if(alg=="alg2"):
k_masks = algorithm_2(
i_obs = i_obs.select(sel),
F = F_scaled,
x = self._get_x_init(i_bin),
use_curvatures = False)
#fd = flex.complex_double(F[0].data().size())
#for i,f in enumerate(F):
# fd = fd + f.data()*k_masks[i]
#r2=bulk_solvent.r_factor(f_obs.select(sel).data()*k_total_sel, fd*k_total_sel)
#self._print(bin+" ".join(["%6.2f"%k for k in k_masks])+" %6.4f %6.4f %6.4f %6.4f"%(r00,r0,r4, r2))
k_mean = flex.mean(k_mask_overall.select(sel))
k_masks_plus = [k_masks[0]]+[k_mean + k for k in k_masks[1:]]
self._print(bin+" ".join(["%6.2f"%k for k in k_masks_plus]) )
K_MASKS[sel] = [k_masks, k_masks_plus]
#
if(len(self.F)==2): break # stop and fall back onto using largest mask
#
#
#print()
#self.update_k_masks(K_MASKS)
#for k_masks in K_MASKS.values():
# self._print(bin+" ".join(["%6.2f"%k for k in k_masks]))
#
f_calc_data = self.f_calc.data().deep_copy()
f_bulk_data = flex.complex_double(fmodel.f_calc().data().size(), 0)
for sel, k_masks in zip(K_MASKS.keys(), K_MASKS.values()):
k_masks = k_masks[0] # 1 is shifted!
f_bulk_data_ = flex.complex_double(sel.count(True), 0)
for i_mask, k_mask in enumerate(k_masks):
if i_mask==0:
f_calc_data = f_calc_data.set_selected(sel,
f_calc_data.select(sel)*k_mask)
continue
f_bulk_data_ += self.F[i_mask].data().select(sel)*k_mask
f_bulk_data = f_bulk_data.set_selected(sel,f_bulk_data_)
#
self.update_F(K_MASKS)
f_bulk = fmodel.f_calc().customized_copy(data = f_bulk_data)
if(len(self.F)==2):
self.fmodel = mmtbx.f_model.manager(
f_obs = self.f_obs,
r_free_flags = self.r_free_flags,
f_calc = fmodel.f_calc(),
f_mask = self.F[1],
k_mask = flex.double(f_obs.data().size(),1)
)
self.fmodel.update_all_scales(remove_outliers=False,
apply_scale_k1_to_f_obs = APPLY_SCALE_K1_TO_FOBS)
else:
self.fmodel = mmtbx.f_model.manager(
f_obs = self.f_obs,
r_free_flags = self.r_free_flags,
#f_calc = self.f_obs.customized_copy(data = f_calc_data),
f_calc = self.f_calc,
bin_selections = self.bin_selections,
f_mask = f_bulk,
k_mask = flex.double(f_obs.data().size(),1)
)
self.fmodel.update_all_scales(remove_outliers=False,
apply_scale_k1_to_f_obs = APPLY_SCALE_K1_TO_FOBS)
#
self.fmodel = mmtbx.f_model.manager(
f_obs = self.f_obs,
r_free_flags = self.r_free_flags,
#f_calc = self.f_obs.customized_copy(data = f_calc_data),
f_calc = self.fmodel.f_calc(),
f_mask = self.fmodel.f_bulk(),
k_mask = flex.double(f_obs.data().size(),1)
)
self.fmodel.update_all_scales(remove_outliers=False,
apply_scale_k1_to_f_obs = APPLY_SCALE_K1_TO_FOBS)
self._print(self.fmodel.r_factors(prefix=" "))
#self._print(self.fmodel.r_factors(prefix=" "))
self.mc = self.fmodel.electron_density_map().map_coefficients(
map_type = "mFobs-DFmodel",
isotropize = True,
exclude_free_r_reflections = False)
#def update_k_masks(self, K_MASKS):
# tmp = []
# for i_mask, F in enumerate(self.F):
# k_masks = [k_masks_bin[i_mask] for k_masks_bin in K_MASKS.values()]
# found = False
# for i_bin, k_masks_bin in enumerate(K_MASKS.values()):
# if(not found and k_masks_bin[i_mask]<=0.009):
# found = True
# K_MASKS.values()[i_bin][i_mask]=0
# elif found:
# K_MASKS.values()[i_bin][i_mask]=0
def _print(self, m):
if(self.log is not None):
print(m, file=self.log)
def update_F(self, K_MASKS):
tmp = []
for i_mask, F in enumerate(self.F):
k_masks = [k_masks_bin[1][i_mask] for k_masks_bin in K_MASKS.values()]
if(i_mask == 0): tmp.append(self.F[0])
elif moving_average(k_masks,2)[0]>=0.03: tmp.append(F)
self.F = tmp[:]
def _get_x_init(self, i_bin):
return flex.double([1] + [1]*len(self.F[1:]))
#k_maks1_init = 0.35 - i_bin*0.35/len(self.bin_selections)
#x = flex.double([1,k_maks1_init])
#x.extend( flex.double(len(self.F)-2, 0.1))
#return x
def get_f_mask(xrs, ma, step, option = 2, r_shrink = None, r_sol = None):
crystal_gridding = maptbx.crystal_gridding(
unit_cell = xrs.unit_cell(),
space_group_info = xrs.space_group_info(),
symmetry_flags = maptbx.use_space_group_symmetry,
step = step)
n_real = crystal_gridding.n_real()
atom_radii = vdw_radii_from_xray_structure(xray_structure = xrs)
mask_params = masks.mask_master_params.extract()
grid_step_factor = ma.d_min()/step
if(r_shrink is not None): mask_params.shrink_truncation_radius = r_shrink
if(r_sol is not None): mask_params.solvent_radius = r_sol
mask_params.grid_step_factor = grid_step_factor
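  # Options 1-6 below are alternative code paths to the same quantity: the
  # structure factors of the bulk-solvent mask. They are kept side by side for
  # cross-checking (note that option 6 gives no control over the gridding, so
  # its result may differ slightly from the others).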
# 1
if(option==1):
asu_mask = ext.atom_mask(
unit_cell = xrs.unit_cell(),
group = xrs.space_group(),
resolution = ma.d_min(),
grid_step_factor = grid_step_factor,
solvent_radius = mask_params.solvent_radius,
shrink_truncation_radius = mask_params.shrink_truncation_radius)
asu_mask.compute(xrs.sites_frac(), atom_radii)
fm_asu = asu_mask.structure_factors(ma.indices())
f_mask = ma.set().array(data = fm_asu)
# 2
elif(option==2):
asu_mask = ext.atom_mask(
unit_cell = xrs.unit_cell(),
space_group = xrs.space_group(),
gridding_n_real = n_real,
solvent_radius = mask_params.solvent_radius,
shrink_truncation_radius = mask_params.shrink_truncation_radius)
asu_mask.compute(xrs.sites_frac(), atom_radii)
fm_asu = asu_mask.structure_factors(ma.indices())
f_mask = ma.set().array(data = fm_asu)
# 3
elif(option==3):
mask_p1 = mmtbx.masks.mask_from_xray_structure(
xray_structure = xrs,
p1 = True,
for_structure_factors = True,
solvent_radius = mask_params.solvent_radius,
shrink_truncation_radius = mask_params.shrink_truncation_radius,
n_real = n_real,
in_asu = False).mask_data
maptbx.unpad_in_place(map=mask_p1)
mask = asu_map_ext.asymmetric_map(
xrs.crystal_symmetry().space_group().type(), mask_p1).data()
f_mask = ma.structure_factors_from_asu_map(
asu_map_data = mask, n_real = n_real)
# 4
elif(option==4):
f_mask = masks.bulk_solvent(
xray_structure = xrs,
ignore_zero_occupancy_atoms = False,
solvent_radius = mask_params.solvent_radius,
shrink_truncation_radius = mask_params.shrink_truncation_radius,
ignore_hydrogen_atoms = False,
grid_step = step,
atom_radii = atom_radii).structure_factors(
miller_set = ma)
elif(option==5):
o = mmtbx.masks.bulk_solvent(
xray_structure = xrs,
ignore_zero_occupancy_atoms = False,
solvent_radius = mask_params.solvent_radius,
shrink_truncation_radius = mask_params.shrink_truncation_radius,
ignore_hydrogen_atoms = False,
gridding_n_real = n_real,
atom_radii = atom_radii)
assert approx_equal(n_real, o.data.accessor().all())
f_mask = o.structure_factors(ma)
elif(option==6):
# XXX No control over n_real, so results with others don't match
mask_manager = masks.manager(
miller_array = ma,
miller_array_twin = None,
mask_params = mask_params)
f_mask = mask_manager.shell_f_masks(xray_structure=xrs, force_update=True)[0]
else: assert 0
#
return f_mask
def filter_mask(mask_p1, volume_cutoff, crystal_symmetry,
for_structure_factors = False):
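  # Remove connected solvent regions smaller than volume_cutoff from a P1 mask
  # and return the surviving regions merged into a single binary map, optionally
  # rescaled for use in structure-factor calculation.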
co = maptbx.connectivity(
map_data = mask_p1,
threshold = 0.01,
preprocess_against_shallow = True,
wrapping = True)
mi, ma = flex.min(mask_p1), flex.max(mask_p1)
print (mask_p1.size(), (mask_p1<0).count(True))
assert mi == 0, mi
assert ma == 1, ma
a,b,c = crystal_symmetry.unit_cell().parameters()[:3]
na,nb,nc = mask_p1.accessor().all()
step = flex.mean(flex.double([a/na, b/nb, c/nc]))
if(crystal_symmetry.space_group_number() != 1):
co.merge_symmetry_related_regions(space_group=crystal_symmetry.space_group())
conn = co.result().as_double()
z = zip(co.regions(),range(0,co.regions().size()))
sorted_by_volume = sorted(z, key=lambda x: x[0], reverse=True)
for i_seq, p in enumerate(sorted_by_volume):
v, i = p
if(i==0): continue # skip macromolecule
# skip small volume
volume = v*step**3
if volume < volume_cutoff:
conn = conn.set_selected(conn==i, 0)
conn = conn.set_selected(conn>0, 1)
if for_structure_factors:
conn = conn / crystal_symmetry.space_group().order_z()
return conn
class mosaic_f_mask(object):
def __init__(self,
xray_structure,
step,
volume_cutoff=None,
mean_diff_map_threshold=None,
compute_whole=False,
preprocess_against_shallow=True,
largest_only=False,
wrapping=True,
f_obs=None,
r_sol=1.1,
r_shrink=0.9,
f_calc=None,
log = None,
write_masks=False):
adopt_init_args(self, locals())
#
self.dsel = f_obs.d_spacings().data()>=0 # XXX WHY????????????
self.miller_array = f_obs.select(self.dsel)
#
# To avoid "Miller index not in structure factor map" crash
step = min(step, self.miller_array.d_min()/3)
#
self.crystal_symmetry = self.xray_structure.crystal_symmetry()
# compute mask in p1 (via ASU)
self.crystal_gridding = maptbx.crystal_gridding(
unit_cell = xray_structure.unit_cell(),
space_group_info = xray_structure.space_group_info(),
symmetry_flags = maptbx.use_space_group_symmetry,
step = step)
self.n_real = self.crystal_gridding.n_real()
# XXX Where do we want to deal with H and occ==0?
mask_p1 = mmtbx.masks.mask_from_xray_structure(
xray_structure = xray_structure,
p1 = True,
for_structure_factors = True,
solvent_radius = r_sol,
shrink_truncation_radius = r_shrink,
n_real = self.n_real,
in_asu = False).mask_data
maptbx.unpad_in_place(map=mask_p1)
self.f_mask_whole = None
if(compute_whole):
mask = asu_map_ext.asymmetric_map(
xray_structure.crystal_symmetry().space_group().type(), mask_p1).data()
self.f_mask_whole = self.miller_array.structure_factors_from_asu_map(
asu_map_data = mask, n_real = self.n_real)
self.solvent_content = 100.*mask_p1.count(1)/mask_p1.size()
if(write_masks):
write_map_file(crystal_symmetry=xray_structure.crystal_symmetry(),
map_data=mask_p1, file_name="mask_whole.mrc")
# conn analysis
co = maptbx.connectivity(
map_data = mask_p1,
threshold = 0.01,
preprocess_against_shallow = preprocess_against_shallow,
wrapping = wrapping)
co.merge_symmetry_related_regions(space_group=xray_structure.space_group())
del mask_p1
self.conn = co.result().as_double()
z = zip(co.regions(),range(0,co.regions().size()))
sorted_by_volume = sorted(z, key=lambda x: x[0], reverse=True)
#
f_mask_data_0 = flex.complex_double(f_obs.data().size(), 0)
f_mask_data = flex.complex_double(f_obs.data().size(), 0)
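    # f_mask_data_0 accumulates only the large regions (>= 1% of the unit cell)
    # and defines f_mask_0, while f_mask_data accumulates every region that
    # passes the volume and mean-difference-map filters and defines the mosaic
    # f_mask.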
self.FV = OrderedDict()
self.mc = None
diff_map = None
mean_diff_map = None
self.regions = OrderedDict()
self.f_mask_0 = None
self.f_mask = None
#
if(log is not None):
print(" # volume_p1 uc(%) mFo-DFc: min,max,mean,sd", file=log)
#
for i_seq, p in enumerate(sorted_by_volume):
v, i = p
# skip macromolecule
if(i==0): continue
# skip small volume
volume = v*step**3
uc_fraction = v*100./self.conn.size()
if(volume_cutoff is not None):
if volume < volume_cutoff: continue
selection = self.conn==i
mask_i_asu = self.compute_i_mask_asu(selection = selection, volume = volume)
volume_asu = (mask_i_asu>0).count(True)*step**3
if(uc_fraction >= 1):
f_mask_i = self.compute_f_mask_i(mask_i_asu)
f_mask_data_0 += f_mask_i.data()
elif(largest_only): break
if(uc_fraction < 1 and diff_map is None):
diff_map = self.compute_diff_map(f_mask_data = f_mask_data_0)
mi,ma,me,sd = None,None,None,None
if(diff_map is not None):
blob = diff_map.select(selection.iselection())
mean_diff_map = flex.mean(diff_map.select(selection.iselection()))
mi,ma,me = flex.min(blob), flex.max(blob), flex.mean(blob)
sd = blob.sample_standard_deviation()
if(log is not None):
print("%3d"%i_seq,"%12.3f"%volume, "%8.4f"%round(uc_fraction,4),
"%7s"%str(None) if diff_map is None else "%7.3f %7.3f %7.3f %7.3f"%(
mi,ma,me,sd), file=log)
if(mean_diff_map_threshold is not None and
mean_diff_map is not None and mean_diff_map<=mean_diff_map_threshold):
continue
self.regions[i_seq] = group_args(
id = i,
i_seq = i_seq,
volume = volume,
uc_fraction = uc_fraction,
diff_map = group_args(mi=mi, ma=ma, me=me, sd=sd))
f_mask_i = self.compute_f_mask_i(mask_i_asu)
f_mask_data += f_mask_i.data()
self.FV[f_mask_i] = [round(volume, 3), round(uc_fraction,1)]
#
self.f_mask_0 = f_obs.customized_copy(data = f_mask_data_0)
self.f_mask = f_obs.customized_copy(data = f_mask_data)
self.do_mosaic = False
self.n_regions = len(self.FV.keys())
if(self.n_regions>1):
self.do_mosaic = True
def compute_f_mask_i(self, mask_i_asu):
f_mask_i = self.miller_array.structure_factors_from_asu_map(
asu_map_data = mask_i_asu, n_real = self.n_real)
data = flex.complex_double(self.dsel.size(), 0)
data = data.set_selected(self.dsel, f_mask_i.data())
return self.f_obs.set().array(data = data)
def compute_diff_map(self, f_mask_data):
if(self.f_calc is None): return None
f_mask = self.f_obs.customized_copy(data = f_mask_data)
fmodel = mmtbx.f_model.manager(
f_obs = self.f_obs,
f_calc = self.f_calc,
f_mask = f_mask)
fmodel = fmodel.select(self.dsel)
fmodel.update_all_scales(remove_outliers=True,
apply_scale_k1_to_f_obs = APPLY_SCALE_K1_TO_FOBS)
self.mc = fmodel.electron_density_map().map_coefficients(
map_type = "mFobs-DFmodel",
isotropize = True,
exclude_free_r_reflections = False)
fft_map = self.mc.fft_map(crystal_gridding = self.crystal_gridding)
fft_map.apply_sigma_scaling()
return fft_map.real_map_unpadded()
def compute_i_mask_asu(self, selection, volume):
mask_i = flex.double(flex.grid(self.n_real), 0)
mask_i = mask_i.set_selected(selection, 1)
if(self.write_masks):
write_map_file(
crystal_symmetry = self.crystal_symmetry,
map_data = mask_i,
file_name = "mask_%s.mrc"%str(round(volume,3)))
tmp = asu_map_ext.asymmetric_map(
self.crystal_symmetry.space_group().type(), mask_i).data()
return tmp
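# A rough usage sketch (illustrative only; the names below are not part of this
# module): build the region decomposition first, then refine per-region scales
# with the refinery class, e.g.
#
#   mfm = mosaic_f_mask(xray_structure=xrs, step=0.6, volume_cutoff=30,
#                       f_obs=fmodel.f_obs(), f_calc=fmodel.f_calc())
#   if(mfm.do_mosaic):
#     result = refinery(fmodel=fmodel, fv=mfm.FV, alg="alg4")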
def algorithm_0(f_obs, F, kt):
"""
Grid search
"""
fc, f_masks = F[0], F[1:]
k_mask_trial_range=[]
s = -1
while s<1:
k_mask_trial_range.append(s)
s+=0.0001
r = []
fc_data = fc.data()
for i, f_mask in enumerate(f_masks):
#print("mask ",i)
assert f_obs.data().size() == fc.data().size()
assert f_mask.data().size() == fc.data().size()
#print (bulk_solvent.r_factor(f_obs.data(),fc_data))
kmask_, k_ = \
bulk_solvent.k_mask_and_k_overall_grid_search(
f_obs.data()*kt,
fc_data*kt,
f_mask.data()*kt,
flex.double(k_mask_trial_range),
flex.bool(fc.data().size(),True))
r.append(kmask_)
fc_data += fc_data*k_ + kmask_*f_mask.data()
#print (bulk_solvent.r_factor(f_obs.data(),fc_data + kmask_*f_mask.data(),k_))
r = [1,]+r
return r
def algorithm_2(i_obs, F, x, use_curvatures=True, macro_cycles=10):
"""
Unphased one-step search
"""
calculator = tg(i_obs = i_obs, F=F, x = x, use_curvatures=use_curvatures)
for it in range(macro_cycles):
if(use_curvatures):
m = minimizer(max_iterations=100, calculator=calculator)
else:
#upper = flex.double([1.1] + [1]*(x.size()-1))
#lower = flex.double([0.9] + [-1]*(x.size()-1))
upper = flex.double([1.1] + [5]*(x.size()-1))
lower = flex.double([0.9] + [-5]*(x.size()-1))
#upper = flex.double([10] + [5]*(x.size()-1))
#lower = flex.double([0.1] + [-5]*(x.size()-1))
#upper = flex.double([10] + [0.65]*(x.size()-1))
#lower = flex.double([0.1] + [0]*(x.size()-1))
#upper = flex.double([1] + [0.65]*(x.size()-1))
#lower = flex.double([1] + [0]*(x.size()-1))
#upper = flex.double([1] + [5.65]*(x.size()-1))
#lower = flex.double([1] + [-5]*(x.size()-1))
m = tncs.minimizer(
potential = calculator,
use_bounds = 2,
lower_bound = lower,
upper_bound = upper,
initial_values = x).run()
calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=use_curvatures)
if(use_curvatures):
for it in range(10):
m = minimizer(max_iterations=100, calculator=calculator)
calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=use_curvatures)
m = minimizer2(max_iterations=100, calculator=calculator).run(use_curvatures=True)
calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=use_curvatures)
return m.x
def algorithm_3(i_obs, fc, f_masks):
"""
Unphased two-step search
"""
F = [fc]+f_masks
Gnm = []
cs = {}
cntr=0
nm=[]
# Compute and store Gnm
for n, Fn in enumerate(F):
for m, Fm in enumerate(F):
if m < n:
continue
Gnm.append( flex.real( Fn.data()*flex.conj(Fm.data()) ) )
cs[(n,m)] = cntr
cntr+=1
nm.append((n,m))
# Keep track of indices for "upper triangular matrix vs full"
for k,v in zip(list(cs.keys()), list(cs.values())):
i,j=k
if i==j: continue
else: cs[(j,i)]=v
# Generate and solve system Ax=b, x = A_1*b
A = []
b = []
for u, Gnm_u in enumerate(Gnm):
for v, Gnm_v in enumerate(Gnm):
scale = 2
n,m=nm[v]
if n==m: scale=1
A.append( flex.sum(Gnm_u*Gnm_v)*scale )
b.append( flex.sum(Gnm_u * i_obs.data()) )
A = matrix.sqr(A)
A_1 = A.inverse()
b = matrix.col(b)
x = A_1 * b
# Expand Xmn from solution x
Xmn = []
for n, Fn in enumerate(F):
rows = []
for m, Fm in enumerate(F):
x_ = x[cs[(n,m)]]
rows.append(x_)
Xmn.append(rows)
# Do formula (19)
lnK = []
for j, Fj in enumerate(F):
t1 = flex.sum( flex.log( flex.double(Xmn[j]) ) )
t2 = 0
for n, Fn in enumerate(F):
for m, Fm in enumerate(F):
t2 += math.log(Xmn[n][m])
t2 = t2 / (2*len(F))
lnK.append( 1/len(F)*(t1-t2) )
return [math.exp(x) for x in lnK]
def algorithm_4(f_obs, F, phase_source, max_cycles=100, auto_converge_eps=1.e-7,
use_cpp=True):
"""
Phased simultaneous search (alg4)
"""
fc, f_masks = F[0], F[1:]
fc = fc.deep_copy()
F = [fc]+F[1:]
# C++ version
if(use_cpp):
return mosaic_ext.alg4(
[f.data() for f in F],
f_obs.data(),
phase_source.data(),
max_cycles,
auto_converge_eps)
# Python version (1.2-3 times slower, but much more readable!)
cntr = 0
x_prev = None
while True:
f_obs_cmpl = f_obs.phase_transfer(phase_source = phase_source)
A = []
b = []
for j, Fj in enumerate(F):
A_rows = []
for n, Fn in enumerate(F):
Gjn = flex.real( Fj.data()*flex.conj(Fn.data()) )
A_rows.append( flex.sum(Gjn) )
Hj = flex.real( Fj.data()*flex.conj(f_obs_cmpl.data()) )
b.append(flex.sum(Hj))
A.extend(A_rows)
A = matrix.sqr(A)
A_1 = A.inverse()
b = matrix.col(b)
x = A_1 * b
#
fc_d = flex.complex_double(phase_source.indices().size(), 0)
for i, f in enumerate(F):
fc_d += f.data()*x[i]
phase_source = phase_source.customized_copy(data = fc_d)
x_ = x[:]
#
cntr+=1
if(cntr>max_cycles): break
if(x_prev is None): x_prev = x_[:]
else:
max_diff = flex.max(flex.abs(flex.double(x_prev)-flex.double(x_)))
if(max_diff<=auto_converge_eps): break
x_prev = x_[:]
return x_
mars/tensor/execution/tests/test_base_execute.py | lmatz/mars | 1 | 8896 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import scipy.sparse as sps
from mars.tensor.execution.core import Executor
from mars import tensor as mt
from mars.tensor.expressions.datasource import tensor, ones, zeros, arange
from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, \
expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, \
hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, \
flip, flipud, fliplr, repeat, tile, isin
from mars.tensor.expressions.merge import stack
from mars.tensor.expressions.reduction import all as tall
class Test(unittest.TestCase):
def setUp(self):
self.executor = Executor('numpy')
def testRechunkExecution(self):
raw = np.random.random((11, 8))
arr = tensor(raw, chunks=3)
arr2 = arr.rechunk(4)
res = self.executor.execute_tensor(arr2)
self.assertTrue(np.array_equal(res[0], raw[:4, :4]))
self.assertTrue(np.array_equal(res[1], raw[:4, 4:]))
self.assertTrue(np.array_equal(res[2], raw[4:8, :4]))
self.assertTrue(np.array_equal(res[3], raw[4:8, 4:]))
self.assertTrue(np.array_equal(res[4], raw[8:, :4]))
self.assertTrue(np.array_equal(res[5], raw[8:, 4:]))
def testCopytoExecution(self):
a = ones((2, 3), chunks=1)
b = tensor([3, -1, 3], chunks=2)
copyto(a, b, where=b > 1)
res = self.executor.execute_tensor(a, concat=True)[0]
expected = np.array([[3, 1, 3], [3, 1, 3]])
np.testing.assert_equal(res, expected)
def testAstypeExecution(self):
raw = np.random.random((10, 5))
arr = tensor(raw, chunks=3)
arr2 = arr.astype('i8')
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0], raw.astype('i8')))
raw = sps.random(10, 5, density=.2)
arr = tensor(raw, chunks=3)
arr2 = arr.astype('i8')
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0].toarray(), raw.astype('i8').toarray()))
def testTransposeExecution(self):
raw = np.random.random((11, 8, 5))
arr = tensor(raw, chunks=3)
arr2 = transpose(arr)
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0], raw.T))
arr3 = transpose(arr, axes=(-2, -1, -3))
res = self.executor.execute_tensor(arr3, concat=True)
self.assertTrue(np.array_equal(res[0], raw.transpose(1, 2, 0)))
raw = sps.random(11, 8)
arr = tensor(raw, chunks=3)
arr2 = transpose(arr)
self.assertTrue(arr2.issparse())
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0].toarray(), raw.T.toarray()))
def testSwapaxesExecution(self):
raw = np.random.random((11, 8, 5))
arr = tensor(raw, chunks=3)
arr2 = arr.swapaxes(2, 0)
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0], raw.swapaxes(2, 0)))
raw = sps.random(11, 8, density=.2)
arr = tensor(raw, chunks=3)
arr2 = arr.swapaxes(1, 0)
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0].toarray(), raw.toarray().swapaxes(1, 0)))
def testMoveaxisExecution(self):
x = zeros((3, 4, 5), chunks=2)
t = moveaxis(x, 0, -1)
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (4, 5, 3))
t = moveaxis(x, -1, 0)
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (5, 3, 4))
t = moveaxis(x, [0, 1], [-1, -2])
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (5, 4, 3))
t = moveaxis(x, [0, 1, 2], [-1, -2, -3])
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (5, 4, 3))
def testBroadcastToExecution(self):
raw = np.random.random((10, 5, 1))
arr = tensor(raw, chunks=2)
arr2 = broadcast_to(arr, (5, 10, 5, 6))
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0], np.broadcast_to(raw, (5, 10, 5, 6))))
def testBroadcastArraysExecutions(self):
x_data = [[1, 2, 3]]
x = tensor(x_data, chunks=1)
y_data = [[1], [2], [3]]
y = tensor(y_data, chunks=2)
a = broadcast_arrays(x, y)
res = [self.executor.execute_tensor(arr, concat=True)[0] for arr in a]
expected = np.broadcast_arrays(x_data, y_data)
for r, e in zip(res, expected):
np.testing.assert_equal(r, e)
def testWhereExecution(self):
raw_cond = np.random.randint(0, 2, size=(4, 4), dtype='?')
raw_x = np.random.rand(4, 1)
raw_y = np.random.rand(4, 4)
cond, x, y = tensor(raw_cond, chunks=2), tensor(raw_x, chunks=2), tensor(raw_y, chunks=2)
arr = where(cond, x, y)
res = self.executor.execute_tensor(arr, concat=True)
self.assertTrue(np.array_equal(res[0], np.where(raw_cond, raw_x, raw_y)))
raw_cond = sps.csr_matrix(np.random.randint(0, 2, size=(4, 4), dtype='?'))
raw_x = sps.random(4, 1, density=.1)
raw_y = sps.random(4, 4, density=.1)
cond, x, y = tensor(raw_cond, chunks=2), tensor(raw_x, chunks=2), tensor(raw_y, chunks=2)
arr = where(cond, x, y)
res = self.executor.execute_tensor(arr, concat=True)[0]
self.assertTrue(np.array_equal(res.toarray(),
np.where(raw_cond.toarray(), raw_x.toarray(), raw_y.toarray())))
def testReshapeExecution(self):
raw_data = np.random.rand(10, 20, 30)
x = tensor(raw_data, chunks=6)
y = x.reshape(-1, 30)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(-1, 30)))
y2 = x.reshape(10, -1)
res = self.executor.execute_tensor(y2, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(10, -1)))
y3 = x.reshape(-1)
res = self.executor.execute_tensor(y3, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(-1)))
y4 = x.ravel()
res = self.executor.execute_tensor(y4, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.ravel()))
raw_data = np.random.rand(30, 100, 20)
x = tensor(raw_data, chunks=6)
y = x.reshape(-1, 20, 5, 5, 4)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(-1, 20, 5, 5, 4)))
y2 = x.reshape(3000, 10, 2)
res = self.executor.execute_tensor(y2, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(3000, 10, 2)))
y3 = x.reshape(60, 25, 40)
res = self.executor.execute_tensor(y3, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(60, 25, 40)))
def testExpandDimsExecution(self):
raw_data = np.random.rand(10, 20, 30)
x = tensor(raw_data, chunks=6)
y = expand_dims(x, 1)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, 1)))
y = expand_dims(x, 0)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, 0)))
y = expand_dims(x, 3)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, 3)))
y = expand_dims(x, -1)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, -1)))
y = expand_dims(x, -4)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, -4)))
with self.assertRaises(np.AxisError):
expand_dims(x, -5)
with self.assertRaises(np.AxisError):
expand_dims(x, 4)
def testRollAxisExecution(self):
x = ones((3, 4, 5, 6), chunks=1)
y = rollaxis(x, 3, 1)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.rollaxis(np.ones((3, 4, 5, 6)), 3, 1)))
def testAtleast1dExecution(self):
x = 1
y = ones(3, chunks=2)
z = ones((3, 4), chunks=2)
t = atleast_1d(x, y, z)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in t]
self.assertTrue(np.array_equal(res[0], np.array([1])))
self.assertTrue(np.array_equal(res[1], np.ones(3)))
self.assertTrue(np.array_equal(res[2], np.ones((3, 4))))
def testAtleast2dExecution(self):
x = 1
y = ones(3, chunks=2)
z = ones((3, 4), chunks=2)
t = atleast_2d(x, y, z)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in t]
self.assertTrue(np.array_equal(res[0], np.array([[1]])))
self.assertTrue(np.array_equal(res[1], np.atleast_2d(np.ones(3))))
self.assertTrue(np.array_equal(res[2], np.ones((3, 4))))
def testAtleast3dExecution(self):
x = 1
y = ones(3, chunks=2)
z = ones((3, 4), chunks=2)
t = atleast_3d(x, y, z)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in t]
self.assertTrue(np.array_equal(res[0], np.atleast_3d(x)))
self.assertTrue(np.array_equal(res[1], np.atleast_3d(np.ones(3))))
self.assertTrue(np.array_equal(res[2], np.atleast_3d(np.ones((3, 4)))))
def testArgwhereExecution(self):
x = arange(6, chunks=2).reshape(2, 3)
t = argwhere(x > 1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.argwhere(np.arange(6).reshape(2, 3) > 1)
self.assertTrue(np.array_equal(res, expected))
def testArraySplitExecution(self):
x = arange(48, chunks=3).reshape(2, 3, 8)
ss = array_split(x, 3, axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.array_split(np.arange(48).reshape(2, 3, 8), 3, axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
ss = array_split(x, [3, 5, 6, 10], axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.array_split(np.arange(48).reshape(2, 3, 8), [3, 5, 6, 10], axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
def testSplitExecution(self):
x = arange(48, chunks=3).reshape(2, 3, 8)
ss = split(x, 4, axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.split(np.arange(48).reshape(2, 3, 8), 4, axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
ss = split(x, [3, 5, 6, 10], axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.split(np.arange(48).reshape(2, 3, 8), [3, 5, 6, 10], axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
# hsplit
x = arange(120, chunks=3).reshape(2, 12, 5)
ss = hsplit(x, 4)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.hsplit(np.arange(120).reshape(2, 12, 5), 4)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
# vsplit
x = arange(48, chunks=3).reshape(8, 3, 2)
ss = vsplit(x, 4)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.vsplit(np.arange(48).reshape(8, 3, 2), 4)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
# dsplit
x = arange(48, chunks=3).reshape(2, 3, 8)
ss = dsplit(x, 4)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.dsplit(np.arange(48).reshape(2, 3, 8), 4)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
x_data = sps.random(12, 8, density=.1)
x = tensor(x_data, chunks=3)
ss = split(x, 4, axis=0)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.split(x_data.toarray(), 4, axis=0)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r.toarray(), e) for r, e in zip(res, expected)]
def testRollExecution(self):
x = arange(10, chunks=2)
t = roll(x, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10), 2)
np.testing.assert_equal(res, expected)
x2 = x.reshape(2, 5)
t = roll(x2, 1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10).reshape(2, 5), 1)
np.testing.assert_equal(res, expected)
t = roll(x2, 1, axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10).reshape(2, 5), 1, axis=0)
np.testing.assert_equal(res, expected)
t = roll(x2, 1, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10).reshape(2, 5), 1, axis=1)
np.testing.assert_equal(res, expected)
def testSqueezeExecution(self):
data = np.array([[[0], [1], [2]]])
x = tensor(data, chunks=1)
t = squeeze(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.squeeze(data)
np.testing.assert_equal(res, expected)
t = squeeze(x, axis=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.squeeze(data, axis=2)
np.testing.assert_equal(res, expected)
def testPtpExecution(self):
x = arange(4, chunks=1).reshape(2, 2)
t = ptp(x, axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ptp(np.arange(4).reshape(2, 2), axis=0)
np.testing.assert_equal(res, expected)
t = ptp(x, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ptp(np.arange(4).reshape(2, 2), axis=1)
np.testing.assert_equal(res, expected)
t = ptp(x)
res = self.executor.execute_tensor(t)[0]
expected = np.ptp(np.arange(4).reshape(2, 2))
np.testing.assert_equal(res, expected)
def testDiffExecution(self):
data = np.array([1, 2, 4, 7, 0])
x = tensor(data, chunks=2)
t = diff(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data)
np.testing.assert_equal(res, expected)
t = diff(x, n=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data, n=2)
np.testing.assert_equal(res, expected)
data = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
x = tensor(data, chunks=2)
t = diff(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data)
np.testing.assert_equal(res, expected)
t = diff(x, axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data, axis=0)
np.testing.assert_equal(res, expected)
x = mt.arange('1066-10-13', '1066-10-16', dtype=mt.datetime64)
t = diff(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64))
np.testing.assert_equal(res, expected)
def testEdiff1d(self):
data = np.array([1, 2, 4, 7, 0])
x = tensor(data, chunks=2)
t = ediff1d(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ediff1d(data)
np.testing.assert_equal(res, expected)
to_begin = tensor(-99, chunks=2)
to_end = tensor([88, 99], chunks=2)
t = ediff1d(x, to_begin=to_begin, to_end=to_end)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ediff1d(data, to_begin=-99, to_end=np.array([88, 99]))
np.testing.assert_equal(res, expected)
data = [[1, 2, 4], [1, 6, 24]]
t = ediff1d(tensor(data, chunks=2))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ediff1d(data)
np.testing.assert_equal(res, expected)
def testDigitizeExecution(self):
data = np.array([0.2, 6.4, 3.0, 1.6])
x = tensor(data, chunks=2)
bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
inds = digitize(x, bins)
res = self.executor.execute_tensor(inds, concat=True)[0]
expected = np.digitize(data, bins)
np.testing.assert_equal(res, expected)
b = tensor(bins, chunks=2)
inds = digitize(x, b)
res = self.executor.execute_tensor(inds, concat=True)[0]
expected = np.digitize(data, bins)
np.testing.assert_equal(res, expected)
data = np.array([1.2, 10.0, 12.4, 15.5, 20.])
x = tensor(data, chunks=2)
bins = np.array([0, 5, 10, 15, 20])
inds = digitize(x, bins, right=True)
res = self.executor.execute_tensor(inds, concat=True)[0]
expected = np.digitize(data, bins, right=True)
np.testing.assert_equal(res, expected)
inds = digitize(x, bins, right=False)
res = self.executor.execute_tensor(inds, concat=True)[0]
expected = np.digitize(data, bins, right=False)
np.testing.assert_equal(res, expected)
data = sps.random(10, 1, density=.1) * 12
x = tensor(data, chunks=2)
bins = np.array([1.0, 2.0, 2.5, 4.0, 10.0])
inds = digitize(x, bins)
res = self.executor.execute_tensor(inds, concat=True)[0]
expected = np.digitize(data.toarray(), bins, right=False)
np.testing.assert_equal(res.toarray(), expected)
def testAverageExecution(self):
data = arange(1, 5, chunks=1)
t = average(data)
res = self.executor.execute_tensor(t)[0]
expected = np.average(np.arange(1, 5))
self.assertEqual(res, expected)
t = average(arange(1, 11, chunks=2), weights=arange(10, 0, -1, chunks=2))
res = self.executor.execute_tensor(t)[0]
expected = np.average(range(1, 11), weights=range(10, 0, -1))
self.assertEqual(res, expected)
data = arange(6, chunks=2).reshape((3, 2))
t = average(data, axis=1, weights=tensor([1./4, 3./4], chunks=2))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.average(np.arange(6).reshape(3, 2), axis=1, weights=(1./4, 3./4))
np.testing.assert_equal(res, expected)
with self.assertRaises(TypeError):
average(data, weights=tensor([1./4, 3./4], chunks=2))
def testCovExecution(self):
data = np.array([[0, 2], [1, 1], [2, 0]]).T
x = tensor(data, chunks=1)
t = cov(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.cov(data)
np.testing.assert_equal(res, expected)
data_x = [-2.1, -1, 4.3]
data_y = [3, 1.1, 0.12]
x = tensor(data_x, chunks=1)
y = tensor(data_y, chunks=1)
X = stack((x, y), axis=0)
t = cov(x, y)
r = tall(t == cov(X))
self.assertTrue(self.executor.execute_tensor(r)[0])
def testCorrcoefExecution(self):
data_x = [-2.1, -1, 4.3]
data_y = [3, 1.1, 0.12]
x = tensor(data_x, chunks=1)
y = tensor(data_y, chunks=1)
t = corrcoef(x, y)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.corrcoef(data_x, data_y)
np.testing.assert_equal(res, expected)
def testFlipExecution(self):
a = arange(8, chunks=2).reshape((2, 2, 2))
t = flip(a, 0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.flip(np.arange(8).reshape(2, 2, 2), 0)
np.testing.assert_equal(res, expected)
t = flip(a, 1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.flip(np.arange(8).reshape(2, 2, 2), 1)
np.testing.assert_equal(res, expected)
t = flipud(a)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.flipud(np.arange(8).reshape(2, 2, 2))
np.testing.assert_equal(res, expected)
t = fliplr(a)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.fliplr(np.arange(8).reshape(2, 2, 2))
np.testing.assert_equal(res, expected)
def testRepeatExecution(self):
a = repeat(3, 4)
res = self.executor.execute_tensor(a)[0]
expected = np.repeat(3, 4)
np.testing.assert_equal(res, expected)
x_data = np.random.randn(20, 30)
x = tensor(x_data, chunks=(3, 4))
t = repeat(x, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, 2)
np.testing.assert_equal(res, expected)
t = repeat(x, 3, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, 3, axis=1)
np.testing.assert_equal(res, expected)
t = repeat(x, np.arange(20), axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, np.arange(20), axis=0)
np.testing.assert_equal(res, expected)
t = repeat(x, arange(20, chunks=5), axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, np.arange(20), axis=0)
np.testing.assert_equal(res, expected)
x_data = sps.random(20, 30, density=.1)
x = tensor(x_data, chunks=(3, 4))
t = repeat(x, 2, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data.toarray(), 2, axis=1)
np.testing.assert_equal(res.toarray(), expected)
def testTileExecution(self):
a_data = np.array([0, 1, 2])
a = tensor(a_data, chunks=2)
t = tile(a, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(a_data, 2)
np.testing.assert_equal(res, expected)
t = tile(a, (2, 2))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(a_data, (2, 2))
np.testing.assert_equal(res, expected)
t = tile(a, (2, 1, 2))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(a_data, (2, 1, 2))
np.testing.assert_equal(res, expected)
b_data = np.array([[1, 2], [3, 4]])
b = tensor(b_data, chunks=1)
t = tile(b, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(b_data, 2)
np.testing.assert_equal(res, expected)
t = tile(b, (2, 1))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(b_data, (2, 1))
np.testing.assert_equal(res, expected)
c_data = np.array([1, 2, 3, 4])
c = tensor(c_data, chunks=3)
t = tile(c, (4, 1))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(c_data, (4, 1))
np.testing.assert_equal(res, expected)
def testIsInExecution(self):
element = 2 * arange(4, chunks=1).reshape((2, 2))
test_elements = [1, 2, 4, 8]
mask = isin(element, test_elements)
res = self.executor.execute_tensor(mask, concat=True)[0]
expected = np.isin(2 * np.arange(4).reshape((2, 2)), test_elements)
np.testing.assert_equal(res, expected)
res = self.executor.execute_tensor(element[mask], concat=True)[0]
expected = np.array([2, 4])
np.testing.assert_equal(res, expected)
mask = isin(element, test_elements, invert=True)
res = self.executor.execute_tensor(mask, concat=True)[0]
expected = np.isin(2 * np.arange(4).reshape((2, 2)), test_elements, invert=True)
np.testing.assert_equal(res, expected)
res = self.executor.execute_tensor(element[mask], concat=True)[0]
expected = np.array([0, 6])
np.testing.assert_equal(res, expected)
test_set = {1, 2, 4, 8}
mask = isin(element, test_set)
res = self.executor.execute_tensor(mask, concat=True)[0]
expected = np.isin(2 * np.arange(4).reshape((2, 2)), test_set)
np.testing.assert_equal(res, expected)
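if __name__ == '__main__':
    # Standard unittest entry point (a convenience sketch, not required by the project's own
    # test runner), so the module can also be executed directly, e.g. `python test_base_execute.py`.
    unittest.main()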
comix-imagenet/init_paths.py | drumpt/Co-Mixup | 86 | 8897 | import sys
import matplotlib
matplotlib.use('Agg')
sys.path.insert(0, 'lib')
members_abundances_in_out_uncertainties.py | kcotar/Gaia_clusters_potential | 0 | 8898 | import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
from astropy.table import Table, join
from os import chdir, system
from scipy.stats import norm as gauss_norm
from sys import argv
from getopt import getopt
# turn off polyfit ranking warnings
import warnings
warnings.filterwarnings('ignore')
def _prepare_pdf_data(means, stds, range, norm=True):
x_vals = np.linspace(range[0], range[1], 250)
y_vals = np.zeros_like(x_vals)
# create and sum all PDF of stellar abundances
for d_m, d_s in zip(means, stds):
if np.isfinite([d_m, d_s]).all():
y_vals += gauss_norm.pdf(x_vals, loc=d_m, scale=d_s)
# return normalized summed pdf of all stars
if norm and np.nansum(y_vals) > 0.:
y_vals = 1. * y_vals/np.nanmax(y_vals)
return x_vals, y_vals
def _prepare_hist_data(d, bins, range, norm=True):
heights, edges = np.histogram(d, bins=bins, range=range)
width = np.abs(edges[0] - edges[1])
if norm:
heights = 1.*heights / np.nanmax(heights)
return edges[:-1], heights, width
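# Illustrative sketch only (never called by the script): how the two helpers above are meant to be
# combined. The abundance means/uncertainties below are made-up numbers used to show the call
# signatures; real inputs come from the GALAH abundance columns handled further down.
def _demo_prepare_pdf_hist():
    demo_means = np.array([0.05, -0.10, 0.02, 0.12])  # assumed [X/Fe] values of a few members
    demo_stds = np.array([0.04, 0.06, 0.05, 0.03])  # assumed abundance uncertainties
    x_pdf, y_pdf = _prepare_pdf_data(demo_means, demo_stds, (-0.45, 0.45))
    x_hist, y_hist, w_hist = _prepare_hist_data(demo_means, 40, (-0.45, 0.45))
    return x_pdf, y_pdf, x_hist, y_hist, w_hist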
def _evaluate_abund_trend_fit(orig, fit, idx, sigma_low, sigma_high):
    # difference to the original data
diff = orig - fit
std_diff = np.nanstd(diff[idx])
    # flag points outside the allowed sigma range; they are excluded from the next fitting step
idx_outlier = np.logical_or(diff < (-1. * std_diff * sigma_low),
diff > (std_diff * sigma_high))
return np.logical_and(idx, ~idx_outlier)
def fit_abund_trend(p_data, a_data,
steps=3, sigma_low=2.5, sigma_high=2.5,
order=5, window=10, n_min_perc=10.,func='poly'):
idx_fit = np.logical_and(np.isfinite(p_data), np.isfinite(a_data))
data_len = np.sum(idx_fit)
n_fit_points_prev = np.sum(idx_fit)
if data_len <= order + 1:
return None, None
p_offset = np.nanmedian(p_data)
for i_f in range(steps): # number of sigma clipping steps
if func == 'cheb':
coef = np.polynomial.chebyshev.chebfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order)
f_data = np.polynomial.chebyshev.chebval(p_data - p_offset, coef)
if func == 'legen':
coef = np.polynomial.legendre.legfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order)
f_data = np.polynomial.legendre.legval(p_data - p_offset, coef)
if func == 'poly':
coef = np.polyfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order)
f_data = np.poly1d(coef)(p_data - p_offset)
if func == 'spline':
coef = splrep(p_data[idx_fit] - p_offset, a_data[idx_fit], k=order, s=window)
f_data = splev(p_data - p_offset, coef)
idx_fit = _evaluate_abund_trend_fit(a_data, f_data, idx_fit, sigma_low, sigma_high)
n_fit_points = np.sum(idx_fit)
if 100.*n_fit_points/data_len < n_min_perc:
break
if n_fit_points == n_fit_points_prev:
break
else:
n_fit_points_prev = n_fit_points
a_std = np.nanstd(a_data - f_data)
return [coef, p_offset], a_std
def eval_abund_trend(p_data, m_data, func='poly'):
coef, p_offset = m_data
if func == 'cheb':
f_data = np.polynomial.chebyshev.chebval(p_data - p_offset, coef)
if func == 'legen':
f_data = np.polynomial.legendre.legval(p_data - p_offset, coef)
if func == 'poly':
f_data = np.poly1d(coef)(p_data - p_offset)
if func == 'spline':
f_data = splev(p_data - p_offset, coef)
return f_data
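# Minimal usage sketch (not part of the pipeline run): detrend one abundance column against a
# stellar parameter by fitting the trend on a reference subset and subtracting the evaluated fit
# from all rows. The column names 'teff'/'fe_h' are assumptions matching the DR3 settings used below.
def _demo_detrend_abund(data, abund_col='fe_h', param_col='teff', idx_ref=None):
    if idx_ref is None:
        idx_ref = np.isfinite(data[param_col])
    fit_model, fit_std = fit_abund_trend(data[param_col][idx_ref], data[abund_col][idx_ref],
                                         order=3, steps=2, func='poly',
                                         sigma_low=2.5, sigma_high=2.5, n_min_perc=10.)
    if fit_model is None:
        return None, None
    return data[abund_col] - eval_abund_trend(data[param_col], fit_model, func='poly'), fit_std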
simulation_dir = '/shared/data-camelot/cotar/'
data_dir_clusters = simulation_dir+'GaiaDR2_open_clusters_2001_GALAH/'
data_dir = '/shared/ebla/cotar/'
USE_DR3 = True
Q_FLAGS = True
P_INDIVIDUAL = False
suffix = ''
if len(argv) > 1:
# parse input options
opts, args = getopt(argv[1:], '', ['dr3=', 'suffix=', 'flags=', 'individual='])
# set parameters, depending on user inputs
print(opts)
for o, a in opts:
if o == '--dr3':
USE_DR3 = int(a) > 0
if o == '--suffix':
suffix += str(a)
if o == '--flags':
Q_FLAGS = int(a) > 0
if o == '--individual':
P_INDIVIDUAL = int(a) > 0
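# Example invocation for the options parsed above (the suffix value is a placeholder):
# python members_abundances_in_out_uncertainties.py --dr3=1 --flags=1 --individual=0 --suffix=_run1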
CG_data = Table.read(data_dir+'clusters/Cantat-Gaudin_2018/members.fits')
tails_data = Table.read(data_dir+'clusters/cluster_tails/members_open_gaia_tails.fits')
# remove cluster members from tails data
print('Cluster members all:', len(CG_data), len(tails_data))
idx_not_in_cluster = np.in1d(tails_data['source_id'], CG_data['source_id'], invert=True)
tails_data = tails_data[idx_not_in_cluster]
print('Cluster and tail members after removing overlap:', len(CG_data), len(tails_data))
if USE_DR3:
# cannon_data = Table.read(data_dir+'GALAH_iDR3_main_alpha_190529.fits')
cannon_data = Table.read(data_dir+'GALAH_iDR3_main_191213.fits')
fe_col = 'fe_h'
teff_col = 'teff'
q_flag = 'flag_sp'
suffix += '_DR3'
else:
    # non-DR3 input is not configured in this version of the script; cannon_data, fe_col,
    # teff_col and q_flag remain undefined if this branch is taken
    pass
if Q_FLAGS:
suffix += '_flag0'
# determine all possible simulation subdirs
chdir(data_dir_clusters)
for cluster_dir in glob('Cluster_orbits_GaiaDR2_*'):
chdir(cluster_dir)
print('Working on clusters in ' + cluster_dir)
for sub_dir in glob('*'):
current_cluster = '_'.join(sub_dir.split('_')[0:2])
source_id_cg = CG_data[CG_data['cluster'] == current_cluster]['source_id']
source_id_tail = tails_data[tails_data['cluster'] == current_cluster]['source_id']
idx_cg_memb = np.in1d(cannon_data['source_id'], np.array(source_id_cg))
idx_tail = np.in1d(cannon_data['source_id'], np.array(source_id_tail))
if '.png' in sub_dir or 'individual-abund' in sub_dir:
continue
print(' ')
print(sub_dir)
chdir(sub_dir)
try:
g_init = Table.read('members_init_galah.csv', format='ascii', delimiter='\t')
idx_init = np.in1d(cannon_data['source_id'], g_init['source_id'])
except:
idx_init = np.full(len(cannon_data), False)
try:
g_in_all = Table.read('possible_ejected-step1.csv', format='ascii', delimiter='\t')
g_in = Table.read('possible_ejected-step1_galah.csv', format='ascii', delimiter='\t')
# further refinement of results to be plotted here
g_in_all = g_in_all[np.logical_and(g_in_all['time_in_cluster'] >= 1., # [Myr] longest time (of all incarnations) inside cluster
g_in_all['in_cluster_prob'] >= 68.)] # percentage of reincarnations inside cluster
g_in = g_in[np.logical_and(g_in['time_in_cluster'] >= 1.,
g_in['in_cluster_prob'] >= 68.)]
idx_in = np.in1d(cannon_data['source_id'], g_in['source_id'])
idx_in_no_CG = np.logical_and(idx_in,
np.logical_not(np.in1d(cannon_data['source_id'], CG_data['source_id'])))
except:
idx_in = np.full(len(cannon_data), False)
idx_in_no_CG = np.full(len(cannon_data), False)
try:
g_out = Table.read('possible_outside-step1_galah.csv', format='ascii', delimiter='\t')
# further refinement of results to be plotted here
g_out = g_out[np.logical_and(g_out['time_in_cluster'] <= 0,
g_out['in_cluster_prob'] <= 0)]
idx_out = np.in1d(cannon_data['source_id'], g_out['source_id'])
except:
idx_out = np.full(len(cannon_data), False)
chdir('..')
if np.sum(idx_init) == 0 or np.sum(idx_in) == 0 or np.sum(idx_out) == 0:
print(' Some Galah lists are missing')
if USE_DR3:
abund_cols = [c for c in cannon_data.colnames if '_fe' in c and 'nr_' not in c and 'diff_' not in c and 'e_' not in c and 'Li' not in c and 'alpha' not in c] # and ('I' in c or 'II' in c or 'III' in c)]
else:
abund_cols = [c for c in cannon_data.colnames if '_abund' in c and len(c.split('_')) == 3]
# abund_cols = ['e_' + cc for cc in abund_cols]
# rg = (0., 0.35)
# yt = [0., 0.1, 0.2, 0.3]
# medfix = '-snr-sigma_'
abund_cols = ['diff_' + cc for cc in abund_cols]
rg = (-0.45, 0.45)
yt = [-0.3, -0.15, 0.0, 0.15, 0.3]
medfix = '-detrended-snr_'
# ------------------------------------------------------------------------------
# NEW: plot with parameter dependency trends
# ------------------------------------------------------------------------------
bs = 40
x_cols_fig = 7
y_cols_fig = 5
param_lims = {'snr_c2_iraf': [5, 175], 'age': [0., 14.], 'teff': [3000, 7000], 'logg': [0.0, 5.5], 'fe_h': [-1.2, 0.5]}
for param in ['snr_c2_iraf']: #list(param_lims.keys()):
cannon_data['abund_det'] = 0
cannon_data['abund_det_elems'] = 0
print('Estimating membership using parameter', param)
fig, ax = plt.subplots(y_cols_fig, x_cols_fig, figsize=(15, 10))
for i_c, col in enumerate(abund_cols):
# print(col)
x_p = i_c % x_cols_fig
y_p = int(1. * i_c / x_cols_fig)
fit_x_param = 'teff'
cur_abund_col = '_'.join(col.split('_')[1:])
cannon_data['diff_' + cur_abund_col] = cannon_data[cur_abund_col]
idx_val = np.isfinite(cannon_data[col])
if Q_FLAGS:
idx_val = np.logical_and(idx_val, cannon_data[q_flag] == 0)
idx_u1 = np.logical_and(idx_out, idx_val)
idx_u2 = np.logical_and(idx_init, idx_val)
idx_u3 = np.logical_and(idx_in, idx_val)
idx_u4 = np.logical_and(idx_cg_memb, idx_val)
idx_u5 = np.logical_and(idx_tail, idx_val)
fit_model, col_std = fit_abund_trend(cannon_data[fit_x_param][idx_u2],
cannon_data[cur_abund_col][idx_u2],
order=3, steps=2, func='poly',
sigma_low=2.5, sigma_high=2.5, n_min_perc=10.)
if fit_model is not None:
cannon_data['diff_' + cur_abund_col] = cannon_data[cur_abund_col] - eval_abund_trend(cannon_data[fit_x_param], fit_model, func='poly')
else:
cannon_data['diff_' + cur_abund_col] = np.nan
ax[y_p, x_p].scatter(cannon_data[param][idx_u1], cannon_data[col][idx_u1],
lw=0, s=3, color='C2', label='Field')
ax[y_p, x_p].scatter(cannon_data[param][idx_u2], cannon_data[col][idx_u2],
lw=0, s=3, color='C0', label='Initial')
ax[y_p, x_p].scatter(cannon_data[param][idx_u3], cannon_data[col][idx_u3],
lw=0, s=3, color='C1', label='Ejected')
if np.sum(idx_u5) > 0:
print('Ejected in tail:', np.sum(np.logical_and(idx_u3, idx_u5)))
ax[y_p, x_p].scatter(cannon_data[param][idx_u5], cannon_data[col][idx_u5],
lw=0, s=3, color='C4', label='Tail')
label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3))
ax[y_p, x_p].set(xlim=param_lims[param], title=' '.join(col.split('_')[:2]) + label_add,
ylim=rg,
yticks=yt,)
ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black')
rg = (-0.6, 0.6)
idx_val = np.isfinite(cannon_data[teff_col])
if Q_FLAGS:
idx_val = np.logical_and(idx_val, cannon_data[q_flag] == 0)
x_p = -1
y_p = -1
idx_u1 = np.logical_and(idx_out, idx_val)
idx_u2 = np.logical_and(idx_init, idx_val)
idx_u3 = np.logical_and(idx_in, idx_val)
idx_u5 = np.logical_and(idx_tail, idx_val)
sl1 = ax[y_p, x_p].scatter(cannon_data[param][idx_u1], cannon_data[fe_col][idx_u1],
lw=0, s=3, color='C2', label='Field')
sl2 = ax[y_p, x_p].scatter(cannon_data[param][idx_u2], cannon_data[fe_col][idx_u2],
lw=0, s=3, color='C0', label='Initial')
sl3 = ax[y_p, x_p].scatter(cannon_data[param][idx_u3], cannon_data[fe_col][idx_u3],
lw=0, s=3, color='C1', label='Ejected')
fit_model, col_std = fit_abund_trend(cannon_data[param][idx_u2], cannon_data[fe_col][idx_u2],
order=3, steps=2, sigma_low=2.5, sigma_high=2.5, n_min_perc=10.,
func='poly')
if np.sum(idx_u5) > 0:
sl5 = ax[y_p, x_p].scatter(cannon_data[param][idx_u5], cannon_data[fe_col][idx_u5],
lw=0, s=3, color='C4', label='Tail')
                    ax[-1, -3].legend(handles=[sl1, sl2, sl3, sl5])
else:
                    ax[-1, -3].legend(handles=[sl1, sl2, sl3])
label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3))
ax[y_p, x_p].set(ylim=rg, title='Fe/H' + label_add, xlim=param_lims[param])
ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black')
x_p = -2
y_p = -1
ax[y_p, x_p].scatter(cannon_data['age'][idx_u1], cannon_data[param][idx_u1],
lw=0, s=3, color='C2', label='Field')
ax[y_p, x_p].scatter(cannon_data['age'][idx_u2], cannon_data[param][idx_u2],
lw=0, s=3, color='C0', label='Initial')
ax[y_p, x_p].scatter(cannon_data['age'][idx_u3], cannon_data[param][idx_u3],
lw=0, s=3, color='C1', label='Ejected')
if np.sum(idx_u5) > 0:
ax[y_p, x_p].scatter(cannon_data['age'][idx_u5], cannon_data[param][idx_u5],
lw=0, s=3, color='C4', label='Tail')
label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3))
ax[y_p, x_p].set(ylim=param_lims[param], title='age' + label_add, xlim=[0., 14.])
ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black')
plt.subplots_adjust(top=0.97, bottom=0.02, left=0.04, right=0.98, hspace=0.3, wspace=0.3)
# plt.show()
            plt.savefig('p_' + param + '_abundances' + medfix + sub_dir + suffix + '.png', dpi=250)
plt.close(fig)
chdir('..')
| import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
from astropy.table import Table, join
from os import chdir, system
from scipy.stats import norm as gauss_norm
from sys import argv
from getopt import getopt
from scipy.interpolate import splrep, splev
# turn off polyfit ranking warnings
import warnings
warnings.filterwarnings('ignore')
def _prepare_pdf_data(means, stds, range, norm=True):
x_vals = np.linspace(range[0], range[1], 250)
y_vals = np.zeros_like(x_vals)
# create and sum all PDF of stellar abundances
for d_m, d_s in zip(means, stds):
if np.isfinite([d_m, d_s]).all():
y_vals += gauss_norm.pdf(x_vals, loc=d_m, scale=d_s)
# return normalized summed pdf of all stars
if norm and np.nansum(y_vals) > 0.:
y_vals = 1. * y_vals/np.nanmax(y_vals)
return x_vals, y_vals
def _prepare_hist_data(d, bins, range, norm=True):
heights, edges = np.histogram(d, bins=bins, range=range)
width = np.abs(edges[0] - edges[1])
if norm:
heights = 1.*heights / np.nanmax(heights)
return edges[:-1], heights, width
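# Usage note (added as an illustrative sketch, not part of the original script): the two helpers above
# are presumably overlaid on a common abundance range, e.g. _prepare_hist_data(values, bins, rg) for the
# normalised histogram of one stellar group and _prepare_pdf_data(means, stds, rg) for the summed
# per-star Gaussian PDF of another group; the actual column names and plotting calls are assumed here.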
def _evaluate_abund_trend_fit(orig, fit, idx, sigma_low, sigma_high):
    # difference from the original data
diff = orig - fit
std_diff = np.nanstd(diff[idx])
# select data that will be fitted
idx_outlier = np.logical_or(diff < (-1. * std_diff * sigma_low),
diff > (std_diff * sigma_high))
return np.logical_and(idx, ~idx_outlier)
def fit_abund_trend(p_data, a_data,
steps=3, sigma_low=2.5, sigma_high=2.5,
order=5, window=10, n_min_perc=10.,func='poly'):
idx_fit = np.logical_and(np.isfinite(p_data), np.isfinite(a_data))
data_len = np.sum(idx_fit)
n_fit_points_prev = np.sum(idx_fit)
if data_len <= order + 1:
return None, None
p_offset = np.nanmedian(p_data)
for i_f in range(steps): # number of sigma clipping steps
if func == 'cheb':
coef = np.polynomial.chebyshev.chebfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order)
f_data = np.polynomial.chebyshev.chebval(p_data - p_offset, coef)
if func == 'legen':
coef = np.polynomial.legendre.legfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order)
f_data = np.polynomial.legendre.legval(p_data - p_offset, coef)
if func == 'poly':
coef = np.polyfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order)
f_data = np.poly1d(coef)(p_data - p_offset)
if func == 'spline':
coef = splrep(p_data[idx_fit] - p_offset, a_data[idx_fit], k=order, s=window)
f_data = splev(p_data - p_offset, coef)
idx_fit = _evaluate_abund_trend_fit(a_data, f_data, idx_fit, sigma_low, sigma_high)
n_fit_points = np.sum(idx_fit)
if 100.*n_fit_points/data_len < n_min_perc:
break
if n_fit_points == n_fit_points_prev:
break
else:
n_fit_points_prev = n_fit_points
a_std = np.nanstd(a_data - f_data)
return [coef, p_offset], a_std
def eval_abund_trend(p_data, m_data, func='poly'):
coef, p_offset = m_data
if func == 'cheb':
f_data = np.polynomial.chebyshev.chebval(p_data - p_offset, coef)
if func == 'legen':
f_data = np.polynomial.legendre.legval(p_data - p_offset, coef)
if func == 'poly':
f_data = np.poly1d(coef)(p_data - p_offset)
if func == 'spline':
f_data = splev(p_data - p_offset, coef)
return f_data
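# Illustrative sketch (added; not part of the original analysis): how the fit/eval pair above is meant
# to be combined for detrending, mirroring the per-abundance loop further below. The synthetic Teff
# values, trend coefficients and noise level used here are purely hypothetical.
def _demo_abund_detrend():
    demo_teff = np.linspace(4000., 6500., 300)
    # fake abundance with a weak linear Teff trend plus Gaussian scatter
    demo_abund = 0.05 + 1.e-4 * (demo_teff - 5250.) + np.random.normal(0., 0.03, demo_teff.size)
    fit_model, _fit_std = fit_abund_trend(demo_teff, demo_abund,
                                          order=3, steps=2, func='poly',
                                          sigma_low=2.5, sigma_high=2.5, n_min_perc=10.)
    if fit_model is None:
        return None
    # detrended abundance = original values minus the evaluated Teff trend
    return demo_abund - eval_abund_trend(demo_teff, fit_model, func='poly')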
simulation_dir = '/shared/data-camelot/cotar/'
data_dir_clusters = simulation_dir+'GaiaDR2_open_clusters_2001_GALAH/'
data_dir = '/shared/ebla/cotar/'
USE_DR3 = True
Q_FLAGS = True
P_INDIVIDUAL = False
suffix = ''
if len(argv) > 1:
# parse input options
opts, args = getopt(argv[1:], '', ['dr3=', 'suffix=', 'flags=', 'individual='])
# set parameters, depending on user inputs
print(opts)
for o, a in opts:
if o == '--dr3':
USE_DR3 = int(a) > 0
if o == '--suffix':
suffix += str(a)
if o == '--flags':
Q_FLAGS = int(a) > 0
if o == '--individual':
P_INDIVIDUAL = int(a) > 0
CG_data = Table.read(data_dir+'clusters/Cantat-Gaudin_2018/members.fits')
tails_data = Table.read(data_dir+'clusters/cluster_tails/members_open_gaia_tails.fits')
# remove cluster members from tails data
print('Cluster members all:', len(CG_data), len(tails_data))
idx_not_in_cluster = np.in1d(tails_data['source_id'], CG_data['source_id'], invert=True)
tails_data = tails_data[idx_not_in_cluster]
print('Cluster members all:', len(CG_data), len(tails_data))
if USE_DR3:
# cannon_data = Table.read(data_dir+'GALAH_iDR3_main_alpha_190529.fits')
cannon_data = Table.read(data_dir+'GALAH_iDR3_main_191213.fits')
fe_col = 'fe_h'
teff_col = 'teff'
q_flag = 'flag_sp'
suffix += '_DR3'
else:
pass
if Q_FLAGS:
suffix += '_flag0'
# determine all possible simulation subdirs
chdir(data_dir_clusters)
for cluster_dir in glob('Cluster_orbits_GaiaDR2_*'):
chdir(cluster_dir)
print('Working on clusters in ' + cluster_dir)
for sub_dir in glob('*'):
current_cluster = '_'.join(sub_dir.split('_')[0:2])
source_id_cg = CG_data[CG_data['cluster'] == current_cluster]['source_id']
source_id_tail = tails_data[tails_data['cluster'] == current_cluster]['source_id']
idx_cg_memb = np.in1d(cannon_data['source_id'], np.array(source_id_cg))
idx_tail = np.in1d(cannon_data['source_id'], np.array(source_id_tail))
if '.png' in sub_dir or 'individual-abund' in sub_dir:
continue
print(' ')
print(sub_dir)
chdir(sub_dir)
try:
g_init = Table.read('members_init_galah.csv', format='ascii', delimiter='\t')
idx_init = np.in1d(cannon_data['source_id'], g_init['source_id'])
except:
idx_init = np.full(len(cannon_data), False)
try:
g_in_all = Table.read('possible_ejected-step1.csv', format='ascii', delimiter='\t')
g_in = Table.read('possible_ejected-step1_galah.csv', format='ascii', delimiter='\t')
# further refinement of results to be plotted here
g_in_all = g_in_all[np.logical_and(g_in_all['time_in_cluster'] >= 1., # [Myr] longest time (of all incarnations) inside cluster
g_in_all['in_cluster_prob'] >= 68.)] # percentage of reincarnations inside cluster
g_in = g_in[np.logical_and(g_in['time_in_cluster'] >= 1.,
g_in['in_cluster_prob'] >= 68.)]
idx_in = np.in1d(cannon_data['source_id'], g_in['source_id'])
idx_in_no_CG = np.logical_and(idx_in,
np.logical_not(np.in1d(cannon_data['source_id'], CG_data['source_id'])))
except:
idx_in = np.full(len(cannon_data), False)
idx_in_no_CG = np.full(len(cannon_data), False)
try:
g_out = Table.read('possible_outside-step1_galah.csv', format='ascii', delimiter='\t')
# further refinement of results to be plotted here
g_out = g_out[np.logical_and(g_out['time_in_cluster'] <= 0,
g_out['in_cluster_prob'] <= 0)]
idx_out = np.in1d(cannon_data['source_id'], g_out['source_id'])
except:
idx_out = np.full(len(cannon_data), False)
chdir('..')
if np.sum(idx_init) == 0 or np.sum(idx_in) == 0 or np.sum(idx_out) == 0:
print(' Some Galah lists are missing')
if USE_DR3:
abund_cols = [c for c in cannon_data.colnames if '_fe' in c and 'nr_' not in c and 'diff_' not in c and 'e_' not in c and 'Li' not in c and 'alpha' not in c] # and ('I' in c or 'II' in c or 'III' in c)]
else:
abund_cols = [c for c in cannon_data.colnames if '_abund' in c and len(c.split('_')) == 3]
# abund_cols = ['e_' + cc for cc in abund_cols]
# rg = (0., 0.35)
# yt = [0., 0.1, 0.2, 0.3]
# medfix = '-snr-sigma_'
abund_cols = ['diff_' + cc for cc in abund_cols]
rg = (-0.45, 0.45)
yt = [-0.3, -0.15, 0.0, 0.15, 0.3]
medfix = '-detrended-snr_'
# ------------------------------------------------------------------------------
# NEW: plot with parameter dependency trends
# ------------------------------------------------------------------------------
bs = 40
x_cols_fig = 7
y_cols_fig = 5
param_lims = {'snr_c2_iraf': [5, 175], 'age': [0., 14.], 'teff': [3000, 7000], 'logg': [0.0, 5.5], 'fe_h': [-1.2, 0.5]}
for param in ['snr_c2_iraf']: #list(param_lims.keys()):
cannon_data['abund_det'] = 0
cannon_data['abund_det_elems'] = 0
print('Estimating membership using parameter', param)
fig, ax = plt.subplots(y_cols_fig, x_cols_fig, figsize=(15, 10))
for i_c, col in enumerate(abund_cols):
# print(col)
x_p = i_c % x_cols_fig
y_p = int(1. * i_c / x_cols_fig)
fit_x_param = 'teff'
cur_abund_col = '_'.join(col.split('_')[1:])
cannon_data['diff_' + cur_abund_col] = cannon_data[cur_abund_col]
idx_val = np.isfinite(cannon_data[col])
if Q_FLAGS:
idx_val = np.logical_and(idx_val, cannon_data[q_flag] == 0)
idx_u1 = np.logical_and(idx_out, idx_val)
idx_u2 = np.logical_and(idx_init, idx_val)
idx_u3 = np.logical_and(idx_in, idx_val)
idx_u4 = np.logical_and(idx_cg_memb, idx_val)
idx_u5 = np.logical_and(idx_tail, idx_val)
fit_model, col_std = fit_abund_trend(cannon_data[fit_x_param][idx_u2],
cannon_data[cur_abund_col][idx_u2],
order=3, steps=2, func='poly',
sigma_low=2.5, sigma_high=2.5, n_min_perc=10.)
if fit_model is not None:
cannon_data['diff_' + cur_abund_col] = cannon_data[cur_abund_col] - eval_abund_trend(cannon_data[fit_x_param], fit_model, func='poly')
else:
cannon_data['diff_' + cur_abund_col] = np.nan
ax[y_p, x_p].scatter(cannon_data[param][idx_u1], cannon_data[col][idx_u1],
lw=0, s=3, color='C2', label='Field')
ax[y_p, x_p].scatter(cannon_data[param][idx_u2], cannon_data[col][idx_u2],
lw=0, s=3, color='C0', label='Initial')
ax[y_p, x_p].scatter(cannon_data[param][idx_u3], cannon_data[col][idx_u3],
lw=0, s=3, color='C1', label='Ejected')
if np.sum(idx_u5) > 0:
print('Ejected in tail:', np.sum(np.logical_and(idx_u3, idx_u5)))
ax[y_p, x_p].scatter(cannon_data[param][idx_u5], cannon_data[col][idx_u5],
lw=0, s=3, color='C4', label='Tail')
label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3))
ax[y_p, x_p].set(xlim=param_lims[param], title=' '.join(col.split('_')[:2]) + label_add,
ylim=rg,
yticks=yt,)
ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black')
rg = (-0.6, 0.6)
idx_val = np.isfinite(cannon_data[teff_col])
if Q_FLAGS:
idx_val = np.logical_and(idx_val, cannon_data[q_flag] == 0)
x_p = -1
y_p = -1
idx_u1 = np.logical_and(idx_out, idx_val)
idx_u2 = np.logical_and(idx_init, idx_val)
idx_u3 = np.logical_and(idx_in, idx_val)
idx_u5 = np.logical_and(idx_tail, idx_val)
sl1 = ax[y_p, x_p].scatter(cannon_data[param][idx_u1], cannon_data[fe_col][idx_u1],
lw=0, s=3, color='C2', label='Field')
sl2 = ax[y_p, x_p].scatter(cannon_data[param][idx_u2], cannon_data[fe_col][idx_u2],
lw=0, s=3, color='C0', label='Initial')
sl3 = ax[y_p, x_p].scatter(cannon_data[param][idx_u3], cannon_data[fe_col][idx_u3],
lw=0, s=3, color='C1', label='Ejected')
fit_model, col_std = fit_abund_trend(cannon_data[param][idx_u2], cannon_data[fe_col][idx_u2],
order=3, steps=2, sigma_low=2.5, sigma_high=2.5, n_min_perc=10.,
func='poly')
if np.sum(idx_u5) > 0:
sl5 = ax[y_p, x_p].scatter(cannon_data[param][idx_u5], cannon_data[fe_col][idx_u5],
lw=0, s=3, color='C4', label='Tail')
                    ax[-1, -3].legend(handles=[sl1, sl2, sl3, sl5])
else:
                    ax[-1, -3].legend(handles=[sl1, sl2, sl3])
label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3))
ax[y_p, x_p].set(ylim=rg, title='Fe/H' + label_add, xlim=param_lims[param])
ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black')
x_p = -2
y_p = -1
ax[y_p, x_p].scatter(cannon_data['age'][idx_u1], cannon_data[param][idx_u1],
lw=0, s=3, color='C2', label='Field')
ax[y_p, x_p].scatter(cannon_data['age'][idx_u2], cannon_data[param][idx_u2],
lw=0, s=3, color='C0', label='Initial')
ax[y_p, x_p].scatter(cannon_data['age'][idx_u3], cannon_data[param][idx_u3],
lw=0, s=3, color='C1', label='Ejected')
if np.sum(idx_u5) > 0:
ax[y_p, x_p].scatter(cannon_data['age'][idx_u5], cannon_data[param][idx_u5],
lw=0, s=3, color='C4', label='Tail')
label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3))
ax[y_p, x_p].set(ylim=param_lims[param], title='age' + label_add, xlim=[0., 14.])
ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black')
plt.subplots_adjust(top=0.97, bottom=0.02, left=0.04, right=0.98, hspace=0.3, wspace=0.3)
# plt.show()
            plt.savefig('p_' + param + '_abundances' + medfix + sub_dir + suffix + '.png', dpi=250)
plt.close(fig)
chdir('..')
| en | 0.523231 | # turn off polyfit ranking warnings # create and sum all PDF of stellar abundances # return normalized summed pdf of all stars # diffence to the original data # select data that will be fitted # number of sigma clipping steps # parse input options # set parameters, depending on user inputs # remove cluster members from tails data # cannon_data = Table.read(data_dir+'GALAH_iDR3_main_alpha_190529.fits') # determine all possible simulation subdirs # further refinement of results to be plotted here # [Myr] longest time (of all incarnations) inside cluster # percentage of reincarnations inside cluster # further refinement of results to be plotted here # and ('I' in c or 'II' in c or 'III' in c)] # abund_cols = ['e_' + cc for cc in abund_cols] # rg = (0., 0.35) # yt = [0., 0.1, 0.2, 0.3] # medfix = '-snr-sigma_' # ------------------------------------------------------------------------------ # NEW: plot with parameter dependency trends # ------------------------------------------------------------------------------ #list(param_lims.keys()): # print(col) # plt.show() | 2.298521 | 2 |
src/python_minifier/transforms/remove_pass.py | donno2048/python-minifier | 0 | 8899 | <reponame>donno2048/python-minifier
import ast
from python_minifier.transforms.suite_transformer import SuiteTransformer
class RemovePass(SuiteTransformer):
"""
Remove Pass keywords from source
If a statement is syntactically necessary, use an empty expression instead
"""
def __call__(self, node):
return self.visit(node)
def suite(self, node_list, parent):
without_pass = [self.visit(a) for a in filter(lambda n: not self.is_node(n, ast.Pass), node_list)]
if len(without_pass) == 0:
if isinstance(parent, ast.Module):
return []
else:
return [self.add_child(ast.Expr(value=ast.Num(0)), parent=parent)]
return without_pass
| import ast
from python_minifier.transforms.suite_transformer import SuiteTransformer
class RemovePass(SuiteTransformer):
"""
Remove Pass keywords from source
If a statement is syntactically necessary, use an empty expression instead
"""
def __call__(self, node):
return self.visit(node)
def suite(self, node_list, parent):
without_pass = [self.visit(a) for a in filter(lambda n: not self.is_node(n, ast.Pass), node_list)]
if len(without_pass) == 0:
if isinstance(parent, ast.Module):
return []
else:
return [self.add_child(ast.Expr(value=ast.Num(0)), parent=parent)]
return without_pass | en | 0.631282 | Remove Pass keywords from source If a statement is syntactically necessary, use an empty expression instead | 2.422449 | 2 |