filename | text
---|---
the-stack_0_22808 |
# -*- coding: utf-8 -*-
from hamcrest import assert_that, has_items, has_length, contains
from doublex import Stub
from boscli import interpreter as interpreter_module
from boscli import basic_types
from boscli.command import Command
with describe('Autocomplete'):
with before.each:
self.interpreter = interpreter_module.Interpreter()
self.implementation = Stub()
self.interpreter.add_command(Command(['sys', 'reboot'], self.implementation.reboot))
self.interpreter.add_command(Command(['sys', 'shutdown'], self.implementation.shutdown))
self.interpreter.add_command(Command(['net', 'show', 'configuration'], self.implementation.show_net_conf))
with describe('when autocompleting empty line'):
with it('complete with initial keywords'):
assert_that(self.interpreter.complete(''), has_items('sys ', 'net '))
with describe('when autocompleting keywords'):
with it('complete keywords'):
assert_that(self.interpreter.complete('sy'), has_items('sys '))
assert_that(self.interpreter.complete('sys'), has_items('sys '))
assert_that(self.interpreter.complete('sys r'), has_items('reboot'))
with it('not complete when a command matches'):
assert_that(self.interpreter.complete('sys reboot'), has_length(0))
assert_that(self.interpreter.complete('sys reboot '), has_length(0))
with it('not complete unknown command'):
assert_that(self.interpreter.complete('unknown command'), has_length(0))
with describe('when autocompleting matching and partial matching command'):
with it('completes partial matching command with space'):
self.interpreter.add_command(
Command(['cmd', CompleteCompletionsType(['op1', 'op2']), 'last'], self.implementation.irrelevant_cmd))
self.interpreter.add_command(
Command(['cmd', CompleteCompletionsType(['op1', 'op2'])], self.implementation.irrelevant_cmd))
assert_that(self.interpreter.complete('cmd op1'), contains('op1 '))
with describe('when autocompleting type with completions'):
with it('completes with the final space if is not the last token'):
self.interpreter.add_command(Command(['cmd', PlainCompletionsType(['op1', 'op2']), 'last'],
self.implementation.irrelevant_cmd))
assert_that(self.interpreter.complete('cmd o'), has_items('op1 ', 'op2 '))
with it('completes without the final space if is the last token'):
self.interpreter.add_command(Command(['cmd', PlainCompletionsType(['op1', 'op2'])],
self.implementation.irrelevant_cmd))
assert_that(self.interpreter.complete('cmd o'), has_items('op1', 'op2'))
with describe('when autocompleting type with complete completions'):
with it('completes with the final space if is not the last token'):
self.interpreter.add_command(Command(['cmd', CompleteCompletionsType(['op1', 'op2']), 'last'],
self.implementation.irrelevant_cmd))
assert_that(self.interpreter.complete('cmd o'), has_items('op1 ', 'op2 '))
with it('completes without the final space if is the last token'):
self.interpreter.add_command(Command(['cmd', CompleteCompletionsType(['op1', 'op2'])],
self.implementation.irrelevant_cmd))
assert_that(self.interpreter.complete('cmd o'), has_items('op1', 'op2'))
with describe('when autocompleting type with partial completions'):
with it('completes without the final space if is not the last token'):
self.interpreter.add_command(Command(['cmd', PartialCompletionsType(['op1', 'op2']), 'last'],
self.implementation.irrelevant_cmd))
assert_that(self.interpreter.complete('cmd o'), has_items('op1', 'op2'))
with describe('when autocompleting options type'):
with it('complete with all matching options'):
self.interpreter.add_command(Command(['cmd', basic_types.OptionsType(['op1', 'op2'])],
self.implementation.show_net_conf))
assert_that(self.interpreter.complete('cmd o'), has_items('op1', 'op2'))
with describe('when autocompleting a string type'):
with it('no autocomplete at all'):
self.interpreter.add_command(Command(['cmd', basic_types.StringType()],
self.implementation.show_net_conf))
assert_that(self.interpreter.complete('cmd '), has_length(0))
with describe('Filter Autocomplete'):
with it('autocomplete with space when starting a filter'):
assert_that(self.interpreter.complete('net show configuration |'), has_items(' '))
with it('autocomplete all available filters'):
assert_that(self.interpreter.complete('net show configuration | '), has_items('include'))
assert_that(self.interpreter.complete('net show configuration | '), has_items('exclude'))
with it('autocomplete include'):
assert_that(self.interpreter.complete('net show configuration | inclu'), has_items('include'))
with it('autocomplete exclude'):
assert_that(self.interpreter.complete('net show configuration | exclu'), has_items('exclude'))
class _TestCompletionsType(basic_types.BaseType):
def __init__(self, options):
self.options = options
def match(self, word, context, partial_line=None):
return word in self.options
def partial_match(self, word, context, partial_line=None):
for options in self.options:
if options.startswith(word):
return True
return False
class PlainCompletionsType(_TestCompletionsType):
def complete(self, token, tokens, context):
return self.options
class CompleteCompletionsType(_TestCompletionsType):
def complete(self, token, tokens, context):
return [(value, True) for value in self.options if value.startswith(tokens[-1])]
class PartialCompletionsType(_TestCompletionsType):
def complete(self, token, tokens, context):
return [(value, False) for value in self.options if value.startswith(tokens[-1])]
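# Note inferred from the specs above: a type's complete() may return plain
# strings or (value, flag) tuples. A True flag marks the value as a complete
# match, so the interpreter appends a trailing space when more tokens follow;
# a False flag marks a partial match and no trailing space is added.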
|
the-stack_0_22810 |
import math
from enum import Enum
from random import choice, randint
from app.database.sql_session import SQLSession
class OPERATIONS(Enum):
ADDITION = "+"
SUBTRACTION = "-"
MULTIPLICATION = "*"
DIVISION = "/"
class REAL(Enum):
PLUS = "" # you don't need to mark it with +, it's + by default
MINUS = "-"
class Generator:
MIN_COEFFICIENTS = None
MAX_COEFFICIENTS = None
MIN_COEFFICIENT_VALUE = None
MAX_COEFFICIENT_VALUE = None
MIN_COEFFICIENT_VALUE_SPECIAL = None
MAX_COEFFICIENT_VALUE_SPECIAL = None
MIN_EQUATIONS_IN_DB = None
EQUATIONS_TO_PUSH_IN_DB = None
TABLE = None
def __init__(self,
TABLE,
MIN_COEFFICIENTS,
MAX_COEFFICIENTS,
MIN_COEFFICIENT_VALUE,
MAX_COEFFICIENT_VALUE,
MIN_COEFFICIENT_VALUE_SPECIAL,
MAX_COEFFICIENT_VALUE_SPECIAL,
MIN_EQUATIONS_IN_DB,
EQUATIONS_TO_PUSH_IN_DB):
self.TABLE = TABLE
self.MIN_COEFFICIENTS = int(MIN_COEFFICIENTS)
self.MAX_COEFFICIENTS = int(MAX_COEFFICIENTS)
self.MIN_COEFFICIENT_VALUE = int(MIN_COEFFICIENT_VALUE)
self.MAX_COEFFICIENT_VALUE = int(MAX_COEFFICIENT_VALUE)
self.MIN_COEFFICIENT_VALUE_SPECIAL = int(MIN_COEFFICIENT_VALUE_SPECIAL)
self.MAX_COEFFICIENT_VALUE_SPECIAL = int(MAX_COEFFICIENT_VALUE_SPECIAL)
self.MIN_EQUATIONS_IN_DB = int(MIN_EQUATIONS_IN_DB)
self.EQUATIONS_TO_PUSH_IN_DB = int(EQUATIONS_TO_PUSH_IN_DB)
def generate_equation(self):
coefficients = randint(self.MIN_COEFFICIENTS, self.MAX_COEFFICIENTS)
final_equation = ""
for _ in range(0, coefficients - 1):
operation = choice(list(OPERATIONS))
operation_value = operation.value
real = choice(list(REAL))
real_value = real.value
if operation == OPERATIONS.DIVISION or operation == OPERATIONS.MULTIPLICATION:
coefficient_value = randint(self.MIN_COEFFICIENT_VALUE_SPECIAL, self.MAX_COEFFICIENT_VALUE_SPECIAL)
else:
coefficient_value = randint(self.MIN_COEFFICIENT_VALUE, self.MAX_COEFFICIENT_VALUE)
if real == REAL.MINUS:
final_equation += f"({real_value}{coefficient_value}){operation_value}"
else:
final_equation += f"{real_value}{coefficient_value}{operation_value}"
final_equation = final_equation[:-1] # get rid of additional operator
str_result = str(math.floor(eval(final_equation)))
SQLSession().insert(table=self.TABLE,
columns=("equation", "result", "winner"),
values_types=("%s", "%s", "%s"),
values=(final_equation, str_result, ""))
def auto_fill_db(self):
print("Checking database...")
if len(list(
SQLSession().find_where(table=self.TABLE, column="winner", search_value=""))) < self.MIN_EQUATIONS_IN_DB:
print(f"Filling database table {self.TABLE}...")
for _ in range(0, self.EQUATIONS_TO_PUSH_IN_DB):
self.generate_equation()
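# Usage sketch (an addition, not part of the original module): the table name
# and numeric bounds below are illustrative assumptions, and SQLSession is
# expected to be configured by the surrounding application.
if __name__ == "__main__":
    generator = Generator(TABLE="equations",
                          MIN_COEFFICIENTS=2, MAX_COEFFICIENTS=4,
                          MIN_COEFFICIENT_VALUE=1, MAX_COEFFICIENT_VALUE=100,
                          MIN_COEFFICIENT_VALUE_SPECIAL=1, MAX_COEFFICIENT_VALUE_SPECIAL=10,
                          MIN_EQUATIONS_IN_DB=50, EQUATIONS_TO_PUSH_IN_DB=25)
    generator.auto_fill_db()  # tops up the table when too few unanswered equations remain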
|
the-stack_0_22811 |
import numpy as np
print("Soma dos números ímpares e múltiplos de três no intervalo.")
init = int(input("Digite o intervalo inicial: "))
if init%3 == 0:
inicio = init
else:
inicio = init + 3 - init%3
fim = int(input("Digite o final do intervalo: "))+1
# Computing the sum with a loop
soma = 0
for i in range(inicio, fim, 3):
if i%2 != 0:
soma += i
print("O valor da soma dos números ímpares e múltiplos de três no intervalo de {} e {} é {}.".format(init,fim-1,soma))
# Computing with the arithmetic progression (AP) sum formula
if init%3 == 0: # Check whether the initial bound is the first term of the AP
if init%2 != 0:
inicio = init
else: # Otherwise adjust the start of the AP
inicio = init + 3
else:
inicio = init + 3 - init%3
if inicio%2 == 0:
inicio += 3
fim -= 1
if fim%3 == 0:
if fim%2 != 0:
final = fim
else:
final = fim - 3
else:
final = fim - fim%3
if final%2 == 0:
final -= 3
n = np.floor((final - inicio) / 6) + 1
somaPA = int((inicio + final) * n / 2)
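# Worked example of the AP shortcut (added commentary): for the interval 1..20
# the odd multiples of three are 3, 9, 15, so inicio = 3, final = 15, the common
# difference is 6, n = floor((15 - 3) / 6) + 1 = 3 terms, and
# somaPA = (3 + 15) * 3 / 2 = 27, matching the loop result 3 + 9 + 15 = 27.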
print("O valor da soma dos números ímpares e múltiplos de três no intervalo de {} e {} é {}.".format(init,fim,somaPA)) |
the-stack_0_22812 |
from civicboom.tests import *
class TestMembersController(TestController):
def test_member_page(self):
response = self.app.get(url('members', format='json'))
def test_member_qrcode(self):
response = self.app.get(url('member_action', id='unittest', action='qrcode'))
def test_member_list(self):
# by name
response = self.app.get(url('members', format="json", list="all", term="mr"))
self.assertIn("Mr U. Test", response)
# by username
# fff, this is pushed onto the second page of results...
#response = self.app.get(url('members', format="json", list="all", term="unit"))
#self.assertIn("Mr U. Test", response)
# invalid
response = self.app.get(url('members', format="json", list="all", term="waffleville"))
self.assertNotIn("Mr", response)
def test_member_show(self):
response = self.app.get(url('member', id='unittest', format='json'))
# case shouldn't matter
response = self.app.get(url('member', id='UnitTest', format='json'))
# non-existent members should 404
response = self.app.get(url('member', id='mrdoesnotexist', format='json'), status=404)
# show content from members
response = self.app.get(url('member_action', id='unittest', action='content' , format='json' ))
response = self.app.get(url('member_action', id='unittest', action='content' , format='json', list='articles'))
response = self.app.get(url('member_action', id='unittest', action='boomed' , format='json' ))
response = self.app.get(url('member_action', id='unittest', action='content_and_boomed', format='json' ))
# badly named content lists should give a "bad parameters" error
response = self.app.get(url('member_action', id='unittest', action='content', list='cake', format='json'), status=400)
# list equality - members show should be the same for anon users and non trusted followers
# AllanC - comparing output strings failed originally - because logged-in users have different 'action' lists
# solution - remove the action lists and re-dump to string to check the comparison
self.log_out()
response = self.app.get(url('member', id='unittest', format='json'))
response_json = json.loads(response.body)
del response_json['data']['actions']
data_1 = response_json
#json_string_1 = json.dumps(response_json)
self.log_in_as('kitten')
response = self.app.get(url('member', id='unittest', format='json'))
response_json = json.loads(response.body)
del response_json['data']['actions']
data_2 = response_json
#json_string_2 = json.dumps(response_json)
#self.assertEquals(json_string_1, json_string_2)
self.assertEquals(data_1, data_2)
def test_member_follow(self):
# no following self
response = self.app.post(
url('member_action', id='unittest', action='follow', format='json'),
params={'_authentication_token': self.auth_token},
status=400
)
# can follow someone else; refollow is error?
response = self.app.post(
url('member_action', id='puppy', action='follow', format='json'),
params={'_authentication_token': self.auth_token},
)
response = self.app.post(
url('member_action', id='puppy', action='follow', format='json'),
params={'_authentication_token': self.auth_token},
status=400
)
# can unfollow a followed person; re-unfollow is error?
response = self.app.post(
url('member_action', id='puppy', action='unfollow', format='json'),
params={'_authentication_token': self.auth_token},
)
response = self.app.post(
url('member_action', id='puppy', action='unfollow', format='json'),
params={'_authentication_token': self.auth_token},
status=400
)
|
the-stack_0_22813 |
import json as json_lib
import sys
import types
from socket import gethostname
from time import sleep
import jwt
import requests
import six
from requests.auth import HTTPBasicAuth
from six.moves.urllib.parse import urlparse, urlunparse
from .callresult import CallResult
from .defs import (
ENV_VERBOSE, ENV_HOST, ENV_ACCESS_KEY, ENV_SECRET_KEY, ENV_WEB_HOST,
ENV_FILES_HOST, ENV_OFFLINE_MODE, ENV_CLEARML_NO_DEFAULT_SERVER, ENV_AUTH_TOKEN, )
from .request import Request, BatchRequest # noqa: F401
from .token_manager import TokenManager
from ..config import load
from ..utils import get_http_session_with_retry, urllib_log_warning_setup
from ...debugging import get_logger
from ...utilities.pyhocon import ConfigTree
from ...version import __version__
try:
from OpenSSL.SSL import Error as SSLError
except ImportError:
from requests.exceptions import SSLError
class LoginError(Exception):
pass
class MaxRequestSizeError(Exception):
pass
class Session(TokenManager):
""" ClearML API Session class. """
_AUTHORIZATION_HEADER = "Authorization"
_WORKER_HEADER = ("X-ClearML-Worker", "X-Trains-Worker", )
_ASYNC_HEADER = ("X-ClearML-Async", "X-Trains-Async", )
_CLIENT_HEADER = ("X-ClearML-Client", "X-Trains-Client", )
_async_status_code = 202
_session_requests = 0
_session_initial_timeout = (3.0, 10.)
_session_timeout = (10.0, 300.)
_write_session_data_size = 15000
_write_session_timeout = (300.0, 300.)
_sessions_created = 0
_ssl_error_count_verbosity = 2
_offline_mode = ENV_OFFLINE_MODE.get()
_offline_default_version = '2.9'
_client = [(__package__.partition(".")[0], __version__)]
api_version = '2.1'
max_api_version = '2.1'
default_demo_host = "https://demoapi.demo.clear.ml"
default_host = default_demo_host
default_web = "https://demoapp.demo.clear.ml"
default_files = "https://demofiles.demo.clear.ml"
default_key = "EGRTCO8JMSIGI6S39GTP43NFWXDQOW"
default_secret = "x!XTov_G-#vspE*Y(h$Anm&DIc5Ou-F)jsl$PdOyj5wG1&E!Z8"
force_max_api_version = None
# TODO: add requests.codes.gateway_timeout once we support async commits
_retry_codes = [
requests.codes.bad_gateway,
requests.codes.service_unavailable,
requests.codes.bandwidth_limit_exceeded,
requests.codes.too_many_requests,
]
@property
def access_key(self):
return self.__access_key
@property
def secret_key(self):
return self.__secret_key
@property
def auth_token(self):
return self.__auth_token
@property
def host(self):
return self.__host
@property
def worker(self):
return self.__worker
def __init__(
self,
worker=None,
api_key=None,
secret_key=None,
host=None,
logger=None,
verbose=None,
initialize_logging=True,
config=None,
http_retries_config=None,
**kwargs
):
if config is not None:
self.config = config
else:
self.config = load()
if initialize_logging:
self.config.initialize_logging()
token_expiration_threshold_sec = self.config.get(
"auth.token_expiration_threshold_sec", 60
)
self._verbose = verbose if verbose is not None else ENV_VERBOSE.get()
self._logger = logger
self.__auth_token = None
if ENV_AUTH_TOKEN.get():
self.__access_key = self.__secret_key = None
self.__auth_token = ENV_AUTH_TOKEN.get()
# if we use a token override, make sure we are at least 3600 seconds (1 hour)
# away from the token expiration date; otherwise ask for a new one.
token_expiration_threshold_sec = max(token_expiration_threshold_sec, 3600)
else:
self.__access_key = api_key or ENV_ACCESS_KEY.get(
default=(self.config.get("api.credentials.access_key", None) or self.default_key)
)
if not self.access_key:
raise ValueError(
"Missing access_key. Please set in configuration file or pass in session init."
)
self.__secret_key = secret_key or ENV_SECRET_KEY.get(
default=(self.config.get("api.credentials.secret_key", None) or self.default_secret)
)
if not self.secret_key:
raise ValueError(
"Missing secret_key. Please set in configuration file or pass in session init."
)
# init the token manager
super(Session, self).__init__(
token_expiration_threshold_sec=token_expiration_threshold_sec, **kwargs
)
host = host or self.get_api_server_host(config=self.config)
if not host:
raise ValueError("host is required in init or config")
if ENV_CLEARML_NO_DEFAULT_SERVER.get() and host == self.default_demo_host:
raise ValueError(
"ClearML configuration could not be found (missing `~/clearml.conf` or Environment CLEARML_API_HOST)\n"
"To get started with ClearML: setup your own `clearml-server`, "
"or create a free account at https://app.community.clear.ml"
)
self._ssl_error_count_verbosity = self.config.get(
"api.ssl_error_count_verbosity", self._ssl_error_count_verbosity)
self.__host = host.strip("/")
http_retries_config = http_retries_config or self.config.get(
"api.http.retries", ConfigTree()).as_plain_ordered_dict()
http_retries_config["status_forcelist"] = self._retry_codes
self.__http_session = get_http_session_with_retry(**http_retries_config)
self.__http_session.write_timeout = self._write_session_timeout
self.__http_session.request_size_threshold = self._write_session_data_size
self.__worker = worker or self.get_worker_host_name()
self.__max_req_size = self.config.get("api.http.max_req_size", None)
if not self.__max_req_size:
raise ValueError("missing max request size")
self.client = ", ".join("{}-{}".format(*x) for x in self._client)
if self._offline_mode:
return
self.refresh_token()
# update api version from server response
try:
token_dict = TokenManager.get_decoded_token(self.token)
api_version = token_dict.get('api_version')
if not api_version:
api_version = '2.2' if token_dict.get('env', '') == 'prod' else Session.api_version
if token_dict.get('server_version'):
if not any(True for c in Session._client if c[0] == 'clearml-server'):
Session._client.append(('clearml-server', token_dict.get('server_version'), ))
Session.max_api_version = Session.api_version = str(api_version)
except (jwt.DecodeError, ValueError):
(self._logger or get_logger()).warning(
"Failed parsing server API level, defaulting to {}".format(Session.api_version))
# now set up the session retry reporting, so consecutive retries will show a warning
# we do that here, so if we have problems authenticating, we see them immediately
# notice: this is an across-the-board warning omission
urllib_log_warning_setup(total_retries=http_retries_config.get('total', 0), display_warning_after=3)
self.__class__._sessions_created += 1
if self.force_max_api_version and self.check_min_api_version(self.force_max_api_version):
Session.max_api_version = Session.api_version = str(self.force_max_api_version)
def _send_request(
self,
service,
action,
version=None,
method="get",
headers=None,
auth=None,
data=None,
json=None,
refresh_token_if_unauthorized=True,
):
""" Internal implementation for making a raw API request.
- Constructs the api endpoint name
- Injects the worker id into the headers
- Allows custom authorization using a requests auth object
- Intercepts `Unauthorized` responses and automatically attempts to refresh the session token once in this
case (only once). This is done since permissions are embedded in the token, and addresses a case where
server-side permissions have changed but are not reflected in the current token. Refreshing the token will
generate a token with the updated permissions.
"""
if self._offline_mode:
return None
res = None
host = self.host
headers = headers.copy() if headers else {}
for h in self._WORKER_HEADER:
headers[h] = self.worker
for h in self._CLIENT_HEADER:
headers[h] = self.client
token_refreshed_on_error = False
url = (
"{host}/v{version}/{service}.{action}"
if version
else "{host}/{service}.{action}"
).format(**locals())
retry_counter = 0
while True:
if data and len(data) > self._write_session_data_size:
timeout = self._write_session_timeout
elif self._session_requests < 1:
timeout = self._session_initial_timeout
else:
timeout = self._session_timeout
try:
res = self.__http_session.request(
method, url, headers=headers, auth=auth, data=data, json=json, timeout=timeout)
# except Exception as ex:
except SSLError as ex:
retry_counter += 1
# we should retry
if retry_counter >= self._ssl_error_count_verbosity:
(self._logger or get_logger()).warning("SSLError Retrying {}".format(ex))
sleep(0.1)
continue
if (
refresh_token_if_unauthorized
and res.status_code == requests.codes.unauthorized
and not token_refreshed_on_error
):
# it seems we're unauthorized, so we'll try to refresh our token once in case permissions changed since
# the last time we got the token, and try again
self.refresh_token()
token_refreshed_on_error = True
# try again
retry_counter += 1
continue
if (
res.status_code == requests.codes.service_unavailable
and self.config.get("api.http.wait_on_maintenance_forever", True)
):
(self._logger or get_logger()).warning(
"Service unavailable: {} is undergoing maintenance, retrying...".format(
host
)
)
retry_counter += 1
continue
break
self._session_requests += 1
return res
def add_auth_headers(self, headers):
headers[self._AUTHORIZATION_HEADER] = "Bearer {}".format(self.token)
return headers
def send_request(
self,
service,
action,
version=None,
method="get",
headers=None,
data=None,
json=None,
async_enable=False,
):
"""
Send a raw API request.
:param service: service name
:param action: action name
:param version: version number (default is the preconfigured api version)
:param method: method type (default is 'get')
:param headers: request headers (authorization and content type headers will be automatically added)
:param json: json to send in the request body (jsonable object or builtin types construct. if used,
content type will be application/json)
:param data: Dictionary, bytes, or file-like object to send in the request body
:param async_enable: whether request is asynchronous
:return: requests Response instance
"""
headers = self.add_auth_headers(
headers.copy() if headers else {}
)
if async_enable:
for h in self._ASYNC_HEADER:
headers[h] = "1"
return self._send_request(
service=service,
action=action,
version=version,
method=method,
headers=headers,
data=data,
json=json,
)
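# Usage sketch (comments only, not from the original source): with a Session
# built from the local clearml.conf credentials, a raw call looks like the
# following; the "tasks"/"get_all" service/action names and the page_size
# parameter are illustrative assumptions about the backend API.
#
#   session = Session()
#   res = session.send_request(service="tasks", action="get_all",
#                              method="post", json={"page_size": 10})
#   if res is not None and res.status_code == 200:
#       print(res.json())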
def send_request_batch(
self,
service,
action,
version=None,
headers=None,
data=None,
json=None,
method="get",
):
"""
Send a raw batch API request. Batch requests always use application/json-lines content type.
:param service: service name
:param action: action name
:param version: version number (default is the preconfigured api version)
:param headers: request headers (authorization and content type headers will be automatically added)
:param json: iterable of json items (batched items, jsonable objects or builtin types constructs). These will
be sent as a multi-line payload in the request body.
:param data: iterable of bytes objects (batched items). These will be sent as a multi-line payload in the
request body.
:param method: HTTP method
:return: requests Response instance
"""
if not all(
isinstance(x, (list, tuple, type(None), types.GeneratorType))
for x in (data, json)
):
raise ValueError("Expecting list, tuple or generator in 'data' or 'json'")
if not data and not json:
# Missing data (data or json), batch requests are meaningless without it.
return None
headers = headers.copy() if headers else {}
headers["Content-Type"] = "application/json-lines"
if data:
req_data = "\n".join(data)
else:
req_data = "\n".join(json_lib.dumps(x) for x in json)
cur = 0
results = []
while True:
size = self.__max_req_size
slice = req_data[cur: cur + size]
if not slice:
break
if len(slice) < size:
# this is the remainder, no need to search for newline
pass
elif slice[-1] != "\n":
# search for the last newline in order to send a coherent request
size = slice.rfind("\n") + 1
# readjust the slice
slice = req_data[cur: cur + size]
if not slice:
raise MaxRequestSizeError('Error: {}.{} request exceeds limit {} > {} bytes'.format(
service, action, len(req_data), self.__max_req_size))
res = self.send_request(
method=method,
service=service,
action=action,
data=slice,
headers=headers,
version=version,
)
results.append(res)
if res.status_code != requests.codes.ok:
break
cur += size
return results
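# Note on the chunking above (added commentary): each slice is capped at
# __max_req_size bytes and then trimmed back to its last newline
# (slice.rfind("\n") + 1) so no JSON line is ever split across two requests;
# if a slice contains no newline at all, a single item is larger than the
# limit and MaxRequestSizeError is raised.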
def validate_request(self, req_obj):
""" Validate an API request against the current version and the request's schema """
try:
# make sure we're using a compatible version for this request
# validate the request (checks required fields and specific field version restrictions)
validate = req_obj.validate
except AttributeError:
raise TypeError(
'"req_obj" parameter must be an backend_api.session.Request object'
)
validate()
def send_async(self, req_obj):
"""
Asynchronously sends an API request using a request object.
:param req_obj: The request object
:type req_obj: Request
:return: CallResult object containing the raw response, response metadata and parsed response object.
"""
return self.send(req_obj=req_obj, async_enable=True)
def send(self, req_obj, async_enable=False, headers=None):
"""
Sends an API request using a request object.
:param req_obj: The request object
:type req_obj: Request
:param async_enable: Request this method be executed in an asynchronous manner
:param headers: Additional headers to send with request
:return: CallResult object containing the raw response, response metadata and parsed response object.
"""
self.validate_request(req_obj)
if self._offline_mode:
return None
if isinstance(req_obj, BatchRequest):
# TODO: support async for batch requests as well
if async_enable:
raise NotImplementedError(
"Async behavior is currently not implemented for batch requests"
)
json_data = req_obj.get_json()
res = self.send_request_batch(
service=req_obj._service,
action=req_obj._action,
version=req_obj._version,
json=json_data,
method=req_obj._method,
headers=headers,
)
# TODO: handle multiple results in this case
if res is not None:
try:
res = next(r for r in res if r.status_code != 200)
except StopIteration:
# all are 200
res = res[0]
else:
res = self.send_request(
service=req_obj._service,
action=req_obj._action,
version=req_obj._version,
json=req_obj.to_dict(),
method=req_obj._method,
async_enable=async_enable,
headers=headers,
)
call_result = CallResult.from_result(
res=res,
request_cls=req_obj.__class__,
logger=self._logger,
service=req_obj._service,
action=req_obj._action,
session=self,
)
return call_result
@classmethod
def get_api_server_host(cls, config=None):
if not config:
from ...config import config_obj
config = config_obj
return ENV_HOST.get(default=(config.get("api.api_server", None) or
config.get("api.host", None) or cls.default_host)).rstrip('/')
@classmethod
def get_app_server_host(cls, config=None):
if not config:
from ...config import config_obj
config = config_obj
# get from config/environment
web_host = ENV_WEB_HOST.get(default=config.get("api.web_server", "")).rstrip('/')
if web_host:
return web_host
# return default
host = cls.get_api_server_host(config)
if host == cls.default_host and cls.default_web:
return cls.default_web
# compose ourselves
if '://demoapi.' in host:
return host.replace('://demoapi.', '://demoapp.', 1)
if '://api.' in host:
return host.replace('://api.', '://app.', 1)
parsed = urlparse(host)
if parsed.port == 8008:
return host.replace(':8008', ':8080', 1)
raise ValueError('Could not detect ClearML web application server')
@classmethod
def get_files_server_host(cls, config=None):
if not config:
from ...config import config_obj
config = config_obj
# get from config/environment
files_host = ENV_FILES_HOST.get(default=(config.get("api.files_server", ""))).rstrip('/')
if files_host:
return files_host
# return default
host = cls.get_api_server_host(config)
if host == cls.default_host and cls.default_files:
return cls.default_files
# compose ourselves
app_host = cls.get_app_server_host(config)
parsed = urlparse(app_host)
if parsed.port:
parsed = parsed._replace(netloc=parsed.netloc.replace(':%d' % parsed.port, ':8081', 1))
elif parsed.netloc.startswith('demoapp.'):
parsed = parsed._replace(netloc=parsed.netloc.replace('demoapp.', 'demofiles.', 1))
elif parsed.netloc.startswith('app.'):
parsed = parsed._replace(netloc=parsed.netloc.replace('app.', 'files.', 1))
else:
parsed = parsed._replace(netloc=parsed.netloc + ':8081')
return urlunparse(parsed)
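# Worked examples of the host derivations above (added commentary; hosts are
# illustrative):
#   api "https://api.example.com"     -> web "https://app.example.com"      -> files "https://files.example.com"
#   api "https://server.example:8008" -> web "https://server.example:8080"  -> files "https://server.example:8081"
#   api default demo host             -> web default_web                    -> files default_files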
@classmethod
def check_min_api_version(cls, min_api_version):
"""
Return True if Session.api_version is greater than or equal to min_api_version
"""
# If no session was created, create a default one, in order to get the backend api version.
if cls._sessions_created <= 0:
if cls._offline_mode:
# allow to change the offline mode version by setting ENV_OFFLINE_MODE to the required API version
if cls.api_version != cls._offline_default_version:
offline_api = ENV_OFFLINE_MODE.get(converter=lambda x: x)
if offline_api:
try:
# check cast to float, but leave original str if we pass it.
# minimum version is 2.3
if float(offline_api) >= 2.3:
cls._offline_default_version = str(offline_api)
except ValueError:
pass
cls.max_api_version = cls.api_version = cls._offline_default_version
else:
# noinspection PyBroadException
try:
cls()
except Exception:
pass
return cls._version_tuple(cls.api_version) >= cls._version_tuple(str(min_api_version))
@classmethod
def check_min_api_server_version(cls, min_api_version):
"""
Return True if Session.max_api_version is greater than or equal to min_api_version.
Note this is the API version reported by the server, not the SDK's maximum supported API version.
"""
if cls.check_min_api_version(min_api_version):
return True
return cls._version_tuple(cls.max_api_version) >= cls._version_tuple(str(min_api_version))
@classmethod
def get_worker_host_name(cls):
from ...config import dev_worker_name
return dev_worker_name() or gethostname()
@classmethod
def get_clients(cls):
return cls._client
@staticmethod
def _version_tuple(v):
v = tuple(map(int, (v.split("."))))
return v + (0,) * max(0, 3 - len(v))
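# e.g. _version_tuple("2.13") == (2, 13, 0), so version checks compare
# numerically ((2, 13, 0) >= (2, 3, 0)) instead of lexically ("2.13" < "2.3").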
def _do_refresh_token(self, old_token, exp=None):
""" TokenManager abstract method implementation.
Here we ignore the old token and simply obtain a new token.
"""
verbose = self._verbose and self._logger
if verbose:
self._logger.info(
"Refreshing token from {} (access_key={}, exp={})".format(
self.host, self.access_key, exp
)
)
headers = None
# use token only once (the second time the token is already built into the http session)
if self.__auth_token:
headers = dict(Authorization="Bearer {}".format(self.__auth_token))
self.__auth_token = None
auth = HTTPBasicAuth(self.access_key, self.secret_key) if self.access_key and self.secret_key else None
res = None
try:
data = {"expiration_sec": exp} if exp else {}
res = self._send_request(
service="auth",
action="login",
auth=auth,
json=data,
headers=headers,
refresh_token_if_unauthorized=False,
)
try:
resp = res.json()
except ValueError:
resp = {}
if res.status_code != 200:
msg = resp.get("meta", {}).get("result_msg", res.reason)
raise LoginError(
"Failed getting token (error {} from {}): {}".format(
res.status_code, self.host, msg
)
)
if verbose:
self._logger.info("Received new token")
# make sure we keep the token updated on the OS environment, so that child processes will have access.
if ENV_AUTH_TOKEN.get():
ENV_AUTH_TOKEN.set(resp["data"]["token"])
return resp["data"]["token"]
except LoginError:
six.reraise(*sys.exc_info())
except KeyError as ex:
# check if this is a misconfigured api server (getting 200 without the data section)
if res and res.status_code == 200:
raise ValueError('It seems *api_server* is misconfigured. '
'Is this the ClearML API server {} ?'.format(self.host))
else:
raise LoginError("Response data mismatch: No 'token' in 'data' value from res, receive : {}, "
"exception: {}".format(res, ex))
except Exception as ex:
raise LoginError('Unrecognized Authentication Error: {} {}'.format(type(ex), ex))
def __str__(self):
return "{self.__class__.__name__}[{self.host}, {self.access_key}/{secret_key}]".format(
self=self, secret_key=self.secret_key[:5] + "*" * (len(self.secret_key) - 5)
)
|
the-stack_0_22816 |
import numpy
import six
from chainer import cuda
from chainer.functions.array import concat
from chainer.functions.pooling import max_pooling_2d
from chainer.functions.pooling import pooling_2d
class SpatialPyramidPooling2D(pooling_2d.Pooling2D):
"""Spatial pyramid pooling over a set of 2d planes."""
def __init__(self, x_shape, pyramid_height, pooling_class, use_cudnn=True):
bottom_c, bottom_h, bottom_w = x_shape
self.pyramid_height = pyramid_height
# create pooling functions for different pyramid levels
out_dim = 0
self.split_inds = []
self.poolers = []
for pyramid_level in six.moves.range(pyramid_height):
num_bins = int(2 ** pyramid_level)
ksize_h = int(numpy.ceil(bottom_h / (float(num_bins))))
remainder_h = ksize_h * num_bins - bottom_h
pad_h = remainder_h // 2
ksize_w = int(numpy.ceil(bottom_w / (float(num_bins))))
remainder_w = ksize_w * num_bins - bottom_w
pad_w = remainder_w // 2
ksize = (ksize_h, ksize_w)
pad = (pad_h, pad_w)
if pooling_class is max_pooling_2d.MaxPooling2D:
pooler = pooling_class(ksize=ksize, stride=None, pad=pad,
cover_all=True, use_cudnn=use_cudnn)
self.poolers.append(pooler)
else:
raise NotImplementedError()
out_dim += bottom_c * (num_bins ** 2)
if pyramid_level < pyramid_height - 1:
self.split_inds.append(out_dim)
def forward(self, x):
self.ys = []
for pooler in self.poolers:
y = pooler.forward(x)[0]
n, c, h, w = pooler.out_shape = y.shape
self.ys.append(y.reshape((n, c * h * w, 1, 1)))
return concat.Concat(axis=1).forward(self.ys)
def backward(self, x, gy):
xp = cuda.get_array_module(*x)
gx = xp.zeros_like(x[0])
gys = xp.split(gy[0], self.split_inds, axis=1)
for pooler, gy in zip(self.poolers, gys):
gy = gy.reshape(pooler.out_shape)
gx += pooler.backward(x, (gy,))[0]
return gx,
def spatial_pyramid_pooling_2d(x, pyramid_height, pooling_class,
use_cudnn=True):
"""Spatial pyramid pooling function.
It outputs a fixed-length vector regardless of input feature map size.
It applies pooling operations to the input 4D array ``x`` with different
kernel sizes and padding sizes, then flattens every dimension except the
first dimension of each pooling result, and finally concatenates the results
along the second dimension.
At :math:`i`-th pyramid level, the kernel size
:math:`(k_h^{(i)}, k_w^{(i)})` and padding size
:math:`(p_h^{(i)}, p_w^{(i)})` of pooling operation are calculated as
below:
.. math::
k_h^{(i)} &= \\lceil b_h / 2^i \\rceil, \\\\
k_w^{(i)} &= \\lceil b_w / 2^i \\rceil, \\\\
p_h^{(i)} &= (2^i k_h^{(i)} - b_h) / 2, \\\\
p_w^{(i)} &= (2^i k_w^{(i)} - b_w) / 2,
where :math:`\\lceil \\cdot \\rceil` denotes the ceiling function, and
:math:`b_h, b_w` are height and width of input variable ``x``,
respectively. Note that index of pyramid level :math:`i` is zero-based.
See detail in paper: `Spatial Pyramid Pooling in Deep Convolutional \
Networks for Visual Recognition \
<https://arxiv.org/abs/1406.4729>`_.
Args:
x (~chainer.Variable): Input variable. The shape of ``x`` should be
``(batchsize, # of channels, height, width)``.
pyramid_height (int): Number of pyramid levels
pooling_class (MaxPooling2D or AveragePooling2D):
Only the MaxPooling2D class is supported for now.
use_cudnn (bool): If ``True`` and cuDNN is enabled, then this function
uses cuDNN as the core implementation.
Returns:
~chainer.Variable: Output variable. The shape of the output variable
will be :math:`(batchsize, c \\sum_{h=0}^{H-1} 2^{2h}, 1, 1)`,
where :math:`c` is the number of channels of input variable ``x``
and :math:`H` is the number of pyramid levels.
.. note::
This function uses pooling classes as components to perform
spatial pyramid pooling. It currently supports only
:class:`~functions.MaxPooling2D` as the elemental pooling operator.
"""
return SpatialPyramidPooling2D(x.shape[1:], pyramid_height,
pooling_class, use_cudnn=use_cudnn)(x)
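# Minimal usage sketch (an addition, assuming the Chainer version this module
# targets, where function objects are called directly on Variables): pooling a
# (2, 3, 32, 24) batch at three pyramid levels yields
# (2, 3 * (1 + 4 + 16), 1, 1) == (2, 63, 1, 1).
if __name__ == "__main__":
    import chainer
    x = chainer.Variable(numpy.random.rand(2, 3, 32, 24).astype(numpy.float32))
    y = spatial_pyramid_pooling_2d(x, 3, max_pooling_2d.MaxPooling2D)
    print(y.data.shape)  # expected: (2, 63, 1, 1)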
|
the-stack_0_22817 |
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lispapi.py
#
# This file contains API definitions that users call in their Python programs.
#
# When this file is changed, remote file lispapi.html and click the "API
# Documentation" button on the landing page to build a pydoc lispapi.txt
# file.
#
# If you modify this file, please run:
#
# pydoc lispapi.py > lispapi.txt
#
# So the API documentation stays up to date. Then commit lispapi.txt to the
# repo.
#
# -----------------------------------------------------------------------------
if 64 - 64: i11iIiiIii
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
if 30 - 30: o0oOOo0O0Ooo - O0 % o0oOOo0O0Ooo - OoooooooOO * O0 * OoooooooOO
if 60 - 60: iIii1I11I1II1 / i1IIi * oO0o - I1ii11iIi11i + o0oOOo0O0Ooo
if 94 - 94: i1IIi % Oo0Ooo
if 68 - 68: Ii1I / O0
if 46 - 46: O0 * II111iiii / IiII * Oo0Ooo * iII111i . I11i
if 62 - 62: i11iIiiIii - II111iiii % I1Ii111 - iIii1I11I1II1 . I1ii11iIi11i . II111iiii
if 61 - 61: oO0o / OoOoOO00 / iII111i * OoO0O00 . II111iiii
if 1 - 1: II111iiii - I1ii11iIi11i % i11iIiiIii + IiII . I1Ii111
if 55 - 55: iIii1I11I1II1 - I1IiiI . Ii1I * IiII * i1IIi / iIii1I11I1II1
if 79 - 79: oO0o + I1Ii111 . ooOoO0o * IiII % I11i . I1IiiI
if 94 - 94: iII111i * Ii1I / IiII . i1IIi * iII111i
if 47 - 47: i1IIi % i11iIiiIii
if 20 - 20: ooOoO0o * II111iiii
if 65 - 65: o0oOOo0O0Ooo * iIii1I11I1II1 * ooOoO0o
if 18 - 18: iIii1I11I1II1 / I11i + oO0o / Oo0Ooo - II111iiii - I11i
if 1 - 1: I11i - OOooOOo % O0 + I1IiiI - iII111i / I11i
if 31 - 31: OoO0O00 + II111iiii
if 13 - 13: OOooOOo * oO0o * I1IiiI
if 55 - 55: II111iiii
if 43 - 43: OoOoOO00 - i1IIi + I1Ii111 + Ii1I
if 17 - 17: o0oOOo0O0Ooo
if 64 - 64: Ii1I % i1IIi % OoooooooOO
if 3 - 3: iII111i + O0
import requests
import json
import os
if 42 - 42: OOooOOo / i1IIi + i11iIiiIii - Ii1I
REQ_TIMEOUT = 3
if 78 - 78: OoO0O00
if 18 - 18: O0 - iII111i / iII111i + ooOoO0o % ooOoO0o - IiII
if 62 - 62: iII111i - IiII - OoOoOO00 % i1IIi / oO0o
class api_init ( ) :
def __init__ ( self , host , user , pw = None , port = 8080 , api_debug = False ,
do_get = True ) :
if 77 - 77: II111iiii - II111iiii . I1IiiI / o0oOOo0O0Ooo
if 14 - 14: I11i % O0
if 41 - 41: i1IIi + I1Ii111 + OOooOOo - IiII
if 77 - 77: Oo0Ooo . IiII % ooOoO0o
if 42 - 42: oO0o - i1IIi / i11iIiiIii + OOooOOo + OoO0O00
if 17 - 17: oO0o . Oo0Ooo . I1ii11iIi11i
if 3 - 3: OoOoOO00 . Oo0Ooo . I1IiiI / Ii1I
self . host = host
self . user = user
if ( pw == None ) : pw = os . getenv ( "LISPAPI_PW_" + host )
if ( pw == None ) : pw = os . getenv ( "LISPAPI_PW" )
self . pw = pw
if 38 - 38: II111iiii % i11iIiiIii . ooOoO0o - OOooOOo + Ii1I
Ooooo0Oo00oO0 = "https://"
if ( port < 0 ) :
port = - port
Ooooo0Oo00oO0 = Ooooo0Oo00oO0 . replace ( "s" , "" )
if 12 - 12: iIii1I11I1II1 * I1IiiI . ooOoO0o % I11i + O0
self . url = Ooooo0Oo00oO0 + self . host + ":{}/lisp/api/" . format ( str ( port ) )
if 70 - 70: Ii1I . oO0o * ooOoO0o . Ii1I
self . enable_status = None
self . debug_status = None
self . api_debug = api_debug
self . enable_status = None
self . debug_status = None
self . xtr_parameters = None
if 35 - 35: o0oOOo0O0Ooo + iII111i + iII111i
if 11 - 11: iII111i - OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
if 74 - 74: iII111i * O0
if 89 - 89: oO0o + Oo0Ooo
if 3 - 3: i1IIi / I1IiiI % I11i * i11iIiiIii / O0 * I11i
if ( do_get ) :
self . get_enable ( )
self . get_debug ( )
self . get_xtr_parameters ( )
if 49 - 49: oO0o % Ii1I + i1IIi . I1IiiI % I1ii11iIi11i
if 48 - 48: I11i + I11i / II111iiii / iIii1I11I1II1
if 20 - 20: o0oOOo0O0Ooo
def api_print ( self ) :
print ( "url: {}@{}, enable-status: {}, debug-status: {}" . format ( self . user , self . url , self . enable_status , self . debug_status ) )
if 77 - 77: OoOoOO00 / I11i
if 98 - 98: iIii1I11I1II1 / i1IIi / i11iIiiIii / o0oOOo0O0Ooo
if 28 - 28: OOooOOo - IiII . IiII + OoOoOO00 - OoooooooOO + O0
if 95 - 95: OoO0O00 % oO0o . O0
def api_enable_debug ( self ) :
self . api_debug = True
if 15 - 15: ooOoO0o / Ii1I . Ii1I - i1IIi
if 53 - 53: IiII + I1IiiI * oO0o
if 61 - 61: i1IIi * OOooOOo / OoooooooOO . i11iIiiIii . OoOoOO00
def api_disable_debug ( self ) :
self . api_debug = False
if 60 - 60: I11i / I11i
if 46 - 46: Ii1I * OOooOOo - OoO0O00 * oO0o - I1Ii111
if 83 - 83: OoooooooOO
def get_enable ( self , force_query = False ) :
if ( force_query == False and self . enable_status != None ) :
if 31 - 31: II111iiii - OOooOOo . I1Ii111 % OoOoOO00 - O0
if 4 - 4: II111iiii / ooOoO0o . iII111i
if 58 - 58: OOooOOo * i11iIiiIii / OoOoOO00 % I1Ii111 - I1ii11iIi11i / oO0o
return ( self . enable_status )
if 50 - 50: I1IiiI
Ii1i11IIii1I = self . __get ( "lisp enable" )
self . enable_status = Ii1i11IIii1I
return ( Ii1i11IIii1I )
if 52 - 52: o0oOOo0O0Ooo - OoooooooOO + Ii1I + Ii1I - o0oOOo0O0Ooo / I1Ii111
if 44 - 44: ooOoO0o . i1IIi - I1ii11iIi11i . O0 - ooOoO0o
def get_debug ( self ) :
if ( self . debug_status != None ) : return ( self . debug_status )
if 92 - 92: iII111i . I11i + o0oOOo0O0Ooo
if 28 - 28: i1IIi * Oo0Ooo - o0oOOo0O0Ooo * IiII * Ii1I / OoO0O00
if 94 - 94: II111iiii % I1ii11iIi11i / OoOoOO00 * iIii1I11I1II1
Ii1i11IIii1I = self . __get ( "lisp debug" )
self . debug_status = Ii1i11IIii1I
return ( Ii1i11IIii1I )
if 54 - 54: o0oOOo0O0Ooo - I1IiiI + OoooooooOO
if 70 - 70: Ii1I / I11i . iII111i % Oo0Ooo
def get_xtr_parameters ( self ) :
if ( self . xtr_parameters != None ) : return ( self . xtr_parameters )
if 67 - 67: OoOoOO00 * o0oOOo0O0Ooo . IiII - OoO0O00 * o0oOOo0O0Ooo
if 46 - 46: OOooOOo + OoOoOO00 . I1IiiI * oO0o % IiII
if 86 - 86: I1IiiI + Ii1I % i11iIiiIii * oO0o . ooOoO0o * I11i
Ii1i11IIii1I = self . __get ( "lisp xtr-parameters" )
self . xtr_parameters = Ii1i11IIii1I
return ( Ii1i11IIii1I )
if 44 - 44: oO0o
if 88 - 88: I1Ii111 % Ii1I . II111iiii
def is_itr_enabled ( self ) :
return ( self . enable_status and self . enable_status [ "itr" ] == "yes" )
if 38 - 38: o0oOOo0O0Ooo
if 57 - 57: O0 / oO0o * I1Ii111 / OoOoOO00 . II111iiii
if 26 - 26: iII111i
def is_etr_enabled ( self ) :
return ( self . enable_status and self . enable_status [ "etr" ] == "yes" )
if 91 - 91: OoO0O00 . I1ii11iIi11i + OoO0O00 - iII111i / OoooooooOO
if 39 - 39: I1ii11iIi11i / ooOoO0o - II111iiii
if 98 - 98: I1ii11iIi11i / I11i % oO0o . OoOoOO00
def is_rtr_enabled ( self ) :
return ( self . enable_status and self . enable_status [ "rtr" ] == "yes" )
if 91 - 91: oO0o % Oo0Ooo
if 64 - 64: I11i % iII111i - I1Ii111 - oO0o
if 31 - 31: I11i - II111iiii . I11i
def is_mr_enabled ( self ) :
return ( self . enable_status and
# o0oOOo0O0Ooo . ooOoO0o
self . enable_status [ "map-resolver" ] == "yes" )
if 54 - 54: II111iiii % OoOoOO00 % I11i % iIii1I11I1II1 + iIii1I11I1II1 * ooOoO0o
if 87 - 87: ooOoO0o * Oo0Ooo % i11iIiiIii % OoOoOO00 - OOooOOo
def is_ms_enabled ( self ) :
return ( self . enable_status and
# iII111i - iIii1I11I1II1 * i11iIiiIii / I1ii11iIi11i * I1Ii111
self . enable_status [ "map-server" ] == "yes" )
if 23 - 23: iII111i
if 91 - 91: iIii1I11I1II1 + I1Ii111
def is_ddt_enabled ( self ) :
return ( self . enable_status and self . enable_status [ "ddt-node" ] == "yes" )
if 31 - 31: IiII . OoOoOO00 . OOooOOo
if 75 - 75: I11i + OoO0O00 . OoOoOO00 . ooOoO0o + Oo0Ooo . OoO0O00
if 96 - 96: OOooOOo . ooOoO0o - Oo0Ooo + iIii1I11I1II1 / OoOoOO00 * OOooOOo
def is_itr_debug_enabled ( self ) :
return ( self . debug_status and self . debug_status [ "itr" ] == "yes" )
if 65 - 65: Ii1I . iIii1I11I1II1 / O0 - Ii1I
if 21 - 21: I1IiiI * iIii1I11I1II1
if 91 - 91: IiII
def is_etr_debug_enabled ( self ) :
return ( self . debug_status and self . debug_status [ "etr" ] == "yes" )
if 15 - 15: II111iiii
if 18 - 18: i11iIiiIii . i1IIi % OoooooooOO / O0
if 75 - 75: OoOoOO00 % o0oOOo0O0Ooo % o0oOOo0O0Ooo . I1Ii111
def is_rtr_debug_enabled ( self ) :
return ( self . debug_status and self . debug_status [ "rtr" ] == "yes" )
if 5 - 5: o0oOOo0O0Ooo * ooOoO0o + OoOoOO00 . OOooOOo + OoOoOO00
if 91 - 91: O0
if 61 - 61: II111iiii
def is_mr_debug_enabled ( self ) :
return ( self . debug_status and
# I1IiiI - o0oOOo0O0Ooo * oO0o + O0
self . debug_status [ "map-resolver" ] == "yes" )
if 71 - 71: Ii1I - i1IIi % i1IIi + OoOoOO00 + O0 + OoO0O00
if 67 - 67: I1Ii111 . iII111i . O0
def is_ms_debug_enabled ( self ) :
return ( self . debug_status and self . debug_status [ "map-server" ] == "yes" )
if 10 - 10: I1ii11iIi11i % I1ii11iIi11i - iIii1I11I1II1 / OOooOOo + Ii1I
if 87 - 87: oO0o * I1ii11iIi11i + OOooOOo / iIii1I11I1II1 / iII111i
if 37 - 37: iII111i - ooOoO0o * oO0o % i11iIiiIii - I1Ii111
def is_ddt_debug_enabled ( self ) :
return ( self . debug_status and self . debug_status [ "ddt-node" ] == "yes" )
if 83 - 83: I11i / I1IiiI
if 34 - 34: IiII
if 57 - 57: oO0o . I11i . i1IIi
def enable_itr ( self ) :
if ( self . enable_status == None ) : return
if 42 - 42: I11i + I1ii11iIi11i % O0
if 6 - 6: oO0o
self . enable_status [ "itr" ] = "yes"
Ii1i11IIii1I = self . __put ( "lisp enable" , self . enable_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 68 - 68: OoOoOO00 - OoO0O00
if 28 - 28: OoO0O00 . OOooOOo / OOooOOo + Oo0Ooo . I1ii11iIi11i
def enable_etr ( self ) :
if ( self . enable_status == None ) : return
if 1 - 1: iIii1I11I1II1 / II111iiii
if 33 - 33: I11i
self . enable_status [ "etr" ] = "yes"
Ii1i11IIii1I = self . __put ( "lisp enable" , self . enable_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 18 - 18: o0oOOo0O0Ooo % iII111i * O0
if 87 - 87: i11iIiiIii
def enable_rtr ( self ) :
if ( self . enable_status == None ) : return
if 93 - 93: I1ii11iIi11i - OoO0O00 % i11iIiiIii . iII111i / iII111i - I1Ii111
if 9 - 9: I1ii11iIi11i / Oo0Ooo - I1IiiI / OoooooooOO / iIii1I11I1II1 - o0oOOo0O0Ooo
self . enable_status [ "rtr" ] = "yes"
Ii1i11IIii1I = self . __put ( "lisp enable" , self . enable_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 91 - 91: iII111i % i1IIi % iIii1I11I1II1
if 20 - 20: OOooOOo % Ii1I / Ii1I + Ii1I
def enable_mr ( self ) :
if ( self . enable_status == None ) : return
if 45 - 45: oO0o - IiII - OoooooooOO - OoO0O00 . II111iiii / O0
if 51 - 51: O0 + iII111i
self . enable_status [ "map-resolver" ] = "yes"
Ii1i11IIii1I = self . __put ( "lisp enable" , self . enable_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 8 - 8: oO0o * OoOoOO00 - Ii1I - OoO0O00 * OOooOOo % I1IiiI
if 48 - 48: O0
def enable_ms ( self ) :
if ( self . enable_status == None ) : return
if 11 - 11: I11i + OoooooooOO - OoO0O00 / o0oOOo0O0Ooo + Oo0Ooo . II111iiii
if 41 - 41: Ii1I - O0 - O0
self . enable_status [ "map-server" ] = "yes"
Ii1i11IIii1I = self . __put ( "lisp enable" , self . enable_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 68 - 68: OOooOOo % I1Ii111
if 88 - 88: iIii1I11I1II1 - ooOoO0o + OOooOOo
def enable_ddt ( self ) :
if ( self . enable_status == None ) : return
if 40 - 40: I1IiiI * Ii1I + OOooOOo % iII111i
if 74 - 74: oO0o - Oo0Ooo + OoooooooOO + I1Ii111 / OoOoOO00
self . enable_status [ "ddt-node" ] = "yes"
Ii1i11IIii1I = self . __put ( "lisp enable" , self . enable_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 23 - 23: O0
if 85 - 85: Ii1I
def disable_itr ( self ) :
if ( self . enable_status == None ) : return
if 84 - 84: I1IiiI . iIii1I11I1II1 % OoooooooOO + Ii1I % OoooooooOO % OoO0O00
if 42 - 42: OoO0O00 / I11i / o0oOOo0O0Ooo + iII111i / OoOoOO00
self . enable_status [ "itr" ] = "no"
Ii1i11IIii1I = self . __put ( "lisp enable" , self . enable_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 84 - 84: ooOoO0o * II111iiii + Oo0Ooo
if 53 - 53: iII111i % II111iiii . IiII - iIii1I11I1II1 - IiII * II111iiii
def disable_etr ( self ) :
if ( self . enable_status == None ) : return
if 77 - 77: iIii1I11I1II1 * OoO0O00
if 95 - 95: I1IiiI + i11iIiiIii
self . enable_status [ "etr" ] = "no"
Ii1i11IIii1I = self . __put ( "lisp enable" , self . enable_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 6 - 6: ooOoO0o / i11iIiiIii + iII111i * oO0o
if 80 - 80: II111iiii
def disable_rtr ( self ) :
if ( self . enable_status == None ) : return
if 83 - 83: I11i . i11iIiiIii + II111iiii . o0oOOo0O0Ooo * I11i
if 53 - 53: II111iiii
self . enable_status [ "rtr" ] = "no"
Ii1i11IIii1I = self . __put ( "lisp enable" , self . enable_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 31 - 31: OoO0O00
if 80 - 80: I1Ii111 . i11iIiiIii - o0oOOo0O0Ooo
def disable_mr ( self ) :
if ( self . enable_status == None ) : return
if 25 - 25: OoO0O00
if 62 - 62: OOooOOo + O0
self . enable_status [ "map-resolver" ] = "no"
Ii1i11IIii1I = self . __put ( "lisp enable" , self . enable_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 98 - 98: o0oOOo0O0Ooo
if 51 - 51: Oo0Ooo - oO0o + II111iiii * Ii1I . I11i + oO0o
def disable_ms ( self ) :
if ( self . enable_status == None ) : return
if 78 - 78: i11iIiiIii / iII111i - Ii1I / OOooOOo + oO0o
if 82 - 82: Ii1I
self . enable_status [ "map-server" ] = "no"
Ii1i11IIii1I = self . __put ( "lisp enable" , self . enable_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 46 - 46: OoooooooOO . i11iIiiIii
if 94 - 94: o0oOOo0O0Ooo * Ii1I / Oo0Ooo / Ii1I
def disable_ddt ( self ) :
if ( self . enable_status == None ) : return
if 87 - 87: Oo0Ooo . IiII
if 75 - 75: ooOoO0o + OoOoOO00 + o0oOOo0O0Ooo * I11i % oO0o . iII111i
self . enable_status [ "ddt-node" ] = "no"
Ii1i11IIii1I = self . __put ( "lisp enable" , self . enable_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 55 - 55: OOooOOo . I1IiiI
if 61 - 61: Oo0Ooo % IiII . Oo0Ooo
def enable_core_debug ( self ) :
if ( self . debug_status == None ) : return
if 100 - 100: I1Ii111 * O0
if 64 - 64: OOooOOo % iIii1I11I1II1 * oO0o
self . debug_status [ "core" ] = "yes"
Ii1i11IIii1I = self . __put ( "lisp debug" , self . debug_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 79 - 79: O0
if 78 - 78: I1ii11iIi11i + OOooOOo - I1Ii111
def enable_itr_debug ( self ) :
if ( self . debug_status == None ) : return
if 38 - 38: o0oOOo0O0Ooo - oO0o + iIii1I11I1II1 / OoOoOO00 % Oo0Ooo
if 57 - 57: OoO0O00 / ooOoO0o
self . debug_status [ "itr" ] = "yes"
Ii1i11IIii1I = self . __put ( "lisp debug" , self . debug_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 29 - 29: iIii1I11I1II1 + OoOoOO00 * OoO0O00 * OOooOOo . I1IiiI * I1IiiI
if 7 - 7: IiII * I1Ii111 % Ii1I - o0oOOo0O0Ooo
def enable_etr_debug ( self ) :
if ( self . debug_status == None ) : return
if 13 - 13: Ii1I . i11iIiiIii
if 56 - 56: I1ii11iIi11i % O0 - I1IiiI
self . debug_status [ "etr" ] = "yes"
Ii1i11IIii1I = self . __put ( "lisp debug" , self . debug_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 100 - 100: Ii1I - O0 % oO0o * OOooOOo + I1IiiI
if 88 - 88: OoooooooOO - OoO0O00 * O0 * OoooooooOO . OoooooooOO
def enable_rtr_debug ( self ) :
if ( self . debug_status == None ) : return
if 33 - 33: I1Ii111 + iII111i * oO0o / iIii1I11I1II1 - I1IiiI
if 54 - 54: I1Ii111 / OOooOOo . oO0o % iII111i
self . debug_status [ "rtr" ] = "yes"
Ii1i11IIii1I = self . __put ( "lisp debug" , self . debug_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 57 - 57: i11iIiiIii . I1ii11iIi11i - Ii1I - oO0o + OoOoOO00
if 63 - 63: OoOoOO00 * iII111i
def enable_mr_debug ( self ) :
if ( self . debug_status == None ) : return
if 69 - 69: O0 . OoO0O00
if 49 - 49: I1IiiI - I11i
self . debug_status [ "map-resolver" ] = "yes"
Ii1i11IIii1I = self . __put ( "lisp debug" , self . debug_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 74 - 74: iIii1I11I1II1 * I1ii11iIi11i + OoOoOO00 / i1IIi / II111iiii . Oo0Ooo
if 62 - 62: OoooooooOO * I1IiiI
def enable_ms_debug ( self ) :
if ( self . debug_status == None ) : return
if 58 - 58: OoOoOO00 % o0oOOo0O0Ooo
if 50 - 50: I1Ii111 . o0oOOo0O0Ooo
self . debug_status [ "map-server" ] = "yes"
Ii1i11IIii1I = self . __put ( "lisp debug" , self . debug_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 97 - 97: O0 + OoOoOO00
if 89 - 89: o0oOOo0O0Ooo + OoO0O00 * I11i * Ii1I
def enable_ddt_debug ( self ) :
if ( self . debug_status == None ) : return
if 37 - 37: OoooooooOO - O0 - o0oOOo0O0Ooo
if 77 - 77: OOooOOo * iIii1I11I1II1
self . debug_status [ "ddt-node" ] = "yes"
Ii1i11IIii1I = self . __put ( "lisp debug" , self . debug_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 98 - 98: I1IiiI % Ii1I * OoooooooOO
if 51 - 51: iIii1I11I1II1 . OoOoOO00 / oO0o + o0oOOo0O0Ooo
def disable_core_debug ( self ) :
if ( self . debug_status == None ) : return
if 33 - 33: ooOoO0o . II111iiii % iII111i + o0oOOo0O0Ooo
if 71 - 71: Oo0Ooo % OOooOOo
self . debug_status [ "core" ] = "no"
Ii1i11IIii1I = self . __put ( "lisp debug" , self . debug_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 98 - 98: I11i % i11iIiiIii % ooOoO0o + Ii1I
if 78 - 78: I1ii11iIi11i % oO0o / iII111i - iIii1I11I1II1
def disable_itr_debug ( self ) :
if ( self . debug_status == None ) : return
if 69 - 69: I1Ii111
if 11 - 11: I1IiiI
self . debug_status [ "itr" ] = "no"
Ii1i11IIii1I = self . __put ( "lisp debug" , self . debug_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 16 - 16: Ii1I + IiII * O0 % i1IIi . I1IiiI
if 67 - 67: OoooooooOO / I1IiiI * Ii1I + I11i
def disable_etr_debug ( self ) :
if ( self . debug_status == None ) : return
if 65 - 65: OoooooooOO - I1ii11iIi11i / ooOoO0o / II111iiii / i1IIi
if 71 - 71: I1Ii111 + Ii1I
self . debug_status [ "etr" ] = "no"
Ii1i11IIii1I = self . __put ( "lisp debug" , self . debug_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 28 - 28: OOooOOo
if 38 - 38: ooOoO0o % II111iiii % I11i / OoO0O00 + OoOoOO00 / i1IIi
def disable_rtr_debug ( self ) :
if ( self . debug_status == None ) : return
if 54 - 54: iIii1I11I1II1 % I1ii11iIi11i - OOooOOo / oO0o - OoO0O00 . I11i
if 11 - 11: I1ii11iIi11i . OoO0O00 * IiII * OoooooooOO + ooOoO0o
self . debug_status [ "rtr" ] = "no"
Ii1i11IIii1I = self . __put ( "lisp debug" , self . debug_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 33 - 33: O0 * o0oOOo0O0Ooo - I1Ii111 % I1Ii111
if 18 - 18: I1Ii111 / Oo0Ooo * I1Ii111 + I1Ii111 * i11iIiiIii * I1ii11iIi11i
def disable_mr_debug ( self ) :
if ( self . debug_status == None ) : return
if 11 - 11: ooOoO0o / OoOoOO00 - IiII * OoooooooOO + OoooooooOO . OoOoOO00
if 26 - 26: Ii1I % I1ii11iIi11i
self . debug_status [ "map-resolver" ] = "no"
Ii1i11IIii1I = self . __put ( "lisp debug" , self . debug_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 76 - 76: IiII * iII111i
if 52 - 52: OOooOOo
def disable_ms_debug ( self ) :
if ( self . debug_status == None ) : return
if 19 - 19: I1IiiI
if 25 - 25: Ii1I / ooOoO0o
self . debug_status [ "map-server" ] = "no"
Ii1i11IIii1I = self . __put ( "lisp debug" , self . debug_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 31 - 31: OOooOOo . O0 % I1IiiI . o0oOOo0O0Ooo + IiII
if 71 - 71: I1Ii111 . II111iiii
def disable_ddt_debug ( self ) :
if ( self . debug_status == None ) : return
if 62 - 62: OoooooooOO . I11i
if 61 - 61: OoOoOO00 - OOooOOo - i1IIi
self . debug_status [ "ddt-node" ] = "no"
Ii1i11IIii1I = self . __put ( "lisp debug" , self . debug_status )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 25 - 25: O0 * I11i + I1ii11iIi11i . o0oOOo0O0Ooo . o0oOOo0O0Ooo
if 58 - 58: I1IiiI
def add_user_account ( self , username , password ) :
if ( self . enable_status == None ) : return
if 53 - 53: i1IIi
if 59 - 59: o0oOOo0O0Ooo
Ii1i11IIii1I = self . __put ( "lisp user-account" ,
{ "username" : username , "password" : password } )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 81 - 81: OoOoOO00 - OoOoOO00 . iII111i
if 73 - 73: I11i % i11iIiiIii - I1IiiI
def delete_user_account ( self , username ) :
if ( self . enable_status == None ) : return
if 7 - 7: O0 * i11iIiiIii * Ii1I + ooOoO0o % OoO0O00 - ooOoO0o
if 39 - 39: Oo0Ooo * OOooOOo % OOooOOo - OoooooooOO + o0oOOo0O0Ooo - I11i
Ii1i11IIii1I = self . __delete ( "lisp user-account" , { "username" : username } )
return ( self . __error ( Ii1i11IIii1I ) == False )
if 23 - 23: i11iIiiIii
if 30 - 30: o0oOOo0O0Ooo - i1IIi % II111iiii + I11i * iIii1I11I1II1
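# The enable_*/disable_* pairs below toggle single flags in
# self.xtr_parameters (data-plane-security, nat-traversal, rloc-probing,
# nonce-echoing, data-plane-logging, flow-logging) and PUT the whole
# "lisp xtr-parameters" dict back in one request.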
def enable_itr_security ( self ) :
if ( self . enable_status == None ) : return
if 81 - 81: IiII % i1IIi . iIii1I11I1II1
if 4 - 4: i11iIiiIii % OoO0O00 % i1IIi / IiII
self . xtr_parameters [ "data-plane-security" ] = "yes"
Ii1i11IIii1I = self . __put ( "lisp xtr-parameters" , self . xtr_parameters )
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "put error" )
return ( "good" )
if 6 - 6: iII111i / I1IiiI % OOooOOo - I1IiiI
if 31 - 31: OOooOOo
def disable_itr_security ( self ) :
if ( self . enable_status == None ) : return
if 23 - 23: I1Ii111 . IiII
if 92 - 92: OoOoOO00 + I1Ii111 * Ii1I % I1IiiI
self . xtr_parameters [ "data-plane-security" ] = "no"
Ii1i11IIii1I = self . __put ( "lisp xtr-parameters" , self . xtr_parameters )
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "put error" )
return ( "good" )
if 42 - 42: Oo0Ooo
if 76 - 76: I1IiiI * iII111i % I1Ii111
def enable_xtr_nat_traversal ( self ) :
if ( self . enable_status == None ) : return
if 57 - 57: iIii1I11I1II1 - i1IIi / I1Ii111 - O0 * OoooooooOO % II111iiii
if 68 - 68: OoooooooOO * I11i % OoOoOO00 - IiII
if 34 - 34: I1Ii111 . iIii1I11I1II1 * OoOoOO00 * oO0o / I1Ii111 / I1ii11iIi11i
if 78 - 78: Oo0Ooo - o0oOOo0O0Ooo / OoOoOO00
self . xtr_parameters [ "nat-traversal" ] = "yes"
Ii1i11IIii1I = self . __put ( "lisp xtr-parameters" , self . xtr_parameters )
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "put error" )
return ( "good" )
if 10 - 10: iII111i + Oo0Ooo * I1ii11iIi11i + iIii1I11I1II1 / I1Ii111 / I1ii11iIi11i
if 42 - 42: I1IiiI
def disable_xtr_nat_traversal ( self ) :
if ( self . enable_status == None ) : return
if 38 - 38: OOooOOo + II111iiii % ooOoO0o % OoOoOO00 - Ii1I / OoooooooOO
if 73 - 73: o0oOOo0O0Ooo * O0 - i11iIiiIii
self . xtr_parameters [ "nat-traversal" ] = "no"
Ii1i11IIii1I = self . __put ( "lisp xtr-parameters" , self . xtr_parameters )
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "put error" )
return ( "good" )
if 85 - 85: Ii1I % iII111i + I11i / o0oOOo0O0Ooo . oO0o + OOooOOo
if 62 - 62: i11iIiiIii + i11iIiiIii - o0oOOo0O0Ooo
def enable_xtr_rloc_probing ( self ) :
if ( self . enable_status == None ) : return
if 28 - 28: iII111i . iII111i % iIii1I11I1II1 * iIii1I11I1II1 . o0oOOo0O0Ooo / iII111i
if 27 - 27: OoO0O00 + ooOoO0o - i1IIi
self . xtr_parameters [ "rloc-probing" ] = "yes"
Ii1i11IIii1I = self . __put ( "lisp xtr-parameters" , self . xtr_parameters )
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "put error" )
return ( "good" )
if 69 - 69: IiII - O0 % I1ii11iIi11i + i11iIiiIii . OoOoOO00 / OoO0O00
if 79 - 79: O0 * i11iIiiIii - IiII / IiII
def disable_xtr_rloc_probing ( self ) :
if ( self . enable_status == None ) : return
if 48 - 48: O0
if 93 - 93: i11iIiiIii - I1IiiI * I1ii11iIi11i * I11i % O0 + OoooooooOO
self . xtr_parameters [ "rloc-probing" ] = "no"
Ii1i11IIii1I = self . __put ( "lisp xtr-parameters" , self . xtr_parameters )
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "put error" )
return ( "good" )
if 25 - 25: IiII + Ii1I / ooOoO0o . o0oOOo0O0Ooo % O0 * OoO0O00
if 84 - 84: ooOoO0o % Ii1I + i11iIiiIii
def enable_xtr_nonce_echoing ( self ) :
if ( self . enable_status == None ) : return
if 28 - 28: Oo0Ooo + OoO0O00 * OOooOOo % oO0o . I11i % O0
if 16 - 16: I11i - iIii1I11I1II1 / I1IiiI . II111iiii + iIii1I11I1II1
self . xtr_parameters [ "nonce-echoing" ] = "yes"
Ii1i11IIii1I = self . __put ( "lisp xtr-parameters" , self . xtr_parameters )
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "put error" )
return ( "good" )
if 19 - 19: OoO0O00 - Oo0Ooo . O0
if 60 - 60: II111iiii + Oo0Ooo
def disable_xtr_nonce_echoing ( self ) :
if ( self . enable_status == None ) : return
if 9 - 9: ooOoO0o * OoooooooOO - iIii1I11I1II1 + OoOoOO00 / OoO0O00 . OoO0O00
if 49 - 49: II111iiii
self . xtr_parameters [ "nonce-echoing" ] = "no"
Ii1i11IIii1I = self . __put ( "lisp xtr-parameters" , self . xtr_parameters )
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "put error" )
return ( "good" )
if 25 - 25: OoooooooOO - I1IiiI . I1IiiI * oO0o
if 81 - 81: iII111i + IiII
def enable_xtr_data_plane_logging ( self ) :
if ( self . enable_status == None ) : return
if 98 - 98: I1IiiI
if 95 - 95: ooOoO0o / ooOoO0o
self . xtr_parameters [ "data-plane-logging" ] = "yes"
Ii1i11IIii1I = self . __put ( "lisp xtr-parameters" , self . xtr_parameters )
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "put error" )
return ( "good" )
if 30 - 30: I1ii11iIi11i + Oo0Ooo / Oo0Ooo % I1ii11iIi11i . I1ii11iIi11i
if 55 - 55: ooOoO0o - I11i + II111iiii + iII111i % Ii1I
def disable_xtr_data_plane_logging ( self ) :
if ( self . enable_status == None ) : return
if 41 - 41: i1IIi - I11i - Ii1I
if 8 - 8: OoO0O00 + I1Ii111 - o0oOOo0O0Ooo % Oo0Ooo % o0oOOo0O0Ooo * oO0o
self . xtr_parameters [ "data-plane-logging" ] = "no"
Ii1i11IIii1I = self . __put ( "lisp xtr-parameters" , self . xtr_parameters )
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "put error" )
return ( "good" )
if 9 - 9: Oo0Ooo - i11iIiiIii - OOooOOo * Ii1I + ooOoO0o
if 44 - 44: II111iiii
def enable_xtr_flow_logging ( self ) :
if ( self . enable_status == None ) : return
if 52 - 52: I1ii11iIi11i - Oo0Ooo + I1ii11iIi11i % o0oOOo0O0Ooo
if 35 - 35: iIii1I11I1II1
self . xtr_parameters [ "flow-logging" ] = "yes"
Ii1i11IIii1I = self . __put ( "lisp xtr-parameters" , self . xtr_parameters )
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "put error" )
return ( "good" )
if 42 - 42: I1Ii111 . I1IiiI . i1IIi + OoOoOO00 + OOooOOo + I1IiiI
if 31 - 31: iII111i . OOooOOo - ooOoO0o . OoooooooOO / OoooooooOO
def disable_xtr_flow_logging ( self ) :
if ( self . enable_status == None ) : return
if 56 - 56: OoO0O00 / oO0o / i11iIiiIii + OoooooooOO - Oo0Ooo - I11i
if 21 - 21: O0 % IiII . I1IiiI / II111iiii + IiII
self . xtr_parameters [ "flow-logging" ] = "no"
Ii1i11IIii1I = self . __put ( "lisp xtr-parameters" , self . xtr_parameters )
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "put error" )
return ( "good" )
if 53 - 53: oO0o - I1IiiI - oO0o * iII111i
if 71 - 71: O0 - iIii1I11I1II1
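# Map-resolver DDT-root configuration: add/delete a "lisp ddt-root" entry,
# after a basic address-syntax check.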
def add_mr_ddt_root ( self , address = "" ) :
if ( address == "" ) : return ( "no address supplied" )
if ( self . __check_address_syntax ( address ) == False ) :
return ( "bad address syntax" )
if 1 - 1: iII111i
if 97 - 97: OOooOOo + iII111i + O0 + i11iIiiIii
if ( self . enable_status == None ) : return
if 77 - 77: o0oOOo0O0Ooo / OoooooooOO
Ii1i11IIii1I = self . __put ( "lisp ddt-root" , { "address" : address } )
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "put error" )
return ( "good" )
if 46 - 46: o0oOOo0O0Ooo % iIii1I11I1II1 . iII111i % iII111i + i11iIiiIii
if 72 - 72: iIii1I11I1II1 * Ii1I % ooOoO0o / OoO0O00
def delete_mr_ddt_root ( self , address = "" ) :
if ( address == "" ) : return ( "no address supplied" )
if ( self . __check_address_syntax ( address ) == False ) :
return ( "bad address syntax" )
if 10 - 10: I1Ii111 % IiII * IiII . I11i / Ii1I % OOooOOo
if 49 - 49: OoO0O00 / oO0o + O0 * o0oOOo0O0Ooo
if ( self . enable_status == None ) : return
if 28 - 28: ooOoO0o + i11iIiiIii / I11i % OoOoOO00 % Oo0Ooo - O0
Ii1i11IIii1I = self . __delete ( "lisp ddt-root" , { "address" : address } )
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "delete error" )
return ( "good" )
if 54 - 54: i1IIi + II111iiii
if 83 - 83: I1ii11iIi11i - I1IiiI + OOooOOo
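# Map-resolver referral-cache entries: an EID prefix (with optional group and
# instance-id) plus one "referral" record per address in referral_set, written
# to "lisp referral-cache".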
def add_mr_referral ( self , iid = "0" , prefix = "" , group = "" , referral_set = [ ] ) :
if ( prefix == "" ) : return ( "no prefix supplied" )
if ( referral_set == "" ) : return ( "no referral-set supplied" )
if ( self . __check_prefix_syntax ( prefix ) == False ) :
return ( "bad prefix syntax" )
if 9 - 9: Ii1I
if ( group != "" and self . __check_prefix_syntax ( group ) == False ) :
return ( "bad group syntax" )
if 59 - 59: I1IiiI * II111iiii . O0
if ( self . __check_address_set_syntax ( referral_set , False ) == False ) :
return ( "bad address syntax in referral-set" )
if 56 - 56: Ii1I - iII111i % I1IiiI - o0oOOo0O0Ooo
if 51 - 51: O0 / ooOoO0o * iIii1I11I1II1 + I1ii11iIi11i + o0oOOo0O0Ooo
if ( self . enable_status == None ) : return
if 98 - 98: iIii1I11I1II1 * I1ii11iIi11i * OOooOOo + ooOoO0o % i11iIiiIii % O0
Ii1i11IIii1I = self . __build_prefix_tuple ( iid , prefix , group )
Ii1i11IIii1I = [ Ii1i11IIii1I ]
for i1 in referral_set :
Ii1i11IIii1I . append ( { "referral" : { "address" : i1 } } )
if 51 - 51: OoO0O00 - O0 % oO0o - II111iiii
Ii1i11IIii1I = self . __put ( "lisp referral-cache" , Ii1i11IIii1I )
if 31 - 31: iII111i / Oo0Ooo - iII111i - OOooOOo
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "put error" )
return ( "good" )
if 7 - 7: iII111i % O0 . OoOoOO00 + I1IiiI - I11i
if 75 - 75: I11i
def delete_mr_referral ( self , iid = "0" , prefix = "" , group = "" ) :
if ( prefix == "" ) : return ( "no prefix supplied" )
if ( self . __check_prefix_syntax ( prefix ) == False ) :
return ( "bad prefix syntax" )
if 73 - 73: i11iIiiIii - IiII
if ( group != "" and self . __check_prefix_syntax ( group ) == False ) :
return ( "bad group syntax" )
if 25 - 25: OoooooooOO + IiII * I1ii11iIi11i
if 92 - 92: I1IiiI + I11i + O0 / o0oOOo0O0Ooo + I1Ii111
if ( self . enable_status == None ) : return
if 18 - 18: ooOoO0o * OoOoOO00 . iII111i / I1ii11iIi11i / i11iIiiIii
Ii1i11IIii1I = self . __build_prefix_tuple ( iid , prefix , group )
Ii1i11IIii1I = self . __delete ( "lisp referral-cache" , Ii1i11IIii1I )
if 21 - 21: oO0o / I1ii11iIi11i + Ii1I + OoooooooOO
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "delete error" )
return ( "good" )
if 91 - 91: i11iIiiIii / i1IIi + iII111i + ooOoO0o * i11iIiiIii
if 66 - 66: iIii1I11I1II1 % i1IIi - O0 + I11i * I1Ii111 . IiII
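# DDT-node delegations: same shape as the referral-cache entries above, but
# each referral_set address becomes a "delegate" record under "lisp delegation".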
def add_ddt_delegation ( self , iid = "0" , prefix = "" , group = "" ,
referral_set = [ ] ) :
if ( prefix == "" ) : return ( "no prefix supplied" )
if ( referral_set == "" ) : return ( "no referral-set supplied" )
if ( self . __check_prefix_syntax ( prefix ) == False ) :
return ( "bad prefix syntax" )
if 45 - 45: I1Ii111
if ( group != "" and self . __check_prefix_syntax ( group ) == False ) :
return ( "bad group syntax" )
if 83 - 83: OoOoOO00 . OoooooooOO
if ( self . __check_address_set_syntax ( referral_set , False ) == False ) :
return ( "bad address syntax in referral-set" )
if 58 - 58: i11iIiiIii + OoooooooOO % OoooooooOO / IiII / i11iIiiIii
if 62 - 62: OoO0O00 / I1ii11iIi11i
if ( self . enable_status == None ) : return
if 7 - 7: OoooooooOO . IiII
Ii1i11IIii1I = self . __build_prefix_tuple ( iid , prefix , group )
Ii1i11IIii1I = [ Ii1i11IIii1I ]
for i1 in referral_set :
Ii1i11IIii1I . append ( { "delegate" : { "address" : i1 } } )
if 53 - 53: Ii1I % Ii1I * o0oOOo0O0Ooo + OoOoOO00
if 92 - 92: OoooooooOO + i1IIi / Ii1I * O0
Ii1i11IIii1I = self . __put ( "lisp delegation" , Ii1i11IIii1I )
if 100 - 100: ooOoO0o % iIii1I11I1II1 * II111iiii - iII111i
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "put error" )
return ( "good" )
if 92 - 92: ooOoO0o
if 22 - 22: Oo0Ooo % iII111i * I1ii11iIi11i / OOooOOo % i11iIiiIii * I11i
def delete_ddt_delegation ( self , iid = "0" , prefix = "" , group = "" ) :
if ( prefix == "" ) : return ( "no prefix supplied" )
if ( self . __check_prefix_syntax ( prefix ) == False ) :
return ( "bad prefix syntax" )
if 97 - 97: ooOoO0o / I1Ii111 % i1IIi % I1ii11iIi11i
if ( group != "" and self . __check_prefix_syntax ( group ) == False ) :
return ( "bad group syntax" )
if 18 - 18: iIii1I11I1II1 % I11i
if 95 - 95: ooOoO0o + i11iIiiIii * I1Ii111 - i1IIi * I1Ii111 - iIii1I11I1II1
if ( self . enable_status == None ) : return
if 75 - 75: OoooooooOO * IiII
Ii1i11IIii1I = self . __build_prefix_tuple ( iid , prefix , group )
Ii1i11IIii1I = self . __delete ( "lisp delegation" , Ii1i11IIii1I )
if 9 - 9: IiII - II111iiii + O0 / iIii1I11I1II1 / i11iIiiIii
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "delete error" )
return ( "good" )
if 39 - 39: IiII * Oo0Ooo + iIii1I11I1II1 - IiII + OOooOOo
if 69 - 69: O0
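# DDT authoritative prefixes: add/delete a "lisp ddt-authoritative-prefix"
# entry built from the instance-id, auth-prefix and optional group.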
def add_ddt_auth_prefix ( self , iid = "0" , auth_prefix = "" , group = "" ) :
if ( auth_prefix == "" ) : return ( "no prefix supplied" )
if ( self . __check_prefix_syntax ( auth_prefix ) == False ) :
return ( "bad prefix syntax" )
if 73 - 73: i1IIi / i11iIiiIii
if ( group != "" and self . __check_prefix_syntax ( group ) == False ) :
return ( "bad group syntax" )
if 58 - 58: Oo0Ooo . II111iiii + oO0o - i11iIiiIii / II111iiii / O0
if 85 - 85: OoOoOO00 + OOooOOo
if ( self . enable_status == None ) : return
if 10 - 10: IiII / OoO0O00 + OoOoOO00 / i1IIi
Ii1i11IIii1I = self . __build_prefix_tuple ( iid , auth_prefix , group )
Ii1i11IIii1I = [ Ii1i11IIii1I [ "prefix" ] ]
Ii1i11IIii1I = self . __put ( "lisp ddt-authoritative-prefix" , Ii1i11IIii1I )
if 27 - 27: Ii1I
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "put error" )
return ( "good" )
if 67 - 67: I1IiiI
if 55 - 55: I1ii11iIi11i - iII111i * o0oOOo0O0Ooo + OoOoOO00 * OoOoOO00 * O0
def delete_ddt_auth_prefix ( self , iid = "0" , auth_prefix = "" , group = "" ) :
if ( auth_prefix == "" ) : return ( "no auth-prefix supplied" )
if ( self . __check_prefix_syntax ( auth_prefix ) == False ) :
return ( "bad prefix syntax" )
if 79 - 79: OoO0O00 - iIii1I11I1II1 + Ii1I - I1Ii111
if ( group != "" and self . __check_prefix_syntax ( group ) == False ) :
return ( "bad group syntax" )
if 93 - 93: II111iiii . I1IiiI - Oo0Ooo + OoOoOO00
if 61 - 61: II111iiii
if ( self . enable_status == None ) : return
if 15 - 15: i11iIiiIii % I1IiiI * I11i / I1Ii111
Ii1i11IIii1I = self . __build_prefix_tuple ( iid , auth_prefix , group )
Ii1i11IIii1I = self . __delete ( "lisp ddt-authoritative-prefix" , Ii1i11IIii1I )
if 90 - 90: iII111i
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "delete error" )
return ( "good" )
if 31 - 31: OOooOOo + O0
if 87 - 87: ooOoO0o
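# Map-server peering: the prefix tuple plus one "peer" record per address in
# peer_set is PUT (add) or DELETEd as "lisp map-server-peer".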
def add_ms_map_server_peer ( self , iid = "0" , prefix = "" , group = "" ,
peer_set = [ ] ) :
if ( prefix == "" ) : return ( "no prefix supplied" )
if ( peer_set == "" ) : return ( "no peer-set supplied" )
if ( self . __check_prefix_syntax ( prefix ) == False ) :
return ( "bad prefix syntax" )
if 4 - 4: OoooooooOO - i1IIi % Ii1I - OOooOOo * o0oOOo0O0Ooo
if ( group != "" and self . __check_prefix_syntax ( group ) == False ) :
return ( "bad group syntax" )
if 85 - 85: OoooooooOO * iIii1I11I1II1 . iII111i / OoooooooOO % I1IiiI % O0
if ( self . __check_address_set_syntax ( peer_set , False ) == False ) :
return ( "bad address syntax in referral-set" )
if 36 - 36: Ii1I / II111iiii / IiII / IiII + I1ii11iIi11i
if 95 - 95: IiII
if ( self . enable_status == None ) : return
if 51 - 51: II111iiii + IiII . i1IIi . I1ii11iIi11i + OoOoOO00 * I1IiiI
Ii1i11IIii1I = self . __build_prefix_tuple ( iid , prefix , group )
Ii1i11IIii1I = [ Ii1i11IIii1I ]
for OOoOoo0 in peer_set :
Ii1i11IIii1I . append ( { "peer" : { "address" : OOoOoo0 } } )
if 17 - 17: Ii1I + oO0o . OoO0O00 - Oo0Ooo * i11iIiiIii
if 20 - 20: I1IiiI . OoooooooOO % OOooOOo
Ii1i11IIii1I = self . __put ( "lisp map-server-peer" , Ii1i11IIii1I )
if 63 - 63: I1IiiI % iIii1I11I1II1
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "put error" )
return ( "good" )
if 39 - 39: iII111i / II111iiii / I1ii11iIi11i % I1IiiI
if 89 - 89: I1Ii111 + OoooooooOO + I1Ii111 * i1IIi + iIii1I11I1II1 % I11i
def delete_ms_map_server_peer ( self , iid = "0" , prefix = "" , group = "" ) :
if ( prefix == "" ) : return ( "no prefix supplied" )
if ( self . __check_prefix_syntax ( prefix ) == False ) :
return ( "bad prefix syntax" )
if 30 - 30: OoOoOO00
if ( group != "" and self . __check_prefix_syntax ( group ) == False ) :
return ( "bad group syntax" )
if 21 - 21: i11iIiiIii / I1Ii111 % OOooOOo * O0 . I11i - iIii1I11I1II1
if 26 - 26: II111iiii * OoOoOO00
if ( self . enable_status == None ) : return
if 10 - 10: II111iiii . iII111i
Ii1i11IIii1I = self . __build_prefix_tuple ( iid , prefix , group )
Ii1i11IIii1I = self . __delete ( "lisp map-server-peer" , Ii1i11IIii1I )
if 32 - 32: Ii1I . IiII . OoooooooOO - OoO0O00 + oO0o
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "delete error" )
return ( "good" )
if 88 - 88: iII111i
if 19 - 19: II111iiii * IiII + Ii1I
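# Map-server site configuration: add_ms_site() PUTs a "lisp site" record made
# of site-name, description, authentication-key and a prefix_list of
# "allowed-prefix" entries (built with build_ms_site_allowed_prefix() below).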
def add_ms_site ( self , site_name , auth_key , prefix_list , description = "" ) :
if ( site_name == "" ) : return ( "no site-name supplied" )
if ( auth_key == "" ) : return ( "no auth_key supplied" )
if 67 - 67: I11i - OOooOOo . i1IIi
I1I1iI = self . __check_prefix_list ( prefix_list )
if ( I1I1iI != True ) : return ( I1I1iI )
if 16 - 16: IiII * OoOoOO00 . ooOoO0o / i1IIi . OoO0O00 - i1IIi
if ( self . enable_status == None ) : return
if 46 - 46: IiII + iIii1I11I1II1 + OOooOOo + OoO0O00 . I1ii11iIi11i
Ii1i11IIii1I = [ ]
Ii1i11IIii1I . append ( { "site-name" : site_name } )
Ii1i11IIii1I . append ( { "description" : description } )
Ii1i11IIii1I . append ( { "authentication-key" : auth_key } )
Ii1i11IIii1I . append ( prefix_list )
Ii1i11IIii1I = self . __put ( "lisp site" , Ii1i11IIii1I )
if 1 - 1: oO0o
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "put error" )
return ( "good" )
if 62 - 62: i1IIi - OOooOOo
if 96 - 96: i1IIi . I1ii11iIi11i + oO0o
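# Helper that appends one "allowed-prefix" record to prefix_list, optionally
# tagging it with accept-more-specifics, force-proxy-reply,
# force-nat-proxy-reply, pitr-proxy-reply-drop or a proxy-reply-action value.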
def build_ms_site_allowed_prefix ( self , prefix_list , iid = "0" , prefix = "" ,
group = "" , ams = False , fpr = False , fnpr = False , pprd = False , pra = "" ) :
if ( prefix == "" ) : return ( "no prefix supplied" )
if ( self . __check_prefix_syntax ( prefix ) == False ) :
return ( "bad prefix syntax" )
if 77 - 77: ooOoO0o * OoOoOO00
if ( group != "" and self . __check_prefix_syntax ( group ) == False ) :
return ( "bad group syntax" )
if 14 - 14: I11i % I11i / IiII
if 72 - 72: i1IIi - II111iiii - OOooOOo + OOooOOo * o0oOOo0O0Ooo * OOooOOo
if ( self . enable_status == None ) : return
if 33 - 33: Oo0Ooo
if 49 - 49: OoO0O00 % iII111i % iII111i / iII111i
if 53 - 53: iIii1I11I1II1
if 68 - 68: OoooooooOO % II111iiii
Ii1i11IIii1I = self . __build_prefix_tuple ( iid , prefix , group ,
kw = "allowed-prefix" )
Ii1i1i1111 = Ii1i11IIii1I [ "allowed-prefix" ]
if ( ams ) : Ii1i1i1111 [ "accept-more-specifics" ] = "yes"
if ( fpr ) : Ii1i1i1111 [ "force-proxy-reply" ] = "yes"
if ( fnpr ) : Ii1i1i1111 [ "force-nat-proxy-reply" ] = "yes"
if ( pprd ) : Ii1i1i1111 [ "pitr-proxy-reply-drop" ] = "yes"
if ( pra != "" ) : Ii1i1i1111 [ "proxy-reply-action" ] = pra
if 57 - 57: Ii1I % II111iiii
prefix_list . append ( Ii1i11IIii1I )
return ( "good" )
if 67 - 67: ooOoO0o + I1IiiI * i11iIiiIii - oO0o / IiII % iII111i
if 92 - 92: Ii1I - oO0o - ooOoO0o % OoooooooOO / OOooOOo
def delete_ms_site ( self , site_name ) :
if ( site_name == "" ) : return ( "no site-name supplied" )
if ( self . enable_status == None ) : return
if 62 - 62: I1ii11iIi11i + Ii1I + i1IIi / OoooooooOO
Ii1i11IIii1I = self . __delete ( "lisp site" , { "site-name" : site_name } )
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "delete error" )
return ( "good" )
if 7 - 7: o0oOOo0O0Ooo + i1IIi . I1IiiI / Oo0Ooo
if 22 - 22: ooOoO0o - ooOoO0o % OOooOOo . I1Ii111 + oO0o
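# ETR database-mappings: each rloc_set element may be a prepared dict, a known
# interface name (en0/en1/eth0/eth1), a quoted distinguished name, or a plain
# RLOC address; each becomes one "rloc" record in the "lisp database-mapping".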
def add_etr_database_mapping ( self , iid = "0" , prefix = "" , group = "" ,
rloc_set = [ ] ) :
if ( prefix == "" ) : return ( "no prefix supplied" )
if ( rloc_set == "" ) : return ( "no rloc-set supplied" )
if ( self . __check_prefix_syntax ( prefix ) == False ) :
return ( "bad prefix syntax" )
if 34 - 34: O0
if ( group != "" and self . __check_prefix_syntax ( group ) == False ) :
return ( "bad group syntax" )
if 80 - 80: i1IIi - Oo0Ooo / OoO0O00 - i11iIiiIii
if ( self . __check_address_set_syntax ( rloc_set , True ) == False ) :
return ( "bad address syntax in rloc-set" )
if 68 - 68: oO0o - I1ii11iIi11i % O0 % I1Ii111
if 11 - 11: O0 / OoO0O00 % OOooOOo + o0oOOo0O0Ooo + iIii1I11I1II1
if ( self . enable_status == None ) : return
if 40 - 40: ooOoO0o - OOooOOo . Ii1I * Oo0Ooo % I1Ii111
Ii1i11IIii1I = self . __build_prefix_tuple ( iid , prefix , group )
Ii1i11IIii1I = [ Ii1i11IIii1I ]
for i1 in rloc_set :
if ( type ( i1 ) == dict ) :
Ii1i11IIii1I . append ( { "rloc" : i1 } )
continue
if 56 - 56: i11iIiiIii . o0oOOo0O0Ooo - I1IiiI * I11i
if 91 - 91: oO0o + OoooooooOO - i1IIi
if ( i1 in [ "en0" , "en1" , "eth0" , "eth1" ] ) :
Ii1i11IIii1I . append ( { "rloc" : { "interface" : i1 } } )
else :
if ( self . __is_dist_name ( i1 ) ) :
Ii1i11IIii1I . append ( { "rloc" : { "rloc-record-name" : i1 } } )
else :
Ii1i11IIii1I . append ( { "rloc" : { "address" : i1 } } )
if 84 - 84: Ii1I / IiII
if 86 - 86: OoOoOO00 * II111iiii - O0 . OoOoOO00 % iIii1I11I1II1 / OOooOOo
if 11 - 11: I1IiiI * oO0o + I1ii11iIi11i / I1ii11iIi11i
if 37 - 37: i11iIiiIii + i1IIi
Ii1i11IIii1I = self . __put ( "lisp database-mapping" , Ii1i11IIii1I )
if 23 - 23: iII111i + I11i . OoOoOO00 * I1IiiI + I1ii11iIi11i
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "put error" )
return ( "good" )
if 18 - 18: IiII * o0oOOo0O0Ooo . IiII / O0
if 8 - 8: o0oOOo0O0Ooo
def delete_etr_database_mapping ( self , iid = "0" , prefix = "" , group = "" ) :
if ( prefix == "" ) : return ( "no prefix supplied" )
if 4 - 4: I1ii11iIi11i + I1ii11iIi11i * ooOoO0o - OoOoOO00
if ( self . __check_prefix_syntax ( prefix ) == False ) :
return ( "bad prefix syntax" )
if 78 - 78: Ii1I / II111iiii % OoOoOO00
if ( group != "" and self . __check_prefix_syntax ( group ) == False ) :
return ( "bad group syntax" )
if 52 - 52: OOooOOo - iII111i * oO0o
if 17 - 17: OoooooooOO + OOooOOo * I11i * OoOoOO00
if ( self . enable_status == None ) : return
if 36 - 36: O0 + Oo0Ooo
Ii1i11IIii1I = self . __build_prefix_tuple ( iid , prefix , group )
Ii1i11IIii1I = self . __delete ( "lisp database-mapping" , Ii1i11IIii1I )
if 5 - 5: Oo0Ooo * OoOoOO00
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "delete error" )
return ( "good" )
if 46 - 46: ooOoO0o
if 33 - 33: iII111i - II111iiii * OoooooooOO - Oo0Ooo - OOooOOo
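# ETR map-server configuration: the map-server is keyed by "address" or, when
# address_is_name is True, by "dns-name", and is stored together with an sha2
# authentication-key via "lisp map-server".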
def add_etr_map_server ( self , address = "" , auth_key = None ,
address_is_name = False ) :
if ( address == "" ) : return ( "no address supplied" )
if ( address_is_name == False ) :
if ( self . __check_address_syntax ( address ) == False ) :
return ( "bad address syntax" )
if 6 - 6: ooOoO0o / I1ii11iIi11i
oOooO00o0O = "address"
else :
oOooO00o0O = "dns-name"
if 80 - 80: OOooOOo / I11i / OoOoOO00 + i1IIi - Oo0Ooo
if 11 - 11: o0oOOo0O0Ooo * OoO0O00
if ( auth_key == None ) : return ( "no auth-key supplied" )
if 15 - 15: OoOoOO00
if ( self . enable_status == None ) : return
if 62 - 62: Ii1I
Ii1i11IIii1I = self . __put ( "lisp map-server" ,
{ oOooO00o0O : address , "authentication-type" : "sha2" ,
"authentication-key" : auth_key } )
if 51 - 51: OoOoOO00
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "put error" )
return ( "good" )
if 14 - 14: IiII % oO0o % Oo0Ooo - i11iIiiIii
if 53 - 53: Ii1I % Oo0Ooo
def get_etr_map_server ( self , address , address_is_name = False ) :
if ( self . enable_status == None ) : return
if ( address == "" ) : return ( "no address supplied" )
if 31 - 31: I11i % OOooOOo * I11i
if ( address_is_name == False ) :
if ( self . __check_address_syntax ( address ) == False ) :
return ( "bad address syntax" )
if 45 - 45: i1IIi . I1IiiI + OOooOOo - OoooooooOO % ooOoO0o
oOooO00o0O = "address"
else :
oOooO00o0O = "dns-name"
if 1 - 1: iIii1I11I1II1
if 93 - 93: i1IIi . i11iIiiIii . Oo0Ooo
Ii1i11IIii1I = self . __get_data ( "lisp map-server" , { oOooO00o0O : address } )
return ( Ii1i11IIii1I )
if 99 - 99: I11i - I1Ii111 - oO0o % OoO0O00
if 21 - 21: II111iiii % I1ii11iIi11i . i1IIi - OoooooooOO
def delete_etr_map_server ( self , address , address_is_name = False ) :
if ( address == "" ) : return ( "no address supplied" )
if ( address_is_name == False ) :
if ( self . __check_address_syntax ( address ) == False ) :
return ( "bad address syntax" )
if 30 - 30: ooOoO0o % iII111i * OOooOOo - I1ii11iIi11i * Ii1I % ooOoO0o
oOooO00o0O = "address"
else :
oOooO00o0O = "dns-name"
if 46 - 46: i11iIiiIii - O0 . oO0o
if 100 - 100: I1IiiI / o0oOOo0O0Ooo * iII111i . O0 / OOooOOo
if ( self . enable_status == None ) : return
if 83 - 83: I1Ii111
Ii1i11IIii1I = self . __delete ( "lisp map-server" , { oOooO00o0O : address } )
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "delete error" )
return ( "good" )
if 48 - 48: II111iiii * OOooOOo * I1Ii111
if 50 - 50: IiII % i1IIi
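# ITR map-resolver configuration: add/get/delete a "lisp map-resolver" entry
# keyed by "address" or, when address_is_name is True, by "dns-name".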
def add_itr_map_resolver ( self , address , address_is_name = False ) :
if ( address == "" ) : return ( "no address supplied" )
if ( address_is_name == False ) :
if ( self . __check_address_syntax ( address ) == False ) :
return ( "bad address syntax" )
if 62 - 62: i1IIi + Oo0Ooo % IiII
oOooO00o0O = "address"
else :
oOooO00o0O = "dns-name"
if 28 - 28: I1ii11iIi11i . i1IIi
if 10 - 10: OoO0O00 / Oo0Ooo
if ( self . enable_status == None ) : return
if 15 - 15: iII111i . OoOoOO00 / iII111i * I11i - I1IiiI % I1ii11iIi11i
Ii1i11IIii1I = self . __put ( "lisp map-resolver" , { oOooO00o0O : address } )
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "put error" )
return ( "good" )
if 57 - 57: O0 % OoOoOO00 % oO0o
if 45 - 45: I1ii11iIi11i + II111iiii * i11iIiiIii
def get_itr_map_resolver ( self , address , address_is_name = False ) :
if ( self . enable_status == None ) : return
if ( address == "" ) : return ( "no address supplied" )
if 19 - 19: I11i
if ( address_is_name == False ) :
if ( self . __check_address_syntax ( address ) == False ) :
return ( "bad address syntax" )
if 67 - 67: O0 % iIii1I11I1II1 / IiII . i11iIiiIii - Ii1I + O0
oOooO00o0O = "address"
else :
oOooO00o0O = "dns-name"
if 27 - 27: OOooOOo
if 89 - 89: II111iiii / oO0o
Ii1i11IIii1I = self . __get_data ( "lisp map-resolver" , { oOooO00o0O : address } )
return ( Ii1i11IIii1I )
if 14 - 14: OOooOOo . I1IiiI * ooOoO0o + II111iiii - ooOoO0o + OOooOOo
if 18 - 18: oO0o - o0oOOo0O0Ooo - I1IiiI - I1IiiI
def delete_itr_map_resolver ( self , address , address_is_name = False ) :
if ( address == "" ) : return ( "no address supplied" )
if ( address_is_name == False ) :
if ( self . __check_address_syntax ( address ) == False ) :
return ( "bad address syntax" )
if 54 - 54: II111iiii . I11i
oOooO00o0O = "address"
else :
oOooO00o0O = "dns-name"
if 73 - 73: OoOoOO00 . I1IiiI
if 32 - 32: OoOoOO00 * I1IiiI % ooOoO0o * Ii1I . O0
if ( self . enable_status == None ) : return
if 48 - 48: iII111i * iII111i
Ii1i11IIii1I = self . __delete ( "lisp map-resolver" , { oOooO00o0O : address } )
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "delete error" )
return ( "good" )
if 13 - 13: Ii1I / I11i + OoOoOO00 . o0oOOo0O0Ooo % ooOoO0o
if 48 - 48: I1IiiI / i11iIiiIii - o0oOOo0O0Ooo * oO0o / OoooooooOO
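# Helper that builds one RLOC record (interface or address, optional
# rloc-record-name, unicast priority and weight) and appends it to rloc_set.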
def build_rloc_record ( self , rloc_or_int , upriority , uweight ,
rloc_name = None , mpriority = 255 , mweight = 0 , rloc_set = [ ] ) :
OoOo = { }
if ( rloc_or_int in [ "en0" , "en1" , "eth0" , "eth1" ] ) :
OoOo [ "interface" ] = rloc_or_int
else :
OoOo [ "address" ] = rloc_or_int
if 71 - 71: i1IIi . Ii1I * iII111i % OoooooooOO + OOooOOo
if ( rloc_name ) : OoOo [ "rloc-record-name" ] = rloc_name
OoOo [ "priority" ] = upriority
OoOo [ "weight" ] = uweight
if 36 - 36: IiII
if 49 - 49: OOooOOo / OoooooooOO / I1IiiI
rloc_set . append ( OoOo )
return ( rloc_set )
if 74 - 74: I1Ii111 % I1ii11iIi11i
if 7 - 7: II111iiii
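# Static ITR map-cache entries: the prefix tuple plus one "rloc" record per
# rloc_set element, PUT (add) or DELETEd as "lisp map-cache".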
def add_itr_map_cache ( self , iid = "0" , prefix = "" , group = "" , rloc_set = [ ] ) :
if ( prefix == "" ) : return ( "no prefix supplied" )
if ( self . __check_prefix_syntax ( prefix ) == False ) :
return ( "bad prefix syntax" )
if 84 - 84: I11i - Oo0Ooo / O0 - I1Ii111
if ( group != "" and self . __check_prefix_syntax ( group ) == False ) :
return ( "bad group syntax" )
if 21 - 21: O0 * O0 % I1ii11iIi11i
if ( self . __check_address_set_syntax ( rloc_set , False ) == False ) :
return ( "bad address syntax in rloc-set" )
if 94 - 94: I11i + II111iiii % i11iIiiIii
if 8 - 8: ooOoO0o * O0
if ( self . enable_status == None ) : return
if 73 - 73: o0oOOo0O0Ooo / oO0o / I11i / OoO0O00
Ii1i11IIii1I = self . __build_prefix_tuple ( iid , prefix , group )
Ii1i11IIii1I = [ Ii1i11IIii1I ]
for i1 in rloc_set :
if ( type ( i1 ) == dict ) :
Ii1i11IIii1I . append ( { "rloc" : i1 } )
continue
if 11 - 11: OoOoOO00 + IiII - OoooooooOO / OoO0O00
Ii1i11IIii1I . append ( { "rloc" : { "address" : i1 } } )
if 34 - 34: ooOoO0o
if 45 - 45: ooOoO0o / Oo0Ooo / Ii1I
Ii1i11IIii1I = self . __put ( "lisp map-cache" , Ii1i11IIii1I )
if 44 - 44: I1ii11iIi11i - Ii1I / II111iiii * OoO0O00 * Oo0Ooo
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "put error" )
return ( "good" )
if 73 - 73: o0oOOo0O0Ooo - I1IiiI * i1IIi / i11iIiiIii * OOooOOo % II111iiii
if 56 - 56: OoooooooOO * Oo0Ooo . Oo0Ooo . I1ii11iIi11i
def delete_itr_map_cache ( self , iid = "0" , prefix = "" , group = "" ) :
if ( prefix == "" ) : return ( "no prefix supplied" )
if 24 - 24: Oo0Ooo . I11i * Ii1I % iII111i / OOooOOo
if ( self . __check_prefix_syntax ( prefix ) == False ) :
return ( "bad prefix syntax" )
if 58 - 58: I1IiiI - I1ii11iIi11i % O0 . I1IiiI % OoO0O00 % IiII
if ( group != "" and self . __check_prefix_syntax ( group ) == False ) :
return ( "bad group syntax" )
if 87 - 87: oO0o - i11iIiiIii
if 78 - 78: i11iIiiIii / iIii1I11I1II1 - o0oOOo0O0Ooo
if ( self . enable_status == None ) : return
if 23 - 23: I11i
Ii1i11IIii1I = self . __build_prefix_tuple ( iid , prefix , group )
Ii1i11IIii1I = self . __delete ( "lisp map-cache" , Ii1i11IIii1I )
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "delete error" )
return ( "good" )
if 40 - 40: o0oOOo0O0Ooo - II111iiii / Oo0Ooo
if 14 - 14: I1ii11iIi11i
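# Read-only queries: the get_*() methods below fetch "lisp system",
# "lisp map-cache" and "lisp site-cache" data, either the whole table or a
# single instance-id/prefix/group entry, through the data GET helper.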
def get_system ( self ) :
if ( self . enable_status == None ) : return
Ii1i11IIii1I = self . __get_data ( "lisp system" , "" )
return ( Ii1i11IIii1I )
if 6 - 6: IiII * i11iIiiIii % iIii1I11I1II1 % i11iIiiIii + o0oOOo0O0Ooo / i1IIi
if 53 - 53: I11i + iIii1I11I1II1
def get_map_cache ( self ) :
if ( self . enable_status == None ) : return
Ii1i11IIii1I = self . __get_data ( "lisp map-cache" , "" )
return ( Ii1i11IIii1I )
if 53 - 53: OoooooooOO + Oo0Ooo + oO0o
if 24 - 24: iII111i - IiII - iII111i * I1ii11iIi11i . OoooooooOO / IiII
def get_map_cache_entry ( self , iid = "" , prefix = "" , group = "" ) :
if ( prefix == "" ) : return ( "no prefix supplied" )
if ( self . __check_prefix_syntax ( prefix ) == False ) :
return ( "bad prefix syntax" )
if 89 - 89: OoO0O00 + IiII * I1Ii111
if ( group != "" and self . __check_prefix_syntax ( group ) == False ) :
return ( "bad group syntax" )
if 28 - 28: OoooooooOO . oO0o % I1ii11iIi11i / i1IIi / OOooOOo
if 36 - 36: o0oOOo0O0Ooo + I11i - IiII + iIii1I11I1II1 + OoooooooOO
if ( self . enable_status == None ) : return
if 4 - 4: II111iiii . I11i + Ii1I * I1Ii111 . ooOoO0o
Ii1i11IIii1I = self . __build_prefix_tuple ( iid , prefix , group )
Ii1i11IIii1I = self . __get_data ( "lisp map-cache" , Ii1i11IIii1I )
return ( Ii1i11IIii1I )
if 87 - 87: OoOoOO00 / OoO0O00 / i11iIiiIii
if 74 - 74: oO0o / I1ii11iIi11i % o0oOOo0O0Ooo
def get_site_cache ( self ) :
if ( self . enable_status == None ) : return
Ii1i11IIii1I = self . __get_data ( "lisp site-cache" , "" )
return ( Ii1i11IIii1I )
if 72 - 72: iIii1I11I1II1
if 45 - 45: Oo0Ooo - o0oOOo0O0Ooo % I1Ii111
def get_site_cache_entry ( self , iid = "" , prefix = "" , group = "" ) :
if ( self . enable_status == None ) : return
if ( prefix == "" ) : return ( "no prefix supplied" )
if ( self . __check_prefix_syntax ( prefix ) == False ) :
return ( "bad prefix syntax" )
if 62 - 62: i11iIiiIii % OOooOOo . IiII . OOooOOo
if ( group != "" and self . __check_prefix_syntax ( group ) == False ) :
return ( "bad group syntax" )
if 84 - 84: i11iIiiIii * OoO0O00
if 18 - 18: OOooOOo - Ii1I - OoOoOO00 / I1Ii111 - O0
Ii1i11IIii1I = self . __build_prefix_tuple ( iid , prefix , group )
Ii1i11IIii1I = self . __get_data ( "lisp site-cache" , Ii1i11IIii1I )
return ( Ii1i11IIii1I )
if 30 - 30: O0 + I1ii11iIi11i + II111iiii
if 14 - 14: o0oOOo0O0Ooo / OOooOOo - iIii1I11I1II1 - oO0o % ooOoO0o
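# Policy configuration: add_policy() validates the match_* and set_* keyword
# arguments, assembles one set-clause dict and one "match" clause dict, and
# PUTs both as a "lisp policy" record; delete_policy() removes it by name.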
def add_policy ( self , policy_name , match_iid = "0" , match_seid = "" ,
match_deid = "" , match_srloc = "" , match_drloc = "" , match_rloc_name = "" ,
match_geo = "" , match_elp = "" , match_rle = "" , match_json = "" ,
match_datetime_range = "" , set_action = "drop" , set_record_ttl = "" ,
set_iid = "" , set_seid = "" , set_deid = "" , set_rloc = "" , set_rloc_name = "" ,
set_geo = "" , set_elp = "" , set_rle = "" , set_json = "" ) :
if ( self . enable_status == None ) : return
IIii1 = { "policy-name" : policy_name }
if ( set_action != "" and set_action != None ) :
if ( set_action not in [ "process" , "drop" ] ) :
return ( "bad set-action value" )
if 35 - 35: i11iIiiIii - I1IiiI / OOooOOo + Ii1I * oO0o
IIii1 [ "set-action" ] = set_action
if 49 - 49: o0oOOo0O0Ooo * Ii1I + I11i + iII111i
if ( set_record_ttl != "" and set_record_ttl != None ) :
if ( set_record_ttl . isdigit ( ) == False ) :
return ( "bad set-record-ttl value" )
if 30 - 30: o0oOOo0O0Ooo / OOooOOo / IiII % ooOoO0o + II111iiii
IIii1 [ "set-record-ttl" ] = set_record_ttl
if 4 - 4: iII111i - Oo0Ooo - IiII - I11i % i11iIiiIii / OoO0O00
if ( set_iid != "" and set_iid != None ) :
if ( set_iid . isdigit ( ) == False ) :
return ( "bad set-iid value" )
if 50 - 50: ooOoO0o + i1IIi
IIii1 [ "set-instance-id" ] = set_iid
if 31 - 31: Ii1I
if ( set_seid != "" and set_seid != None ) :
if ( self . __check_prefix_syntax ( set_seid ) == False ) :
return ( "bad set-source-eid prefix syntax" )
if 78 - 78: i11iIiiIii + o0oOOo0O0Ooo + I1Ii111 / o0oOOo0O0Ooo % iIii1I11I1II1 % IiII
IIii1 [ "set-source-eid" ] = set_seid
if 83 - 83: iIii1I11I1II1 % OoOoOO00 % o0oOOo0O0Ooo % I1Ii111 . I1ii11iIi11i % O0
if ( set_deid != "" and set_deid != None ) :
if ( self . __check_prefix_syntax ( set_deid ) == False ) :
return ( "bad set-destination-eid prefix syntax" )
if 47 - 47: o0oOOo0O0Ooo
IIii1 [ "set-destination-eid" ] = set_deid
if 66 - 66: I1IiiI - IiII
if ( set_rloc != "" and set_rloc != None ) :
if ( self . __check_address_syntax ( set_rloc ) == False ) :
return ( "bad set-rloc-address syntax" )
if 33 - 33: I1IiiI / OoO0O00
IIii1 [ "set-rloc-address" ] = set_rloc
if 12 - 12: II111iiii
if ( set_rloc_name != "" and set_rloc_name != None ) :
IIii1 [ "set-rloc-record-name" ] = set_rloc_name
if 2 - 2: i1IIi - I1IiiI + I11i . II111iiii
if ( set_geo != "" and set_geo != None ) :
IIii1 [ "set-geo-name" ] = set_geo
if 25 - 25: oO0o
if ( set_elp != "" and set_elp != None ) :
IIii1 [ "set-elp-name" ] = set_elp
if 34 - 34: OoOoOO00 . iIii1I11I1II1 % O0
if ( set_rle != "" and set_rle != None ) :
IIii1 [ "set-rle-name" ] = set_rle
if 43 - 43: I1ii11iIi11i - iII111i
if ( set_json != "" and set_json != None ) :
IIii1 [ "set-json-name" ] = set_json
if 70 - 70: iII111i / OOooOOo % ooOoO0o - Ii1I
if 47 - 47: iII111i
o00Ooo0 = { }
if ( match_iid != "" and match_iid != None ) :
if ( match_iid . isdigit ( ) == False ) :
return ( "bad instance-id value" )
if 62 - 62: OOooOOo + Ii1I - OoOoOO00 * OoOoOO00 . OoOoOO00 + OoooooooOO
o00Ooo0 [ "instance-id" ] = match_iid
if 77 - 77: iIii1I11I1II1 . Ii1I % oO0o / Ii1I
if ( match_seid != "" and match_seid != None ) :
if ( self . __check_prefix_syntax ( match_seid ) == False ) :
return ( "bad source-eid prefix syntax" )
if 54 - 54: oO0o + ooOoO0o - Oo0Ooo
o00Ooo0 [ "source-eid" ] = match_seid
if 35 - 35: Ii1I - Ii1I + i1IIi - O0 - I1Ii111
if ( match_deid != "" and match_deid != None ) :
if ( self . __check_prefix_syntax ( match_deid ) == False ) :
return ( "bad destination-eid prefix syntax" )
if 58 - 58: OoOoOO00 - iII111i - OoooooooOO
o00Ooo0 [ "destination-eid" ] = match_deid
if 96 - 96: iIii1I11I1II1
if ( match_srloc != "" and match_srloc != None ) :
if ( self . __check_prefix_syntax ( match_srloc ) == False ) :
return ( "bad source-rloc prefix syntax" )
if 82 - 82: OoOoOO00 + O0 - IiII % oO0o * i11iIiiIii
o00Ooo0 [ "source-rloc" ] = match_srloc
if 15 - 15: o0oOOo0O0Ooo
if ( match_drloc != "" and match_drloc != None ) :
if ( self . __check_prefix_syntax ( match_drloc ) == False ) :
return ( "bad destination-rloc prefix syntax" )
if 39 - 39: OOooOOo / I1ii11iIi11i / I1IiiI * I1Ii111
o00Ooo0 [ "destination-rloc" ] = match_drloc
if 44 - 44: O0 + ooOoO0o . iIii1I11I1II1 + Oo0Ooo / O0 - I11i
if ( match_rloc_name != "" and match_rloc_name != None ) :
o00Ooo0 [ "rloc-record-name" ] = match_rloc_name
if 83 - 83: IiII * I11i / Oo0Ooo
if ( match_geo != "" and match_geo != None ) :
o00Ooo0 [ "geo-name" ] = match_geo
if 32 - 32: o0oOOo0O0Ooo + OoOoOO00 - OoooooooOO
if ( match_elp != "" and match_elp != None ) :
o00Ooo0 [ "elp-name" ] = match_elp
if 39 - 39: OoooooooOO * OOooOOo * O0 . I11i . OoO0O00 + ooOoO0o
if ( match_rle != "" and match_rle != None ) :
o00Ooo0 [ "rle-name" ] = match_rle
if 9 - 9: OoOoOO00 + oO0o % OoooooooOO + o0oOOo0O0Ooo
if ( match_json != "" and match_json != None ) :
o00Ooo0 [ "json-name" ] = match_json
if 56 - 56: OoooooooOO + I1ii11iIi11i - iII111i
if ( match_datetime_range != "" and
match_datetime_range != None ) :
o00Ooo0 [ "datetime-range" ] = match_datetime_range
if 24 - 24: o0oOOo0O0Ooo + ooOoO0o + I11i - iIii1I11I1II1
o00Ooo0 = { "match" : o00Ooo0 }
if 49 - 49: I11i . ooOoO0o * OoOoOO00 % IiII . O0
Ii1i11IIii1I = [ ]
Ii1i11IIii1I . append ( IIii1 )
Ii1i11IIii1I . append ( o00Ooo0 )
if 48 - 48: O0 * Ii1I - O0 / Ii1I + OoOoOO00
Ii1i11IIii1I = self . __put ( "lisp policy" , Ii1i11IIii1I )
if 52 - 52: OoO0O00 % Ii1I * II111iiii
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "put error" )
return ( "good" )
if 4 - 4: I11i % O0 - OoooooooOO + ooOoO0o . oO0o % II111iiii
if 9 - 9: II111iiii * II111iiii . i11iIiiIii * iIii1I11I1II1
def delete_policy ( self , policy_name ) :
if ( self . enable_status == None ) : return
if 18 - 18: OoO0O00 . II111iiii % OoOoOO00 % Ii1I
if 87 - 87: iIii1I11I1II1 . OoooooooOO * OoOoOO00
Ii1i11IIii1I = self . __delete ( "lisp policy" , { "policy-name" : policy_name } )
if ( self . __error ( Ii1i11IIii1I ) ) : return ( "delete error" )
return ( "good" )
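# Low-level REST helpers: __get/__get_data/__put/__delete wrap the device's
# LISP HTTP API with requests and basic auth (verify=False, REQ_TIMEOUT),
# returning None on any transport error, empty reply or HTML error page.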
def __get ( self , command ) :
II11 = self . url + command . split ( " " ) [ 1 ]
if 2 - 2: iIii1I11I1II1
self . __api_debug ( "get command: {}" . format ( command ) )
try :
iiii1 = requests . get ( II11 , auth = ( self . user , self . pw ) , verify = False ,
timeout = REQ_TIMEOUT )
except :
return ( None )
if 66 - 66: oO0o * iIii1I11I1II1 % iIii1I11I1II1 * IiII - ooOoO0o - IiII
if ( iiii1 == None or iiii1 . text == None ) : return ( None )
self . __api_debug ( "get returned: {}" . format ( iiii1 . text ) )
if ( iiii1 . text == "" ) : return ( None )
if 70 - 70: I1Ii111 + oO0o
return ( self . __unicode_to_ascii ( command , iiii1 . text ) )
if 93 - 93: I1Ii111 + Ii1I
if 33 - 33: O0
def __get_data ( self , command , data ) :
II11 = self . url + "data/" + command . split ( " " ) [ 1 ]
data = self . __ascii_to_unicode ( command , data )
if 78 - 78: O0 / II111iiii * OoO0O00
self . __api_debug ( "get api data: {}" . format ( data ) )
try :
iiii1 = requests . get ( II11 , data = data , auth = ( self . user , self . pw ) ,
verify = False , timeout = REQ_TIMEOUT )
except :
return ( None )
if 50 - 50: OoooooooOO - iIii1I11I1II1 + i1IIi % I1Ii111 - iIii1I11I1II1 % O0
if ( iiii1 == None or iiii1 . text == None ) : return ( None )
self . __api_debug ( "get returned: {}" . format ( iiii1 . text ) )
if ( iiii1 . text == "" ) : return ( None )
if ( iiii1 . text . find ( "<html>" ) != - 1 ) : return ( None )
if 9 - 9: oO0o % i11iIiiIii / Oo0Ooo
data = iiii1 . text . encode ( )
return ( json . loads ( data ) )
if 20 - 20: oO0o * O0 + I11i - OoooooooOO . I11i
if 60 - 60: o0oOOo0O0Ooo . o0oOOo0O0Ooo / iII111i
def __put ( self , command , data ) :
II11 = self . url + command . split ( " " ) [ 1 ]
data = self . __ascii_to_unicode ( command , data )
if 45 - 45: O0 . i11iIiiIii % iII111i . OoOoOO00 % IiII % iIii1I11I1II1
self . __api_debug ( "put data: {}" . format ( data ) )
try :
iiii1 = requests . put ( II11 , data = data , auth = ( self . user , self . pw ) ,
verify = False , timeout = REQ_TIMEOUT )
except :
return ( None )
if 58 - 58: iIii1I11I1II1 . OoOoOO00 - i11iIiiIii * iIii1I11I1II1 % i11iIiiIii / I1IiiI
if ( iiii1 == None or iiii1 . text == None ) : return ( None )
self . __api_debug ( "put returned: {}" . format ( iiii1 . text ) )
if ( iiii1 . text == "" ) : return ( None )
if 80 - 80: I1ii11iIi11i / iIii1I11I1II1 % OoOoOO00
return ( self . __unicode_to_ascii ( command , iiii1 . text ) )
if 80 - 80: OoO0O00 % iII111i
if 99 - 99: ooOoO0o / iIii1I11I1II1 - Ii1I * I1ii11iIi11i % I1IiiI
def __delete ( self , command , data ) :
II11 = self . url + command . split ( " " ) [ 1 ]
data = self . __ascii_to_unicode ( command , data )
if 13 - 13: OoO0O00
self . __api_debug ( "delete data: {}" . format ( data ) )
try :
iiii1 = requests . delete ( II11 , data = data , auth = ( self . user , self . pw ) ,
verify = False , timeout = REQ_TIMEOUT )
except :
return ( None )
if 70 - 70: I1Ii111 + O0 . oO0o * Ii1I
if ( iiii1 == None or iiii1 . text == None ) : return ( None )
self . __api_debug ( "delete returned: {}" . format ( iiii1 . text ) )
if ( iiii1 . text == "" ) : return ( None )
if 2 - 2: OoooooooOO . OOooOOo . IiII
return ( self . __unicode_to_ascii ( command , iiii1 . text ) )
if 42 - 42: OOooOOo % oO0o / OoO0O00 - oO0o * i11iIiiIii
if 19 - 19: oO0o * I1IiiI % i11iIiiIii
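# JSON marshalling helpers (Python 2 idioms such as unicode() and has_key()):
# __unicode_to_ascii() turns a decoded JSON reply back into byte-string
# dicts/lists, and __ascii_to_unicode() converts outgoing data to unicode
# before it is serialized for a request body.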
def __unicode_to_ascii ( self , command , rtext ) :
if ( rtext . find ( "<html>" ) != - 1 ) : return ( None )
if 29 - 29: iII111i + i11iIiiIii % I11i
Ii1i11IIii1I = json . loads ( rtext ) [ 0 ]
oOo00Ooo0o0 = unicode ( command )
if ( Ii1i11IIii1I . has_key ( oOo00Ooo0o0 ) == False ) : return ( None )
if 33 - 33: I11i
if ( type ( Ii1i11IIii1I ) == dict ) :
Ii1i11IIii1I = Ii1i11IIii1I [ oOo00Ooo0o0 ]
oOO0 = { }
for IIi1I1i in Ii1i11IIii1I :
for oOo00Ooo0o0 in IIi1I1i :
oOO0 [ oOo00Ooo0o0 . encode ( ) ] = IIi1I1i [ oOo00Ooo0o0 ] . encode ( )
if 13 - 13: iIii1I11I1II1 . OoOoOO00 * I1IiiI / oO0o * Ii1I
if 64 - 64: ooOoO0o / O0 * OoOoOO00 * ooOoO0o
else :
oOO0 = [ ]
for O00oo in Ii1i11IIii1I :
OoOo0oO0o = { }
o0OoOo00ooO = O00oo . values ( ) [ 0 ]
for oOo00Ooo0o0 in o0OoOo00ooO : OoOo0oO0o [ oOo00Ooo0o0 . encode ( ) ] = o0OoOo00ooO [ oOo00Ooo0o0 ] . encode ( )
OoOo0oO0o = { O00oo . keys ( ) [ 0 ] . encode ( ) : OoOo0oO0o }
oOO0 . append ( OoOo0oO0o )
if 10 - 10: I11i / I11i * i11iIiiIii
if 46 - 46: OoO0O00 * Oo0Ooo % oO0o + O0 * IiII
return ( oOO0 )
if 34 - 34: OoO0O00
if 27 - 27: Ii1I - O0 % I11i * I1Ii111 . IiII % iIii1I11I1II1
def __walk_dict_array ( self , udata , u_dict ) :
for oOo00Ooo0o0 in u_dict :
IiIi1i = unicode ( u_dict [ oOo00Ooo0o0 ] )
if ( type ( u_dict [ oOo00Ooo0o0 ] ) == dict ) :
Ii1i1 = { }
IiIi1i = u_dict [ oOo00Ooo0o0 ]
for oOoO00 in IiIi1i : Ii1i1 [ unicode ( oOoO00 ) ] = unicode ( IiIi1i [ oOoO00 ] )
IiIi1i = Ii1i1
udata [ unicode ( oOo00Ooo0o0 ) ] = IiIi1i
if 50 - 50: OoOoOO00
if 33 - 33: I11i
if 98 - 98: OoOoOO00 % II111iiii
def __ascii_to_unicode ( self , command , ascii_data ) :
OoO0O000 = ( type ( ascii_data ) == dict )
if ( OoO0O000 ) : ascii_data = [ ascii_data ]
if 14 - 14: OoO0O00 / OoO0O00 * O0 . oO0o
oooOO0oOooO00 = { }
iIIiI11i1I11 = [ ]
for O00oo in ascii_data :
oooOO0oOooO00 = { }
if 29 - 29: OoO0O00 * iIii1I11I1II1 * O0 - OoOoOO00 / IiII
if ( type ( O00oo ) == dict ) :
self . __walk_dict_array ( oooOO0oOooO00 , O00oo )
elif ( type ( O00oo ) == list ) :
o0oO0OO00ooOO = [ ]
for IiIIiii11II1 in O00oo :
iiii1i1II1 = { }
self . __walk_dict_array ( iiii1i1II1 , IiIIiii11II1 )
o0oO0OO00ooOO . append ( iiii1i1II1 )
if 63 - 63: iIii1I11I1II1 % I1ii11iIi11i - iII111i
oooOO0oOooO00 = o0oO0OO00ooOO
else :
oooOO0oOooO00 = { unicode ( O00oo . keys ( ) [ 0 ] ) : oooOO0oOooO00 }
if 17 - 17: I1IiiI
iIIiI11i1I11 . append ( oooOO0oOooO00 )
if 88 - 88: OoooooooOO
oooOO0oOooO00 = iIIiI11i1I11
if 28 - 28: Oo0Ooo * o0oOOo0O0Ooo / I1Ii111
Oo0O = { }
Oo0O [ unicode ( command ) ] = oooOO0oOooO00 [ 0 ] if OoO0O000 else oooOO0oOooO00
return ( json . dumps ( Oo0O ) )
if 88 - 88: I1IiiI % OOooOOo % I1ii11iIi11i . i11iIiiIii % o0oOOo0O0Ooo
if 38 - 38: I1Ii111 + OoooooooOO . i1IIi
def __error ( self , data ) :
if ( data == None ) : return ( True )
if ( data . has_key ( "!" ) ) : return ( False )
return ( True )
if 19 - 19: iII111i - o0oOOo0O0Ooo - Ii1I - OoOoOO00 . iII111i . I1Ii111
if 48 - 48: iII111i + IiII
def __api_debug ( self , string ) :
if ( self . api_debug ) :
print ( "lispapi[{}@{}]: {}" . format ( self . user , self . host , string ) )
if 60 - 60: I11i + iII111i . IiII / i1IIi . iIii1I11I1II1
if 14 - 14: OOooOOo
if 79 - 79: Ii1I
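# Input validation helpers: a prefix must be a quoted distinguished name or
# contain a "/" mask; an address may be an IPv4 dotted quad, IPv6 colon
# groups, three dash-separated hex fields (MAC-style), or a quoted
# distinguished name.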
def __check_prefix_syntax ( self , prefix ) :
if ( self . __is_dist_name ( prefix ) ) : return ( True )
if 22 - 22: I1Ii111 / o0oOOo0O0Ooo
oO0O = prefix . find ( "/" )
if ( oO0O == - 1 ) : return ( False )
return ( self . __check_address_syntax ( prefix [ 0 : oO0O ] ) )
if 59 - 59: OoooooooOO * Oo0Ooo + i1IIi
if 23 - 23: ooOoO0o
def __check_prefix_list ( self , prefix_list ) :
if ( type ( prefix_list ) != list ) : return ( "prefix_list must be an array" )
if ( len ( prefix_list ) == 0 ) :
return ( "prefix_list has no array elements supplied" )
if 13 - 13: iIii1I11I1II1
if ( type ( prefix_list [ 0 ] ) != dict ) :
return ( "prefix_list must be array of type dict" )
if 77 - 77: i11iIiiIii - iIii1I11I1II1 / oO0o / ooOoO0o / OoO0O00
if ( prefix_list [ 0 ] . has_key ( "allowed-prefix" ) == False ) :
return ( "prefix_list is incorrectly formated" )
if 56 - 56: OoooooooOO * O0
return ( True )
if 85 - 85: OoooooooOO % OoOoOO00 * iIii1I11I1II1
if 44 - 44: iIii1I11I1II1 . I1ii11iIi11i + I1Ii111 . ooOoO0o
def __check_address_set_syntax ( self , address_set , allow_interfaces ) :
for II1i11 in address_set :
if ( type ( II1i11 ) == str ) :
Ii1IIIII = II1i11
else :
Ii1IIIII = II1i11 [ "address" ] if II1i11 . has_key ( "address" ) else II1i11 [ "interface" ] if II1i11 . has_key ( "interface" ) else ""
if 49 - 49: iIii1I11I1II1 % II111iiii
if 50 - 50: O0 . O0 . ooOoO0o % Oo0Ooo
if ( allow_interfaces and
Ii1IIIII in [ "en0" , "en1" , "eth0" , "eth1" ] ) : continue
if ( self . __check_address_syntax ( Ii1IIIII ) ) : continue
return ( False )
if 68 - 68: oO0o
return ( True )
if 10 - 10: Ii1I
if 77 - 77: OOooOOo / II111iiii + IiII + ooOoO0o - i11iIiiIii
def __is_dist_name ( self , addr_str ) :
return ( addr_str [ 0 ] == "'" and addr_str [ - 1 ] == "'" )
if 65 - 65: O0 * I1IiiI / I1IiiI . OoOoOO00
if 87 - 87: II111iiii * I1ii11iIi11i % Oo0Ooo * Oo0Ooo
def __check_address_syntax ( self , address ) :
if 58 - 58: OOooOOo . o0oOOo0O0Ooo + I1IiiI % Oo0Ooo - OoO0O00
if ( self . __is_dist_name ( address ) ) : return ( True )
II1i11 = address . split ( "." )
if ( len ( II1i11 ) > 1 ) :
if ( len ( II1i11 ) != 4 ) : return ( False )
for Oo00O0OO in range ( 4 ) :
if ( II1i11 [ Oo00O0OO ] . isdigit ( ) == False ) : return ( False )
oOOOoo0o = int ( II1i11 [ Oo00O0OO ] )
if ( oOOOoo0o < 0 or oOOOoo0o > 255 ) : return ( False )
if 44 - 44: O0 % i1IIi
return ( True )
II1i11 = address . split ( ":" )
if ( len ( II1i11 ) > 1 ) :
if ( len ( II1i11 ) > 8 ) : return ( False )
for IIii in II1i11 :
if ( IIii == "" ) : continue
try : int ( IIii , 16 )
except : return ( False )
if 97 - 97: I1ii11iIi11i / Oo0Ooo + I1Ii111
return ( True )
II1i11 = address . split ( "-" )
if ( len ( II1i11 ) > 1 ) :
if ( len ( II1i11 ) != 3 ) : return ( False )
for IIii in II1i11 :
try : int ( IIii , 16 )
except : return ( False )
if 48 - 48: I11i * iII111i * iII111i
return ( True )
if 70 - 70: oO0o + I11i % i11iIiiIii + O0
return ( False )
if 65 - 65: iIii1I11I1II1 % oO0o + O0 / OoooooooOO
if 52 - 52: Ii1I % OOooOOo * I1IiiI % I11i + OOooOOo / iII111i
def __build_prefix_tuple ( self , iid , eid_prefix , group , kw = "prefix" ) :
Ii1i11IIii1I = { }
Ii1i11IIii1I [ "instance-id" ] = iid
Ii1i11IIii1I [ "eid-prefix" ] = eid_prefix
if ( group != "" ) : Ii1i11IIii1I [ "group-prefix" ] = group
return ( { kw : Ii1i11IIii1I } )
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
the-stack_0_22819
import numpy as np
from ..tools.pca import pca
from .. import settings
from .. import logging as logg
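# Example call (a minimal sketch, not part of the original module): assuming
# an AnnData object `adata` and that this function is exposed as `sc.tl.tsne`:
#
#     import scanpy.api as sc
#     sc.tl.tsne(adata, n_pcs=30, perplexity=30, random_state=0)
#
# The tSNE coordinates are then stored on `adata` as described in the Returns
# section of the docstring and can be plotted with `sc.pl.tsne(adata)`.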
def tsne(
adata,
n_pcs=50,
perplexity=30,
early_exaggeration=12,
learning_rate=1000,
random_state=0,
use_fast_tsne=True,
recompute_pca=False,
n_jobs=None,
copy=False):
"""t-SNE [Maaten08]_ [Amir13]_ [Pedregosa11]_.
t-distributed stochastic neighborhood embedding (tSNE) [Maaten08]_ has been
proposed for visualizating single-cell data by [Amir13]_. Here, by default,
we use the implementation of *scikit-learn* [Pedregosa11]_. You can achieve
a huge speedup and better convergence if you install `Multicore-tSNE
<https://github.com/DmitryUlyanov/Multicore-TSNE>`__ by [Ulyanov16]_, which
will be automatically detected by Scanpy.
Parameters
----------
adata : :class:`~scanpy.api.AnnData`
Annotated data matrix.
n_pcs : `int`, optional (default: 50)
Number of principal components in preprocessing PCA. Set to 0 if you
do not want preprocessing with PCA.
perplexity : `float`, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : `float`, optional (default: 12.0)
Controls how tight natural clusters in the original space are in the
embedded space and how much space will be between them. For larger
values, the space between natural clusters will be larger in the
embedded space. Again, the choice of this parameter is not very
critical. If the cost function increases during initial optimization,
the early exaggeration factor or the learning rate might be too high.
learning_rate : `float`, optional (default: 1000)
Note that the R-package "Rtsne" uses a default of 200.
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
random_state : `int` or `None`, optional (default: 0)
        Change this to use different initial states for the optimization. If `None`,
the initial state is not reproducible.
use_fast_tsne : `bool`, optional (default: `True`)
Use the MulticoreTSNE package by D. Ulyanov if it is installed.
n_jobs : `int` or `None` (default: `sc.settings.n_jobs`)
Number of jobs.
copy : `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Depending on `copy`, returns or updates `adata` with the following fields.
X_tsne : `np.ndarray` (`adata.obs`, dtype `float`)
tSNE coordinates of data.
"""
logg.info('computing tSNE', r=True)
adata = adata.copy() if copy else adata
# preprocessing by PCA
if (n_pcs > 0
and 'X_pca' in adata.obsm_keys()
and adata.obsm['X_pca'].shape[1] >= n_pcs
and not recompute_pca):
X = adata.obsm['X_pca'][:, :n_pcs]
logg.info(' using \'X_pca\' with n_pcs = {} for tSNE'
.format(n_pcs))
else:
if n_pcs > 0 and adata.X.shape[1] > n_pcs:
logg.info(' computing \'X_pca\' with n_pcs = {}'.format(n_pcs))
logg.hint('avoid this by setting n_pcs = 0')
X = pca(adata.X, random_state=random_state, n_comps=n_pcs)
adata.obsm['X_pca'] = X
else:
X = adata.X
logg.info(' using data matrix X directly (no PCA)')
# params for sklearn
params_sklearn = {'perplexity': perplexity,
'random_state': random_state,
'verbose': max(0, settings.verbosity-3),
'early_exaggeration': early_exaggeration,
'learning_rate': learning_rate,
}
n_jobs = settings.n_jobs if n_jobs is None else n_jobs
# deal with different tSNE implementations
multicore_failed = True
if n_jobs >= 1 and use_fast_tsne:
try:
from MulticoreTSNE import MulticoreTSNE as TSNE
tsne = TSNE(n_jobs=n_jobs, **params_sklearn)
logg.info(' using the "MulticoreTSNE" package by Ulyanov (2017)')
X_tsne = tsne.fit_transform(X.astype(np.float64))
multicore_failed = False
except ImportError:
logg.warn('Consider installing the package MulticoreTSNE '
'(https://github.com/DmitryUlyanov/Multicore-TSNE). '
'Even for n_jobs=1 this speeds up the computation considerably '
'and might yield better converged results.')
pass
if multicore_failed:
from sklearn.manifold import TSNE
from . import _tsne_fix # fix by D. DeTomaso for sklearn < 0.19
# unfortunately, sklearn does not allow to set a minimum number of iterations for barnes-hut tSNE
tsne = TSNE(**params_sklearn)
logg.info(' using sklearn.manifold.TSNE with a fix by D. DeTomaso')
X_tsne = tsne.fit_transform(X)
# update AnnData instance
adata.obsm['X_tsne'] = X_tsne # annotate samples with tSNE coordinates
logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
logg.hint('added\n'
' \'X_tsne\', tSNE coordinates (adata.obs)')
return adata if copy else None
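# Minimal usage sketch (illustrative; assumes `adata` is an AnnData object and
# that this module is exposed through the scanpy API, e.g. as `sc.tl.tsne`):
#
#   tsne(adata, n_pcs=50, perplexity=30)   # writes adata.obsm['X_tsne'] in place
#   coords = adata.obsm['X_tsne']          # per-cell tSNE coordinates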
|
the-stack_0_22820 | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + {"Collapsed": "false"}
import ipyvuetify as v
import scrapbook as sb
import yaml
from pathlib import Path
# + {"Collapsed": "false"}
with open('config.yml', 'r') as f:
config = yaml.safe_load(f)
dashboard_name = config.get('title', '')
server_url = config.get('server_url', 'http://localhost:8866')
voila_nb_path = Path(config.get('voila_nb_path', '.'))
title_bar_color = config.get('title_bar_color', 'orange')
voila_base_url = server_url + '/voila/render/'
logo = { 'jupyter': 'https://jupyter.org/assets/nav_logo.svg' }
# + {"Collapsed": "false"}
app_bar_links = [
v.Btn(children=[name], flat=True, href=link, target='_blank')
for name, link in config.get('app_bar_links', {}).items()
]
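# Example config.yml that the cells above read (keys match the `config.get`
# calls; all values here are illustrative placeholders):
#
#   title: My Dashboards
#   server_url: http://localhost:8866
#   voila_nb_path: notebooks
#   title_bar_color: orange
#   app_bar_links:
#     Documentation: https://example.org/docs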
# + {"Collapsed": "false"}
filelist = {}
stages = [p for p in voila_nb_path.glob('*') if p.is_dir() and not str(p).startswith(".")]
for stage in stages:
files = stage.glob("*.ipynb")
items = []
for f in files:
this_item = {}
this_item['title'] = f.name
this_item['description'] = 'A Jupyter Notebook.'
this_item['link'] = voila_base_url + str(f)
this_item['logo'] = 'https://jupyter.org/assets/nav_logo.svg'
this_item['fname'] = str(f)
nb = sb.read_notebook(str(f))
for key, value in nb.scraps.data_dict.items():
this_item[key] = value
items.append(this_item)
filelist[stage.name] = items
# + {"Collapsed": "false"}
# build toolbar
toolbar = v.Toolbar(color=title_bar_color, dark=True, children=[
v.ToolbarItems(children=[v.Img(src=logo['jupyter'], style_='height:100%')]),
v.ToolbarTitle(children=[dashboard_name], color='green'),
v.Spacer(),
v.ToolbarItems(children=app_bar_links)
], app=True)
# + {"Collapsed": "false"}
tab_children = []
for stage in sorted(filelist.keys()):
items = filelist[stage]
cards = [
v.Flex(ma_2=True, fluid=True, children=[
v.Card(hover=True,
align_center=True,
fluid=True,
min_width='300px',
max_width='300px',
href=details['link'],
target='_blank',
children=[
v.CardTitle(children=[
v.Html(tag='div', class_='headline mb-0', children=[details['title']]),
v.Spacer(),
]),
v.CardText(children=[details['description']]),
])
])
for i, details in enumerate(items)
]
tab_children.append(v.Tab(children=[stage]))
tab_children.append(v.TabItem(children=[v.Layout(ma_5=True, wrap=True, children=cards)]))
# + {"Collapsed": "false"}
tabs = v.Tabs(v_model='tab', color='grey lighten-5', fixed_tabs=True, children=tab_children)
app = v.App(
style_="background: white",
children=[
toolbar,
v.Container(fluid=True, mt_3=True, children=[
v.Layout(children=[
v.Flex(children=[tabs])
])
])
]
)
# + {"Collapsed": "false"}
app
# + {"Collapsed": "false"}
|
the-stack_0_22822 | # pytype: skip-file
import time
from collections import defaultdict
import wikipedia # pip install wikipedia
from paraloop import ParaLoop, Variable
from paraloop.aggregation_strategies import Sum
def wikipedia_names():
return wikipedia.search("Python", results=20)[1:]
def original_code():
frequencies = defaultdict(int)
total = 0
start = time.time()
for name in wikipedia_names():
try:
content = wikipedia.page(name).content
except (
wikipedia.exceptions.DisambiguationError,
wikipedia.exceptions.PageError,
):
continue
for line in content.splitlines():
words = line.split(" ")
for word in words:
frequencies[word] += 1
total += 1
# We don't print the entire dictionary because it is too large, but let's just check
# how many words we found.
print(len(frequencies), total)
print(f"The original loop took {time.time() - start} seconds.")
def paraloop_code():
frequencies = Variable(defaultdict(int), aggregation_strategy=Sum)
total = Variable(0, aggregation_strategy=Sum)
start = time.time()
# Note that the content of the loop is identical!
for name in ParaLoop(wikipedia_names()):
try:
content = wikipedia.page(name).content
except (
wikipedia.exceptions.DisambiguationError,
wikipedia.exceptions.PageError,
):
continue
for line in content.splitlines():
words = line.split(" ")
for word in words:
frequencies[word] += 1
total += 1
# We don't print the entire dictionary because it is too large, but let's just check
# how many words we found.
print(len(frequencies), total)
print(f"The ParaLoop took {time.time() - start} seconds.")
if __name__ == "__main__":
original_code()
paraloop_code()
|
the-stack_0_22824 | from ctm_python_client.core.base import BaseJob
class ScriptJob(BaseJob):
def __init__(
self,
folder,
job_name,
file_name,
file_path,
pre_command,
post_command,
host=None,
run_as=None,
description=None,
):
BaseJob.__init__(
self, folder, job_name, description=description, host=host, run_as=run_as
)
self.file_name = file_name
self.file_path = file_path
self.pre_command = pre_command
self.post_command = post_command
def get_json(self):
job_json = BaseJob.get_json(self)
job_json["Type"] = "Job:Script"
if self.file_name != None:
job_json["FileName"] = self.file_name
if self.file_path != None:
job_json["FilePath"] = self.file_path
if self.pre_command != None:
job_json["PreCommand"] = self.pre_command
if self.post_command != None:
job_json["PostCommand"] = self.post_command
return job_json
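# Minimal usage sketch (illustrative only; `my_folder` stands for a folder
# object created elsewhere with this client, and the file/command values are
# placeholders rather than a documented workflow):
#
#   job = ScriptJob(my_folder, "nightly_backup",
#                   file_name="backup.sh", file_path="/opt/scripts",
#                   pre_command="echo start", post_command="echo done",
#                   host="agent01", run_as="ctmuser")
#   job.get_json()   # -> dict with 'Type': 'Job:Script', 'FileName', 'FilePath', ...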
|
the-stack_0_22826 | #!/usr/bin/python
import json, re
import random
import sys
try:
from urllib.request import build_opener
except:
from urllib2 import build_opener
# Makes a request to a given URL (first arg) and optional params (second arg)
def make_request(*args):
opener = build_opener()
opener.addheaders = [('User-agent',
'Mozilla/5.0'+str(random.randrange(1000000)))]
try:
return opener.open(*args).read().strip()
except Exception as e:
try:
p = e.read().strip()
except:
p = e
raise Exception(p)
def parse_addr_args(*args):
# Valid input formats: blockr_unspent([addr1, addr2,addr3])
# blockr_unspent(addr1, addr2, addr3)
# blockr_unspent([addr1, addr2, addr3], network)
# blockr_unspent(addr1, addr2, addr3, network)
# Where network is 'btc' or 'testnet'
network = 'btc'
addr_args = args
if len(args) >= 1 and args[-1] in ('testnet', 'btc'):
network = args[-1]
addr_args = args[:-1]
if len(addr_args) == 1 and isinstance(addr_args, list):
addr_args = addr_args[0]
return network, addr_args
# Gets the unspent outputs of one or more addresses
def bci_unspent(*args):
network, addrs = parse_addr_args(*args)
u = []
for a in addrs:
try:
data = make_request('https://blockchain.info/unspent?active='+a)
except Exception as e:
if str(e) == 'No free outputs to spend':
continue
else:
raise Exception(e)
try:
jsonobj = json.loads(data.decode("utf-8"))
for o in jsonobj["unspent_outputs"]:
h = o['tx_hash'].decode('hex')[::-1].encode('hex')
u.append({
"output": h+':'+str(o['tx_output_n']),
"value": o['value']
})
except:
raise Exception("Failed to decode data: "+data)
return u
def blockr_unspent(*args):
# Valid input formats: blockr_unspent([addr1, addr2,addr3])
# blockr_unspent(addr1, addr2, addr3)
# blockr_unspent([addr1, addr2, addr3], network)
# blockr_unspent(addr1, addr2, addr3, network)
# Where network is 'btc' or 'testnet'
network, addr_args = parse_addr_args(*args)
if network == 'testnet':
blockr_url = 'http://tbtc.blockr.io/api/v1/address/unspent/'
elif network == 'btc':
blockr_url = 'http://btc.blockr.io/api/v1/address/unspent/'
else:
raise Exception(
'Unsupported network {0} for blockr_unspent'.format(network))
if len(addr_args) == 0:
return []
elif isinstance(addr_args[0], list):
addrs = addr_args[0]
else:
addrs = addr_args
res = make_request(blockr_url+','.join(addrs))
data = json.loads(res.decode("utf-8"))['data']
o = []
if 'unspent' in data:
data = [data]
for dat in data:
for u in dat['unspent']:
o.append({
"output": u['tx']+':'+str(u['n']),
"value": int(u['amount'].replace('.', ''))
})
return o
def helloblock_unspent(*args):
network, addrs = parse_addr_args(*args)
if network == 'testnet':
url = 'https://testnet.helloblock.io/v1/addresses/%s/unspents?limit=500&offset=%s'
elif network == 'btc':
url = 'https://mainnet.helloblock.io/v1/addresses/%s/unspents?limit=500&offset=%s'
o = []
for addr in addrs:
for offset in xrange(0, 10**9, 500):
res = make_request(url % (addr, offset))
data = json.loads(res.decode("utf-8"))["data"]
if not len(data["unspents"]):
break
elif offset:
sys.stderr.write("Getting more unspents: %d\n" % offset)
for dat in data["unspents"]:
o.append({
"output": dat["txHash"]+':'+str(dat["index"]),
"value": dat["value"],
})
return o
unspent_getters = {
'bci': bci_unspent,
'blockr': blockr_unspent,
'helloblock': helloblock_unspent
}
def unspent(*args, **kwargs):
f = unspent_getters.get(kwargs.get('source', ''), bci_unspent)
return f(*args)
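# Minimal usage sketch (the address is a placeholder and these third-party
# block-explorer APIs may no longer be reachable; `source` picks a backend
# from unspent_getters above):
#
#   utxos = unspent('1ExampleAddrxxxxxxxxxxxxxxxxxxxxxx', source='blockr')
#   for u in utxos:
#       print(u['output'], u['value'])   # "txhash:index" and value in satoshis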
# Gets the transaction output history of a given set of addresses,
# including whether or not they have been spent
def history(*args):
# Valid input formats: history([addr1, addr2,addr3])
# history(addr1, addr2, addr3)
if len(args) == 0:
return []
elif isinstance(args[0], list):
addrs = args[0]
else:
addrs = args
txs = []
for addr in addrs:
offset = 0
while 1:
gathered = False
while not gathered:
try:
data = make_request(
'https://blockchain.info/address/%s?format=json&offset=%s' %
(addr, offset))
gathered = True
except Exception as e:
try:
sys.stderr.write(e.read().strip())
except:
sys.stderr.write(str(e))
gathered = False
try:
jsonobj = json.loads(data.decode("utf-8"))
except:
raise Exception("Failed to decode data: "+data)
txs.extend(jsonobj["txs"])
if len(jsonobj["txs"]) < 50:
break
offset += 50
sys.stderr.write("Fetching more transactions... "+str(offset)+'\n')
outs = {}
for tx in txs:
for o in tx["out"]:
if o.get('addr', None) in addrs:
key = str(tx["tx_index"])+':'+str(o["n"])
outs[key] = {
"address": o["addr"],
"value": o["value"],
"output": tx["hash"]+':'+str(o["n"]),
"block_height": tx.get("block_height", None)
}
for tx in txs:
for i, inp in enumerate(tx["inputs"]):
if "prev_out" in inp:
if inp["prev_out"].get("addr", None) in addrs:
key = str(inp["prev_out"]["tx_index"]) + \
':'+str(inp["prev_out"]["n"])
if outs.get(key):
outs[key]["spend"] = tx["hash"]+':'+str(i)
return [outs[k] for k in outs]
# Pushes a transaction to the network using https://blockchain.info/pushtx
def bci_pushtx(tx):
if not re.match('^[0-9a-fA-F]*$', tx):
tx = tx.encode('hex')
return make_request('https://blockchain.info/pushtx', 'tx='+tx)
def eligius_pushtx(tx):
if not re.match('^[0-9a-fA-F]*$', tx):
tx = tx.encode('hex')
s = make_request(
'http://eligius.st/~wizkid057/newstats/pushtxn.php',
'transaction='+tx+'&send=Push')
strings = re.findall('string[^"]*"[^"]*"', s)
for string in strings:
quote = re.findall('"[^"]*"', string)[0]
if len(quote) >= 5:
return quote[1:-1]
def blockr_pushtx(tx, network='btc'):
if network == 'testnet':
blockr_url = 'http://tbtc.blockr.io/api/v1/tx/push'
elif network == 'btc':
blockr_url = 'http://btc.blockr.io/api/v1/tx/push'
else:
raise Exception(
'Unsupported network {0} for blockr_pushtx'.format(network))
if not re.match('^[0-9a-fA-F]*$', tx):
tx = tx.encode('hex')
return make_request(blockr_url, '{"hex":"%s"}' % tx)
def helloblock_pushtx(tx):
if not re.match('^[0-9a-fA-F]*$', tx):
tx = tx.encode('hex')
return make_request('https://mainnet.helloblock.io/v1/transactions',
'rawTxHex='+tx)
pushtx_getters = {
'bci': bci_pushtx,
'blockr': blockr_pushtx,
'helloblock': helloblock_pushtx
}
def pushtx(*args, **kwargs):
f = pushtx_getters.get(kwargs.get('source', ''), bci_pushtx)
return f(*args)
def last_block_height(network='btc'):
if network == 'testnet':
data = make_request('http://tbtc.blockr.io/api/v1/block/info/last')
jsonobj = json.loads(data.decode("utf-8"))
return jsonobj["data"]["nb"]
data = make_request('https://blockchain.info/latestblock')
jsonobj = json.loads(data.decode("utf-8"))
return jsonobj["height"]
# Gets a specific transaction
def bci_fetchtx(txhash):
if isinstance(txhash, list):
return [bci_fetchtx(h) for h in txhash]
if not re.match('^[0-9a-fA-F]*$', txhash):
txhash = txhash.encode('hex')
data = make_request('https://blockchain.info/rawtx/'+txhash+'?format=hex')
return data
def blockr_fetchtx(txhash, network='btc'):
if network == 'testnet':
blockr_url = 'http://tbtc.blockr.io/api/v1/tx/raw/'
elif network == 'btc':
blockr_url = 'http://btc.blockr.io/api/v1/tx/raw/'
else:
raise Exception(
'Unsupported network {0} for blockr_fetchtx'.format(network))
if isinstance(txhash, list):
txhash = ','.join([x.encode('hex') if not re.match('^[0-9a-fA-F]*$', x)
else x for x in txhash])
jsondata = json.loads(make_request(blockr_url+txhash).decode("utf-8"))
return [d['tx']['hex'] for d in jsondata['data']]
else:
if not re.match('^[0-9a-fA-F]*$', txhash):
txhash = txhash.encode('hex')
jsondata = json.loads(make_request(blockr_url+txhash).decode("utf-8"))
return jsondata['data']['tx']['hex']
def helloblock_fetchtx(txhash, network='btc'):
if isinstance(txhash, list):
return [helloblock_fetchtx(h) for h in txhash]
if not re.match('^[0-9a-fA-F]*$', txhash):
txhash = txhash.encode('hex')
if network == 'testnet':
url = 'https://testnet.helloblock.io/v1/transactions/'
elif network == 'btc':
url = 'https://mainnet.helloblock.io/v1/transactions/'
else:
raise Exception(
'Unsupported network {0} for helloblock_fetchtx'.format(network))
data = json.loads(make_request(url + txhash).decode("utf-8"))["data"]["transaction"]
o = {
"locktime": data["locktime"],
"version": data["version"],
"ins": [],
"outs": []
}
for inp in data["inputs"]:
o["ins"].append({
"script": inp["scriptSig"],
"outpoint": {
"index": inp["prevTxoutIndex"],
"hash": inp["prevTxHash"],
},
"sequence": 4294967295
})
for outp in data["outputs"]:
o["outs"].append({
"value": outp["value"],
"script": outp["scriptPubKey"]
})
from bitcoin.transaction import serialize
from bitcoin.transaction import txhash as TXHASH
tx = serialize(o)
assert TXHASH(tx) == txhash
return tx
fetchtx_getters = {
'bci': bci_fetchtx,
'blockr': blockr_fetchtx,
'helloblock': helloblock_fetchtx
}
def fetchtx(*args, **kwargs):
f = fetchtx_getters.get(kwargs.get('source', ''), bci_fetchtx)
return f(*args)
def firstbits(address):
if len(address) >= 25:
return make_request('https://blockchain.info/q/getfirstbits/'+address)
else:
return make_request(
'https://blockchain.info/q/resolvefirstbits/'+address)
def get_block_at_height(height):
j = json.loads(make_request("https://blockchain.info/block-height/" +
str(height)+"?format=json").decode("utf-8"))
for b in j['blocks']:
if b['main_chain'] is True:
return b
raise Exception("Block at this height not found")
def _get_block(inp):
if len(str(inp)) < 64:
return get_block_at_height(inp)
else:
return json.loads(make_request(
'https://blockchain.info/rawblock/'+inp).decode("utf-8"))
def bci_get_block_header_data(inp):
j = _get_block(inp)
return {
'version': j['ver'],
'hash': j['hash'],
'prevhash': j['prev_block'],
'timestamp': j['time'],
'merkle_root': j['mrkl_root'],
'bits': j['bits'],
'nonce': j['nonce'],
}
def blockr_get_block_header_data(height, network='btc'):
if network == 'testnet':
blockr_url = "http://tbtc.blockr.io/api/v1/block/raw/"
elif network == 'btc':
blockr_url = "http://btc.blockr.io/api/v1/block/raw/"
else:
raise Exception(
'Unsupported network {0} for blockr_get_block_header_data'.format(network))
k = json.loads(make_request(blockr_url + str(height)).decode("utf-8"))
j = k['data']
return {
'version': j['version'],
'hash': j['hash'],
'prevhash': j['previousblockhash'],
'timestamp': j['time'],
'merkle_root': j['merkleroot'],
'bits': int(j['bits'], 16),
'nonce': j['nonce'],
}
def get_block_timestamp(height, network='btc'):
if network == 'testnet':
blockr_url = "http://tbtc.blockr.io/api/v1/block/info/"
elif network == 'btc':
blockr_url = "http://btc.blockr.io/api/v1/block/info/"
else:
raise Exception(
'Unsupported network {0} for get_block_timestamp'.format(network))
import time, calendar
if isinstance(height, list):
k = json.loads(make_request(blockr_url + ','.join([str(x) for x in height])).decode("utf-8"))
o = {x['nb']: calendar.timegm(time.strptime(x['time_utc'],
"%Y-%m-%dT%H:%M:%SZ")) for x in k['data']}
return [o[x] for x in height]
else:
k = json.loads(make_request(blockr_url + str(height)).decode("utf-8"))
j = k['data']['time_utc']
return calendar.timegm(time.strptime(j, "%Y-%m-%dT%H:%M:%SZ"))
block_header_data_getters = {
'bci': bci_get_block_header_data,
'blockr': blockr_get_block_header_data
}
def get_block_header_data(inp, **kwargs):
f = block_header_data_getters.get(kwargs.get('source', ''),
bci_get_block_header_data)
return f(inp, **kwargs)
def get_txs_in_block(inp):
j = _get_block(inp)
hashes = [t['hash'] for t in j['tx']]
return hashes
def get_block_height(txhash):
j = json.loads(make_request('https://blockchain.info/rawtx/'+txhash).decode("utf-8"))
return j['block_height']
|
the-stack_0_22827 |
serviceurl = 'http://maps.googleapis.com/maps/api/geocode/xml?'
tree = ET.fromstring(data)
lst = tree.findall('comments/comment')
print("count = ",len(lst))
s = 0
for item in lst:
count = int(item.find('count').text)
s = s + count
print("Sum : ",s)
while True:
address = input('Enter location: ')
if len(address) < 1: break
url = serviceurl + urllib.parse.urlencode({'address': address})
print('Retrieving', url)
uh = urllib.request.urlopen(url)
data = uh.read()
print('Retrieved', len(data), 'characters')
print(data.decode())
tree = ET.fromstring(data)
|
the-stack_0_22828 | from flask import *
import os
import wpbf
token = os.environ['TOKEN']
url = f'https://api.telegram.org/bot{token}/'
wp = Flask(__name__)
def data(username,password,id,mode):
if mode == 'write':
open(id,'a').write(username+' | '+password)
elif mode == 'read':
if id in os.listdir(os.getcwd()):
z=open(id,'r').read()
return z
else:
balas(id,'kamu tidak memiliki list web yg terdeface')
@wp.route('/',methods=['GET','POST'])
def skuy():
if request.method == 'POST':
DataUpdate = request.get_json()
returner(DataUpdate)
return 'pop'
else:
return 'oke'
def returner(pesan):
perintah = '''
author : krypton-Byte
perintah:
/bruteforce <url file web txt> <url file wordlist>
/hasil
'''
teks = pesan['message']['text']
cmd = teks.split(' ')
id = pesan['message']['chat']['id']
usr = pesan['message']['chat']['username']
cmd=teks.split('\n')
if 'new_chat_member' in pesan:
balas(id,'selamat datang @'+usr+'\n'+perintah)
elif cmd[0] == '/bruteforce':
try:
balas(id,'sedang mengcrack.......')
url = requests.get(cmd[1]).text
wordlist = requests.get(cmd[2]).text
for x in wordlist.split('\n'):
for y in wordlist.split('\n'):
for z in url.split('\n'):
if wpbf.bf(z,x,y) == True:
data(x,y,id,'write')
balas(id,x+' | '+y)
except:
balas(id,'argument salah')
elif cmd[0] == '/hasil':
balas('oke','oke',id,'view')
elif cmd[0] == '/help':
balas(perintah)
def balas(id,teks):
data={
'chat_id':id,
'text':teks
}
wp.run(host='0.0.0.0',port=int(os.environ.get('PORT','5000')),debug=True)
|
the-stack_0_22829 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id:$
'''Demonstrates one way of fixing the display resolution to a certain
size, but rendering to the full screen.
The method used in this example is:
1. Set the OpenGL viewport to the fixed resolution
2. Render the scene using any OpenGL functions (here, just a polygon)
3. Copy the framebuffer into a texture
4. Reset the OpenGL viewport to the window (full screen) size
5. Blit the texture to the framebuffer
Recent video cards could also render the scene directly to the texture
using EXT_framebuffer_object. (This is not demonstrated in this example).
'''
from pyglet.gl import *
import pyglet
# Create a fullscreen window using the user's desktop resolution. You can
# also use this technique on ordinary resizable windows.
window = pyglet.window.Window(fullscreen=True)
# Use 320x200 fixed resolution to make the effect completely obvious. You
# can change this to a more reasonable value such as 800x600 here.
target_resolution = 320, 200
class FixedResolutionViewport:
def __init__(self, window, width, height, filtered=False):
self.window = window
self.width = width
self.height = height
# Get the actual framebuffer size as this can be different from the window size
self.framebuffer_width, self.framebuffer_height = self.window.get_framebuffer_size()
self.texture = pyglet.image.Texture.create(width, height,
rectangle=True)
if not filtered:
# By default the texture will be bilinear filtered when scaled
# up. If requested, turn filtering off. This makes the image
# aliased, but is more suitable for pixel art.
glTexParameteri(self.texture.target,
GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameteri(self.texture.target,
GL_TEXTURE_MIN_FILTER, GL_NEAREST)
def begin(self):
glViewport(0, 0, self.width, self.height)
self.set_fixed_projection()
def end(self):
buffer = pyglet.image.get_buffer_manager().get_color_buffer()
self.texture.blit_into(buffer, 0, 0, 0)
glViewport(0, 0, self.framebuffer_width, self.framebuffer_height)
self.set_window_projection()
aspect_width = self.window.width / float(self.width)
aspect_height = self.window.height / float(self.height)
if aspect_width > aspect_height:
scale_width = aspect_height * self.width
scale_height = aspect_height * self.height
else:
scale_width = aspect_width * self.width
scale_height = aspect_width * self.height
x = (self.window.width - scale_width) / 2
y = (self.window.height - scale_height) / 2
glClearColor(0, 0, 0, 1)
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
glColor3f(1, 1, 1)
self.texture.blit(x, y, width=scale_width, height=scale_height)
def set_fixed_projection(self):
# Override this method if you need to change the projection of the
# fixed resolution viewport.
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, self.width, 0, self.height, -1, 1)
glMatrixMode(GL_MODELVIEW)
def set_window_projection(self):
# This is the same as the default window projection, reprinted here
# for clarity.
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, self.window.width, 0, self.window.height, -1, 1)
glMatrixMode(GL_MODELVIEW)
target_width, target_height = target_resolution
viewport = FixedResolutionViewport(window,
target_width, target_height, filtered=False)
def draw_scene():
'''Draw the scene, assuming the fixed resolution viewport and projection
have been set up. This just draws the rotated polygon.'''
glClearColor(1, 1, 1, 1)
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
w, h = target_resolution
glTranslatef(w//2, h//2, 0)
glRotatef(rotate, 0, 0, 1)
glColor3f(1, 0, 0)
s = min(w, h) // 3
glRectf(-s, -s, s, s)
rotate = 0
def update(dt):
global rotate
rotate += dt * 20
pyglet.clock.schedule_interval(update, 1/60.)
@window.event
def on_draw():
viewport.begin()
window.clear()
draw_scene()
viewport.end()
pyglet.app.run()
|
the-stack_0_22830 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014 Daniel Standage <[email protected]>
# Copyright (c) 2008 Sascha Steinbiss <[email protected]>
# Copyright (c) 2008 Center for Bioinformatics, University of Hamburg
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
from gt.dlload import gtlib
class GTError(RuntimeError):
pass
class Error(Exception):
def __init__(self, ptr=None):
if ptr:
self.error = ptr
self.own = False
else:
self.error = gtlib.gt_error_new()
self.own = True
self._as_parameter_ = self.error
def __del__(self):
if self.own:
try:
gtlib.gt_error_delete(self.error)
except AttributeError:
pass
def from_param(cls, obj):
if not isinstance(obj, Error):
raise TypeError("argument must be an Error")
return obj._as_parameter_
from_param = classmethod(from_param)
def get(self):
if self.is_set():
return str(gtlib.gt_error_get(self.error).decode("UTF-8"))
else:
return "undefined error -- please report this as a bug!"
def set(self, errmsg):
return gtlib.gt_error_set_nonvariadic(self.error,
str(errmsg).encode('UTF-8'))
def is_set(self):
return gtlib.gt_error_is_set(self.error) == 1
def unset(self):
gtlib.gt_error_unset(self.error)
def register(cls, gtlib):
from ctypes import c_void_p, c_char_p, c_int
gtlib.gt_error_new.restype = c_void_p
gtlib.gt_error_new.argtypes = []
gtlib.gt_error_get.restype = c_char_p
gtlib.gt_error_get.argtypes = [c_void_p]
gtlib.gt_error_is_set.restype = c_int
gtlib.gt_error_is_set.argtypes = [c_void_p]
gtlib.gt_error_get.restype = c_char_p
gtlib.gt_error_get.argtypes = [c_void_p]
gtlib.gt_error_set_nonvariadic.restype = None
gtlib.gt_error_set_nonvariadic.argtypes = [c_void_p, c_char_p]
gtlib.gt_error_unset.restype = None
gtlib.gt_error_unset.argtypes = [c_void_p]
gtlib.gt_error_delete.restype = None
gtlib.gt_error_delete.argtypes = [c_void_p]
register = classmethod(register)
def gterror(err):
if isinstance(err, Error):
raise GTError("GenomeTools error: " + err.get())
else:
raise GTError("GenomeTools error: " + err)
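# Minimal usage sketch (illustrative; `gt_call` is a stand-in for any wrapped
# GenomeTools function that takes an Error as its error argument):
#
#   err = Error()
#   if gt_call(some_args, err) != 0:   # hypothetical failure check
#       gterror(err)                   # raises GTError with the library message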
|
the-stack_0_22832 | import numpy as np
from ctypes import c_int, c_double, c_bool, c_float, c_char_p, c_bool, c_void_p
import ctypes
import os
LIB_PATH = os.path.dirname( os.path.realpath(__file__) )
LIB_PATH_CPP = os.path.normpath(LIB_PATH+'../../../'+'/cpp/Build/libs/Molecular')
#LIB_PATH_CPP = os.path.normpath(LIB_PATH+'../../../'+'/cpp/Build-debug/libs/Molecular')
def recompile(path):
print( "recompile path :", path )
dir_bak = os.getcwd()
os.chdir( path)
os.system("make" )
os.chdir( dir_bak )
print( os.getcwd() )
# =========== main
recompile(LIB_PATH_CPP)
lib = ctypes.CDLL( LIB_PATH_CPP+"/libRigidMol.so" )
array1ui = np.ctypeslib.ndpointer(dtype=np.uint32, ndim=1, flags='CONTIGUOUS')
array1i = np.ctypeslib.ndpointer(dtype=np.int32, ndim=1, flags='CONTIGUOUS')
array2i = np.ctypeslib.ndpointer(dtype=np.int32, ndim=2, flags='CONTIGUOUS')
array1d = np.ctypeslib.ndpointer(dtype=np.double, ndim=1, flags='CONTIGUOUS')
array2d = np.ctypeslib.ndpointer(dtype=np.double, ndim=2, flags='CONTIGUOUS')
array3d = np.ctypeslib.ndpointer(dtype=np.double, ndim=3, flags='CONTIGUOUS')
default_icolor = int("0xFF101010", 0)
# ========= C functions
#void initRigidSubstrate( char* fname, int* ns, double* pos0, double* cell ){
lib.initRigidSubstrate.argtypes = [c_char_p,array1i,array1d,array2d]
lib.initRigidSubstrate.restype = None
def initRigidSubstrate( fname, ns, pos, cell ):
lib.initRigidSubstrate( fname, ns, pos, cell )
#void recalcGridFF( int* ns){
lib.recalcGridFF.argtypes = [array1i]
lib.recalcGridFF.restype = None
def recalcGridFF( ns ):
lib.recalcGridFF( ns )
#void saveGridFF(){
lib.saveGridFF.argtypes = []
lib.saveGridFF.restype = None
def saveGridFF( ):
lib.saveGridFF( )
#void loadGridFF(){
lib.loadGridFF.argtypes = []
lib.loadGridFF.restype = None
def loadGridFF( ):
lib.loadGridFF( )
#void debugSaveGridFF(const char* fname, double* testREQ ){
lib.debugSaveGridFF.argtypes = [c_char_p, array1d]
lib.debugSaveGridFF.restype = None
def debugSaveGridFF(fname, testREQ ):
lib.debugSaveGridFF(fname, testREQ )
#void initParams( char* fname_atomTypes, char* fname_bondTypes ){
lib.initParams.argtypes = [c_char_p, c_char_p]
lib.initParams.restype = None
def initParams( fname_atomTypes, fname_bondTypes):
lib.initParams(fname_atomTypes, fname_bondTypes )
#int loadMolType ( const char* fname ){
lib.loadMolType.argtypes = [c_char_p]
lib.loadMolType.restype = c_int
def loadMolType( fname ):
return lib.loadMolType( fname )
#int registerRigidMolType( int natom, Vec3d* apos, Vec3d* REQs, int* atomType ){
lib.registerRigidMolType.argtypes = [c_int, array2d, array2d, array1i ]
lib.registerRigidMolType.restype = c_int
def registerRigidMolType( apos, REQs, atomType ):
natoms = len(apos)
return lib.registerRigidMolType( natoms, apos, REQs, atomType )
#int insertMolecule( int itype, double* pos, double* rot, bool rigid ){
lib.insertMolecule.argtypes = [c_int, array1d, array2d, c_bool]
lib.insertMolecule.restype = c_int
def insertMolecule( itype, pos, rot, rigid ):
return lib.insertMolecule( itype, pos, rot, rigid )
#void clearMolTypes( bool deep){
lib.clearMolTypes.argtypes = [c_bool]
lib.clearMolTypes.restype = None
def clearMolTypes( deep=True ):
lib.clearMolTypes( deep )
#void bakeMMFF(){
lib.clear.argtypes = []
lib.clear.restype = None
def clear( ):
lib.clear( )
#void bakeMMFF(){
lib.bakeMMFF.argtypes = []
lib.bakeMMFF.restype = None
def bakeMMFF( ):
lib.bakeMMFF( )
#void prepareOpt(){
lib.prepareOpt.argtypes = []
lib.prepareOpt.restype = None
def prepareOpt( ):
lib.prepareOpt( )
#double relaxNsteps( int nsteps, double F2conf ){
lib.relaxNsteps.argtypes = [c_int, c_double]
lib.relaxNsteps.restype = c_double
def relaxNsteps( nsteps, F2conf ):
return lib.relaxNsteps( nsteps, F2conf )
#void save2xyz( char * fname ){
lib.save2xyz.argtypes = [c_char_p]
lib.save2xyz.restype = None
def save2xyz( fname ):
lib.save2xyz( fname )
#write2xyz( int i ){
lib.write2xyz.argtypes = [c_int]
lib.write2xyz.restype = None
def write2xyz( i ):
lib.write2xyz( i )
#openf(char* fname, int i, char* mode ){
lib.openf.argtypes = [c_char_p, c_int, c_char_p ]
lib.openf.restype = c_int
def openf( fname, i, mode ):
return lib.openf( fname, i, mode )
#closef(int i){
lib.closef.argtypes = [c_int]
lib.closef.restype = None
def closef( i ):
lib.closef( i )
lib.getPoses.argtypes = [ctypes.POINTER(c_int)]
lib.getPoses.restype = ctypes.POINTER(c_double)
def getPoses():
n=c_int(0)
ptr = lib.getPoses(ctypes.byref(n))
#print "n",n
return np.ctypeslib.as_array(ptr, shape=(n.value,8))
lib.getAtomPos.argtypes = [ctypes.POINTER(c_int)]
lib.getAtomPos.restype = ctypes.POINTER(c_double)
def getAtomPos():
n=c_int(0)
ptr = lib.getAtomPos(ctypes.byref(n));
#print "n",n
return np.ctypeslib.as_array( ptr, shape=(n.value,3))
#void setOptFIRE( double dt_max, double dt_min, double damp_max, int minLastNeg, double finc, double fdec, double falpha, double kickStart ){
lib.setOptFIRE.argtypes = [ c_double, c_double, c_double, c_int , c_double , c_double , c_double , c_double ]
lib.setOptFIRE.restype = None
def setOptFIRE( dt_max=0.05, dt_min=0.005, damp_max=0.1, minLastNeg=5, finc=1.1, fdec=0.5, falpha=0.98, kickStart=1.0 ):
lib.setOptFIRE( dt_max, dt_min, damp_max, minLastNeg, finc, fdec, falpha, kickStart )
#void setCoulombMirror(double* hdir,double* Vec3d& p0){
lib.setCoulombMirror.argtypes = [ array1d, array1d ]
lib.setCoulombMirror.restype = None
def setCoulombMirror( hdir, p0 ):
lib.setCoulombMirror( hdir, p0 )
# ========= Python Functions
def loadAtomTypeNames( fname ):
dct={}
fin = open( fname, 'r' )
for i,line in enumerate(fin):
aName=line.split()[0].strip()
dct[aName]=i
fin.close()
return dct
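# Typical call-sequence sketch (file names and numbers are placeholders; the
# real parameter/geometry files ship with the wrapped C++ project, and the
# char* arguments may need byte strings under Python 3):
#
#   initParams("AtomTypes.dat", "BondTypes.dat")
#   ityp = loadMolType("molecule.xyz")
#   insertMolecule(ityp, np.array([0.0, 0.0, 5.0]), np.eye(3), rigid=True)
#   bakeMMFF(); prepareOpt()
#   F2 = relaxNsteps(100, 1e-6)
#   save2xyz("relaxed.xyz")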
|
the-stack_0_22835 | import copy
import torch.nn as nn
from rlkit.launchers.launcher_util import setup_logger
import rlkit.torch.pytorch_util as ptu
from rlkit.core.ma_eval_util import get_generic_ma_path_information
def experiment(variant):
from multi_differential_game import MultiDifferentialGame
expl_env = MultiDifferentialGame(**variant['env_kwargs'])
eval_env = MultiDifferentialGame(**variant['env_kwargs'])
num_agent = expl_env.agent_num
obs_dim = eval_env.observation_space.low.size
action_dim = eval_env.action_space.low.size
from rlkit.torch.networks.graph_builders import FullGraphBuilder
graph_builder_1 = FullGraphBuilder(
input_node_dim=obs_dim+action_dim,
num_node=num_agent,
batch_size=variant['algorithm_kwargs']['batch_size'],
contain_self_loop=False)
from rlkit.torch.networks.gnn_networks import GNNNet
gnn1 = GNNNet(
graph_builder_1,
hidden_activation='lrelu0.2',
output_activation='lrelu0.2',
**variant['graph_kwargs'],
)
from rlkit.torch.networks.networks import FlattenMlp
qf1 = nn.Sequential(
gnn1,
FlattenMlp(input_size=variant['graph_kwargs']['node_dim'],
output_size=1,
hidden_sizes=[variant['qf_kwargs']['hidden_dim']]*(variant['qf_kwargs']['num_layer']-1),
hidden_activation=nn.LeakyReLU(negative_slope=0.2),
)
)
target_qf1 = copy.deepcopy(qf1)
from rlkit.torch.networks.graph_builders import FullGraphBuilder
graph_builder_2 = FullGraphBuilder(
input_node_dim=obs_dim+action_dim,
num_node=num_agent,
batch_size=variant['algorithm_kwargs']['batch_size'],
contain_self_loop=False)
from rlkit.torch.networks.gnn_networks import GNNNet
gnn2 = GNNNet(
graph_builder_2,
hidden_activation='lrelu0.2',
output_activation='lrelu0.2',
**variant['graph_kwargs'],
)
qf2 = nn.Sequential(
gnn2,
FlattenMlp(input_size=variant['graph_kwargs']['node_dim'],
output_size=1,
hidden_sizes=[variant['qf_kwargs']['hidden_dim']]*(variant['qf_kwargs']['num_layer']-1),
hidden_activation=nn.LeakyReLU(negative_slope=0.2),
)
)
target_qf2 = copy.deepcopy(qf2)
graph_builder_ca = FullGraphBuilder(
input_node_dim=obs_dim+action_dim,
num_node=num_agent,
batch_size=variant['algorithm_kwargs']['batch_size'],
contain_self_loop=False)
cgca = GNNNet(
graph_builder_ca,
hidden_activation='lrelu0.2',
output_activation='lrelu0.2',
**variant['graph_kwargs'],
)
from rlkit.torch.networks.layers import SplitLayer
from rlkit.torch.policies.tanh_gaussian_policy import TanhGaussianPolicy
cactor = nn.Sequential(
cgca,
FlattenMlp(input_size=variant['graph_kwargs']['node_dim'],
output_size=variant['cactor_kwargs']['hidden_dim'],
hidden_sizes=[variant['cactor_kwargs']['hidden_dim']]*(variant['cactor_kwargs']['num_layer']-1),
hidden_activation=nn.LeakyReLU(negative_slope=0.2),
output_activation=nn.LeakyReLU(negative_slope=0.2),
),
nn.LeakyReLU(negative_slope=0.2),
SplitLayer(layers=[nn.Linear(variant['policy_kwargs']['hidden_dim'],action_dim),
nn.Linear(variant['policy_kwargs']['hidden_dim'],action_dim)])
)
cactor = TanhGaussianPolicy(module=cactor)
policy_n, expl_policy_n, eval_policy_n = [], [], []
for i in range(num_agent):
policy = nn.Sequential(
FlattenMlp(input_size=obs_dim,
output_size=variant['policy_kwargs']['hidden_dim'],
hidden_sizes=[variant['policy_kwargs']['hidden_dim']]*(variant['policy_kwargs']['num_layer']-1),
hidden_activation=nn.LeakyReLU(negative_slope=0.2),
output_activation=nn.LeakyReLU(negative_slope=0.2),
),
SplitLayer(layers=[nn.Linear(variant['policy_kwargs']['hidden_dim'],action_dim),
nn.Linear(variant['policy_kwargs']['hidden_dim'],action_dim)])
)
policy = TanhGaussianPolicy(module=policy)
from rlkit.torch.policies.make_deterministic import MakeDeterministic
eval_policy = MakeDeterministic(policy)
if variant['random_exploration']:
from rlkit.exploration_strategies.base import PolicyWrappedWithExplorationStrategy
from rlkit.exploration_strategies.epsilon_greedy import EpsilonGreedy
expl_policy = PolicyWrappedWithExplorationStrategy(
exploration_strategy=EpsilonGreedy(expl_env.action_space, prob_random_action=1.0),
policy=policy,
)
else:
expl_policy = policy
policy_n.append(policy)
expl_policy_n.append(expl_policy)
eval_policy_n.append(eval_policy)
from rlkit.samplers.data_collector.ma_path_collector import MAMdpPathCollector
eval_path_collector = MAMdpPathCollector(eval_env, eval_policy_n)
expl_path_collector = MAMdpPathCollector(expl_env, expl_policy_n)
from rlkit.data_management.ma_env_replay_buffer import MAEnvReplayBuffer
replay_buffer = MAEnvReplayBuffer(variant['replay_buffer_size'], expl_env, num_agent=num_agent)
from rlkit.torch.r2g.r2g_gnn9 import R2GGNNTrainer
trainer = R2GGNNTrainer(
env=expl_env,
qf1=qf1,
target_qf1=target_qf1,
qf2=qf2,
target_qf2=target_qf2,
cactor=cactor,
policy_n=policy_n,
**variant['trainer_kwargs']
)
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm
algorithm = TorchBatchRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
log_path_function=get_generic_ma_path_information,
**variant['algorithm_kwargs']
)
algorithm.to(ptu.device)
# save init params
from rlkit.core import logger
snapshot = algorithm._get_snapshot()
file_name = osp.join(logger._snapshot_dir, 'itr_-1.pkl')
torch.save(snapshot, file_name)
algorithm.train()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', type=str, default='zero_sum')
parser.add_argument('--num_ag', type=int, default=2)
parser.add_argument('--gpu', type=int, default=None)
parser.add_argument('--log_dir', type=str, default='R2GGNN9Gaussian')
parser.add_argument('--conv', type=str, default='GSage')
parser.add_argument('--layer', type=int, default=2)
parser.add_argument('--hidden', type=int, default=16)
parser.add_argument('--glayer', type=int, default=2)
parser.add_argument('--hnode', type=int, default=16)
parser.add_argument('--ce', action='store_true', default=False) # cactor entropy
parser.add_argument('--er', action='store_true', default=False) # entropy reward
parser.add_argument('--re', action='store_true', default=False) # random exploration
parser.add_argument('--alpha', type=float, default=None) # init alpha
parser.add_argument('--fa', action='store_true', default=False) # fix alpha
parser.add_argument('--dcig', action='store_true', default=False) # deterministic cactor in graph
parser.add_argument('--dna', action='store_true', default=False) # deterministic next action
parser.add_argument('--lr', type=float, default=None)
parser.add_argument('--bs', type=int, default=None)
parser.add_argument('--epoch', type=int, default=None)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--snapshot_mode', type=str, default="gap_and_last")
parser.add_argument('--snapshot_gap', type=int, default=100)
args = parser.parse_args()
import os.path as osp
pre_dir = './Data/'+args.exp_name+'_p'+str(args.num_ag)
main_dir = args.log_dir\
+args.conv\
+('layer'+str(args.layer))\
+('hidden'+str(args.hidden))\
+('glayer'+str(args.glayer))\
+('hnode'+str(args.hnode))\
+('ce' if args.ce else '')\
+('er' if args.er else '')\
+('re' if args.re else '')\
+(('alpha'+str(args.alpha)) if args.alpha else '')\
+('fa' if args.fa else '')\
+('dcig' if args.dcig else '')\
+('dna' if args.dna else '')\
+(('lr'+str(args.lr)) if args.lr else '')\
+(('bs'+str(args.bs)) if args.bs else '')
log_dir = osp.join(pre_dir,main_dir,'seed'+str(args.seed))
# noinspection PyTypeChecker
variant = dict(
random_exploration=args.re,
env_kwargs=dict(
game_name=args.exp_name,
agent_num=args.num_ag,
),
algorithm_kwargs=dict(
num_epochs=(args.epoch+1 if args.epoch else 101),
num_eval_steps_per_epoch=100,
num_trains_per_train_loop=100*args.num_ag,
num_expl_steps_per_train_loop=100*args.num_ag,
min_num_steps_before_training=100*args.num_ag,
max_path_length=100,
batch_size=(args.bs if args.bs else 256),
),
trainer_kwargs=dict(
use_soft_update=True,
tau=1e-2,
discount=0.99,
qf_learning_rate=(args.lr if args.lr else 1e-3),
cactor_learning_rate=(args.lr if args.lr else 1e-4),
policy_learning_rate=(args.lr if args.lr else 1e-4),
use_entropy_loss=True,
use_entropy_reward=args.er,
use_cactor_entropy_loss=args.ce,
init_alpha=(args.alpha if args.alpha else 1.),
use_automatic_entropy_tuning=(not args.fa),
deterministic_cactor_in_graph=args.dcig,
deterministic_next_action=args.dna,
),
graph_kwargs=dict(
conv_type=args.conv,
node_dim=args.hnode,
num_conv_layers=args.glayer,
),
qf_kwargs=dict(
hidden_dim=args.hidden,
num_layer=args.layer,
),
cactor_kwargs=dict(
hidden_dim=args.hidden,
num_layer=args.layer,
),
policy_kwargs=dict(
hidden_dim=args.hidden,
num_layer=args.layer,
),
replay_buffer_size=int(1E6),
)
import os
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
with open(osp.join(log_dir,'variant.json'),'w') as out_json:
import json
json.dump(variant,out_json,indent=2)
import sys
cmd_input = 'python ' + ' '.join(sys.argv) + '\n'
with open(osp.join(log_dir, 'cmd_input.txt'), 'a') as f:
f.write(cmd_input)
setup_logger(args.exp_name+'/'+main_dir, variant=variant,
snapshot_mode=args.snapshot_mode, snapshot_gap=args.snapshot_gap,
log_dir=log_dir)
import numpy as np
import torch
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if isinstance(args.gpu, int):
print('using gpu ',args.gpu)
ptu.set_gpu_mode(True, gpu_id=args.gpu)
experiment(variant)
|
the-stack_0_22836 | from collections import Counter
from itertools import count
from errno import ENOENT, EROFS
import os
from stat import S_IFDIR
from threading import RLock
from fuse import FuseOSError, Operations, LoggingMixIn
from logbook import Logger
log = Logger('fs')
class DescriptorManager(object):
def __init__(self):
self.refcount = Counter()
self.data_hash = {}
self.lock = RLock()
self.fd = count()
def get_free_fd(self, h):
fd = next(self.fd)
self.data_hash[fd] = h
return fd
def get_hash(self, fd):
return self.data_hash[fd]
def release(self, fd):
with self.lock:
newval = max(self.refcount[fd] - 1, 0)
self.refcount[fd] = newval
if newval == 0:
del self.data_hash[fd]
return newval != 0
class BlobNode:
def __init__(self, fs, obj, mode):
self.fs = fs
self.obj = obj
self.mode = mode
def getattr(self):
st = self.fs.empty_stat.copy()
st['st_mode'] = self.mode
st['st_size'] = self.obj.raw_length()
return st
def open(self, flags):
with self.fs.data_lock:
fd = self.fs.fd_man.get_free_fd(self.obj.id)
# load data into data_cache
if self.obj.id not in self.fs.data_cache:
self.fs.data_cache[self.obj.id] = self.obj.as_raw_string()
return fd
def read(self, size, offset, fh):
# lookup hash associated with filehandle
h = self.fs.fd_man.get_hash(fh)
# retrieve cached data for filehandle
data = self.fs.data_cache[h]
return data[offset:offset + size]
def release(self, fh):
with self.fs.data_lock:
h = self.fs.fd_man.get_hash(fh)
del self.fs.data_cache[h]
return 0
class DirNode:
def __init__(self, fs):
self.fs = fs
self.dirs = []
self.files = []
def getattr(self):
st = self.fs.empty_stat.copy()
st['st_mode'] |= S_IFDIR
return st
def readdir(self):
entries = ['.', '..']
entries += self.dirs
entries += self.files
return entries
class IndexFS(LoggingMixIn, Operations):
def __init__(self, root, repo, mountpoint):
self.root = os.path.abspath(root)
self.mountpoint = os.path.abspath(mountpoint)
root_stat = os.lstat(root)
self.empty_stat = {
'st_atime': 0,
'st_ctime': 0,
'st_gid': root_stat.st_gid,
'st_mode': 0o644,
'st_mtime': 0,
'st_nlink': 1,
'st_size': 0,
'st_uid': root_stat.st_uid,
}
self.data_cache = {}
self.data_lock = RLock()
self.fd_man = DescriptorManager()
self.passthrough_man = DescriptorManager()
self.repo = repo
self.dirs = {}
index = self.repo.open_index()
self.dirs['/'] = DirNode(self)
self.files = {}
for (fpath, bid, mode) in index.iterblobs():
components = fpath.decode().split('/')
d = self.dirs['/']
p = ''
for c in components[:-1]:
p += '/' + c
if p not in self.dirs:
self.dirs[p] = DirNode(self)
d.dirs.append(c)
d = self.dirs[p]
d.files.append(components[-1])
p = p + '/' + components[-1]
self.files[p] = BlobNode(self, self.repo.get_object(bid), mode)
def _get_path(self, path):
orig_path = path
rv = split_git(os.path.join(self.root, path))
# for debugging
        log.debug('{} => {}'.format(orig_path, rv))
return rv
def _get_node(self, path):
if not path.startswith('/'):
path = '/' + path
if path not in self.dirs and path not in self.files:
            log.debug('{} => ENOENT'.format(path))
raise FuseOSError(ENOENT)
if path in self.dirs:
rv = self.dirs[path]
else:
rv = self.files[path]
        log.debug('{} => {}'.format(path, rv))
return rv
def readdir(self, path, fh=None):
node = self._get_node(path)
return node.readdir()
def getattr(self, path, fh=None):
node = self._get_node(path)
return node.getattr()
def open(self, path, flags=0):
if flags & (os.O_WRONLY | os.O_RDWR):
raise FuseOSError(EROFS)
node = self._get_node(path)
return node.open(flags)
def read(self, path, size, offset, fh):
node = self._get_node(path)
return node.read(size, offset, fh)
def release(self, path, fh):
# note: for some reason, this isn't called?
# flush is though...
node = self._get_node(path)
return node.release(fh)
def readlink(self, path):
node = self._get_node(path)
return node.readlink()
|
the-stack_0_22837 | __author__ = "Vasile2k"
import requests
from html.parser import HTMLParser
queries = [
"Corsair K95",
"Gigabyte Aorus Z390 Pro"
]
url = "https://www.olx.ro/oferte/"
user_agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko)" \
" Chrome/35.0.1916.47 Safari/537.36"
headers = {"User-Agent": user_agent}
products = {}
last_product = []
def clean_string(input):
# clean the data from tabs, spaces, slashes and/or newlines
# because as I see, the parser gives a lot of them
# and '\n', because fuck python
return str(input).replace("\n", "").replace("\\n", "").replace("\t", "").replace(" ", "").replace("\\", "")
def clean_shitty_decoding(input):
# clean the unescaped string
# encode all symbols to unicode, escape them, keep only ascii, decode
# now you have a clean string
# fuck python
return str(input).encode("utf-8").decode("unicode_escape").encode("ascii", errors="ignore").decode()
def add_product(product):
if not last_product == []:
raise Exception("Add the price of the previous product before adding a new one!")
if not isinstance(product, str):
raise TypeError("\'product\' should be a string!")
last_product.append(product)
def add_price(price):
if last_product == []:
raise Exception("Add a product before adding a price!")
if not isinstance(price, str):
raise TypeError("\'price\' should be a string!")
products[last_product[0]] = price
last_product.clear()
def list_all_products():
max_len = max(len(p) for p in products)
for k in products:
print(k.ljust(max_len + 4), " -> ", products[k])
class OlxResponseParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.__inside_table_count = 0
self.__has_data = False
self.__has_price_data = False
def handle_starttag(self, tag, attrs):
# convert attrs to dict
attrs = dict(attrs)
# clean the tag attribute because the parser seem so add a lot of shit
tag = clean_string(tag)
if tag == "table" and "id" in attrs and attrs["id"] == "offers_table":
# start the table with listings
self.__inside_table_count = 1
if self.__inside_table_count:
if tag == "table":
# incremet table counte because there are tables inside the table with listings
self.__inside_table_count += 1
elif tag == "a" and "data-cy" in attrs and attrs["data-cy"] == "listing-ad-title":
self.__has_data = True
elif tag == "p" and "class" in attrs and attrs["class"] == "price":
self.__has_price_data = True
def handle_endtag(self, tag):
if tag == "table" and self.__inside_table_count:
self.__inside_table_count -= 1
def handle_data(self, data):
if not clean_string(data) == "":
if self.__has_data:
add_product(clean_shitty_decoding(data))
self.__has_data = False
elif self.__has_price_data:
add_price(clean_shitty_decoding(data))
self.__has_price_data = False
def create_query_url(query_text):
return url + "q-" + query_text.replace(" ", "-") + "/"
if __name__ == "__main__":
for query in queries:
response = requests.get(create_query_url(query), headers=headers)
parser = OlxResponseParser()
parser.feed(str(response.content))
parser.close()
list_all_products()
|
the-stack_0_22839 | """
A collection of utility functions and classes. Originally, many
(but not all) were from the Python Cookbook -- hence the name cbook.
This module is safe to import from anywhere within matplotlib;
it imports matplotlib only at runtime.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange, zip
from itertools import repeat
import collections
import datetime
import errno
from functools import reduce
import glob
import gzip
import io
import locale
import os
import re
import sys
import time
import traceback
import types
import warnings
from weakref import ref, WeakKeyDictionary
import numpy as np
import numpy.ma as ma
class MatplotlibDeprecationWarning(UserWarning):
"""
A class for issuing deprecation warnings for Matplotlib users.
In light of the fact that Python builtin DeprecationWarnings are ignored
by default as of Python 2.7 (see link below), this class was put in to
allow for the signaling of deprecation, but via UserWarnings which are not
ignored by default.
https://docs.python.org/dev/whatsnew/2.7.html#the-future-for-python-2-x
"""
pass
mplDeprecation = MatplotlibDeprecationWarning
def _generate_deprecation_message(since, message='', name='',
alternative='', pending=False,
obj_type='attribute'):
if not message:
altmessage = ''
if pending:
message = (
'The %(func)s %(obj_type)s will be deprecated in a '
'future version.')
else:
message = (
'The %(func)s %(obj_type)s was deprecated in version '
'%(since)s.')
if alternative:
altmessage = ' Use %s instead.' % alternative
message = ((message % {
'func': name,
'name': name,
'alternative': alternative,
'obj_type': obj_type,
'since': since}) +
altmessage)
return message
def warn_deprecated(
since, message='', name='', alternative='', pending=False,
obj_type='attribute'):
"""
Used to display deprecation warning in a standard way.
Parameters
----------
since : str
The release at which this API became deprecated.
message : str, optional
Override the default deprecation message. The format
specifier `%(func)s` may be used for the name of the function,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. `%(obj_type)` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated function; if not provided the name
is automatically determined from the passed in function,
though this is useful in the case of renamed functions, where
the new function is just assigned to the name of the
deprecated function. For example::
def new_function():
...
oldFunction = new_function
alternative : str, optional
An alternative function that the user may use in place of the
deprecated function. The deprecation warning will tell the user about
this alternative if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning.
obj_type : str, optional
The object type being deprecated.
Examples
--------
Basic example::
# To warn of the deprecation of "matplotlib.name_of_module"
warn_deprecated('1.4.0', name='matplotlib.name_of_module',
obj_type='module')
"""
message = _generate_deprecation_message(
since, message, name, alternative, pending, obj_type)
warnings.warn(message, mplDeprecation, stacklevel=1)
def deprecated(since, message='', name='', alternative='', pending=False,
obj_type='function'):
"""
Decorator to mark a function as deprecated.
Parameters
----------
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier `%(func)s` may be used for the name of the function,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. `%(obj_type)` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated function; if not provided the name
is automatically determined from the passed in function,
though this is useful in the case of renamed functions, where
the new function is just assigned to the name of the
deprecated function. For example::
def new_function():
...
oldFunction = new_function
alternative : str, optional
An alternative function that the user may use in place of the
deprecated function. The deprecation warning will tell the user about
this alternative if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning.
Examples
--------
Basic example::
@deprecated('1.4.0')
def the_function_to_deprecate():
pass
"""
def deprecate(func, message=message, name=name, alternative=alternative,
pending=pending):
import functools
import textwrap
if isinstance(func, classmethod):
func = func.__func__
is_classmethod = True
else:
is_classmethod = False
if not name:
name = func.__name__
message = _generate_deprecation_message(
since, message, name, alternative, pending, obj_type)
@functools.wraps(func)
def deprecated_func(*args, **kwargs):
warnings.warn(message, mplDeprecation, stacklevel=2)
return func(*args, **kwargs)
old_doc = deprecated_func.__doc__
if not old_doc:
old_doc = ''
old_doc = textwrap.dedent(old_doc).strip('\n')
message = message.strip()
new_doc = (('\n.. deprecated:: %(since)s'
'\n %(message)s\n\n' %
{'since': since, 'message': message}) + old_doc)
if not old_doc:
            # This is to prevent a spurious 'unexpected unindent' warning from
# docutils when the original docstring was blank.
new_doc += r'\ '
deprecated_func.__doc__ = new_doc
if is_classmethod:
deprecated_func = classmethod(deprecated_func)
return deprecated_func
return deprecate
# On some systems, locale.getpreferredencoding returns None,
# which can break unicode; and the sage project reports that
# some systems have incorrect locale specifications, e.g.,
# an encoding instead of a valid locale name. Another
# pathological case that has been reported is an empty string.
# On some systems, getpreferredencoding sets the locale, which has
# side effects. Passing False eliminates those side effects.
def unicode_safe(s):
import matplotlib
if isinstance(s, bytes):
try:
preferredencoding = locale.getpreferredencoding(
matplotlib.rcParams['axes.formatter.use_locale']).strip()
if not preferredencoding:
preferredencoding = None
except (ValueError, ImportError, AttributeError):
preferredencoding = None
if preferredencoding is None:
return six.text_type(s)
else:
return six.text_type(s, preferredencoding)
return s
class converter(object):
"""
Base class for handling string -> python type with support for
missing values
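    For example, one of the concrete subclasses defined below can be used
    to parse text fields while mapping the sentinel ``'Null'`` to ``None``
    (illustrative sketch)::

        conv = todate('%Y-%m-%d')
        conv('2015-03-14')   # -> datetime.date(2015, 3, 14)
        conv('Null')         # -> None (the configured missing value)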
"""
def __init__(self, missing='Null', missingval=None):
self.missing = missing
self.missingval = missingval
def __call__(self, s):
if s == self.missing:
return self.missingval
return s
def is_missing(self, s):
return not s.strip() or s == self.missing
class tostr(converter):
"""convert to string or None"""
def __init__(self, missing='Null', missingval=''):
converter.__init__(self, missing=missing, missingval=missingval)
class todatetime(converter):
"""convert to a datetime or None"""
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
'use a :func:`time.strptime` format string for conversion'
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s):
return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.datetime(*tup[:6])
class todate(converter):
"""convert to a date or None"""
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
"""use a :func:`time.strptime` format string for conversion"""
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s):
return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.date(*tup[:3])
class tofloat(converter):
"""convert to a float or None"""
def __init__(self, missing='Null', missingval=None):
        converter.__init__(self, missing, missingval)
def __call__(self, s):
if self.is_missing(s):
return self.missingval
return float(s)
class toint(converter):
"""convert to an int or None"""
def __init__(self, missing='Null', missingval=None):
        converter.__init__(self, missing, missingval)
def __call__(self, s):
if self.is_missing(s):
return self.missingval
return int(s)
class _BoundMethodProxy(object):
"""
Our own proxy object which enables weak references to bound and unbound
methods and arbitrary callables. Pulls information about the function,
class, and instance out of a bound method. Stores a weak reference to the
instance to support garbage collection.
@organization: IBM Corporation
@copyright: Copyright (c) 2005, 2006 IBM Corporation
@license: The BSD License
Minor bugfixes by Michael Droettboom
"""
def __init__(self, cb):
self._hash = hash(cb)
self._destroy_callbacks = []
try:
try:
if six.PY3:
self.inst = ref(cb.__self__, self._destroy)
else:
self.inst = ref(cb.im_self, self._destroy)
except TypeError:
self.inst = None
if six.PY3:
self.func = cb.__func__
self.klass = cb.__self__.__class__
else:
self.func = cb.im_func
self.klass = cb.im_class
except AttributeError:
self.inst = None
self.func = cb
self.klass = None
def add_destroy_callback(self, callback):
self._destroy_callbacks.append(_BoundMethodProxy(callback))
def _destroy(self, wk):
for callback in self._destroy_callbacks:
try:
callback(self)
except ReferenceError:
pass
def __getstate__(self):
d = self.__dict__.copy()
# de-weak reference inst
inst = d['inst']
if inst is not None:
d['inst'] = inst()
return d
def __setstate__(self, statedict):
self.__dict__ = statedict
inst = statedict['inst']
# turn inst back into a weakref
if inst is not None:
self.inst = ref(inst)
def __call__(self, *args, **kwargs):
"""
Proxy for a call to the weak referenced object. Take
arbitrary params to pass to the callable.
Raises `ReferenceError`: When the weak reference refers to
a dead object
"""
if self.inst is not None and self.inst() is None:
raise ReferenceError
elif self.inst is not None:
# build a new instance method with a strong reference to the
# instance
mtd = types.MethodType(self.func, self.inst())
else:
# not a bound method, just return the func
mtd = self.func
# invoke the callable and return the result
return mtd(*args, **kwargs)
def __eq__(self, other):
"""
Compare the held function and instance with that held by
another proxy.
"""
try:
if self.inst is None:
return self.func == other.func and other.inst is None
else:
return self.func == other.func and self.inst() == other.inst()
except Exception:
return False
def __ne__(self, other):
"""
Inverse of __eq__.
"""
return not self.__eq__(other)
def __hash__(self):
return self._hash
class CallbackRegistry(object):
"""
Handle registering and disconnecting for a set of signals and
callbacks:
>>> def oneat(x):
... print('eat', x)
>>> def ondrink(x):
... print('drink', x)
>>> from matplotlib.cbook import CallbackRegistry
>>> callbacks = CallbackRegistry()
>>> id_eat = callbacks.connect('eat', oneat)
>>> id_drink = callbacks.connect('drink', ondrink)
>>> callbacks.process('drink', 123)
drink 123
>>> callbacks.process('eat', 456)
eat 456
>>> callbacks.process('be merry', 456) # nothing will be called
>>> callbacks.disconnect(id_eat)
>>> callbacks.process('eat', 456) # nothing will be called
In practice, one should always disconnect all callbacks when they
are no longer needed to avoid dangling references (and thus memory
leaks). However, real code in matplotlib rarely does so, and due
to its design, it is rather difficult to place this kind of code.
To get around this, and prevent this class of memory leaks, we
instead store weak references to bound methods only, so when the
destination object needs to die, the CallbackRegistry won't keep
it alive. The Python stdlib weakref module can not create weak
references to bound methods directly, so we need to create a proxy
object to handle weak references to bound methods (or regular free
functions). This technique was shared by Peter Parente on his
`"Mindtrove" blog
<http://mindtrove.info/python-weak-references/>`_.
"""
def __init__(self):
self.callbacks = dict()
self._cid = 0
self._func_cid_map = {}
def __getstate__(self):
# We cannot currently pickle the callables in the registry, so
# return an empty dictionary.
return {}
def __setstate__(self, state):
# re-initialise an empty callback registry
self.__init__()
def connect(self, s, func):
"""
register *func* to be called when a signal *s* is generated
func will be called
"""
self._func_cid_map.setdefault(s, WeakKeyDictionary())
# Note proxy not needed in python 3.
# TODO rewrite this when support for python2.x gets dropped.
proxy = _BoundMethodProxy(func)
if proxy in self._func_cid_map[s]:
return self._func_cid_map[s][proxy]
proxy.add_destroy_callback(self._remove_proxy)
self._cid += 1
cid = self._cid
self._func_cid_map[s][proxy] = cid
self.callbacks.setdefault(s, dict())
self.callbacks[s][cid] = proxy
return cid
def _remove_proxy(self, proxy):
for signal, proxies in list(six.iteritems(self._func_cid_map)):
try:
del self.callbacks[signal][proxies[proxy]]
except KeyError:
pass
if len(self.callbacks[signal]) == 0:
del self.callbacks[signal]
del self._func_cid_map[signal]
def disconnect(self, cid):
"""
disconnect the callback registered with callback id *cid*
"""
for eventname, callbackd in list(six.iteritems(self.callbacks)):
try:
del callbackd[cid]
except KeyError:
continue
else:
for signal, functions in list(
six.iteritems(self._func_cid_map)):
for function, value in list(six.iteritems(functions)):
if value == cid:
del functions[function]
return
def process(self, s, *args, **kwargs):
"""
process signal *s*. All of the functions registered to receive
callbacks on *s* will be called with *\*args* and *\*\*kwargs*
"""
if s in self.callbacks:
for cid, proxy in list(six.iteritems(self.callbacks[s])):
try:
proxy(*args, **kwargs)
except ReferenceError:
self._remove_proxy(proxy)
class silent_list(list):
"""
override repr when returning a list of matplotlib artists to
prevent long, meaningless output. This is meant to be used for a
homogeneous list of a given type
"""
def __init__(self, type, seq=None):
self.type = type
if seq is not None:
self.extend(seq)
def __repr__(self):
return '<a list of %d %s objects>' % (len(self), self.type)
def __str__(self):
return repr(self)
def __getstate__(self):
        # store a dictionary of this silent_list's state
return {'type': self.type, 'seq': self[:]}
def __setstate__(self, state):
self.type = state['type']
self.extend(state['seq'])
class IgnoredKeywordWarning(UserWarning):
"""
A class for issuing warnings about keyword arguments that will be ignored
by matplotlib
"""
pass
def local_over_kwdict(local_var, kwargs, *keys):
"""
Enforces the priority of a local variable over potentially conflicting
argument(s) from a kwargs dict. The following possible output values are
considered in order of priority:
local_var > kwargs[keys[0]] > ... > kwargs[keys[-1]]
The first of these whose value is not None will be returned. If all are
None then None will be returned. Each key in keys will be removed from the
kwargs dict in place.
Parameters
----------
local_var: any object
The local variable (highest priority)
kwargs: dict
Dictionary of keyword arguments; modified in place
keys: str(s)
Name(s) of keyword arguments to process, in descending order of
priority
Returns
-------
out: any object
Either local_var or one of kwargs[key] for key in keys
Raises
------
IgnoredKeywordWarning
For each key in keys that is removed from kwargs but not used as
the output value
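    Examples
    --------
    Illustrative sketch::

        kwargs = {'linewidth': 3}
        lw = local_over_kwdict(None, kwargs, 'linewidth')
        # lw == 3 and 'linewidth' has been popped from kwargs
        lw = local_over_kwdict(2, kwargs, 'linewidth')
        # lw == 2; a non-None local value always takes priority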
"""
out = local_var
for key in keys:
kwarg_val = kwargs.pop(key, None)
if kwarg_val is not None:
if out is None:
out = kwarg_val
else:
warnings.warn('"%s" keyword argument will be ignored' % key,
IgnoredKeywordWarning)
return out
def strip_math(s):
"""remove latex formatting from mathtext"""
remove = (r'\mathdefault', r'\rm', r'\cal', r'\tt', r'\it', '\\', '{', '}')
s = s[1:-1]
for r in remove:
s = s.replace(r, '')
return s
class Bunch(object):
"""
Often we want to just collect a bunch of stuff together, naming each
item of the bunch; a dictionary's OK for that, but a small do- nothing
class is even handier, and prettier to use. Whenever you want to
group a few variables::
>>> point = Bunch(datum=2, squared=4, coord=12)
      >>> point.datum
      2
By: Alex Martelli
From: https://code.activestate.com/recipes/121294/
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
keys = six.iterkeys(self.__dict__)
return 'Bunch(%s)' % ', '.join(['%s=%s' % (k, self.__dict__[k])
for k
in keys])
def unique(x):
"""Return a list of unique elements of *x*"""
return list(six.iterkeys(dict([(val, 1) for val in x])))
def iterable(obj):
"""return true if *obj* is iterable"""
try:
iter(obj)
except TypeError:
return False
return True
def is_string_like(obj):
"""Return True if *obj* looks like a string"""
if isinstance(obj, six.string_types):
return True
# numpy strings are subclass of str, ma strings are not
if ma.isMaskedArray(obj):
if obj.ndim == 0 and obj.dtype.kind in 'SU':
return True
else:
return False
try:
obj + ''
except:
return False
return True
def is_sequence_of_strings(obj):
"""Returns true if *obj* is iterable and contains strings"""
if not iterable(obj):
return False
if is_string_like(obj) and not isinstance(obj, np.ndarray):
try:
obj = obj.values
except AttributeError:
# not pandas
return False
for o in obj:
if not is_string_like(o):
return False
return True
def is_hashable(obj):
"""Returns true if *obj* can be hashed"""
try:
hash(obj)
except TypeError:
return False
return True
def is_writable_file_like(obj):
"""return true if *obj* looks like a file object with a *write* method"""
return hasattr(obj, 'write') and six.callable(obj.write)
def file_requires_unicode(x):
"""
Returns `True` if the given writable file-like object requires Unicode
to be written to it.
"""
try:
x.write(b'')
except TypeError:
return True
else:
return False
def is_scalar(obj):
"""return true if *obj* is not string like and is not iterable"""
return not is_string_like(obj) and not iterable(obj)
def is_numlike(obj):
"""return true if *obj* looks like a number"""
try:
obj + 1
except:
return False
else:
return True
def to_filehandle(fname, flag='rU', return_opened=False):
"""
*fname* can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in .gz. *flag* is a
read/write flag for :func:`file`
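    Example (illustrative sketch; the file name is hypothetical)::

        fh, opened = to_filehandle('data.csv.gz', flag='r',
                                   return_opened=True)
        # gzip-compressed files are opened transparently; *opened* tells
        # the caller whether it is responsible for closing *fh*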
"""
if is_string_like(fname):
if fname.endswith('.gz'):
# get rid of 'U' in flag for gzipped files.
flag = flag.replace('U', '')
fh = gzip.open(fname, flag)
elif fname.endswith('.bz2'):
# get rid of 'U' in flag for bz2 files
flag = flag.replace('U', '')
import bz2
fh = bz2.BZ2File(fname, flag)
else:
fh = open(fname, flag)
opened = True
elif hasattr(fname, 'seek'):
fh = fname
opened = False
else:
raise ValueError('fname must be a string or file handle')
if return_opened:
return fh, opened
return fh
def is_scalar_or_string(val):
"""Return whether the given object is a scalar or string like."""
return is_string_like(val) or not iterable(val)
def _string_to_bool(s):
if not is_string_like(s):
return s
if s == 'on':
return True
if s == 'off':
return False
raise ValueError("string argument must be either 'on' or 'off'")
def get_sample_data(fname, asfileobj=True):
"""
Return a sample data file. *fname* is a path relative to the
`mpl-data/sample_data` directory. If *asfileobj* is `True`
return a file object, otherwise just a file path.
Set the rc parameter examples.directory to the directory where we should
    look, if sample_data files are stored in a location different from the
    default (which is `mpl-data/sample_data` at the same level as the
    `matplotlib` Python module files).
If the filename ends in .gz, the file is implicitly ungzipped.
"""
import matplotlib
if matplotlib.rcParams['examples.directory']:
root = matplotlib.rcParams['examples.directory']
else:
root = os.path.join(matplotlib._get_data_path(), 'sample_data')
path = os.path.join(root, fname)
if asfileobj:
if (os.path.splitext(fname)[-1].lower() in
('.csv', '.xrc', '.txt')):
mode = 'r'
else:
mode = 'rb'
base, ext = os.path.splitext(fname)
if ext == '.gz':
return gzip.open(path, mode)
else:
return open(path, mode)
else:
return path
def flatten(seq, scalarp=is_scalar_or_string):
"""
Returns a generator of flattened nested containers
For example:
>>> from matplotlib.cbook import flatten
>>> l = (('John', ['Hunter']), (1, 23), [[([42, (5, 23)], )]])
>>> print(list(flatten(l)))
['John', 'Hunter', 1, 23, 42, 5, 23]
By: Composite of Holger Krekel and Luther Blissett
From: https://code.activestate.com/recipes/121294/
and Recipe 1.12 in cookbook
"""
for item in seq:
if scalarp(item):
yield item
else:
for subitem in flatten(item, scalarp):
yield subitem
class Sorter(object):
"""
Sort by attribute or item
Example usage::
sort = Sorter()
list = [(1, 2), (4, 8), (0, 3)]
dict = [{'a': 3, 'b': 4}, {'a': 5, 'b': 2}, {'a': 0, 'b': 0},
{'a': 9, 'b': 9}]
sort(list) # default sort
sort(list, 1) # sort by index 1
sort(dict, 'a') # sort a list of dicts by key 'a'
"""
def _helper(self, data, aux, inplace):
aux.sort()
result = [data[i] for junk, i in aux]
if inplace:
data[:] = result
return result
def byItem(self, data, itemindex=None, inplace=1):
if itemindex is None:
if inplace:
data.sort()
result = data
else:
result = data[:]
result.sort()
return result
else:
aux = [(data[i][itemindex], i) for i in range(len(data))]
return self._helper(data, aux, inplace)
def byAttribute(self, data, attributename, inplace=1):
aux = [(getattr(data[i], attributename), i) for i in range(len(data))]
return self._helper(data, aux, inplace)
# a couple of handy synonyms
sort = byItem
__call__ = byItem
class Xlator(dict):
"""
All-in-one multiple-string-substitution class
Example usage::
text = "Larry Wall is the creator of Perl"
adict = {
"Larry Wall" : "Guido van Rossum",
"creator" : "Benevolent Dictator for Life",
"Perl" : "Python",
}
print(multiple_replace(adict, text))
xlat = Xlator(adict)
print(xlat.xlat(text))
"""
def _make_regex(self):
""" Build re object based on the keys of the current dictionary """
return re.compile("|".join(map(re.escape, list(six.iterkeys(self)))))
def __call__(self, match):
""" Handler invoked for each regex *match* """
return self[match.group(0)]
def xlat(self, text):
""" Translate *text*, returns the modified text. """
return self._make_regex().sub(self, text)
def soundex(name, len=4):
""" soundex module conforming to Odell-Russell algorithm """
# digits holds the soundex values for the alphabet
soundex_digits = '01230120022455012623010202'
sndx = ''
fc = ''
# Translate letters in name to soundex digits
for c in name.upper():
if c.isalpha():
if not fc:
fc = c # Remember first letter
d = soundex_digits[ord(c) - ord('A')]
# Duplicate consecutive soundex digits are skipped
if not sndx or (d != sndx[-1]):
sndx += d
# Replace first digit with first letter
sndx = fc + sndx[1:]
# Remove all 0s from the soundex code
sndx = sndx.replace('0', '')
# Return soundex code truncated or 0-padded to len characters
return (sndx + (len * '0'))[:len]
class Null(object):
""" Null objects always and reliably "do nothing." """
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return self
def __str__(self):
return "Null()"
def __repr__(self):
return "Null()"
if six.PY3:
def __bool__(self):
            return False
else:
def __nonzero__(self):
            return False
def __getattr__(self, name):
return self
def __setattr__(self, name, value):
return self
def __delattr__(self, name):
return self
def mkdirs(newdir, mode=0o777):
"""
make directory *newdir* recursively, and set *mode*. Equivalent to ::
> mkdir -p NEWDIR
> chmod MODE NEWDIR
"""
# this functionality is now in core python as of 3.2
# LPY DROP
if six.PY3:
os.makedirs(newdir, mode=mode, exist_ok=True)
else:
try:
os.makedirs(newdir, mode=mode)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
class GetRealpathAndStat(object):
def __init__(self):
self._cache = {}
def __call__(self, path):
result = self._cache.get(path)
if result is None:
realpath = os.path.realpath(path)
if sys.platform == 'win32':
stat_key = realpath
else:
stat = os.stat(realpath)
stat_key = (stat.st_ino, stat.st_dev)
result = realpath, stat_key
self._cache[path] = result
return result
get_realpath_and_stat = GetRealpathAndStat()
def dict_delall(d, keys):
"""delete all of the *keys* from the :class:`dict` *d*"""
for key in keys:
try:
del d[key]
except KeyError:
pass
class RingBuffer(object):
""" class that implements a not-yet-full buffer """
def __init__(self, size_max):
self.max = size_max
self.data = []
    class __Full(object):
""" class that implements a full buffer """
def append(self, x):
""" Append an element overwriting the oldest one. """
self.data[self.cur] = x
self.cur = (self.cur + 1) % self.max
def get(self):
""" return list of elements in correct order """
return self.data[self.cur:] + self.data[:self.cur]
def append(self, x):
"""append an element at the end of the buffer"""
self.data.append(x)
if len(self.data) == self.max:
self.cur = 0
# Permanently change self's class from non-full to full
            self.__class__ = RingBuffer.__Full
def get(self):
""" Return a list of elements from the oldest to the newest. """
return self.data
    def __getitem__(self, i):
return self.data[i % len(self.data)]
def get_split_ind(seq, N):
"""
*seq* is a list of words. Return the index into seq such that::
        len(' '.join(seq[:ind])) <= N
"""
s_len = 0
# todo: use Alex's xrange pattern from the cbook for efficiency
for (word, ind) in zip(seq, xrange(len(seq))):
s_len += len(word) + 1 # +1 to account for the len(' ')
if s_len >= N:
return ind
return len(seq)
def wrap(prefix, text, cols):
"""wrap *text* with *prefix* at length *cols*"""
pad = ' ' * len(prefix.expandtabs())
available = cols - len(pad)
seq = text.split(' ')
Nseq = len(seq)
ind = 0
lines = []
while ind < Nseq:
lastInd = ind
ind += get_split_ind(seq[ind:], available)
lines.append(seq[lastInd:ind])
# add the prefix to the first line, pad with spaces otherwise
ret = prefix + ' '.join(lines[0]) + '\n'
for line in lines[1:]:
ret += pad + ' '.join(line) + '\n'
return ret
# A regular expression used to determine the amount of space to
# remove. It looks for the first sequence of spaces immediately
# following the first newline, or at the beginning of the string.
_find_dedent_regex = re.compile(r"(?:(?:\n\r?)|^)( *)\S")
# A cache to hold the regexs that actually remove the indent.
_dedent_regex = {}
def dedent(s):
"""
Remove excess indentation from docstring *s*.
Discards any leading blank lines, then removes up to n whitespace
characters from each line, where n is the number of leading
whitespace characters in the first line. It differs from
textwrap.dedent in its deletion of leading blank lines and its use
of the first non-blank line to determine the indentation.
It is also faster in most cases.
"""
# This implementation has a somewhat obtuse use of regular
# expressions. However, this function accounted for almost 30% of
# matplotlib startup time, so it is worthy of optimization at all
# costs.
if not s: # includes case of s is None
return ''
match = _find_dedent_regex.match(s)
if match is None:
return s
# This is the number of spaces to remove from the left-hand side.
nshift = match.end(1) - match.start(1)
if nshift == 0:
return s
# Get a regex that will remove *up to* nshift spaces from the
# beginning of each line. If it isn't in the cache, generate it.
unindent = _dedent_regex.get(nshift, None)
if unindent is None:
unindent = re.compile("\n\r? {0,%d}" % nshift)
_dedent_regex[nshift] = unindent
result = unindent.sub("\n", s).strip()
return result
def listFiles(root, patterns='*', recurse=1, return_folders=0):
"""
Recursively list files
from Parmar and Martelli in the Python Cookbook
"""
import os.path
import fnmatch
# Expand patterns from semicolon-separated string to list
pattern_list = patterns.split(';')
results = []
for dirname, dirs, files in os.walk(root):
# Append to results all relevant files (and perhaps folders)
for name in files:
fullname = os.path.normpath(os.path.join(dirname, name))
if return_folders or os.path.isfile(fullname):
for pattern in pattern_list:
if fnmatch.fnmatch(name, pattern):
results.append(fullname)
break
# Block recursion if recursion was disallowed
if not recurse:
break
return results
def get_recursive_filelist(args):
"""
Recurse all the files and dirs in *args* ignoring symbolic links
and return the files as a list of strings
"""
files = []
for arg in args:
if os.path.isfile(arg):
files.append(arg)
continue
if os.path.isdir(arg):
newfiles = listFiles(arg, recurse=1, return_folders=1)
files.extend(newfiles)
return [f for f in files if not os.path.islink(f)]
def pieces(seq, num=2):
    """Break up *seq* into successive pieces of length *num*;
    the last piece may be shorter"""
start = 0
while 1:
item = seq[start:start + num]
if not len(item):
break
yield item
start += num
def exception_to_str(s=None):
if six.PY3:
sh = io.StringIO()
else:
sh = io.BytesIO()
if s is not None:
print(s, file=sh)
traceback.print_exc(file=sh)
return sh.getvalue()
def allequal(seq):
"""
Return *True* if all elements of *seq* compare equal. If *seq* is
0 or 1 length, return *True*
"""
if len(seq) < 2:
return True
val = seq[0]
for i in xrange(1, len(seq)):
thisval = seq[i]
if thisval != val:
return False
return True
def alltrue(seq):
"""
Return *True* if all elements of *seq* evaluate to *True*. If
*seq* is empty, return *False*.
"""
if not len(seq):
return False
for val in seq:
if not val:
return False
return True
def onetrue(seq):
"""
    Return *True* if one element of *seq* is *True*. If *seq* is
empty, return *False*.
"""
if not len(seq):
return False
for val in seq:
if val:
return True
return False
def allpairs(x):
"""
return all possible pairs in sequence *x*
Condensed by Alex Martelli from this thread_ on c.l.python
.. _thread: http://groups.google.com/groups?q=all+pairs+group:*python*&hl=en&lr=&ie=UTF-8&selm=mailman.4028.1096403649.5135.python-list%40python.org&rnum=1
"""
return [(s, f) for i, f in enumerate(x) for s in x[i + 1:]]
class maxdict(dict):
"""
A dictionary with a maximum size; this doesn't override all the
relevant methods to constrain the size, just setitem, so use with
caution
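    Example (illustrative)::

        cache = maxdict(2)
        cache['a'] = 1
        cache['b'] = 2
        cache['c'] = 3   # evicts 'a', the oldest key added
        # 'a' in cache -> False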
"""
def __init__(self, maxsize):
dict.__init__(self)
self.maxsize = maxsize
self._killkeys = []
def __setitem__(self, k, v):
if k not in self:
if len(self) >= self.maxsize:
del self[self._killkeys[0]]
del self._killkeys[0]
self._killkeys.append(k)
dict.__setitem__(self, k, v)
class Stack(object):
"""
Implement a stack where elements can be pushed on and you can move
back and forth. But no pop. Should mimic home / back / forward
in a browser
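    Example (illustrative sketch)::

        s = Stack()
        s.push('home')
        s.push('page1')
        s.push('page2')
        s.back()      # -> 'page1'
        s.forward()   # -> 'page2'
        s()           # -> 'page2', the current element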
"""
def __init__(self, default=None):
self.clear()
self._default = default
def __call__(self):
"""return the current element, or None"""
if not len(self._elements):
return self._default
else:
return self._elements[self._pos]
def __len__(self):
return self._elements.__len__()
def __getitem__(self, ind):
return self._elements.__getitem__(ind)
def forward(self):
"""move the position forward and return the current element"""
n = len(self._elements)
if self._pos < n - 1:
self._pos += 1
return self()
def back(self):
"""move the position back and return the current element"""
if self._pos > 0:
self._pos -= 1
return self()
def push(self, o):
"""
push object onto stack at current position - all elements
occurring later than the current position are discarded
"""
self._elements = self._elements[:self._pos + 1]
self._elements.append(o)
self._pos = len(self._elements) - 1
return self()
def home(self):
"""push the first element onto the top of the stack"""
if not len(self._elements):
return
self.push(self._elements[0])
return self()
def empty(self):
return len(self._elements) == 0
def clear(self):
"""empty the stack"""
self._pos = -1
self._elements = []
def bubble(self, o):
"""
raise *o* to the top of the stack and return *o*. *o* must be
in the stack
"""
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
bubbles = []
for thiso in old:
if thiso == o:
bubbles.append(thiso)
else:
self.push(thiso)
for thiso in bubbles:
self.push(o)
return o
def remove(self, o):
'remove element *o* from the stack'
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
for thiso in old:
if thiso == o:
continue
else:
self.push(thiso)
def popall(seq):
'empty a list'
for i in xrange(len(seq)):
seq.pop()
def finddir(o, match, case=False):
"""
return all attributes of *o* which match string in match. if case
is True require an exact case match.
"""
if case:
names = [(name, name) for name in dir(o) if is_string_like(name)]
else:
names = [(name.lower(), name) for name in dir(o)
if is_string_like(name)]
match = match.lower()
return [orig for name, orig in names if name.find(match) >= 0]
def reverse_dict(d):
"""reverse the dictionary -- may lose data if values are not unique!"""
return dict([(v, k) for k, v in six.iteritems(d)])
def restrict_dict(d, keys):
"""
Return a dictionary that contains those keys that appear in both
d and keys, with values from d.
"""
return dict([(k, v) for (k, v) in six.iteritems(d) if k in keys])
def report_memory(i=0): # argument may go away
"""return the memory consumed by process"""
from matplotlib.compat.subprocess import Popen, PIPE
pid = os.getpid()
if sys.platform == 'sunos5':
try:
a2 = Popen(str('ps -p %d -o osz') % pid, shell=True,
stdout=PIPE).stdout.readlines()
except OSError:
raise NotImplementedError(
"report_memory works on Sun OS only if "
"the 'ps' program is found")
mem = int(a2[-1].strip())
elif sys.platform.startswith('linux'):
try:
a2 = Popen(str('ps -p %d -o rss,sz') % pid, shell=True,
stdout=PIPE).stdout.readlines()
except OSError:
raise NotImplementedError(
"report_memory works on Linux only if "
"the 'ps' program is found")
mem = int(a2[1].split()[1])
elif sys.platform.startswith('darwin'):
try:
a2 = Popen(str('ps -p %d -o rss,vsz') % pid, shell=True,
stdout=PIPE).stdout.readlines()
except OSError:
raise NotImplementedError(
"report_memory works on Mac OS only if "
"the 'ps' program is found")
mem = int(a2[1].split()[0])
elif sys.platform.startswith('win'):
try:
a2 = Popen([str("tasklist"), "/nh", "/fi", "pid eq %d" % pid],
stdout=PIPE).stdout.read()
except OSError:
raise NotImplementedError(
"report_memory works on Windows only if "
"the 'tasklist' program is found")
mem = int(a2.strip().split()[-2].replace(',', ''))
else:
raise NotImplementedError(
"We don't have a memory monitor for %s" % sys.platform)
return mem
_safezip_msg = 'In safezip, len(args[0])=%d but len(args[%d])=%d'
def safezip(*args):
"""make sure *args* are equal len before zipping"""
Nx = len(args[0])
for i, arg in enumerate(args[1:]):
if len(arg) != Nx:
raise ValueError(_safezip_msg % (Nx, i + 1, len(arg)))
return list(zip(*args))
def issubclass_safe(x, klass):
"""return issubclass(x, klass) and return False on a TypeError"""
try:
return issubclass(x, klass)
except TypeError:
return False
def safe_masked_invalid(x, copy=False):
x = np.array(x, subok=True, copy=copy)
if not x.dtype.isnative:
# Note that the argument to `byteswap` is 'inplace',
# thus if we have already made a copy, do the byteswap in
# place, else make a copy with the byte order swapped.
# Be explicit that we are swapping the byte order of the dtype
x = x.byteswap(copy).newbyteorder('S')
try:
xm = np.ma.masked_invalid(x, copy=False)
xm.shrink_mask()
except TypeError:
return x
return xm
class MemoryMonitor(object):
def __init__(self, nmax=20000):
self._nmax = nmax
self._mem = np.zeros((self._nmax,), np.int32)
self.clear()
def clear(self):
self._n = 0
self._overflow = False
def __call__(self):
mem = report_memory()
if self._n < self._nmax:
self._mem[self._n] = mem
self._n += 1
else:
self._overflow = True
return mem
def report(self, segments=4):
n = self._n
segments = min(n, segments)
dn = int(n / segments)
ii = list(xrange(0, n, dn))
ii[-1] = n - 1
print()
print('memory report: i, mem, dmem, dmem/nloops')
print(0, self._mem[0])
for i in range(1, len(ii)):
di = ii[i] - ii[i - 1]
if di == 0:
continue
dm = self._mem[ii[i]] - self._mem[ii[i - 1]]
print('%5d %5d %3d %8.3f' % (ii[i], self._mem[ii[i]],
dm, dm / float(di)))
if self._overflow:
print("Warning: array size was too small for the number of calls.")
def xy(self, i0=0, isub=1):
x = np.arange(i0, self._n, isub)
return x, self._mem[i0:self._n:isub]
def plot(self, i0=0, isub=1, fig=None):
if fig is None:
from .pylab import figure
fig = figure()
ax = fig.add_subplot(111)
ax.plot(*self.xy(i0, isub))
fig.canvas.draw()
def print_cycles(objects, outstream=sys.stdout, show_progress=False):
"""
*objects*
A list of objects to find cycles in. It is often useful to
pass in gc.garbage to find the cycles that are preventing some
objects from being garbage collected.
*outstream*
The stream for output.
*show_progress*
If True, print the number of objects reached as they are found.
"""
import gc
from types import FrameType
def print_path(path):
for i, step in enumerate(path):
# next "wraps around"
next = path[(i + 1) % len(path)]
outstream.write(" %s -- " % str(type(step)))
if isinstance(step, dict):
for key, val in six.iteritems(step):
if val is next:
outstream.write("[%s]" % repr(key))
break
if key is next:
outstream.write("[key] = %s" % repr(val))
break
elif isinstance(step, list):
outstream.write("[%d]" % step.index(next))
elif isinstance(step, tuple):
outstream.write("( tuple )")
else:
outstream.write(repr(step))
outstream.write(" ->\n")
outstream.write("\n")
def recurse(obj, start, all, current_path):
if show_progress:
outstream.write("%d\r" % len(all))
all[id(obj)] = None
referents = gc.get_referents(obj)
for referent in referents:
# If we've found our way back to the start, this is
# a cycle, so print it out
if referent is start:
print_path(current_path)
# Don't go back through the original list of objects, or
# through temporary references to the object, since those
# are just an artifact of the cycle detector itself.
elif referent is objects or isinstance(referent, FrameType):
continue
# We haven't seen this object before, so recurse
elif id(referent) not in all:
recurse(referent, start, all, current_path + [obj])
for obj in objects:
outstream.write("Examining: %r\n" % (obj,))
recurse(obj, obj, {}, [])
class Grouper(object):
"""
This class provides a lightweight way to group arbitrary objects
together into disjoint sets when a full-blown graph data structure
would be overkill.
Objects can be joined using :meth:`join`, tested for connectedness
    using :meth:`joined`, and all disjoint sets can be retrieved by
using the object as an iterator.
The objects being joined must be hashable and weak-referenceable.
For example:
>>> from matplotlib.cbook import Grouper
>>> class Foo(object):
... def __init__(self, s):
... self.s = s
... def __repr__(self):
... return self.s
...
>>> a, b, c, d, e, f = [Foo(x) for x in 'abcdef']
>>> grp = Grouper()
>>> grp.join(a, b)
>>> grp.join(b, c)
>>> grp.join(d, e)
>>> sorted(map(tuple, grp))
[(a, b, c), (d, e)]
>>> grp.joined(a, b)
True
>>> grp.joined(a, c)
True
>>> grp.joined(a, d)
False
"""
def __init__(self, init=()):
mapping = self._mapping = {}
for x in init:
mapping[ref(x)] = [ref(x)]
def __contains__(self, item):
return ref(item) in self._mapping
def clean(self):
"""
Clean dead weak references from the dictionary
"""
mapping = self._mapping
to_drop = [key for key in mapping if key() is None]
for key in to_drop:
val = mapping.pop(key)
val.remove(key)
def join(self, a, *args):
"""
Join given arguments into the same set. Accepts one or more
arguments.
"""
mapping = self._mapping
set_a = mapping.setdefault(ref(a), [ref(a)])
for arg in args:
set_b = mapping.get(ref(arg))
if set_b is None:
set_a.append(ref(arg))
mapping[ref(arg)] = set_a
elif set_b is not set_a:
if len(set_b) > len(set_a):
set_a, set_b = set_b, set_a
set_a.extend(set_b)
for elem in set_b:
mapping[elem] = set_a
self.clean()
def joined(self, a, b):
"""
Returns True if *a* and *b* are members of the same set.
"""
self.clean()
mapping = self._mapping
try:
return mapping[ref(a)] is mapping[ref(b)]
except KeyError:
return False
def remove(self, a):
self.clean()
mapping = self._mapping
seta = mapping.pop(ref(a), None)
if seta is not None:
seta.remove(ref(a))
def __iter__(self):
"""
Iterate over each of the disjoint sets as a list.
The iterator is invalid if interleaved with calls to join().
"""
self.clean()
class Token:
pass
token = Token()
# Mark each group as we come across if by appending a token,
# and don't yield it twice
for group in six.itervalues(self._mapping):
if not group[-1] is token:
yield [x() for x in group]
group.append(token)
# Cleanup the tokens
for group in six.itervalues(self._mapping):
if group[-1] is token:
del group[-1]
def get_siblings(self, a):
"""
Returns all of the items joined with *a*, including itself.
"""
self.clean()
siblings = self._mapping.get(ref(a), [ref(a)])
return [x() for x in siblings]
def simple_linear_interpolation(a, steps):
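    """
    Resample *a*, linearly interpolating *steps* - 1 new points into each
    original segment along the first axis.
    Example (illustrative)::

        simple_linear_interpolation(np.array([0., 1.]), 4)
        # -> [0., 0.25, 0.5, 0.75, 1.]
    """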
if steps == 1:
return a
steps = int(np.floor(steps))
new_length = ((len(a) - 1) * steps) + 1
new_shape = list(a.shape)
new_shape[0] = new_length
result = np.zeros(new_shape, a.dtype)
result[0] = a[0]
a0 = a[0:-1]
a1 = a[1:]
delta = ((a1 - a0) / steps)
for i in range(1, steps):
result[i::steps] = delta * i + a0
result[steps::steps] = a1
return result
def recursive_remove(path):
if os.path.isdir(path):
for fname in (glob.glob(os.path.join(path, '*')) +
glob.glob(os.path.join(path, '.*'))):
if os.path.isdir(fname):
recursive_remove(fname)
os.removedirs(fname)
else:
os.remove(fname)
#os.removedirs(path)
else:
os.remove(path)
def delete_masked_points(*args):
"""
Find all masked and/or non-finite points in a set of arguments,
and return the arguments with only the unmasked points remaining.
Arguments can be in any of 5 categories:
1) 1-D masked arrays
2) 1-D ndarrays
3) ndarrays with more than one dimension
4) other non-string iterables
5) anything else
The first argument must be in one of the first four categories;
any argument with a length differing from that of the first
argument (and hence anything in category 5) then will be
passed through unchanged.
Masks are obtained from all arguments of the correct length
in categories 1, 2, and 4; a point is bad if masked in a masked
array or if it is a nan or inf. No attempt is made to
extract a mask from categories 2, 3, and 4 if :meth:`np.isfinite`
does not yield a Boolean array.
All input arguments that are not passed unchanged are returned
as ndarrays after removing the points or rows corresponding to
masks in any of the arguments.
A vastly simpler version of this function was originally
written as a helper for Axes.scatter().
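    Example (illustrative sketch)::

        x = np.ma.array([1, 2, 3, 4], mask=[False, False, True, False])
        y = np.array([10., 20., np.nan, 40.])
        xc, yc = delete_masked_points(x, y)
        # xc -> array([1, 2, 4]); yc -> array([10., 20., 40.])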
"""
if not len(args):
return ()
if (is_string_like(args[0]) or not iterable(args[0])):
raise ValueError("First argument must be a sequence")
nrecs = len(args[0])
margs = []
seqlist = [False] * len(args)
for i, x in enumerate(args):
if (not is_string_like(x)) and iterable(x) and len(x) == nrecs:
seqlist[i] = True
if ma.isMA(x):
if x.ndim > 1:
raise ValueError("Masked arrays must be 1-D")
else:
x = np.asarray(x)
margs.append(x)
masks = [] # list of masks that are True where good
for i, x in enumerate(margs):
if seqlist[i]:
if x.ndim > 1:
continue # Don't try to get nan locations unless 1-D.
if ma.isMA(x):
masks.append(~ma.getmaskarray(x)) # invert the mask
xd = x.data
else:
xd = x
try:
mask = np.isfinite(xd)
if isinstance(mask, np.ndarray):
masks.append(mask)
except: # Fixme: put in tuple of possible exceptions?
pass
if len(masks):
mask = reduce(np.logical_and, masks)
igood = mask.nonzero()[0]
if len(igood) < nrecs:
for i, x in enumerate(margs):
if seqlist[i]:
margs[i] = x.take(igood, axis=0)
for i, x in enumerate(margs):
if seqlist[i] and ma.isMA(x):
margs[i] = x.filled()
return margs
def boxplot_stats(X, whis=1.5, bootstrap=None, labels=None,
autorange=False):
"""
Returns list of dictionaries of statistics used to draw a series
of box and whisker plots. The `Returns` section enumerates the
required keys of the dictionary. Users can skip this function and
pass a user-defined set of dictionaries to the new `axes.bxp` method
instead of relying on MPL to do the calculations.
Parameters
----------
X : array-like
Data that will be represented in the boxplots. Should have 2 or
fewer dimensions.
whis : float, string, or sequence (default = 1.5)
As a float, determines the reach of the whiskers past the first
        and third quartiles (e.g., Q3 + whis*IQR, where IQR is the
        interquartile range, Q3 - Q1). Beyond the whiskers, data are
        considered outliers and are plotted as individual points. This
        can be set to an ascending sequence of percentiles (e.g.,
        [5, 95]) to set the whiskers at specific percentiles of the
        data. Finally, `whis`
can be the string ``'range'`` to force the whiskers to the
minimum and maximum of the data. In the edge case that the 25th
and 75th percentiles are equivalent, `whis` can be automatically
set to ``'range'`` via the `autorange` option.
bootstrap : int, optional
Number of times the confidence intervals around the median
should be bootstrapped (percentile method).
labels : array-like, optional
Labels for each dataset. Length must be compatible with
dimensions of `X`.
autorange : bool, optional (False)
When `True` and the data are distributed such that the 25th and
75th percentiles are equal, ``whis`` is set to ``'range'`` such
that the whisker ends are at the minimum and maximum of the
data.
Returns
-------
bxpstats : list of dict
A list of dictionaries containing the results for each column
of data. Keys of each dictionary are the following:
        ======== ===================================
        Key      Value Description
        ======== ===================================
        label    tick label for the boxplot
        mean     arithmetic mean value
        med      50th percentile
        q1       first quartile (25th percentile)
        q3       third quartile (75th percentile)
        cilo     lower notch around the median
        cihi     upper notch around the median
        whislo   end of the lower whisker
        whishi   end of the upper whisker
        fliers   outliers
        ======== ===================================
Notes
-----
Non-bootstrapping approach to confidence interval uses Gaussian-
based asymptotic approximation:
.. math::
        \\mathrm{med} \\pm 1.57 \\times \\frac{\\mathrm{iqr}}{\\sqrt{N}}
General approach from:
McGill, R., Tukey, J.W., and Larsen, W.A. (1978) "Variations of
Boxplots", The American Statistician, 32:12-16.
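    Examples
    --------
    Illustrative sketch (hypothetical data)::

        import numpy as np
        data = np.random.normal(size=(100, 3))
        stats = boxplot_stats(data, whis=1.5, labels=['a', 'b', 'c'])
        # one dict per column; e.g. stats[0]['med'], stats[0]['fliers']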
"""
def _bootstrap_median(data, N=5000):
# determine 95% confidence intervals of the median
M = len(data)
percentiles = [2.5, 97.5]
ii = np.random.randint(M, size=(N, M))
        bsData = data[ii]
estimate = np.median(bsData, axis=1, overwrite_input=True)
CI = np.percentile(estimate, percentiles)
return CI
def _compute_conf_interval(data, med, iqr, bootstrap):
if bootstrap is not None:
# Do a bootstrap estimate of notch locations.
# get conf. intervals around median
CI = _bootstrap_median(data, N=bootstrap)
notch_min = CI[0]
notch_max = CI[1]
else:
N = len(data)
notch_min = med - 1.57 * iqr / np.sqrt(N)
notch_max = med + 1.57 * iqr / np.sqrt(N)
return notch_min, notch_max
# output is a list of dicts
bxpstats = []
# convert X to a list of lists
X = _reshape_2D(X)
ncols = len(X)
if labels is None:
labels = repeat(None)
elif len(labels) != ncols:
raise ValueError("Dimensions of labels and X must be compatible")
input_whis = whis
for ii, (x, label) in enumerate(zip(X, labels), start=0):
# empty dict
stats = {}
if label is not None:
stats['label'] = label
# restore whis to the input values in case it got changed in the loop
whis = input_whis
# note tricksyness, append up here and then mutate below
bxpstats.append(stats)
# if empty, bail
if len(x) == 0:
stats['fliers'] = np.array([])
stats['mean'] = np.nan
stats['med'] = np.nan
stats['q1'] = np.nan
stats['q3'] = np.nan
stats['cilo'] = np.nan
stats['cihi'] = np.nan
stats['whislo'] = np.nan
stats['whishi'] = np.nan
continue
# up-convert to an array, just to be safe
x = np.asarray(x)
# arithmetic mean
stats['mean'] = np.mean(x)
# medians and quartiles
q1, med, q3 = np.percentile(x, [25, 50, 75])
# interquartile range
stats['iqr'] = q3 - q1
if stats['iqr'] == 0 and autorange:
whis = 'range'
# conf. interval around median
stats['cilo'], stats['cihi'] = _compute_conf_interval(
x, med, stats['iqr'], bootstrap
)
# lowest/highest non-outliers
if np.isscalar(whis):
if np.isreal(whis):
loval = q1 - whis * stats['iqr']
hival = q3 + whis * stats['iqr']
elif whis in ['range', 'limit', 'limits', 'min/max']:
loval = np.min(x)
hival = np.max(x)
else:
whismsg = ('whis must be a float, valid string, or '
'list of percentiles')
raise ValueError(whismsg)
else:
loval = np.percentile(x, whis[0])
hival = np.percentile(x, whis[1])
# get high extreme
wiskhi = np.compress(x <= hival, x)
if len(wiskhi) == 0 or np.max(wiskhi) < q3:
stats['whishi'] = q3
else:
stats['whishi'] = np.max(wiskhi)
# get low extreme
wisklo = np.compress(x >= loval, x)
if len(wisklo) == 0 or np.min(wisklo) > q1:
stats['whislo'] = q1
else:
stats['whislo'] = np.min(wisklo)
# compute a single array of outliers
stats['fliers'] = np.hstack([
np.compress(x < stats['whislo'], x),
np.compress(x > stats['whishi'], x)
])
# add in the remaining stats
stats['q1'], stats['med'], stats['q3'] = q1, med, q3
return bxpstats
# FIXME I don't think this is used anywhere
def unmasked_index_ranges(mask, compressed=True):
"""
Find index ranges where *mask* is *False*.
*mask* will be flattened if it is not already 1-D.
Returns Nx2 :class:`numpy.ndarray` with each row the start and stop
indices for slices of the compressed :class:`numpy.ndarray`
corresponding to each of *N* uninterrupted runs of unmasked
values. If optional argument *compressed* is *False*, it returns
the start and stop indices into the original :class:`numpy.ndarray`,
not the compressed :class:`numpy.ndarray`. Returns *None* if there
are no unmasked values.
Example::
y = ma.array(np.arange(5), mask = [0,0,1,0,0])
ii = unmasked_index_ranges(ma.getmaskarray(y))
        # returns array [[0, 2], [2, 4]]
y.compressed()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
ii = unmasked_index_ranges(ma.getmaskarray(y), compressed=False)
# returns array [[0, 2], [3, 5]]
y.filled()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
Prior to the transforms refactoring, this was used to support
masked arrays in Line2D.
"""
mask = mask.reshape(mask.size)
m = np.concatenate(((1,), mask, (1,)))
indices = np.arange(len(mask) + 1)
mdif = m[1:] - m[:-1]
i0 = np.compress(mdif == -1, indices)
i1 = np.compress(mdif == 1, indices)
assert len(i0) == len(i1)
if len(i1) == 0:
return None # Maybe this should be np.zeros((0,2), dtype=int)
if not compressed:
return np.concatenate((i0[:, np.newaxis], i1[:, np.newaxis]), axis=1)
seglengths = i1 - i0
breakpoints = np.cumsum(seglengths)
ic0 = np.concatenate(((0,), breakpoints[:-1]))
ic1 = breakpoints
return np.concatenate((ic0[:, np.newaxis], ic1[:, np.newaxis]), axis=1)
# a dict to cross-map linestyle arguments
_linestyles = [('-', 'solid'),
('--', 'dashed'),
('-.', 'dashdot'),
(':', 'dotted')]
ls_mapper = dict(_linestyles)
# The ls_mapper maps short codes for line style to their full name used
# by backends
# The reverse mapper is for mapping full names to short ones
ls_mapper_r = dict([(ls[1], ls[0]) for ls in _linestyles])
def align_iterators(func, *iterables):
"""
This generator takes a bunch of iterables that are ordered by func
It sends out ordered tuples::
(func(row), [rows from all iterators matching func(row)])
It is used by :func:`matplotlib.mlab.recs_join` to join record arrays
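    Illustrative sketch (assuming already-sorted inputs with truthy keys)::

        a = iter([1, 2, 5])
        b = iter([2, 3, 5])
        list(align_iterators(lambda v: v, a, b))
        # -> [(1, [1, None]), (2, [2, 2]), (3, [None, 3]), (5, [5, 5])]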
"""
class myiter:
def __init__(self, it):
self.it = it
self.key = self.value = None
self.iternext()
def iternext(self):
try:
self.value = next(self.it)
self.key = func(self.value)
except StopIteration:
self.value = self.key = None
def __call__(self, key):
retval = None
if key == self.key:
retval = self.value
self.iternext()
elif self.key and key > self.key:
raise ValueError("Iterator has been left behind")
return retval
# This can be made more efficient by not computing the minimum key for each
# iteration
iters = [myiter(it) for it in iterables]
minvals = minkey = True
while True:
minvals = ([_f for _f in [it.key for it in iters] if _f])
if minvals:
minkey = min(minvals)
yield (minkey, [it(minkey) for it in iters])
else:
break
def is_math_text(s):
# Did we find an even number of non-escaped dollar signs?
    # If so, treat it as math text.
try:
s = six.text_type(s)
except UnicodeDecodeError:
raise ValueError(
"matplotlib display text must have all code points < 128 or use "
"Unicode strings")
dollar_count = s.count(r'$') - s.count(r'\$')
even_dollars = (dollar_count > 0 and dollar_count % 2 == 0)
return even_dollars
def _check_1d(x):
'''
    Converts a scalar, or a sequence with fewer than one dimension, to an
    array of one dimension; leaves everything else untouched.
'''
if not hasattr(x, 'shape') or len(x.shape) < 1:
return np.atleast_1d(x)
else:
try:
x[:, None]
return x
except (IndexError, TypeError):
return np.atleast_1d(x)
def _reshape_2D(X):
"""
Converts a non-empty list or an ndarray of two or fewer dimensions
into a list of iterable objects so that in
for v in _reshape_2D(X):
v is iterable and can be used to instantiate a 1D array.
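    Illustrative sketch::

        _reshape_2D([1, 2, 3])                   # -> [[1, 2, 3]]
        _reshape_2D(np.arange(6).reshape(2, 3))  # -> one 1-D array per column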
"""
if hasattr(X, 'shape'):
# one item
if len(X.shape) == 1:
if hasattr(X[0], 'shape'):
X = list(X)
else:
X = [X, ]
# several items
elif len(X.shape) == 2:
nrows, ncols = X.shape
if nrows == 1:
X = [X]
elif ncols == 1:
X = [X.ravel()]
else:
X = [X[:, i] for i in xrange(ncols)]
else:
raise ValueError("input `X` must have 2 or fewer dimensions")
if not hasattr(X[0], '__len__'):
X = [X]
else:
X = [np.ravel(x) for x in X]
return X
def violin_stats(X, method, points=100):
"""
Returns a list of dictionaries of data which can be used to draw a series
of violin plots. See the `Returns` section below to view the required keys
of the dictionary. Users can skip this function and pass a user-defined set
of dictionaries to the `axes.vplot` method instead of using MPL to do the
calculations.
Parameters
----------
X : array-like
Sample data that will be used to produce the gaussian kernel density
estimates. Must have 2 or fewer dimensions.
method : callable
The method used to calculate the kernel density estimate for each
column of data. When called via `method(v, coords)`, it should
return a vector of the values of the KDE evaluated at the values
specified in coords.
points : scalar, default = 100
Defines the number of points to evaluate each of the gaussian kernel
density estimates at.
Returns
-------
A list of dictionaries containing the results for each column of data.
The dictionaries contain at least the following:
- coords: A list of scalars containing the coordinates this particular
kernel density estimate was evaluated at.
- vals: A list of scalars containing the values of the kernel density
estimate at each of the coordinates given in `coords`.
- mean: The mean value for this column of data.
- median: The median value for this column of data.
- min: The minimum value for this column of data.
- max: The maximum value for this column of data.
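    Examples
    --------
    Illustrative sketch (assumes SciPy is available for the KDE)::

        import numpy as np
        from scipy.stats import gaussian_kde

        def kde_method(v, coords):
            return gaussian_kde(v).evaluate(coords)

        data = np.random.normal(size=(100, 2))
        vpstats = violin_stats(data, kde_method, points=50)
        # vpstats[0]['coords'], vpstats[0]['vals'], vpstats[0]['mean'], ...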
"""
# List of dictionaries describing each of the violins.
vpstats = []
# Want X to be a list of data sequences
X = _reshape_2D(X)
for x in X:
# Dictionary of results for this distribution
stats = {}
# Calculate basic stats for the distribution
min_val = np.min(x)
max_val = np.max(x)
# Evaluate the kernel density estimate
coords = np.linspace(min_val, max_val, points)
stats['vals'] = method(x, coords)
stats['coords'] = coords
# Store additional statistics for this distribution
stats['mean'] = np.mean(x)
stats['median'] = np.median(x)
stats['min'] = min_val
stats['max'] = max_val
# Append to output
vpstats.append(stats)
return vpstats
class _NestedClassGetter(object):
# recipe from http://stackoverflow.com/a/11493777/741316
"""
When called with the containing class as the first argument,
and the name of the nested class as the second argument,
returns an instance of the nested class.
"""
def __call__(self, containing_class, class_name):
nested_class = getattr(containing_class, class_name)
# make an instance of a simple object (this one will do), for which we
# can change the __class__ later on.
nested_instance = _NestedClassGetter()
# set the class of the instance, the __init__ will never be called on
# the class but the original state will be set later on by pickle.
nested_instance.__class__ = nested_class
return nested_instance
class _InstanceMethodPickler(object):
"""
Pickle cannot handle instancemethod saving. _InstanceMethodPickler
provides a solution to this.
"""
def __init__(self, instancemethod):
"""Takes an instancemethod as its only argument."""
if six.PY3:
self.parent_obj = instancemethod.__self__
self.instancemethod_name = instancemethod.__func__.__name__
else:
self.parent_obj = instancemethod.im_self
self.instancemethod_name = instancemethod.im_func.__name__
def get_instancemethod(self):
return getattr(self.parent_obj, self.instancemethod_name)
def _step_validation(x, *args):
"""
Helper function of `pts_to_*step` functions
This function does all of the normalization required to the
input and generate the template for output
"""
args = tuple(np.asanyarray(y) for y in args)
x = np.asanyarray(x)
if x.ndim != 1:
raise ValueError("x must be 1 dimensional")
if len(args) == 0:
raise ValueError("At least one Y value must be passed")
return np.vstack((x, ) + args)
def pts_to_prestep(x, *args):
"""
    Convert a continuous line to pre-steps.
    Given a set of N points, convert to 2N - 1 points, which when
    connected linearly give a step function that changes values at the
    beginning of the intervals.
Parameters
----------
x : array
The x location of the steps
y1, y2, ... : array
Any number of y arrays to be turned into steps.
All must be the same length as ``x``
Returns
-------
x, y1, y2, .. : array
The x and y values converted to steps in the same order
as the input. If the input is length ``N``, each of these arrays
        will be length ``2N - 1``
Examples
--------
>> x_s, y1_s, y2_s = pts_to_prestep(x, y1, y2)
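    For instance (illustrative), ``x = [1, 2, 3]`` and ``y = [4, 5, 6]``
    become ``x_s = [1, 1, 2, 2, 3]`` and ``y_s = [4, 5, 5, 6, 6]``.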
"""
# do normalization
vertices = _step_validation(x, *args)
# create the output array
steps = np.zeros((vertices.shape[0], 2 * len(x) - 1), np.float)
# do the to step conversion logic
steps[0, 0::2], steps[0, 1::2] = vertices[0, :], vertices[0, :-1]
steps[1:, 0::2], steps[1:, 1:-1:2] = vertices[1:, :], vertices[1:, 1:]
# convert 2D array back to tuple
return tuple(steps)
def pts_to_poststep(x, *args):
"""
    Convert a continuous line to post-steps.
    Given a set of N points, convert to 2N - 1 points, which when
    connected linearly give a step function that changes values at the
    end of the intervals.
Parameters
----------
x : array
The x location of the steps
y1, y2, ... : array
Any number of y arrays to be turned into steps.
All must be the same length as ``x``
Returns
-------
x, y1, y2, .. : array
The x and y values converted to steps in the same order
as the input. If the input is length ``N``, each of these arrays
        will be length ``2N - 1``
Examples
--------
    >> x_s, y1_s, y2_s = pts_to_poststep(x, y1, y2)
"""
# do normalization
vertices = _step_validation(x, *args)
# create the output array
steps = ma.zeros((vertices.shape[0], 2 * len(x) - 1), np.float)
# do the to step conversion logic
steps[0, ::2], steps[0, 1:-1:2] = vertices[0, :], vertices[0, 1:]
steps[1:, 0::2], steps[1:, 1::2] = vertices[1:, :], vertices[1:, :-1]
# convert 2D array back to tuple
return tuple(steps)
def pts_to_midstep(x, *args):
"""
    Convert a continuous line to mid-steps.
    Given a set of N points, convert to 2N points, which when connected
    linearly give a step function that changes values at the middle of
    the intervals.
Parameters
----------
x : array
The x location of the steps
y1, y2, ... : array
Any number of y arrays to be turned into steps.
All must be the same length as ``x``
Returns
-------
x, y1, y2, .. : array
The x and y values converted to steps in the same order
as the input. If the input is length ``N``, each of these arrays
        will be length ``2N``
Examples
--------
    >> x_s, y1_s, y2_s = pts_to_midstep(x, y1, y2)
"""
# do normalization
vertices = _step_validation(x, *args)
# create the output array
steps = ma.zeros((vertices.shape[0], 2 * len(x)), np.float)
steps[0, 1:-1:2] = 0.5 * (vertices[0, :-1] + vertices[0, 1:])
steps[0, 2::2] = 0.5 * (vertices[0, :-1] + vertices[0, 1:])
steps[0, 0] = vertices[0, 0]
steps[0, -1] = vertices[0, -1]
steps[1:, 0::2], steps[1:, 1::2] = vertices[1:, :], vertices[1:, :]
# convert 2D array back to tuple
return tuple(steps)
STEP_LOOKUP_MAP = {'pre': pts_to_prestep,
'post': pts_to_poststep,
'mid': pts_to_midstep,
'step-pre': pts_to_prestep,
'step-post': pts_to_poststep,
'step-mid': pts_to_midstep}
def index_of(y):
"""
A helper function to get the index of an input to plot
against if x values are not explicitly given.
Tries to get `y.index` (works if this is a pd.Series), if that
fails, return np.arange(y.shape[0]).
This will be extended in the future to deal with more types of
labeled data.
Parameters
----------
y : scalar or array-like
The proposed y-value
Returns
-------
x, y : ndarray
The x and y values to plot.
"""
try:
return y.index.values, y.values
except AttributeError:
y = np.atleast_1d(y)
return np.arange(y.shape[0], dtype=float), y
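# Illustrative sketch (not from the original source): index_of falls back to
# ``np.arange`` when the input has no usable ``.index`` attribute, e.g. a plain list.
def _example_index_of():
    x, y = index_of([10, 20, 30])
    # x == array([0., 1., 2.]), y == array([10, 20, 30])
    return x, y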
def safe_first_element(obj):
if isinstance(obj, collections.Iterator):
# needed to accept `array.flat` as input.
# np.flatiter reports as an instance of collections.Iterator
# but can still be indexed via [].
# This has the side effect of re-setting the iterator, but
# that is acceptable.
try:
return obj[0]
except TypeError:
pass
raise RuntimeError("matplotlib does not support generators "
"as input")
return next(iter(obj))
def normalize_kwargs(kw, alias_mapping=None, required=(), forbidden=(),
allowed=None):
"""Helper function to normalize kwarg inputs
The order they are resolved are:
1. aliasing
2. required
3. forbidden
4. allowed
This order means that only the canonical names need appear in
`allowed`, `forbidden`, `required`
Parameters
----------
    kw : dict
        A dict of keyword arguments.
    alias_mapping : dict, optional
A mapping between a canonical name to a list of
aliases, in order of precedence from lowest to highest.
If the canonical value is not in the list it is assumed to have
the highest priority.
required : iterable, optional
A tuple of fields that must be in kwargs.
forbidden : iterable, optional
A list of keys which may not be in kwargs
allowed : tuple, optional
A tuple of allowed fields. If this not None, then raise if
`kw` contains any keys not in the union of `required`
and `allowed`. To allow only the required fields pass in
``()`` for `allowed`
Raises
------
TypeError
To match what python raises if invalid args/kwargs are passed to
a callable.
"""
# deal with default value of alias_mapping
if alias_mapping is None:
alias_mapping = dict()
# make a local so we can pop
kw = dict(kw)
# output dictionary
ret = dict()
# hit all alias mappings
for canonical, alias_list in six.iteritems(alias_mapping):
# the alias lists are ordered from lowest to highest priority
# so we know to use the last value in this list
tmp = []
seen = []
for a in alias_list:
try:
tmp.append(kw.pop(a))
seen.append(a)
except KeyError:
pass
# if canonical is not in the alias_list assume highest priority
if canonical not in alias_list:
try:
tmp.append(kw.pop(canonical))
seen.append(canonical)
except KeyError:
pass
# if we found anything in this set of aliases put it in the return
# dict
if tmp:
ret[canonical] = tmp[-1]
if len(tmp) > 1:
warnings.warn("Saw kwargs {seen!r} which are all aliases for "
"{canon!r}. Kept value from {used!r}".format(
seen=seen, canon=canonical, used=seen[-1]))
# at this point we know that all keys which are aliased are removed, update
# the return dictionary from the cleaned local copy of the input
ret.update(kw)
fail_keys = [k for k in required if k not in ret]
if fail_keys:
raise TypeError("The required keys {keys!r} "
"are not in kwargs".format(keys=fail_keys))
fail_keys = [k for k in forbidden if k in ret]
if fail_keys:
raise TypeError("The forbidden keys {keys!r} "
"are in kwargs".format(keys=fail_keys))
if allowed is not None:
allowed_set = set(required) | set(allowed)
fail_keys = [k for k in ret if k not in allowed_set]
if fail_keys:
raise TypeError("kwargs contains {keys!r} which are not in "
"the required {req!r} or "
"allowed {allow!r} keys".format(
keys=fail_keys, req=required,
allow=allowed))
return ret
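# Illustrative sketch (not from the original source): resolving aliases the way the
# docstring above describes. The alias mapping below is hypothetical example data,
# not one shipped with this module.
def _example_normalize_kwargs():
    aliases = {'color': ['c'], 'linewidth': ['lw']}
    out = normalize_kwargs({'c': 'red', 'lw': 2}, alias_mapping=aliases)
    # out == {'color': 'red', 'linewidth': 2}
    return out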
def get_label(y, default_name):
try:
return y.name
except AttributeError:
return default_name
# Numpy > 1.6.x deprecates putmask in favor of the new copyto.
# So long as we support versions 1.6.x and less, we need the
# following local version of putmask. We choose to make a
# local version of putmask rather than of copyto because the
# latter includes more functionality than the former. Therefore
# it is easy to make a local version that gives full putmask
# behavior, but duplicating the full copyto behavior would be
# more difficult.
try:
np.copyto
except AttributeError:
_putmask = np.putmask
else:
def _putmask(a, mask, values):
return np.copyto(a, values, where=mask)
_lockstr = """\
LOCKERROR: matplotlib is trying to acquire the lock
{!r}
and has failed. This may be due to another process holding this
lock. If you are sure no other matplotlib process is running, try
removing these folders and trying again.
"""
class Locked(object):
"""
Context manager to handle locks.
Based on code from conda.
(c) 2012-2013 Continuum Analytics, Inc. / https://www.continuum.io/
All Rights Reserved
conda is distributed under the terms of the BSD 3-clause license.
Consult LICENSE_CONDA or https://opensource.org/licenses/BSD-3-Clause.
"""
LOCKFN = '.matplotlib_lock'
class TimeoutError(RuntimeError):
pass
def __init__(self, path):
self.path = path
self.end = "-" + str(os.getpid())
self.lock_path = os.path.join(self.path, self.LOCKFN + self.end)
self.pattern = os.path.join(self.path, self.LOCKFN + '-*')
self.remove = True
def __enter__(self):
retries = 50
sleeptime = 0.1
while retries:
files = glob.glob(self.pattern)
if files and not files[0].endswith(self.end):
time.sleep(sleeptime)
retries -= 1
else:
break
else:
err_str = _lockstr.format(self.pattern)
raise self.TimeoutError(err_str)
if not files:
try:
os.makedirs(self.lock_path)
except OSError:
pass
else: # PID lock already here --- someone else will remove it.
self.remove = False
def __exit__(self, exc_type, exc_value, traceback):
if self.remove:
for path in self.lock_path, self.path:
try:
os.rmdir(path)
except OSError:
pass
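# Illustrative sketch (not from the original source): the lock is meant to be used
# as a context manager around a directory shared between processes. The path below
# is hypothetical; ``os``, ``glob`` and ``time`` are assumed to be imported earlier
# in this module, as the class above already requires them.
def _example_locked(cache_dir='/tmp/mpl-cache'):
    with Locked(cache_dir):
        pass  # read or write files under cache_dir while holding the lock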
|
the-stack_0_22844 | from . import ScribeModuleBaseClass
from . lib.util import to_list, validate_length
from . lib.k8s_util import remove_managed_fields
class K8s_nodes(ScribeModuleBaseClass):
def __init__(self, input_dict=None, module_name=None, host_name=None,
input_type=None, scribe_uuid=None):
ScribeModuleBaseClass.__init__(self, module_name=module_name,
input_dict=input_dict,
host_name=host_name,
input_type=input_type,
scribe_uuid=scribe_uuid)
def parse(self):
nodes_full = self._input_dict
        # Make sure the input actually contains entries before processing
        validate_length(len(nodes_full), self.module)
# Flatten some of the dictionaries to lists
nodes_full = to_list("metadata","annotations",nodes_full)
nodes_full = to_list("metadata","labels",nodes_full)
output_dict = self._dict
remove_managed_fields(nodes_full)
output_dict['value'] = nodes_full
yield output_dict
|
the-stack_0_22845 | """ Sample Flask based application for Asgardeo OIDC SDK.
This application demonstrates Asgardeo OIDC SDK capabilities.
"""
from functools import wraps
from http.client import HTTPException
from flask import Flask, redirect, jsonify, url_for, render_template
from samples.flask.conf import auth_config
from asgardeo_auth.Integration.flask_client import FlaskAsgardeoAuth
from asgardeo_auth.exception.asgardeo_auth_error import \
AsgardeoAuthError
from samples.flask.constants import REDIRECT, TOKEN_RESPONSE, USERNAME
app = Flask(__name__)
app.secret_key = 'super_secret_key'
# initialize the app
identity_auth = FlaskAsgardeoAuth(auth_config=auth_config)
def requires_auth(f):
"""
Decorator to secure the protected endpoint which require user
authentication.
Args:
f : function to be decorated
"""
@wraps(f)
def decorated(*args, **kwargs):
"""
Decorator to redirect user to the dashboard.
"""
if not identity_auth.is_session_data_available(USERNAME):
return redirect(url_for('dashboard'))
return f(*args, **kwargs)
return decorated
@app.errorhandler(Exception)
def handle_auth_error(ex):
"""
Handle an authentication error.
Args:
ex : Exception to handle.
"""
response = jsonify(message=str(ex))
response.status_code = (ex.code if isinstance(ex, HTTPException) else 500)
return response
@app.route('/')
@requires_auth
def home():
"""
    Render the dashboard page for an authenticated user.
"""
session_data = identity_auth.get_post_auth_session_data()
return render_template('/dashboard.html', session_data=session_data)
@app.route('/signin')
def dashboard():
"""
    Render the sign-in (index) page.
"""
return render_template('/index.html')
@app.route('/login')
def login():
"""
    Login implementation using asgardeo_auth_python_sdk.
"""
response = identity_auth.sign_in()
if REDIRECT in response:
return redirect(response[REDIRECT])
elif TOKEN_RESPONSE in response:
credentials, authenticated_user = response[TOKEN_RESPONSE]
return redirect(url_for('home'))
else:
raise AsgardeoAuthError(
'Error occurred on the sign in Process Please Try again later')
@app.route('/logout')
def logout():
"""
Logout implementation from asgardeo_auth_python_sdk.
"""
return identity_auth.sign_out()
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0', port=5000, ssl_context='adhoc')
|
the-stack_0_22846 |
from download_youtube_album import AlbumDownload
if __name__ == '__main__':
temp_file_name = "tmp.mp4"
track_list_file = "songs-info-input.txt"
album_download = AlbumDownload(temp_file_name, track_list_file)
album_download.run()
|
the-stack_0_22847 | from collections import Counter
from dataclasses import dataclass
from functools import lru_cache
from math import ceil
from math import erfc
from math import sqrt
from operator import attrgetter
from typing import List
from typing import NamedTuple
import altair as alt
import pandas as pd
from scipy.special import gammaincc
from coinflip._randtests.common.core import *
from coinflip._randtests.common.result import TestResult
from coinflip._randtests.common.result import make_testvars_table
from coinflip._randtests.common.result import plot_chi2_dist
from coinflip._randtests.common.result import plot_halfnorm_dist
from coinflip._randtests.common.testutils import blocks
from coinflip._randtests.common.typing import Face
from coinflip._randtests.common.typing import Integer
__all__ = ["monobit", "frequency_within_block"]
# ------------------------------------------------------------------------------
# Frequency (Monobit) Test
class FaceCount(NamedTuple):
value: Face
count: Integer
class FaceCounts(NamedTuple):
heads: FaceCount
tails: FaceCount
@property
@lru_cache()
def max(self):
return max(*self, key=attrgetter("count"))
@property
@lru_cache()
def min(self):
return min(*self, key=attrgetter("count"))
@classmethod
def from_series(cls, series, heads, tails):
heads = FaceCount(heads, series[heads])
tails = FaceCount(tails, series[tails])
return cls(heads, tails)
@randtest()
def monobit(series, heads, tails, ctx):
n = len(series)
set_task_total(ctx, 2)
failures = check_recommendations(ctx, {"n ≥ 100": n >= 100})
counts = FaceCounts.from_series(series.value_counts(), heads, tails)
advance_task(ctx)
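    # Note (not in the original source): this mirrors the NIST SP 800-22 monobit test,
    # where diff equals |#heads - #tails| = |S_n| and p = erfc(|S_n| / sqrt(2 * n)).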
diff = counts.max.count - counts.min.count
normdiff = diff / sqrt(n)
p = erfc(normdiff / sqrt(2))
advance_task(ctx)
return MonobitTestResult(heads, tails, failures, normdiff, p, n, counts, diff)
@dataclass
class MonobitTestResult(TestResult):
n: Integer
counts: FaceCounts
diff: Integer
def _render(self):
yield self._pretty_result("normalised diff")
counts = make_testvars_table("value", "count")
counts.add_row(str(self.counts.max.value), str(self.counts.max.count))
counts.add_row(str(self.counts.min.value), str(self.counts.min.count))
yield counts
def plot_counts(self):
df = pd.DataFrame(
{
"Value": [self.counts.max.value, self.counts.min.value],
"Count": [self.counts.max.count, self.counts.min.count],
}
)
df["Value"] = df["Value"].astype("category")
chart = (
alt.Chart(df)
.mark_bar()
.encode(
alt.X("Value"),
alt.Y("Count", axis=alt.Axis(tickMinStep=1)),
tooltip="Count",
)
.properties(
title=f"Counts of {self.counts.max.value} and {self.counts.min.value}"
)
)
return chart
def plot_refdist(self):
return plot_halfnorm_dist(
self.statistic, xtitle="Difference between counts (normalised)"
)
# ------------------------------------------------------------------------------
# Frequency within Block Test
@randtest(min_n=8)
def frequency_within_block(series, heads, tails, ctx, blocksize=None):
n = len(series)
# TODO SAT solver
if not blocksize:
if n < 100:
blocksize = 8
else:
blocksize = max(ceil(0.01 * n), 20)
nblocks = n // blocksize
if nblocks >= 100 and n > 5000:
blocksize = n // 50
nblocks = n // blocksize
set_task_total(ctx, nblocks + 3)
failures = check_recommendations(
ctx,
{
"n ≥ 100": n >= 100,
"blocksize ≥ 20": blocksize >= 20,
"blocksize > 0.01 * n": blocksize > 0.01 * n,
"nblocks < 100": nblocks < 100,
},
)
advance_task(ctx)
counts = []
for block in blocks(series, blocksize):
matches = block == heads
count = matches.sum()
counts.append(count)
advance_task(ctx)
proportions = (count / blocksize for count in counts)
deviations = [prop - 1 / 2 for prop in proportions]
advance_task(ctx)
# TODO figure out the chi-square test being used
statistic = 4 * blocksize * sum(x ** 2 for x in deviations)
p = gammaincc(nblocks / 2, statistic / 2)
advance_task(ctx)
return FrequencyWithinBlockTestResult(
heads,
tails,
failures,
statistic,
p,
blocksize,
nblocks,
counts,
)
@dataclass
class FrequencyWithinBlockTestResult(TestResult):
blocksize: Integer
nblocks: Integer
counts: List[Integer]
def __post_init__(self):
self.count_nblocks = Counter(self.counts)
def _render(self):
yield self._pretty_result("chi-square")
yield TestResult._pretty_inputs(
("blocksize", self.blocksize), ("nblocks", self.nblocks)
)
def plot_count_nblocks(self):
df = pd.DataFrame(
{
"count": self.count_nblocks.keys(),
"nblocks": self.count_nblocks.values(),
}
)
brush = alt.selection(type="interval", encodings=["x"])
base = (
alt.Chart(df)
.mark_bar()
.encode(
x="count:Q",
y="nblocks:Q",
)
)
upper = base.encode(
alt.X(
"count:Q", title=f"Count of {self.heads}", scale=alt.Scale(domain=brush)
),
alt.Y(
"nblocks:Q",
title="Number of blocks",
axis=alt.Axis(tickMinStep=1),
),
).properties(
title=f"Occurences of {self.heads} counts",
)
lower = (
base.encode(
alt.X(
"count:Q",
scale=alt.Scale(
domain=(0, self.blocksize),
padding=0,
),
title=None,
),
alt.Y("nblocks:Q", title=None),
)
.properties(height=50)
.add_selection(brush)
)
chart = upper & lower
return chart
def plot_refdist(self):
return plot_chi2_dist(self.statistic, self.nblocks)
|
the-stack_0_22851 | # coding: utf-8
from __future__ import print_function
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from contextlib import closing
from collections import OrderedDict as order_dict
import requests
from bs4 import BeautifulSoup
from guessit import guessit
from getsub.downloader.downloader import Downloader
from getsub.sys_global_var import py, prefix
from getsub.progress_bar import ProgressBar
''' Zimuku subtitle downloader
'''
class ZimukuDownloader(Downloader):
name = 'zimuku'
choice_prefix = '[ZIMUKU]'
site_url = 'http://www.zimuku.la'
search_url = 'http://www.zimuku.la/search?q='
def get_subtitles(self, video_name, sub_num=10):
print(prefix + ' Searching ZIMUKU...', end='\r')
keywords, info_dict = Downloader.get_keywords(video_name)
keyword = ' '.join(keywords)
info = guessit(keyword)
keywords.pop(0)
keywords.insert(0, info['title'])
if info.get('season'):
season = str(info['season']).zfill(2)
keywords.insert(1, 's' + season)
sub_dict = order_dict()
s = requests.session()
s.headers.update(Downloader.header)
while True:
            # search with the current keyword
r = s.get(ZimukuDownloader.search_url + keyword, timeout=10)
if py == 2:
html = r.text.encode('utf8')
else:
html = r.text
if '搜索不到相关字幕' not in html:
bs_obj = BeautifulSoup(r.text, 'html.parser')
if bs_obj.find('div', {'class': 'item'}):
                    # combined (site-wide) search results page
for item in bs_obj.find_all('div', {'class': 'item'}):
title_boxes = item.find(
'div', {'class': 'title'}).find_all('p')
title_box = title_boxes[0]
sub_title_box = title_boxes[1]
if py == 2:
item_title = title_box.text.encode('utf8')
item_sub_title = sub_title_box.text.encode('utf8')
else:
item_title = title_box.text
item_sub_title = sub_title_box.text
item_info = guessit(item_title)
if info.get('year') and item_info.get('year'):
if info['year'] != item_info['year']:
                                # year does not match, skip
continue
item_titles = [
item_info.get('title', '').lower(),
item_info.get('alternative_title', '').lower()
] + item_sub_title.lower().strip().split(',')
title_included = sum([
1 for _ in item_sub_title
if info['title'].lower() not in _
])
if title_included == 0:
                            # title extracted by guessit does not match, skip
item_title_split = \
[one.split() for one in item_titles]
info_title_split = info['title'].lower().split()
sum1 = sum([1 for _ in info_title_split
if _ in item_title_split[0]])
sum2 = sum([1 for _ in info_title_split
if _ in item_title_split[1]])
if not (sum1 / len(info_title_split) >= 0.5
or sum2 / len(info_title_split) >= 0.5):
                                    # title does not match, skip
continue
for a in item.find_all('td', {'class': 'first'})[:3]:
a = a.a
a_link = ZimukuDownloader.site_url + \
a.attrs['href']
if py == 2:
a_title = a.text.encode('utf8')
else:
a_title = a.text
a_title = ZimukuDownloader.choice_prefix + a_title
sub_dict[a_title] = {'type': 'default',
'link': a_link}
elif bs_obj.find('div', {'class': 'persub'}):
                    # shooter subtitle page
for persub in bs_obj.find_all('div', {'class': 'persub'}):
if py == 2:
a_title = persub.h1.text.encode('utf8')
else:
a_title = persub.h1.text
a_link = ZimukuDownloader.site_url + \
persub.h1.a.attrs['href']
a_title = ZimukuDownloader.choice_prefix + a_title
sub_dict[a_title] = {'type': 'shooter', 'link': a_link}
else:
raise ValueError('Zimuku搜索结果出现未知结构页面')
if len(sub_dict) >= sub_num:
del keywords[:]
break
if len(keywords) > 1:
keyword = keyword.replace(keywords[-1], '').strip()
keywords.pop(-1)
continue
break
for sub_name, sub_info in sub_dict.items():
if sub_info['type'] == 'default':
                # subtitle page from the combined search
r = s.get(sub_info['link'], timeout=60)
bs_obj = BeautifulSoup(r.text, 'html.parser')
lang_box = bs_obj.find('ul', {'class': 'subinfo'}).find('li')
type_score = 0
for lang in lang_box.find_all('img'):
if 'uk' in lang.attrs['src']:
type_score += 1
elif 'hongkong' in lang.attrs['src']:
type_score += 2
elif 'china' in lang.attrs['src']:
type_score += 4
elif 'jollyroger' in lang.attrs['src']:
type_score += 8
sub_info['lan'] = type_score
download_link = bs_obj.find('a', {'id': 'down1'}).attrs['href']
download_link = urljoin(
ZimukuDownloader.site_url, download_link)
r = s.get(download_link, timeout=60)
bs_obj = BeautifulSoup(r.text, 'html.parser')
download_link = bs_obj.find('a', {'rel': 'nofollow'})
download_link = download_link.attrs['href']
download_link = urljoin(
ZimukuDownloader.site_url, download_link)
sub_info['link'] = download_link
else:
                # shooter subtitle page
r = s.get(sub_info['link'], timeout=60)
bs_obj = BeautifulSoup(r.text, 'html.parser')
lang_box = bs_obj.find('ul', {'class': 'subinfo'}).find('li')
type_score = 0
if py == 2:
text = lang_box.text.encode('utf8')
else:
text = lang_box.text
if '英' in text:
type_score += 1
elif '繁' in text:
type_score += 2
elif '简' in text:
type_score += 4
elif '双语' in text:
type_score += 8
sub_info['lan'] = type_score
download_link = bs_obj.find('a', {'id': 'down1'}).attrs['href']
sub_info['link'] = download_link
backup_session = requests.session()
backup_session.headers.update(s.headers)
backup_session.headers['Referer'] = sub_info['link']
backup_session.cookies.update(s.cookies)
sub_info['session'] = backup_session
if (len(sub_dict.items()) > 0
and list(sub_dict.items())[0][1]['lan'] < 8):
            # the first candidate subtitle is not bilingual
sub_dict = order_dict(
sorted(sub_dict.items(),
key=lambda e: e[1]['lan'], reverse=True)
)
keys = list(sub_dict.keys())[:sub_num]
return {key: sub_dict[key] for key in keys}
def download_file(self, file_name, download_link, session=None):
try:
if not session:
session = requests.session()
with closing(session.get(download_link, stream=True)) as response:
filename = response.headers['Content-Disposition']
                chunk_size = 1024  # max size per single request
                # total size of the response body
content_size = int(response.headers['content-length'])
bar = ProgressBar(prefix + ' Get',
file_name.strip(), content_size)
sub_data_bytes = b''
for data in response.iter_content(chunk_size=chunk_size):
sub_data_bytes += data
bar.refresh(len(sub_data_bytes))
except requests.Timeout:
return None, None, 'false'
if '.rar' in filename:
datatype = '.rar'
elif '.zip' in filename:
datatype = '.zip'
elif '.7z' in filename:
datatype = '.7z'
else:
datatype = 'Unknown'
return datatype, sub_data_bytes, ''
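    # Illustrative usage (not from the original source): a caller would typically do
    # something like
    #
    #     downloader = ZimukuDownloader()
    #     subs = downloader.get_subtitles('Some.Movie.2019.1080p.BluRay', sub_num=5)
    #     name, info = next(iter(subs.items()))
    #     datatype, data, err = downloader.download_file(name, info['link'],
    #                                                    session=info['session'])
    #
    # The no-argument constructor is an assumption; the Downloader base class is
    # defined elsewhere in the package and may require arguments.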
|
the-stack_0_22852 | from typing import List
from ..data_structures import new_stack
class FindingSpans:
"""
"""
@staticmethod
def apply(xs: List[int]):
stack = new_stack()
state = [float('inf') for _ in range(len(xs))]
for i in range(0, len(xs)):
while not stack.is_empty() and xs[i] > xs[stack.peek()]:
stack.pop()
if stack.is_empty():
p = -1
else:
p = stack.peek()
state[i] = i - p
stack.push(i)
return state
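# Worked example (not from the original source): for xs = [6, 3, 4, 5, 2] the spans
# are [1, 1, 2, 3, 1] -- e.g. the span of 5 (index 3) is 3 because the 3 and 4 before
# it are smaller, while 6 at index 0 stops the scan.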
if __name__ == '__main__':
data = [6, 3, 4, 5, 2]
    assert FindingSpans.apply(data) == [1, 1, 2, 3, 1]
|
the-stack_0_22853 | from IPython.display import clear_output
def boardDisplay(board):
clear_output()
print(board[0]+"|"+board[1]+"|"+board[2])
print("-----")
print(board[3]+"|"+board[4]+"|"+board[5])
print("-----")
print(board[6]+"|"+board[7]+"|"+board[8])
def instructions(test_board):
print("The goal of this program is to play Tic Tac Toe! Just select the cell in which you want to put your mark (X or O)."+
"\nThe first player will chose the mark he wants to use. Be advice the number in each cell "+
"represents the number you must hit yo put your mark")
boardDisplay(test_board)
print("\nHave Fun!")
def win_check(board,mark):
return ((board[0] == mark and board[1] == mark and board[2] == mark) or # across the top
(board[3] == mark and board[4] == mark and board[5] == mark) or # across the middle
(board[6] == mark and board[7] == mark and board[8] == mark) or # across the bottom
(board[0] == mark and board[3] == mark and board[6] == mark) or # down the middle
(board[1] == mark and board[4] == mark and board[7] == mark) or # down the middle
(board[2] == mark and board[5] == mark and board[8] == mark) or # down the right side
(board[0] == mark and board[4] == mark and board[8] == mark) or # diagonal
(board[2] == mark and board[4] == mark and board[6] == mark)) # diagonal
def game(player1,player2, board):
roundGame=0
while " " in board:
roundGame+=1
print(f"\nWelcome to round {roundGame}")
position=0
position=int(input("\nChoose your position to play Player 1: "))
board[position-1]=player1
boardDisplay(board)
if win_check(board,player1):
print("You just won Player 1, Congrats! ")
break
if roundGame==5 and not win_check(board,player1):
print("OH OH there was a Tie!")
break
position=int(input("\nChoose your position to play Player 2: "))
board[position-1]=player2
boardDisplay(board)
if win_check(board,player2):
print("You just won Player 2, Congrats! ")
break
clear_output(wait=False)
def replay():
game=input("Do you want to play a game of Tic Tac Toe? (y/n): ")
if game.lower()=="y":
return True
else:
return False
test_board=["1","2","3","4","5","6","7","8","9"]
instructions(test_board)
game_on=replay()
while game_on:
board=[" "," "," "," "," "," "," "," "," "]
boardDisplay(board)
#Assign the Letter
player1=input("Please tell me what is it that you desire (x/o): ")
if player1.lower()=="x":
player2="O"
else:
player2="X"
print(f"Great! Player 1 will use {player1.upper()}"
+ f" and Player 2 will use {player2}")
game(player1,player2,board)
game_on=replay()
print("\nThis small game was created by Roger Urrutia. Hope you liked it.") |
the-stack_0_22854 | # import necessary libraries
import pandas as pd
import re
import torch
import collections
import numpy as np
import ujson as json
import time
import torch.nn as nn
import os
import argparse
from pathlib import Path
from torch.utils.tensorboard import SummaryWriter
import matplotlib.pyplot as plt
from tokenizers import BertWordPieceTokenizer
from transformers import (
BertForSequenceClassification,
BertConfig,
AdamW,
get_linear_schedule_with_warmup,
)
import random
from tqdm import tqdm
import numpy as np
import arguments.train_arguments as arguments
from data_processing.articles import Articles
import data_processing.dictionaries as dictionary
import sampling.sampler_util as sampler_util
import training.eval_util as eval_util
from training.collate import collate_fn
np.random.seed(0)
parser = argparse.ArgumentParser(
description="Train model on article data and test evaluation"
)
arguments.add_data(parser)
arguments.add_training(parser)
arguments.add_optimization(parser)
arguments.add_model(parser)
args = parser.parse_args()
# set device
if torch.cuda.is_available() and args.use_gpu:
device = "cuda"
elif not args.use_gpu:
device = "cpu"
else:
device = "cpu"
print("Cannot use GPU. Using CPU instead.")
print(f"Device: {device}")
# set output directory path
output_path = Path(args.output_dir)
# tensorboard log and graph output folder declaration
log_tensorboard_dir = output_path / "runs" / "BERT"
writer = SummaryWriter(log_tensorboard_dir)
# load datasets
train_path = Path(args.train_path)
test_path = Path(args.test_path)
eval_path = Path(args.eval_path)
train_data = Articles(train_path)
test_data = Articles(test_path)
eval_data = Articles(eval_path, index_file=args.index_file_path)
print("Data Loaded")
# initialize tokenizer from BERT library
tokenizer = BertWordPieceTokenizer(args.tokenizer_file, lowercase=True)
print("Tokenizer Initialized!")
# create and save or load dictionaries based on arguments
if args.create_dicts:
(
final_word_ids,
final_url_ids,
final_publication_ids,
) = dictionary.create_merged_dictionaries(
train_data.examples, "target", args.tokenizer_file
)
print("Dictionaries Created")
dict_path = Path(args.data_dir) / "dictionaries"
if not dict_path.is_dir():
dict_path.mkdir()
dictionary.save_dictionaries(
final_word_ids, final_url_ids, final_publication_ids, dict_path
)
else:
dictionary_dir = Path(args.dict_dir)
final_word_ids, final_url_ids, final_publication_ids = dictionary.load_dictionaries(
dictionary_dir
)
print("Dictionaries loaded.")
# map items in dataset using dictionary keys (convert words and urls to numbers for the model)
if args.map_items:
    badtokens = []
    bad_token_path = Path(args.bad_token_path)
    if bad_token_path.is_file():
        with open(bad_token_path, "r") as f:
            badtokens = [int(line.rstrip()) for line in f]
for dataset in [train_data, test_data, eval_data]:
dataset.map_items(
tokenizer, final_url_ids, final_publication_ids, filter=False,
)
else:
for dataset in [train_data, test_data, eval_data]:
dataset.map_items(
tokenizer, final_url_ids, final_publication_ids, filter=False
)
print("Items mapped")
mapped_data_path = Path(args.data_dir) / "mapped-data"
if not mapped_data_path.is_dir():
mapped_data_path.mkdir()
train_mapped_path = mapped_data_path / "train.json"
test_mapped_path = mapped_data_path / "test.json"
eval_mapped_path = mapped_data_path / "evaluation.json"
with open(train_mapped_path, "w") as file:
json.dump(train_data.examples, file)
with open(test_mapped_path, "w") as file:
json.dump(test_data.examples, file)
with open(eval_mapped_path, "w") as file:
json.dump(eval_data.examples, file)
print(f"Mapped Data saved to {mapped_data_path} directory")
# create weights for dataset samples to ensure only positive and negative examples are chosen in respective samples
pos_sampler = train_data.create_positive_sampler(args.target_publication)
neg_sampler = train_data.create_negative_sampler(args.target_publication)
train_batch_sampler = sampler_util.BatchSamplerWithNegativeSamples(
pos_sampler=pos_sampler,
neg_sampler=neg_sampler,
items=train_data.examples,
batch_size=args.batch_size,
)
# pin memory if using GPU for high efficiency
if args.use_gpu:
pin_mem = True
else:
pin_mem = False
# create dataloaders for iterable data when training and testing recall
train_loader = torch.utils.data.DataLoader(
train_data,
batch_sampler=train_batch_sampler,
collate_fn=collate_fn,
pin_memory=pin_mem,
)
eval_loader = torch.utils.data.DataLoader(
eval_data,
batch_size=args.eval_batch_size,
collate_fn=collate_fn,
pin_memory=pin_mem,
)
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=args.eval_batch_size,
collate_fn=collate_fn,
pin_memory=pin_mem,
)
# function that allows for infinite iteration over training batches
def cycle(iterable):
while True:
for x in iterable:
yield x
model = BertForSequenceClassification.from_pretrained(args.model_path)
model.to(device)
model_path = output_path / "model"
if not model_path.is_dir():
model_path.mkdir()
config_file = model_path / "config.json"
model.config.to_json_file(config_file)
steps_per_number_positive_labels = int(20000 // (args.batch_size / 2))
if 20000 % (args.batch_size / 2) != 0:
steps_per_number_positive_labels += 1
loss = torch.nn.BCEWithLogitsLoss()
optimizer = AdamW(params=model.parameters(), lr=args.learning_rate)
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps, # Default value in run_glue.py
num_training_steps=args.training_steps,
)
print(model)
print(optimizer)
print(scheduler)
model.train() # turn on training mode
running_loss = 0
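# Note (assumption, not stated in the original source): the custom batch sampler is
# expected to put positive examples in the first half of every batch and negatives in
# the second half, so the targets can be built once up front as [1, ..., 1, 0, ..., 0].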
labels = torch.Tensor(
(np.arange(args.batch_size) < args.batch_size // 2).astype(np.float32)
)
labels = labels.to(device)
validation_recall_list = []
print("Beginning Training")
print("--------------------")
# training loop with validation checks
for step, batch in enumerate(cycle(train_loader)):
# calculate test and evaluation performance based on user intended frequency
if step % args.frequency == 0 and step != args.training_steps:
# output loss
model.eval()
torch.no_grad()
writer.add_scalar("Loss/train", running_loss / args.frequency, step)
print(f"Training Loss: {running_loss/args.frequency}")
running_loss = 0.0
logit_list = []
for eval_batch in tqdm(eval_loader):
current_logits = eval_util.calculate_batched_predictions(
eval_batch, model, device, args.target_publication
)
logit_list = logit_list + list(current_logits)
converted_list = np.array(logit_list)
sorted_preds = np.sort(converted_list)
indices = np.argsort(converted_list)
calc_recall = eval_util.calculate_recall(
eval_data,
indices,
args.eval_recall_max,
args.target_publication,
"Eval",
writer,
step,
)
validation_recall_list.append(calc_recall)
model.train()
# save model for easy reloading
if max(validation_recall_list) == validation_recall_list[-1]:
model_string = str(step) + "-bert-model.pt"
current_model_path = model_path / model_string
torch.save(model.state_dict(), current_model_path)
# check if validation recall is increasing
if len(validation_recall_list) > 3:
if (
validation_recall_list[-1] < validation_recall_list[-2]
and validation_recall_list[-2] < validation_recall_list[-3]
and validation_recall_list[-3] < validation_recall_list[-4]
):
print("Validation Recall Decreased For Three Successive Iterations!")
break
# turn to training mode and calculate loss for backpropagation
torch.enable_grad()
optimizer.zero_grad()
word_attributes, attention_masks, word_subset_counts, real_labels = batch
word_attributes = word_attributes.to(device)
attention_masks = attention_masks.to(device)
logits = model(word_attributes, attention_masks)[0]
logits = torch.squeeze(logits)
L = loss(logits, labels)
L.backward()
if args.clip_grad:
nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
scheduler.step()
running_loss += L.item()
print(f"Step: {step}, Batch Loss: {L.item()}")
if step != 0 and step % args.training_steps == 0:
writer.add_scalar("Loss/train", running_loss / args.frequency, step)
print(f"Training Loss: {running_loss/100}")
print("Getting Final Evaluation Results")
print("--------------------")
break
# write peak for training steps and load best model for final performance metrics and data saving
writer.add_scalar("Peaked Steps", np.argmax(validation_recall_list) * args.frequency)
writer.add_scalar("Max_Evaluation_Recall", np.max(validation_recall_list))
proper_step_model = (
str(np.argmax(validation_recall_list) * args.frequency) + "-bert-model.pt"
)
config = BertConfig.from_json_file(config_file)
model = BertForSequenceClassification(config)
abs_model_path = output_path / "model" / proper_step_model
model.load_state_dict(torch.load(abs_model_path))
model.to(device)
model.eval()
torch.no_grad()
# get final evaluation results and create a basic csv of top articles
eval_logit_list = []
for batch in tqdm(eval_loader):
current_logits = eval_util.calculate_batched_predictions(
batch, model, device, args.target_publication
)
eval_logit_list = eval_logit_list + list(current_logits)
converted_list = np.array(eval_logit_list)
sorted_preds = np.sort(converted_list)
indices = np.argsort(converted_list)
calc_recall = eval_util.calculate_recall(
eval_data,
indices,
args.eval_recall_max,
args.target_publication,
"Eval",
writer,
step,
)
ranked_df = eval_util.create_ranked_results_list(
final_word_ids, sorted_preds, indices, eval_data
)
eval_util.save_ranked_df(output_path, "evaluation", ranked_df)
# get final test results and create a basic csv of top articles
test_logit_list = []
for batch in tqdm(test_loader):
current_logits = eval_util.calculate_batched_predictions(
batch, model, device, args.target_publication
)
test_logit_list = test_logit_list + list(current_logits)
converted_list = np.array(test_logit_list)
sorted_preds = np.sort(converted_list)
indices = np.argsort(converted_list)
calc_recall = eval_util.calculate_recall(
test_data,
indices,
args.test_recall_max,
args.target_publication,
"Test",
writer,
step,
)
ranked_df = eval_util.create_ranked_results_list(
final_word_ids, sorted_preds, indices, test_data
)
eval_util.save_ranked_df(output_path, "test", ranked_df)
# close writer and exit
writer.close()
print(f"Ranked Data Saved to {output_path / 'results'}!")
print("Done!")
|
the-stack_0_22855 | import pandas as pd
import umap
import umap.plot
import numpy as np
import matplotlib.pyplot as plt
import os
seed = 42 # random seed
xi = 2 # from SOAP hyperparameter optimization
kernel_path = 'SP-avg_kernel_14482_14482.csv' # SOAP similarity kernel
bandgaps_path = os.path.join(
'..', 'opt-bandgaps.csv') # .csv of y properties
#---------------------------------------
# Band gaps and refcodes
df = pd.read_csv(bandgaps_path, delimiter=',')
bg = df['BG_PBE'].values
refcodes = df['refcode'].values
refcodes = [i.split('_')[0] for i in refcodes]
# SOAP similarity kernel
K = pd.read_csv(kernel_path, delimiter=',', header=None).to_numpy()
K[K>1] = 1.0 #avoid floating point issues
# Discretize band gaps for colorbar
bg_class = np.empty(len(bg), dtype=object)
for i, b in enumerate(bg):
if b < 0.5:
bg_class[i] = '[0 eV, 0.5 eV)'
elif b < 1:
bg_class[i] = '[0.5 eV, 1 eV)'
elif b < 2:
bg_class[i] = '[1 eV, 2 eV)'
elif b < 3:
bg_class[i] = '[2 eV, 3 eV)'
elif b < 4:
bg_class[i] = '[3 eV, 4 eV)'
else:
bg_class[i] = '[4 eV, 6.5 eV)'
bg_class = np.array(bg_class)
# Generate distance matrix
D = np.sqrt(2-2*K**xi)
# Perform dimensionality reduction
fit = umap.UMAP(metric='precomputed', random_state=seed)
u = fit.fit(D)
# Make static plot
plt.rcParams["figure.dpi"] = 1000
p = umap.plot.connectivity(u, edge_bundling='hammer', edge_cmap='inferno', width=8500, height=8500)
p.texts[0].set_visible(False)
plt.savefig('umap_connect.png',transparent=True)
|
the-stack_0_22858 | from __future__ import annotations
import itertools
import math
import random
import time
import warnings
from collections import Counter
from typing import Any, Dict, List, Optional, Set, Tuple, Union, TYPE_CHECKING
from contextlib import suppress
from s2clientprotocol import sc2api_pb2 as sc_pb
from .cache import property_cache_forever, property_cache_once_per_frame, property_cache_once_per_frame_no_copy
from .constants import (
FakeEffectID,
abilityid_to_unittypeid,
geyser_ids,
mineral_ids,
TERRAN_TECH_REQUIREMENT,
PROTOSS_TECH_REQUIREMENT,
ZERG_TECH_REQUIREMENT,
ALL_GAS,
EQUIVALENTS_FOR_TECH_PROGRESS,
TERRAN_STRUCTURES_REQUIRE_SCV,
IS_PLACEHOLDER,
)
from .data import ActionResult, Alert, Race, Result, Target, race_gas, race_townhalls, race_worker
from .distances import DistanceCalculation
from .game_data import AbilityData, GameData
from .dicts.unit_trained_from import UNIT_TRAINED_FROM
from .dicts.unit_train_build_abilities import TRAIN_INFO
from .dicts.upgrade_researched_from import UPGRADE_RESEARCHED_FROM
from .dicts.unit_research_abilities import RESEARCH_INFO
# Imports for mypy and pycharm autocomplete as well as sphinx autodocumentation
from .game_state import Blip, EffectData, GameState
from .ids.ability_id import AbilityId
from .ids.unit_typeid import UnitTypeId
from .ids.upgrade_id import UpgradeId
from .pixel_map import PixelMap
from .position import Point2
from .unit import Unit
from .units import Units
from .game_data import Cost
from .unit_command import UnitCommand
from loguru import logger
if TYPE_CHECKING:
from .game_info import GameInfo, Ramp
from .client import Client
class BotAI(DistanceCalculation):
"""Base class for bots."""
EXPANSION_GAP_THRESHOLD = 15
def _initialize_variables(self):
""" Called from main.py internally """
DistanceCalculation.__init__(self)
# Specific opponent bot ID used in sc2ai ladder games http://sc2ai.net/ and on ai arena https://aiarena.net
# The bot ID will stay the same each game so your bot can "adapt" to the opponent
if not hasattr(self, "opponent_id"):
# Prevent overwriting the opponent_id which is set here https://github.com/Hannessa/python-sc2-ladderbot/blob/master/__init__.py#L40
# otherwise set it to None
self.opponent_id: str = None
# Select distance calculation method, see distances.py: _distances_override_functions function
if not hasattr(self, "distance_calculation_method"):
self.distance_calculation_method: int = 2
# Select if the Unit.command should return UnitCommand objects. Set this to True if your bot uses 'self.do(unit(ability, target))'
if not hasattr(self, "unit_command_uses_self_do"):
self.unit_command_uses_self_do: bool = False
# This value will be set to True by main.py in self._prepare_start if game is played in realtime (if true, the bot will have limited time per step)
self.realtime: bool = False
self.base_build: int = -1
self.all_units: Units = Units([], self)
self.units: Units = Units([], self)
self.workers: Units = Units([], self)
self.larva: Units = Units([], self)
self.structures: Units = Units([], self)
self.townhalls: Units = Units([], self)
self.gas_buildings: Units = Units([], self)
self.all_own_units: Units = Units([], self)
self.enemy_units: Units = Units([], self)
self.enemy_structures: Units = Units([], self)
self.all_enemy_units: Units = Units([], self)
self.resources: Units = Units([], self)
self.destructables: Units = Units([], self)
self.watchtowers: Units = Units([], self)
self.mineral_field: Units = Units([], self)
self.vespene_geyser: Units = Units([], self)
self.placeholders: Units = Units([], self)
self.techlab_tags: Set[int] = set()
self.reactor_tags: Set[int] = set()
self.minerals: int = 50
self.vespene: int = 0
self.supply_army: float = 0
self.supply_workers: float = 12 # Doesn't include workers in production
self.supply_cap: float = 15
self.supply_used: float = 12
self.supply_left: float = 3
self.idle_worker_count: int = 0
self.army_count: int = 0
self.warp_gate_count: int = 0
self.actions: List[UnitCommand] = []
self.blips: Set[Blip] = set()
self.race: Race = None
self.enemy_race: Race = None
self._units_created: Counter = Counter()
self._unit_tags_seen_this_game: Set[int] = set()
self._units_previous_map: Dict[int, Unit] = dict()
self._structures_previous_map: Dict[int, Unit] = dict()
self._enemy_units_previous_map: Dict[int, Unit] = dict()
self._enemy_structures_previous_map: Dict[int, Unit] = dict()
self._previous_upgrades: Set[UpgradeId] = set()
self._expansion_positions_list: List[Point2] = []
self._resource_location_to_expansion_position_dict: Dict[Point2, Point2] = {}
self._time_before_step: float = None
self._time_after_step: float = None
self._min_step_time: float = math.inf
self._max_step_time: float = 0
self._last_step_step_time: float = 0
self._total_time_in_on_step: float = 0
self._total_steps_iterations: int = 0
# Internally used to keep track which units received an action in this frame, so that self.train() function does not give the same larva two orders - cleared every frame
self.unit_tags_received_action: Set[int] = set()
@property
def time(self) -> float:
""" Returns time in seconds, assumes the game is played on 'faster' """
return self.state.game_loop / 22.4 # / (1/1.4) * (1/16)
@property
def time_formatted(self) -> str:
""" Returns time as string in min:sec format """
t = self.time
return f"{int(t // 60):02}:{int(t % 60):02}"
@property
def step_time(self) -> Tuple[float, float, float, float]:
""" Returns a tuple of step duration in milliseconds.
First value is the minimum step duration - the shortest the bot ever took
Second value is the average step duration
Third value is the maximum step duration - the longest the bot ever took (including on_start())
Fourth value is the step duration the bot took last iteration
If called in the first iteration, it returns (inf, 0, 0, 0) """
avg_step_duration = (
(self._total_time_in_on_step / self._total_steps_iterations) if self._total_steps_iterations else 0
)
return (
self._min_step_time * 1000,
avg_step_duration * 1000,
self._max_step_time * 1000,
self._last_step_step_time * 1000,
)
@property
def game_info(self) -> GameInfo:
""" See game_info.py """
return self._game_info
@property
def game_data(self) -> GameData:
""" See game_data.py """
return self._game_data
@property
def client(self) -> Client:
""" See client.py """
return self._client
@property
def larva_count(self):
""" Replacement for self.state.common.larva_count https://github.com/Blizzard/s2client-proto/blob/d3d18392f9d7c646067d447df0c936a8ca57d587/s2clientprotocol/sc2api.proto#L614 """
warnings.warn(
"self.larva_count will be removed soon, please use len(self.larva) or self.larva.amount instead",
DeprecationWarning,
stacklevel=2,
)
return len(self.larva)
def alert(self, alert_code: Alert) -> bool:
"""
Check if alert is triggered in the current step.
Possible alerts are listed here https://github.com/Blizzard/s2client-proto/blob/e38efed74c03bec90f74b330ea1adda9215e655f/s2clientprotocol/sc2api.proto#L679-L702
Example use::
from sc2.data import Alert
if self.alert(Alert.AddOnComplete):
print("Addon Complete")
Alert codes::
AlertError
AddOnComplete
BuildingComplete
BuildingUnderAttack
LarvaHatched
MergeComplete
MineralsExhausted
MorphComplete
MothershipComplete
MULEExpired
NuclearLaunchDetected
NukeComplete
NydusWormDetected
ResearchComplete
TrainError
TrainUnitComplete
TrainWorkerComplete
TransformationComplete
UnitUnderAttack
UpgradeComplete
VespeneExhausted
WarpInComplete
:param alert_code:
"""
assert isinstance(alert_code, Alert), f"alert_code {alert_code} is no Alert"
return alert_code.value in self.state.alerts
@property
def start_location(self) -> Point2:
"""
Returns the spawn location of the bot, using the position of the first created townhall.
This will be None if the bot is run on an arcade or custom map that does not feature townhalls at game start.
"""
return self._game_info.player_start_location
@property
def enemy_start_locations(self) -> List[Point2]:
"""Possible start locations for enemies."""
return self._game_info.start_locations
@property
def main_base_ramp(self) -> Ramp:
""" Returns the Ramp instance of the closest main-ramp to start location.
Look in game_info.py for more information about the Ramp class
Example: See terran ramp wall bot
"""
if hasattr(self, "cached_main_base_ramp"):
return self.cached_main_base_ramp
# The reason for len(ramp.upper) in {2, 5} is:
# ParaSite map has 5 upper points, and most other maps have 2 upper points at the main ramp.
# The map Acolyte has 4 upper points at the wrong ramp (which is closest to the start position).
try:
self.cached_main_base_ramp = min(
(ramp for ramp in self.game_info.map_ramps if len(ramp.upper) in {2, 5}),
key=lambda r: self.start_location.distance_to(r.top_center),
)
except ValueError:
# Hardcoded hotfix for Honorgrounds LE map, as that map has a large main base ramp with inbase natural
self.cached_main_base_ramp = min(
(ramp for ramp in self.game_info.map_ramps if len(ramp.upper) in {4, 9}),
key=lambda r: self.start_location.distance_to(r.top_center),
)
return self.cached_main_base_ramp
@property_cache_once_per_frame
def expansion_locations_list(self) -> List[Point2]:
""" Returns a list of expansion positions, not sorted in any way. """
assert (
self._expansion_positions_list
), f"self._find_expansion_locations() has not been run yet, so accessing the list of expansion locations is pointless."
return self._expansion_positions_list
@property_cache_once_per_frame
def expansion_locations_dict(self) -> Dict[Point2, Units]:
"""
Returns dict with the correct expansion position Point2 object as key,
resources as Units (mineral fields and vespene geysers) as value.
Caution: This function is slow. If you only need the expansion locations, use the property above.
"""
assert (
self._expansion_positions_list
), f"self._find_expansion_locations() has not been run yet, so accessing the list of expansion locations is pointless."
expansion_locations: Dict[Point2, Units] = {pos: Units([], self) for pos in self._expansion_positions_list}
for resource in self.resources:
# It may be that some resources are not mapped to an expansion location
exp_position: Point2 = self._resource_location_to_expansion_position_dict.get(resource.position, None)
if exp_position:
assert exp_position in expansion_locations
expansion_locations[exp_position].append(resource)
return expansion_locations
# Deprecated
@property_cache_once_per_frame
def expansion_locations(self) -> Dict[Point2, Units]:
""" Same as the function above. """
assert (
self._expansion_positions_list
), f"self._find_expansion_locations() has not been run yet, so accessing the list of expansion locations is pointless."
warnings.warn(
f"You are using 'self.expansion_locations', please use 'self.expansion_locations_list' (fast) or 'self.expansion_locations_dict' (slow) instead.",
DeprecationWarning,
stacklevel=2,
)
return self.expansion_locations_dict
def _find_expansion_locations(self):
""" Ran once at the start of the game to calculate expansion locations. """
# Idea: create a group for every resource, then merge these groups if
# any resource in a group is closer than a threshold to any resource of another group
# Distance we group resources by
resource_spread_threshold: float = 8.5
geysers: Units = self.vespene_geyser
# Create a group for every resource
resource_groups: List[List[Unit]] = [
[resource]
for resource in self.resources
            if resource.name != "MineralField450"  # don't use low mineral count patches
]
# Loop the merging process as long as we change something
merged_group = True
while merged_group:
merged_group = False
# Check every combination of two groups
for group_a, group_b in itertools.combinations(resource_groups, 2):
# Check if any pair of resource of these groups is closer than threshold together
if any(
resource_a.distance_to(resource_b) <= resource_spread_threshold
for resource_a, resource_b in itertools.product(group_a, group_b)
):
# Remove the single groups and add the merged group
resource_groups.remove(group_a)
resource_groups.remove(group_b)
resource_groups.append(group_a + group_b)
merged_group = True
break
# Distance offsets we apply to center of each resource group to find expansion position
offset_range = 7
offsets = [
(x, y)
for x, y in itertools.product(range(-offset_range, offset_range + 1), repeat=2)
if 4 < math.hypot(x, y) <= 8
]
# Dict we want to return
centers = {}
# For every resource group:
for resources in resource_groups:
# Possible expansion points
amount = len(resources)
# Calculate center, round and add 0.5 because expansion location will have (x.5, y.5)
# coordinates because bases have size 5.
center_x = int(sum(resource.position.x for resource in resources) / amount) + 0.5
center_y = int(sum(resource.position.y for resource in resources) / amount) + 0.5
possible_points = (Point2((offset[0] + center_x, offset[1] + center_y)) for offset in offsets)
# Filter out points that are too near
possible_points = (
point
for point in possible_points
# Check if point can be built on
if self._game_info.placement_grid[point.rounded] == 1
# Check if all resources have enough space to point
and all(
point.distance_to(resource) >= (7 if resource._proto.unit_type in geyser_ids else 6)
for resource in resources
)
)
# Choose best fitting point
result: Point2 = min(
possible_points, key=lambda point: sum(point.distance_to(resource) for resource in resources)
)
centers[result] = resources
# Put all expansion locations in a list
self._expansion_positions_list.append(result)
# Maps all resource positions to the expansion position
for resource in resources:
self._resource_location_to_expansion_position_dict[resource.position] = result
@property
def units_created(self) -> Counter:
""" Returns a Counter for all your units and buildings you have created so far.
This may be used for statistics (at the end of the game) or for strategic decision making.
CAUTION: This does not properly work at the moment for morphing units and structures. Please use the 'on_unit_type_changed' event to add these morphing unit types manually to 'self._units_created'.
Issues would arrise in e.g. siege tank morphing to sieged tank, and then morphing back (suddenly the counter counts 2 tanks have been created).
Examples::
# Give attack command to enemy base every time 10 marines have been trained
async def on_unit_created(self, unit: Unit):
if unit.type_id == UnitTypeId.MARINE:
if self.units_created[MARINE] % 10 == 0:
for marine in self.units(UnitTypeId.MARINE):
marine.attack(self.enemy_start_locations[0])
"""
return self._units_created
def _correct_zerg_supply(self):
""" The client incorrectly rounds zerg supply down instead of up (see
https://github.com/Blizzard/s2client-proto/issues/123), so self.supply_used
and friends return the wrong value when there are an odd number of zerglings
and banelings. This function corrects the bad values. """
# TODO: remove when Blizzard/sc2client-proto#123 gets fixed.
half_supply_units = {
UnitTypeId.ZERGLING,
UnitTypeId.ZERGLINGBURROWED,
UnitTypeId.BANELING,
UnitTypeId.BANELINGBURROWED,
UnitTypeId.BANELINGCOCOON,
}
correction = self.units(half_supply_units).amount % 2
self.supply_used += correction
self.supply_army += correction
self.supply_left -= correction
async def get_available_abilities(
self, units: Union[List[Unit], Units], ignore_resource_requirements: bool = False
) -> List[List[AbilityId]]:
""" Returns available abilities of one or more units. Right now only checks cooldown, energy cost, and whether the ability has been researched.
Examples::
units_abilities = await self.get_available_abilities(self.units)
or::
units_abilities = await self.get_available_abilities([self.units.random])
:param units:
:param ignore_resource_requirements: """
return await self._client.query_available_abilities(units, ignore_resource_requirements)
async def expand_now(
self, building: UnitTypeId = None, max_distance: float = 10, location: Optional[Point2] = None
):
""" Finds the next possible expansion via 'self.get_next_expansion()'. If the target expansion is blocked (e.g. an enemy unit), it will misplace the expansion.
:param building:
:param max_distance:
:param location: """
if not building:
# self.race is never Race.Random
start_townhall_type = {
Race.Protoss: UnitTypeId.NEXUS,
Race.Terran: UnitTypeId.COMMANDCENTER,
Race.Zerg: UnitTypeId.HATCHERY,
}
building = start_townhall_type[self.race]
assert isinstance(building, UnitTypeId), f"{building} is no UnitTypeId"
if not location:
location = await self.get_next_expansion()
if not location:
# All expansions are used up or mined out
logger.warning("Trying to expand_now() but bot is out of locations to expand to")
return
await self.build(building, near=location, max_distance=max_distance, random_alternative=False, placement_step=1)
async def get_next_expansion(self) -> Optional[Point2]:
"""Find next expansion location."""
closest = None
distance = math.inf
for el in self.expansion_locations_list:
def is_near_to_expansion(t):
return t.distance_to(el) < self.EXPANSION_GAP_THRESHOLD
if any(map(is_near_to_expansion, self.townhalls)):
# already taken
continue
startp = self._game_info.player_start_location
d = await self._client.query_pathing(startp, el)
if d is None:
continue
if d < distance:
distance = d
closest = el
return closest
async def distribute_workers(self, resource_ratio: float = 2):
"""
Distributes workers across all the bases taken.
Keyword `resource_ratio` takes a float. If the current minerals to gas
        ratio is bigger than `resource_ratio`, this function prefers filling gas_buildings
        first; if it is lower, it will prefer sending workers to minerals first.
NOTE: This function is far from optimal, if you really want to have
refined worker control, you should write your own distribution function.
For example long distance mining control and moving workers if a base was killed
are not being handled.
WARNING: This is quite slow when there are lots of workers or multiple bases.
:param resource_ratio: """
if not self.mineral_field or not self.workers or not self.townhalls.ready:
return
worker_pool = [worker for worker in self.workers.idle]
bases = self.townhalls.ready
gas_buildings = self.gas_buildings.ready
# list of places that need more workers
deficit_mining_places = []
for mining_place in bases | gas_buildings:
difference = mining_place.surplus_harvesters
# perfect amount of workers, skip mining place
if not difference:
continue
if mining_place.has_vespene:
# get all workers that target the gas extraction site
# or are on their way back from it
local_workers = self.workers.filter(
lambda unit: unit.order_target == mining_place.tag
or (unit.is_carrying_vespene and unit.order_target == bases.closest_to(mining_place).tag)
)
else:
# get tags of minerals around expansion
local_minerals_tags = {
mineral.tag for mineral in self.mineral_field if mineral.distance_to(mining_place) <= 8
}
# get all target tags a worker can have
# tags of the minerals he could mine at that base
# get workers that work at that gather site
local_workers = self.workers.filter(
lambda unit: unit.order_target in local_minerals_tags
or (unit.is_carrying_minerals and unit.order_target == mining_place.tag)
)
# too many workers
if difference > 0:
for worker in local_workers[:difference]:
worker_pool.append(worker)
# too few workers
# add mining place to deficit bases for every missing worker
else:
deficit_mining_places += [mining_place for _ in range(-difference)]
# prepare all minerals near a base if we have too many workers
# and need to send them to the closest patch
if len(worker_pool) > len(deficit_mining_places):
all_minerals_near_base = [
mineral
for mineral in self.mineral_field
if any(mineral.distance_to(base) <= 8 for base in self.townhalls.ready)
]
# distribute every worker in the pool
for worker in worker_pool:
# as long as have workers and mining places
if deficit_mining_places:
# choose only mineral fields first if current mineral to gas ratio is less than target ratio
if self.vespene and self.minerals / self.vespene < resource_ratio:
possible_mining_places = [place for place in deficit_mining_places if not place.vespene_contents]
# else prefer gas
else:
possible_mining_places = [place for place in deficit_mining_places if place.vespene_contents]
# if preferred type is not available any more, get all other places
if not possible_mining_places:
possible_mining_places = deficit_mining_places
# find closest mining place
current_place = min(deficit_mining_places, key=lambda place: place.distance_to(worker))
# remove it from the list
deficit_mining_places.remove(current_place)
# if current place is a gas extraction site, go there
if current_place.vespene_contents:
worker.gather(current_place)
# if current place is a gas extraction site,
# go to the mineral field that is near and has the most minerals left
else:
local_minerals = (
mineral for mineral in self.mineral_field if mineral.distance_to(current_place) <= 8
)
# local_minerals can be empty if townhall is misplaced
target_mineral = max(local_minerals, key=lambda mineral: mineral.mineral_contents, default=None)
if target_mineral:
worker.gather(target_mineral)
# more workers to distribute than free mining spots
# send to closest if worker is doing nothing
elif worker.is_idle and all_minerals_near_base:
target_mineral = min(all_minerals_near_base, key=lambda mineral: mineral.distance_to(worker))
worker.gather(target_mineral)
else:
# there are no deficit mining places and worker is not idle
                # so don't move him
pass
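    # Illustrative usage (not from the original docstring): a bot typically calls this
    # once per game step, e.g.
    #
    #     async def on_step(self, iteration: int):
    #         await self.distribute_workers(resource_ratio=2)
    #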
@property
def owned_expansions(self) -> Dict[Point2, Unit]:
"""List of expansions owned by the player."""
owned = {}
for el in self.expansion_locations:
def is_near_to_expansion(t):
return t.distance_to(el) < self.EXPANSION_GAP_THRESHOLD
th = next((x for x in self.townhalls if is_near_to_expansion(x)), None)
if th:
owned[el] = th
return owned
def calculate_supply_cost(self, unit_type: UnitTypeId) -> float:
"""
This function calculates the required supply to train or morph a unit.
The total supply of a baneling is 0.5, but a zergling already uses up 0.5 supply, so the morph supply cost is 0.
The total supply of a ravager is 3, but a roach already uses up 2 supply, so the morph supply cost is 1.
Zerglings pop in pairs from a single larva, but the morph command still requires 1 free supply, so this function returns 1 for zerglings.
Example::
roach_supply_cost = self.calculate_supply_cost(UnitTypeId.ROACH) # Is 2
ravager_supply_cost = self.calculate_supply_cost(UnitTypeId.RAVAGER) # Is 1
baneling_supply_cost = self.calculate_supply_cost(UnitTypeId.BANELING) # Is 0
:param unit_type: """
if unit_type in {UnitTypeId.ZERGLING}:
return 1
unit_supply_cost = self._game_data.units[unit_type.value]._proto.food_required
if unit_supply_cost > 0 and unit_type in UNIT_TRAINED_FROM and len(UNIT_TRAINED_FROM[unit_type]) == 1:
for producer in UNIT_TRAINED_FROM[unit_type]: # type: UnitTypeId
producer_unit_data = self.game_data.units[producer.value]
if producer_unit_data._proto.food_required <= unit_supply_cost:
producer_supply_cost = producer_unit_data._proto.food_required
unit_supply_cost -= producer_supply_cost
return unit_supply_cost
def can_feed(self, unit_type: UnitTypeId) -> bool:
""" Checks if you have enough free supply to build the unit
Example::
cc = self.townhalls.idle.random_or(None)
# self.townhalls can be empty or there are no idle townhalls
if cc and self.can_feed(UnitTypeId.SCV):
cc.train(UnitTypeId.SCV)
:param unit_type: """
required = self.calculate_supply_cost(unit_type)
# "required <= 0" in case self.supply_left is negative
return required <= 0 or self.supply_left >= required
def calculate_unit_value(self, unit_type: UnitTypeId) -> Cost:
"""
Unlike 'calculate_cost' below, this function returns the total value of a unit as reported by the API (e.g. the 'resources lost' value counted on kill).
Examples::
self.calculate_unit_value(UnitTypeId.ORBITALCOMMAND) == Cost(550, 0)
self.calculate_unit_value(UnitTypeId.RAVAGER) == Cost(100, 100)
self.calculate_unit_value(UnitTypeId.ARCHON) == Cost(175, 275)
:param unit_type:
"""
unit_data = self.game_data.units[unit_type.value]
return Cost(unit_data._proto.mineral_cost, unit_data._proto.vespene_cost)
def calculate_cost(self, item_id: Union[UnitTypeId, UpgradeId, AbilityId]) -> Cost:
"""
Calculate the required build, train or morph cost of a unit. It is recommended to use the UnitTypeId instead of the AbilityId that creates the unit.
The total cost to create a ravager is 100/100, but the actual morph cost from roach to ravager is only 25/75, so this function returns 25/75.
For example, instead of::
self.calculate_cost(AbilityId.UPGRADETOORBITAL_ORBITALCOMMAND)
use::
self.calculate_cost(UnitTypeId.ORBITALCOMMAND)
More examples::
from sc2.game_data import Cost
self.calculate_cost(UnitTypeId.BROODLORD) == Cost(150, 150)
self.calculate_cost(UnitTypeId.RAVAGER) == Cost(25, 75)
self.calculate_cost(UnitTypeId.BANELING) == Cost(25, 25)
self.calculate_cost(UnitTypeId.ORBITALCOMMAND) == Cost(150, 0)
self.calculate_cost(UnitTypeId.REACTOR) == Cost(50, 50)
self.calculate_cost(UnitTypeId.TECHLAB) == Cost(50, 25)
self.calculate_cost(UnitTypeId.QUEEN) == Cost(150, 0)
self.calculate_cost(UnitTypeId.HATCHERY) == Cost(300, 0)
self.calculate_cost(UnitTypeId.LAIR) == Cost(150, 100)
self.calculate_cost(UnitTypeId.HIVE) == Cost(200, 150)
:param item_id:
"""
if isinstance(item_id, UnitTypeId):
# Fix cost for reactor and techlab where the API returns 0 for both
if item_id in {UnitTypeId.REACTOR, UnitTypeId.TECHLAB, UnitTypeId.ARCHON}:
if item_id == UnitTypeId.REACTOR:
return Cost(50, 50)
elif item_id == UnitTypeId.TECHLAB:
return Cost(50, 25)
elif item_id == UnitTypeId.ARCHON:
return self.calculate_unit_value(UnitTypeId.ARCHON)
unit_data = self._game_data.units[item_id.value]
# The cost of structure morphs is already calculated correctly by 'calculate_ability_cost'
cost = self._game_data.calculate_ability_cost(unit_data.creation_ability)
# Fix non-structure morph cost: check if is morph, then subtract the original cost
unit_supply_cost = unit_data._proto.food_required
if unit_supply_cost > 0 and item_id in UNIT_TRAINED_FROM and len(UNIT_TRAINED_FROM[item_id]) == 1:
for producer in UNIT_TRAINED_FROM[item_id]: # type: UnitTypeId
producer_unit_data = self.game_data.units[producer.value]
if 0 < producer_unit_data._proto.food_required <= unit_supply_cost:
if producer == UnitTypeId.ZERGLING:
producer_cost = Cost(25, 0)
else:
producer_cost = self.game_data.calculate_ability_cost(producer_unit_data.creation_ability)
cost = cost - producer_cost
elif isinstance(item_id, UpgradeId):
cost = self._game_data.upgrades[item_id.value].cost
else:
# Is already AbilityId
cost = self._game_data.calculate_ability_cost(item_id)
return cost
def can_afford(self, item_id: Union[UnitTypeId, UpgradeId, AbilityId], check_supply_cost: bool = True) -> bool:
""" Tests if the player has enough resources to build a unit or structure.
Example::
cc = self.townhalls.idle.random_or(None)
# self.townhalls can be empty or there are no idle townhalls
if cc and self.can_afford(UnitTypeId.SCV):
cc.train(UnitTypeId.SCV)
Example::
# Current state: we have 150 minerals and one command center and a barracks
can_afford_morph = self.can_afford(UnitTypeId.ORBITALCOMMAND, check_supply_cost=False)
# Will be 'True' even though the API reports an orbital as worth 550 minerals, because the morph cost from a command center is only 150 minerals
:param item_id:
:param check_supply_cost: """
cost = self.calculate_cost(item_id)
if cost.minerals > self.minerals or cost.vespene > self.vespene:
return False
if check_supply_cost and isinstance(item_id, UnitTypeId):
supply_cost = self.calculate_supply_cost(item_id)
if supply_cost and supply_cost > self.supply_left:
return False
return True
async def can_cast(
self,
unit: Unit,
ability_id: AbilityId,
target: Optional[Union[Unit, Point2]] = None,
only_check_energy_and_cooldown: bool = False,
cached_abilities_of_unit: List[AbilityId] = None,
) -> bool:
""" Tests if a unit has an ability available and enough energy to cast it.
Example::
stalkers = self.units(UnitTypeId.STALKER)
stalkers_that_can_blink = [
stalker
for stalker in stalkers
if await self.can_cast(stalker, AbilityId.EFFECT_BLINK_STALKER, only_check_energy_and_cooldown=True)
]
See data_pb2.py (line 161) to make sense of the target values 1-5.
:param unit:
:param ability_id:
:param target:
:param only_check_energy_and_cooldown:
:param cached_abilities_of_unit: """
assert isinstance(unit, Unit), f"{unit} is no Unit object"
assert isinstance(ability_id, AbilityId), f"{ability_id} is no AbilityId"
assert isinstance(target, (type(None), Unit, Point2))
# check if unit has enough energy to cast or if ability is on cooldown
if cached_abilities_of_unit:
abilities = cached_abilities_of_unit
else:
abilities = (await self.get_available_abilities([unit], ignore_resource_requirements=False))[0]
if ability_id in abilities:
if only_check_energy_and_cooldown:
return True
cast_range = self._game_data.abilities[ability_id.value]._proto.cast_range
ability_target = self._game_data.abilities[ability_id.value]._proto.target
# Check if target is in range (or is a self cast like stimpack)
if (
ability_target == 1
or ability_target == Target.PointOrNone.value
and isinstance(target, Point2)
and unit.distance_to(target) <= unit.radius + target.radius + cast_range
): # can't replace 1 with "Target.None.value" because ".None" doesn't seem to be a valid enum name
return True
# Check if able to use ability on a unit
elif (
ability_target in {Target.Unit.value, Target.PointOrUnit.value}
and isinstance(target, Unit)
and unit.distance_to(target) <= unit.radius + target.radius + cast_range
):
return True
# Check if able to use ability on a position
elif (
ability_target in {Target.Point.value, Target.PointOrUnit.value}
and isinstance(target, Point2)
and unit.distance_to(target) <= unit.radius + cast_range
):
return True
return False
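# Illustrative usage sketch (not part of the original file): when checking many units, the
# available abilities can be fetched once and passed via cached_abilities_of_unit to avoid one
# query per unit; that get_available_abilities accepts a whole Units collection (returning one
# list per unit) is an assumption based on the single-unit call above:
#
#     stalkers = self.units(UnitTypeId.STALKER)
#     abilities_per_unit = await self.get_available_abilities(stalkers, ignore_resource_requirements=False)
#     for stalker, abilities in zip(stalkers, abilities_per_unit):
#         if await self.can_cast(
#             stalker,
#             AbilityId.EFFECT_BLINK_STALKER,
#             only_check_energy_and_cooldown=True,
#             cached_abilities_of_unit=abilities,
#         ):
#             stalker(AbilityId.EFFECT_BLINK_STALKER, self.game_info.map_center)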
def select_build_worker(self, pos: Union[Unit, Point2], force: bool = False) -> Optional[Unit]:
"""Select a worker to build a building with.
Example::
barracks_placement_position = self.main_base_ramp.barracks_correct_placement
worker = self.select_build_worker(barracks_placement_position)
# Can return None
if worker:
worker.build(UnitTypeId.BARRACKS, barracks_placement_position)
:param pos:
:param force: """
workers = (
self.workers.filter(lambda w: (w.is_gathering or w.is_idle) and w.distance_to(pos) < 20) or self.workers
)
if workers:
for worker in workers.sorted_by_distance_to(pos).prefer_idle:
if (
worker.tag not in self.unit_tags_received_action
and not worker.orders
or len(worker.orders) == 1
and worker.orders[0].ability.id in {AbilityId.MOVE, AbilityId.HARVEST_GATHER}
):
return worker
return workers.random if force else None
async def can_place_single(
self, building: Union[AbilityData, AbilityId, UnitTypeId], position: Union[Point2, tuple]
) -> bool:
""" Checks the placement for only one position.
This function might get removed in favor of the function below (can_place). """
r = await self._client._query_building_placement_fast(building, [position])
return r[0]
async def can_place(
self, building: Union[AbilityData, AbilityId, UnitTypeId], positions: List[Union[Point2, tuple, list]]
) -> List[bool]:
""" Tests if a building can be placed in the given locations.
Example::
barracks_placement_position = self.main_base_ramp.barracks_correct_placement
worker = self.select_build_worker(barracks_placement_position)
# Can return None
if worker and (await self.can_place(UnitTypeId.BARRACKS, [barracks_placement_position]))[0]:
worker.build(UnitTypeId.BARRACKS, barracks_placement_position)
:param building:
:param positions: """
building_type = type(building)
assert type(building) in {AbilityData, AbilityId, UnitTypeId}, f"{building}, {building_type}"
if building_type == UnitTypeId:
building = self._game_data.units[building.value].creation_ability
elif building_type == AbilityId:
building = self._game_data.abilities[building.value]
if isinstance(positions, (Point2, tuple)):
return await self.can_place_single(building, positions)
else:
assert isinstance(positions, list), f"Expected an iterable (list, tuple), but was: {positions}"
assert isinstance(
positions[0], (Point2, tuple, list)
), f"List is expected to have Point2, tuples or lists, but instead had: {positions[0]} {type(positions[0])}"
return await self._client._query_building_placement_fast(building, positions)
async def find_placement(
self,
building: UnitTypeId,
near: Union[Unit, Point2],
max_distance: int = 20,
random_alternative: bool = True,
placement_step: int = 2,
addon_place: bool = False,
) -> Optional[Point2]:
""" Finds a placement location for building.
Example::
if self.townhalls:
cc = self.townhalls[0]
depot_position = await self.find_placement(UnitTypeId.SUPPLYDEPOT, near=cc)
:param building:
:param near:
:param max_distance:
:param random_alternative:
:param placement_step:
:param addon_place: """
assert isinstance(building, (AbilityId, UnitTypeId))
assert isinstance(near, Point2), f"{near} is no Point2 object"
if isinstance(building, UnitTypeId):
building = self._game_data.units[building.value].creation_ability
else: # AbilityId
building = self._game_data.abilities[building.value]
if await self.can_place(building, near) and (
not addon_place or await self.can_place(UnitTypeId.SUPPLYDEPOT, near.offset((2.5, -0.5)))
):
return near
if max_distance == 0:
return None
for distance in range(placement_step, max_distance, placement_step):
possible_positions = [
Point2(p).offset(near).to2
for p in (
[(dx, -distance) for dx in range(-distance, distance + 1, placement_step)]
+ [(dx, distance) for dx in range(-distance, distance + 1, placement_step)]
+ [(-distance, dy) for dy in range(-distance, distance + 1, placement_step)]
+ [(distance, dy) for dy in range(-distance, distance + 1, placement_step)]
)
]
res = await self._client.query_building_placement(building, possible_positions)
possible = [p for r, p in zip(res, possible_positions) if r == ActionResult.Success]
if addon_place:
res = await self._client.query_building_placement(
self._game_data.units[UnitTypeId.SUPPLYDEPOT.value].creation_ability,
[p.offset((2.5, -0.5)) for p in possible],
)
possible = [p for r, p in zip(res, possible) if r == ActionResult.Success]
if not possible:
continue
if random_alternative:
return random.choice(possible)
else:
return min(possible, key=lambda p: p.distance_to_point2(near))
return None
# TODO: improve using cache per frame
def already_pending_upgrade(self, upgrade_type: UpgradeId) -> float:
""" Check if an upgrade is being researched
Return values are::
0 # not started
0 < x < 1 # researching
1 # completed
Example::
stim_completion_percentage = self.already_pending_upgrade(UpgradeId.STIMPACK)
:param upgrade_type:
"""
assert isinstance(upgrade_type, UpgradeId), f"{upgrade_type} is no UpgradeId"
if upgrade_type in self.state.upgrades:
return 1
creationAbilityID = self._game_data.upgrades[upgrade_type.value].research_ability.exact_id
for structure in self.structures.filter(lambda unit: unit.is_ready):
for order in structure.orders:
if order.ability.exact_id == creationAbilityID:
return order.progress
return 0
@property_cache_once_per_frame_no_copy
def _abilities_all_units(self) -> Tuple[Counter, Dict[UnitTypeId, float]]:
""" Cache for the already_pending function, includes protoss units warping in,
all units in production and all structures, and all morphs """
abilities_amount = Counter()
max_build_progress: Dict[UnitTypeId, float] = {}
for unit in self.units + self.structures: # type: Unit
for order in unit.orders:
abilities_amount[order.ability] += 1
if not unit.is_ready:
if self.race != Race.Terran or not unit.is_structure:
# If an SCV is constructing a building, already_pending would count this structure twice
# (once from the SCV order, and once from "not structure.is_ready")
creation_ability: AbilityData = self._game_data.units[unit.type_id.value].creation_ability
abilities_amount[creation_ability] += 1
max_build_progress[creation_ability] = max(
max_build_progress.get(creation_ability, 0), unit.build_progress
)
return abilities_amount, max_build_progress
def structure_type_build_progress(self, structure_type: Union[UnitTypeId, int]) -> float:
"""
Returns the build progress of a structure type.
Return range: 0 <= x <= 1 where
0: no such structure exists
0 < x < 1: at least one structure is under construction, returns the progress of the one with the highest progress
1: we have at least one such structure complete
Example::
# Assuming you have one barracks building at 0.5 build progress:
progress = self.structure_type_build_progress(UnitTypeId.BARRACKS)
print(progress)
# This prints out 0.5
# If you want to save up money for mutalisks, you can now save up once the spire is nearly completed:
spire_almost_completed: bool = self.structure_type_build_progress(UnitTypeId.SPIRE) > 0.75
# If you have a Hive completed but no lair, this function returns 1.0 for the following:
self.structure_type_build_progress(UnitTypeId.LAIR)
# Assume you have 2 command centers in production, one has 0.5 build_progress and the other 0.2, the following returns 0.5
highest_progress_of_command_center: float = self.structure_type_build_progress(UnitTypeId.COMMANDCENTER)
:param structure_type:
"""
assert isinstance(
structure_type, (int, UnitTypeId)
), f"Needs to be int or UnitTypeId, but was: {type(structure_type)}"
if isinstance(structure_type, int):
structure_type_value: int = structure_type
structure_type = UnitTypeId(structure_type_value)
else:
structure_type_value = structure_type.value
assert structure_type_value, f"structure_type can not be 0 or NOTAUNIT, but was: {structure_type_value}"
equiv_values: Set[int] = {structure_type_value} | {
s_type.value for s_type in EQUIVALENTS_FOR_TECH_PROGRESS.get(structure_type, set())
}
# SUPPLYDEPOTDROP is not in self._game_data.units, so bot_ai should not check the build progress via creation ability (worker abilities)
if structure_type_value not in self._game_data.units:
return max([s.build_progress for s in self.structures if s._proto.unit_type in equiv_values], default=0)
creation_ability: AbilityData = self._game_data.units[structure_type_value].creation_ability
max_value = max(
[s.build_progress for s in self.structures if s._proto.unit_type in equiv_values]
+ [self._abilities_all_units[1].get(creation_ability, 0)],
default=0,
)
return max_value
def tech_requirement_progress(self, structure_type: UnitTypeId) -> float:
""" Returns the tech requirement progress for a specific building
Example::
# Current state: supply depot is at 50% completion
tech_requirement = self.tech_requirement_progress(UnitTypeId.BARRACKS)
print(tech_requirement) # Prints 0.5 because supply depot is half way done
Example::
# Current state: your bot has one hive, no lair
tech_requirement = self.tech_requirement_progress(UnitTypeId.HYDRALISKDEN)
print(tech_requirement) # Prints 1 because a hive exists even though only a lair is required
Example::
# Current state: One factory is flying and one is half way done
tech_requirement = self.tech_requirement_progress(UnitTypeId.STARPORT)
print(tech_requirement) # Prints 1 because even though the type id of the flying factory is different, it still has build progress of 1 and thus tech requirement is completed
:param structure_type: """
race_dict = {
Race.Protoss: PROTOSS_TECH_REQUIREMENT,
Race.Terran: TERRAN_TECH_REQUIREMENT,
Race.Zerg: ZERG_TECH_REQUIREMENT,
}
unit_info_id = race_dict[self.race][structure_type]
unit_info_id_value = unit_info_id.value
# The following commented out line is unreliable for ghost / thor as they return 0 which is incorrect
# unit_info_id_value = self._game_data.units[structure_type.value]._proto.tech_requirement
if not unit_info_id_value: # Equivalent to "if unit_info_id_value == 0:"
return 1
progresses: List[float] = [self.structure_type_build_progress(unit_info_id_value)]
for equiv_structure in EQUIVALENTS_FOR_TECH_PROGRESS.get(unit_info_id, []):
progresses.append(self.structure_type_build_progress(equiv_structure.value))
return max(progresses)
def already_pending(self, unit_type: Union[UpgradeId, UnitTypeId]) -> float:
"""
Returns the number of buildings or units of the given type that are already in progress,
including workers that are en route to build a structure. This also includes queued
orders for workers and the build queues of buildings.
Example::
amount_of_scv_in_production: int = self.already_pending(UnitTypeId.SCV)
amount_of_CCs_in_queue_and_production: int = self.already_pending(UnitTypeId.COMMANDCENTER)
amount_of_lairs_morphing: int = self.already_pending(UnitTypeId.LAIR)
:param unit_type:
"""
if isinstance(unit_type, UpgradeId):
return self.already_pending_upgrade(unit_type)
ability = self._game_data.units[unit_type.value].creation_ability
return self._abilities_all_units[0][ability]
@property_cache_once_per_frame_no_copy
def _worker_orders(self) -> Counter:
""" This function is used internally, do not use! It is to store all worker abilities. """
abilities_amount = Counter()
structures_in_production: Set[Union[Point2, int]] = set()
for structure in self.structures:
if structure.type_id in TERRAN_STRUCTURES_REQUIRE_SCV:
structures_in_production.add(structure.position)
structures_in_production.add(structure.tag)
for worker in self.workers:
for order in worker.orders:
# Skip if the SCV is constructing (not isinstance(order.target, int))
# or resuming construction (isinstance(order.target, int))
is_int = isinstance(order.target, int)
if (
is_int
and order.target in structures_in_production
or not is_int
and Point2.from_proto(order.target) in structures_in_production
):
continue
abilities_amount[order.ability] += 1
return abilities_amount
def worker_en_route_to_build(self, unit_type: UnitTypeId) -> float:
""" This function counts how many workers are on the way to start the construction a building.
Warning: this function may change its name in the future!
New function. Please report any bugs!
:param unit_type: """
ability = self._game_data.units[unit_type.value].creation_ability
return self._worker_orders[ability]
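# Illustrative usage sketch (not part of the original file): this counter can gate new build
# orders so the bot does not send a second worker while one is already walking to the site;
# the depot trigger and build location are assumptions for the example:
#
#     if (
#         self.supply_left < 3
#         and self.can_afford(UnitTypeId.SUPPLYDEPOT)
#         and self.worker_en_route_to_build(UnitTypeId.SUPPLYDEPOT) == 0
#     ):
#         await self.build(UnitTypeId.SUPPLYDEPOT, near=self.start_location)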
@property_cache_once_per_frame
def structures_without_construction_SCVs(self) -> Units:
""" Returns all structures that do not have an SCV constructing it.
Warning: this function may move to become a Units filter.
New function. Please report any bugs! """
worker_targets: Set[Union[int, Point2]] = set()
for worker in self.workers:
# Ignore repairing workers
if not worker.is_constructing_scv:
continue
for order in worker.orders:
# When a construction is resumed, the worker.orders[0].target is the tag of the structure, else it is a Point2
target = order.target
if isinstance(target, int):
worker_targets.add(target)
else:
worker_targets.add(Point2.from_proto(target))
return self.structures.filter(
lambda structure: structure.build_progress < 1
# Redundant check?
and structure.type_id in TERRAN_STRUCTURES_REQUIRE_SCV
and structure.position not in worker_targets
and structure.tag not in worker_targets
and structure.tag in self._structures_previous_map
and self._structures_previous_map[structure.tag].build_progress == structure.build_progress
)
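# Illustrative usage sketch (not part of the original file): a Terran bot could resume
# construction of these abandoned structures by sending a nearby worker; assuming Unit.smart
# issues a right-click order (which resumes construction) as in python-sc2:
#
#     for structure in self.structures_without_construction_SCVs:
#         candidates = self.workers.filter(lambda w: w.is_gathering or w.is_idle)
#         if candidates:
#             candidates.sorted_by_distance_to(structure).first.smart(structure)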
async def build(
self,
building: UnitTypeId,
near: Union[Unit, Point2],
max_distance: int = 20,
build_worker: Optional[Unit] = None,
random_alternative: bool = True,
placement_step: int = 2,
) -> bool:
""" Not recommended as this function checks many positions if it "can place" on them until it found a valid
position. Also if the given position is not placeable, this function tries to find a nearby position to place
the structure. Then uses 'self.do' to give the worker the order to start the construction.
:param building:
:param near:
:param max_distance:
:param build_worker:
:param random_alternative:
:param placement_step: """
assert isinstance(near, (Unit, Point2))
if not self.can_afford(building):
return False
p = None
gas_buildings = {UnitTypeId.EXTRACTOR, UnitTypeId.ASSIMILATOR, UnitTypeId.REFINERY}
if isinstance(near, Unit) and building not in gas_buildings:
near = near.position
if isinstance(near, Point2):
near = near.to2
if isinstance(near, Point2):
p = await self.find_placement(building, near, max_distance, random_alternative, placement_step)
if p is None:
return False
builder = build_worker or self.select_build_worker(near)
if builder is None:
return False
if building in gas_buildings:
builder.build_gas(near)
return True
self.do(builder.build(building, p), subtract_cost=True, ignore_warning=True)
return True
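# Illustrative usage sketch (not part of the original file): the boolean return value can gate
# further spending in the same frame; the placement offset below is an arbitrary assumption:
#
#     if self.can_afford(UnitTypeId.BARRACKS) and self.tech_requirement_progress(UnitTypeId.BARRACKS) == 1:
#         built = await self.build(
#             UnitTypeId.BARRACKS, near=self.start_location.towards(self.game_info.map_center, 8)
#         )
#         if not built:
#             logger.warning("No valid placement or free worker found for barracks")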
def train(
self, unit_type: UnitTypeId, amount: int = 1, closest_to: Point2 = None, train_only_idle_buildings: bool = True
) -> int:
""" Trains a specified number of units. Trains only one if amount is not specified.
Warning: currently has issues with warp gate warp ins
New function. Please report any bugs!
Example Zerg::
self.train(UnitTypeId.QUEEN, 5)
# This should queue 5 queens in 5 different townhalls if you have enough townhalls, enough minerals and enough free supply left
Example Terran::
# Assuming you have 2 idle barracks with reactors, one barracks without addon and one with techlab
# It should only queue 4 marines in the 2 idle barracks with reactors
self.train(UnitTypeId.MARINE, 4)
Example distance to::
# If you want to train based on distance to a certain point, you can use "closest_to"
self.train(UnitTypeId.MARINE, 4, closest_to = self.game_info.map_center)
:param unit_type:
:param amount:
:param closest_to:
:param train_only_idle_buildings: """
# Tech requirement not met
if self.tech_requirement_progress(unit_type) < 1:
race_dict = {
Race.Protoss: PROTOSS_TECH_REQUIREMENT,
Race.Terran: TERRAN_TECH_REQUIREMENT,
Race.Zerg: ZERG_TECH_REQUIREMENT,
}
unit_info_id = race_dict[self.race][unit_type]
logger.warning(
"{} Trying to produce unit {} in self.train() but tech requirement is not met: {}".format(
self.time_formatted, unit_type, unit_info_id
)
)
return 0
# Not affordable
if not self.can_afford(unit_type):
return 0
trained_amount = 0
# All train structure types: e.g. a queen can be made from a hatchery, lair or hive
train_structure_type: Set[UnitTypeId] = UNIT_TRAINED_FROM[unit_type]
train_structures = self.structures if self.race != Race.Zerg else self.structures | self.larva
requires_techlab = any(
TRAIN_INFO[structure_type][unit_type].get("requires_techlab", False)
for structure_type in train_structure_type
)
is_protoss = self.race == Race.Protoss
is_terran = self.race == Race.Terran
can_have_addons = any(
u in train_structure_type for u in {UnitTypeId.BARRACKS, UnitTypeId.FACTORY, UnitTypeId.STARPORT}
)
# Sort structures closest to a point
if closest_to is not None:
train_structures = train_structures.sorted_by_distance_to(closest_to)
elif can_have_addons:
# This should sort the structures in ascending order: first structures with reactor, then naked, then with techlab
train_structures = train_structures.sorted(
key=lambda structure: -1 * (structure.add_on_tag in self.reactor_tags)
+ 1 * (structure.add_on_tag in self.techlab_tags)
)
structure: Unit
for structure in train_structures:
# Exit early if we can't afford
if not self.can_afford(unit_type):
return trained_amount
if (
# If structure hasn't received an action/order this frame
structure.tag not in self.unit_tags_received_action
# If structure can train this unit at all
and structure.type_id in train_structure_type
# Structure has to be completed to be able to train
and structure.build_progress == 1
# If structure is protoss, it needs to be powered to train
and (not is_protoss or structure.is_powered)
# Either parameter "train_only_idle_buildings" is False or structure is idle or structure has less than 2 orders and has reactor
and (
not train_only_idle_buildings
or len(structure.orders) < 1 + int(structure.add_on_tag in self.reactor_tags)
)
# If structure type_id does not accept addons, it cant require a techlab
# Else we have to check if building has techlab as addon
and (not requires_techlab or structure.add_on_tag in self.techlab_tags)
):
# Warp in at location
# TODO: find fast warp in locations either random location or closest to the given parameter "closest_to"
# TODO: find out which pylons have fast warp in by checking distance to nexus and warpgates.ready
if structure.type_id == UnitTypeId.WARPGATE:
pylons = self.structures(UnitTypeId.PYLON)
location = pylons.random.position.random_on_distance(4)
successfully_trained = structure.warp_in(unit_type, location)
else:
# Normal train a unit from larva or inside a structure
successfully_trained = self.do(
structure.train(unit_type), subtract_cost=True, subtract_supply=True, ignore_warning=True
)
# Check if structure has reactor: queue same unit again
if (
# Only terran can have reactors
is_terran
# Check if we have enough cost or supply for this unit type
and self.can_afford(unit_type)
# Structure needs to be idle in the current frame
and not structure.orders
# We are at least 2 away from goal
and trained_amount + 1 < amount
# Unit type does not require techlab
and not requires_techlab
# Train structure has reactor
and structure.add_on_tag in self.reactor_tags
):
trained_amount += 1
# With one command queue=False and one queue=True, you can queue 2 marines in a reactored barracks in one frame
successfully_trained = self.do(
structure.train(unit_type, queue=True),
subtract_cost=True,
subtract_supply=True,
ignore_warning=True,
)
if successfully_trained:
trained_amount += 1
if trained_amount == amount:
# Target unit train amount reached
return trained_amount
else:
# Some error occurred and we couldn't train the unit
return trained_amount
return trained_amount
def research(self, upgrade_type: UpgradeId) -> bool:
"""
Researches an upgrade from a structure that can research it, if it is idle and powered (protoss).
Returns True if the research was started.
Returns False if the requirement was not met, the bot did not have enough resources to start the upgrade,
or the structure to research the upgrade from was missing or not idle.
New function. Please report any bugs!
Example::
# Try to research zergling movement speed if we can afford it
# and if at least one pool is at build_progress == 1
# and we are not researching it yet
if self.already_pending_upgrade(UpgradeId.ZERGLINGMOVEMENTSPEED) == 0 and self.can_afford(UpgradeId.ZERGLINGMOVEMENTSPEED):
spawning_pools_ready = self.structures(UnitTypeId.SPAWNINGPOOL).ready
if spawning_pools_ready:
self.research(UpgradeId.ZERGLINGMOVEMENTSPEED)
:param upgrade_type:
"""
assert (
upgrade_type in UPGRADE_RESEARCHED_FROM
), f"Could not find upgrade {upgrade_type} in 'research from'-dictionary"
# Not affordable
if not self.can_afford(upgrade_type):
return False
research_structure_types: UnitTypeId = UPGRADE_RESEARCHED_FROM[upgrade_type]
required_tech_building: Optional[UnitTypeId] = RESEARCH_INFO[research_structure_types][upgrade_type].get(
"required_building", None
)
requirement_met = (
required_tech_building is None or self.structure_type_build_progress(required_tech_building) == 1
)
if not requirement_met:
return False
is_protoss = self.race == Race.Protoss
# All upgrades right now that can be researched in spire and hatch can also be researched in their morphs
equiv_structures = {
UnitTypeId.SPIRE: {UnitTypeId.SPIRE, UnitTypeId.GREATERSPIRE},
UnitTypeId.GREATERSPIRE: {UnitTypeId.SPIRE, UnitTypeId.GREATERSPIRE},
UnitTypeId.HATCHERY: {UnitTypeId.HATCHERY, UnitTypeId.LAIR, UnitTypeId.HIVE},
UnitTypeId.LAIR: {UnitTypeId.HATCHERY, UnitTypeId.LAIR, UnitTypeId.HIVE},
UnitTypeId.HIVE: {UnitTypeId.HATCHERY, UnitTypeId.LAIR, UnitTypeId.HIVE},
}
# Convert to a set of equivalent structures so that morphed variants are also considered
# Overlord speed upgrade can be researched from hatchery, lair or hive
research_structure_types: Set[UnitTypeId] = equiv_structures.get(
research_structure_types, {research_structure_types}
)
structure: Unit
for structure in self.structures:
if (
# Structure can research this upgrade
structure.type_id in research_structure_types
# If structure hasn't received an action/order this frame
and structure.tag not in self.unit_tags_received_action
# Structure is idle
and structure.is_idle
# Structure belongs to protoss and is powered (near pylon)
and (not is_protoss or structure.is_powered)
):
# Can_afford check was already done earlier in this function
successful_action: bool = self.do(
structure.research(upgrade_type), subtract_cost=True, ignore_warning=True
)
return successful_action
return False
def do(
self,
action: UnitCommand,
subtract_cost: bool = False,
subtract_supply: bool = False,
can_afford_check: bool = False,
ignore_warning: bool = False,
) -> bool:
""" Adds a unit action to the 'self.actions' list which is then executed at the end of the frame.
Training a unit::
# Train an SCV from a random idle command center
cc = self.townhalls.idle.random_or(None)
# self.townhalls can be empty or there are no idle townhalls
if cc and self.can_afford(UnitTypeId.SCV):
cc.train(UnitTypeId.SCV)
Building a building::
# Building a barracks at the main ramp, requires 150 minerals and a depot
worker = self.workers.random_or(None)
barracks_placement_position = self.main_base_ramp.barracks_correct_placement
if worker and self.can_afford(UnitTypeId.BARRACKS):
worker.build(UnitTypeId.BARRACKS, barracks_placement_position)
Moving a unit::
# Move a random worker to the center of the map
worker = self.workers.random_or(None)
# worker can be None if all are dead
if worker:
worker.move(self.game_info.map_center)
:param action:
:param subtract_cost:
:param subtract_supply:
:param can_afford_check:
"""
if not self.unit_command_uses_self_do and isinstance(action, bool):
if not ignore_warning:
warnings.warn(
"You have used self.do(). Please consider putting 'self.unit_command_uses_self_do = True' in your bot __init__() function or removing self.do().",
DeprecationWarning,
stacklevel=2,
)
return action
assert isinstance(
action, UnitCommand
), f"Given unit command is not a command, but instead of type {type(action)}"
if subtract_cost:
cost: Cost = self._game_data.calculate_ability_cost(action.ability)
if can_afford_check and not (self.minerals >= cost.minerals and self.vespene >= cost.vespene):
# Dont do action if can't afford
return False
self.minerals -= cost.minerals
self.vespene -= cost.vespene
if subtract_supply and action.ability in abilityid_to_unittypeid:
unit_type = abilityid_to_unittypeid[action.ability]
required_supply = self.calculate_supply_cost(unit_type)
# Overlord has -8
if required_supply > 0:
self.supply_used += required_supply
self.supply_left -= required_supply
self.actions.append(action)
self.unit_tags_received_action.add(action.unit.tag)
return True
# TODO remove again, because you can just use 'self.do()' and execute '_do_actions' and 'self.actions.clear()' afterwards?
async def synchronous_do(self, action: UnitCommand):
"""
Not recommended. Use self.do instead to reduce lag.
This function is only useful for realtime=True in the first frame of the game to instantly produce a worker
and split workers on the mineral patches.
"""
assert isinstance(
action, UnitCommand
), f"Given unit command is not a command, but instead of type {type(action)}"
if not self.can_afford(action.ability):
logger.warning(f"Cannot afford action {action}")
return ActionResult.Error
r = await self._client.actions(action)
if not r: # success
cost = self._game_data.calculate_ability_cost(action.ability)
self.minerals -= cost.minerals
self.vespene -= cost.vespene
self.unit_tags_received_action.add(action.unit.tag)
else:
logger.error(f"Error: {r} (action: {action})")
return r
async def _do_actions(self, actions: List[UnitCommand], prevent_double: bool = True):
""" Used internally by main.py automatically, use self.do() instead!
:param actions:
:param prevent_double: """
if not actions:
return None
if prevent_double:
actions = list(filter(self.prevent_double_actions, actions))
result = await self._client.actions(actions)
return result
def prevent_double_actions(self, action) -> bool:
"""
:param action:
"""
# Always add actions if queued
if action.queue:
return True
if action.unit.orders:
# action: UnitCommand
# current_action: UnitOrder
current_action = action.unit.orders[0]
if current_action.ability.id != action.ability and current_action.ability.exact_id != action.ability:
# Different action, return True
return True
with suppress(AttributeError):
if current_action.target == action.target.tag:
# Same action, remove action if same target unit
return False
with suppress(AttributeError):
if action.target.x == current_action.target.x and action.target.y == current_action.target.y:
# Same action, remove action if same target position
return False
return True
return True
async def chat_send(self, message: str, team_only: bool = False):
""" Send a chat message to the SC2 Client.
Example::
await self.chat_send("Hello, this is a message from my bot!")
:param message:
:param team_only: """
assert isinstance(message, str), f"{message} is not a string"
await self._client.chat_send(message, team_only)
def in_map_bounds(self, pos: Union[Point2, tuple, list]) -> bool:
""" Tests if a 2 dimensional point is within the map boundaries of the pixelmaps.
:param pos: """
return (
self._game_info.playable_area.x
<= pos[0]
< self._game_info.playable_area.x + self.game_info.playable_area.width
and self._game_info.playable_area.y
<= pos[1]
< self._game_info.playable_area.y + self.game_info.playable_area.height
)
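# Illustrative usage sketch (not part of the original file): guard grid lookups with
# in_map_bounds before indexing the pixelmaps, e.g. when probing a point near a unit:
#
#     candidate = worker.position.rounded.offset((5, 0))
#     if self.in_map_bounds(candidate) and self.in_pathing_grid(candidate):
#         worker.move(candidate)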
# For the functions below, make sure you are inside the boundaries of the map size.
def get_terrain_height(self, pos: Union[Point2, Unit]) -> int:
""" Returns terrain height at a position.
Caution: terrain height is different from a unit's z-coordinate.
:param pos: """
assert isinstance(pos, (Point2, Unit)), f"pos is not of type Point2 or Unit"
pos = pos.position.rounded
return self._game_info.terrain_height[pos]
def get_terrain_z_height(self, pos: Union[Point2, Unit]) -> float:
""" Returns terrain z-height at a position.
:param pos: """
assert isinstance(pos, (Point2, Unit)), f"pos is not of type Point2 or Unit"
pos = pos.position.rounded
return -16 + 32 * self._game_info.terrain_height[pos] / 255
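# Worked example (illustrative): the raw terrain_height byte (0..255) is mapped linearly onto
# the game z-range of -16..+16, so a raw value of 175 gives -16 + 32 * 175 / 255 ≈ 5.96.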
def in_placement_grid(self, pos: Union[Point2, Unit]) -> bool:
""" Returns True if you can place something at a position.
Remember, buildings usually use 2x2, 3x3 or 5x5 of these grid points.
Caution: some x and y offset might be required, see ramp code in game_info.py
:param pos: """
assert isinstance(pos, (Point2, Unit)), f"pos is not of type Point2 or Unit"
pos = pos.position.rounded
return self._game_info.placement_grid[pos] == 1
def in_pathing_grid(self, pos: Union[Point2, Unit]) -> bool:
""" Returns True if a ground unit can pass through a grid point.
:param pos: """
assert isinstance(pos, (Point2, Unit)), f"pos is not of type Point2 or Unit"
pos = pos.position.rounded
return self._game_info.pathing_grid[pos] == 1
def is_visible(self, pos: Union[Point2, Unit]) -> bool:
""" Returns True if you have vision on a grid point.
:param pos: """
# more info: https://github.com/Blizzard/s2client-proto/blob/9906df71d6909511907d8419b33acc1a3bd51ec0/s2clientprotocol/spatial.proto#L19
assert isinstance(pos, (Point2, Unit)), f"pos is not of type Point2 or Unit"
pos = pos.position.rounded
return self.state.visibility[pos] == 2
def has_creep(self, pos: Union[Point2, Unit]) -> bool:
""" Returns True if there is creep on the grid point.
:param pos: """
assert isinstance(pos, (Point2, Unit)), f"pos is not of type Point2 or Unit"
pos = pos.position.rounded
return self.state.creep[pos] == 1
def _prepare_start(self, client, player_id, game_info, game_data, realtime: bool = False, base_build: int = -1):
"""
Runs before game start to set game and player data.
:param client:
:param player_id:
:param game_info:
:param game_data:
:param realtime:
:param base_build:
"""
self._client: Client = client
self.player_id: int = player_id
self._game_info: GameInfo = game_info
self._game_data: GameData = game_data
self.realtime: bool = realtime
self.base_build: int = base_build
self.race: Race = Race(self._game_info.player_races[self.player_id])
if len(self._game_info.player_races) == 2:
self.enemy_race: Race = Race(self._game_info.player_races[3 - self.player_id])
self._distances_override_functions(self.distance_calculation_method)
def _prepare_first_step(self):
"""First step extra preparations. Must not be called before _prepare_step."""
if self.townhalls:
self._game_info.player_start_location = self.townhalls.first.position
# Calculate and cache expansion locations forever inside 'self._cache_expansion_locations', this is done to prevent a bug when this is run and cached later in the game
_ = self._find_expansion_locations()
self._game_info.map_ramps, self._game_info.vision_blockers = self._game_info._find_ramps_and_vision_blockers()
self._time_before_step: float = time.perf_counter()
def _prepare_step(self, state, proto_game_info):
"""
:param state:
:param proto_game_info:
"""
# Set attributes from new state before on_step.
self.state: GameState = state # See game_state.py
# update pathing grid, which unfortunately is in GameInfo instead of GameState
self._game_info.pathing_grid: PixelMap = PixelMap(
proto_game_info.game_info.start_raw.pathing_grid, in_bits=True, mirrored=False
)
# Required for events, needs to be before self.units are initialized so the old units are stored
self._units_previous_map: Dict[int, Unit] = {unit.tag: unit for unit in self.units}
self._structures_previous_map: Dict[int, Unit] = {structure.tag: structure for structure in self.structures}
self._enemy_units_previous_map: Dict[int, Unit] = {unit.tag: unit for unit in self.enemy_units}
self._enemy_structures_previous_map: Dict[int, Unit] = {
structure.tag: structure for structure in self.enemy_structures
}
self._prepare_units()
self.minerals: int = state.common.minerals
self.vespene: int = state.common.vespene
self.supply_army: int = state.common.food_army
self.supply_workers: int = state.common.food_workers # Doesn't include workers in production
self.supply_cap: int = state.common.food_cap
self.supply_used: int = state.common.food_used
self.supply_left: int = self.supply_cap - self.supply_used
if self.race == Race.Zerg:
# Workaround Zerg supply rounding bug
self._correct_zerg_supply()
elif self.race == Race.Protoss:
self.warp_gate_count: int = state.common.warp_gate_count
self.idle_worker_count: int = state.common.idle_worker_count
self.army_count: int = state.common.army_count
self._time_before_step: float = time.perf_counter()
if self.enemy_race == Race.Random and self.all_enemy_units:
self.enemy_race = Race(self.all_enemy_units.first.race)
def _prepare_units(self):
# Set of enemy units detected by own sensor tower, as blips have less unit information than normal visible units
self.blips: Set[Blip] = set()
self.all_units: Units = Units([], self)
self.units: Units = Units([], self)
self.workers: Units = Units([], self)
self.larva: Units = Units([], self)
self.structures: Units = Units([], self)
self.townhalls: Units = Units([], self)
self.gas_buildings: Units = Units([], self)
self.all_own_units: Units = Units([], self)
self.enemy_units: Units = Units([], self)
self.enemy_structures: Units = Units([], self)
self.all_enemy_units: Units = Units([], self)
self.resources: Units = Units([], self)
self.destructables: Units = Units([], self)
self.watchtowers: Units = Units([], self)
self.mineral_field: Units = Units([], self)
self.vespene_geyser: Units = Units([], self)
self.placeholders: Units = Units([], self)
self.techlab_tags: Set[int] = set()
self.reactor_tags: Set[int] = set()
worker_types: Set[UnitTypeId] = {UnitTypeId.DRONE, UnitTypeId.DRONEBURROWED, UnitTypeId.SCV, UnitTypeId.PROBE}
index: int = 0
for unit in self.state.observation_raw.units:
if unit.is_blip:
self.blips.add(Blip(unit))
else:
unit_type: int = unit.unit_type
# Convert these units to effects: reaper grenade, parasitic bomb dummy, forcefield
if unit_type in FakeEffectID:
self.state.effects.add(EffectData(unit, fake=True))
continue
unit_obj = Unit(unit, self, distance_calculation_index=index, base_build=self.base_build)
index += 1
self.all_units.append(unit_obj)
if unit.display_type == IS_PLACEHOLDER:
self.placeholders.append(unit_obj)
continue
alliance = unit.alliance
# Alliance.Neutral.value = 3
if alliance == 3:
# XELNAGATOWER = 149
if unit_type == 149:
self.watchtowers.append(unit_obj)
# mineral field enums
elif unit_type in mineral_ids:
self.mineral_field.append(unit_obj)
self.resources.append(unit_obj)
# geyser enums
elif unit_type in geyser_ids:
self.vespene_geyser.append(unit_obj)
self.resources.append(unit_obj)
# all destructable rocks
else:
self.destructables.append(unit_obj)
# Alliance.Self.value = 1
elif alliance == 1:
self.all_own_units.append(unit_obj)
unit_id = unit_obj.type_id
if unit_obj.is_structure:
self.structures.append(unit_obj)
if unit_id in race_townhalls[self.race]:
self.townhalls.append(unit_obj)
elif unit_id in ALL_GAS or unit_obj.vespene_contents:
# TODO: remove "or unit_obj.vespene_contents" when a new linux client newer than version 4.10.0 is released
self.gas_buildings.append(unit_obj)
elif unit_id in {
UnitTypeId.TECHLAB,
UnitTypeId.BARRACKSTECHLAB,
UnitTypeId.FACTORYTECHLAB,
UnitTypeId.STARPORTTECHLAB,
}:
self.techlab_tags.add(unit_obj.tag)
elif unit_id in {
UnitTypeId.REACTOR,
UnitTypeId.BARRACKSREACTOR,
UnitTypeId.FACTORYREACTOR,
UnitTypeId.STARPORTREACTOR,
}:
self.reactor_tags.add(unit_obj.tag)
else:
self.units.append(unit_obj)
if unit_id in worker_types:
self.workers.append(unit_obj)
elif unit_id == UnitTypeId.LARVA:
self.larva.append(unit_obj)
# Alliance.Enemy.value = 4
elif alliance == 4:
self.all_enemy_units.append(unit_obj)
if unit_obj.is_structure:
self.enemy_structures.append(unit_obj)
else:
self.enemy_units.append(unit_obj)
# Force distance calculation and caching on all units using scipy pdist or cdist
if self.distance_calculation_method == 1:
_ = self._pdist
elif self.distance_calculation_method in {2, 3}:
_ = self._cdist
async def _after_step(self) -> int:
""" Executed by main.py after each on_step function. """
# Keep track of the bot on_step duration
self._time_after_step: float = time.perf_counter()
step_duration = self._time_after_step - self._time_before_step
self._min_step_time = min(step_duration, self._min_step_time)
self._max_step_time = max(step_duration, self._max_step_time)
self._last_step_step_time = step_duration
self._total_time_in_on_step += step_duration
self._total_steps_iterations += 1
# Commit and clear bot actions
if self.actions:
await self._do_actions(self.actions)
self.actions.clear()
# Clear set of unit tags that were given an order this frame by self.do()
self.unit_tags_received_action.clear()
# Commit debug queries
await self._client._send_debug()
return self.state.game_loop
async def _advance_steps(self, steps: int):
""" Advances the game loop by amount of 'steps'. This function is meant to be used as a debugging and testing tool only.
If you are using this, please be aware of the consequences, e.g. 'self.units' will be filled with completely new data. """
await self._after_step()
# Advance simulation by exactly "steps" frames
await self.client.step(steps)
state = await self.client.observation()
gs = GameState(state.observation)
proto_game_info = await self.client._execute(game_info=sc_pb.RequestGameInfo())
self._prepare_step(gs, proto_game_info)
await self.issue_events()
# await self.on_step(-1)
async def issue_events(self):
""" This function will be automatically run from main.py and triggers the following functions:
- on_unit_created
- on_unit_destroyed
- on_building_construction_started
- on_building_construction_complete
- on_upgrade_complete
"""
await self._issue_unit_dead_events()
await self._issue_unit_added_events()
await self._issue_building_events()
await self._issue_upgrade_events()
await self._issue_vision_events()
async def _issue_unit_added_events(self):
for unit in self.units:
if unit.tag not in self._units_previous_map and unit.tag not in self._unit_tags_seen_this_game:
self._unit_tags_seen_this_game.add(unit.tag)
self._units_created[unit.type_id] += 1
await self.on_unit_created(unit)
elif unit.tag in self._units_previous_map:
previous_frame_unit: Unit = self._units_previous_map[unit.tag]
# Check if a unit took damage this frame and then trigger event
if unit.health < previous_frame_unit.health or unit.shield < previous_frame_unit.shield:
damage_amount = previous_frame_unit.health - unit.health + previous_frame_unit.shield - unit.shield
await self.on_unit_took_damage(unit, damage_amount)
# Check if a unit type has changed
if previous_frame_unit.type_id != unit.type_id:
await self.on_unit_type_changed(unit, previous_frame_unit.type_id)
async def _issue_upgrade_events(self):
difference = self.state.upgrades - self._previous_upgrades
for upgrade_completed in difference:
await self.on_upgrade_complete(upgrade_completed)
self._previous_upgrades = self.state.upgrades
async def _issue_building_events(self):
for structure in self.structures:
if structure.tag not in self._structures_previous_map:
if structure.build_progress < 1:
await self.on_building_construction_started(structure)
else:
# Include starting townhall
self._units_created[structure.type_id] += 1
await self.on_building_construction_complete(structure)
elif structure.tag in self._structures_previous_map:
# Check if a structure took damage this frame and then trigger event
previous_frame_structure: Unit = self._structures_previous_map[structure.tag]
if (
structure.health < previous_frame_structure.health
or structure.shield < previous_frame_structure.shield
):
damage_amount = (
previous_frame_structure.health
- structure.health
+ previous_frame_structure.shield
- structure.shield
)
await self.on_unit_took_damage(structure, damage_amount)
# Check if a structure changed its type
if previous_frame_structure.type_id != structure.type_id:
await self.on_unit_type_changed(structure, previous_frame_structure.type_id)
# Check if structure completed
if structure.build_progress == 1 and previous_frame_structure.build_progress < 1:
self._units_created[structure.type_id] += 1
await self.on_building_construction_complete(structure)
async def _issue_vision_events(self):
# Call events for enemy unit entered vision
for enemy_unit in self.enemy_units:
if enemy_unit.tag not in self._enemy_units_previous_map:
await self.on_enemy_unit_entered_vision(enemy_unit)
for enemy_structure in self.enemy_structures:
if enemy_structure.tag not in self._enemy_structures_previous_map:
await self.on_enemy_unit_entered_vision(enemy_structure)
# Call events for enemy unit left vision
if self.enemy_units:
visible_enemy_units = self.enemy_units.tags
for enemy_unit_tag in self._enemy_units_previous_map.keys():
if enemy_unit_tag not in visible_enemy_units:
await self.on_enemy_unit_left_vision(enemy_unit_tag)
if self.enemy_structures:
visible_enemy_structures = self.enemy_structures.tags
for enemy_structure_tag in self._enemy_structures_previous_map.keys():
if enemy_structure_tag not in visible_enemy_structures:
await self.on_enemy_unit_left_vision(enemy_structure_tag)
async def _issue_unit_dead_events(self):
for unit_tag in self.state.dead_units:
await self.on_unit_destroyed(unit_tag)
async def on_unit_destroyed(self, unit_tag: int):
"""
Override this in your bot class.
Note that this function uses unit tags and not the unit objects
because the unit does not exist any more.
This event will be called when a unit (or structure) dies.
For enemy units, this only works if the enemy unit was in vision on death.
:param unit_tag:
"""
async def on_unit_created(self, unit: Unit):
""" Override this in your bot class. This function is called when a unit is created.
:param unit: """
async def on_unit_type_changed(self, unit: Unit, previous_type: UnitTypeId):
""" Override this in your bot class. This function is called when a unit type has changed. To get the current UnitTypeId of the unit, use 'unit.type_id'
This may happen when a larva morphed to an egg, siege tank sieged, a zerg unit burrowed, a hatchery morphed to lair,
a corruptor morphed to broodlordcocoon, etc..
Examples::
print(f"My unit changed type: {unit} from {previous_type} to {unit.type_id}")
:param unit:
:param previous_type:
"""
async def on_building_construction_started(self, unit: Unit):
"""
Override this in your bot class.
This function is called when a building construction has started.
:param unit:
"""
async def on_building_construction_complete(self, unit: Unit):
"""
Override this in your bot class. This function is called when a building
construction is completed.
:param unit:
"""
async def on_upgrade_complete(self, upgrade: UpgradeId):
"""
Override this in your bot class. This function is called with the upgrade id of an upgrade that was not finished last step and is now.
:param upgrade:
"""
async def on_unit_took_damage(self, unit: Unit, amount_damage_taken: float):
"""
Override this in your bot class. This function is called when your own unit (unit or structure) took damage.
It will not be called if the unit died this frame.
This may be called frequently for terran structures that are burning down, or zerg buildings that are off creep,
or terran bio units that just used stimpack ability.
TODO: If there is a demand for it, then I can add a similar event for when enemy units took damage
Examples::
print(f"My unit took damage: {unit} took {amount_damage_taken} damage")
:param unit:
"""
async def on_enemy_unit_entered_vision(self, unit: Unit):
"""
Override this in your bot class. This function is called when an enemy unit (unit or structure) entered vision (which was not visible last frame).
:param unit:
"""
async def on_enemy_unit_left_vision(self, unit_tag: int):
"""
Override this in your bot class. This function is called when an enemy unit (unit or structure) left vision (which was visible last frame).
Same as the self.on_unit_destroyed event, this function is called with the unit's tag because the unit is no longer visible.
If you want to store a snapshot of the unit, use self._enemy_units_previous_map[unit_tag] for units or self._enemy_structures_previous_map[unit_tag] for structures.
Examples::
last_known_unit = self._enemy_units_previous_map.get(unit_tag, None) or self._enemy_structures_previous_map[unit_tag]
print(f"Enemy unit left vision, last known location: {last_known_unit.position}")
:param unit_tag:
"""
async def on_before_start(self):
"""
Override this in your bot class. This function is called before "on_start"
and before "prepare_first_step" that calculates expansion locations.
Not all data is available yet.
This function is useful in realtime=True mode to split your workers or start producing the first worker.
"""
async def on_start(self):
"""
Override this in your bot class.
At this point, game_data, game_info and the first iteration of game_state (self.state) are available.
"""
async def on_step(self, iteration: int):
"""
You need to implement this function!
Override this in your bot class.
This function is called on every game step (looped in realtime mode).
:param iteration:
"""
raise NotImplementedError
async def on_end(self, game_result: Result):
""" Override this in your bot class. This function is called at the end of a game.
This function may not be called on the ladder manager client, as the bot process may be forcefully terminated.
:param game_result: """
|
the-stack_0_22860 | from django.db import models
from django.urls import reverse
from dotmanca.storage import OverwriteStorage
class Gallery(models.Model):
name = models.CharField(max_length=50)
slug = models.SlugField(unique=True)
added_timestamp = models.DateTimeField(auto_now_add=True)
description = models.TextField(blank=True)
sort_order = models.IntegerField()
def get_absolute_url(self):
kwargs = {"slug": self.slug}
return reverse("gallery:gallery", kwargs=kwargs)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = "galleries"
def gallery_image_upload_to(instance, file_name):
file_extension = file_name.split(".")[-1]
return f"galleries/{instance.gallery.slug}/{instance.slug}.{file_extension}"
class GalleryImage(models.Model):
gallery = models.ForeignKey(
Gallery, on_delete=models.CASCADE, related_name="images"
)
name = models.CharField(max_length=50)
slug = models.SlugField()
sort_order = models.IntegerField()
the_image = models.ImageField(
null=False,
blank=False,
upload_to=gallery_image_upload_to,
storage=OverwriteStorage(),
)
added_timestamp = models.DateTimeField(auto_now_add=True)
description = models.TextField(blank=True)
def get_absolute_url(self):
kwargs = {"gallery_slug": self.gallery.slug, "slug": self.slug}
return reverse("gallery:gallery_image", kwargs=kwargs)
def __str__(self):
return self.name
class Meta:
unique_together = ("gallery", "slug")
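# Illustrative sketch (not part of the original file): upload paths are derived from the gallery
# and image slugs, so re-uploading a file for the same image overwrites the previous one via
# OverwriteStorage, e.g.:
#
#     gallery = Gallery(name="Landscapes", slug="landscapes", sort_order=1)
#     image = GalleryImage(gallery=gallery, name="Sunset", slug="sunset", sort_order=1)
#     gallery_image_upload_to(image, "photo.jpg")  # -> "galleries/landscapes/sunset.jpg"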
|
the-stack_0_22861 | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a sweep over all the leaderboard models.
Runs each model with three random seeds.
"""
import copy
from aqt.jax_legacy.jax.wmt_mlperf.hparams_configs.leaderboard import full_model_4bit_weights_and_auto_acts
from aqt.jax_legacy.jax.wmt_mlperf.hparams_configs.leaderboard import full_model_4bit_weights_only
from aqt.jax_legacy.jax.wmt_mlperf.hparams_configs.leaderboard import full_model_8bit_weights_and_auto_acts
from aqt.jax_legacy.jax.wmt_mlperf.hparams_configs.leaderboard import full_model_8bit_weights_only
from aqt.jax_legacy.jax.wmt_mlperf.hparams_configs.leaderboard import full_model_bfloat16
import ml_collections
def get_config():
"""Returns sweep configuration (see module docstring)."""
sweep_config = ml_collections.ConfigDict()
base_configs = [
full_model_bfloat16.get_config(),
full_model_8bit_weights_only.get_config(),
full_model_8bit_weights_and_auto_acts.get_config(),
full_model_4bit_weights_only.get_config(),
full_model_4bit_weights_and_auto_acts.get_config()
]
configs = []
for base_config in base_configs:
for seed in range(3):
config = copy.deepcopy(base_config)
config.random_seed = seed
config.metadata.hyper_str = f"{config.metadata.hyper_str}_seed={seed}"
configs.append(config)
sweep_config.configs = configs
return sweep_config
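# Illustrative usage sketch (not part of the original file): the sweep expands the 5 base
# configs into 15 runs (3 seeds each); a launcher could iterate them like:
#
#     sweep = get_config()
#     for cfg in sweep.configs:
#         print(cfg.metadata.hyper_str, cfg.random_seed)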
|
the-stack_0_22862 | import asyncio
import random
import time
print("asyncio.Queue asyncio.PriorityQueue")
async def worker(name, queue):
while True:
# Get a "work item" out of the queue.
sleep_for = await queue.get()
# Sleep for the "sleep_for" seconds.
await asyncio.sleep(sleep_for)
# Notify the queue that the "work item" has been processed.
queue.task_done()
print(f'{name} has slept for {sleep_for:.2f} seconds')
async def main():
# Create a queue that we will use to store our "workload".
queue = asyncio.Queue()
# Generate random timings and put them into the queue.
total_sleep_time = 0
for _ in range(20):
sleep_for = random.uniform(0.05, 1.0)
total_sleep_time += sleep_for
queue.put_nowait(sleep_for)
# Create three worker tasks to process the queue concurrently.
tasks = []
for i in range(3):
task = asyncio.create_task(worker(f'worker-{i}', queue))
tasks.append(task)
# Wait until the queue is fully processed.
started_at = time.monotonic()
await queue.join()
total_slept_for = time.monotonic() - started_at
# Cancel our worker tasks.
for task in tasks:
task.cancel()
# Wait until all worker tasks are cancelled.
await asyncio.gather(*tasks, return_exceptions=True)
print('====')
print(f'3 workers slept in parallel for {total_slept_for:.2f} seconds')
print(f'total expected sleep time: {total_sleep_time:.2f} seconds')
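# Hedged variant (not executed here): the print header above also names asyncio.PriorityQueue.
# Swapping it in only changes the item shape -- entries are (priority, payload) tuples and
# are retrieved lowest priority first:
#   queue = asyncio.PriorityQueue()
#   queue.put_nowait((1, sleep_for))
#   priority, sleep_for = await queue.get()   # inside a coroutine such as worker()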
asyncio.run(main()) |
the-stack_0_22863 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import mock
import pytest
import torch
from torch import optim
from torch.utils.data import DataLoader
import tests.helpers.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.plugins.environments import SLURMEnvironment
from tests.helpers import BoringModel, RandomDataset
from tests.helpers.runif import RunIf
class AMPTestModel(BoringModel):
def _step(self, batch):
self._assert_autocast_enabled()
output = self(batch)
is_bfloat16 = self.trainer.precision_plugin.precision == "bf16"
assert output.dtype == (torch.float16 if not is_bfloat16 else torch.bfloat16)
loss = self.loss(batch, output)
return loss
def loss(self, batch, prediction):
# todo (sean): convert bfloat16 to float32 as mse loss for cpu amp is currently not supported
if self.trainer.precision_plugin.device == "cpu":
prediction = prediction.float()
return super().loss(batch, prediction)
def training_step(self, batch, batch_idx):
output = self._step(batch)
return {"loss": output}
def validation_step(self, batch, batch_idx):
output = self._step(batch)
return {"x": output}
def test_step(self, batch, batch_idx):
output = self._step(batch)
return {"y": output}
def predict_step(self, batch, batch_idx, dataloader_idx=0):
self._assert_autocast_enabled()
output = self(batch)
is_bfloat16 = self.trainer.precision_plugin.precision == "bf16"
assert output.dtype == (torch.float16 if not is_bfloat16 else torch.bfloat16)
return output
def _assert_autocast_enabled(self):
if self.trainer.precision_plugin.device == "cpu":
assert torch.is_autocast_cpu_enabled()
else:
assert torch.is_autocast_enabled()
@RunIf(min_torch="1.10")
@pytest.mark.parametrize(
"strategy",
[
None,
pytest.param("dp", marks=pytest.mark.skip("dp + amp not supported on CPU currently")), # TODO
"ddp_spawn",
],
)
@pytest.mark.parametrize("precision", [16, "bf16"])
@pytest.mark.parametrize("num_processes", [1, 2])
def test_amp_cpus(tmpdir, strategy, precision, num_processes):
"""Make sure combinations of AMP and training types work if supported."""
tutils.reset_seed()
trainer = Trainer(
default_root_dir=tmpdir, num_processes=num_processes, max_epochs=1, strategy=strategy, precision=precision
)
model = AMPTestModel()
trainer.fit(model)
trainer.test(model)
trainer.predict(model, DataLoader(RandomDataset(32, 64)))
assert trainer.state.finished, f"Training failed with {trainer.state}"
@RunIf(min_gpus=2, min_torch="1.10")
@pytest.mark.parametrize("strategy", [None, "dp", "ddp_spawn"])
@pytest.mark.parametrize("precision", [16, "bf16"])
@pytest.mark.parametrize("gpus", [1, 2])
def test_amp_gpus(tmpdir, strategy, precision, gpus):
"""Make sure combinations of AMP and training types work if supported."""
tutils.reset_seed()
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, gpus=gpus, strategy=strategy, precision=precision)
model = AMPTestModel()
trainer.fit(model)
trainer.test(model)
trainer.predict(model, DataLoader(RandomDataset(32, 64)))
assert trainer.state.finished, f"Training failed with {trainer.state}"
@RunIf(min_gpus=2)
@mock.patch.dict(
os.environ,
{
"SLURM_NTASKS": "1",
"SLURM_JOB_NAME": "SOME_NAME",
"SLURM_NODEID": "0",
"LOCAL_RANK": "0",
"SLURM_LOCALID": "0",
"SLURM_PROCID": "0",
},
)
def test_amp_gpu_ddp_slurm_managed(tmpdir):
"""Make sure DDP + AMP work."""
# simulate setting slurm flags
tutils.set_random_main_port()
model = AMPTestModel()
# exp file to get meta
logger = tutils.get_default_logger(tmpdir)
# exp file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
gpus=[0],
strategy="ddp_spawn",
precision=16,
callbacks=[checkpoint],
logger=logger,
)
trainer.fit(model)
# correct result and ok accuracy
assert trainer.state.finished, "amp + ddp model failed to complete"
# test root model address
assert isinstance(trainer.training_type_plugin.cluster_environment, SLURMEnvironment)
assert trainer.training_type_plugin.cluster_environment.resolve_root_node_address("abc") == "abc"
assert trainer.training_type_plugin.cluster_environment.resolve_root_node_address("abc[23]") == "abc23"
assert trainer.training_type_plugin.cluster_environment.resolve_root_node_address("abc[23-24]") == "abc23"
generated = trainer.training_type_plugin.cluster_environment.resolve_root_node_address("abc[23-24, 45-40, 40]")
assert generated == "abc23"
@mock.patch("pytorch_lightning.plugins.precision.apex_amp.ApexMixedPrecisionPlugin.backward")
def test_amp_without_apex(bwd_mock, tmpdir):
"""Check that even with apex amp type without requesting precision=16 the amp backend is void."""
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, amp_backend="native")
assert trainer.amp_backend is None
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, amp_backend="apex")
assert trainer.amp_backend is None
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert not bwd_mock.called
@RunIf(min_gpus=1, amp_apex=True)
@mock.patch("pytorch_lightning.plugins.precision.apex_amp.ApexMixedPrecisionPlugin.backward")
def test_amp_with_apex(bwd_mock, tmpdir):
"""Check calling apex scaling in training."""
class CustomModel(BoringModel):
def training_step(self, batch, batch_idx, optimizer_idx):
return super().training_step(batch, batch_idx)
def configure_optimizers(self):
optimizer1 = optim.Adam(self.parameters(), lr=0.01)
optimizer2 = optim.SGD(self.parameters(), lr=0.01)
lr_scheduler1 = optim.lr_scheduler.StepLR(optimizer1, 1, gamma=0.1)
lr_scheduler2 = optim.lr_scheduler.StepLR(optimizer2, 1, gamma=0.1)
return [optimizer1, optimizer2], [lr_scheduler1, lr_scheduler2]
model = CustomModel()
model.training_epoch_end = None
trainer = Trainer(default_root_dir=tmpdir, max_steps=5, precision=16, amp_backend="apex", gpus=1)
assert str(trainer.amp_backend) == "AMPType.APEX"
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert bwd_mock.call_count == 10
assert isinstance(trainer.lr_schedulers[0]["scheduler"].optimizer, optim.Adam)
assert isinstance(trainer.lr_schedulers[1]["scheduler"].optimizer, optim.SGD)
|
the-stack_0_22865 | #!/usr/bin/env python
import logging
from os.path import abspath, dirname, join
from shutil import rmtree
import sys
import django
from django.conf import settings
sys.path.insert(0, abspath(dirname(__file__)))
media_root = join(abspath(dirname(__file__)), 'test_files')
rmtree(media_root, ignore_errors=True)
installed_apps = [
'simple_history.tests',
'simple_history.tests.custom_user',
'simple_history.tests.external',
'simple_history.registry_tests.migration_test_app',
'simple_history',
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.admin',
]
DEFAULT_SETTINGS = dict(
AUTH_USER_MODEL='custom_user.CustomUser',
ROOT_URLCONF='simple_history.tests.urls',
MEDIA_ROOT=media_root,
STATIC_URL='/static/',
INSTALLED_APPS=installed_apps,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
MIDDLEWARE_CLASSES=[
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
],
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
}],
)
def main():
if not settings.configured:
settings.configure(**DEFAULT_SETTINGS)
if hasattr(django, 'setup'):
django.setup()
try:
from django.test.runner import DiscoverRunner
except ImportError:
from django.test.simple import DjangoTestSuiteRunner
failures = DjangoTestSuiteRunner(failfast=False).run_tests(['tests'])
failures |= DjangoTestSuiteRunner(failfast=False).run_tests(['registry_tests'])
else:
failures = DiscoverRunner(failfast=False).run_tests(['simple_history.tests'])
failures |= DiscoverRunner(failfast=False).run_tests(['simple_history.registry_tests'])
sys.exit(failures)
if __name__ == "__main__":
logging.basicConfig()
main()
|
the-stack_0_22866 | import jax
import jax.nn
import jax.numpy as jnp
import numpy as np
from jax.lax import stop_gradient
import haiku as hk
from collections import deque
from typing import NamedTuple
import optax
import gym
import random
import os
from functools import partial
# tell JAX to use the CPU; the CPU is faster for these small networks
os.environ.setdefault('JAX_PLATFORM_NAME', 'cpu')
BUFFER_SIZE = 1000000
TARGET_UPDATE = 10000
VERBOSE_UPDATE = 1000
EPSILON = 1
TAU = 0.005
class Transition(NamedTuple):
s: list # state
a: int # action
r: float # reward
s_p: list # next state
d: int # done
@jax.vmap
def noisy_action(a_t):
noise = (jax.random.normal(rng, shape=a_t.shape) * 0.2).clip(-0.5, 0.5)  # NB: reuses the single module-level PRNG key, so the same noise is drawn on every call
return (a_t + noise).clip(env.action_space.low[0], env.action_space.high[0])
@jax.jit
def q_loss_fn(Q_s, Q_sp1, r_t, done):
y = r_t + 0.99 * Q_sp1 * (1 - done)
return (Q_s - y)
@jax.jit
def critic_loss(q_params, q_params_t, pi_params_t, s_t, a_t, r_t, s_tp1, done):
Q_s1, Q_s2 = q_forward.apply(q_params, s_t, a_t)
a_pi = noisy_action(pi_forward.apply(pi_params_t, s_tp1))
Q1, Q2 = stop_gradient(q_forward.apply(q_params_t, s_tp1, a_pi))
Q_sp1 = jnp.minimum(Q1, Q2)
losses = jax.vmap(q_loss_fn)(Q_s1, Q_sp1, r_t, done) + jax.vmap(q_loss_fn)(Q_s2, Q_sp1, r_t, done)
return 0.5 * jnp.square(losses).mean()
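# Reading of the loss above (comment only): q_loss_fn builds the clipped double-Q TD3 target
#   y = r_t + 0.99 * min(Q1', Q2')(s_tp1, pi'(s_tp1) + clipped noise) * (1 - done)
# and returns the raw residual Q(s_t, a_t) - y; the two critics' residuals are then combined
# and squared into a single scalar critic loss.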
@jax.jit
def critic_update(q_params, q_params_t, pi_params_t, q_optim_state, batch):
s_t = jnp.array(batch.s, dtype=jnp.float32)
a_t = jnp.array(batch.a, dtype=jnp.int32)
r_t = jnp.array(batch.r, dtype=jnp.float32)
s_tp1 = jnp.array(batch.s_p, dtype=jnp.float32)
done = jnp.array(batch.d, dtype=jnp.float32) #move all this to a replay buffer class
q_loss, q_grads = jax.value_and_grad(critic_loss)(q_params, q_params_t, pi_params_t, s_t, a_t, r_t, s_tp1, done)
updates, q_optim_state = q_optimizer.update(q_grads, q_optim_state, q_params)
q_params = optax.apply_updates(q_params, updates)
return q_loss, q_params, q_optim_state
@jax.jit
def policy_loss(pi_params, q_params, s_t):
a_pi = pi_forward.apply(pi_params, s_t)
pi_loss, _ = jax.vmap(partial(q_forward.apply, q_params))(s_t, a_pi)
return -jnp.mean(pi_loss)
@jax.jit
def policy_update(pi_params, q_params, pi_optim_state, batch):
s_t = jnp.array(batch.s, dtype=jnp.float32)
_, pi_grads = jax.value_and_grad(policy_loss)(pi_params, q_params, s_t)
updates, pi_optim_state = pi_optimizer.update(pi_grads, pi_optim_state, pi_params)
pi_params = optax.apply_updates(pi_params, updates)
return pi_params, pi_optim_state
@hk.transform
def pi(S):
seq = hk.Sequential([
hk.Linear(256), jax.nn.relu,
hk.Linear(256), jax.nn.relu,
hk.Linear(env.action_space.shape[0]), jax.nn.tanh,
])
a_pi = seq(S) * env.action_space.high[0]
return a_pi
@hk.transform
def q_val(S, A):
SA = jnp.concatenate([S, A], axis=1)
q1_seq = hk.Sequential([
hk.Linear(256), jax.nn.relu,
hk.Linear(256), jax.nn.relu,
hk.Linear(1),
])
q2_seq = hk.Sequential([
hk.Linear(256), jax.nn.relu,
hk.Linear(256), jax.nn.relu,
hk.Linear(1),
])
return q1_seq(SA), q2_seq(SA)
# experience replay:
replay_buffer = deque(maxlen=100000)
env = gym.make('LunarLanderContinuous-v2')
rng = jax.random.PRNGKey(42)
critic_dims = (jnp.zeros((1,env.observation_space.shape[0])), jnp.zeros((1,env.action_space.shape[0])))
#print(*critic_dims)
q_params = q_val.init(rng, *critic_dims)
q_params_t = hk.data_structures.to_immutable_dict(q_params)
q_forward = hk.without_apply_rng(q_val)
q_optimizer = optax.adam(1e-3)
q_optim_state = q_optimizer.init(q_params)
pi_params = pi.init(rng, jnp.ones(env.observation_space.shape[0]))
pi_params_t = hk.data_structures.to_immutable_dict(pi_params)
pi_forward = hk.without_apply_rng(pi)
pi_optimizer = optax.adam(1e-4)
pi_optim_state = pi_optimizer.init(pi_params)
polyak_avg = lambda target, params: (1 - TAU) * target + TAU * params
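# Comment only: polyak_avg is the soft ("Polyak") target update theta' <- (1 - TAU) * theta' + TAU * theta
# with TAU = 0.005; jax.tree_multimap in the training loop below applies it leaf-by-leaf to
# every parameter array of the target networks.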
s_t = env.reset()
avg_r = deque(maxlen=10)
avg_loss = deque(maxlen=10)
r_sum = 0
for i in range(300000): #https://stable-baselines.readthedocs.io/en/master/modules/ddpg.html
a_t = pi_forward.apply(pi_params, s_t)
a_t = noisy_action(a_t)
s_tp1, r_t, done, info = env.step(np.array(a_t))
r_sum += r_t
replay_buffer.append([s_t, a_t, r_t, s_tp1, done])  # store the transition with the pre-step state, before any episode reset
s_t = s_tp1
if done:
avg_r.append(r_sum)
r_sum = 0
s_t = env.reset()
if i >= 128:
batch = Transition(*zip(*random.sample(replay_buffer, k=128)))
q_loss, q_params, q_optim_state = critic_update(q_params, q_params_t, pi_params_t, q_optim_state, batch)
avg_loss.append(q_loss)
if i % 2 == 0: #td3 policy update delay
pi_params, pi_optim_state = policy_update(pi_params, q_params, pi_optim_state, batch)
q_params_t = jax.tree_multimap(polyak_avg, q_params_t, q_params)
pi_params_t = jax.tree_multimap(polyak_avg, pi_params_t, pi_params)
if i >= 500 and i % 100 == 0:
print(f'Timesteps: {i} | avg. reward {sum(avg_r)/10} | avg. critic loss: {sum(avg_loss)/10}')
env.close()
|
the-stack_0_22867 | import base64
import csv
import datetime
import gzip
import ujson
import logging
import os
import subprocess
import uuid
import bson
import pytz
import tzlocal
from urllib import parse
from typing import Tuple, Optional, Dict, Callable, Any
from pymongo import MongoClient
from pymongo.database import Database
from singer.utils import strftime as singer_strftime
from . import utils, split_gzip
from .errors import (
ExportError,
TableNotFoundError,
MongoDBInvalidDatetimeError,
UnsupportedKeyTypeException,
)
LOGGER = logging.getLogger(__name__)
DEFAULT_WRITE_BATCH_ROWS = 50000
def serialize_document(document: Dict) -> Dict:
"""
serialize mongodb Document into a json object
Args:
document: MongoDB document
Returns: Dict
"""
return {
key: transform_value(val, [key])
for key, val in document.items()
if not isinstance(val, (bson.min_key.MinKey, bson.max_key.MaxKey))
}
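# Illustration only (hypothetical document, values abbreviated): given
#   {'_id': ObjectId('61f...'), 'qty': Int64(5), 'created': <timezone-aware datetime>}
# serialize_document() returns JSON-friendly values along the lines of
#   {'_id': '61f...', 'qty': '5', 'created': '2022-02-04T10:00:00.000000Z'},
# while MinKey/MaxKey fields are dropped entirely (see transform_value below for the
# full per-type conversion table).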
def class_to_string(key_value: Any, key_type: str) -> str:
"""
Converts specific types to string equivalent
The supported types are: datetime, bson Timestamp, bytes, int, Int64, float, ObjectId, str and UUID
Args:
key_value: The value to convert to string
key_type: the value type
Returns: string equivalent of key value
Raises: UnsupportedKeyTypeException if key_type is not supported
"""
if key_type == 'datetime':
if key_value.tzinfo is None:
timezone = tzlocal.get_localzone()
local_datetime = timezone.localize(key_value)
utc_datetime = local_datetime.astimezone(pytz.UTC)
else:
utc_datetime = key_value.astimezone(pytz.UTC)
return singer_strftime(utc_datetime)
if key_type == 'Timestamp':
return '{}.{}'.format(key_value.time, key_value.inc)
if key_type == 'bytes':
return base64.b64encode(key_value).decode('utf-8')
if key_type in ['int', 'Int64', 'float', 'ObjectId', 'str', 'UUID']:
return str(key_value)
raise UnsupportedKeyTypeException('{} is not a supported key type'.format(key_type))
def safe_transform_datetime(value: datetime.datetime, path) -> str:
"""
Safely transform datetime from local tz to UTC if applicable
Args:
value: datetime value to transform
path:
Returns: utc datetime as string
"""
timezone = tzlocal.get_localzone()
try:
local_datetime = timezone.localize(value)
utc_datetime = local_datetime.astimezone(pytz.UTC)
except Exception as ex:
if str(ex) == 'year is out of range' and value.year == 0:
# NB: Since datetimes are persisted as strings, it doesn't
# make sense to blow up on invalid Python datetimes (e.g.,
# year=0). In this case we're formatting it as a string and
# passing it along down the pipeline.
return '{:04d}-{:02d}-{:02d}T{:02d}:{:02d}:{:02d}.{:06d}Z'.format(
value.year,
value.month,
value.day,
value.hour,
value.minute,
value.second,
value.microsecond,
)
raise MongoDBInvalidDatetimeError(
'Found invalid datetime at [{}]: {}'.format('.'.join(map(str, path)), value)
) from ex
return singer_strftime(utc_datetime)
def transform_value(value: Any, path) -> Any:
"""
transform values to json friendly ones
Args:
value: value to transform
path:
Returns: transformed value
"""
conversion = {
list: lambda val, pat: list(
map(lambda v: transform_value(v[1], pat + [v[0]]), enumerate(val))
),
dict: lambda val, pat: {
k: transform_value(v, pat + [k]) for k, v in val.items()
},
uuid.UUID: lambda val, _: class_to_string(val, 'UUID'),
bson.objectid.ObjectId: lambda val, _: class_to_string(val, 'ObjectId'),
bson.datetime.datetime: safe_transform_datetime,
bson.timestamp.Timestamp: lambda val, _: singer_strftime(val.as_datetime()),
bson.int64.Int64: lambda val, _: class_to_string(val, 'Int64'),
bytes: lambda val, _: class_to_string(val, 'bytes'),
datetime.datetime: lambda val, _: class_to_string(val, 'datetime'),
bson.decimal128.Decimal128: lambda val, _: val.to_decimal(),
bson.regex.Regex: lambda val, _: dict(pattern=val.pattern, flags=val.flags),
bson.code.Code: lambda val, _: dict(value=str(val), scope=str(val.scope))
if val.scope
else str(val),
bson.dbref.DBRef: lambda val, _: dict(
id=str(val.id), collection=val.collection, database=val.database
),
}
if isinstance(value, tuple(conversion.keys())):
return conversion[type(value)](value, path)
return value
def get_connection_string(config: Dict):
"""
Generates a MongoClientConnectionString based on configuration
Args:
config: DB config
Returns: A MongoClient connection string
"""
srv = config.get('srv') == 'true'
# Default SSL verify mode to true, give option to disable
verify_mode = config.get('verify_mode', 'true') == 'true'
use_ssl = config.get('ssl') == 'true'
connection_query = {
'readPreference': 'secondaryPreferred',
'authSource': config['auth_database'],
}
if config.get('replica_set'):
connection_query['replicaSet'] = config['replica_set']
if use_ssl:
connection_query['ssl'] = 'true'
# NB: "sslAllowInvalidCertificates" must ONLY be supplied if `SSL` is true.
if not verify_mode and use_ssl:
connection_query['tlsAllowInvalidCertificates'] = 'true'
query_string = parse.urlencode(connection_query)
connection_string = '{protocol}://{user}:{password}@{host}{port}/{database}?{query_string}'.format(
protocol='mongodb+srv' if srv else 'mongodb',
user=config['user'],
password=config['password'],
host=config['host'],
port='' if srv else ':{port}'.format(port=int(config['port'])),
database=config['database'],
query_string=query_string
)
return connection_string
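# Hedged example (hypothetical credentials): a non-SRV config such as
#   {'user': 'tap', 'password': 'secret', 'host': 'db.local', 'port': 27017,
#    'database': 'analytics', 'auth_database': 'admin'}
# produces roughly
#   mongodb://tap:secret@db.local:27017/analytics?readPreference=secondaryPreferred&authSource=admin
# with replicaSet/ssl/tlsAllowInvalidCertificates appended only when the matching options are set.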
class FastSyncTapMongoDB:
"""
Common functions for fastsync from a MongoDB database
"""
def __init__(self, connection_config: Dict, tap_type_to_target_type: Callable):
"""
FastSyncTapMongoDB constructor
Args:
connection_config: A map of tap source config
tap_type_to_target_type: Function that maps tap types to target ones
"""
self.connection_config = connection_config
self.connection_config['write_batch_rows'] = connection_config.get(
'write_batch_rows', DEFAULT_WRITE_BATCH_ROWS
)
self.connection_config['connection_string'] = get_connection_string(self.connection_config)
self.tap_type_to_target_type = tap_type_to_target_type
self.database: Optional[Database] = None
def open_connection(self):
"""
Open connection
"""
self.database = MongoClient(self.connection_config['connection_string'])[
self.connection_config['database']
]
def close_connection(self):
"""
Close connection
"""
self.database.client.close()
# pylint: disable=R0914,R0913
def copy_table(
self,
table_name: str,
filepath: str,
temp_dir: str,
split_large_files=False,
split_file_chunk_size_mb=1000,
split_file_max_chunks=20,
compress=True,
):
"""
Export data from table to a zipped csv
Args:
table_name: Fully qualified table name to export
filepath: Path where to create the zip file(s) with the exported data
temp_dir: Temporary directory to export
split_large_files: Split large files to multiple pieces and create multiple zip files
with -partXYZ postfix in the filename. (Default: False)
split_file_chunk_size_mb: File chunk sizes if `split_large_files` enabled. (Default: 1000)
split_file_max_chunks: Max number of chunks if `split_large_files` enabled. (Default: 20)
compress: Flag to indicate whether to compress export files
"""
table_dict = utils.tablename_to_dict(table_name, '.')
if table_dict['table_name'] not in self.database.list_collection_names():
raise TableNotFoundError(f'{table_name} table not found!')
export_file_path = self._export_collection(temp_dir, table_dict['table_name'])
extracted_at = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')
write_batch_rows = self.connection_config['write_batch_rows']
exported_rows = 0
try:
gzip_splitter = split_gzip.open(
filepath,
mode='wt',
chunk_size_mb=split_file_chunk_size_mb,
max_chunks=split_file_max_chunks if split_large_files else 0,
compress=compress,
)
with gzip.open(
export_file_path, 'rb'
) as export_file, gzip_splitter as gzfile:
writer = csv.DictWriter(
gzfile,
fieldnames=[elem[0] for elem in self._get_collection_columns()],
delimiter=',',
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
)
writer.writeheader()
rows = []
LOGGER.info('Starting data processing...')
# bson.decode_file_iter will generate one document at a time from the exported file
for document in bson.decode_file_iter(export_file):
try:
rows.append(
{
'_ID': str(document['_id']),
'DOCUMENT': ujson.dumps(serialize_document(document)),
utils.SDC_EXTRACTED_AT: extracted_at,
utils.SDC_BATCHED_AT: datetime.datetime.utcnow().strftime(
'%Y-%m-%d %H:%M:%S.%f'
),
utils.SDC_DELETED_AT: None,
}
)
except TypeError:
LOGGER.error(
'TypeError encountered when processing document ID: %s',
document['_id'],
)
raise
exported_rows += 1
# writes batch to csv file and log some nice message on the progress.
if exported_rows % write_batch_rows == 0:
LOGGER.info(
'Exporting batch from %s to %s rows from %s...',
(exported_rows - write_batch_rows),
exported_rows,
table_name,
)
writer.writerows(rows)
rows.clear()
# write rows one last time
if rows:
LOGGER.info('Exporting last batch ...')
writer.writerows(rows)
rows.clear()
finally:
# whether the code in try succeeds or fails
# make sure to delete the exported file
os.remove(export_file_path)
LOGGER.info('Exported total of %s rows from %s...', exported_rows, table_name)
@staticmethod
def _get_collection_columns() -> Tuple:
"""
Get predefined table/collection column details
"""
return (
('_ID', 'string'),
('DOCUMENT', 'object'),
(utils.SDC_EXTRACTED_AT, 'datetime'),
(utils.SDC_BATCHED_AT, 'datetime'),
(utils.SDC_DELETED_AT, 'string'),
)
def fetch_current_log_pos(self) -> Dict:
"""
Finds and returns the latest ChangeStream token.
The LOG_BASED method uses change streams.
MongoDB doesn't have any built-in feature to get the most recent token atm,
so a workaround is to start a cursor, grab the first token it returns then exit.
Returns: token
"""
token = None
with self.database.watch(max_await_time_ms=1000) as cursor:
while cursor.alive:
_ = cursor.try_next()
token = cursor.resume_token
if token is not None:
break
# Token can look like:
# {'_data': 'A_LONG_HEX_DECIMAL_STRING'}
# or {'_data': 'A_LONG_HEX_DECIMAL_STRING', '_typeBits': b'SOME_HEX'}
# https://github.com/mongodb/mongo/blob/master/src/mongo/db/pipeline/resume_token.cpp#L82-L96
# Get the '_data' only from resume token
# token can contain a property '_typeBits' of type bytes which cannot be json
# serialized when saving the state in the function 'utils.save_state_file'.
# '_data' is enough to resume LOG_BASED Singer replication after FastSync
return {'token': {'_data': token['_data']}}
# pylint: disable=invalid-name
def fetch_current_incremental_key_pos(
self, fully_qualified_table_name: str, replication_key: str
):
"""
Not implemented.
Args:
fully_qualified_table_name:
replication_key:
"""
raise NotImplementedError('INCREMENTAL method is not supported for tap-mongodb')
def map_column_types_to_target(self):
"""
Create a map of columns and their target type in addition of primary keys
Returns: dictionary
"""
mapped_columns = []
for column_name, column_type in self._get_collection_columns():
mapped_columns.append(
f'{column_name} {self.tap_type_to_target_type(column_type)}'
)
return {'columns': mapped_columns, 'primary_key': ['_ID']}
def _export_collection(self, export_dir: str, collection_name) -> str:
"""
Dumps a collection's data into a compressed bson file and returns the path
Args:
export_dir: Specifies the directory where dumped file will be
collection_name: Name of the collection to dump
Returns: Path to the file
"""
LOGGER.info('Starting export of table "%s"', collection_name)
cmd = [
'mongodump',
'--uri',
f'"{self.connection_config["connection_string"]}"',
'--forceTableScan',
'--gzip',
'-c',
collection_name,
'-o',
export_dir,
]
return_code = subprocess.call(cmd)
LOGGER.debug('Export command return code %s', return_code)
if return_code != 0:
raise ExportError(f'Export failed with code {return_code}')
# mongodump creates two files "{collection_name}.metadata.json.gz" & "{collection_name}.bson.gz"
# we are only interested in the latter so we delete the former.
os.remove(
os.path.join(
export_dir,
self.connection_config['database'],
f'{collection_name}.metadata.json.gz',
)
)
return os.path.join(
export_dir, self.connection_config['database'], f'{collection_name}.bson.gz'
)
|
the-stack_0_22870 | # Copyright (c) 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import os.path
from collections import namedtuple
import requests
import requests_unixsocket
from six.moves.urllib import parse
try:
from ws4py.client import WebSocketBaseClient
_ws4py_installed = True
except ImportError: # pragma: no cover
WebSocketBaseClient = object
_ws4py_installed = False
from pylxd import exceptions, managers
requests_unixsocket.monkeypatch()
LXD_PATH = '.config/lxc/'
SNAP_ROOT = os.path.expanduser('~/snap/lxd/current/')
APT_ROOT = os.path.expanduser('~/')
CERT_FILE_NAME = 'client.crt'
KEY_FILE_NAME = 'client.key'
# check that the cert file and key file exist at the appropriate path
if os.path.exists(os.path.join(
SNAP_ROOT, LXD_PATH, CERT_FILE_NAME)): # pragma: no cover
CERTS_PATH = os.path.join(SNAP_ROOT, LXD_PATH) # pragma: no cover
else: # pragma: no cover
CERTS_PATH = os.path.join(APT_ROOT, LXD_PATH) # pragma: no cover
Cert = namedtuple('Cert', ['cert', 'key']) # pragma: no cover
DEFAULT_CERTS = Cert(
cert=os.path.expanduser(os.path.join(CERTS_PATH, CERT_FILE_NAME)),
key=os.path.expanduser(os.path.join(CERTS_PATH, KEY_FILE_NAME))
) # pragma: no cover
class _APINode(object):
"""An api node object."""
def __init__(self, api_endpoint, cert=None, verify=True, timeout=None):
self._api_endpoint = api_endpoint
self._timeout = timeout
if self._api_endpoint.startswith('http+unix://'):
self.session = requests_unixsocket.Session()
else:
self.session = requests.Session()
self.session.cert = cert
self.session.verify = verify
def __getattr__(self, name):
"""Converts attribute lookup into the next /<segment> of an api
url.
:param name: the next segment
:type name: str
:returns: new _APINode with /<name> on the end
:rtype: _APINode
"""
# Special case for storage_pools which needs to become 'storage-pools'
if name == 'storage_pools':
name = 'storage-pools'
return self.__class__('{}/{}'.format(self._api_endpoint, name),
cert=self.session.cert,
verify=self.session.verify)
def __getitem__(self, item):
"""This converts python api.thing[name] -> ".../thing/name"
:param item: the 'thing' in the square brackets in a Python expression.
:type item: str
:returns: A new _APINode(with the new item tagged on as /<item>
:rtype: _APINode
"""
return self.__class__('{}/{}'.format(self._api_endpoint, item),
cert=self.session.cert,
verify=self.session.verify,
timeout=self._timeout)
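# Comment only: together, __getattr__ and __getitem__ turn attribute/item access into URL
# segments, e.g. client.api.containers['test'] addresses ".../1.0/containers/test" and
# client.api.storage_pools addresses ".../1.0/storage-pools" (note the underscore-to-dash
# special case above).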
def _assert_response(self, response, allowed_status_codes=(200,),
stream=False, is_api=True):
"""Assert properties of the response.
LXD's API clearly defines specific responses. If the API
response is something unexpected (i.e. an error), then
we need to raise an exception and have the call points
handle the errors or just let the issue be raised to the
user.
"""
if response.status_code not in allowed_status_codes:
if response.status_code == 404:
raise exceptions.NotFound(response)
raise exceptions.LXDAPIException(response)
# In the case of streaming, we can't validate the json the way we
# would with normal HTTP responses, so just ignore that entirely.
# Likewise, we can ignore non-API calls, as their contents don't need
# to be validated.
if stream or not is_api:
return
try:
data = response.json()
except ValueError:
# Not a JSON response
return
if response.status_code == 200:
# Synchronous request
try:
if data['type'] != 'sync':
raise exceptions.LXDAPIException(response)
except KeyError:
# Missing 'type' in response
raise exceptions.LXDAPIException(response)
@property
def scheme(self):
return parse.urlparse(self.api._api_endpoint).scheme
@property
def netloc(self):
return parse.urlparse(self.api._api_endpoint).netloc
def get(self, *args, **kwargs):
"""Perform an HTTP GET.
Note if 'is_api' is passed in the kwargs then it is popped and used to
determine whether the get is an API call or a raw call.
This is for py27 compatibility.
"""
is_api = kwargs.pop('is_api', True)
kwargs['timeout'] = kwargs.get('timeout', self._timeout)
response = self.session.get(self._api_endpoint, *args, **kwargs)
self._assert_response(response,
stream=kwargs.get('stream', False),
is_api=is_api)
return response
def post(self, *args, **kwargs):
"""Perform an HTTP POST."""
kwargs['timeout'] = kwargs.get('timeout', self._timeout)
target = kwargs.pop("target", None)
if target is not None:
params = kwargs.get("params", {})
params["target"] = target
kwargs["params"] = params
response = self.session.post(self._api_endpoint, *args, **kwargs)
# Prior to LXD 2.0.3, successful synchronous requests returned 200,
# rather than 201.
self._assert_response(response, allowed_status_codes=(200, 201, 202))
return response
def put(self, *args, **kwargs):
"""Perform an HTTP PUT."""
kwargs['timeout'] = kwargs.get('timeout', self._timeout)
response = self.session.put(self._api_endpoint, *args, **kwargs)
self._assert_response(response, allowed_status_codes=(200, 202))
return response
def patch(self, *args, **kwargs):
"""Perform an HTTP PATCH."""
kwargs['timeout'] = kwargs.get('timeout', self._timeout)
response = self.session.patch(self._api_endpoint, *args, **kwargs)
self._assert_response(response, allowed_status_codes=(200, 202))
return response
def delete(self, *args, **kwargs):
"""Perform an HTTP delete."""
kwargs['timeout'] = kwargs.get('timeout', self._timeout)
response = self.session.delete(self._api_endpoint, *args, **kwargs)
self._assert_response(response, allowed_status_codes=(200, 202))
return response
class _WebsocketClient(WebSocketBaseClient):
"""A basic websocket client for the LXD API.
This client is intentionally barebones, and serves
as a simple default. It simply connects and saves
all json messages to a messages attribute, which can
then be read are parsed.
"""
def handshake_ok(self):
self.messages = []
def received_message(self, message):
json_message = json.loads(message.data.decode('utf-8'))
self.messages.append(json_message)
class Client(object):
"""Client class for LXD REST API.
This client wraps all the functionality required to interact with
LXD, and is meant to be the sole entry point.
.. attribute:: containers
Instance of :class:`Client.Containers
<pylxd.client.Client.Containers>`:
.. attribute:: images
Instance of :class:`Client.Images <pylxd.client.Client.Images>`.
.. attribute:: operations
Instance of :class:`Client.Operations
<pylxd.client.Client.Operations>`.
.. attribute:: profiles
Instance of :class:`Client.Profiles <pylxd.client.Client.Profiles>`.
.. attribute:: api
This attribute provides tree traversal syntax to LXD's REST API for
lower-level interaction.
Use the name of the url part as attribute or item of an api object to
create another api object appended with the new url part name, ie:
>>> api = Client().api
# /
>>> response = api.get()
# Check status code and response
>>> print response.status_code, response.json()
# /containers/test/
>>> print api.containers['test'].get().json()
"""
def __init__(
self, endpoint=None, version='1.0', cert=None, verify=True,
timeout=None):
"""Constructs a LXD client
:param endpoint: (optional): endpoint can be an http endpoint or
a path to a unix socket.
:param version: (optional): API version string to use with LXD
:param cert: (optional): A tuple of (cert, key) to use with
the http socket for client authentication
:param verify: (optional): Either a boolean, in which case it controls
whether we verify the server's TLS certificate, or a string, in
which case it must be a path to a CA bundle to use.
Defaults to ``True``.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
"""
self.cert = cert
if endpoint is not None:
if endpoint.startswith('/') and os.path.isfile(endpoint):
self.api = _APINode('http+unix://{}'.format(
parse.quote(endpoint, safe='')), timeout=timeout)
else:
# Extra trailing slashes cause LXD to 301
endpoint = endpoint.rstrip('/')
if cert is None and (
os.path.exists(DEFAULT_CERTS.cert) and
os.path.exists(DEFAULT_CERTS.key)):
cert = DEFAULT_CERTS
self.api = _APINode(
endpoint, cert=cert, verify=verify, timeout=timeout)
else:
if 'LXD_DIR' in os.environ:
path = os.path.join(os.environ.get('LXD_DIR'), 'unix.socket')
elif os.path.exists('/var/lib/lxd/unix.socket'):
path = '/var/lib/lxd/unix.socket'
else:
path = '/var/snap/lxd/common/lxd/unix.socket'
endpoint = 'http+unix://{}'.format(parse.quote(path, safe=''))
self.api = _APINode(endpoint, timeout=timeout)
self.api = self.api[version]
# Verify the connection is valid.
try:
response = self.api.get()
if response.status_code != 200:
raise exceptions.ClientConnectionFailed()
self.host_info = response.json()['metadata']
except (requests.exceptions.ConnectionError,
requests.exceptions.InvalidURL):
raise exceptions.ClientConnectionFailed()
self.cluster = managers.ClusterManager(self)
self.certificates = managers.CertificateManager(self)
self.containers = managers.ContainerManager(self)
self.images = managers.ImageManager(self)
self.networks = managers.NetworkManager(self)
self.operations = managers.OperationManager(self)
self.profiles = managers.ProfileManager(self)
self.storage_pools = managers.StoragePoolManager(self)
@property
def trusted(self):
return self.host_info['auth'] == 'trusted'
def has_api_extension(self, name):
"""Return True if the `name` api extension exists.
:param name: the api_extension to look for.
:type name: str
:returns: True if extension exists
:rtype: bool
"""
return name in self.host_info['api_extensions']
def assert_has_api_extension(self, name):
"""Asserts that the `name` api_extension exists.
If not, then is raises the LXDAPIExtensionNotAvailable error.
:param name: the api_extension to test for
:type name: str
:returns: None
:raises: :class:`pylxd.exceptions.LXDAPIExtensionNotAvailable`
"""
if not self.has_api_extension(name):
raise exceptions.LXDAPIExtensionNotAvailable(name)
def authenticate(self, password):
if self.trusted:
return
cert = open(self.api.session.cert[0]).read().encode('utf-8')
self.certificates.create(password, cert)
# Refresh the host info
response = self.api.get()
self.host_info = response.json()['metadata']
@property
def websocket_url(self):
if self.api.scheme in ('http', 'https'):
host = self.api.netloc
if self.api.scheme == 'http':
scheme = 'ws'
else:
scheme = 'wss'
else:
scheme = 'ws+unix'
host = parse.unquote(self.api.netloc)
url = parse.urlunparse((scheme, host, '', '', '', ''))
return url
def events(self, websocket_client=None):
"""Get a websocket client for getting events.
/events is a websocket url, and so must be handled differently than
most other LXD API endpoints. This method returns
a client that can be interacted with like any
regular python socket.
An optional `websocket_client` parameter can be
specified for implementation-specific handling
of events as they occur.
"""
if not _ws4py_installed:
raise ValueError(
'This feature requires the optional ws4py library.')
if websocket_client is None:
websocket_client = _WebsocketClient
client = websocket_client(self.websocket_url)
parsed = parse.urlparse(self.api.events._api_endpoint)
client.resource = parsed.path
return client
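# Hedged usage sketch (requires the optional ws4py dependency; variable names are illustrative):
#   client = Client()
#   ws = client.events()
#   ws.connect()    # standard ws4py WebSocketBaseClient call
#   ...             # received events accumulate as parsed JSON dicts in ws.messages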
|
the-stack_0_22874 | #
# Copyright 2015 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import os
import pkg_resources
import yaml
from oslo_config import cfg
from oslo_log import log
from oslo_utils import netutils
import six
from ceilometer.agent import plugin_base
from ceilometer.hardware import inspector as insloader
from ceilometer.hardware.pollsters import util
from ceilometer.i18n import _LE, _LI, _LW
from ceilometer import sample
OPTS = [
cfg.StrOpt('meter_definitions_file',
default="snmp.yaml",
help="Configuration file for defining hardware snmp meters."
),
]
cfg.CONF.register_opts(OPTS, group='hardware')
LOG = log.getLogger(__name__)
class MeterDefinitionException(Exception):
def __init__(self, message, definition_cfg):
super(MeterDefinitionException, self).__init__(message)
self.message = message
self.definition_cfg = definition_cfg
def __str__(self):
return '%s %s: %s' % (self.__class__.__name__,
self.definition_cfg, self.message)
class MeterDefinition(object):
required_fields = ['name', 'unit', 'type']
def __init__(self, definition_cfg):
self.cfg = definition_cfg
for fname, fval in self.cfg.items():
if (isinstance(fname, six.string_types) and
(fname in self.required_fields or
fname.endswith('_inspector'))):
setattr(self, fname, fval)
else:
LOG.warning(_LW("Ignore unrecognized field %s"), fname)
for fname in self.required_fields:
if not getattr(self, fname, None):
raise MeterDefinitionException(
_LE("Missing field %s") % fname, self.cfg)
if self.type not in sample.TYPES:
raise MeterDefinitionException(
_LE("Unrecognized type value %s") % self.type, self.cfg)
class GenericHardwareDeclarativePollster(plugin_base.PollsterBase):
CACHE_KEY = 'hardware.generic'
mapping = None
def __init__(self):
super(GenericHardwareDeclarativePollster, self).__init__()
self.inspectors = {}
def _update_meter_definition(self, definition):
self.meter_definition = definition
self.cached_inspector_params = {}
@property
def default_discovery(self):
return 'tripleo_overcloud_nodes'
@staticmethod
def _parse_resource(res):
"""Parse resource from discovery.
Either URL can be given or dict. Dict has to contain at least
keys 'resource_id' and 'resource_url', all the dict keys will be stored
as metadata.
:param res: URL or dict containing all resource info.
:return parsed_url, resource_id, metadata: Returns parsed URL used for
SNMP query, unique identifier of the resource and metadata
of the resource.
"""
parsed_url, resource_id, metadata = (None, None, None)
if isinstance(res, dict):
if 'resource_url' not in res or 'resource_id' not in res:
LOG.error(_LE('Passed resource dict must contain keys '
'resource_id and resource_url.'))
else:
metadata = res
parsed_url = netutils.urlsplit(res['resource_url'])
resource_id = res['resource_id']
else:
metadata = {}
parsed_url = netutils.urlsplit(res)
resource_id = res
return parsed_url, resource_id, metadata
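# Illustration only (hypothetical resources): both accepted forms behave as follows.
#   _parse_resource('snmp://10.0.0.1')
#     -> (urlsplit('snmp://10.0.0.1'), 'snmp://10.0.0.1', {})
#   _parse_resource({'resource_id': 'node-1', 'resource_url': 'snmp://10.0.0.1', 'rack': 'r3'})
#     -> (urlsplit('snmp://10.0.0.1'), 'node-1', the whole dict as metadata)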
def _get_inspector(self, parsed_url):
if parsed_url.scheme not in self.inspectors:
try:
driver = insloader.get_inspector(parsed_url)
self.inspectors[parsed_url.scheme] = driver
except Exception as err:
LOG.exception(_LE("Cannot load inspector %(name)s: %(err)s"),
dict(name=parsed_url.scheme,
err=err))
raise err
return self.inspectors[parsed_url.scheme]
def get_samples(self, manager, cache, resources=None):
"""Return an iterable of Sample instances from polling the resources.
:param manager: The service manager invoking the plugin
:param cache: A dictionary for passing data between plugins
:param resources: end point to poll data from
"""
resources = resources or []
h_cache = cache.setdefault(self.CACHE_KEY, {})
sample_iters = []
# Get the meter identifiers to poll
identifier = self.meter_definition.name
for resource in resources:
parsed_url, res, extra_metadata = self._parse_resource(resource)
if parsed_url is None:
LOG.error(_LE("Skip invalid resource %s"), resource)
continue
ins = self._get_inspector(parsed_url)
try:
# Call hardware inspector to poll for the data
i_cache = h_cache.setdefault(res, {})
# Prepare inspector parameters and cache it for performance
param_key = parsed_url.scheme + '.' + identifier
inspector_param = self.cached_inspector_params.get(param_key)
if not inspector_param:
param = getattr(self.meter_definition,
parsed_url.scheme + '_inspector', {})
inspector_param = ins.prepare_params(param)
self.cached_inspector_params[param_key] = inspector_param
if identifier not in i_cache:
i_cache[identifier] = list(ins.inspect_generic(
host=parsed_url,
cache=i_cache,
extra_metadata=extra_metadata,
param=inspector_param))
# Generate samples
if i_cache[identifier]:
sample_iters.append(self.generate_samples(
parsed_url,
i_cache[identifier]))
except Exception as err:
LOG.exception(_LE('inspector call failed for %(ident)s '
'host %(host)s: %(err)s'),
dict(ident=identifier,
host=parsed_url.hostname,
err=err))
return itertools.chain(*sample_iters)
def generate_samples(self, host_url, data):
"""Generate a list of Sample from the data returned by inspector
:param host_url: host url of the endpoint
:param data: list of data returned by the corresponding inspector
"""
samples = []
definition = self.meter_definition
for (value, metadata, extra) in data:
s = util.make_sample_from_host(host_url,
name=definition.name,
sample_type=definition.type,
unit=definition.unit,
volume=value,
res_metadata=metadata,
extra=extra,
name_prefix=None)
samples.append(s)
return samples
@classmethod
def build_pollsters(cls):
if not cls.mapping:
cls.mapping = load_definition(setup_meters_config())
pollsters = []
for name in cls.mapping:
pollster = cls()
pollster._update_meter_definition(cls.mapping[name])
pollsters.append((name, pollster))
return pollsters
def get_config_file():
config_file = cfg.CONF.hardware.meter_definitions_file
if not os.path.exists(config_file):
config_file = cfg.CONF.find_file(config_file)
if not config_file:
config_file = pkg_resources.resource_filename(
__name__, "data/snmp.yaml")
return config_file
def setup_meters_config():
"""load the meters definitions from yaml config file."""
config_file = get_config_file()
LOG.debug("Hardware snmp meter definition file: %s" % config_file)
with open(config_file) as cf:
config = cf.read()
try:
meters_config = yaml.safe_load(config)
except yaml.YAMLError as err:
if hasattr(err, 'problem_mark'):
mark = err.problem_mark
errmsg = (_LE("Invalid YAML syntax in Meter Definitions file "
"%(file)s at line: %(line)s, column: %(column)s.")
% dict(file=config_file,
line=mark.line + 1,
column=mark.column + 1))
else:
errmsg = (_LE("YAML error reading Meter Definitions file "
"%(file)s")
% dict(file=config_file))
LOG.error(errmsg)
raise
LOG.info(_LI("Meter Definitions: %s") % meters_config)
return meters_config
def load_definition(config_def):
mappings = {}
for meter_def in config_def.get('metric', []):
meter = MeterDefinition(meter_def)
mappings[meter.name] = meter
return mappings
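# Hedged sketch of the expected YAML shape (field names follow MeterDefinition above; the
# concrete meter and the inspector block contents are illustrative only):
#   metric:
#     - name: hardware.cpu.load.1min
#       unit: process
#       type: gauge
#       snmp_inspector:
#         ...   # scheme-specific parameters handed to inspector.prepare_params()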
|
the-stack_0_22875 | import argparse
import os
from random import choice
import matplotlib.pyplot as plt
from PIL import Image
from inference import inference
def main(opt):
""" main function """
print(opt.input_image)
if opt.input_image:
visualize(opt.input_image, opt.model, opt.cuda)
else:
test_img_dir = 'dataset/BSDS300/images/test/'
train_img_dir = 'dataset/BSDS300/images/train/'
test_images = os.listdir(test_img_dir)
train_images = os.listdir(train_img_dir)
all_images = [test_img_dir + img for img in test_images] + [train_img_dir + img for img in train_images]
# Ctrl + c to stop
while True:
input_image = choice(all_images)
visualize(input_image, opt.model, opt.cuda)
def visualize(input_image, model, cuda):
input_image = Image.open(input_image)
output_image = inference(input_image, model, cuda=cuda)
print(f'input image size: {input_image.size}')
print(f'output image size: {output_image.size}')
f, axarr = plt.subplots(1, 2)
axarr[0].imshow(input_image)
axarr[1].imshow(output_image)
plt.show()
if __name__ == '__main__':
# Training settings
parser = argparse.ArgumentParser(description='Compare input and output result')
parser.add_argument('--input_image', type=str, default='', help='input image to use')
parser.add_argument('--model', type=str, required=True, help='model file to use')
parser.add_argument('--cuda', action='store_true', help='use cuda')
opt = parser.parse_args()
print(opt)
main(opt)
|
the-stack_0_22876 | #
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from collections import OrderedDict
from functools import partial
import empyrical as ep
import numpy as np
import pandas as pd
import scipy as sp
import scipy.stats as stats
from sklearn import linear_model
from .deprecate import deprecated
from .interesting_periods import PERIODS
from .txn import get_turnover
from .utils import APPROX_BDAYS_PER_MONTH, APPROX_BDAYS_PER_YEAR
from .utils import DAILY
DEPRECATION_WARNING = ("Risk functions in pyfolio.timeseries are deprecated "
"and will be removed in a future release. Please "
"install the empyrical package instead.")
def var_cov_var_normal(P, c, mu=0, sigma=1):
"""
Variance-covariance calculation of daily Value-at-Risk in a
portfolio.
Parameters
----------
P : float
Portfolio value.
c : float
Confidence level.
mu : float, optional
Mean of the (daily) return distribution.
sigma : float, optional
Standard deviation of the (daily) return distribution.
Returns
-------
float
Estimated Value-at-Risk.
"""
alpha = sp.stats.norm.ppf(1 - c, mu, sigma)
return P - P * (alpha + 1)
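# Hedged example (illustrative numbers): the daily 99% VaR of a $1,000,000 portfolio with
# daily mean 0 and daily volatility 1% is roughly
#   var_cov_var_normal(1e6, 0.99, mu=0.0, sigma=0.01)   # ~= 23,263
# since norm.ppf(0.01, 0, 0.01) ~= -0.02326 and the expression above reduces to -P * alpha.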
@deprecated(msg=DEPRECATION_WARNING)
def max_drawdown(returns):
"""
Determines the maximum drawdown of a strategy.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
Returns
-------
float
Maximum drawdown.
Note
-----
See https://en.wikipedia.org/wiki/Drawdown_(economics) for more details.
"""
return ep.max_drawdown(returns)
@deprecated(msg=DEPRECATION_WARNING)
def annual_return(returns, period=DAILY):
"""
Determines the mean annual growth rate of returns.
Parameters
----------
returns : pd.Series
Periodic returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing. Can be 'monthly', 'weekly', or 'daily'.
- Defaults to 'daily'.
Returns
-------
float
Annual Return as CAGR (Compounded Annual Growth Rate).
"""
return ep.annual_return(returns, period=period)
@deprecated(msg=DEPRECATION_WARNING)
def annual_volatility(returns, period=DAILY):
"""
Determines the annual volatility of a strategy.
Parameters
----------
returns : pd.Series
Periodic returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing volatility. Can be 'monthly' or 'weekly' or 'daily'.
- Defaults to 'daily'.
Returns
-------
float
Annual volatility.
"""
return ep.annual_volatility(returns, period=period)
@deprecated(msg=DEPRECATION_WARNING)
def calmar_ratio(returns, period=DAILY):
"""
Determines the Calmar ratio, or drawdown ratio, of a strategy.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing. Can be 'monthly', 'weekly', or 'daily'.
- Defaults to 'daily'.
Returns
-------
float
Calmar ratio (drawdown ratio) as float. Returns np.nan if there is no
calmar ratio.
Note
-----
See https://en.wikipedia.org/wiki/Calmar_ratio for more details.
"""
return ep.calmar_ratio(returns, period=period)
@deprecated(msg=DEPRECATION_WARNING)
def omega_ratio(returns, annual_return_threshhold=0.0):
"""
Determines the Omega ratio of a strategy.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
annual_return_threshold : float, optional
Minimum acceptable return of the investor. Annual threshold over which
returns are considered positive or negative. It is converted to a
value appropriate for the period of the returns for this ratio.
E.g. An annual minimum acceptable return of 100 translates to a daily
minimum acceptable return of 0.01848.
(1 + 100) ** (1. / 252) - 1 = 0.01848
Daily returns must exceed this value to be considered positive. The
daily return yields the desired annual return when compounded over
the average number of business days in a year.
(1 + 0.01848) ** 252 - 1 = 99.93
- Defaults to 0.0
Returns
-------
float
Omega ratio.
Note
-----
See https://en.wikipedia.org/wiki/Omega_ratio for more details.
"""
return ep.omega_ratio(returns,
required_return=annual_return_threshhold)
@deprecated(msg=DEPRECATION_WARNING)
def sortino_ratio(returns, required_return=0, period=DAILY):
"""
Determines the Sortino ratio of a strategy.
Parameters
----------
returns : pd.Series or pd.DataFrame
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
required_return: float / series
minimum acceptable return
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing. Can be 'monthly', 'weekly', or 'daily'.
- Defaults to 'daily'.
Returns
-------
depends on input type
series ==> float
DataFrame ==> np.array
Annualized Sortino ratio.
"""
return ep.sortino_ratio(returns, required_return=required_return, period=period)
@deprecated(msg=DEPRECATION_WARNING)
def downside_risk(returns, required_return=0, period=DAILY):
"""
Determines the downside deviation below a threshold
Parameters
----------
returns : pd.Series or pd.DataFrame
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
required_return: float / series
minimum acceptable return
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing. Can be 'monthly', 'weekly', or 'daily'.
- Defaults to 'daily'.
Returns
-------
depends on input type
series ==> float
DataFrame ==> np.array
Annualized downside deviation
"""
return ep.downside_risk(returns,
required_return=required_return,
period=period)
@deprecated(msg=DEPRECATION_WARNING)
def sharpe_ratio(returns, risk_free=0, period=DAILY):
"""
Determines the Sharpe ratio of a strategy.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
risk_free : int, float
Constant risk-free return throughout the period.
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing. Can be 'monthly', 'weekly', or 'daily'.
- Defaults to 'daily'.
Returns
-------
float
Sharpe ratio.
np.nan
If insufficient length of returns or if adjusted returns are 0.
Note
-----
See https://en.wikipedia.org/wiki/Sharpe_ratio for more details.
"""
return ep.sharpe_ratio(returns, risk_free=risk_free, period=period)
@deprecated(msg=DEPRECATION_WARNING)
def alpha_beta(returns, factor_returns):
"""
Calculates both alpha and beta.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
factor_returns : pd.Series
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- This is in the same style as returns.
Returns
-------
float
Alpha.
float
Beta.
"""
return ep.alpha_beta(returns, factor_returns=factor_returns)
@deprecated(msg=DEPRECATION_WARNING)
def alpha(returns, factor_returns):
"""
Calculates annualized alpha.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
factor_returns : pd.Series
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- This is in the same style as returns.
Returns
-------
float
Alpha.
"""
return ep.alpha(returns, factor_returns=factor_returns)
@deprecated(msg=DEPRECATION_WARNING)
def beta(returns, factor_returns):
"""
Calculates beta.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
factor_returns : pd.Series
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- This is in the same style as returns.
Returns
-------
float
Beta.
"""
return ep.beta(returns, factor_returns)
@deprecated(msg=DEPRECATION_WARNING)
def stability_of_timeseries(returns):
"""
Determines R-squared of a linear fit to the cumulative
log returns. Computes an ordinary least squares linear fit,
and returns R-squared.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
Returns
-------
float
R-squared.
"""
return ep.stability_of_timeseries(returns)
@deprecated(msg=DEPRECATION_WARNING)
def tail_ratio(returns):
"""
Determines the ratio between the right (95%) and left tail (5%).
For example, a ratio of 0.25 means that losses are four times
as bad as profits.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
Returns
-------
float
tail ratio
"""
return ep.tail_ratio(returns)
def common_sense_ratio(returns):
"""
Common sense ratio is the multiplication of the tail ratio and the
Gain-to-Pain-Ratio -- sum(profits) / sum(losses).
See http://bit.ly/1ORzGBk for more information on the motivation for
this metric.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
Returns
-------
float
common sense ratio
"""
return ep.tail_ratio(returns) * \
(1 + ep.annual_return(returns))
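# Worked illustration (made-up figures): a strategy with a tail ratio of 1.2 and an annual
# return of 10% has a common sense ratio of 1.2 * (1 + 0.10) = 1.32.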
def normalize(returns, starting_value=1):
"""
Normalizes a returns timeseries based on the first value.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
starting_value : float, optional
The starting returns (default 1).
Returns
-------
pd.Series
Normalized returns.
"""
return starting_value * (returns / returns.iloc[0])
@deprecated(msg=DEPRECATION_WARNING)
def cum_returns(returns, starting_value=0):
"""
Compute cumulative returns from simple returns.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
starting_value : float, optional
        The starting value of the returns (default 0).
Returns
-------
pandas.Series
Series of cumulative returns.
Notes
-----
For increased numerical accuracy, convert input to log returns
where it is possible to sum instead of multiplying.
"""
return ep.cum_returns(returns, starting_value=starting_value)
@deprecated(msg=DEPRECATION_WARNING)
def aggregate_returns(returns, convert_to):
"""
Aggregates returns by week, month, or year.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
convert_to : str
Can be 'weekly', 'monthly', or 'yearly'.
Returns
-------
pd.Series
Aggregated returns.
"""
return ep.aggregate_returns(returns, convert_to=convert_to)
def rolling_beta(returns, factor_returns,
rolling_window=APPROX_BDAYS_PER_MONTH * 6):
"""
Determines the rolling beta of a strategy.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series or pd.DataFrame
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- If DataFrame is passed, computes rolling beta for each column.
- This is in the same style as returns.
rolling_window : int, optional
The size of the rolling window, in days, over which to compute
beta (default 6 months).
Returns
-------
pd.Series
Rolling beta.
Note
-----
See https://en.wikipedia.org/wiki/Beta_(finance) for more details.
"""
if factor_returns.ndim > 1:
# Apply column-wise
return factor_returns.apply(partial(rolling_beta, returns),
rolling_window=rolling_window)
else:
out = pd.Series(index=returns.index)
for beg, end in zip(returns.index[0:-rolling_window],
returns.index[rolling_window:]):
out.loc[end] = ep.beta(
returns.loc[beg:end],
factor_returns.loc[beg:end])
return out
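def _example_rolling_beta_usage():
    # Illustrative sketch, not part of the original pyfolio API: shows the two
    # accepted shapes for `factor_returns`. The synthetic series and the
    # two-factor DataFrame below are assumptions made only for this demo.
    import numpy as np
    import pandas as pd
    idx = pd.date_range("2019-01-01", periods=500, freq="B")
    strat = pd.Series(np.random.normal(0.0005, 0.010, len(idx)), index=idx)
    bench = pd.Series(np.random.normal(0.0004, 0.009, len(idx)), index=idx)
    factors = pd.DataFrame({"mkt": bench, "other": bench * 0.5})
    beta_series = rolling_beta(strat, bench)    # pd.Series of rolling betas
    beta_frame = rolling_beta(strat, factors)   # one beta column per factor
    return beta_series, beta_frame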
def rolling_regression(returns, factor_returns,
rolling_window=APPROX_BDAYS_PER_MONTH * 6,
nan_threshold=0.1):
"""
Computes rolling factor betas using a multivariate linear regression
    (separate linear regressions are problematic because the factors may be
confounded).
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.DataFrame
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- Computes rolling beta for each column.
- This is in the same style as returns.
rolling_window : int, optional
The days window over which to compute the beta. Defaults to 6 months.
nan_threshold : float, optional
If there are more than this fraction of NaNs, the rolling regression
for the given date will be skipped.
Returns
-------
pandas.DataFrame
DataFrame containing rolling beta coefficients to SMB, HML and UMD
"""
# We need to drop NaNs to regress
ret_no_na = returns.dropna()
columns = ['alpha'] + factor_returns.columns.tolist()
rolling_risk = pd.DataFrame(columns=columns,
index=ret_no_na.index)
rolling_risk.index.name = 'dt'
for beg, end in zip(ret_no_na.index[:-rolling_window],
ret_no_na.index[rolling_window:]):
returns_period = ret_no_na[beg:end]
factor_returns_period = factor_returns.loc[returns_period.index]
        if np.all(factor_returns_period.isnull().mean() < nan_threshold):
factor_returns_period_dnan = factor_returns_period.dropna()
reg = linear_model.LinearRegression(fit_intercept=True).fit(
factor_returns_period_dnan,
returns_period.loc[factor_returns_period_dnan.index])
rolling_risk.loc[end, factor_returns.columns] = reg.coef_
rolling_risk.loc[end, 'alpha'] = reg.intercept_
return rolling_risk
def gross_lev(positions):
"""
Calculates the gross leverage of a strategy.
Parameters
----------
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
Returns
-------
pd.Series
Gross leverage.
"""
exposure = positions.drop('cash', axis=1).abs().sum(axis=1)
return exposure / positions.sum(axis=1)
def value_at_risk(returns, period=None, sigma=2.0):
"""
Get value at risk (VaR).
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
period : str, optional
Period over which to calculate VaR. Set to 'weekly',
'monthly', or 'yearly', otherwise defaults to period of
returns (typically daily).
sigma : float, optional
Standard deviations of VaR, default 2.
"""
if period is not None:
returns_agg = ep.aggregate_returns(returns, period)
else:
returns_agg = returns.copy()
value_at_risk = returns_agg.mean() - sigma * returns_agg.std()
return value_at_risk
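def _example_value_at_risk_usage():
    # Illustrative sketch, not part of the original pyfolio API: computes the
    # default daily VaR and a weekly VaR for a synthetic return series. The
    # return distribution used here is an assumption for demonstration only.
    import numpy as np
    import pandas as pd
    rets = pd.Series(np.random.normal(0.0005, 0.01, 252),
                     index=pd.date_range("2020-01-01", periods=252, freq="B"))
    daily_var = value_at_risk(rets)                    # mean - 2 * std of daily returns
    weekly_var = value_at_risk(rets, period="weekly")  # aggregate to weekly first
    return daily_var, weekly_var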
SIMPLE_STAT_FUNCS = [
ep.annual_return,
ep.cum_returns_final,
ep.annual_volatility,
ep.sharpe_ratio,
ep.calmar_ratio,
ep.stability_of_timeseries,
ep.max_drawdown,
ep.omega_ratio,
ep.sortino_ratio,
stats.skew,
stats.kurtosis,
ep.tail_ratio,
value_at_risk
]
FACTOR_STAT_FUNCS = [
ep.alpha,
ep.beta,
]
STAT_FUNC_NAMES = {
'annual_return': 'Annual return',
'cum_returns_final': 'Cumulative returns',
'annual_volatility': 'Annual volatility',
'sharpe_ratio': 'Sharpe ratio',
'calmar_ratio': 'Calmar ratio',
'stability_of_timeseries': 'Stability',
'max_drawdown': 'Max drawdown',
'omega_ratio': 'Omega ratio',
'sortino_ratio': 'Sortino ratio',
'skew': 'Skew',
'kurtosis': 'Kurtosis',
'tail_ratio': 'Tail ratio',
'common_sense_ratio': 'Common sense ratio',
'value_at_risk': 'Daily value at risk',
'alpha': 'Alpha',
'beta': 'Beta',
}
def perf_stats(returns, factor_returns=None, positions=None,
transactions=None, turnover_denom='AGB'):
"""
Calculates various performance metrics of a strategy, for use in
plotting.show_perf_stats.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series, optional
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- This is in the same style as returns.
- If None, do not compute alpha, beta, and information ratio.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
turnover_denom : str
Either AGB or portfolio_value, default AGB.
- See full explanation in txn.get_turnover.
Returns
-------
pd.Series
Performance metrics.
"""
stats = pd.Series()
for stat_func in SIMPLE_STAT_FUNCS:
stats[STAT_FUNC_NAMES[stat_func.__name__]] = stat_func(returns)
if positions is not None:
stats['Gross leverage'] = gross_lev(positions).mean()
if transactions is not None:
stats['Daily turnover'] = get_turnover(positions,
transactions,
turnover_denom).mean()
if factor_returns is not None:
for stat_func in FACTOR_STAT_FUNCS:
res = stat_func(returns, factor_returns)
stats[STAT_FUNC_NAMES[stat_func.__name__]] = res
return stats
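def _example_perf_stats_usage():
    # Illustrative sketch, not part of the original pyfolio API: builds the
    # summary table with and without a benchmark. The synthetic series below
    # are assumptions; in practice `returns` comes from a backtest or live record.
    import numpy as np
    import pandas as pd
    idx = pd.date_range("2019-01-01", periods=252, freq="B")
    returns = pd.Series(np.random.normal(0.0005, 0.010, len(idx)), index=idx)
    benchmark = pd.Series(np.random.normal(0.0004, 0.009, len(idx)), index=idx)
    basic = perf_stats(returns)                                # simple metrics only
    with_alpha_beta = perf_stats(returns, factor_returns=benchmark)
    return basic, with_alpha_beta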
def perf_stats_bootstrap(returns, factor_returns=None, return_stats=True,
**kwargs):
"""Calculates various bootstrapped performance metrics of a strategy.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series, optional
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- This is in the same style as returns.
- If None, do not compute alpha, beta, and information ratio.
return_stats : boolean (optional)
If True, returns a DataFrame of mean, median, 5 and 95 percentiles
for each perf metric.
If False, returns a DataFrame with the bootstrap samples for
each perf metric.
Returns
-------
pd.DataFrame
if return_stats is True:
- Distributional statistics of bootstrapped sampling
distribution of performance metrics.
if return_stats is False:
- Bootstrap samples for each performance metric.
"""
bootstrap_values = OrderedDict()
for stat_func in SIMPLE_STAT_FUNCS:
stat_name = STAT_FUNC_NAMES[stat_func.__name__]
bootstrap_values[stat_name] = calc_bootstrap(stat_func,
returns)
if factor_returns is not None:
for stat_func in FACTOR_STAT_FUNCS:
stat_name = STAT_FUNC_NAMES[stat_func.__name__]
bootstrap_values[stat_name] = calc_bootstrap(
stat_func,
returns,
factor_returns=factor_returns)
bootstrap_values = pd.DataFrame(bootstrap_values)
if return_stats:
stats = bootstrap_values.apply(calc_distribution_stats)
return stats.T[['mean', 'median', '5%', '95%']]
else:
return bootstrap_values
def calc_bootstrap(func, returns, *args, **kwargs):
"""Performs a bootstrap analysis on a user-defined function returning
a summary statistic.
Parameters
----------
func : function
Function that either takes a single array (commonly returns)
or two arrays (commonly returns and factor returns) and
returns a single value (commonly a summary
statistic). Additional args and kwargs are passed as well.
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series, optional
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- This is in the same style as returns.
n_samples : int, optional
Number of bootstrap samples to draw. Default is 1000.
Increasing this will lead to more stable / accurate estimates.
Returns
-------
numpy.ndarray
Bootstrapped sampling distribution of passed in func.
"""
n_samples = kwargs.pop('n_samples', 1000)
out = np.empty(n_samples)
factor_returns = kwargs.pop('factor_returns', None)
for i in range(n_samples):
idx = np.random.randint(len(returns), size=len(returns))
returns_i = returns.iloc[idx].reset_index(drop=True)
if factor_returns is not None:
factor_returns_i = factor_returns.iloc[idx].reset_index(drop=True)
out[i] = func(returns_i, factor_returns_i,
*args, **kwargs)
else:
out[i] = func(returns_i,
*args, **kwargs)
return out
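def _example_calc_bootstrap_usage():
    # Illustrative sketch, not part of the original pyfolio API: bootstraps the
    # Sharpe ratio and summarizes its sampling distribution. The series is
    # synthetic and the 500-sample count is an assumption chosen for speed.
    import numpy as np
    import pandas as pd
    rets = pd.Series(np.random.normal(0.0005, 0.01, 252),
                     index=pd.date_range("2020-01-01", periods=252, freq="B"))
    samples = calc_bootstrap(ep.sharpe_ratio, rets, n_samples=500)
    return calc_distribution_stats(samples)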
def calc_distribution_stats(x):
"""Calculate various summary statistics of data.
Parameters
----------
x : numpy.ndarray or pandas.Series
Array to compute summary statistics for.
Returns
-------
pandas.Series
Series containing mean, median, std, as well as 5, 25, 75 and
95 percentiles of passed in values.
"""
return pd.Series({'mean': np.mean(x),
'median': np.median(x),
'std': np.std(x),
'5%': np.percentile(x, 5),
'25%': np.percentile(x, 25),
'75%': np.percentile(x, 75),
'95%': np.percentile(x, 95),
'IQR': np.subtract.reduce(
np.percentile(x, [75, 25])),
})
def get_max_drawdown_underwater(underwater):
"""
    Determines peak, valley, and recovery dates given an 'underwater'
    series.
    An underwater series is a series of precomputed rolling drawdown
    values: 0 at a high-water mark and negative while in drawdown.
Parameters
----------
underwater : pd.Series
Underwater returns (rolling drawdown) of a strategy.
Returns
-------
peak : datetime
The maximum drawdown's peak.
valley : datetime
The maximum drawdown's valley.
recovery : datetime
The maximum drawdown's recovery.
"""
    valley = underwater.index[underwater.values.argmin()]  # bottom of the drawdown
    # Peak: the last date at a high-water mark (underwater == 0) before the valley
    peak = underwater[:valley][underwater[:valley] == 0].index[-1]
    # Recovery: the first date back at a high-water mark (underwater == 0) after the valley
try:
recovery = underwater[valley:][underwater[valley:] == 0].index[0]
except IndexError:
recovery = np.nan # drawdown not recovered
return peak, valley, recovery
def get_max_drawdown(returns):
"""
Determines the maximum drawdown of a strategy.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
Returns
-------
float
Maximum drawdown.
Note
-----
See https://en.wikipedia.org/wiki/Drawdown_(economics) for more details.
"""
returns = returns.copy()
df_cum = cum_returns(returns, 1.0)
running_max = np.maximum.accumulate(df_cum)
underwater = df_cum / running_max - 1
return get_max_drawdown_underwater(underwater)
def get_top_drawdowns(returns, top=10):
"""
Finds top drawdowns, sorted by drawdown amount.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
top : int, optional
The amount of top drawdowns to find (default 10).
Returns
-------
drawdowns : list
List of drawdown peaks, valleys, and recoveries. See get_max_drawdown.
"""
returns = returns.copy()
df_cum = ep.cum_returns(returns, 1.0)
running_max = np.maximum.accumulate(df_cum)
underwater = df_cum / running_max - 1
drawdowns = []
for _ in range(top):
peak, valley, recovery = get_max_drawdown_underwater(underwater)
# Slice out draw-down period
if not pd.isnull(recovery):
underwater.drop(underwater[peak: recovery].index[1:-1],
inplace=True)
else:
# drawdown has not ended yet
underwater = underwater.loc[:peak]
drawdowns.append((peak, valley, recovery))
if ((len(returns) == 0)
or (len(underwater) == 0)
or (np.min(underwater) == 0)):
break
return drawdowns
def gen_drawdown_table(returns, top=10):
"""
Places top drawdowns in a table.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
top : int, optional
The amount of top drawdowns to find (default 10).
Returns
-------
df_drawdowns : pd.DataFrame
Information about top drawdowns.
"""
df_cum = ep.cum_returns(returns, 1.0)
drawdown_periods = get_top_drawdowns(returns, top=top)
df_drawdowns = pd.DataFrame(index=list(range(top)),
columns=['Net drawdown in %',
'Peak date',
'Valley date',
'Recovery date',
'Duration'])
for i, (peak, valley, recovery) in enumerate(drawdown_periods):
if pd.isnull(recovery):
df_drawdowns.loc[i, 'Duration'] = np.nan
else:
df_drawdowns.loc[i, 'Duration'] = len(pd.date_range(peak,
recovery,
freq='B'))
df_drawdowns.loc[i, 'Peak date'] = (peak.to_pydatetime()
.strftime('%Y-%m-%d'))
df_drawdowns.loc[i, 'Valley date'] = (valley.to_pydatetime()
.strftime('%Y-%m-%d'))
if isinstance(recovery, float):
df_drawdowns.loc[i, 'Recovery date'] = recovery
else:
df_drawdowns.loc[i, 'Recovery date'] = (recovery.to_pydatetime()
.strftime('%Y-%m-%d'))
df_drawdowns.loc[i, 'Net drawdown in %'] = (
(df_cum.loc[peak] - df_cum.loc[valley]) / df_cum.loc[peak]) * 100
df_drawdowns['Peak date'] = pd.to_datetime(df_drawdowns['Peak date'])
df_drawdowns['Valley date'] = pd.to_datetime(df_drawdowns['Valley date'])
df_drawdowns['Recovery date'] = pd.to_datetime(
df_drawdowns['Recovery date'])
return df_drawdowns
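def _example_gen_drawdown_table_usage():
    # Illustrative sketch, not part of the original pyfolio API: builds the
    # top-5 drawdown table for a synthetic return series. The series is an
    # assumption; real usage passes the strategy's daily returns.
    import numpy as np
    import pandas as pd
    rets = pd.Series(np.random.normal(0.0003, 0.012, 750),
                     index=pd.date_range("2018-01-01", periods=750, freq="B"))
    # Columns: 'Net drawdown in %', 'Peak date', 'Valley date',
    # 'Recovery date', 'Duration'
    return gen_drawdown_table(rets, top=5)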
def rolling_volatility(returns, rolling_vol_window):
"""
Determines the rolling volatility of a strategy.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
rolling_vol_window : int
Length of rolling window, in days, over which to compute.
Returns
-------
pd.Series
Rolling volatility.
"""
return returns.rolling(rolling_vol_window).std() \
* np.sqrt(APPROX_BDAYS_PER_YEAR)
def rolling_sharpe(returns, rolling_sharpe_window):
"""
Determines the rolling Sharpe ratio of a strategy.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
rolling_sharpe_window : int
Length of rolling window, in days, over which to compute.
Returns
-------
pd.Series
Rolling Sharpe ratio.
Note
-----
See https://en.wikipedia.org/wiki/Sharpe_ratio for more details.
"""
return returns.rolling(rolling_sharpe_window).mean() \
/ returns.rolling(rolling_sharpe_window).std() \
* np.sqrt(APPROX_BDAYS_PER_YEAR)
def simulate_paths(is_returns, num_days,
starting_value=1, num_samples=1000, random_seed=None):
"""
    Generate alternate paths using available values from in-sample returns.
Parameters
----------
is_returns : pandas.core.frame.DataFrame
Non-cumulative in-sample returns.
num_days : int
Number of days to project the probability cone forward.
starting_value : int or float
Starting value of the out of sample period.
num_samples : int
Number of samples to draw from the in-sample daily returns.
Each sample will be an array with length num_days.
A higher number of samples will generate a more accurate
bootstrap cone.
random_seed : int
Seed for the pseudorandom number generator used by the pandas
sample method.
Returns
-------
samples : numpy.ndarray
"""
samples = np.empty((num_samples, num_days))
seed = np.random.RandomState(seed=random_seed)
for i in range(num_samples):
samples[i, :] = is_returns.sample(num_days, replace=True,
random_state=seed)
return samples
def summarize_paths(samples, cone_std=(1., 1.5, 2.), starting_value=1.):
"""
    Generate the upper and lower bounds of an n standard deviation
cone of forecasted cumulative returns.
Parameters
----------
samples : numpy.ndarray
Alternative paths, or series of possible outcomes.
cone_std : list of int/float
        Number of standard deviations to use in the boundaries of
the cone. If multiple values are passed, cone bounds will
be generated for each value.
Returns
-------
samples : pandas.core.frame.DataFrame
"""
cum_samples = ep.cum_returns(samples.T,
starting_value=starting_value).T
cum_mean = cum_samples.mean(axis=0)
cum_std = cum_samples.std(axis=0)
if isinstance(cone_std, (float, int)):
cone_std = [cone_std]
cone_bounds = pd.DataFrame(columns=pd.Float64Index([]))
for num_std in cone_std:
cone_bounds.loc[:, float(num_std)] = cum_mean + cum_std * num_std
cone_bounds.loc[:, float(-num_std)] = cum_mean - cum_std * num_std
return cone_bounds
def forecast_cone_bootstrap(is_returns, num_days, cone_std=(1., 1.5, 2.),
starting_value=1, num_samples=1000,
random_seed=None):
"""
Determines the upper and lower bounds of an n standard deviation
cone of forecasted cumulative returns. Future cumulative mean and
    standard deviation are computed by repeatedly sampling from the
in-sample daily returns (i.e. bootstrap). This cone is non-parametric,
meaning it does not assume that returns are normally distributed.
Parameters
----------
is_returns : pd.Series
In-sample daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
num_days : int
Number of days to project the probability cone forward.
cone_std : int, float, or list of int/float
        Number of standard deviations to use in the boundaries of
the cone. If multiple values are passed, cone bounds will
be generated for each value.
starting_value : int or float
Starting value of the out of sample period.
num_samples : int
Number of samples to draw from the in-sample daily returns.
Each sample will be an array with length num_days.
A higher number of samples will generate a more accurate
bootstrap cone.
random_seed : int
Seed for the pseudorandom number generator used by the pandas
sample method.
Returns
-------
pd.DataFrame
Contains upper and lower cone boundaries. Column names are
        strings corresponding to the number of standard deviations
above (positive) or below (negative) the projected mean
cumulative returns.
"""
samples = simulate_paths(
is_returns=is_returns,
num_days=num_days,
starting_value=starting_value,
num_samples=num_samples,
random_seed=random_seed
)
cone_bounds = summarize_paths(
samples=samples,
cone_std=cone_std,
starting_value=starting_value
)
return cone_bounds
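def _example_forecast_cone_usage():
    # Illustrative sketch, not part of the original pyfolio API: projects a
    # 1/1.5/2 standard-deviation cone 126 business days forward from synthetic
    # in-sample returns. All inputs are assumptions for demonstration only.
    import numpy as np
    import pandas as pd
    is_rets = pd.Series(np.random.normal(0.0005, 0.01, 504),
                        index=pd.date_range("2018-01-01", periods=504, freq="B"))
    cone = forecast_cone_bootstrap(is_rets, num_days=126, num_samples=500,
                                   random_seed=42)
    return cone   # columns: 1.0, 1.5, 2.0 and their negated counterparts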
def extract_interesting_date_ranges(returns, periods=None):
"""
Extracts returns based on interesting events. See
gen_date_range_interesting.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
Returns
-------
ranges : OrderedDict
Date ranges, with returns, of all valid events.
"""
if periods is None:
periods = PERIODS
returns_dupe = returns.copy()
returns_dupe.index = returns_dupe.index.map(pd.Timestamp)
ranges = OrderedDict()
for name, (start, end) in periods.items():
try:
period = returns_dupe.loc[start:end]
if len(period) == 0:
continue
ranges[name] = period
except BaseException:
continue
return ranges
|
the-stack_0_22877 | from math import sqrt
# For each test case, read n and print the smallest possible difference
# between a pair of factors (i, n // i) of n, i.e. the factor pair closest
# to sqrt(n).
t = int(input())
while t:
    t -= 1
    n = int(input())
    ans = 99999999999999
    for i in range(1, int(sqrt(n)) + 5):
        if n % i == 0:
            ans = min(ans, abs(i - n // i))
    print(ans)
|
the-stack_0_22878 | # -*- coding: utf-8 -*-
# File: GAN.py
# Author: Yuxin Wu
import tensorflow as tf
import numpy as np
from tensorpack import (TowerTrainer,
ModelDescBase, DataFlow, StagingInput)
from tensorpack.tfutils.tower import TowerContext, TowerFuncWrapper
from tensorpack.graph_builder import DataParallelBuilder, LeastLoadedDeviceSetter
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.utils.argtools import memoized
class GANModelDesc(ModelDescBase):
def collect_variables(self, g_scope='gen', d_scope='discrim'):
"""
Assign `self.g_vars` to the parameters under scope `g_scope`,
and same with `self.d_vars`.
"""
self.g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, g_scope)
assert self.g_vars
self.d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, d_scope)
assert self.d_vars
def build_losses(self, logits_real, logits_fake):
"""
Build standard GAN loss and set `self.g_loss` and `self.d_loss`.
        D and G play a two-player minimax game with value function V(G, D):
        min_G max_D V(D, G) = IE_{x ~ p_data} [log D(x)] + IE_{z ~ p_fake} [log (1 - D(G(z)))]
Args:
logits_real (tf.Tensor): discrim logits from real samples
logits_fake (tf.Tensor): discrim logits from fake samples produced by generator
"""
with tf.name_scope("GAN_loss"):
score_real = tf.sigmoid(logits_real)
score_fake = tf.sigmoid(logits_fake)
tf.summary.histogram('score-real', score_real)
tf.summary.histogram('score-fake', score_fake)
with tf.name_scope("discrim"):
d_loss_pos = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=logits_real, labels=tf.ones_like(logits_real)), name='loss_real')
d_loss_neg = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=logits_fake, labels=tf.zeros_like(logits_fake)), name='loss_fake')
d_pos_acc = tf.reduce_mean(tf.cast(score_real > 0.5, tf.float32), name='accuracy_real')
d_neg_acc = tf.reduce_mean(tf.cast(score_fake < 0.5, tf.float32), name='accuracy_fake')
d_accuracy = tf.add(.5 * d_pos_acc, .5 * d_neg_acc, name='accuracy')
self.d_loss = tf.add(.5 * d_loss_pos, .5 * d_loss_neg, name='loss')
with tf.name_scope("gen"):
self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=logits_fake, labels=tf.ones_like(logits_fake)), name='loss')
g_accuracy = tf.reduce_mean(tf.cast(score_fake > 0.5, tf.float32), name='accuracy')
add_moving_summary(self.g_loss, self.d_loss, d_accuracy, g_accuracy)
def build_graph(self, *inputs):
"""
Have to build one tower and set the following attributes:
g_loss, d_loss, g_vars, d_vars.
"""
pass
@memoized
def get_optimizer(self):
return self.optimizer()
class GANTrainer(TowerTrainer):
def __init__(self, input, model):
"""
Args:
input (InputSource):
model (GANModelDesc):
"""
super(GANTrainer, self).__init__()
assert isinstance(model, GANModelDesc), model
inputs_desc = model.get_inputs_desc()
# Setup input
cbs = input.setup(inputs_desc)
self.register_callback(cbs)
"""
We need to set tower_func because it's a TowerTrainer,
and only TowerTrainer supports automatic graph creation for inference during training.
If we don't care about inference during training, using tower_func is
not needed. Just calling model.build_graph directly is OK.
"""
# Build the graph
self.tower_func = TowerFuncWrapper(model.build_graph, inputs_desc)
with TowerContext('', is_training=True):
self.tower_func(*input.get_input_tensors())
opt = model.get_optimizer()
# Define the training iteration
# by default, run one d_min after one g_min
with tf.name_scope('optimize'):
g_min = opt.minimize(model.g_loss, var_list=model.g_vars, name='g_op')
with tf.control_dependencies([g_min]):
d_min = opt.minimize(model.d_loss, var_list=model.d_vars, name='d_op')
self.train_op = d_min
class SeparateGANTrainer(TowerTrainer):
""" A GAN trainer which runs two optimization ops with a certain ratio."""
def __init__(self, input, model, d_period=1, g_period=1):
"""
Args:
d_period(int): period of each d_opt run
g_period(int): period of each g_opt run
"""
super(SeparateGANTrainer, self).__init__()
self._d_period = int(d_period)
self._g_period = int(g_period)
assert min(d_period, g_period) == 1
# Setup input
cbs = input.setup(model.get_inputs_desc())
self.register_callback(cbs)
# Build the graph
self.tower_func = TowerFuncWrapper(model.build_graph, model.get_inputs_desc())
with TowerContext('', is_training=True):
self.tower_func(*input.get_input_tensors())
opt = model.get_optimizer()
with tf.name_scope('optimize'):
self.d_min = opt.minimize(
model.d_loss, var_list=model.d_vars, name='d_min')
self.g_min = opt.minimize(
model.g_loss, var_list=model.g_vars, name='g_min')
def run_step(self):
# Define the training iteration
if self.global_step % (self._d_period) == 0:
self.hooked_sess.run(self.d_min)
if self.global_step % (self._g_period) == 0:
self.hooked_sess.run(self.g_min)
class MultiGPUGANTrainer(TowerTrainer):
"""
A replacement of GANTrainer (optimize d and g one by one) with multi-gpu support.
"""
def __init__(self, nr_gpu, input, model):
super(MultiGPUGANTrainer, self).__init__()
assert nr_gpu > 1
raw_devices = ['/gpu:{}'.format(k) for k in range(nr_gpu)]
# Setup input
input = StagingInput(input)
cbs = input.setup(model.get_inputs_desc())
self.register_callback(cbs)
# Build the graph with multi-gpu replication
def get_cost(*inputs):
model.build_graph(*inputs)
return [model.d_loss, model.g_loss]
self.tower_func = TowerFuncWrapper(get_cost, model.get_inputs_desc())
devices = [LeastLoadedDeviceSetter(d, raw_devices) for d in raw_devices]
cost_list = DataParallelBuilder.build_on_towers(
list(range(nr_gpu)),
lambda: self.tower_func(*input.get_input_tensors()),
devices)
# Simply average the cost here. It might be faster to average the gradients
with tf.name_scope('optimize'):
d_loss = tf.add_n([x[0] for x in cost_list]) * (1.0 / nr_gpu)
g_loss = tf.add_n([x[1] for x in cost_list]) * (1.0 / nr_gpu)
opt = model.get_optimizer()
# run one d_min after one g_min
g_min = opt.minimize(g_loss, var_list=model.g_vars,
colocate_gradients_with_ops=True, name='g_op')
with tf.control_dependencies([g_min]):
d_min = opt.minimize(d_loss, var_list=model.d_vars,
colocate_gradients_with_ops=True, name='d_op')
# Define the training iteration
self.train_op = d_min
class RandomZData(DataFlow):
def __init__(self, shape):
super(RandomZData, self).__init__()
self.shape = shape
def get_data(self):
while True:
yield [np.random.uniform(-1, 1, size=self.shape)]
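def _example_gan_training_sketch(model, batch_size=128, z_dim=100):
    # Illustrative sketch, not part of the original script: `model` is assumed
    # to be a concrete GANModelDesc subclass whose generator consumes batches
    # of `z_dim`-dimensional noise. QueueInput is tensorpack's queue-based
    # InputSource; batch_size, z_dim and the epoch settings are assumptions.
    from tensorpack import QueueInput
    trainer = GANTrainer(QueueInput(RandomZData([batch_size, z_dim])), model)
    trainer.train_with_defaults(steps_per_epoch=300, max_epoch=100)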
|
the-stack_0_22882 | from CommonServerPython import *
def main():
params = {k: v for k, v in demisto.params().items() if v is not None}
feed_url_to_config = {
'https://www.cloudflare.com/ips-v4': {
'indicator_type': FeedIndicatorType.CIDR
},
'https://www.cloudflare.com/ips-v6': {
'indicator_type': FeedIndicatorType.IPv6CIDR
}
}
params['feed_url_to_config'] = feed_url_to_config
# Call the main execution of the HTTP API module.
feed_main('Cloudflare Feed', params, 'cloudflare-')
from HTTPFeedApiModule import * # noqa: E402
if __name__ == '__builtin__' or __name__ == 'builtins':
main()
|
the-stack_0_22885 | from __future__ import print_function
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Data import CodonTable
import itertools
from mykrobe.utils import split_var_name
import logging
logger = logging.getLogger(__name__)
logger.setLevel("INFO")
def flatten(l):
return [item for sublist in l for item in sublist]
class Region(object):
def __init__(self, reference, start, end, forward=True):
self.reference = reference
self.start = start
self.end = end
self.forward = forward
@property
def strand(self):
if self.forward:
return "forward"
else:
return "reverse"
@property
def seq(self):
if self.forward:
return self.reference[self.start - 1:self.end]
else:
return self.reference[self.start - 1:self.end].reverse_complement()
def get_reference_position(self, pos):
if pos < 0 and self.forward:
return self.start + pos
elif pos < 0 and not self.forward:
# Upstream of a gene on the reverse stand
return self.end - pos
elif pos > 0 and self.forward:
return self.start + pos - 1
elif pos > 0 and not self.forward:
return self.end - pos + 1
else:
raise ValueError("Positions are 1-based")
class Gene(Region):
def __init__(self, name, reference, start, end, forward=True):
super(self.__class__, self).__init__(reference, start, end, forward)
self.name = name
self.translation_table = 11
self.backward_codon_table = make_backward_codon_table()
@property
def prot(self):
return self.seq.translate(table=self.translation_table).rstrip("*")
def get_context(self, pos, N):
return self.seq[(3 * (pos - 1)) - N: pos * 3 + N]
def get_codon(self, pos):
if pos > len(self.prot):
raise ValueError(
"There are only %s aminoacids in this gene" % len(
self.prot))
else:
return self.seq[(3 * (pos - 1)): pos * 3]
def get_reference_codon(self, pos):
if self.forward:
return self.get_codon(pos)
else:
return self.get_codon(pos).reverse_complement()
def get_reference_codons(self, pos):
standard_table = CodonTable.unambiguous_dna_by_id[11]
# Get the reference codon in reading frame
ref_codon = self.get_codon(pos)
# Get the backward codons
ref_aa = standard_table.forward_table[str(ref_codon)]
condons_in_reading_frame = self.backward_codon_table[ref_aa]
if self.forward:
return condons_in_reading_frame
else:
return [str(Seq(s).reverse_complement())
for s in condons_in_reading_frame]
def __str__(self):
return "Gene:%s" % self.name
def __repr__(self):
return "Gene:%s" % self.name
def make_backward_codon_table():
table = {}
standard_table = CodonTable.unambiguous_dna_by_id[11]
codons = generate_all_possible_codons()
for codon in codons:
if codon not in standard_table.stop_codons:
try:
table[standard_table.forward_table[codon]].append(codon)
            except KeyError:
table[standard_table.forward_table[codon]] = [codon]
return table
def generate_all_possible_codons():
return ["".join(subset)
for subset in itertools.product(["A", "T", "C", "G"], repeat=3)]
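def _example_backward_codon_table_usage():
    # Illustrative sketch, not part of the original module: the backward table
    # maps an amino acid (one-letter code) to every codon that encodes it under
    # translation table 11, e.g. methionine maps only to ATG while leucine maps
    # to six codons; stop codons are excluded.
    table = make_backward_codon_table()
    assert table["M"] == ["ATG"]
    assert len(table["L"]) == 6
    return table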
class GeneAminoAcidChangeToDNAVariants():
def __init__(self, reference, genbank):
self.reference = self._parse_reference(reference)
self.genbank = self._parse_genbank(genbank)
self.backward_codon_table = make_backward_codon_table()
def _parse_reference(self, reference):
with open(reference, "r") as infile:
return list(SeqIO.parse(infile, "fasta"))[0].seq
def _parse_genbank(self, genbank):
d = {}
with open(genbank, 'r') as infile:
for feat in SeqIO.read(infile, "genbank").features:
if feat.type == "gene":
try:
name = feat.qualifiers.get(
"gene",
feat.qualifiers.get("db_xref"))[0]
except TypeError:
pass
else:
# SeqIO converts to 0-based
strand = int(feat.location.strand)
if strand == 1:
forward = True
elif strand == -1:
forward = False
else:
raise ValueError("Strand must be 1 or -1")
d[name] = Gene(
name,
self.reference,
start=feat.location.start + 1,
end=feat.location.end,
forward=forward)
return d
def get_alts(self, amino_acid):
if amino_acid == "X":
return flatten(self.backward_codon_table.values())
else:
return self.backward_codon_table[amino_acid]
def get_reference_alts(self, gene, amino_acid):
if gene.forward:
return self.get_alts(amino_acid)
else:
return [str(Seq(s).reverse_complement())
for s in self.get_alts(amino_acid)]
def get_location(self, gene, pos):
if gene.forward:
dna_pos = (3 * (pos - 1)) + 1
else:
dna_pos = (3 * (pos))
return gene.get_reference_position(dna_pos)
def get_variant_names(self, gene, mutation, protein_coding_var=True):
ref, start, alt = split_var_name(mutation)
gene = self.get_gene(gene)
if start < 0 or not protein_coding_var:
return self._process_DNA_mutation(gene, ref, start, alt)
elif start > 0:
return self._process_coding_mutation(gene, ref, start, alt)
else:
raise ValueError(
"Variants are defined in 1-based coordinates. You can't have pos 0. ")
def _process_DNA_mutation(self, gene, ref, start, alt):
names = []
pos = gene.get_reference_position(start)
if not gene.forward:
pos -= len(ref) - 1
ref = str(Seq(ref).reverse_complement())
if alt != 'X':
alt = str(Seq(alt).reverse_complement())
if alt == "X":
for a in ["A", "T", "C", "G"]:
if a != ref:
names.append("".join([ref, str(pos), a]))
else:
names.append("".join([ref, str(pos), alt]))
return names
def _process_coding_mutation(self, gene, ref, start, alt):
logger.debug("Processing gene:{} ref:{} start:{} alt:{}".format(
gene, ref, start, alt))
if not gene.prot or start > len(gene.prot):
raise ValueError("Error translating %s_%s " %
(gene, "".join([ref, str(start), alt])))
if not gene.prot[start - 1] == ref:
raise ValueError(
"Error processing %s_%s. The reference at pos %i is not %s, it's %s. " %
(gene, "".join(
[
ref, str(start), alt]), start, ref, gene.prot[
start - 1]))
ref_codons = gene.get_reference_codons(start)
alt_codons = self.get_reference_alts(gene, alt)
logger.debug("Reference codons (forward strand equivalent) {}".format(
"".join(ref_codons)))
logger.debug("Alternate codons (forward strand equivalent) {}".format(
"".join(alt_codons)))
for ref_codon in ref_codons:
if ref_codon in alt_codons:
alt_codons.remove(ref_codon)
location = self.get_location(gene, start)
alternative = "/".join(alt_codons)
ref_codon = gene.get_reference_codon(start)
names = ["".join(["".join(ref_codon),
str(location),
"".join(alt_codon)]) for alt_codon in alt_codons]
return names
def get_gene(self, gene):
return self.genbank[gene]
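def _example_variant_names_sketch(reference_fasta, genbank_file):
    # Illustrative sketch, not part of the original module: the two file paths
    # are assumptions supplied by the caller (a reference FASTA plus matching
    # GenBank annotation). Converts an amino-acid change such as rpoB S450L
    # into the equivalent reference DNA variant names.
    converter = GeneAminoAcidChangeToDNAVariants(reference_fasta, genbank_file)
    return converter.get_variant_names("rpoB", "S450L")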
|
the-stack_0_22889 | # -*- encoding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from h2o.utils.compatibility import * # NOQA
import os
import traceback
import h2o
from h2o.base import Keyed
from h2o.exceptions import H2OValueError
from h2o.job import H2OJob
from h2o.utils.metaclass import BackwardsCompatible, Deprecated as deprecated, h2o_meta
from h2o.utils.compatibility import viewitems
from h2o.utils.ext_dependencies import get_matplotlib_pyplot
from h2o.utils.shared_utils import can_use_pandas
from h2o.utils.typechecks import I, assert_is_type, assert_satisfies, Enum, is_type
@BackwardsCompatible(
instance_attrs=dict(
giniCoef=lambda self, *args, **kwargs: self.gini(*args, **kwargs)
)
)
class ModelBase(h2o_meta(Keyed)):
"""Base class for all models."""
def __init__(self):
"""Construct a new model instance."""
self._id = None
self._model_json = None
self._metrics_class = None
self._metrics_class_valid = None
self._is_xvalidated = False
self._xval_keys = None
self._parms = {} # internal, for object recycle
self.parms = {} # external
self._estimator_type = "unsupervised"
self._future = False # used by __repr__/show to query job state
self._job = None # used when _future is True
self._have_pojo = False
self._have_mojo = False
self._start_time = None
self._end_time = None
self._run_time = None
@property
def key(self):
return self._id
@property
def model_id(self):
"""Model identifier."""
return self._id
@model_id.setter
def model_id(self, newid):
oldid = self._id
self._id = newid
h2o.rapids("(rename '%s' '%s')" % (oldid, newid))
@property
def params(self):
"""
Get the parameters and the actual/default values only.
:returns: A dictionary of parameters used to build this model.
"""
params = {}
for p in self.parms:
params[p] = {"default": self.parms[p]["default_value"],
"actual": self.parms[p]["actual_value"],
"input": self.parms[p]["input_value"]}
return params
@property
def default_params(self):
"""Dictionary of the default parameters of the model."""
params = {}
for p in self.parms:
params[p] = self.parms[p]["default_value"]
return params
@property
def actual_params(self):
"""Dictionary of actual parameters of the model."""
params_to_select = {"model_id": "name",
"response_column": "column_name",
"training_frame": "name",
"validation_frame": "name"}
params = {}
for p in self.parms:
if p in params_to_select.keys():
params[p] = (self.parms[p].get("actual_value") or {}).get(params_to_select[p], None)
else:
params[p] = self.parms[p]["actual_value"]
return params
@property
def full_parameters(self):
"""Dictionary of the full specification of all parameters."""
return self.parms
@property
def type(self):
"""The type of model built: ``"classifier"`` or ``"regressor"`` or ``"unsupervised"``"""
return self._estimator_type
@property
def have_pojo(self):
"""True, if export to POJO is possible"""
return self._have_pojo
@property
def have_mojo(self):
"""True, if export to MOJO is possible"""
return self._have_mojo
@property
def start_time(self):
"""Timestamp (milliseconds since 1970) when the model training was started."""
return self._start_time
@property
def end_time(self):
"""Timestamp (milliseconds since 1970) when the model training was ended."""
return self._end_time
@property
def run_time(self):
"""Model training time in milliseconds"""
return self._run_time
def __repr__(self):
# PUBDEV-2278: using <method>? from IPython caused everything to dump
stk = traceback.extract_stack()
if not ("IPython" in stk[-2][0] and "info" == stk[-2][2]):
self.show()
return ""
def predict_leaf_node_assignment(self, test_data, type="Path"):
"""
Predict on a dataset and return the leaf node assignment (only for tree-based models).
:param H2OFrame test_data: Data on which to make predictions.
        :param Enum type: How to identify the leaf node. Nodes can be identified either by a path from the root node
of the tree to the node or by H2O's internal node id. One of: ``"Path"``, ``"Node_ID"`` (default: ``"Path"``).
:returns: A new H2OFrame of predictions.
"""
if not isinstance(test_data, h2o.H2OFrame): raise ValueError("test_data must be an instance of H2OFrame")
assert_is_type(type, None, Enum("Path", "Node_ID"))
j = h2o.api("POST /3/Predictions/models/%s/frames/%s" % (self.model_id, test_data.frame_id),
data={"leaf_node_assignment": True, "leaf_node_assignment_type": type})
return h2o.get_frame(j["predictions_frame"]["name"])
def staged_predict_proba(self, test_data):
"""
Predict class probabilities at each stage of an H2O Model (only GBM models).
The output structure is analogous to the output of function predict_leaf_node_assignment. For each tree t and
class c there will be a column Tt.Cc (eg. T3.C1 for tree 3 and class 1). The value will be the corresponding
predicted probability of this class by combining the raw contributions of trees T1.Cc,..,TtCc. Binomial models
        build the trees just for the first class and values in columns Tx.C1 thus correspond to the probability p0.
:param H2OFrame test_data: Data on which to make predictions.
:returns: A new H2OFrame of staged predictions.
"""
if not isinstance(test_data, h2o.H2OFrame): raise ValueError("test_data must be an instance of H2OFrame")
j = h2o.api("POST /3/Predictions/models/%s/frames/%s" % (self.model_id, test_data.frame_id),
data={"predict_staged_proba": True})
return h2o.get_frame(j["predictions_frame"]["name"])
def predict_contributions(self, test_data):
"""
Predict feature contributions - SHAP values on an H2O Model (only DRF, GBM and XGBoost models).
Returned H2OFrame has shape (#rows, #features + 1) - there is a feature contribution column for each input
feature, the last column is the model bias (same value for each row). The sum of the feature contributions
        and the bias term is equal to the raw prediction of the model. The raw prediction of a tree-based model is the sum
        of the predictions of the individual trees before the inverse link function is applied to get the actual
prediction. For Gaussian distribution the sum of the contributions is equal to the model prediction.
Note: Multinomial classification models are currently not supported.
:param H2OFrame test_data: Data on which to calculate contributions.
:returns: A new H2OFrame made of feature contributions.
"""
if not isinstance(test_data, h2o.H2OFrame): raise ValueError("test_data must be an instance of H2OFrame")
j = H2OJob(h2o.api("POST /4/Predictions/models/%s/frames/%s" % (self.model_id, test_data.frame_id),
data={"predict_contributions": True}), "contributions")
j.poll()
return h2o.get_frame(j.dest_key)
def feature_frequencies(self, test_data):
"""
Retrieve the number of occurrences of each feature for given observations
on their respective paths in a tree ensemble model.
Available for GBM, Random Forest and Isolation Forest models.
:param H2OFrame test_data: Data on which to calculate feature frequencies.
:returns: A new H2OFrame made of feature contributions.
"""
if not isinstance(test_data, h2o.H2OFrame): raise ValueError("test_data must be an instance of H2OFrame")
j = h2o.api("POST /3/Predictions/models/%s/frames/%s" % (self.model_id, test_data.frame_id),
data={"feature_frequencies": True})
return h2o.get_frame(j["predictions_frame"]["name"])
def predict(self, test_data, custom_metric = None, custom_metric_func = None):
"""
Predict on a dataset.
:param H2OFrame test_data: Data on which to make predictions.
:param custom_metric: custom evaluation function defined as class reference, the class get uploaded
into the cluster
:param custom_metric_func: custom evaluation function reference, e.g, result of upload_custom_metric
:returns: A new H2OFrame of predictions.
"""
# Upload evaluation function into DKV
if custom_metric:
assert_satisfies(custom_metric_func, custom_metric_func is None,
"The argument 'eval_func_ref' cannot be specified when eval_func is specified, ")
eval_func_ref = h2o.upload_custom_metric(custom_metric)
if not isinstance(test_data, h2o.H2OFrame): raise ValueError("test_data must be an instance of H2OFrame")
j = H2OJob(h2o.api("POST /4/Predictions/models/%s/frames/%s" % (self.model_id, test_data.frame_id), data = {'custom_metric_func': custom_metric_func}),
self._model_json["algo"] + " prediction")
j.poll()
return h2o.get_frame(j.dest_key)
def is_cross_validated(self):
"""Return True if the model was cross-validated."""
return self._is_xvalidated
def xval_keys(self):
"""Return model keys for the cross-validated model."""
return self._xval_keys
def get_xval_models(self, key=None):
"""
Return a Model object.
:param key: If None, return all cross-validated models; otherwise return the model that key points to.
:returns: A model or list of models.
"""
return h2o.get_model(key) if key is not None else [h2o.get_model(k) for k in self._xval_keys]
@property
def xvals(self):
"""
Return a list of the cross-validated models.
:returns: A list of models.
"""
return self.get_xval_models()
def detach(self):
self._id = None
def deepfeatures(self, test_data, layer):
"""
Return hidden layer details.
:param test_data: Data to create a feature space on
:param layer: 0 index hidden layer
"""
if test_data is None: raise ValueError("Must specify test data")
if str(layer).isdigit():
j = H2OJob(h2o.api("POST /4/Predictions/models/%s/frames/%s" % (self._id, test_data.frame_id),
data={"deep_features_hidden_layer": layer}), "deepfeatures")
else:
j = H2OJob(h2o.api("POST /4/Predictions/models/%s/frames/%s" % (self._id, test_data.frame_id),
data={"deep_features_hidden_layer_name": layer}), "deepfeatures")
j.poll()
return h2o.get_frame(j.dest_key)
def weights(self, matrix_id=0):
"""
Return the frame for the respective weight matrix.
:param matrix_id: an integer, ranging from 0 to number of layers, that specifies the weight matrix to return.
:returns: an H2OFrame which represents the weight matrix identified by matrix_id
"""
num_weight_matrices = len(self._model_json["output"]["weights"])
if matrix_id not in list(range(num_weight_matrices)):
raise ValueError(
"Weight matrix does not exist. Model has {0} weight matrices (0-based indexing), but matrix {1} "
"was requested.".format(num_weight_matrices, matrix_id))
return h2o.get_frame(self._model_json["output"]["weights"][matrix_id]["URL"].split("/")[3])
def biases(self, vector_id=0):
"""
Return the frame for the respective bias vector.
:param: vector_id: an integer, ranging from 0 to number of layers, that specifies the bias vector to return.
:returns: an H2OFrame which represents the bias vector identified by vector_id
"""
num_bias_vectors = len(self._model_json["output"]["biases"])
if vector_id not in list(range(num_bias_vectors)):
raise ValueError(
"Bias vector does not exist. Model has {0} bias vectors (0-based indexing), but vector {1} "
"was requested.".format(num_bias_vectors, vector_id))
return h2o.get_frame(self._model_json["output"]["biases"][vector_id]["URL"].split("/")[3])
def normmul(self):
"""Normalization/Standardization multipliers for numeric predictors."""
return self._model_json["output"]["normmul"]
def normsub(self):
"""Normalization/Standardization offsets for numeric predictors."""
return self._model_json["output"]["normsub"]
def respmul(self):
"""Normalization/Standardization multipliers for numeric response."""
return self._model_json["output"]["normrespmul"]
def respsub(self):
"""Normalization/Standardization offsets for numeric response."""
return self._model_json["output"]["normrespsub"]
def catoffsets(self):
"""Categorical offsets for one-hot encoding."""
return self._model_json["output"]["catoffsets"]
def training_model_metrics(self):
"""
Return training model metrics for any model.
"""
return self._model_json["output"]["training_metrics"]._metric_json
def model_performance(self, test_data=None, train=False, valid=False, xval=False):
"""
Generate model metrics for this model on test_data.
:param H2OFrame test_data: Data set for which model metrics shall be computed against. All three of train,
valid and xval arguments are ignored if test_data is not None.
:param bool train: Report the training metrics for the model.
:param bool valid: Report the validation metrics for the model.
:param bool xval: Report the cross-validation metrics for the model. If train and valid are True, then it
defaults to True.
:returns: An object of class H2OModelMetrics.
"""
if test_data is None:
if not train and not valid and not xval: train = True # default to train
if train: return self._model_json["output"]["training_metrics"]
if valid: return self._model_json["output"]["validation_metrics"]
if xval: return self._model_json["output"]["cross_validation_metrics"]
else: # cases dealing with test_data not None
if not isinstance(test_data, h2o.H2OFrame):
raise ValueError("`test_data` must be of type H2OFrame. Got: " + type(test_data))
            if self._model_json["response_column_name"] is not None and self._model_json["response_column_name"] not in test_data.names:
print("WARNING: Model metrics cannot be calculated and metric_json is empty due to the absence of the response column in your dataset.")
return
res = h2o.api("POST /3/ModelMetrics/models/%s/frames/%s" % (self.model_id, test_data.frame_id))
# FIXME need to do the client-side filtering... (PUBDEV-874)
raw_metrics = None
for mm in res["model_metrics"]:
if mm["frame"] is not None and mm["frame"]["name"] == test_data.frame_id:
raw_metrics = mm
break
return self._metrics_class_valid(raw_metrics, algo=self._model_json["algo"])
def scoring_history(self):
"""
Retrieve Model Score History.
:returns: The score history as an H2OTwoDimTable or a Pandas DataFrame.
"""
model = self._model_json["output"]
if "scoring_history" in model and model["scoring_history"] is not None:
return model["scoring_history"].as_data_frame()
if "glm_scoring_history" in model and model["glm_scoring_history"] is not None:
return model["glm_scoring_history"].as_data_frame()
print("No score history for this model")
def ntrees_actual(self):
"""
Returns actual number of trees in a tree model. If early stopping enabled, GBM can reset the ntrees value.
In this case, the actual ntrees value is less than the original ntrees value a user set before
building the model.
Type: ``float``
"""
tree_algos = ['gbm', 'drf', 'isolationforest', 'xgboost']
if self._model_json["algo"] in tree_algos:
return self.summary()['number_of_trees'][0]
print("No actual number of trees for this model")
def feature_interaction(self, max_interaction_depth = 100, max_tree_depth = 100, max_deepening = -1):
"""
Feature interactions and importance, leaf statistics and split value histograms in a tabular form.
Available for XGBoost and GBM.
Metrics:
Gain - Total gain of each feature or feature interaction.
FScore - Amount of possible splits taken on a feature or feature interaction.
wFScore - Amount of possible splits taken on a feature or feature interaction weighed by
the probability of the splits to take place.
Average wFScore - wFScore divided by FScore.
Average Gain - Gain divided by FScore.
Expected Gain - Total gain of each feature or feature interaction weighed by the probability to gather the gain.
Average Tree Index
Average Tree Depth
:param max_interaction_depth: Upper bound for extracted feature interactions depth. Defaults to 100.
:param max_tree_depth: Upper bound for tree depth. Defaults to 100.
:param max_deepening: Upper bound for interaction start deepening (zero deepening => interactions
starting at root only). Defaults to -1.
:examples:
>>> boston = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/gbm_test/BostonHousing.csv")
>>> predictors = boston.columns[:-1]
>>> response = "medv"
>>> boston['chas'] = boston['chas'].asfactor()
>>> train, valid = boston.split_frame(ratios=[.8])
>>> boston_xgb = H2OXGBoostEstimator(seed=1234)
>>> boston_xgb.train(y=response, x=predictors, training_frame=train)
>>> feature_interactions = boston_xgb.feature_interaction()
"""
supported_algos = ['gbm', 'xgboost']
if self._model_json["algo"] in supported_algos:
kwargs = {}
kwargs["model_id"] = self.model_id
kwargs["max_interaction_depth"] = max_interaction_depth
kwargs["max_tree_depth"] = max_tree_depth
kwargs["max_deepening"] = max_deepening
json = h2o.api("POST /3/FeatureInteraction", data=kwargs)
return json['feature_interaction']
print("No calculation available for this model")
def cross_validation_metrics_summary(self):
"""
Retrieve Cross-Validation Metrics Summary.
:returns: The cross-validation metrics summary as an H2OTwoDimTable
"""
model = self._model_json["output"]
if "cross_validation_metrics_summary" in model and model["cross_validation_metrics_summary"] is not None:
return model["cross_validation_metrics_summary"]
print("No cross-validation metrics summary for this model")
def summary(self):
"""Print a detailed summary of the model."""
model = self._model_json["output"]
if "model_summary" in model and model["model_summary"] is not None:
return model["model_summary"]
print("No model summary for this model")
def show(self):
"""Print innards of model, without regards to type."""
if self._future:
self._job.poll_once()
return
if self._model_json is None:
print("No model trained yet")
return
if self.model_id is None:
print("This H2OEstimator has been removed.")
return
model = self._model_json["output"]
print("Model Details")
print("=============")
print(self.__class__.__name__, ": ", self._model_json["algo_full_name"])
print("Model Key: ", self._id)
print()
summary = self.summary()
if summary:
print(summary)
# training metrics
tm = model["training_metrics"]
if tm: tm.show()
vm = model["validation_metrics"]
if vm: vm.show()
xm = model["cross_validation_metrics"]
if xm: xm.show()
xms = model["cross_validation_metrics_summary"]
if xms: xms.show()
if "scoring_history" in model and model["scoring_history"]:
model["scoring_history"].show()
if "variable_importances" in model and model["variable_importances"]:
model["variable_importances"].show()
def varimp(self, use_pandas=False):
"""
Pretty print the variable importances, or return them in a list.
:param bool use_pandas: If True, then the variable importances will be returned as a pandas data frame.
:returns: A list or Pandas DataFrame.
"""
model = self._model_json["output"]
if "variable_importances" in list(model.keys()) and model["variable_importances"]:
vals = model["variable_importances"].cell_values
header = model["variable_importances"].col_header
if use_pandas and can_use_pandas():
import pandas
return pandas.DataFrame(vals, columns=header)
else:
return vals
else:
print("Warning: This model doesn't have variable importances")
def residual_deviance(self, train=False, valid=False, xval=None):
"""
        Retrieve the residual deviance if this model has the attribute, or None otherwise.
:param bool train: Get the residual deviance for the training set. If both train and valid are False, then
train is selected by default.
:param bool valid: Get the residual deviance for the validation set. If both train and valid are True, then
train is selected by default.
:returns: Return the residual deviance, or None if it is not present.
"""
if xval: raise H2OValueError("Cross-validation metrics are not available.")
if not train and not valid: train = True
if train and valid: train = True
if train:
return self._model_json["output"]["training_metrics"].residual_deviance()
else:
return self._model_json["output"]["validation_metrics"].residual_deviance()
def residual_degrees_of_freedom(self, train=False, valid=False, xval=False):
"""
        Retrieve the residual degrees of freedom if this model has the attribute, or None otherwise.
:param bool train: Get the residual dof for the training set. If both train and valid are False, then train
is selected by default.
:param bool valid: Get the residual dof for the validation set. If both train and valid are True, then train
is selected by default.
:returns: Return the residual dof, or None if it is not present.
"""
if xval: raise H2OValueError("Cross-validation metrics are not available.")
if not train and not valid: train = True
if train and valid: train = True
if train:
return self._model_json["output"]["training_metrics"].residual_degrees_of_freedom()
else:
return self._model_json["output"]["validation_metrics"].residual_degrees_of_freedom()
def null_deviance(self, train=False, valid=False, xval=False):
"""
        Retrieve the null deviance if this model has the attribute, or None otherwise.
:param bool train: Get the null deviance for the training set. If both train and valid are False, then train
is selected by default.
:param bool valid: Get the null deviance for the validation set. If both train and valid are True, then train
is selected by default.
:returns: Return the null deviance, or None if it is not present.
"""
if xval: raise H2OValueError("Cross-validation metrics are not available.")
if not train and not valid: train = True
if train and valid: train = True
if train:
return self._model_json["output"]["training_metrics"].null_deviance()
else:
return self._model_json["output"]["validation_metrics"].null_deviance()
def null_degrees_of_freedom(self, train=False, valid=False, xval=False):
"""
        Retrieve the null degrees of freedom if this model has the attribute, or None otherwise.
:param bool train: Get the null dof for the training set. If both train and valid are False, then train is
selected by default.
:param bool valid: Get the null dof for the validation set. If both train and valid are True, then train is
selected by default.
:returns: Return the null dof, or None if it is not present.
"""
if xval: raise H2OValueError("Cross-validation metrics are not available.")
if not train and not valid: train = True
if train and valid: train = True
if train:
return self._model_json["output"]["training_metrics"].null_degrees_of_freedom()
else:
return self._model_json["output"]["validation_metrics"].null_degrees_of_freedom()
def pprint_coef(self):
"""Pretty print the coefficents table (includes normalized coefficients)."""
print(self._model_json["output"]["coefficients_table"]) # will return None if no coefs!
def coef(self):
"""
Return the coefficients which can be applied to the non-standardized data.
        Note: standardize=True by default; if set to False, then coef() returns the coefficients that are fitted directly.
"""
if (self._model_json["output"]['model_category']=="Multinomial") or \
(self._model_json["output"]['model_category']=="Ordinal"):
return self._fillMultinomialDict(False)
else:
tbl = self._model_json["output"]["coefficients_table"]
if tbl is None:
return None
return {name: coef for name, coef in zip(tbl["names"], tbl["coefficients"])}
def coef_norm(self):
"""
Return coefficients fitted on the standardized data (requires standardize = True, which is on by default).
These coefficients can be used to evaluate variable importance.
"""
if self._model_json["output"]["model_category"]=="Multinomial":
return self._fillMultinomialDict(True)
else:
tbl = self._model_json["output"]["coefficients_table"]
if tbl is None:
return None
return {name: coef for name, coef in zip(tbl["names"], tbl["standardized_coefficients"])}
def _fillMultinomialDict(self, standardize=False):
if self.algo == 'gam':
tbl = self._model_json["output"]["coefficients_table"]
else:
tbl = self._model_json["output"]["coefficients_table_multinomials_with_class_names"]
if tbl is None:
return None
coeff_dict = {} # contains coefficient names
coeffNames = tbl["names"]
all_col_header = tbl.col_header
startIndex = 1
endIndex = int((len(all_col_header)-1)/2+1)
if standardize:
startIndex = int((len(all_col_header)-1)/2+1) # start index for standardized coefficients
endIndex = len(all_col_header)
for nameIndex in list(range(startIndex, endIndex)):
coeffList = tbl[all_col_header[nameIndex]]
t1Dict = {name: coef for name, coef in zip(coeffNames, coeffList)}
coeff_dict[all_col_header[nameIndex]]=t1Dict
return coeff_dict
def r2(self, train=False, valid=False, xval=False):
"""
Return the R squared for this regression model.
Will return R^2 for GLM Models and will return NaN otherwise.
The R^2 value is defined to be 1 - MSE/var, where var is computed as sigma*sigma.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the R^2 value for the training data.
:param bool valid: If valid is True, then return the R^2 value for the validation data.
:param bool xval: If xval is True, then return the R^2 value for the cross validation data.
:returns: The R squared for this regression model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k, v in viewitems(tm):
m[k] = None if v is None else v.r2()
return list(m.values())[0] if len(m) == 1 else m
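# Illustrative calls showing the shared train/valid/xval convention used by the metric
# accessors in this class (hypothetical model `m`):
#     m.r2()                        # training R^2 (all flags False defaults to train)
#     m.rmse(valid=True)            # validation RMSE only
#     m.mse(train=True, xval=True)  # dict: {"train": ..., "xval": ...}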
def mse(self, train=False, valid=False, xval=False):
"""
Get the Mean Square Error.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the MSE value for the training data.
:param bool valid: If valid is True, then return the MSE value for the validation data.
:param bool xval: If xval is True, then return the MSE value for the cross validation data.
:returns: The MSE for this regression model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k, v in viewitems(tm):
m[k] = None if v is None else v.mse()
return list(m.values())[0] if len(m) == 1 else m
def rmse(self, train=False, valid=False, xval=False):
"""
Get the Root Mean Square Error.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the RMSE value for the training data.
:param bool valid: If valid is True, then return the RMSE value for the validation data.
:param bool xval: If xval is True, then return the RMSE value for the cross validation data.
:returns: The RMSE for this regression model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k, v in viewitems(tm):
m[k] = None if v is None else v.rmse()
return list(m.values())[0] if len(m) == 1 else m
def mae(self, train=False, valid=False, xval=False):
"""
Get the Mean Absolute Error.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the MAE value for the training data.
:param bool valid: If valid is True, then return the MAE value for the validation data.
:param bool xval: If xval is True, then return the MAE value for the cross validation data.
:returns: The MAE for this regression model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k, v in viewitems(tm):
m[k] = None if v is None else v.mae()
return list(m.values())[0] if len(m) == 1 else m
def rmsle(self, train=False, valid=False, xval=False):
"""
Get the Root Mean Squared Logarithmic Error.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the RMSLE value for the training data.
:param bool valid: If valid is True, then return the RMSLE value for the validation data.
:param bool xval: If xval is True, then return the RMSLE value for the cross validation data.
:returns: The RMSLE for this regression model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k, v in viewitems(tm): m[k] = None if v is None else v.rmsle()
return list(m.values())[0] if len(m) == 1 else m
def logloss(self, train=False, valid=False, xval=False):
"""
Get the Log Loss.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the log loss value for the training data.
:param bool valid: If valid is True, then return the log loss value for the validation data.
:param bool xval: If xval is True, then return the log loss value for the cross validation data.
:returns: The log loss for this regression model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k, v in viewitems(tm): m[k] = None if v is None else v.logloss()
return list(m.values())[0] if len(m) == 1 else m
def mean_residual_deviance(self, train=False, valid=False, xval=False):
"""
Get the Mean Residual Deviance.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the Mean Residual Deviance value for the training data.
:param bool valid: If valid is True, then return the Mean Residual Deviance value for the validation data.
:param bool xval: If xval is True, then return the Mean Residual Deviance value for the cross validation data.
:returns: The Mean Residual Deviance for this regression model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k, v in viewitems(tm): m[k] = None if v is None else v.mean_residual_deviance()
return list(m.values())[0] if len(m) == 1 else m
def auc(self, train=False, valid=False, xval=False):
"""
Get the AUC (Area Under Curve).
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the AUC value for the training data.
:param bool valid: If valid is True, then return the AUC value for the validation data.
:param bool xval: If xval is True, then return the AUC value for the cross validation data.
:returns: The AUC.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k, v in viewitems(tm):
if v is not None and not is_type(v, h2o.model.metrics_base.H2OBinomialModelMetrics) and not is_type(v, h2o.model.metrics_base.H2OMultinomialModelMetrics):
raise H2OValueError("auc() is only available for Binomial classifiers. For Multinomial classifiers, an average AUC value is available; the default is the Weighted One-to-Rest AUC.")
m[k] = None if v is None else v.auc()
return list(m.values())[0] if len(m) == 1 else m
def aic(self, train=False, valid=False, xval=False):
"""
Get the AIC (Akaike Information Criterion).
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the AIC value for the training data.
:param bool valid: If valid is True, then return the AIC value for the validation data.
:param bool xval: If xval is True, then return the AIC value for the cross validation data.
:returns: The AIC.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k, v in viewitems(tm): m[k] = None if v is None else v.aic()
return list(m.values())[0] if len(m) == 1 else m
def gini(self, train=False, valid=False, xval=False):
"""
Get the Gini coefficient.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the Gini Coefficient value for the training data.
:param bool valid: If valid is True, then return the Gini Coefficient value for the validation data.
:param bool xval: If xval is True, then return the Gini Coefficient value for the cross validation data.
:returns: The Gini Coefficient for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k, v in viewitems(tm): m[k] = None if v is None else v.gini()
return list(m.values())[0] if len(m) == 1 else m
def aucpr(self, train=False, valid=False, xval=False):
"""
Get the aucPR (Area Under the Precision-Recall Curve).
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the aucpr value for the training data.
:param bool valid: If valid is True, then return the aucpr value for the validation data.
:param bool xval: If xval is True, then return the aucpr value for the cross validation data.
:returns: The aucpr.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k, v in viewitems(tm):
if v is not None and not is_type(v, h2o.model.metrics_base.H2OBinomialModelMetrics) and not is_type(v, h2o.model.metrics_base.H2OMultinomialModelMetrics):
raise H2OValueError("aucpr() is only available for Binomial classifiers. For Multinomial classifiers is available average PR AUC value, default is Weighted One-to-Rest PR AUC.")
m[k] = None if v is None else v.aucpr()
return list(m.values())[0] if len(m) == 1 else m
@deprecated(replaced_by=aucpr)
def pr_auc(self, train=False, valid=False, xval=False):
pass
def download_model(self, path=""):
"""
Download an H2O Model object to disk.
:param path: a path to the directory where the model should be saved.
:returns: the path of the downloaded model
"""
assert_is_type(path, str)
return h2o.download_model(self, path)
def download_pojo(self, path="", get_genmodel_jar=False, genmodel_name=""):
"""
Download the POJO for this model to the directory specified by path.
If path is an empty string, then dump the output to screen.
:param path: An absolute path to the directory where POJO should be saved.
:param get_genmodel_jar: if True, then also download h2o-genmodel.jar and store it in folder ``path``.
:param genmodel_name: Custom name of genmodel jar
:returns: name of the POJO file written.
"""
assert_is_type(path, str)
assert_is_type(get_genmodel_jar, bool)
path = path.rstrip("/")
return h2o.download_pojo(self, path, get_jar=get_genmodel_jar, jar_name=genmodel_name)
def download_mojo(self, path=".", get_genmodel_jar=False, genmodel_name=""):
"""
Download the model in MOJO format.
:param path: the path where MOJO file should be saved.
:param get_genmodel_jar: if True, then also download h2o-genmodel.jar and store it in folder ``path``.
:param genmodel_name: Custom name of genmodel jar
:returns: name of the MOJO file written.
"""
assert_is_type(path, str)
assert_is_type(get_genmodel_jar, bool)
if not self.have_mojo:
raise H2OValueError("Export to MOJO not supported")
if get_genmodel_jar:
if genmodel_name == "":
h2o.api("GET /3/h2o-genmodel.jar", save_to=os.path.join(path, "h2o-genmodel.jar"))
else:
h2o.api("GET /3/h2o-genmodel.jar", save_to=os.path.join(path, genmodel_name))
return h2o.api("GET /3/Models/%s/mojo" % self.model_id, save_to=path)
def save_mojo(self, path="", force=False):
"""
Save an H2O Model as MOJO (Model Object, Optimized) to disk.
:param path: a path to save the model at (hdfs, s3, local)
:param force: if True overwrite destination directory in case it exists, or throw exception if set to False.
:returns str: the path of the saved model
"""
assert_is_type(path, str)
assert_is_type(force, bool)
if not self.have_mojo:
raise H2OValueError("Export to MOJO not supported")
path = os.path.join(os.getcwd() if path == "" else path, self.model_id + ".zip")
return h2o.api("GET /99/Models.mojo/%s" % self.model_id, data={"dir": path, "force": force})["dir"]
def save_model_details(self, path="", force=False):
"""
Save Model Details of an H2O Model in JSON Format to disk.
:param path: a path to save the model details at (hdfs, s3, local)
:param force: if True overwrite destination directory in case it exists, or throw exception if set to False.
:returns str: the path of the saved model details
"""
assert_is_type(path, str)
assert_is_type(force, bool)
path = os.path.join(os.getcwd() if path == "" else path, self.model_id + ".json")
return h2o.api("GET /99/Models/%s/json" % self.model_id, data={"dir": path, "force": force})["dir"]
@staticmethod
def _get_metrics(o, train, valid, xval):
# noinspection PyProtectedMember
output = o._model_json["output"]
metrics = {}
if train: metrics["train"] = output["training_metrics"]
if valid: metrics["valid"] = output["validation_metrics"]
if xval: metrics["xval"] = output["cross_validation_metrics"]
if len(metrics) == 0: metrics["train"] = output["training_metrics"]
return metrics
# Delete from cluster as model goes out of scope
# def __del__(self):
# h2o.remove(self._id)
def _plot(self, timestep, metric, server=False):
plt = get_matplotlib_pyplot(server)
if not plt: return
scoring_history = self.scoring_history()
# Separate functionality for GLM since its output is different from other algos
if self._model_json["algo"] in ("gam", "glm"):
if self.actual_params.get("lambda_search"):
allowed_timesteps = ["iteration", "duration"]
allowed_metrics = ["deviance_train", "deviance_test", "deviance_xval"]
# When provided with multiple alpha values, scoring history contains history of all...
scoring_history = scoring_history[scoring_history["alpha"] == self._model_json["output"]["alpha_best"]]
elif self.actual_params.get("HGLM"):
allowed_timesteps = ["iterations", "duration"]
allowed_metrics = ["convergence", "sumetaieta02"]
else:
allowed_timesteps = ["iterations", "duration"]
allowed_metrics = ["objective", "negative_log_likelihood"]
if metric == "AUTO":
metric = allowed_metrics[0]
elif metric not in allowed_metrics:
raise H2OValueError("for {}, metric must be one of: {}".format(self.algo.upper(),
", ".join(allowed_metrics)))
if timestep == "AUTO":
timestep = allowed_timesteps[0]
elif timestep not in allowed_timesteps:
raise H2OValueError("for {}, timestep must be one of: {}".format(self.algo.upper(),
", ".join(allowed_timesteps)))
plt.xlabel(timestep)
plt.ylabel(metric)
plt.title("Validation Scoring History")
style = "b-" if len(scoring_history[timestep]) > 1 else "bx"
plt.plot(scoring_history[timestep], scoring_history[metric], style)
elif self._model_json["algo"] in ("deeplearning", "xgboost", "drf", "gbm"):
# Set timestep
if self._model_json["algo"] in ("gbm", "drf", "xgboost"):
assert_is_type(timestep, "AUTO", "duration", "number_of_trees")
if timestep == "AUTO":
timestep = "number_of_trees"
else: # self._model_json["algo"] == "deeplearning":
# Delete first row of DL scoring history since it contains NAs & NaNs
if scoring_history["samples"][0] == 0:
scoring_history = scoring_history[1:]
assert_is_type(timestep, "AUTO", "epochs", "samples", "duration")
if timestep == "AUTO":
timestep = "epochs"
training_metric = "training_{}".format(metric)
validation_metric = "validation_{}".format(metric)
if timestep == "duration":
dur_colname = "duration_{}".format(scoring_history["duration"][1].split()[1])
scoring_history[dur_colname] = [str(x).split()[0] for x in scoring_history["duration"]]
timestep = dur_colname
if can_use_pandas():
valid = validation_metric in list(scoring_history)
ylim = (scoring_history[[training_metric, validation_metric]].min().min(),
scoring_history[[training_metric, validation_metric]].max().max()) if valid \
else (scoring_history[training_metric].min(), scoring_history[training_metric].max())
else:
valid = validation_metric in scoring_history.col_header
ylim = (min(min(scoring_history[[training_metric, validation_metric]])),
max(max(scoring_history[[training_metric, validation_metric]]))) if valid \
else (min(scoring_history[training_metric]), max(scoring_history[training_metric]))
if ylim[0] == ylim[1]: ylim = (0, 1)
if valid: # Training and validation scoring history
plt.xlabel(timestep)
plt.ylabel(metric)
plt.title("Scoring History")
plt.ylim(ylim)
plt.plot(scoring_history[timestep], scoring_history[training_metric], label="Training")
plt.plot(scoring_history[timestep], scoring_history[validation_metric], color="orange",
label="Validation")
plt.legend()
else: # Training scoring history only
plt.xlabel(timestep)
plt.ylabel(training_metric)
plt.title("Training Scoring History")
plt.ylim(ylim)
plt.plot(scoring_history[timestep], scoring_history[training_metric])
else: # algo is not glm, deeplearning, drf, gbm, xgboost
raise H2OValueError("Plotting not implemented for this type of model")
if not server: plt.show()
def partial_plot(self, data, cols=None, destination_key=None, nbins=20, weight_column=None,
plot=True, plot_stddev = True, figsize=(7, 10), server=False, include_na=False, user_splits=None,
col_pairs_2dpdp=None, save_to_file=None, row_index=None, targets=None):
"""
Create partial dependence plot which gives a graphical depiction of the marginal effect of a variable on the
response. The effect of a variable is measured in change in the mean response.
:param H2OFrame data: An H2OFrame object used for scoring and constructing the plot.
:param cols: Feature(s) for which partial dependence will be calculated.
:param destination_key: A key reference to the created partial dependence tables in H2O.
:param nbins: Number of bins used. For categorical columns, make sure the number of bins exceeds the level count. If you enable include_na, the returned length will be nbins+1.
:param weight_column: A string denoting which column of data should be used as the weight column.
:param plot: A boolean specifying whether to plot partial dependence table.
:param plot_stddev: A boolean specifying whether to add std err to partial dependence plot.
:param figsize: Dimension/size of the returning plots, adjust to fit your output cells.
:param server: Specify whether to activate matplotlib "server" mode. In this case, the plots are saved to a file instead of being rendered.
:param include_na: A boolean specifying whether missing value should be included in the Feature values.
:param user_splits: a dictionary containing column names as key and user defined split values as value in a list.
:param col_pairs_2dpdp: list containing pairs of column names for 2D pdp
:param save_to_file: Fully qualified name to an image file the resulting plot should be saved to, e.g. '/home/user/pdpplot.png'. The 'png' postfix might be omitted. If the file already exists, it will be overwritten. Plot is only saved if plot = True.
:param row_index: Row for which partial dependence will be calculated instead of the whole input frame.
:param targets: Target classes for multiclass model.
:returns: Plot and list of calculated mean response tables for each feature requested.
"""
if not isinstance(data, h2o.H2OFrame): raise ValueError("Data must be an instance of H2OFrame.")
num_1dpdp = 0
num_2dpdp = 0
if cols is not None:
assert_is_type(cols, [str])
num_1dpdp = len(cols)
if col_pairs_2dpdp is not None:
assert_is_type(col_pairs_2dpdp, [[str, str]])
num_2dpdp = len(col_pairs_2dpdp)
if cols is None and col_pairs_2dpdp is None:
raise ValueError("Must specify either cols or col_pairs_2dpdp to generate partial dependency plots.")
if col_pairs_2dpdp and targets and len(targets) > 1:
raise ValueError("Multinomial 2D Partial Dependency is available only for one target.")
assert_is_type(destination_key, None, str)
assert_is_type(nbins, int)
assert_is_type(plot, bool)
assert_is_type(figsize, (int, int))
# Check cols specified exist in frame data
if cols is not None:
for xi in cols:
if xi not in data.names:
raise H2OValueError("Column %s does not exist in the training frame." % xi)
if col_pairs_2dpdp is not None:
for oneP in col_pairs_2dpdp:
if oneP[0] not in data.names:
raise H2OValueError("Column %s does not exist in the training frame." % oneP[0])
if oneP[1] not in data.names:
raise H2OValueError("Column %s does not exist in the training frame." % oneP[1])
if oneP[0]==oneP[1]:
raise H2OValueError("2D pdp must be with different columns.")
if isinstance(weight_column, int) and not (weight_column == -1):
raise H2OValueError("Weight column should be a column name in your data frame.")
elif isinstance(weight_column, str): # index is a name
if weight_column not in data.names:
raise H2OValueError("Column %s does not exist in the data frame" % weight_column)
weight_column = data.names.index(weight_column)
if row_index is not None:
if not isinstance(row_index, int):
raise H2OValueError("Row index should be of type int.")
else:
row_index = -1
if targets:
assert_is_type(targets, list)
for i in targets:
assert_is_type(i, str)
num_1dpdp = num_1dpdp
num_2dpdp = num_2dpdp
kwargs = {}
kwargs["cols"] = cols
kwargs["model_id"] = self.model_id
kwargs["frame_id"] = data.frame_id
kwargs["nbins"] = nbins
kwargs["destination_key"] = destination_key
kwargs["weight_column_index"] = weight_column
kwargs["add_missing_na"] = include_na
kwargs["row_index"] = row_index
kwargs["col_pairs_2dpdp"] = col_pairs_2dpdp
if targets:
kwargs["targets"] = targets
self.__generate_user_splits(user_splits, data, kwargs)
json = H2OJob(h2o.api("POST /3/PartialDependence/", data=kwargs), job_type="PartialDependencePlot").poll()
json = h2o.api("GET /3/PartialDependence/%s" % json.dest_key)
# Extract partial dependence data from json response
pps = json["partial_dependence_data"]
# Plot partial dependence plots using matplotlib
self.__generate_partial_plots(num_1dpdp, num_2dpdp, plot, server, pps, figsize, col_pairs_2dpdp, data, nbins,
kwargs["user_cols"], kwargs["num_user_splits"], plot_stddev, cols, save_to_file, row_index, targets, include_na)
return pps
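# Illustrative call of partial_plot (hypothetical frame and column names; not part of the
# original source):
#     pdp_tables = model.partial_plot(data=train_frame, cols=["AGE", "RACE"], nbins=20,
#                                     plot=True, server=True, save_to_file="/tmp/pdp.png")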
def __generate_user_splits(self, user_splits, data, kwargs):
# extract user defined split points from dict user_splits into an integer array of column indices
# and a double array of user define values for the corresponding columns
if user_splits is not None and len(user_splits) > 0:
if not isinstance(user_splits, dict):
raise H2OValueError("user_splits must be a Python dict.")
else:
user_cols = []
user_values = []
user_num_splits = []
data_ncol = data.ncol
column_names = data.names
for colKey,val in user_splits.items():
if is_type(colKey, str) and colKey in column_names:
user_cols.append(colKey)
elif isinstance(colKey, int) and colKey < data_ncol:
user_cols.append(column_names[colKey])
else:
raise H2OValueError("Column names/indices used in user_splits are not valid. They "
"should be chosen from the columns of your data set.")
if data[colKey].isfactor()[0] or data[colKey].isnumeric()[0]: # replace enum string with actual value
nVal = len(val)
if data[colKey].isfactor()[0]:
domains = data[colKey].levels()[0]
numVal = [0]*nVal
for ind in range(nVal):
if (val[ind] in domains):
numVal[ind] = domains.index(val[ind])
else:
raise H2OValueError("Illegal enum value {0} encountered. To include missing"
" values in your feature values, set include_na to "
"True.".format(val[ind]))
user_values.extend(numVal)
else:
user_values.extend(val)
user_num_splits.append(nVal)
else:
raise H2OValueError("Partial dependency plots are generated for numerical and categorical "
"columns only.")
kwargs["user_cols"] = user_cols
kwargs["user_splits"] = user_values
kwargs["num_user_splits"] = user_num_splits
else:
kwargs["user_cols"] = None
kwargs["user_splits"] = None
kwargs["num_user_splits"] = None
def __generate_partial_plots(self, num_1dpdp, num_2dpdp, plot, server, pps, figsize, col_pairs_2dpdp, data, nbins,
user_cols, user_num_splits, plot_stddev, cols, save_to_file, row_index, targets, include_na):
# Plot partial dependence plots using matplotlib
to_fig = num_1dpdp + num_2dpdp
if plot and to_fig > 0: # plot 1d pdp for now
plt = get_matplotlib_pyplot(server)
cm = _get_matplotlib_cm("Partial dependency plots")
if not plt:
return pps
import matplotlib.gridspec as gridspec
fig = plt.figure(figsize=figsize)
gxs = gridspec.GridSpec(to_fig, 1)
if num_2dpdp > 0: # 2d pdp requested
axes_3d = _get_mplot3d_pyplot("2D partial plots")
fig_plotted = False # indicates whether at least one figure was plotted
data_index = 0
target = None
if targets and len(targets) == 1:
target = targets[0]
for i in range(to_fig):
if i >= num_1dpdp: # plot 2D pdp
if axes_3d is None or cm is None or plt is None: # quit if cannot find toolbox
break
fig_plotted = self.__plot_2d_pdp(fig, col_pairs_2dpdp, gxs, num_1dpdp, data, pps[i], nbins,
user_cols, user_num_splits, plot_stddev, cm, i, row_index)
else: # plot 1D pdp
col = cols[i]
if targets is None or target:
fig_plotted = self.__plot_1d_pdp(col, i, data, pps[i], fig, gxs, plot_stddev, row_index, target, include_na)
else:
fig_plotted = self.__plot_1d_pdp_multinomial(col, i, data, pps, data_index, fig, gxs, cm,
plot_stddev, row_index, targets, include_na)
data_index = data_index + len(targets)
if fig_plotted:
fig.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
else:
print("No partial plot is generated and/or saved. You may be missing toolboxes like "
"mpl_toolkits.mplot3d or matplotlib.")
if (save_to_file is not None) and fig_plotted: # only save when a figure is actually plotted
plt.savefig(save_to_file)
def __plot_2d_pdp(self, fig, col_pairs_2dpdp, gxs, num_1dpdp, data, pp, nbins, user_cols, user_num_splits,
plot_stddev, cm, i, row_index):
ax = fig.add_subplot(gxs[i], projection='3d')
col_pairs = col_pairs_2dpdp[i-num_1dpdp]
x = self.__grab_values(pp, 0, data, col_pairs[0], ax) # change to numpy 2d_array
y = self.__grab_values(pp, 1, data, col_pairs[1], ax)
X,Y,Z = self.__pred_for_3d(x, y, pp[2], col_pairs, nbins, user_cols, user_num_splits)
ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,linewidth=1, antialiased=False, alpha=0.5, edgecolor='k')
if plot_stddev:
zupper = [a + b for a, b in zip(pp[2], pp[3])] # pp[2] is mean, pp[3] is std
zlower = [a - b for a, b in zip(pp[2], pp[3])]
_,_,Zupper = self.__pred_for_3d(x,y,zupper, col_pairs, nbins, user_cols, user_num_splits)
_,_,Zlower = self.__pred_for_3d(x,y,zlower, col_pairs, nbins, user_cols, user_num_splits)
ax.plot_surface(X, Y, Zupper, cmap=cm.coolwarm,linewidth=0.2, antialiased=False, alpha=0.3, edgecolor='y')
ax.plot_surface(X, Y, Zlower, cmap=cm.coolwarm,linewidth=0.2, antialiased=False, alpha=0.3, edgecolor='g')
ax.set_xlabel(col_pairs[0])
ax.set_xlim(min(x), max(x))
ax.set_ylabel(col_pairs[1])
ax.set_ylim(min(y), max(y))
ax.set_zlabel('Partial dependence')
title = '2D partial dependence plot for '+col_pairs[0] + ' and '+col_pairs[1]
if row_index >= 0:
title += ' and row index {}'.format(row_index)
ax.set_title(title)
return True
def __plot_1d_pdp(self, col, i, data, pp, fig, gxs, plot_stddev, row_index, target=None, include_na=False):
cat = data[col].isfactor()[0]
axs = fig.add_subplot(gxs[i])
self.__set_axs_1d(axs, plot_stddev, cat, pp, col, row_index, target, include_na)
return True
def __plot_1d_pdp_multinomial(self, col, i, data, pps, data_start_index, fig, gxs, cm, plot_stddev, row_index,
targets, include_na):
cat = data[col].isfactor()[0]
axs = fig.add_subplot(gxs[i])
self.__set_axs_1d_multinomial(axs, cm, plot_stddev, cat, pps, data_start_index, col, row_index, targets, include_na)
return True
# change x, y, z to be 2-D numpy arrays in order to plot it.
# note that, x stays at one value for the duration of y value changes.
def __pred_for_3d(self, x, y, z, colPairs, nbins, user_cols, user_num_splits):
# deal with y axis first
np = _get_numpy("2D partial plots")
if np is None:
print("Numpy not found. Cannot plot 2D partial plots.")
return None, None, None
ycol = colPairs[1]
nBins = nbins
if user_cols is not None and ycol in user_cols:
ind = user_cols.index(ycol)
nBins = user_num_splits[ind]
nrow = int(len(x)/nBins)
X = np.transpose(np.array(x).reshape(nrow, nBins))
Y = np.transpose(np.array(y).reshape(nrow, nBins))
Z = np.transpose(np.array(z).reshape(nrow, nBins))
return X,Y,Z
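# Worked sketch of the reshape above (made-up values, nBins=2, six grid points):
#     x = [1,1,2,2,3,3], y = [10,20,10,20,10,20]
#     np.array(x).reshape(3, 2) -> [[1,1],[2,2],[3,3]]; the transpose gives 2x3 meshes
#     X, Y, Z that plot_surface can consume, with x constant while y sweeps its bins.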
def __grab_values(self, pp, index, data, col, axs):
cat = data[col].isfactor()[0]
if cat:
labels = pp[index]
uniqueL = list(set(labels))
x = range(len(uniqueL))
xlab = [None]*len(uniqueL)
for ind in range(len(uniqueL)):
xlab[ind] = labels[labels.index(uniqueL[ind])]
# replace string enum labels with integer values
xext = [None]*len(labels)
for ind in range(len(labels)):
xext[ind] = labels.index(labels[ind])
if index == 0: # x-axis
axs.set_xticks(x)
axs.set_xticklabels(xlab)
else: # y-axis
axs.set_yticks(x)
axs.set_yticklabels(xlab)
axs.margins(0.2)
return xext
else:
return pp[index]
def __set_axs_1d(self, axs, plot_stddev, cat, pp, col, row_index, target, include_na):
np = _get_numpy("1D partial plots")
if np is None:
print("Numpy not found. Cannot plot partial plots.")
pp_start_index = 0
x = pp[pp_start_index]
y = pp[pp_start_index+1]
if len(x) == 1:
fmt = 'o'
else:
fmt = '-'
axs.set_xlim(min(x), max(x))
if cat:
labels = x # 1d pdp, this is 0
x = range(len(labels))
fmt = "o"
axs.set_xticks(x)
axs.set_xticklabels(labels, rotation=45)
axs.set_xlim(min(x) - 0.2, max(x) + 0.2)
if plot_stddev:
std = pp[pp_start_index+2]
upper = [a + b for a, b in zip(y, std)] # pp[1] is mean, pp[2] is std
lower = [a - b for a, b in zip(y, std)]
if cat:
axs.errorbar(x, y, yerr=std, fmt=fmt, alpha=0.5, capsize=5, label=target)
else:
numline, = axs.plot(x, y, fmt, label=target)
axs.fill_between(x, lower, upper, where=lower < upper, alpha=0.1, interpolate=False)
axs.set_ylim(min(lower) - 0.2 * abs(min(lower)), max(upper) + 0.2 * abs(max(upper)))
else:
numline, = axs.plot(x, y, fmt, label=target)
axs.set_ylim(min(y) - 0.2 * abs(min(y)), max(y) + 0.2 * abs(max(y)))
if (not cat) and include_na:
axs.plot(x, [y[np.argwhere(np.isnan(x))[0][0]]] * len(x), '--', color=numline._color,label="NAN")
axs.legend()
title = "Partial Dependence Plot for {}".format(col)
if target:
title += " and class {}".format(target)
if row_index >= 0:
title += " and row index {}".format(row_index)
axs.set_title(title)
axs.set_xlabel(pp.col_header[pp_start_index])
axs.set_ylabel(pp.col_header[pp_start_index+1])
axs.xaxis.grid()
axs.yaxis.grid()
def __set_axs_1d_multinomial(self, axs, cm, plot_stddev, cat, pps, data_start_index, col, row_index, targets, include_na):
np = _get_numpy("1D multinomial partial plots")
if np is None:
print("Numpy not found. Cannot plot multinomial partial plots.")
pp_start_index = 0
pp = pps[data_start_index]
x = pp[pp_start_index]
y = pp[pp_start_index + 1]
# get initial maximum and minimum values to set xaxis and yaxis
min_y = min(y)
max_y = max(y)
if plot_stddev:
min_lower = min_y
max_upper = max_y
fmt = None
if cat: # adjust x axis to categorical values
labels = pp[pp_start_index]
x = range(len(labels))
axs.set_xticks(x)
axs.set_xticklabels(labels, rotation=45)
fmt = "o"
axs.set_xlim(min(x) - 0.2, max(x) + 0.2)
else:
axs.set_xlim(min(x), max(x))
axs.set_xlabel(pp.col_header[pp_start_index]) # set x axis label
axs.set_ylabel(pp.col_header[pp_start_index+1]) # set y axis label
cmap = cm.get_cmap("rainbow", len(targets)) # get color map
for i in range(len(targets)):
pp = pps[data_start_index + i]
y = pp[pp_start_index + 1]
min_y = min(min_y, min(y))
max_y = max(max_y, max(y))
if plot_stddev: # set std
std = pp[pp_start_index + 2]
upper = [a + b for a, b in zip(y, std)] # pp[1] is mean, pp[2] is std
lower = [a - b for a, b in zip(y, std)]
min_lower = min(min_lower, min(lower))
max_upper = max(max_upper, max(upper))
if cat:
axs.errorbar(x, y, yerr=std, fmt=fmt, c=cmap(i), alpha=0.5, capsize=5, label=targets[i])
else:
numline, = axs.plot(x, y, c=cmap(i), label=targets[i])
axs.fill_between(x, lower, upper, where=lower < upper, facecolor=cmap(i), alpha=0.1, interpolate=False)
else:
numline, = axs.plot(x, y, c=cmap(i), marker=fmt, label=targets[i])
if (not cat) and include_na:
axs.plot(x, [y[np.argwhere(np.isnan(x))[0][0]]] * len(x), '--', color=numline._color,label=targets[i] + " NAN")
if plot_stddev:
axs.set_ylim(min_lower - 0.2 * abs(min_lower), max_upper + 0.2 * abs(max_upper))
else:
axs.set_ylim(min_y - 0.2 * abs(min_y), max_y + 0.2 * abs(max_y))
axs.legend()
title = "Partial Dependence Plot for {} and classes \n {}".format(col, ', '.join(targets))
if row_index >= 0:
title += " and row index {}".format(row_index)
axs.set_title(title)
axs.xaxis.grid()
axs.yaxis.grid()
def varimp_plot(self, num_of_features=None, server=False):
"""
Plot the variable importance for a trained model.
:param num_of_features: the number of features shown in the plot (default is 10 or all if less than 10).
:param server: if True, set up matplotlib in server mode (the figure is built but plt.show() is not called).
:returns: None.
"""
assert_is_type(num_of_features, None, int)
assert_is_type(server, bool)
plt = get_matplotlib_pyplot(server)
if not plt: return
# get the variable importances as a list of tuples, do not use pandas dataframe
importances = self.varimp(use_pandas=False)
# features labels correspond to the first value of each tuple in the importances list
feature_labels = [tup[0] for tup in importances]
# scaled (relative) importances correspond to the third value of each tuple in the importances list
scaled_importances = [tup[2] for tup in importances]
# specify bar centers on the y axis, but flip the order so largest bar appears at top
pos = range(len(feature_labels))[::-1]
# specify the bar lengths
val = scaled_importances
# # check that num_of_features is an integer
# if num_of_features is None:
# num_of_features = len(val)
# default to 10 or less features if num_of_features is not specified
if num_of_features is None:
num_of_features = min(len(val), 10)
fig, ax = plt.subplots(1, 1, figsize=(14, 10))
# create separate plot for the case where num_of_features == 1
if num_of_features == 1:
plt.barh(pos[0:num_of_features], val[0:num_of_features], align="center",
height=0.8, color="#1F77B4", edgecolor="none")
# Hide the right and top spines, color others grey
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_color("#7B7B7B")
ax.spines["left"].set_color("#7B7B7B")
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position("left")
ax.xaxis.set_ticks_position("bottom")
plt.yticks(pos[0:num_of_features], feature_labels[0:num_of_features])
ax.margins(None, 0.5)
else:
plt.barh(pos[0:num_of_features], val[0:num_of_features], align="center",
height=0.8, color="#1F77B4", edgecolor="none")
# Hide the right and top spines, color others grey
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_color("#7B7B7B")
ax.spines["left"].set_color("#7B7B7B")
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position("left")
ax.xaxis.set_ticks_position("bottom")
plt.yticks(pos[0:num_of_features], feature_labels[0:num_of_features])
plt.ylim([min(pos[0:num_of_features])- 1, max(pos[0:num_of_features])+1])
# ax.margins(y=0.5)
# check which algorithm was used to select right plot title
if self._model_json["algo"] == "gbm":
plt.title("Variable Importance: H2O GBM", fontsize=20)
if not server: plt.show()
elif self._model_json["algo"] == "drf":
plt.title("Variable Importance: H2O DRF", fontsize=20)
if not server: plt.show()
elif self._model_json["algo"] == "xgboost":
plt.title("Variable Importance: H2O XGBoost", fontsize=20)
if not server: plt.show()
# if H2ODeepLearningEstimator has variable_importances == True
elif self._model_json["algo"] == "deeplearning":
plt.title("Variable Importance: H2O Deep Learning", fontsize=20)
if not server: plt.show()
elif self._model_json["algo"] == "glm":
plt.title("Variable Importance: H2O GLM", fontsize=20)
if not server: plt.show()
else:
raise H2OValueError("A variable importances plot is not implemented for this type of model")
def std_coef_plot(self, num_of_features=None, server=False):
"""
Plot a GLM model"s standardized coefficient magnitudes.
:param num_of_features: the number of features shown in the plot.
:param server: if True, set up matplotlib in server mode (the figure is built but plt.show() is not called).
:returns: None.
"""
assert_is_type(num_of_features, None, I(int, lambda x: x > 0))
# check that model is a glm
if self._model_json["algo"] != "glm":
raise H2OValueError("This function is available for GLM models only")
plt = get_matplotlib_pyplot(server)
if not plt: return
# get unsorted tuple of labels and coefficients
unsorted_norm_coef = self.coef_norm().items()
# drop intercept value then sort tuples by the coefficient's absolute value
drop_intercept = [tup for tup in unsorted_norm_coef if tup[0] != "Intercept"]
norm_coef = sorted(drop_intercept, key=lambda x: abs(x[1]), reverse=True)
signage = []
for element in norm_coef:
# if positive including zero, color blue, else color orange (use same colors as Flow)
if element[1] >= 0:
signage.append("#1F77B4") # blue
else:
signage.append("#FF7F0E") # dark orange
# get feature labels and their corresponding magnitudes
feature_labels = [tup[0] for tup in norm_coef]
norm_coef_magn = [abs(tup[1]) for tup in norm_coef]
# specify bar centers on the y axis, but flip the order so largest bar appears at top
pos = range(len(feature_labels))[::-1]
# specify the bar lengths
val = norm_coef_magn
# check number of features, default is all the features
if num_of_features is None:
num_of_features = len(val)
# plot horizontal plot
fig, ax = plt.subplots(1, 1, figsize=(14, 10))
# create separate plot for the case where num_of_features = 1
if num_of_features == 1:
plt.barh(pos[0], val[0],
align="center", height=0.8, color=signage[0], edgecolor="none")
# Hide the right and top spines, color others grey
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_color("#7B7B7B")
ax.spines["left"].set_color("#7B7B7B")
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position("left")
ax.xaxis.set_ticks_position("bottom")
plt.yticks([0], feature_labels[0])
ax.margins(None, 0.5)
else:
plt.barh(pos[0:num_of_features], val[0:num_of_features],
align="center", height=0.8, color=signage[0:num_of_features], edgecolor="none")
# Hide the right and top spines, color others grey
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_color("#7B7B7B")
ax.spines["left"].set_color("#7B7B7B")
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position("left")
ax.xaxis.set_ticks_position("bottom")
plt.yticks(pos[0:num_of_features], feature_labels[0:num_of_features])
ax.margins(None, 0.05)
# generate custom fake lines that will be used as legend entries:
# check if positive and negative values exist
# if positive create positive legend
if "#1F77B4" in signage[0:num_of_features] and "#FF7F0E" not in signage[0:num_of_features]:
color_ids = ("Positive",)
markers = [plt.Line2D([0, 0], [0, 0], color=color, marker="s", linestyle="")
for color in signage[0:num_of_features]]
lgnd = plt.legend(markers, color_ids, numpoints=1, loc="best", frameon=False, fontsize=13)
lgnd.legendHandles[0]._legmarker.set_markersize(10)
# if neg create neg legend
elif "#FF7F0E" in signage[0:num_of_features] and "#1F77B4" not in signage[0:num_of_features]:
color_ids = ("Negative",)
markers = [plt.Line2D([0, 0], [0, 0], color=color, marker="s", linestyle="")
for color in set(signage[0:num_of_features])]
lgnd = plt.legend(markers, color_ids, numpoints=1, loc="best", frameon=False, fontsize=13)
lgnd.legendHandles[0]._legmarker.set_markersize(10)
# if both provide both colors in legend
else:
color_ids = ("Positive", "Negative")
markers = [plt.Line2D([0, 0], [0, 0], color=color, marker="s", linestyle="")
for color in ['#1F77B4', '#FF7F0E']] # blue should always be positive, orange negative
lgnd = plt.legend(markers, color_ids, numpoints=1, loc="best", frameon=False, fontsize=13)
lgnd.legendHandles[0]._legmarker.set_markersize(10)
lgnd.legendHandles[1]._legmarker.set_markersize(10)
# Hide the right and top spines, color others grey
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_color("#7B7B7B")
ax.spines["left"].set_color("#7B7B7B")
# Only show ticks on the left and bottom spines
# ax.yaxis.set_ticks_position("left")
# ax.xaxis.set_ticks_position("bottom")
plt.yticks(pos[0:num_of_features], feature_labels[0:num_of_features])
plt.tick_params(axis="x", which="minor", bottom="off", top="off", labelbottom="off")
plt.title("Standardized Coef. Magnitudes: H2O GLM", fontsize=20)
# plt.axis("tight")
# show plot
if not server: plt.show()
@staticmethod
def _check_targets(y_actual, y_predicted):
"""Check that y_actual and y_predicted have the same length.
:param H2OFrame y_actual:
:param H2OFrame y_predicted:
:returns: None
"""
if len(y_actual) != len(y_predicted):
raise ValueError("Row mismatch: [{},{}]".format(len(y_actual), len(y_predicted)))
def cross_validation_models(self):
"""
Obtain a list of cross-validation models.
:returns: list of H2OModel objects.
"""
cvmodels = self._model_json["output"]["cross_validation_models"]
if cvmodels is None: return None
m = []
for p in cvmodels: m.append(h2o.get_model(p["name"]))
return m
def cross_validation_predictions(self):
"""
Obtain the (out-of-sample) holdout predictions of all cross-validation models on their holdout data.
Note that the predictions are expanded to the full number of rows of the training data, with 0 fill-in.
:returns: list of H2OFrame objects.
"""
preds = self._model_json["output"]["cross_validation_predictions"]
if preds is None: return None
m = []
for p in preds: m.append(h2o.get_frame(p["name"]))
return m
def cross_validation_holdout_predictions(self):
"""
Obtain the (out-of-sample) holdout predictions of all cross-validation models on the training data.
This is equivalent to summing up all H2OFrames returned by cross_validation_predictions.
:returns: H2OFrame
"""
preds = self._model_json["output"]["cross_validation_holdout_predictions_frame_id"]
if preds is None: return None
return h2o.get_frame(preds["name"])
def cross_validation_fold_assignment(self):
"""
Obtain the cross-validation fold assignment for all rows in the training data.
:returns: H2OFrame
"""
fid = self._model_json["output"]["cross_validation_fold_assignment_frame_id"]
if fid is None: return None
return h2o.get_frame(fid["name"])
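# Illustrative usage of the cross-validation accessors (assumes the model was trained with
# nfolds > 1 and the relevant keep_cross_validation_* options enabled):
#     cv_models = model.cross_validation_models()
#     holdout_preds = model.cross_validation_holdout_predictions()
#     fold_assignment = model.cross_validation_fold_assignment()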
def rotation(self):
"""
Obtain the rotations (eigenvectors) for a PCA model
:return: H2OFrame
"""
if self._model_json["algo"] != "pca":
raise H2OValueError("This function is available for PCA models only")
return self._model_json["output"]["eigenvectors"]
def score_history(self):
"""DEPRECATED. Use :meth:`scoring_history` instead."""
return self.scoring_history()
def _get_mplot3d_pyplot(functionName):
try:
# noinspection PyUnresolvedReferences
from mpl_toolkits.mplot3d import Axes3D
return Axes3D
except ImportError:
print("`mpl_toolkits.mplot3d` library is required for function {0}!".format(functionName))
return None
def _get_numpy(functionName):
try:
import numpy as np
return np
except ImportError:
print("`numpy` library is required for function {0}!".format(functionName))
return None
def _get_matplotlib_cm(functionName):
try:
from matplotlib import cm
return cm
except ImportError:
print('matplotlib library is required for 3D plots for function {0}'.format(functionName))
return None
|
the-stack_0_22890 | #!/usr/bin/env python
__all__ = ['bilibili_download']
from ..common import *
from .sina import sina_download_by_vid
from .tudou import tudou_download_by_id
from .youku import youku_download_by_vid
import hashlib
import re
appkey='8e9fc618fbd41e28'
def get_srt_xml(id):
url = 'http://comment.bilibili.com/%s.xml' % id
return get_html(url)
def parse_srt_p(p):
fields = p.split(',')
assert len(fields) == 8, fields
time, mode, font_size, font_color, pub_time, pool, user_id, history = fields
time = float(time)
mode = int(mode)
assert 1 <= mode <= 8
# mode 1~3: scrolling
# mode 4: bottom
# mode 5: top
# mode 6: reverse?
# mode 7: position
# mode 8: advanced
pool = int(pool)
assert 0 <= pool <= 2
# pool 0: normal
# pool 1: srt
# pool 2: special?
font_size = int(font_size)
font_color = '#%06x' % int(font_color)
return pool, mode, font_size, font_color
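# Example of the attribute string handled above (made-up values):
#     parse_srt_p('12.5,1,25,16777215,1420000000,0,abcdef12,123456789')
#     # -> (0, 1, 25, '#ffffff'), i.e. (pool, mode, font_size, font_color)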
def parse_srt_xml(xml):
d = re.findall(r'<d p="([^"]+)">(.*)</d>', xml)
for x, y in d:
p = parse_srt_p(x)
raise NotImplementedError()
def parse_cid_playurl(xml):
from xml.dom.minidom import parseString
try:
doc = parseString(xml.encode('utf-8'))
urls = [durl.getElementsByTagName('url')[0].firstChild.nodeValue for durl in doc.getElementsByTagName('durl')]
return urls
except:
return []
def bilibili_download_by_cids(cids, title, output_dir='.', merge=True, info_only=False):
urls = []
for cid in cids:
url = 'http://interface.bilibili.com/playurl?appkey=' + appkey + '&cid=' + cid
urls += [i
if not re.match(r'.*\.qqvideo\.tc\.qq\.com', i)
else re.sub(r'.*\.qqvideo\.tc\.qq\.com', 'http://vsrc.store.qq.com', i)
for i in parse_cid_playurl(get_content(url))]
type_ = ''
size = 0
for url in urls:
_, type_, temp = url_info(url)
size += temp
print_info(site_info, title, type_, size)
if not info_only:
download_urls(urls, title, type_, total_size=None, output_dir=output_dir, merge=merge)
def bilibili_download_by_cid(cid, title, output_dir='.', merge=True, info_only=False):
url = 'http://interface.bilibili.com/playurl?appkey=' + appkey + '&cid=' + cid
urls = [i
if not re.match(r'.*\.qqvideo\.tc\.qq\.com', i)
else re.sub(r'.*\.qqvideo\.tc\.qq\.com', 'http://vsrc.store.qq.com', i)
for i in parse_cid_playurl(get_content(url))]
type_ = ''
size = 0
try:
for url in urls:
_, type_, temp = url_info(url)
size += temp or 0
except error.URLError:
log.wtf('[Failed] DNS not resolved. Please change your DNS server settings.')
print_info(site_info, title, type_, size)
if not info_only:
download_urls(urls, title, type_, total_size=None, output_dir=output_dir, merge=merge)
def bilibili_live_download_by_cid(cid, title, output_dir='.', merge=True, info_only=False):
api_url = 'http://live.bilibili.com/api/playurl?cid=' + cid
urls = parse_cid_playurl(get_content(api_url))
for url in urls:
_, type_, _ = url_info(url)
size = 0
print_info(site_info, title, type_, size)
if not info_only:
download_urls([url], title, type_, total_size=None, output_dir=output_dir, merge=merge)
def bilibili_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
html = get_content(url)
title = r1_of([r'<meta name="title" content="([^<>]{1,999})" />',
r'<h1[^>]*>([^<>]+)</h1>'], html)
if title:
title = unescape_html(title)
title = escape_file_path(title)
flashvars = r1_of([r'(cid=\d+)', r'(cid: \d+)', r'flashvars="([^"]+)"', r'"https://[a-z]+\.bilibili\.com/secure,(cid=\d+)(?:&aid=\d+)?"'], html)
assert flashvars
flashvars = flashvars.replace(': ','=')
t, cid = flashvars.split('=', 1)
cid = cid.split('&')[0]
if t == 'cid':
if re.match(r'https?://live\.bilibili\.com/', url):
title = r1(r'<title>([^<>]+)</title>', html)
bilibili_live_download_by_cid(cid, title, output_dir=output_dir, merge=merge, info_only=info_only)
else:
# multi-P
cids = []
pages = re.findall('<option value=\'([^\']*)\'', html)
titles = re.findall('<option value=.*>(.+)</option>', html)
for page in pages:
html = get_html("http://www.bilibili.com%s" % page)
flashvars = r1_of([r'(cid=\d+)',
r'flashvars="([^"]+)"',
r'"https://[a-z]+\.bilibili\.com/secure,(cid=\d+)(?:&aid=\d+)?"'], html)
if flashvars:
t, cid = flashvars.split('=', 1)
cids.append(cid.split('&')[0])
# no multi-P
if not pages:
cids = [cid]
titles = [r1(r'<option value=.* selected>(.+)</option>', html) or title]
for i in range(len(cids)):
bilibili_download_by_cid(cids[i],
titles[i],
output_dir=output_dir,
merge=merge,
info_only=info_only)
elif t == 'vid':
sina_download_by_vid(cid, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
elif t == 'ykid':
youku_download_by_vid(cid, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
elif t == 'uid':
tudou_download_by_id(cid, title, output_dir=output_dir, merge=merge, info_only=info_only)
else:
raise NotImplementedError(flashvars)
if not info_only and not dry_run:
if not kwargs['caption']:
print('Skipping danmaku.')
return
title = get_filename(title)
print('Downloading %s ...\n' % (title + '.cmt.xml'))
xml = get_srt_xml(cid)
with open(os.path.join(output_dir, title + '.cmt.xml'), 'w', encoding='utf-8') as x:
x.write(xml)
site_info = "bilibili.com"
download = bilibili_download
download_playlist = bilibili_download
|
the-stack_0_22891 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""State management for eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import random
import threading
import numpy as np
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python import tf2
from tensorflow.python.eager import executor
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import device as pydev
from tensorflow.python.util import compat
from tensorflow.python.util import is_in_graph_mode
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
GRAPH_MODE = 0
EAGER_MODE = 1
default_execution_mode = EAGER_MODE if tf2.enabled() else GRAPH_MODE
# Cache from (old_device_name, partial_new_device_name) -> (new_device_name,
# new_device_spec).
# Note that we do not protect this with a lock and instead rely on python's GIL
# and the idempotent nature of writes to provide thread safety.
_device_parsing_cache = {}
_starting_device_spec = pydev.DeviceSpec.from_string("")
_MAXINT32 = 2**31 - 1
DEVICE_PLACEMENT_EXPLICIT = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_EXPLICIT
DEVICE_PLACEMENT_WARN = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_WARN
DEVICE_PLACEMENT_SILENT = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_SILENT
DEVICE_PLACEMENT_SILENT_FOR_INT32 = (
pywrap_tensorflow.TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32)
SYNC = 0
ASYNC = 1
MIRRORING_NONE = pywrap_tensorflow.TFE_MIRRORING_NONE
MIRRORING_ALL = pywrap_tensorflow.TFE_MIRRORING_ALL
_python_eager_context_create_counter = monitoring.Counter(
"/tensorflow/api/python/eager_context_create_counter",
"Counter for number of eager contexts created in Python.")
class _EagerTensorCache(object):
"""Simple cache which evicts items based on length in a FIFO manner."""
def __init__(self, max_items=256, max_tensor_size=10000):
self._data = collections.OrderedDict()
self._max_items = max_items
self._max_tensor_size = max_tensor_size
def put(self, key, value):
if value._num_elements() > self._max_tensor_size: # pylint: disable=protected-access
return
self._data[key] = value
if len(self._data) > self._max_items:
self._data.popitem(last=False)
def get(self, key):
return self._data.get(key, None)
def flush(self):
self._data = {}
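# Minimal sketch of the FIFO eviction behaviour (t1..t3 stand for small EagerTensors and
# are hypothetical):
#     cache = _EagerTensorCache(max_items=2)
#     cache.put("a", t1); cache.put("b", t2); cache.put("c", t3)
#     cache.get("a")  # -> None, the oldest entry was evicted first-in-first-out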
class FunctionCallOptions(object):
"""Options applied at call sites of eager functions.
Eager functions are functions decorated with tf.contrib.eager.defun.
"""
def __init__(self, executor_type=None, config_proto=None):
"""Constructor.
Args:
executor_type: (optional) name of the executor to be used to execute the
eager function. If None or an empty string, the default Tensorflow
executor will be used.
config_proto: (optional) a `config_pb2.ConfigProto` proto or
a serialized string of that proto.
The config used by Grappler when optimizing the function graph.
Each concrete function is optimized the first time is called. Changing
config_proto after the first call has no effect.
If config_proto is None, an empty RewriterConfig will be used.
"""
self.config_proto_serialized = config_proto
self.executor_type = executor_type
@property
def executor_type(self):
return self._executor_type
@executor_type.setter
def executor_type(self, executor_type):
self._executor_type = executor_type
@property
def config_proto_serialized(self):
return self._config_proto_serialized
@config_proto_serialized.setter
def config_proto_serialized(self, config):
if isinstance(config, config_pb2.ConfigProto):
self._config_proto_serialized = config.SerializeToString()
elif isinstance(config, str):
self._config_proto_serialized = config
elif config is None:
self._config_proto_serialized = (
config_pb2.ConfigProto().SerializeToString())
else:
raise ValueError("the rewriter config must be either a "
"config_pb2.ConfigProto, or a serialized string of that "
"proto or None. got: {}".format(type(config)))
# Map from context_id (an int) to _TensorCaches.
# Dicts are thread safe in CPython.
# TODO(iga): Remove this once TensorCaches are moved to C++.
_tensor_caches_map = {}
class _TensorCaches(threading.local):
"""Thread local tensor caches."""
def __init__(self):
super(_TensorCaches, self).__init__()
self._ones_rank_cache = None
self._zeros_cache = None
@property
def ones_rank_cache(self):
if not self._ones_rank_cache:
self._ones_rank_cache = _EagerTensorCache()
return self._ones_rank_cache
@property
def zeros_cache(self):
if not self._zeros_cache:
self._zeros_cache = _EagerTensorCache()
return self._zeros_cache
class _ThreadLocalData(threading.local):
"""Thread local storage for the eager context."""
def __init__(self):
super(_ThreadLocalData, self).__init__()
self.device_spec = _starting_device_spec
self.device_name = ""
self.is_eager = default_execution_mode == EAGER_MODE
self.scope_name = ""
self.function_call_options = None
self.executor = None
self.op_callbacks = []
self.invoking_op_callbacks = False
ContextSwitch = collections.namedtuple(
"ContextSwitch", ["is_building_function", "enter_context_fn",
"device_stack"])
# `_ContextSwitchStack` is a `threading.local` to match the semantics of
# `DefaultGraphStack`, which is also a `threading.local`.
class _ContextSwitchStack(threading.local):
"""A thread-local stack of context switches."""
def __init__(self, eager):
super(_ContextSwitchStack, self).__init__()
self.stack = []
if eager:
# Initialize the stack with a pointer to enter the eager context; this
# ensures that the fact that eager execution was enabled is propagated
# across threads, since (1) `enable_eager_execution` modifies a
# process-level flag (`default_execution_mode`) and (2) `__init__` is
# called each time a threading.local object is used in a separate thread.
self.push(is_building_function=False, enter_context_fn=eager_mode,
device_stack=None)
def push(self, is_building_function, enter_context_fn, device_stack):
"""Push metadata about a context switch onto the stack.
A context switch can take any one of the two forms: installing a graph as
the default graph, or entering the eager context. For each context switch,
we record whether or not the entered context is building a function.
Args:
is_building_function: (bool.) Whether the context is building a function.
enter_context_fn: (function.) A callable that executes the context switch.
For example, `graph.as_default` or `eager_mode`.
device_stack: If applicable, the device function stack for this
graph. When breaking out of graphs in init_scope, the innermost nonempty
device stack is used. Eager contexts put `None` here and the value is
never used.
"""
self.stack.append(
ContextSwitch(is_building_function, enter_context_fn, device_stack))
def pop(self):
"""Pop the stack."""
self.stack.pop()
class LogicalDevice(
collections.namedtuple("LogicalDevice", ["name", "device_type"])):
"""Abstraction for a device initialized by the runtime.
A LogicalDevice corresponds to an initialized instance on a PhysicalDevice or a
remote device available in the cluster. Tensors and operations can be placed
on a specific LogicalDevice by calling `tf.device()` with the `name` of the
LogicalDevice.
Fields:
name: The fully qualified name of the device. Can be used for Op or function
placement.
device_type: String declaring the type of device such as "CPU" or "GPU".
"""
pass
@tf_export("config.experimental.VirtualDeviceConfiguration")
class VirtualDeviceConfiguration(
collections.namedtuple("VirtualDeviceConfiguration", ["memory_limit"])):
"""Configuration class for a `LogicalDevice`.
The class specifies the parameters for a `LogicalDevice` used during runtime
initialization. Not all fields are valid for all device types.
See `tf.config.experimental.get_virtual_device_configuration` and
`tf.config.experimental.set_virtual_device_configuration` for usage examples.
Fields:
memory_limit: (optional) Maximum memory (in MB) to allocate on the virtual
device. Currently only supported for GPUs.
"""
def __new__(cls, memory_limit=None):
return super(VirtualDeviceConfiguration, cls).__new__(cls, memory_limit)
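# --- Illustrative sketch (added; not part of the original module) ---
# Typical use of VirtualDeviceConfiguration through the public
# tf.config.experimental API mentioned in the docstring above. Assumptions:
# TensorFlow is importable as `tf`, at least one GPU is present, and the
# 2048/1024 MB limits are arbitrary example values.
def _example_split_gpu_into_virtual_devices():
  import tensorflow as tf
  gpus = tf.config.experimental.list_physical_devices("GPU")
  if not gpus:
    return []
  tf.config.experimental.set_virtual_device_configuration(
      gpus[0],
      [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=2048),
       tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)])
  return tf.config.experimental.list_logical_devices("GPU")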
class PhysicalDevice(
collections.namedtuple("PhysicalDevice", ["name", "device_type"])):
"""Abstraction for a locally visible physical device.
TensorFlow can utilize various devices such as the CPU or multiple GPUs
for computation. Before initializing a local device for use, the user can
customize certain properties of the device such as its visibility or memory
configuration.
Once a PhysicalDevice is initialized, one or more LogicalDevice objects are
created. Use tf.config.set_virtual_device_configuration() to create multiple
LogicalDevice objects for a PhysicalDevice. This is useful when separation
between models is needed.
Fields:
name: Unique identifier for device.
device_type: String declaring the type of device such as "CPU" or "GPU".
"""
pass
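# --- Illustrative sketch (added; not part of the original module) ---
# Demonstrates the PhysicalDevice-level configuration methods defined on
# Context further below (list_physical_devices / set_memory_growth /
# get_visible_devices). Assumption: `ctx` has not been initialized yet,
# since device settings are frozen once the context handle exists.
def _example_enable_gpu_memory_growth(ctx):
  for gpu in ctx.list_physical_devices("GPU"):
    ctx.set_memory_growth(gpu, True)
  return ctx.get_visible_devices("GPU")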
class _AtomicCounter(object):
"""A simple atomic counter."""
def __init__(self):
self._value = 0
self._lock = threading.Lock()
def increment_and_get(self):
with self._lock:
self._value += 1
return self._value
_context_id_counter = _AtomicCounter()
class _TensorCacheDeleter(object):
"""Deletes tensor caches for a given context."""
def __init__(self, context_id):
self._context_id = context_id
def __del__(self):
if _tensor_caches_map is None:
return
if self._context_id in _tensor_caches_map:
del _tensor_caches_map[self._context_id]
# TODO(agarwal): rename to EagerContext / EagerRuntime ?
# TODO(agarwal): consider keeping the corresponding Graph here.
class Context(object):
"""Environment in which eager operations execute."""
# TODO(agarwal): create and link in some documentation for `execution_mode`.
# pylint: disable=redefined-outer-name
def __init__(self,
config=None,
device_policy=None,
execution_mode=None,
server_def=None):
"""Creates a new Context.
Args:
config: (Optional.) A `ConfigProto` protocol buffer with configuration
options for the Context. Note that a lot of these options may be
currently unimplemented or irrelevant when eager execution is enabled.
device_policy: (Optional.) What policy to use when trying to run an
operation on a device with inputs which are not on that device.
When set to None, an appropriate value will be picked automatically.
The value picked may change between TensorFlow releases.
Defaults to DEVICE_PLACEMENT_SILENT.
Valid values:
- DEVICE_PLACEMENT_EXPLICIT: raises an error if the placement is
not correct.
- DEVICE_PLACEMENT_WARN: copies the tensors which are not on the
right device but raises a warning.
- DEVICE_PLACEMENT_SILENT: silently copies the tensors. This might
hide performance problems.
- DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies int32 tensors,
raising errors on the other ones.
execution_mode: (Optional.) Policy controlling how operations dispatched
are actually executed. When set to None, an appropriate value will be
picked automatically. The value picked may change between TensorFlow
releases.
Valid values:
- SYNC: executes each operation synchronously.
- ASYNC: executes each operation asynchronously. These
operations may return "non-ready" handles.
server_def: (Optional.) A tensorflow::ServerDef proto.
Enables execution on remote devices. GrpcServers need to be started by
creating an identical server_def to this, and setting the appropriate
task_indexes, so that the servers can communicate. It will then be
possible to execute operations on remote devices.
Raises:
ValueError: If execution_mode is not valid.
"""
# This _id is used only to index the tensor caches.
# TODO(iga): Remove this when tensor caches are moved to C++.
self._id = _context_id_counter.increment_and_get()
self._tensor_cache_deleter = _TensorCacheDeleter(self._id)
_tensor_caches_map[self._id] = _TensorCaches()
self._config = config
self._thread_local_data = _ThreadLocalData()
self._context_switches = _ContextSwitchStack(self.executing_eagerly())
self._context_handle = None
self._context_devices = None
self._seed = None
self._initialize_lock = threading.Lock()
self._initialized = False
if device_policy is None:
device_policy = DEVICE_PLACEMENT_SILENT
self._device_policy = device_policy
self._mirroring_policy = None
if execution_mode not in (None, SYNC, ASYNC):
raise ValueError(
"execution_mode should be None/SYNC/ASYNC. Got %s" % execution_mode)
if execution_mode is None:
execution_mode = SYNC
self._default_is_async = execution_mode == ASYNC
self._server_def = server_def
self._collective_ops_server_def = None
self._collective_leader = None
self._collective_scoped_allocator_enabled_ops = None
self._collective_use_nccl_communication = None
self._collective_device_filters = None
self._device_lock = threading.Lock()
self._physical_devices = None
self._visible_device_list = []
self._memory_growth_map = None
self._virtual_device_map = {}
# Values set after construction
self._optimizer_jit = None
self._intra_op_parallelism_threads = None
self._inter_op_parallelism_threads = None
self._soft_device_placement = None
self._log_device_placement = None
self._optimizer_experimental_options = {}
_python_eager_context_create_counter.get_cell().increase_by(1)
# pylint: enable=redefined-outer-name
def _set_global_seed(self, seed):
"""Set a global eager mode seed for random ops."""
self._seed = seed
# `random.Random(seed)` needs `seed` to be hashable, while values of type
# e.g. `np.int64` or `np.ndarray` are not. We use `int(...)` to convert them
# to int.
try:
hash(seed)
except TypeError:
seed = int(np.array(seed))
self._rng = random.Random(seed)
# Also clear the kernel cache, to reset any existing seeds
if self._context_handle is not None:
pywrap_tensorflow.TFE_ContextClearCaches(self._context_handle)
def _internal_operation_seed(self):
"""Returns a fake operation seed.
In eager mode, users shouldn't set or depend on the operation seed.
Here, we generate a random seed based on the global seed so that each
operation's randomness differs while still depending on the global seed.
Returns:
A fake operation seed based on global seed.
"""
return self._rng.randint(0, _MAXINT32)
def _initialize_logical_devices(self):
"""Helper to initialize devices."""
# Store list of devices
self._logical_devices = []
self._context_devices = []
device_list = pywrap_tensorflow.TFE_ContextListDevices(
self._context_handle)
try:
self._num_gpus = 0
for i in range(pywrap_tensorflow.TF_DeviceListCount(device_list)):
dev_name = pywrap_tensorflow.TF_DeviceListName(device_list, i)
self._context_devices.append(pydev.canonical_name(dev_name))
spec = pydev.DeviceSpec.from_string(dev_name)
self._logical_devices.append(
LogicalDevice(name=dev_name, device_type=spec.device_type))
dev_type = pywrap_tensorflow.TF_DeviceListType(device_list, i)
if dev_type == "GPU":
self._num_gpus += 1
finally:
pywrap_tensorflow.TF_DeleteDeviceList(device_list)
def ensure_initialized(self):
"""Initialize handle and devices if not already done so."""
if self._initialized:
return
with self._initialize_lock:
if self._initialized:
return
assert self._context_devices is None
opts = pywrap_tensorflow.TFE_NewContextOptions()
try:
config_str = self.config.SerializeToString()
pywrap_tensorflow.TFE_ContextOptionsSetConfig(opts, config_str)
if self._device_policy is not None:
pywrap_tensorflow.TFE_ContextOptionsSetDevicePlacementPolicy(
opts, self._device_policy)
if self._mirroring_policy is not None:
pywrap_tensorflow.TFE_ContextOptionsSetMirroringPolicy(
opts, self._mirroring_policy)
if self._default_is_async:
pywrap_tensorflow.TFE_ContextOptionsSetAsync(opts, True)
context_handle = pywrap_tensorflow.TFE_NewContext(opts)
finally:
pywrap_tensorflow.TFE_DeleteContextOptions(opts)
assert not (self._server_def and self._collective_ops_server_def), (
"Cannot enable remote execution as well as collective ops at the "
"moment. If this is important to you, please file an issue.")
if self._server_def is not None:
server_def_str = self._server_def.SerializeToString()
pywrap_tensorflow.TFE_ContextSetServerDef(context_handle, 600,
server_def_str)
elif self._collective_ops_server_def is not None:
server_def_str = self._collective_ops_server_def.SerializeToString()
pywrap_tensorflow.TFE_EnableCollectiveOps(context_handle,
server_def_str)
self._context_handle = context_handle
self._initialize_logical_devices()
self._initialized = True
def _clear_caches(self):
self.ones_rank_cache().flush()
self.zeros_cache().flush()
pywrap_tensorflow.TFE_ClearScalarCache()
def set_server_def(self, server_def, keep_alive_secs=600):
"""Allow setting a server_def on the context.
When a server def is replaced, it effectively clears a bunch of caches
within the context. If you attempt to use a tensor object that was pointing
to a tensor on the remote device, it will raise an error.
Args:
server_def: A tensorflow::ServerDef proto.
Enables execution on remote devices.
keep_alive_secs: Number of seconds after which the remote end will hang up.
As long as the client is still alive, the server state for the context
will be kept alive. If the client is killed (or there is some failure),
the server will clean up its context keep_alive_secs after the final RPC
it receives.
Raises:
ValueError: if server_def is None.
"""
if not server_def:
raise ValueError("server_def is None.")
self._server_def = server_def
if self._context_handle:
server_def_str = server_def.SerializeToString()
pywrap_tensorflow.TFE_ContextSetServerDef(self._context_handle,
keep_alive_secs, server_def_str)
self._initialize_logical_devices()
# Clear all the caches in case there are remote tensors in them.
self._clear_caches()
def enable_collective_ops(self, server_def):
"""Enable distributed collective ops with an appropriate server_def.
Args:
server_def: A tensorflow::ServerDef proto. Enables execution on remote
devices.
Raises:
ValueError: if server_def is None.
RuntimeError: if this method is not called at program startup.
"""
if not server_def:
raise ValueError("server_def is None.")
if self._context_handle is not None:
raise RuntimeError("Collective ops must be enabled at program startup")
self._collective_ops_server_def = server_def
def configure_collective_ops(
self,
collective_leader="",
scoped_allocator_enabled_ops=("CollectiveReduce",),
use_nccl_communication=False,
device_filters=None):
"""Configure collective ops.
A collective group leader is necessary for collective ops to run; the other
configurations are mainly for performance.
Args:
collective_leader: a device string for collective leader, e.g.
"/job:worker/replica:0/task:"; empty string means local execution of
collective ops.
scoped_allocator_enabled_ops: a tuple or a list of op names for scoped
allocator to run with.
use_nccl_communication: whether to use nccl communication for collective
ops.
device_filters: a tuple or a list of device strings. If set, the
corresponding task can only see the devices matched by these filters.
Raises:
RuntimeError: if this method is not called at program startup.
"""
if self._collective_leader is not None:
if (self._collective_leader != collective_leader or
self._collective_scoped_allocator_enabled_ops !=
scoped_allocator_enabled_ops or
self._collective_use_nccl_communication != use_nccl_communication or
self._collective_device_filters != device_filters):
raise ValueError("Collective ops are already configured.")
else:
return
if self._context_handle is not None:
raise RuntimeError("Collective ops must be configured at program startup")
self._collective_leader = collective_leader
self._collective_scoped_allocator_enabled_ops = scoped_allocator_enabled_ops
self._collective_use_nccl_communication = use_nccl_communication
self._collective_device_filters = device_filters
@property
def _handle(self):
if self._context_handle is None:
raise AssertionError("Context must be initialized first.")
return self._context_handle
@property
def _devices(self):
if self._context_devices is None:
raise AssertionError("Context must be initialized first.")
return self._context_devices
def __str__(self):
if self._context_handle is None:
return "Eager TensorFlow Context. Devices currently uninitialized."
else:
devices = self._devices
lines = ["Eager TensorFlow Context with %d devices" % (len(devices))]
for i, d in enumerate(devices):
lines.append(" Device %d: %s" % (i, d))
return "\n".join(lines)
@tf_contextlib.contextmanager
def _mode(self, mode):
"""A context manager to allow setting the mode to EAGER/GRAPH."""
ctx = self._thread_local_data
old_is_eager = ctx.is_eager
ctx.is_eager = mode == EAGER_MODE
if mode == EAGER_MODE:
# Entering graph mode does not provide us with sufficient information to
# record a context switch; graph-based context switches are only logged
# when a graph is registered as the default graph.
self.context_switches.push(False, eager_mode, None)
try:
yield
finally:
ctx.is_eager = old_is_eager
if mode == EAGER_MODE:
self.context_switches.pop()
def executing_eagerly(self):
"""Returns True if current thread has eager executing enabled."""
return self._thread_local_data.is_eager
def ones_rank_cache(self):
"""Per-device cache for scalars."""
return _tensor_caches_map[self._id].ones_rank_cache
def zeros_cache(self):
"""Per-device cache for scalars."""
return _tensor_caches_map[self._id].zeros_cache
@property
def scope_name(self):
"""Returns scope name for the current thread."""
return self._thread_local_data.scope_name
@scope_name.setter
def scope_name(self, s):
"""Sets scope name for the current thread."""
self._thread_local_data.scope_name = s
@property
def device_name(self):
"""Returns the device name for the current thread."""
return self._thread_local_data.device_name
@property
def device_spec(self):
"""Returns the device spec for the current thread."""
return self._thread_local_data.device_spec
def _set_device(self, device_name, device_spec):
self._thread_local_data.device_name = device_name
self._thread_local_data.device_spec = device_spec
def device(self, name):
"""Context-manager to force placement of operations and Tensors on a device.
Args:
name: Name of the device or None to get default placement.
Returns:
Context manager that forces device placement.
Raises:
ValueError: If name is not a string or is an invalid device name.
RuntimeError: If device scopes are not properly nested.
"""
return _EagerDeviceContext(self, name)
def devices(self):
"""List of the names of devices available to execute operations."""
return self._devices
# TODO(fishx): remove this property.
@property
def execution_mode(self):
"""Gets execution mode for current thread."""
return ASYNC if self.is_async() else SYNC
@execution_mode.setter
def execution_mode(self, mode):
"""Sets execution mode for current thread."""
if mode not in (None, SYNC, ASYNC):
raise ValueError(
"Execution mode should be None/SYNC/ASYNC. Got %s" % mode)
if mode is None:
mode = SYNC
enable_async = (mode == ASYNC)
if self.is_async() != enable_async:
# Only set the execution mode if the context has already been initialized
if self._context_handle is not None:
self.executor.wait()
executor_new = executor.new_executor(enable_async)
self._thread_local_data.executor = executor_new
pywrap_tensorflow.TFE_ContextSetExecutorForThread(
self._context_handle, executor_new.handle())
else:
self._default_is_async = enable_async
def is_async(self):
if self._context_handle is not None:
return self.executor.is_async()
else:
return self._default_is_async
@property
def executor(self):
ensure_initialized()
return executor.Executor(
pywrap_tensorflow.TFE_ContextGetExecutorForThread(self._context_handle))
@executor.setter
def executor(self, e):
ensure_initialized()
pywrap_tensorflow.TFE_ContextSetExecutorForThread(self._context_handle,
e.handle())
@property
def config(self):
"""Return the ConfigProto with all runtime deltas applied."""
# Ensure physical devices have been discovered and config has been imported
self._initialize_physical_devices()
config = config_pb2.ConfigProto()
if self._config is not None:
config.CopyFrom(self._config)
if self._optimizer_jit is not None:
config.graph_options.optimizer_options.global_jit_level = (
config_pb2.OptimizerOptions.ON_1
if self._optimizer_jit else config_pb2.OptimizerOptions.OFF)
if self._intra_op_parallelism_threads is not None:
config.intra_op_parallelism_threads = self._intra_op_parallelism_threads
if self._inter_op_parallelism_threads is not None:
config.inter_op_parallelism_threads = self._inter_op_parallelism_threads
if self._soft_device_placement is not None:
config.allow_soft_placement = self._soft_device_placement
else:
config.allow_soft_placement = self.executing_eagerly()
if self._log_device_placement is not None:
config.log_device_placement = self._log_device_placement
def rewriter_toggle(option):
toggle = self._optimizer_experimental_options.get(option, None)
if toggle is None:
return
setattr(config.graph_options.rewrite_options,
option,
(rewriter_config_pb2.RewriterConfig.ON
if toggle else rewriter_config_pb2.RewriterConfig.OFF))
def rewriter_bool(option):
toggle = self._optimizer_experimental_options.get(option, None)
if toggle is None:
return
setattr(config.graph_options.rewrite_options,
option,
toggle)
rewriter_toggle("layout_optimizer")
rewriter_toggle("constant_folding")
rewriter_toggle("shape_optimization")
rewriter_toggle("remapping")
rewriter_toggle("arithmetic_optimization")
rewriter_toggle("dependency_optimization")
rewriter_toggle("loop_optimization")
rewriter_toggle("function_optimization")
rewriter_toggle("debug_stripper")
rewriter_bool("disable_model_pruning")
rewriter_toggle("scoped_allocator_optimization")
rewriter_toggle("pin_to_host_optimization")
rewriter_toggle("implementation_selector")
rewriter_toggle("auto_mixed_precision")
rewriter_bool("disable_meta_optimizer")
nodes = self._optimizer_experimental_options.get("min_graph_nodes", None)
if nodes is not None:
config.graph_options.rewrite_options.min_graph_nodes = nodes
# Compute device counts
config.device_count["CPU"] = 0
config.device_count["GPU"] = 0
for dev in self._physical_devices:
if dev not in self._visible_device_list:
continue
virtual_devices = self._virtual_device_map.get(dev)
if virtual_devices is None:
config.device_count[dev.device_type] += 1
else:
config.device_count[dev.device_type] += len(virtual_devices)
# Configure gpu_options
gpu_options = self._compute_gpu_options()
config.gpu_options.MergeFrom(gpu_options)
# Configure collective ops
if self._collective_leader:
config.experimental.collective_group_leader = self._collective_leader
if self._collective_scoped_allocator_enabled_ops:
rewrite_options = config.graph_options.rewrite_options
rewrite_options.scoped_allocator_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
del rewrite_options.scoped_allocator_opts.enable_op[:]
for op in self._collective_scoped_allocator_enabled_ops:
rewrite_options.scoped_allocator_opts.enable_op.append(op)
if self._collective_use_nccl_communication:
config.experimental.collective_nccl = True
if self._collective_device_filters:
del config.device_filters[:]
for f in self._collective_device_filters:
config.device_filters.append(f)
return config
def _compute_gpu_options(self):
"""Build the GPUOptions proto."""
visible_device_list = []
virtual_devices = []
gpu_index = -1
memory_growths = set()
for dev in self.list_physical_devices("GPU"):
gpu_index += 1
if dev not in self._visible_device_list:
continue
growth = self._memory_growth_map[dev]
memory_growths.add(growth)
visible_device_list.append(str(gpu_index))
if self._virtual_device_map:
vdevs = self._virtual_device_map.get(dev, [])
device_limits = []
for virt_dev in vdevs:
device_limits.append(virt_dev.memory_limit)
virtual_devices.append(
config_pb2.GPUOptions.Experimental.VirtualDevices(
memory_limit_mb=device_limits))
# Only compute growth if virtual devices have not been configured and we
# have GPUs
if not virtual_devices and memory_growths:
if len(memory_growths) > 1:
raise ValueError("Memory growth cannot differ between GPU devices")
allow_growth = memory_growths.pop()
else:
allow_growth = None
return config_pb2.GPUOptions(
allow_growth=allow_growth,
visible_device_list=",".join(visible_device_list),
experimental=config_pb2.GPUOptions.Experimental(
virtual_devices=virtual_devices))
@property
def function_call_options(self):
"""Returns function call options for current thread.
Note that the returned object is still referenced by the eager context.
Returns: the FunctionCallOptions for current thread.
"""
if self._thread_local_data.function_call_options is None:
config = self.config
# Default to soft placement for functions unless specified
if self._soft_device_placement is None:
config.allow_soft_placement = True
self._thread_local_data.function_call_options = FunctionCallOptions(
config_proto=config)
return self._thread_local_data.function_call_options
@function_call_options.setter
def function_call_options(self, options):
"""Returns function call options for current thread."""
self._thread_local_data.function_call_options = options
def num_gpus(self):
"""The number of GPUs available to execute operations."""
self.ensure_initialized()
return self._num_gpus
def add_function(self, fn):
"""Add a function definition to the context.
Once added, the function (identified by its name) can be executed like any
other operation.
Args:
fn: A wrapped TF_Function (returned from TF_GraphToFunction_wrapper).
"""
self.ensure_initialized()
pywrap_tensorflow.TFE_ContextAddFunction(self._handle, fn)
def add_function_def(self, fdef):
"""Add a function definition to the context.
Once added, the function (identified by its name) can be executed like any
other operation.
Args:
fdef: A FunctionDef protocol buffer message.
"""
self.ensure_initialized()
fdef_string = fdef.SerializeToString()
pywrap_tensorflow.TFE_ContextAddFunctionDef(
self._handle, fdef_string, len(fdef_string))
def remove_function(self, name):
"""Remove a function from the context.
Once removed, the function cannot be executed anymore.
Args:
name: function signature name.
"""
self.ensure_initialized()
pywrap_tensorflow.TFE_ContextRemoveFunction(self._handle, name)
def has_function(self, name):
"""Check if a function `name` is registered."""
self.ensure_initialized()
return bool(pywrap_tensorflow.TFE_ContextHasFunction(self._handle, name))
def add_op_callback(self, callback):
"""Add a post-op callback to the context.
A post-op callback is invoked immediately after an eager operation or
function has finished execution or after a op has been added to a graph,
providing access to the op's type, name, input and output tensors. Multiple
op callbacks can be added, in which case the callbacks will be invoked in
the order in which they are added.
Args:
callback: a callable of the signature
`f(op_type, inputs, attrs, outputs, op_name=None, graph=None)`.
See doc strings in `op_callbacks.py` for details on the function
signature and its semantics.
"""
if callback not in self._thread_local_data.op_callbacks:
self._thread_local_data.op_callbacks.append(callback)
def remove_op_callback(self, callback):
"""Remove an already-registered op callback.
Args:
callback: The op callback to be removed.
Raises:
KeyError: If `callback` is not already registered.
"""
if callback not in self._thread_local_data.op_callbacks:
raise KeyError(
"The specified op callback has not been registered, "
"and hence cannot be removed.")
del self._thread_local_data.op_callbacks[
self._thread_local_data.op_callbacks.index(callback)]
@property
def op_callbacks(self):
return self._thread_local_data.op_callbacks
@property
def invoking_op_callbacks(self):
return self._thread_local_data.invoking_op_callbacks
@invoking_op_callbacks.setter
def invoking_op_callbacks(self, value):
self._thread_local_data.invoking_op_callbacks = value
def _initialize_physical_devices(self):
"""Get local devices visible to the system."""
# We lazily initialize self._physical_devices since we do not want to do
# this in the constructor, as the backend may not be initialized yet.
with self._device_lock:
if self._physical_devices is not None:
return
devs = pywrap_tensorflow.TF_ListPhysicalDevices()
self._physical_devices = [
PhysicalDevice(name=d.decode(),
device_type=d.decode().split(":")[1]) for d in devs]
# Construct the visible device list from all physical devices but ignore
# XLA devices
self._visible_device_list = [
d for d in self._physical_devices
if not d.device_type.startswith("XLA")
]
self._memory_growth_map = {
d: None for d in self._physical_devices if d.device_type == "GPU"
}
# Import device settings that may have been passed into the constructor
self._import_config()
def list_physical_devices(self, device_type=None):
"""List local devices visible to the system.
This API allows a client to query the devices before they have been
initialized by the eager runtime. Additionally a user can filter by device
type, to get only CPUs or GPUs.
Args:
device_type: Optional device type to limit results to
Returns:
List of PhysicalDevice objects.
"""
self._initialize_physical_devices()
if device_type is not None:
return [
d for d in self._physical_devices if device_type == d.device_type
]
return self._physical_devices
def _import_config(self):
"""Import config if passed in during construction.
If Context was created with a ConfigProto such as when calling
tf.compat.v1.enable_eager_execution(), then we need to pull out the
various pieces we might be replacing and import them into our internal
class representation.
"""
if self._config is None:
return
num_cpus = self._config.device_count.get("CPU", 1)
if num_cpus != 1:
cpus = [d for d in self._physical_devices if d.device_type == "CPU"]
if num_cpus == 0:
self.set_visible_devices([], "CPU")
elif num_cpus > 1:
self.set_virtual_device_configuration(
cpus[0], [VirtualDeviceConfiguration() for _ in range(num_cpus)])
# Parse GPU options
gpus = [d for d in self._physical_devices if d.device_type == "GPU"]
# If there are no GPUs detected, simply ignore all the GPU options passed in
# rather than doing any validation checks.
if not gpus:
return
gpu_count = self._config.device_count.get("GPU", None)
visible_gpus = []
# TODO(gjn): Handle importing existing virtual GPU configuration
visible_indices = self._config.gpu_options.visible_device_list
if visible_indices:
for index in visible_indices.split(","):
if int(index) >= len(gpus):
raise ValueError("Invalid visible device index: %s" % index)
visible_gpus.append(gpus[int(index)])
else:
visible_gpus = gpus
if gpu_count is not None:
visible_gpus = visible_gpus[:gpu_count]
self.set_visible_devices(visible_gpus, "GPU")
def list_logical_devices(self, device_type=None):
"""Return logical devices."""
self.ensure_initialized()
devices = []
for dev in self._logical_devices:
if device_type is not None and device_type != dev.device_type:
continue
devices.append(dev)
return devices
def get_visible_devices(self, device_type=None):
"""Get the list of visible devices."""
self._initialize_physical_devices()
if device_type is None:
return self._visible_device_list
else:
return [
d for d in self._visible_device_list if d.device_type == device_type
]
def set_visible_devices(self, devices, device_type=None):
"""Set the list of visible devices."""
self._initialize_physical_devices()
if not isinstance(devices, list):
devices = [devices]
for d in devices:
if d not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(d))
if device_type is not None and d.device_type != device_type:
raise ValueError("Unrecognized device: %s" % repr(d))
visible_device_list = []
if device_type is not None:
visible_device_list = [
d for d in self._visible_device_list if d.device_type != device_type
]
visible_device_list += devices
if self._visible_device_list == visible_device_list:
return
if self._context_handle is not None:
raise RuntimeError(
"Visible devices cannot be modified after being initialized")
self._visible_device_list = visible_device_list
def get_memory_growth(self, dev):
"""Get if memory growth is enabled for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
return self._memory_growth_map[dev]
def set_memory_growth(self, dev, enable):
"""Set if memory growth should be enabled for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
if dev in self._virtual_device_map:
raise ValueError(
"Cannot set memory growth on device when virtual devices configured")
if dev.device_type != "GPU":
raise ValueError("Cannot set memory growth on non-GPU devices")
if self._memory_growth_map.get(dev) == enable:
return
if self._context_handle is not None:
raise RuntimeError(
"Physical devices cannot be modified after being initialized")
self._memory_growth_map[dev] = enable
def get_virtual_device_configuration(self, dev):
"""Get the virtual device configuration for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
return self._virtual_device_map.get(dev)
def set_virtual_device_configuration(self, dev, virtual_devices):
"""Set the virtual device configuration for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
if dev.device_type == "CPU":
for vdev in virtual_devices:
if vdev.memory_limit is not None:
raise ValueError("Setting memory limit on CPU virtual devices is "
"currently not supported")
elif dev.device_type == "GPU":
for vdev in virtual_devices:
if vdev.memory_limit is None:
raise ValueError(
"Setting memory limit is required for GPU virtual devices is")
else:
raise ValueError("Virtual devices are not supported for %s" %
dev.device_type)
if self._virtual_device_map.get(dev) == virtual_devices:
return
if self._context_handle is not None:
raise RuntimeError(
"Virtual devices cannot be modified after being initialized")
self._virtual_device_map[dev] = virtual_devices
@property
def optimizer_jit(self):
level = self.config.graph_options.optimizer_options.global_jit_level
return (level == config_pb2.OptimizerOptions.ON_1 or
level == config_pb2.OptimizerOptions.ON_2)
@optimizer_jit.setter
def optimizer_jit(self, enabled):
self._optimizer_jit = enabled
self._thread_local_data.function_call_options = None
def get_optimizer_experimental_options(self):
"""Get experimental options for the optimizer.
Returns:
Dictionary of current option values
"""
rewrite_options = self.config.graph_options.rewrite_options
options = {}
def rewriter_toggle(option):
attr = getattr(rewrite_options, option)
if attr != 0:
options[option] = (attr == rewriter_config_pb2.RewriterConfig.ON)
def rewriter_bool(option):
options[option] = getattr(rewrite_options, option)
rewriter_toggle("layout_optimizer")
rewriter_toggle("constant_folding")
rewriter_toggle("shape_optimization")
rewriter_toggle("remapping")
rewriter_toggle("arithmetic_optimization")
rewriter_toggle("dependency_optimization")
rewriter_toggle("loop_optimization")
rewriter_toggle("function_optimization")
rewriter_toggle("debug_stripper")
rewriter_bool("disable_model_pruning")
rewriter_toggle("scoped_allocator_optimization")
rewriter_toggle("pin_to_host_optimization")
rewriter_toggle("implementation_selector")
rewriter_toggle("auto_mixed_precision")
rewriter_bool("disable_meta_optimizer")
if rewrite_options.min_graph_nodes != 0:
options["min_graph_nodes"] = rewrite_options.min_graph_nodes
return options
def set_optimizer_experimental_options(self, options):
"""Set experimental options for the optimizer.
Args:
options: Dictionary of options to modify
"""
self._optimizer_experimental_options.update(options)
self._thread_local_data.function_call_options = None
@property
def intra_op_parallelism_threads(self):
return self.config.intra_op_parallelism_threads
@intra_op_parallelism_threads.setter
def intra_op_parallelism_threads(self, num_threads):
if self._intra_op_parallelism_threads == num_threads:
return
if self._context_handle is not None:
raise RuntimeError(
"Intra op parallelism cannot be modified after initialization.")
self._intra_op_parallelism_threads = num_threads
@property
def inter_op_parallelism_threads(self):
return self.config.inter_op_parallelism_threads
@inter_op_parallelism_threads.setter
def inter_op_parallelism_threads(self, num_threads):
if self._inter_op_parallelism_threads == num_threads:
return
if self._context_handle is not None:
raise RuntimeError(
"Inter op parallelism cannot be modified after initialization.")
self._inter_op_parallelism_threads = num_threads
@property
def soft_device_placement(self):
return self.config.allow_soft_placement
@soft_device_placement.setter
def soft_device_placement(self, enabled):
self._soft_device_placement = enabled
self._thread_local_data.function_call_options = None
@property
def log_device_placement(self):
return self.config.log_device_placement
@log_device_placement.setter
def log_device_placement(self, enabled):
if self._log_device_placement == enabled:
return
if self._context_handle is not None:
raise RuntimeError(
"Device placement logging must be set at program startup")
self._log_device_placement = enabled
self._thread_local_data.function_call_options = None
@property
def device_policy(self):
# Only get the policy from the context if it has already been initialized
if self._context_handle is not None:
return pywrap_tensorflow.TFE_ContextGetDevicePlacementPolicy(self._handle)
return self._device_policy
@device_policy.setter
def device_policy(self, policy):
if policy is None:
policy = DEVICE_PLACEMENT_SILENT
if self._device_policy != policy:
self._device_policy = policy
# Only set the policy if the context has already been initialized
if self._context_handle is not None:
pywrap_tensorflow.TFE_ContextSetThreadLocalDevicePlacementPolicy(
self._handle, self._device_policy)
@property
def mirroring_policy(self):
# Only get the policy from the context if it has already been initialized
if self._context_handle is not None:
return pywrap_tensorflow.TFE_ContextGetMirroringPolicy(self._handle)
return self._mirroring_policy
@mirroring_policy.setter
def mirroring_policy(self, policy):
if policy is None:
policy = MIRRORING_NONE
if self._mirroring_policy != policy:
self._mirroring_policy = policy
# Only set the policy if the context has already been initialized
if self._context_handle is not None:
pywrap_tensorflow.TFE_ContextSetThreadLocalMirroringPolicy(
self._handle, self._mirroring_policy)
def enable_run_metadata(self):
"""Enables tracing of op execution via RunMetadata.
To retrieve the accumulated metadata call context.export_run_metadata()
and to stop tracing call context.disable_run_metadata().
"""
self.ensure_initialized()
pywrap_tensorflow.TFE_ContextEnableRunMetadata(self._handle)
def disable_run_metadata(self):
"""Disables tracing of op execution via RunMetadata."""
if not self._context_handle:
return
pywrap_tensorflow.TFE_ContextDisableRunMetadata(self._context_handle)
def enable_graph_collection(self):
"""Enables graph collection of executed functions.
To retrieve the accumulated graphs call context.export_run_metadata()
and to stop collecting graphs call context.disable_graph_collection().
"""
self.ensure_initialized()
pywrap_tensorflow.TFE_ContextEnableGraphCollection(self._handle)
def disable_graph_collection(self):
"""Disables graph collection of executed functions."""
if not self._context_handle:
return
pywrap_tensorflow.TFE_ContextDisableGraphCollection(self._context_handle)
def export_run_metadata(self):
"""Returns a RunMetadata proto with accumulated information.
The returned protocol buffer contains information since the most recent call
to either enable_run_metadata or export_run_metadata.
Returns:
A RunMetadata protocol buffer, or None if not enabled.
"""
if not self._context_handle:
return None
with c_api_util.tf_buffer() as buffer_:
pywrap_tensorflow.TFE_ContextExportRunMetadata(
self._context_handle, buffer_)
proto_data = pywrap_tensorflow.TF_GetBuffer(buffer_)
run_metadata = config_pb2.RunMetadata()
run_metadata.ParseFromString(compat.as_bytes(proto_data))
return run_metadata
@property
def context_switches(self):
"""Returns a stack of context switches."""
return self._context_switches
def start_step(self):
pywrap_tensorflow.TFE_ContextStartStep(self._handle)
def end_step(self):
pywrap_tensorflow.TFE_ContextEndStep(self._handle)
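# --- Illustrative sketch (added; not part of the original module) ---
# Minimal end-to-end use of Context: grab the singleton, pin ops to a device,
# and toggle the per-thread execution mode. Assumptions: a "/cpu:0" device is
# available; SYNC/ASYNC are the module-level constants used above.
def _example_context_usage():
  ctx = context()  # module-level singleton accessor defined below
  ctx.ensure_initialized()
  with ctx.device("/cpu:0"):
    pass  # ops created here are placed on the CPU
  ctx.execution_mode = ASYNC
  ctx.executor.wait()  # flush any ops dispatched asynchronously so far
  ctx.execution_mode = SYNC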
class _EagerDeviceContext(object):
"""Context-manager forcing placement of ops and Tensors on a device."""
def __init__(self, ctx, device_name):
self._device_name = device_name
self._ctx = ctx
self._stack = []
def __enter__(self):
ctx = self._ctx
old_device_name = ctx.device_name
old_device_spec = ctx.device_spec
new_device_name = self._device_name
cache_key = (old_device_name, new_device_name)
try:
new_device_name, new_device_spec = _device_parsing_cache[cache_key]
except TypeError:
# Error while trying to compute the cache key.
raise ValueError("Expecting a string device name. Got %s(%s)" %
(type(new_device_name), new_device_name))
except KeyError:
# Handle a cache miss.
if new_device_name is not None:
if not isinstance(new_device_name, six.string_types):
raise ValueError("Expecting a string device name. Got %s(%s)" %
(type(new_device_name), new_device_name))
device_spec = pydev.DeviceSpec.from_string(new_device_name)
if old_device_name:
new_device_spec = copy.copy(old_device_spec)
else:
ctx.ensure_initialized()
new_device_spec = pydev.DeviceSpec.from_string(
ctx._context_devices[0]) # pylint: disable=protected-access
new_device_spec = new_device_spec.make_merged_spec(device_spec)
else:
new_device_spec = pydev.DeviceSpec.from_string("")
new_device_name = new_device_spec.to_string()
_device_parsing_cache[cache_key] = (new_device_name, new_device_spec)
ctx._set_device(new_device_name, new_device_spec) # pylint: disable=protected-access
self._stack.append((old_device_name, old_device_spec, new_device_spec))
def __exit__(self, *ex_info):
ctx = self._ctx
old_device_name, old_device_spec, new_device_spec = self._stack[-1]
if ctx.device_spec is not new_device_spec:
raise RuntimeError(
"Exiting device scope without proper scope nesting")
del self._stack[-1]
ctx._set_device(old_device_name, old_device_spec) # pylint: disable=protected-access
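# --- Illustrative sketch (added; not part of the original module) ---
# The cache-miss branch of _EagerDeviceContext.__enter__ merges a partial
# device string into the current spec; this shows the same merge in
# isolation. Assumption: `pydev` is tensorflow.python.framework.device, as
# imported earlier in this module.
def _example_merge_device_spec(current_device_str, requested_device_str):
  current = pydev.DeviceSpec.from_string(current_device_str)
  requested = pydev.DeviceSpec.from_string(requested_device_str)
  merged = current.make_merged_spec(requested)
  return merged.to_string()  # fields missing from `requested` come from `current`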
# Do not set directly. Use _set_context.
_context = None
_context_lock = threading.Lock()
def _set_context_locked(ctx):
global _context
pywrap_tensorflow.TFE_Py_SetEagerContext(ctx)
_context = ctx
def _set_context(ctx):
with _context_lock:
_set_context_locked(ctx)
def _create_context():
with _context_lock:
if _context is None:
ctx = Context()
_set_context_locked(ctx)
def context():
"""Returns a singleton context object."""
if _context is None:
_create_context()
return _context
def context_safe():
"""Returns current context (or None if one hasn't been initialized)."""
return _context
def ensure_initialized():
"""Initialize the context."""
context().ensure_initialized()
def set_global_seed(seed):
"""Sets the eager mode seed."""
context()._set_global_seed(seed) # pylint: disable=protected-access
def global_seed():
"""Returns the eager mode seed."""
return context()._seed # pylint: disable=protected-access
def internal_operation_seed():
"""Returns the operation seed generated based on global seed."""
return context()._internal_operation_seed() # pylint: disable=protected-access
@tf_export("executing_eagerly")
def executing_eagerly():
"""Returns True if the current thread has eager execution enabled.
Eager execution is typically enabled via
`tf.compat.v1.enable_eager_execution`, but may also be enabled within the
context of a Python function via tf.contrib.eager.py_func.
"""
if context_safe() is None:
return default_execution_mode == EAGER_MODE
return context().executing_eagerly()
def in_eager_mode():
"""Use executing_eagerly() instead. This function will be removed."""
return executing_eagerly()
def shared_name(name=None):
"""Returns the anonymous shared name GUID if no shared name is specified.
In eager mode we need to use a unique shared name to avoid spurious sharing
issues. The runtime generates a unique name on our behalf when the reserved
GUID is used as a shared name.
Args:
name: Optional shared name
Returns:
Eager compatible shared name.
"""
if name or not executing_eagerly():
return name
# Ensure a unique name when eager execution is enabled to avoid spurious
# sharing issues.
return "cd2c89b7-88b7-44c8-ad83-06c2a9158347"
def graph_mode():
"""Context-manager to disable eager execution for the current thread."""
return context()._mode(GRAPH_MODE) # pylint: disable=protected-access
def eager_mode():
"""Context-manager to enable eager execution for the current thread."""
return context()._mode(EAGER_MODE) # pylint: disable=protected-access
def scope_name():
"""Name of the current scope."""
return context().scope_name
def device(name):
"""Context-manager to force placement of operations and Tensors on a device.
Example:
```python
with tf.device('gpu:0'):
with tf.device('cpu:0'):
shape = tf.constant([], dtype=tf.int32)
x = tf.random.truncated_normal(shape, tf.float32)
```
will ensure that the `shape` Tensor is on CPU but the `truncated_normal`
operation runs on GPU 0.
Args:
name: Name of the device (see context().devices()), or None to
perform automatic placement.
Returns:
Context manager for setting the device.
"""
ensure_initialized()
return context().device(name)
@tf_export("config.experimental_list_devices")
def list_devices():
"""List the names of the available devices.
Returns:
Names of the available devices, as a `list`.
"""
ensure_initialized()
return context().devices()
@tf_export("debugging.get_log_device_placement")
def get_log_device_placement():
"""Get if device placements are logged.
Returns:
If device placements are logged.
"""
return context().log_device_placement
@tf_export("debugging.set_log_device_placement")
def set_log_device_placement(enabled):
"""Set if device placements should be logged.
Args:
enabled: Whether to enable device placement logging.
"""
context().log_device_placement = enabled
@tf_contextlib.contextmanager
def device_policy(policy):
"""Context manager for setting device placement policy for current thread."""
ctx = context()
old_policy = ctx.device_policy
try:
ctx.device_policy = policy
yield
finally:
ctx.device_policy = old_policy
@tf_contextlib.contextmanager
def mirroring_policy(policy):
"""Context manager for setting mirroring policy for current thread."""
ctx = context()
old_policy = ctx.mirroring_policy
try:
ctx.mirroring_policy = policy
yield
finally:
ctx.mirroring_policy = old_policy
def set_execution_mode(mode):
"""Sets execution mode for the current thread."""
context().execution_mode = mode
# TODO(fishx): remove this method.
@tf_contextlib.contextmanager
def execution_mode(mode):
"""Context manager for setting execution mode for current thread."""
ctx = context()
executor_new = executor.new_executor(mode == ASYNC)
executor_old = ctx.executor
try:
executor_old.wait()
ctx.executor = executor_new
yield
finally:
ctx.executor = executor_old
executor_new.wait()
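# --- Illustrative sketch (added; not part of the original module) ---
# Typical use of the execution_mode context manager: dispatch ops
# asynchronously inside the block, then wait for them before reading results.
# Assumption: `run_ops` is any callable that executes eager ops.
def _example_async_block(run_ops):
  with execution_mode(ASYNC):
    run_ops()
    async_wait()  # defined below; blocks until dispatched ops finish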
@tf_contextlib.contextmanager
def executor_scope(e):
"""Context manager for changing executor for current thread.
Args:
e: An Executor to execute eager ops under this scope. Setting it to None
will switch back to the default executor for the context.
Yields:
Context manager for setting the executor for current thread.
"""
ctx = context()
executor_old = ctx.executor
try:
ctx.executor = e
yield
finally:
ctx.executor = executor_old
@tf_export("experimental.function_executor_type")
@tf_contextlib.contextmanager
def function_executor_type(executor_type):
"""Context manager for setting the executor of eager defined functions.
Eager defined functions are functions decorated by tf.contrib.eager.defun.
Args:
executor_type: a string for the name of the executor to be used to execute
functions defined by tf.contrib.eager.defun.
Yields:
Context manager for setting the executor of eager defined functions.
"""
current_options = context().function_call_options
old_options = copy.copy(current_options)
try:
current_options.executor_type = executor_type
yield
finally:
context().function_call_options = old_options
def is_async():
"""Returns true if current thread is in async mode."""
return context().is_async()
def async_wait():
"""Waits for ops dispatched in ASYNC mode to finish."""
return context().executor.wait()
def async_clear_error():
"""Clears errors raised during ASYNC execution mode."""
return context().executor.clear_error()
def num_gpus():
"""Get the number of available GPU devices.
Returns:
The number of available GPU devices.
"""
return context().num_gpus()
def enable_run_metadata():
"""Enables tracing of op execution via RunMetadata.
To retrieve the accumulated metadata call context.export_run_metadata()
and to stop tracing call context.disable_run_metadata().
"""
context().enable_run_metadata()
def disable_run_metadata():
"""Disables tracing of op execution via RunMetadata."""
context().disable_run_metadata()
def enable_graph_collection():
"""Enables graph collection of executed functions.
To retrieve the accumulated graphs call context.export_run_metadata()
and to stop collecting graphs call context.disable_graph_collection().
"""
context().enable_graph_collection()
def disable_graph_collection():
"""Disables graph collection of executed functions."""
context().disable_graph_collection()
def export_run_metadata():
"""Returns a RunMetadata proto with accumulated information.
The returned protocol buffer contains information since the most recent call
to either enable_run_metadata or export_run_metadata.
Returns:
A RunMetadata protocol buffer.
"""
return context().export_run_metadata()
def set_server_def(server_def):
context().set_server_def(server_def)
def add_function(fdef):
"""Add a function definition to the context."""
context().add_function(fdef)
def remove_function(name):
"""Remove a function from the context."""
context().remove_function(name)
# Not every user creates a Context via context.context()
# (for example, enable_eager_execution in python/framework/ops.py),
# but they do all import this file. Note that IS_IN_GRAPH_MODE and
# in_graph_mode are both parameterless functions.
def _tmp_in_graph_mode():
if context_safe() is None:
# Context not yet initialized. Assume graph mode following the
# default implementation in `is_in_graph_mode`.
return True
return not executing_eagerly()
is_in_graph_mode.IS_IN_GRAPH_MODE = _tmp_in_graph_mode
|
the-stack_0_22893 | from flask import Blueprint
from api.controllers import userscontroller
usersprint = Blueprint("users", __name__)
usersprint.add_url_rule(
"/auth/login",
view_func=userscontroller.userController["login"],
methods=["POST"]
)
usersprint.add_url_rule(
"/auth/register",
view_func=userscontroller.userController["register"],
methods=["POST"]
) |
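# --- Illustrative sketch (added; not part of the original file) ---
# How the blueprint above would typically be registered on a Flask app.
# Assumptions: the "/api/v1" prefix and the factory function name are
# hypothetical and not taken from this project.
from flask import Flask

def create_app():
    app = Flask(__name__)
    app.register_blueprint(usersprint, url_prefix="/api/v1")
    return app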
the-stack_0_22895 | # Object colocalization
# Copyright The MagellanMapper Contributors
"""Colocalize objects in an image, typically in separate channels."""
from enum import Enum
import multiprocessing as mp
import pandas as pd
import numpy as np
from skimage import morphology
from magmap.cv import chunking, detector, stack_detect, verifier
from magmap.io import cli, df_io, libmag, sqlite
from magmap.settings import config
class BlobMatch:
"""Blob match storage class as a wrapper for a data frame of matches.
Attributes:
df (:class:`pandas.DataFrame`): Data frame of matches with column
names given by :class:`BlobMatch.Cols`.
"""
class Cols(Enum):
"""Blob match column names."""
MATCH_ID = "MatchID"
ROI_ID = "RoiID"
BLOB1_ID = "Blob1ID"
BLOB1 = "Blob1"
BLOB2_ID = "Blob2ID"
BLOB2 = "Blob2"
DIST = "Distance"
def __init__(self, matches=None, match_id=None, roi_id=None, blob1_id=None,
blob2_id=None, df=None):
"""Initialize blob match object.
Args:
matches (list[list[
:class:`numpy.ndarray`, :class:`numpy.ndarray`, float]]):
List of blob match lists, each of which contains
``blob1, blob2, distance``. Defaults to None, which
sets the data frame to None.
match_id (Sequence[int]): Sequence of match IDs, which should be
of the same length as ``matches``; defaults to None.
roi_id (Sequence[int]): Sequence of ROI IDs, which should be
of the same length as ``matches``; defaults to None.
blob1_id (Sequence[int]): Sequence of blob 1 IDs, which should be
of the same length as ``matches``; defaults to None.
blob2_id (Sequence[int]): Sequence of blob2 IDs, which should be
of the same length as ``matches``; defaults to None.
df (:class:`pandas.DataFrame`): Pandas data frame to set in
place of any other arguments; defaults to None.
"""
if df is not None:
# set data frame directly and ignore any other arguments
self.df = df
return
if matches is None:
# set data frame to None and return since any other arguments
# must correspond to matches
self.df = None
return
matches_dict = {}
for i, match in enumerate(matches):
# assumes that all first sequences are of the same length
vals = {
BlobMatch.Cols.BLOB1: match[0],
BlobMatch.Cols.BLOB2: match[1],
BlobMatch.Cols.DIST: match[2],
}
if match_id is not None:
vals[BlobMatch.Cols.MATCH_ID] = match_id[i]
if roi_id is not None:
vals[BlobMatch.Cols.ROI_ID] = roi_id[i]
if blob1_id is not None:
vals[BlobMatch.Cols.BLOB1_ID] = blob1_id[i]
if blob2_id is not None:
vals[BlobMatch.Cols.BLOB2_ID] = blob2_id[i]
for key in BlobMatch.Cols:
matches_dict.setdefault(key, []).append(
vals[key] if key in vals else None)
self.df = df_io.dict_to_data_frame(matches_dict)
def __repr__(self):
"""Format the underlying data frame."""
if self.df is None:
return "Empty blob matches"
return df_io.print_data_frame(self.df, show=False)
def get_blobs(self, n):
"""Get blobs as a numpy array.
Args:
n (int): 1 for blob1, otherwise blob 2.
Returns:
:class:`numpy.ndarray`: Numpy array of the given blob type, or
None if the :attr:`df` is None or the blob column does not exist.
"""
col = BlobMatch.Cols.BLOB1 if n == 1 else BlobMatch.Cols.BLOB2
if self.df is None or col.value not in self.df:
return None
return np.vstack(self.df[col.value])
def get_blobs_all(self):
"""Get all blobs in the blob matches.
Returns:
tuple[:class:`numpy.ndarray`, :class:`numpy.ndarray`]:
Tuple of ``(blobs1, blobs2)``, or None if either are None.
"""
blobs_all = []
for n in (1, 2):
blobs = self.get_blobs(n)
if blobs is None:
return None
blobs_all.append(blobs)
return blobs_all
def update_blobs(self, fn, *args):
"""Update all blobs with the given function.
Args:
fn (func): Function that accepts the output of :meth:`get_blobs`
separately for each set of blobs.
*args (Any): Additional arguments to ``fn``.
"""
if self.df is None: return
for i, col in enumerate((BlobMatch.Cols.BLOB1, BlobMatch.Cols.BLOB2)):
blobs = self.get_blobs(i + 1)
if blobs is not None:
self.df[col.value] = fn(blobs, *args).tolist()
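# --- Illustrative sketch (added; not part of the original module) ---
# Building a BlobMatch from raw match triples. Assumptions: blobs are rows in
# MagellanMapper's ``z, y, x, radius, ...`` format, distances are in pixels,
# and the concrete values are arbitrary.
def _example_blob_match():
    blob_a = np.array([10, 20, 30, 3])
    blob_b = np.array([11, 21, 30, 3])
    matches = BlobMatch(
        [[blob_a, blob_b, 1.7]], match_id=[0], roi_id=[5],
        blob1_id=[100], blob2_id=[200])
    return matches.get_blobs_all()  # stacked blob1 and blob2 arrays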
class StackColocalizer(object):
"""Colocalize blobs in blocks based on matching blobs across channels.
Support shared memory for spawned multiprocessing, with fallback to
pickling in forked multiprocessing.
"""
blobs = None
match_tol = None
@classmethod
def colocalize_block(cls, coord, offset, shape, blobs=None,
tol=None, setup_cli=False):
"""Colocalize blobs from different channels within a block.
Args:
coord (Tuple[int]): Block coordinate.
offset (List[int]): Block offset within the full image in z,y,x.
shape (List[int]): Block shape in z,y,x.
blobs (:obj:`np.ndarray`): 2D blobs array; defaults to None to
use :attr:`blobs`.
tol (List[float]): Tolerance for colocalizing blobs; defaults
to None to use :attr:`match_tol`.
setup_cli (bool): True to set up CLI arguments, typically for
a spawned (rather than forked) environment; defaults to False.
Returns:
Tuple[int], dict[Tuple[int], Tuple]: ``coord`` for tracking
multiprocessing and the dictionary of matches.
"""
if blobs is None:
blobs = cls.blobs
if tol is None:
tol = cls.match_tol
if setup_cli:
# reload command-line parameters
cli.process_cli_args()
matches = colocalize_blobs_match(blobs, offset[::-1], shape[::-1], tol)
return coord, matches
@classmethod
def colocalize_stack(cls, shape, blobs):
"""Entry point to colocalizing blobs within a stack.
Args:
shape (List[int]): Image shape in z,y,x.
blobs (:obj:`np.ndarray`): 2D Numpy array of blobs.
Returns:
dict[tuple[int, int], :class:`BlobMatch`]: The
dictionary of matches, where keys are tuples of the channel pairs,
and values are blob match objects.
"""
print("Colocalizing blobs based on matching blobs in each pair of "
"channels")
# set up ROI blocks from which to select blobs in each block
sub_roi_slices, sub_rois_offsets, _, _, _, overlap_base, _, _ \
= stack_detect.setup_blocks(config.roi_profile, shape)
match_tol = np.multiply(
overlap_base, config.roi_profile["verify_tol_factor"])
is_fork = chunking.is_fork()
if is_fork:
# set shared data in forked multiprocessing
cls.blobs = blobs
cls.match_tol = match_tol
pool = mp.Pool(processes=config.cpus)
pool_results = []
for z in range(sub_roi_slices.shape[0]):
for y in range(sub_roi_slices.shape[1]):
for x in range(sub_roi_slices.shape[2]):
coord = (z, y, x)
offset = sub_rois_offsets[coord]
slices = sub_roi_slices[coord]
shape = [s.stop - s.start for s in slices]
if is_fork:
# use variables stored as class attributes
pool_results.append(pool.apply_async(
StackColocalizer.colocalize_block,
args=(coord, offset, shape)))
else:
# pickle full set of variables
pool_results.append(pool.apply_async(
StackColocalizer.colocalize_block,
args=(coord, offset, shape,
detector.get_blobs_in_roi(
blobs, offset, shape)[0], match_tol,
True)))
# dict of channel combos to blob matches data frame
matches_all = {}
for result in pool_results:
coord, matches = result.get()
count = 0
for key, val in matches.items():
matches_all.setdefault(key, []).append(val.df)
count += len(val.df)
print("adding {} matches from block at {} of {}"
.format(count, coord, np.add(sub_roi_slices.shape, -1)))
pool.close()
pool.join()
# prune duplicates by taking matches with shortest distance
for key in matches_all.keys():
matches_all[key] = pd.concat(matches_all[key])
for blobi in (BlobMatch.Cols.BLOB1, BlobMatch.Cols.BLOB2):
# convert blob column to ndarray to extract coords by column
matches = matches_all[key]
matches_uniq, matches_i, matches_inv, matches_cts = np.unique(
np.vstack(matches[blobi.value])[:, :3], axis=0,
return_index=True, return_inverse=True, return_counts=True)
if np.sum(matches_cts > 1) > 0:
# prune if at least one blob has been matched to multiple
# other blobs
singles = matches.iloc[matches_i[matches_cts == 1]]
dups = []
for i, ct in enumerate(matches_cts):
# include non-duplicates to retain index
if ct <= 1: continue
# get indices in orig matches at given unique array
# index and take match with lowest dist
matches_mult = matches.loc[matches_inv == i]
dists = matches_mult[BlobMatch.Cols.DIST.value]
min_dist = np.amin(dists)
num_matches = len(matches_mult)
if config.verbose and num_matches > 1:
print("pruning from", num_matches,
"matches of dist:", dists)
matches_mult = matches_mult.loc[dists == min_dist]
# take first in case of any ties
dups.append(matches_mult.iloc[[0]])
matches_all[key] = pd.concat((singles, pd.concat(dups)))
print("Colocalization matches for channels {}: {}"
.format(key, len(matches_all[key])))
libmag.printv(print(matches_all[key]))
# store data frame in BlobMatch object
matches_all[key] = BlobMatch(df=matches_all[key])
return matches_all
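# --- Illustrative sketch (added; not part of the original module) ---
# Entry-point call for whole-stack colocalization. Assumptions: ``blobs`` is
# the 2D detections array for a multichannel image, ``img_shape`` is its
# z,y,x shape, and block/CPU settings come from ``config`` as set up by the
# CLI.
def _example_stack_colocalization(img_shape, blobs):
    matches_by_chl_pair = StackColocalizer.colocalize_stack(img_shape, blobs)
    for (chl_a, chl_b), match in matches_by_chl_pair.items():
        print("channels", chl_a, chl_b, "matches:", len(match.df))
    return matches_by_chl_pair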
def colocalize_blobs(roi, blobs, thresh=None):
"""Co-localize blobs from different channels based on surrounding
intensities.
Thresholds for detection are first identified in each channel by taking
the blobs in the given channel, finding the surrounding intensities,
and taking a low (5th) percentile. Then for each channel, the
surrounding intensities of blobs in that channel are compared with
the thresholds in the other channels. Blobs exceeding any given
threshold are considered to co-localize in that channel.
Args:
roi (:obj:`np.ndarray`): Region of interest as a 3D+channel array.
blobs (:obj:`np.ndarray`): Blobs as a 2D array in the format
``[[z, y, x, radius, confirmation, truth, channel...], ...]``.
thresh (int, float, str): Threshold percentile of intensities from
pixels surrounding each blob in the given channel. Use "min"
to instead take the minimum average intensity of all blobs
in the channel. Defaults to None to use "min".
Returns:
:obj:`np.ndarray`: 2D Numpy array of same length as ``blobs`` with
a column for each channel where 1 indicates that the corresponding
        blob has signal present in the given channel at the blob's
location, and 0 indicates insufficient signal.
"""
if blobs is None or roi is None or len(roi.shape) < 4:
return None
if thresh is None:
thresh = "min"
print("Colocalizing blobs based on image intensity across channels")
threshs = []
selem = morphology.ball(2)
# find only blobs in ROI since blobs list may include blobs from immediate
# surrounds, but ROI is not available for them
blobs_roi, blobs_roi_mask = detector.get_blobs_in_roi(
blobs, (0, 0, 0), roi.shape[:3], reverse=False)
blobs_chl = detector.get_blobs_channel(blobs_roi)
blobs_range_chls = []
# get labeled masks of blobs for each channel and threshold intensities
mask_roi = np.ones(roi.shape[:3], dtype=int)
mask_roi_chls = []
for chl in range(roi.shape[3]):
# label a mask with blob indices surrounding each blob
blobs_chl_mask = np.isin(blobs_chl, chl)
blobs_range = np.where(blobs_chl_mask)[0]
blobs_range_chls.append(blobs_range)
mask = np.copy(mask_roi) * -1
mask[tuple(libmag.coords_for_indexing(
blobs_roi[blobs_chl_mask, :3].astype(int)))] = blobs_range
mask = morphology.dilation(mask, selem=selem)
mask_roi_chls.append(mask)
if thresh == "min":
# set minimum average surrounding intensity of all blobs as thresh
threshs.append(
None if len(blobs_range) == 0 else np.amin([
np.mean(roi[mask == b, chl]) for b in blobs_range]))
else:
# set a percentile of intensities surrounding all blobs in channel
# as threshold for that channel, or the whole ROI if no blobs
mask_blobs = mask >= 0
roi_mask = roi if np.sum(mask_blobs) < 1 else roi[mask_blobs, chl]
threshs.append(np.percentile(roi_mask, thresh))
channels = np.unique(detector.get_blobs_channel(blobs_roi)).astype(int)
colocs_roi = np.zeros((blobs_roi.shape[0], roi.shape[3]), dtype=np.uint8)
for chl in channels:
# get labeled mask of blobs in the given channel
mask = mask_roi_chls[chl]
blobs_range = blobs_range_chls[chl]
for chl_other in channels:
if threshs[chl_other] is None: continue
for blobi in blobs_range:
# find surrounding intensity of blob in another channel
mask_blob = mask == blobi
blob_avg = np.mean(roi[mask_blob, chl_other])
if config.verbose:
print(blobi, detector.get_blob_channel(blobs_roi[blobi]),
blobs_roi[blobi, :3], blob_avg, threshs[chl_other])
if blob_avg >= threshs[chl_other]:
                    # intensity in the other channel around the blob's
                    # position is above that channel's threshold
colocs_roi[blobi, chl_other] = 1
# create array for all blobs including those outside ROI
colocs = np.zeros((blobs.shape[0], roi.shape[3]), dtype=np.uint8)
colocs[blobs_roi_mask] = colocs_roi
if config.verbose:
for i, (blob, coloc) in enumerate(zip(blobs_roi, colocs)):
print(i, detector.get_blob_channel(blob), blob[:3], coloc)
return colocs
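# Usage sketch with hypothetical inputs: each row of the array returned by
# ``colocalize_blobs`` is a blob and each column a channel, with 1 marking
# sufficient surrounding signal in that channel.
def _colocalize_blobs_sketch(roi, blobs):
    colocs = colocalize_blobs(roi, blobs)
    if colocs is None:
        return None
    # mask of blobs with signal present in every channel of the ROI
    return np.all(colocs == 1, axis=1)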
def colocalize_blobs_match(blobs, offset, size, tol, inner_padding=None):
"""Co-localize blobs in separate channels but the same ROI by finding
optimal blob matches.
Args:
blobs (:obj:`np.ndarray`): Blobs from separate channels.
offset (List[int]): ROI offset given as x,y,z.
size (List[int]): ROI shape given as x,y,z.
        tol (List[float]): Tolerances for matching given as x,y,z.
inner_padding (List[int]): ROI padding given as x,y,z; defaults
to None to use the padding based on ``tol``.
Returns:
dict[tuple[int, int], :class:`BlobMatch`]:
Dictionary where keys are tuples of the two channels compared and
values are blob matches objects.
"""
if blobs is None:
return None
thresh, scaling, inner_pad, resize, blobs = verifier.setup_match_blobs_roi(
blobs, tol)
if inner_padding is None:
inner_padding = inner_pad
matches_chls = {}
channels = np.unique(detector.get_blobs_channel(blobs)).astype(int)
for chl in channels:
# pair channels
blobs_chl = detector.blobs_in_channel(blobs, chl)
for chl_other in channels:
# prevent duplicates by skipping other channels below given channel
if chl >= chl_other: continue
# find colocalizations between blobs from one channel to blobs
# in another channel
blobs_chl_other = detector.blobs_in_channel(blobs, chl_other)
blobs_inner_plus, blobs_truth_inner_plus, offset_inner, \
size_inner, matches = verifier.match_blobs_roi(
blobs_chl_other, blobs_chl, offset, size, thresh, scaling,
inner_padding, resize)
# reset truth and confirmation blob flags in matches
chl_combo = (chl, chl_other)
matches.update_blobs(detector.set_blob_truth, -1)
matches.update_blobs(detector.set_blob_confirmed, -1)
matches_chls[chl_combo] = matches
return matches_chls
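# Usage sketch with hypothetical arguments: the dictionary returned by
# ``colocalize_blobs_match`` is keyed by channel pairs; each value is assumed
# to wrap its matches in a ``df`` data frame as elsewhere in this module.
def _match_counts_sketch(blobs, offset, size, tol):
    matches_chls = colocalize_blobs_match(blobs, offset, size, tol)
    if matches_chls is None:
        return
    for (chl, chl_other), match in matches_chls.items():
        print("channels {}-{}: {} matches".format(
            chl, chl_other, len(match.df)))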
def _get_roi_id(db, offset, shape, exp_name=None):
"""Get database ROI ID for the given ROI position within the main image5d.
Args:
db (:obj:`sqlite.ClrDB`): Database object.
offset (List[int]): ROI offset in z,y,x.
shape (List[int]): ROI shape in z,y,x.
exp_name (str): Name of experiment; defaults to None to attempt
discovery through any image loaded to :attr:`config.img5d`.
Returns:
        int: ROI ID of the found or inserted ROI.
"""
if exp_name is None:
exp_name = sqlite.get_exp_name(
config.img5d.path_img if config.img5d else None)
exp_id = sqlite.select_or_insert_experiment(
db.conn, db.cur, exp_name, None)
roi_id = sqlite.select_or_insert_roi(
db.conn, db.cur, exp_id, config.series, offset, shape)[0]
return roi_id
def insert_matches(db, matches):
"""Insert matches into database for a whole image.
Args:
db (:obj:`sqlite.ClrDB`): Database object.
        matches (dict[tuple[int, int], :class:`BlobMatch`]):
Dictionary of channel combo tuples to blob match objects.
"""
# use size of 0 for each dimension for whole-image ROI, which avoids
# the need to discover the image size
roi_id = _get_roi_id(db, (0, 0, 0), (0, 0, 0))
for chl_matches in matches.values():
# insert blobs and matches for the given channel combo
blobs = chl_matches.get_blobs_all()
if blobs is not None:
sqlite.insert_blobs(db.conn, db.cur, roi_id, np.vstack(blobs))
config.db.insert_blob_matches(roi_id, chl_matches)
def select_matches(db, channels, offset=None, shape=None, exp_name=None):
"""Select blob matches for the given region from a database.
Blob matches are assumed to have been processed from the whole image.
To retrieve matches from a selected ROI, use
:meth:`magmap.io.sqlite.ClrDB.select_blob_matches` instead.
Args:
db (:obj:`sqlite.ClrDB`): Database object.
channels (list[int]): List of channels.
offset (list[int]): ROI offset in z,y,x; defaults to None to use
``(0, 0, 0)``.
shape (list[int]): ROI shape in z,y,x; defaults to None to use
``(0, 0, 0)``.
exp_name (str): Name of experiment in ``db``.
Returns:
        dict[tuple[int, int], :class:`BlobMatch`]: Dictionary where
        keys are tuples of the two channels compared and values are
        blob match objects. None if no blob matches are found.
"""
# get ROI for whole image
roi_id = _get_roi_id(db, (0, 0, 0), (0, 0, 0), exp_name)
if offset is not None and shape is not None:
        # get blobs from matches within the ROI
blobs, blob_ids = db.select_blobs_by_position(
roi_id, offset[::-1], shape[::-1])
else:
# get blobs from matches within the whole image
blobs, blob_ids = db.select_blobs_by_roi(roi_id)
if blobs is None or len(blobs) == 0:
print("No blob matches found")
return None
blob_ids = np.array(blob_ids)
matches = {}
for chl in channels:
# pair channels
for chl_other in channels:
if chl >= chl_other: continue
# select matches for blobs in the given first channel of the pair
# of channels, assuming chls were paired this way during insertion
chl_matches = db.select_blob_matches_by_blob_id(
roi_id, 1,
blob_ids[detector.get_blobs_channel(blobs) == chl])
blobs2 = chl_matches.get_blobs(2)
if blobs2 is not None:
chl_matches = chl_matches.df.loc[detector.get_blobs_channel(
blobs2) == chl_other]
matches[(chl, chl_other)] = BlobMatch(df=chl_matches)
return matches
|
the-stack_0_22896 | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from model import hybridModel
import tensorflow as tf
from tensorflow.keras import layers
from preprocess import createData
def classify(data, model):
"""
    This function predicts the appropriate heading or section for each
    sentence in the input text.
    Args:
        data - Unstructured medical abstract text.
        model - Model trained to classify abstract sentences.
    Returns:
        A list of dictionaries, each containing a sentence and its
        predicted label.
    Example:
results = [
{
label : BACKGROUND,
sentence : Most cancer patients are treated with some combination of surgery, radiation, and chemotherapy.
            },
{
label : METHODS,
sentence : We retrospectively analyzed the data of 867 COVID-19 cases.
}
]
"""
classes = ["BACKGROUND", "CONCLUSIONS", "METHODS", "OBJECTIVE", "RESULTS"]
data = createData(data)
abs_pred_probs = model.predict(x = data)
abs_preds = tf.argmax(abs_pred_probs, axis=1)
abs_pred_classes = [classes[i] for i in abs_preds]
results = []
    for i, line in enumerate(data[0]):
        predicted = {
            'label': abs_pred_classes[i],
            'sentence': line
        }
results.append(predicted)
return results
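# Optional helper sketch (not used above): group the output of ``classify``
# by predicted label, relying only on the documented 'label'/'sentence' keys.
def group_by_label(results):
    grouped = {}
    for item in results:
        grouped.setdefault(item['label'], []).append(item['sentence'])
    return grouped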
#Only runs when this file is executed directly.
if __name__ == "__main__":
    model = hybridModel()
try:
cont = 'y'
        while cont == 'y' or cont == '':
abstract = input("\nEnter the Abstract: \n\n")
result = classify(abstract, model)
for r in result:
print(r['label'], " : ", r['sentence'], "\n")
cont = str(input("\nWant to skim another unstructured abstract? [Y/n] : ").lower())
except:
print("Please Enter only unstructured medical abstracts with atleat 5 lines!")
|
the-stack_0_22897 | """ Training routine for GraspNet baseline model.
"""
import os
import sys
import numpy as np
from datetime import datetime
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
sys.path.append(os.path.join(ROOT_DIR, 'pointnet2'))
sys.path.append(os.path.join(ROOT_DIR, 'models'))
sys.path.append(os.path.join(ROOT_DIR, 'dataset'))
from graspnet import GraspNet, get_loss
from pytorch_utils import BNMomentumScheduler
from graspnet_dataset import GraspNetDataset, collate_fn, load_grasp_labels
from label_generation import process_grasp_labels
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_root', required=True, help='Dataset root')
parser.add_argument('--camera', required=True, help='Camera split [realsense/kinect]')
parser.add_argument('--checkpoint_path', default=None, help='Model checkpoint path [default: None]')
parser.add_argument('--log_dir', default='logs/realsense', help='Dump dir to save model checkpoint [default: log]')
parser.add_argument('--num_point', type=int, default=20000, help='Point Number [default: 20000]')
parser.add_argument('--num_view', type=int, default=300, help='View Number [default: 300]')
parser.add_argument('--max_epoch', type=int, default=18, help='Epoch to run [default: 18]')
parser.add_argument('--batch_size', type=int, default=2, help='Batch Size during training [default: 2]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--weight_decay', type=float, default=0, help='Optimization L2 weight decay [default: 0]')
parser.add_argument('--bn_decay_step', type=int, default=2, help='Period of BN decay (in epochs) [default: 2]')
parser.add_argument('--bn_decay_rate', type=float, default=0.5, help='Decay rate for BN decay [default: 0.5]')
parser.add_argument('--lr_decay_steps', default='8,12,16',
help='When to decay the learning rate (in epochs) [default: 8,12,16]')
parser.add_argument('--lr_decay_rates', default='0.1,0.1,0.1', help='Decay rates for lr decay [default: 0.1,0.1,0.1]')
cfgs = parser.parse_args()
# ------------------------------------------------------------------------- GLOBAL CONFIG BEG
EPOCH_CNT = 0
LR_DECAY_STEPS = [int(x) for x in cfgs.lr_decay_steps.split(',')]
LR_DECAY_RATES = [float(x) for x in cfgs.lr_decay_rates.split(',')]
assert (len(LR_DECAY_STEPS) == len(LR_DECAY_RATES))
DEFAULT_CHECKPOINT_PATH = os.path.join(cfgs.log_dir, 'checkpoint.tar')
CHECKPOINT_PATH = cfgs.checkpoint_path if cfgs.checkpoint_path is not None \
else DEFAULT_CHECKPOINT_PATH
if not os.path.exists(cfgs.log_dir):
os.makedirs(cfgs.log_dir)
LOG_FOUT = open(os.path.join(cfgs.log_dir, 'log_train.txt'), 'a')
LOG_FOUT.write(str(cfgs) + '\n')
def log_string(out_str):
LOG_FOUT.write(out_str + '\n')
LOG_FOUT.flush()
print(out_str)
# Init datasets and dataloaders
def my_worker_init_fn(worker_id):
np.random.seed(np.random.get_state()[1][0] + worker_id)
pass
# Create Dataset and Dataloader
valid_obj_idxs, grasp_labels = load_grasp_labels(cfgs.dataset_root)
TRAIN_DATASET = GraspNetDataset(cfgs.dataset_root, valid_obj_idxs, grasp_labels, camera=cfgs.camera, split='train',
num_points=cfgs.num_point, remove_outlier=True, augment=True)
TEST_DATASET = GraspNetDataset(cfgs.dataset_root, valid_obj_idxs, grasp_labels, camera=cfgs.camera, split='test_seen',
num_points=cfgs.num_point, remove_outlier=True, augment=False)
print(len(TRAIN_DATASET), len(TEST_DATASET))
TRAIN_DATALOADER = DataLoader(TRAIN_DATASET, batch_size=cfgs.batch_size, shuffle=True,
num_workers=4, worker_init_fn=my_worker_init_fn, collate_fn=collate_fn)
TEST_DATALOADER = DataLoader(TEST_DATASET, batch_size=cfgs.batch_size, shuffle=False,
num_workers=4, worker_init_fn=my_worker_init_fn, collate_fn=collate_fn)
print(len(TRAIN_DATALOADER), len(TEST_DATALOADER))
# Init the model and optimizer
net = GraspNet(input_feature_dim=0, num_view=cfgs.num_view, num_angle=12, num_depth=4,
cylinder_radius=0.05, hmin=-0.02, hmax_list=[0.01, 0.02, 0.03, 0.04])
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net.to(device)
# Load the Adam optimizer
optimizer = optim.Adam(net.parameters(), lr=cfgs.learning_rate, weight_decay=cfgs.weight_decay)
# Load checkpoint if there is any
it = -1  # for the initial value of `LambdaLR` and `BNMomentumScheduler`
start_epoch = 0
if CHECKPOINT_PATH is not None and os.path.isfile(CHECKPOINT_PATH):
checkpoint = torch.load(CHECKPOINT_PATH)
net.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
start_epoch = checkpoint['epoch']
log_string("-> loaded checkpoint %s (epoch: %d)" % (CHECKPOINT_PATH, start_epoch))
# Decay Batchnorm momentum from 0.5 to 0.999
# note: pytorch's BN momentum (default 0.1) = 1 - tensorflow's BN momentum
BN_MOMENTUM_INIT = 0.5
BN_MOMENTUM_MAX = 0.001
bn_lbmd = lambda it: max(BN_MOMENTUM_INIT * cfgs.bn_decay_rate ** (int(it / cfgs.bn_decay_step)), BN_MOMENTUM_MAX)
bnm_scheduler = BNMomentumScheduler(net, bn_lambda=bn_lbmd, last_epoch=start_epoch - 1)
def get_current_lr(epoch):
lr = cfgs.learning_rate
for i, lr_decay_epoch in enumerate(LR_DECAY_STEPS):
if epoch >= lr_decay_epoch:
lr *= LR_DECAY_RATES[i]
return lr
def adjust_learning_rate(optimizer, epoch):
lr = get_current_lr(epoch)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
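def _lr_schedule_example():
    """Illustrative sketch of the step decay above, assuming the default
    settings (learning_rate=0.001, lr_decay_steps=8,12,16, rates 0.1 each):
    epochs 0-7 -> 1e-3, 8-11 -> 1e-4, 12-15 -> 1e-5, 16+ -> 1e-6."""
    return [(ep, get_current_lr(ep)) for ep in (0, 8, 12, 16)]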
# TensorBoard Visualizers
TRAIN_WRITER = SummaryWriter(os.path.join(cfgs.log_dir, 'train'))
TEST_WRITER = SummaryWriter(os.path.join(cfgs.log_dir, 'test'))
train_dataloader_length = len(TRAIN_DATALOADER)
# ------------------------------------------------------------------------- GLOBAL CONFIG END
def train_one_epoch(ep):
stat_dict = {} # collect statistics
adjust_learning_rate(optimizer, EPOCH_CNT)
bnm_scheduler.step() # decay BN momentum
# set model to training mode
net.train()
for batch_idx, batch_data_label in enumerate(TRAIN_DATALOADER):
for key in batch_data_label:
if 'list' in key:
for i in range(len(batch_data_label[key])):
for j in range(len(batch_data_label[key][i])):
batch_data_label[key][i][j] = batch_data_label[key][i][j].to(device)
else:
batch_data_label[key] = batch_data_label[key].to(device)
# Forward pass
end_points = net(batch_data_label)
# Compute loss and gradients, update parameters.
loss, end_points = get_loss(end_points)
loss.backward()
        if (batch_idx + 1) % 1 == 0:  # gradient accumulation interval of 1: step every batch
optimizer.step()
optimizer.zero_grad()
# Accumulate statistics and print out
for key in end_points:
if 'loss' in key or 'acc' in key or 'prec' in key or 'recall' in key or 'count' in key:
if key not in stat_dict: stat_dict[key] = 0
stat_dict[key] += end_points[key].item()
batch_interval = 10
if (batch_idx + 1) % batch_interval == 0:
log_string(' ---- batch: %03d ----' % (batch_idx + 1))
for key in sorted(stat_dict.keys()):
TRAIN_WRITER.add_scalar(key, stat_dict[key] / batch_interval,
(EPOCH_CNT * len(TRAIN_DATALOADER) + batch_idx) * cfgs.batch_size)
log_string('mean %s: %f' % (key, stat_dict[key] / batch_interval))
stat_dict[key] = 0
        if batch_idx == train_dataloader_length // 2:  # integer division so the equality can hold
# Save checkpoint
save_dict = {'epoch': ep + 1, # after training one epoch, the start_epoch should be epoch+1
'optimizer_state_dict': optimizer.state_dict(),
'loss': loss,
}
try: # with nn.DataParallel() the net is added as a submodule of DataParallel
save_dict['model_state_dict'] = net.module.state_dict()
except:
save_dict['model_state_dict'] = net.state_dict()
torch.save(save_dict, os.path.join(cfgs.log_dir, 'checkpoint_epoch{}_half.tar'.format(ep + 1)))
def evaluate_one_epoch():
stat_dict = {} # collect statistics
# set model to eval mode (for bn and dp)
net.eval()
for batch_idx, batch_data_label in enumerate(TEST_DATALOADER):
if batch_idx % 10 == 0:
print('Eval batch: %d' % (batch_idx))
for key in batch_data_label:
if 'list' in key:
for i in range(len(batch_data_label[key])):
for j in range(len(batch_data_label[key][i])):
batch_data_label[key][i][j] = batch_data_label[key][i][j].to(device)
else:
batch_data_label[key] = batch_data_label[key].to(device)
# Forward pass
with torch.no_grad():
end_points = net(batch_data_label)
# Compute loss
loss, end_points = get_loss(end_points)
# Accumulate statistics and print out
for key in end_points:
if 'loss' in key or 'acc' in key or 'prec' in key or 'recall' in key or 'count' in key:
if key not in stat_dict: stat_dict[key] = 0
stat_dict[key] += end_points[key].item()
for key in sorted(stat_dict.keys()):
TEST_WRITER.add_scalar(key, stat_dict[key] / float(batch_idx + 1),
(EPOCH_CNT + 1) * len(TRAIN_DATALOADER) * cfgs.batch_size)
log_string('eval mean %s: %f' % (key, stat_dict[key] / (float(batch_idx + 1))))
mean_loss = stat_dict['loss/overall_loss'] / float(batch_idx + 1)
return mean_loss
def train(start_epoch):
global EPOCH_CNT
min_loss = 1e10
loss = 0
for epoch in range(start_epoch, cfgs.max_epoch):
EPOCH_CNT = epoch
log_string('**** EPOCH %03d ****' % epoch)
log_string('Current learning rate: %f' % (get_current_lr(epoch)))
log_string('Current BN decay momentum: %f' % (bnm_scheduler.lmbd(bnm_scheduler.last_epoch)))
log_string(str(datetime.now()))
# Reset numpy seed.
# REF: https://github.com/pytorch/pytorch/issues/5059
np.random.seed()
train_one_epoch(epoch)
loss = evaluate_one_epoch()
# Save checkpoint
save_dict = {'epoch': epoch + 1, # after training one epoch, the start_epoch should be epoch+1
'optimizer_state_dict': optimizer.state_dict(),
'loss': loss,
}
try: # with nn.DataParallel() the net is added as a submodule of DataParallel
save_dict['model_state_dict'] = net.module.state_dict()
except:
save_dict['model_state_dict'] = net.state_dict()
torch.save(save_dict, os.path.join(cfgs.log_dir, 'checkpoint_epoch{}.tar'.format(epoch + 1)))
if __name__ == '__main__':
train(start_epoch)
|
the-stack_0_22900 | from taylorDiagram import plot_Taylor_graph
import matplotlib.pyplot as plt
from PyEMD import EEMD
from hht import hht
from hht import plot_imfs
from hht import plot_frequency
import waipy
from scipy.stats import norm
import numpy as np
# from matplotlib.ticker import FuncFormatter
def millions(x, pos):
'The two args are the value and tick position'
return '%1.1fY' % (x/365.0)
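# Illustrative sketch: despite its name, the formatter above turns a tick
# value given in days into a year label; it could be attached to an axis via
# matplotlib.ticker.FuncFormatter(millions).
def _millions_example():
    return millions(730, None)  # -> '2.0Y'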
plt.rcParams.update({'font.size': 10})
fontsize = 13
lengendfontsize = 12
col = ['plum', 'darkorchid', 'blue', 'navy', 'deepskyblue', 'darkcyan', 'seagreen', 'darkgreen',
'olivedrab', 'gold', 'tan', 'red', 'palevioletred', 'm']
start_year = 1991
end_year = 2014
# from matplotlib import colors as mcolors
# colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)
# by_hsv = sorted((tuple(mcolors.rgb_to_hsv(mcolors.to_rgba(color)[:3])), name)
# for name, color in colors.items())
# col = [name for hsv, name in by_hsv][]
# def plot_imfs(signal, imfs, time_samples = None, fig=None):
# ''' Author jaidevd https://github.com/jaidevd/pyhht/blob/dev/pyhht/visualization.py '''
# '''Original function from pyhht, but without plt.show()'''
# n_imfs = imfs.shape[0]
# # print(np.abs(imfs[:-1, :]))
# # axis_extent = max(np.max(np.abs(imfs[:-1, :]), axis=0))
# # Plot original signal
# ax = plt.subplot(n_imfs + 1, 1, 1)
# ax.plot(time_samples, signal)
# ax.axis([time_samples[0], time_samples[-1], signal.min(), signal.max()])
# ax.tick_params(which='both', left=False, bottom=False, labelleft=False,
# labelbottom=False)
# ax.grid(False)
# ax.set_ylabel('Signal')
# ax.set_title('Empirical Mode Decomposition')
#
# # Plot the IMFs
# for i in range(n_imfs - 1):
# # print(i + 2)
# ax = plt.subplot(n_imfs + 1, 1, i + 2)
# ax.plot(time_samples, imfs[i, :])
# # ax.axis([time_samples[0], time_samples[-1], -axis_extent, axis_extent])
# ax.tick_params(which='both', left=False, bottom=False, labelleft=False,
# labelbottom=False)
# ax.grid(False)
# ax.set_ylabel('imf' + str(i + 1), fontsize=fontsize)
#
# # Plot the residue
# ax = plt.subplot(n_imfs + 1, 1, n_imfs + 1)
# ax.plot(time_samples, imfs[-1, :], 'r')
# ax.axis('tight')
# ax.tick_params(which='both', left=False, bottom=False, labelleft=False,
# labelbottom=False)
# ax.grid(False)
# ax.set_ylabel('res.', fontsize=fontsize)
# return ax
class spectrum_post(object):
def __init__(self, filedir, h_site_name_obs, day_obs, day_mod, variable_name):
[d_obs, d_t_obs, d_unit_obs] = day_obs
[d_mod, d_t_mod, d_unit_mod] = day_mod
self.d_obs = d_obs
self.d_mod = d_mod
self.d_t_obs = d_t_obs
self.d_unit_obs = d_unit_obs
self.sitename = h_site_name_obs
self.variable = variable_name
self.filedir = filedir
def plot_decomposer_imf(self):
d_obs = self.d_obs
d_mod = self.d_mod
d_t_obs = self.d_t_obs
scores = []
for j, site in enumerate(self.sitename):
if self.sitename.mask[j]:
continue
print('Process on Decomposer_IMF_' + site + '_No.' + str(j) + '!')
fig0 = plt.figure(figsize=(8, 4))
fig3 = plt.figure(figsize=(5, 5))
data = d_obs[j, :].compressed()
time = d_t_obs[~d_obs[j, :].mask]
eemd = EEMD(trials=5)
if len(data) > 0:
imfs = eemd.eemd(data)
# print('obs',imfs.shape)
if len(imfs) >= 1:
ax0 = fig0.add_subplot(1, 2, 1)
ax0.plot(time, (imfs[len(imfs) - 1]), 'k-', label='Observed')
                    d_d_obs = np.asarray(
                        [str(start_year + int(x) // 365) + (
                            '0' + str(int(x) % 365 // 31 + 1)
                            if int(x) % 365 // 31 < 9
                            else str(int(x) % 365 // 31 + 1))
                         for x in time])
                    ax0.xaxis.set_ticks(
                        [time[0], time[2 * len(time) // 5],
                         time[4 * len(time) // 5]])
                    ax0.set_xticklabels(
                        [d_d_obs[0], d_d_obs[2 * len(d_d_obs) // 5],
                         d_d_obs[4 * len(d_d_obs) // 5]])
## hht spectrum
if len(imfs) >= 1:
fig3, freq = hht(data, imfs, time, 1, fig3)
fig3.savefig(self.filedir + self.variable + '/' + site + '_hht_IMF_observed' + self.variable + '.png', bbox_inches='tight')
fig1 = plt.figure(figsize=(4 * (len(d_mod)+1), 8))
fig2 = plt.figure(figsize=(4 * (len(d_mod)+1), 8))
fig1.subplots_adjust(wspace=0.5, hspace=0.3)
fig2.subplots_adjust(wspace=0.5, hspace=0.3)
if len(data) > 0:
if len(imfs) >= 1:
fig1 = plot_imfs(data, imfs, time_samples=time, fig=fig1, no=1, m=len(d_mod))
fig2 = plot_frequency(data, freq.T, time_samples=time, fig=fig2, no=1, m=len(d_mod))
models1 = []
datamask = []
data1 = imfs[len(imfs) - 1]
for m in range(len(d_mod)):
## hht spectrum
eemd = EEMD(trials=5)
fig3 = plt.figure(figsize=(5, 5))
data2 = d_mod[m][j, :][~d_obs[j, :].mask]
imfs = eemd.eemd(data2.compressed())
# print('mod'+str(m), imfs.shape)
if len(imfs) >= 1:
fig3, freq = hht(data2.compressed(), imfs, time[~data2.mask], 1, fig3)
fig3.savefig(self.filedir + self.variable + '/' + site + '_hht_IMF_model' + str(m + 1) + self.variable + '.png', bbox_inches='tight')
if len(imfs) >= 1:
fig1 = plot_imfs(data2.compressed(), imfs, time_samples=time[~data2.mask], fig=fig1, no=m+2, m=len(d_mod))
fig2 = plot_frequency(data2.compressed(), freq.T, time_samples=time[~data2.mask], fig=fig2, no=m+2, m=len(d_mod))
ax0.plot(time[~data2.mask], (imfs[len(imfs) - 1]), '-', label='Model' + str(m+1), c=col[m])
models1.append(imfs[len(imfs) - 1])
datamask.append(data2)
ax0.set_xlabel('Time', fontsize=fontsize)
ax0.set_ylabel('' + self.variable + '(' + self.d_unit_obs + ')', fontsize=fontsize)
ax0.yaxis.tick_right()
ax0.yaxis.set_label_position("right")
ax0.legend(bbox_to_anchor=(-0.05, 1), shadow=False, fontsize='medium')
plot_Taylor_graph(data1, models1, fig0, 122, datamask=datamask)
else:
print("'Data's length is too short !")
fig1.savefig(self.filedir + self.variable + '/' + site + '_Decompose_IMF_' + self.variable + '.png', bbox_inches='tight')
fig2.savefig(self.filedir + self.variable + '/' + site + '_deviation_IMF_' + self.variable + '.png', bbox_inches='tight')
fig0.subplots_adjust(left=0.1, hspace=0.25, wspace=0.55)
fig0.savefig(self.filedir + self.variable + '/' + site + '_' + 'IMF' + '_' + self.variable + '.png', bbox_inches='tight')
plt.close('all')
return scores
def plot_wavelet(self):
d_obs = self.d_obs
d_mod = self.d_mod
d_t_obs = self.d_t_obs
""" plot data wavelet """
scores = []
for j, site in enumerate(self.sitename):
if self.sitename.mask[j]:
continue
print('Process on Wavelet_' + site + '_No.' + str(j) + '!')
data = d_obs[j, :].compressed()
fig3 = plt.figure(figsize=(8, 8))
if len(data) > 0:
time_data = d_t_obs[~d_obs[j, :].mask]
# time_data = d_t_obs
result = waipy.cwt(data, 1, 1, 0.125, 2, 4 / 0.125, 0.72, 6, mother='Morlet', name='Obs')
waipy.wavelet_plot('Obs', time_data, data, 0.03125, result, fig3, unit=self.d_unit_obs)
# plt.tight_layout()
for m in range(len(d_mod)):
fig4 = plt.figure(figsize=(8, 8))
data2 = d_mod[m][j, :][~d_obs[j, :].mask]
data = data2.compressed() - d_obs[j, :].compressed()[~data2.mask]
if len(data) > 0:
result = waipy.cwt(data, 1, 1, 0.125, 2, 4 / 0.125, 0.72, 6, mother='Morlet', name='Obs - Mod' + str(m+1))
waipy.wavelet_plot('Obs - Mod' + str(m+1), time_data[~data2.mask], data, 0.03125, result, fig4, unit=self.d_unit_obs, m=m)
# plt.tight_layout()
fig4.savefig(self.filedir + self.variable + '/' + site + 'model' + str(m) + '_wavelet_' + self.variable + '.png', bbox_inches='tight')
fig3.savefig(self.filedir + self.variable + '/' + site + '_Wavelet_' + self.variable + '.png',
bbox_inches='tight')
plt.close('all')
return scores
def plot_spectrum(self):
import waipy, math
import numpy as np
import matplotlib.pyplot as plt
d_obs = self.d_obs
d_mod = self.d_mod
d_t_obs = self.d_t_obs
scores = []
""" Plot global wavelet spectrum """
# col = ['palevioletred', 'm', 'plum', 'darkorchid', 'blue', 'navy', 'deepskyblue', 'darkcyan', 'seagreen',
# 'darkgreen', 'olivedrab', 'gold', 'tan', 'red']
for j, site in enumerate(self.sitename):
if self.sitename.mask[j]:
continue
print('Process on Spectrum_' + site + '_No.' + str(j) + '!')
data = d_obs[j, :].compressed()
if len(data) > 0:
result = waipy.cwt(data, 1, 1, 0.125, 2, 4 / 0.125, 0.72, 6, mother='Morlet', name='Data')
loc_o, scale_o = norm.fit(result['global_ws'])
fig4 = plt.figure(figsize=(4, 4))
ax4 = fig4.add_subplot(1, 1, 1)
# f1, sxx1 = waipy.fft(data)
# ax.plot(np.log2(1 / f1 * result['dt']), sxx1, 'red', label='Fourier spectrum')
# plt.suptitle(self.variable + ' ( ' + self.d_unit_obs + ' )', fontsize=8)
ax4.plot(np.log2(result['period']), result['global_ws'], 'k-', label='Wavelet spectrum')
ax4.plot(np.log2(result['period']), result['global_signif'], 'r--', label='95% confidence spectrum')
model_score = []
for m in range(len(d_mod)):
data2 = d_mod[m][j, :][~d_obs[j, :].mask]
data = data2.compressed()
# data = d_mod[m][j, :][~d_obs[j, :].mask]
if len(data) > 0:
result_temp = waipy.cwt(data, 1, 1, 0.125, 2, 4 / 0.125, 0.72, 6, mother='Morlet', name='Data')
loc_m, scale_m = norm.fit(result_temp['global_ws'])
model_score.append(abs(loc_m-loc_o))
                    ax4.plot(np.log2(result_temp['period']), result_temp['global_ws'], label='Model' + str(m + 1), c=col[m])
else:
model_score.append(0.5)
model_score = [i/max(model_score) for i in model_score]
scores.append(model_score)
ax4.legend(bbox_to_anchor=(1.05, 1), loc=2, fontsize=lengendfontsize)
# ax4.set_ylim(0, 1.25 * np.max(result['global_ws']))
ax4.set_ylabel('Power', fontsize=fontsize)
ax4.set_title('Global Wavelet Spectrum', fontsize=fontsize)
y_min = int(min(np.log2(result['period'][0]), np.log2(result_temp['period'][0])))
y_max = int(max(np.log2(result['period'][-1]) + 1, np.log2(result_temp['period'][-1]) + 1))
yt = range(y_min, y_max, 3) # create the vector of period
Yticks = [float(math.pow(2, p)) for p in yt] # make 2^periods
ax4.set_xticks(yt)
ax4.set_xticklabels(Yticks)
ax4.set_xlim(xmin=(np.log2(np.min(result['period']))), xmax=(np.log2(np.max(result['period']))))
plt.tight_layout()
fig4.savefig(self.filedir + self.variable + '/' + site + '_spectrum_' + self.variable + '.png', bbox_inches='tight')
plt.close('all')
return scores
def plot_taylor_gram(self):
d_obs = self.d_obs
d_mod = self.d_mod
d_t_obs = self.d_t_obs
scores = []
""" Taylor diagram """
for j, site in enumerate(self.sitename):
if self.sitename.mask[j]:
continue
print('Process on Taylor_' + site + '_No.' + str(j) + '!')
data1 = d_obs[j, :].compressed()
models1 = []
fig7 = plt.figure(figsize=(8, 8))
for m in range(len(d_mod)):
models1.append(d_mod[m][j, :][~d_obs[j, :].mask])
# print(data1.shape, models1[0].shape)
plot_Taylor_graph(data1, models1, fig7, 111)
fig7.savefig(self.filedir + self.variable + '/' +site + '_taylor_' + self.variable + '.png', bbox_inches='tight')
plt.close('all')
return scores
def plot_spectrum_score(self):
import waipy, math
import numpy as np
import matplotlib.pyplot as plt
d_obs = self.d_obs
d_mod = self.d_mod
d_t_obs = self.d_t_obs
scores = []
""" Plot global wavelet spectrum """
for j, site in enumerate(self.sitename):
if self.sitename.mask[j]:
continue
print('Process on Spectrum_' + site + '_No.' + str(j) + '!')
data = d_obs[j, :].compressed()
result = waipy.cwt(data, 1, 1, 0.125, 2, 4 / 0.125, 0.72, 6, mother='Morlet', name='Data')
loc_o, scale_o = norm.fit(result['global_ws'])
model_score = []
for m in range(len(d_mod)):
data = d_mod[m][j, :][~d_obs[j, :].mask]
result_temp = waipy.cwt(data, 1, 1, 0.125, 2, 4 / 0.125, 0.72, 6, mother='Morlet', name='Data')
loc_m, scale_m = norm.fit(result_temp['global_ws'])
model_score.append(abs(loc_m - loc_o))
            model_score = [i / max(model_score) for i in model_score]
scores.append(model_score)
return scores
def spectrum_analysis(filedir,h_site_name_obs, day_obs, day_mod, variable_name):
    # run the frequency/spectrum analyses and collect their scores
f2 = spectrum_post(filedir, h_site_name_obs, day_obs, day_mod, variable_name)
scores_decomposeimf = f2.plot_decomposer_imf()
score_wavelet = f2.plot_wavelet()
score_spectrum = f2.plot_spectrum()
    return scores_decomposeimf, score_wavelet, score_spectrum
|
the-stack_0_22901 | from pytpp.attributes._helper import IterableMeta, Attribute
from pytpp.attributes.agent_base import AgentBaseAttributes
from pytpp.attributes.top import TopAttributes
class AgentAttributes(AgentBaseAttributes, TopAttributes, metaclass=IterableMeta):
__config_class__ = "Agent"
action = Attribute('Action')
active_directory_dn_host_detail = Attribute('Active Directory DN Host Detail')
active_directory_domain_host_detail = Attribute('Active Directory Domain Host Detail')
active_directory_host_details = Attribute('Active Directory Host Details')
active_directory_query_host_detail = Attribute('Active Directory Query Host Detail')
active_directory_source_dn_host_detail = Attribute('Active Directory Source DN Host Detail')
address = Attribute('Address')
configuration_host_details = Attribute('Configuration Host Details')
custom_host_detail = Attribute('Custom Host Detail')
environment_host_details = Attribute('Environment Host Details')
first_update = Attribute('First Update')
host_os = Attribute('Host OS')
hostname = Attribute('Hostname')
last_update = Attribute('Last Update')
os_id = Attribute('OS Id')
|
the-stack_0_22904 | #!/usr/bin/env python
import io
import os
import re
import sys
import time
import json
import socket
import locale
import logging
import argparse
from http import cookiejar
from importlib import import_module
from urllib import request, parse, error
from .version import __version__
from .util import log, term
from .util.git import get_version
from .util.strings import get_filename, unescape_html
from . import json_output as json_output_
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf8')
SITES = {
'163' : 'netease',
'56' : 'w56',
'acfun' : 'acfun',
'archive' : 'archive',
'baidu' : 'baidu',
'bandcamp' : 'bandcamp',
'baomihua' : 'baomihua',
'bigthink' : 'bigthink',
'bilibili' : 'bilibili',
'cctv' : 'cntv',
'cntv' : 'cntv',
'cbs' : 'cbs',
'coub' : 'coub',
'dailymotion' : 'dailymotion',
'dilidili' : 'dilidili',
'douban' : 'douban',
'douyin' : 'douyin',
'douyu' : 'douyutv',
'ehow' : 'ehow',
'facebook' : 'facebook',
'fantasy' : 'fantasy',
'fc2' : 'fc2video',
'flickr' : 'flickr',
'freesound' : 'freesound',
'fun' : 'funshion',
'google' : 'google',
'giphy' : 'giphy',
'heavy-music' : 'heavymusic',
'huaban' : 'huaban',
'huomao' : 'huomaotv',
'iask' : 'sina',
'icourses' : 'icourses',
'ifeng' : 'ifeng',
'imgur' : 'imgur',
'in' : 'alive',
'infoq' : 'infoq',
'instagram' : 'instagram',
'interest' : 'interest',
'iqilu' : 'iqilu',
'iqiyi' : 'iqiyi',
'ixigua' : 'ixigua',
'isuntv' : 'suntv',
'joy' : 'joy',
'kankanews' : 'bilibili',
'khanacademy' : 'khan',
'ku6' : 'ku6',
'kuaishou' : 'kuaishou',
'kugou' : 'kugou',
'kuwo' : 'kuwo',
'le' : 'le',
'letv' : 'le',
'lizhi' : 'lizhi',
'longzhu' : 'longzhu',
'magisto' : 'magisto',
'metacafe' : 'metacafe',
'mgtv' : 'mgtv',
'miomio' : 'miomio',
'mixcloud' : 'mixcloud',
'mtv81' : 'mtv81',
'musicplayon' : 'musicplayon',
'naver' : 'naver',
'7gogo' : 'nanagogo',
'nicovideo' : 'nicovideo',
'panda' : 'panda',
'pinterest' : 'pinterest',
'pixnet' : 'pixnet',
'pptv' : 'pptv',
'qingting' : 'qingting',
'qq' : 'qq',
'quanmin' : 'quanmin',
'showroom-live' : 'showroom',
'sina' : 'sina',
'smgbb' : 'bilibili',
'sohu' : 'sohu',
'soundcloud' : 'soundcloud',
'ted' : 'ted',
'theplatform' : 'theplatform',
'tucao' : 'tucao',
'tudou' : 'tudou',
'tumblr' : 'tumblr',
'twimg' : 'twitter',
'twitter' : 'twitter',
'ucas' : 'ucas',
'videomega' : 'videomega',
'vidto' : 'vidto',
'vimeo' : 'vimeo',
'wanmen' : 'wanmen',
'weibo' : 'miaopai',
'veoh' : 'veoh',
'vine' : 'vine',
'vk' : 'vk',
'xiami' : 'xiami',
'xiaokaxiu' : 'yixia',
'xiaojiadianvideo' : 'fc2video',
'ximalaya' : 'ximalaya',
'yinyuetai' : 'yinyuetai',
'miaopai' : 'yixia',
'yizhibo' : 'yizhibo',
'youku' : 'youku',
'iwara' : 'iwara',
'youtu' : 'youtube',
'youtube' : 'youtube',
'zhanqi' : 'zhanqi',
'365yg' : 'toutiao',
}
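# Illustrative lookup: a keyword taken from a URL's host name selects the
# site-specific extractor, e.g. shortened youtu.be links map to 'youtube'.
def _extractor_name_example(host_keyword='youtu'):
    return SITES.get(host_keyword)  # -> 'youtube'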
dry_run = False
json_output = False
force = False
player = None
extractor_proxy = None
cookies = None
output_filename = None
auto_rename = False
fake_headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', # noqa
'Accept-Charset': 'UTF-8,*;q=0.5',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0', # noqa
}
if sys.stdout.isatty():
default_encoding = sys.stdout.encoding.lower()
else:
default_encoding = locale.getpreferredencoding().lower()
def rc4(key, data):
# all encryption algo should work on bytes
assert type(key) == type(data) and type(key) == type(b'')
state = list(range(256))
j = 0
for i in range(256):
j += state[i] + key[i % len(key)]
j &= 0xff
state[i], state[j] = state[j], state[i]
i = 0
j = 0
out_list = []
for char in data:
i += 1
i &= 0xff
j += state[i]
j &= 0xff
state[i], state[j] = state[j], state[i]
prn = state[(state[i] + state[j]) & 0xff]
out_list.append(char ^ prn)
return bytes(out_list)
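# Illustrative sketch: RC4 is symmetric, so applying it twice with the same
# key restores the plaintext; key and data below are hypothetical byte strings.
def _rc4_roundtrip_example():
    key, plaintext = b'secret', b'hello world'
    ciphertext = rc4(key, plaintext)
    assert rc4(key, ciphertext) == plaintext
    return ciphertext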
def general_m3u8_extractor(url, headers={}):
m3u8_list = get_content(url, headers=headers).split('\n')
urls = []
for line in m3u8_list:
line = line.strip()
if line and not line.startswith('#'):
if line.startswith('http'):
urls.append(line)
else:
seg_url = parse.urljoin(url, line)
urls.append(seg_url)
return urls
def maybe_print(*s):
try:
print(*s)
except:
pass
def tr(s):
if default_encoding == 'utf-8':
return s
else:
return s
# return str(s.encode('utf-8'))[2:-1]
# DEPRECATED in favor of match1()
def r1(pattern, text):
m = re.search(pattern, text)
if m:
return m.group(1)
# DEPRECATED in favor of match1()
def r1_of(patterns, text):
for p in patterns:
x = r1(p, text)
if x:
return x
def match1(text, *patterns):
"""Scans through a string for substrings matched some patterns (first-subgroups only).
Args:
text: A string to be scanned.
patterns: Arbitrary number of regex patterns.
Returns:
When only one pattern is given, returns a string (None if no match found).
When more than one pattern are given, returns a list of strings ([] if no match found).
"""
if len(patterns) == 1:
pattern = patterns[0]
match = re.search(pattern, text)
if match:
return match.group(1)
else:
return None
else:
ret = []
for pattern in patterns:
match = re.search(pattern, text)
if match:
ret.append(match.group(1))
return ret
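# Illustrative sketch of the two calling conventions documented above; the
# example string and patterns are hypothetical.
def _match1_example():
    single = match1('v=123&x=1', r'v=(\d+)')            # -> '123'
    both = match1('v=123&x=1', r'v=(\d+)', r'x=(\d+)')  # -> ['123', '1']
    return single, both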
def matchall(text, patterns):
"""Scans through a string for substrings matched some patterns.
Args:
text: A string to be scanned.
patterns: a list of regex pattern.
Returns:
a list if matched. empty if not.
"""
ret = []
for pattern in patterns:
match = re.findall(pattern, text)
ret += match
return ret
def launch_player(player, urls):
import subprocess
import shlex
subprocess.call(shlex.split(player) + list(urls))
def parse_query_param(url, param):
"""Parses the query string of a URL and returns the value of a parameter.
Args:
url: A URL.
param: A string representing the name of the parameter.
Returns:
The value of the parameter.
"""
try:
return parse.parse_qs(parse.urlparse(url).query)[param][0]
except:
return None
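# Illustrative sketch with a hypothetical URL: extract one query parameter.
def _parse_query_param_example():
    return parse_query_param('http://example.com/watch?v=abc123&t=10', 'v')  # -> 'abc123'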
def unicodize(text):
return re.sub(
r'\\u([0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f])',
lambda x: chr(int(x.group(0)[2:], 16)),
text
)
# DEPRECATED in favor of util.legitimize()
def escape_file_path(path):
path = path.replace('/', '-')
path = path.replace('\\', '-')
path = path.replace('*', '-')
path = path.replace('?', '-')
return path
def ungzip(data):
"""Decompresses data for Content-Encoding: gzip.
"""
from io import BytesIO
import gzip
buffer = BytesIO(data)
f = gzip.GzipFile(fileobj=buffer)
return f.read()
def undeflate(data):
"""Decompresses data for Content-Encoding: deflate.
(the zlib compression is used.)
"""
import zlib
decompressobj = zlib.decompressobj(-zlib.MAX_WBITS)
return decompressobj.decompress(data)+decompressobj.flush()
# DEPRECATED in favor of get_content()
def get_response(url, faker=False):
logging.debug('get_response: %s' % url)
# install cookies
if cookies:
opener = request.build_opener(request.HTTPCookieProcessor(cookies))
request.install_opener(opener)
if faker:
response = request.urlopen(
request.Request(url, headers=fake_headers), None
)
else:
response = request.urlopen(url)
data = response.read()
if response.info().get('Content-Encoding') == 'gzip':
data = ungzip(data)
elif response.info().get('Content-Encoding') == 'deflate':
data = undeflate(data)
response.data = data
return response
# DEPRECATED in favor of get_content()
def get_html(url, encoding=None, faker=False):
content = get_response(url, faker).data
return str(content, 'utf-8', 'ignore')
# DEPRECATED in favor of get_content()
def get_decoded_html(url, faker=False):
response = get_response(url, faker)
data = response.data
charset = r1(r'charset=([\w-]+)', response.headers['content-type'])
if charset:
return data.decode(charset, 'ignore')
else:
return data
def get_location(url):
logging.debug('get_location: %s' % url)
response = request.urlopen(url)
# urllib will follow redirections and it's too much code to tell urllib
# not to do that
return response.geturl()
def urlopen_with_retry(*args, **kwargs):
retry_time = 3
for i in range(retry_time):
try:
return request.urlopen(*args, **kwargs)
except socket.timeout as e:
logging.debug('request attempt %s timeout' % str(i + 1))
if i + 1 == retry_time:
raise e
# try to tackle youku CDN fails
except error.HTTPError as http_error:
logging.debug('HTTP Error with code{}'.format(http_error.code))
if i + 1 == retry_time:
raise http_error
def get_content(url, headers={}, decoded=True):
"""Gets the content of a URL via sending a HTTP GET request.
Args:
url: A URL.
headers: Request headers used by the client.
decoded: Whether decode the response body using UTF-8 or the charset specified in Content-Type.
Returns:
The content as a string.
"""
logging.debug('get_content: %s' % url)
req = request.Request(url, headers=headers)
if cookies:
cookies.add_cookie_header(req)
req.headers.update(req.unredirected_hdrs)
response = urlopen_with_retry(req)
data = response.read()
# Handle HTTP compression for gzip and deflate (zlib)
content_encoding = response.getheader('Content-Encoding')
if content_encoding == 'gzip':
data = ungzip(data)
elif content_encoding == 'deflate':
data = undeflate(data)
# Decode the response body
if decoded:
charset = match1(
response.getheader('Content-Type'), r'charset=([\w-]+)'
)
if charset is not None:
data = data.decode(charset)
else:
data = data.decode('utf-8', 'ignore')
return data
def post_content(url, headers={}, post_data={}, decoded=True):
"""Post the content of a URL via sending a HTTP POST request.
Args:
url: A URL.
headers: Request headers used by the client.
decoded: Whether decode the response body using UTF-8 or the charset specified in Content-Type.
Returns:
The content as a string.
"""
logging.debug('post_content: %s \n post_data: %s' % (url, post_data))
req = request.Request(url, headers=headers)
if cookies:
cookies.add_cookie_header(req)
req.headers.update(req.unredirected_hdrs)
post_data_enc = bytes(parse.urlencode(post_data), 'utf-8')
response = urlopen_with_retry(req, data=post_data_enc)
data = response.read()
# Handle HTTP compression for gzip and deflate (zlib)
content_encoding = response.getheader('Content-Encoding')
if content_encoding == 'gzip':
data = ungzip(data)
elif content_encoding == 'deflate':
data = undeflate(data)
# Decode the response body
if decoded:
charset = match1(
response.getheader('Content-Type'), r'charset=([\w-]+)'
)
if charset is not None:
data = data.decode(charset)
else:
data = data.decode('utf-8')
return data
def url_size(url, faker=False, headers={}):
if faker:
response = urlopen_with_retry(
request.Request(url, headers=fake_headers)
)
elif headers:
response = urlopen_with_retry(request.Request(url, headers=headers))
else:
response = urlopen_with_retry(url)
size = response.headers['content-length']
return int(size) if size is not None else float('inf')
def urls_size(urls, faker=False, headers={}):
return sum([url_size(url, faker=faker, headers=headers) for url in urls])
def get_head(url, headers={}, get_method='HEAD'):
logging.debug('get_head: %s' % url)
if headers:
req = request.Request(url, headers=headers)
else:
req = request.Request(url)
req.get_method = lambda: get_method
res = urlopen_with_retry(req)
return dict(res.headers)
def url_info(url, faker=False, headers={}):
logging.debug('url_info: %s' % url)
if faker:
response = urlopen_with_retry(
request.Request(url, headers=fake_headers)
)
elif headers:
response = urlopen_with_retry(request.Request(url, headers=headers))
else:
response = urlopen_with_retry(request.Request(url))
headers = response.headers
type = headers['content-type']
if type == 'image/jpg; charset=UTF-8' or type == 'image/jpg':
type = 'audio/mpeg' # fix for netease
mapping = {
'video/3gpp': '3gp',
'video/f4v': 'flv',
'video/mp4': 'mp4',
'video/MP2T': 'ts',
'video/quicktime': 'mov',
'video/webm': 'webm',
'video/x-flv': 'flv',
'video/x-ms-asf': 'asf',
'audio/mp4': 'mp4',
'audio/mpeg': 'mp3',
'audio/wav': 'wav',
'audio/x-wav': 'wav',
'audio/wave': 'wav',
'image/jpeg': 'jpg',
'image/png': 'png',
'image/gif': 'gif',
'application/pdf': 'pdf',
}
if type in mapping:
ext = mapping[type]
else:
type = None
if headers['content-disposition']:
try:
filename = parse.unquote(
r1(r'filename="?([^"]+)"?', headers['content-disposition'])
)
if len(filename.split('.')) > 1:
ext = filename.split('.')[-1]
else:
ext = None
except:
ext = None
else:
ext = None
if headers['transfer-encoding'] != 'chunked':
size = headers['content-length'] and int(headers['content-length'])
else:
size = None
return type, ext, size
def url_locations(urls, faker=False, headers={}):
locations = []
for url in urls:
logging.debug('url_locations: %s' % url)
if faker:
response = urlopen_with_retry(
request.Request(url, headers=fake_headers)
)
elif headers:
response = urlopen_with_retry(
request.Request(url, headers=headers)
)
else:
response = urlopen_with_retry(request.Request(url))
locations.append(response.url)
return locations
def url_save(
url, filepath, bar, refer=None, is_part=False, faker=False,
headers=None, timeout=None, **kwargs
):
tmp_headers = headers.copy() if headers is not None else {}
# When a referer specified with param refer,
# the key must be 'Referer' for the hack here
if refer is not None:
tmp_headers['Referer'] = refer
file_size = url_size(url, faker=faker, headers=tmp_headers)
continue_renameing = True
while continue_renameing:
continue_renameing = False
if os.path.exists(filepath):
if not force and file_size == os.path.getsize(filepath):
if not is_part:
if bar:
bar.done()
print(
'Skipping {}: file already exists'.format(
tr(os.path.basename(filepath))
)
)
else:
if bar:
bar.update_received(file_size)
return
else:
if not is_part:
if bar:
bar.done()
if not force and auto_rename:
path, ext = os.path.basename(filepath).rsplit('.', 1)
                    finder = re.compile(r' \([1-9]\d*?\)$')
if (finder.search(path) is None):
thisfile = path + ' (1).' + ext
else:
def numreturn(a):
return ' (' + str(int(a.group()[2:-1]) + 1) + ').'
thisfile = finder.sub(numreturn, path) + ext
filepath = os.path.join(os.path.dirname(filepath), thisfile)
print('Changing name to %s' % tr(os.path.basename(filepath)), '...')
continue_renameing = True
continue
print('Overwriting %s' % tr(os.path.basename(filepath)), '...')
elif not os.path.exists(os.path.dirname(filepath)):
os.mkdir(os.path.dirname(filepath))
temp_filepath = filepath + '.download' if file_size != float('inf') \
else filepath
received = 0
if not force:
open_mode = 'ab'
if os.path.exists(temp_filepath):
received += os.path.getsize(temp_filepath)
if bar:
bar.update_received(os.path.getsize(temp_filepath))
else:
open_mode = 'wb'
if received < file_size:
if faker:
tmp_headers = fake_headers
'''
if parameter headers passed in, we have it copied as tmp_header
elif headers:
headers = headers
else:
headers = {}
'''
if received:
tmp_headers['Range'] = 'bytes=' + str(received) + '-'
if refer:
tmp_headers['Referer'] = refer
if timeout:
response = urlopen_with_retry(
request.Request(url, headers=tmp_headers), timeout=timeout
)
else:
response = urlopen_with_retry(
request.Request(url, headers=tmp_headers)
)
try:
range_start = int(
response.headers[
'content-range'
][6:].split('/')[0].split('-')[0]
)
end_length = int(
response.headers['content-range'][6:].split('/')[1]
)
range_length = end_length - range_start
except:
content_length = response.headers['content-length']
range_length = int(content_length) if content_length is not None \
else float('inf')
if file_size != received + range_length:
received = 0
if bar:
bar.received = 0
open_mode = 'wb'
with open(temp_filepath, open_mode) as output:
while True:
buffer = None
try:
buffer = response.read(1024 * 256)
except socket.timeout:
pass
if not buffer:
if received == file_size: # Download finished
break
# Unexpected termination. Retry request
tmp_headers['Range'] = 'bytes=' + str(received) + '-'
response = urlopen_with_retry(
request.Request(url, headers=tmp_headers)
)
continue
output.write(buffer)
received += len(buffer)
if bar:
bar.update_received(len(buffer))
assert received == os.path.getsize(temp_filepath), '%s == %s == %s' % (
received, os.path.getsize(temp_filepath), temp_filepath
)
if os.access(filepath, os.W_OK):
# on Windows rename could fail if destination filepath exists
os.remove(filepath)
os.rename(temp_filepath, filepath)
class SimpleProgressBar:
term_size = term.get_terminal_size()[1]
def __init__(self, total_size, total_pieces=1):
self.displayed = False
self.total_size = total_size
self.total_pieces = total_pieces
self.current_piece = 1
self.received = 0
self.speed = ''
self.last_updated = time.time()
total_pieces_len = len(str(total_pieces))
# 38 is the size of all statically known size in self.bar
total_str = '%5s' % round(self.total_size / 1048576, 1)
total_str_width = max(len(total_str), 5)
self.bar_size = self.term_size - 28 - 2 * total_pieces_len \
- 2 * total_str_width
self.bar = '{:>4}%% ({:>%s}/%sMB) ├{:─<%s}┤[{:>%s}/{:>%s}] {}' % (
total_str_width, total_str, self.bar_size, total_pieces_len,
total_pieces_len
)
def update(self):
self.displayed = True
bar_size = self.bar_size
percent = round(self.received * 100 / self.total_size, 1)
if percent >= 100:
percent = 100
dots = bar_size * int(percent) // 100
plus = int(percent) - dots // bar_size * 100
if plus > 0.8:
plus = '█'
elif plus > 0.4:
plus = '>'
else:
plus = ''
bar = '█' * dots + plus
bar = self.bar.format(
percent, round(self.received / 1048576, 1), bar,
self.current_piece, self.total_pieces, self.speed
)
sys.stdout.write('\r' + bar)
sys.stdout.flush()
def update_received(self, n):
self.received += n
time_diff = time.time() - self.last_updated
bytes_ps = n / time_diff if time_diff else 0
if bytes_ps >= 1024 ** 3:
self.speed = '{:4.0f} GB/s'.format(bytes_ps / 1024 ** 3)
elif bytes_ps >= 1024 ** 2:
self.speed = '{:4.0f} MB/s'.format(bytes_ps / 1024 ** 2)
elif bytes_ps >= 1024:
self.speed = '{:4.0f} kB/s'.format(bytes_ps / 1024)
else:
self.speed = '{:4.0f} B/s'.format(bytes_ps)
self.last_updated = time.time()
self.update()
def update_piece(self, n):
self.current_piece = n
def done(self):
if self.displayed:
print()
self.displayed = False
class PiecesProgressBar:
def __init__(self, total_size, total_pieces=1):
self.displayed = False
self.total_size = total_size
self.total_pieces = total_pieces
self.current_piece = 1
self.received = 0
def update(self):
self.displayed = True
bar = '{0:>5}%[{1:<40}] {2}/{3}'.format(
'', '=' * 40, self.current_piece, self.total_pieces
)
sys.stdout.write('\r' + bar)
sys.stdout.flush()
def update_received(self, n):
self.received += n
self.update()
def update_piece(self, n):
self.current_piece = n
def done(self):
if self.displayed:
print()
self.displayed = False
class DummyProgressBar:
def __init__(self, *args):
pass
def update_received(self, n):
pass
def update_piece(self, n):
pass
def done(self):
pass
def get_output_filename(urls, title, ext, output_dir, merge):
# lame hack for the --output-filename option
global output_filename
if output_filename:
if ext:
return output_filename + '.' + ext
return output_filename
merged_ext = ext
if (len(urls) > 1) and merge:
from .processor.ffmpeg import has_ffmpeg_installed
if ext in ['flv', 'f4v']:
if has_ffmpeg_installed():
merged_ext = 'mp4'
else:
merged_ext = 'flv'
elif ext == 'mp4':
merged_ext = 'mp4'
elif ext == 'ts':
if has_ffmpeg_installed():
merged_ext = 'mkv'
else:
merged_ext = 'ts'
return '%s.%s' % (title, merged_ext)
def print_user_agent(faker=False):
urllib_default_user_agent = 'Python-urllib/%d.%d' % sys.version_info[:2]
user_agent = fake_headers['User-Agent'] if faker else urllib_default_user_agent
print('User Agent: %s' % user_agent)
def download_urls(
urls, title, ext, total_size, output_dir='.', refer=None, merge=True,
faker=False, headers={}, **kwargs
):
assert urls
if json_output:
json_output_.download_urls(
urls=urls, title=title, ext=ext, total_size=total_size,
refer=refer
)
return
if dry_run:
print_user_agent(faker=faker)
print('Real URLs:\n%s' % '\n'.join(urls))
return
if player:
launch_player(player, urls)
return
if not total_size:
try:
total_size = urls_size(urls, faker=faker, headers=headers)
except:
import traceback
traceback.print_exc(file=sys.stdout)
pass
title = tr(get_filename(title))
output_filename = get_output_filename(urls, title, ext, output_dir, merge)
output_filepath = os.path.join(output_dir, output_filename)
if total_size:
if not force and os.path.exists(output_filepath) and not auto_rename\
and os.path.getsize(output_filepath) >= total_size * 0.9:
print('Skipping %s: file already exists' % output_filepath)
print()
return
bar = SimpleProgressBar(total_size, len(urls))
else:
bar = PiecesProgressBar(total_size, len(urls))
if len(urls) == 1:
url = urls[0]
print('Downloading %s ...' % tr(output_filename))
bar.update()
url_save(
url, output_filepath, bar, refer=refer, faker=faker,
headers=headers, **kwargs
)
bar.done()
else:
parts = []
print('Downloading %s.%s ...' % (tr(title), ext))
bar.update()
for i, url in enumerate(urls):
filename = '%s[%02d].%s' % (title, i, ext)
filepath = os.path.join(output_dir, filename)
parts.append(filepath)
# print 'Downloading %s [%s/%s]...' % (tr(filename), i + 1, len(urls))
bar.update_piece(i + 1)
url_save(
url, filepath, bar, refer=refer, is_part=True, faker=faker,
headers=headers, **kwargs
)
bar.done()
if not merge:
print()
return
if 'av' in kwargs and kwargs['av']:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_av
ret = ffmpeg_concat_av(parts, output_filepath, ext)
print('Merged into %s' % output_filename)
if ret == 0:
for part in parts:
os.remove(part)
elif ext in ['flv', 'f4v']:
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_flv_to_mp4
ffmpeg_concat_flv_to_mp4(parts, output_filepath)
else:
from .processor.join_flv import concat_flv
concat_flv(parts, output_filepath)
print('Merged into %s' % output_filename)
except:
raise
else:
for part in parts:
os.remove(part)
elif ext == 'mp4':
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_mp4_to_mp4
ffmpeg_concat_mp4_to_mp4(parts, output_filepath)
else:
from .processor.join_mp4 import concat_mp4
concat_mp4(parts, output_filepath)
print('Merged into %s' % output_filename)
except:
raise
else:
for part in parts:
os.remove(part)
elif ext == 'ts':
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_ts_to_mkv
ffmpeg_concat_ts_to_mkv(parts, output_filepath)
else:
from .processor.join_ts import concat_ts
concat_ts(parts, output_filepath)
print('Merged into %s' % output_filename)
except:
raise
else:
for part in parts:
os.remove(part)
else:
print("Can't merge %s files" % ext)
print()
def download_rtmp_url(
url, title, ext, params={}, total_size=0, output_dir='.', refer=None,
merge=True, faker=False
):
assert url
if dry_run:
print_user_agent(faker=faker)
print('Real URL:\n%s\n' % [url])
if params.get('-y', False): # None or unset -> False
print('Real Playpath:\n%s\n' % [params.get('-y')])
return
if player:
from .processor.rtmpdump import play_rtmpdump_stream
play_rtmpdump_stream(player, url, params)
return
from .processor.rtmpdump import (
has_rtmpdump_installed, download_rtmpdump_stream
)
assert has_rtmpdump_installed(), 'RTMPDump not installed.'
download_rtmpdump_stream(url, title, ext, params, output_dir)
def download_url_ffmpeg(
url, title, ext, params={}, total_size=0, output_dir='.', refer=None,
merge=True, faker=False, stream=True
):
assert url
if dry_run:
print_user_agent(faker=faker)
print('Real URL:\n%s\n' % [url])
if params.get('-y', False): # None or unset ->False
print('Real Playpath:\n%s\n' % [params.get('-y')])
return
if player:
launch_player(player, [url])
return
from .processor.ffmpeg import has_ffmpeg_installed, ffmpeg_download_stream
assert has_ffmpeg_installed(), 'FFmpeg not installed.'
global output_filename
if output_filename:
dotPos = output_filename.rfind('.')
if dotPos > 0:
title = output_filename[:dotPos]
ext = output_filename[dotPos+1:]
else:
title = output_filename
title = tr(get_filename(title))
ffmpeg_download_stream(url, title, ext, params, output_dir, stream=stream)
def playlist_not_supported(name):
def f(*args, **kwargs):
raise NotImplementedError('Playlist is not supported for ' + name)
return f
def print_info(site_info, title, type, size, **kwargs):
if json_output:
json_output_.print_info(
site_info=site_info, title=title, type=type, size=size
)
return
if type:
type = type.lower()
if type in ['3gp']:
type = 'video/3gpp'
elif type in ['asf', 'wmv']:
type = 'video/x-ms-asf'
elif type in ['flv', 'f4v']:
type = 'video/x-flv'
elif type in ['mkv']:
type = 'video/x-matroska'
elif type in ['mp3']:
type = 'audio/mpeg'
elif type in ['mp4']:
type = 'video/mp4'
elif type in ['mov']:
type = 'video/quicktime'
elif type in ['ts']:
type = 'video/MP2T'
elif type in ['webm']:
type = 'video/webm'
elif type in ['jpg']:
type = 'image/jpeg'
elif type in ['png']:
type = 'image/png'
elif type in ['gif']:
type = 'image/gif'
if type in ['video/3gpp']:
type_info = '3GPP multimedia file (%s)' % type
elif type in ['video/x-flv', 'video/f4v']:
type_info = 'Flash video (%s)' % type
elif type in ['video/mp4', 'video/x-m4v']:
type_info = 'MPEG-4 video (%s)' % type
elif type in ['video/MP2T']:
type_info = 'MPEG-2 transport stream (%s)' % type
elif type in ['video/webm']:
type_info = 'WebM video (%s)' % type
# elif type in ['video/ogg']:
# type_info = 'Ogg video (%s)' % type
elif type in ['video/quicktime']:
type_info = 'QuickTime video (%s)' % type
elif type in ['video/x-matroska']:
type_info = 'Matroska video (%s)' % type
# elif type in ['video/x-ms-wmv']:
# type_info = 'Windows Media video (%s)' % type
elif type in ['video/x-ms-asf']:
type_info = 'Advanced Systems Format (%s)' % type
# elif type in ['video/mpeg']:
# type_info = 'MPEG video (%s)' % type
elif type in ['audio/mp4', 'audio/m4a']:
type_info = 'MPEG-4 audio (%s)' % type
elif type in ['audio/mpeg']:
type_info = 'MP3 (%s)' % type
elif type in ['audio/wav', 'audio/wave', 'audio/x-wav']:
type_info = 'Waveform Audio File Format ({})'.format(type)
elif type in ['image/jpeg']:
type_info = 'JPEG Image (%s)' % type
elif type in ['image/png']:
type_info = 'Portable Network Graphics (%s)' % type
elif type in ['image/gif']:
type_info = 'Graphics Interchange Format (%s)' % type
    elif type in ['m3u8']:
        # default to 'Playlist' so type_info is always bound for m3u8 input
        if kwargs.get('m3u8_type') == 'master':
            type_info = 'M3U8 Master {}'.format(type)
        else:
            type_info = 'M3U8 Playlist {}'.format(type)
else:
type_info = 'Unknown type (%s)' % type
maybe_print('Site: ', site_info)
maybe_print('Title: ', unescape_html(tr(title)))
print('Type: ', type_info)
if type != 'm3u8':
print(
'Size: ', round(size / 1048576, 2),
'MiB (' + str(size) + ' Bytes)'
)
if type == 'm3u8' and 'm3u8_url' in kwargs:
print('M3U8 Url: {}'.format(kwargs['m3u8_url']))
print()
def mime_to_container(mime):
mapping = {
'video/3gpp': '3gp',
'video/mp4': 'mp4',
'video/webm': 'webm',
'video/x-flv': 'flv',
}
if mime in mapping:
return mapping[mime]
else:
return mime.split('/')[1]
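# Example (for reference, not in the original source): mime_to_container('video/mp4')
# returns 'mp4'; MIME types missing from the mapping fall back to their subtype,
# e.g. mime_to_container('audio/ogg') -> 'ogg'.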
def parse_host(host):
"""Parses host name and port number from a string.
"""
if re.match(r'^(\d+)$', host) is not None:
return ("0.0.0.0", int(host))
if re.match(r'^(\w+)://', host) is None:
host = "//" + host
o = parse.urlparse(host)
hostname = o.hostname or "0.0.0.0"
port = o.port or 0
return (hostname, port)
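# A small illustrative helper (not part of the original module, never called):
# parse_host() accepts either a bare port number or a host[:port] string.
def _parse_host_examples():
    assert parse_host('8080') == ('0.0.0.0', 8080)
    assert parse_host('127.0.0.1:3128') == ('127.0.0.1', 3128)
    assert parse_host('localhost') == ('localhost', 0)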
def set_proxy(proxy):
proxy_handler = request.ProxyHandler({
'http': '%s:%s' % proxy,
'https': '%s:%s' % proxy,
})
opener = request.build_opener(proxy_handler)
request.install_opener(opener)
def unset_proxy():
proxy_handler = request.ProxyHandler({})
opener = request.build_opener(proxy_handler)
request.install_opener(opener)
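# Illustrative sketch (not part of the original module, never called): set_proxy()
# expects a (host, port) tuple rather than a 'host:port' string; the values below
# are placeholders.
def _proxy_usage_example():
    set_proxy(('127.0.0.1', '8118'))  # route urllib traffic through the proxy
    unset_proxy()                     # restore direct connections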
# DEPRECATED in favor of set_proxy() and unset_proxy()
def set_http_proxy(proxy):
if proxy is None: # Use system default setting
proxy_support = request.ProxyHandler()
elif proxy == '': # Don't use any proxy
proxy_support = request.ProxyHandler({})
else: # Use proxy
proxy_support = request.ProxyHandler(
{'http': '%s' % proxy, 'https': '%s' % proxy}
)
opener = request.build_opener(proxy_support)
request.install_opener(opener)
def print_more_compatible(*args, **kwargs):
    """print() wrapper for Python < 3.3, which does not support the 'flush' keyword.

    Although the function could be named print so that it shadows the built-in
    automatically, it keeps a distinct name and is only aliased on import, to
    avoid confusion.
    """
    import builtins as __builtin__
# nothing happens on py3.3 and later
if sys.version_info[:2] >= (3, 3):
return __builtin__.print(*args, **kwargs)
# in lower pyver (e.g. 3.2.x), remove 'flush' keyword and flush it as requested
doFlush = kwargs.pop('flush', False)
ret = __builtin__.print(*args, **kwargs)
if doFlush:
kwargs.get('file', sys.stdout).flush()
return ret
def download_main(download, download_playlist, urls, playlist, **kwargs):
for url in urls:
if re.match(r'https?://', url) is None:
url = 'http://' + url
if playlist:
download_playlist(url, **kwargs)
else:
download(url, **kwargs)
def load_cookies(cookiefile):
global cookies
try:
cookies = cookiejar.MozillaCookieJar(cookiefile)
cookies.load()
except Exception:
import sqlite3
cookies = cookiejar.MozillaCookieJar()
con = sqlite3.connect(cookiefile)
cur = con.cursor()
try:
cur.execute("""SELECT host, path, isSecure, expiry, name, value
FROM moz_cookies""")
for item in cur.fetchall():
c = cookiejar.Cookie(
0, item[4], item[5], None, False, item[0],
item[0].startswith('.'), item[0].startswith('.'),
item[1], False, item[2], item[3], item[3] == '', None,
None, {},
)
cookies.set_cookie(c)
except Exception:
pass
# TODO: Chromium Cookies
# SELECT host_key, path, secure, expires_utc, name, encrypted_value
# FROM cookies
# http://n8henrie.com/2013/11/use-chromes-cookies-for-easier-downloading-with-python-requests/
def set_socks_proxy(proxy):
try:
import socks
socks_proxy_addrs = proxy.split(':')
socks.set_default_proxy(
socks.SOCKS5,
socks_proxy_addrs[0],
int(socks_proxy_addrs[1])
)
socket.socket = socks.socksocket
def getaddrinfo(*args):
return [
(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))
]
socket.getaddrinfo = getaddrinfo
except ImportError:
        log.w(
            'Error importing PySocks library, socks proxy ignored. '
            'In order to use a socks proxy, please install PySocks.'
        )
def script_main(download, download_playlist, **kwargs):
logging.basicConfig(format='[%(levelname)s] %(message)s')
def print_version():
version = get_version(
kwargs['repo_path'] if 'repo_path' in kwargs else __version__
)
log.i(
'version {}, a tiny downloader that scrapes the web.'.format(
version
)
)
parser = argparse.ArgumentParser(
prog='you-get',
usage='you-get [OPTION]... URL...',
description='A tiny downloader that scrapes the web',
add_help=False,
)
parser.add_argument(
'-V', '--version', action='store_true',
help='Print version and exit'
)
parser.add_argument(
'-h', '--help', action='store_true',
help='Print this help message and exit'
)
dry_run_grp = parser.add_argument_group(
'Dry-run options', '(no actual downloading)'
)
dry_run_grp = dry_run_grp.add_mutually_exclusive_group()
dry_run_grp.add_argument(
'-i', '--info', action='store_true', help='Print extracted information'
)
dry_run_grp.add_argument(
'-u', '--url', action='store_true',
help='Print extracted information with URLs'
)
dry_run_grp.add_argument(
'--json', action='store_true',
help='Print extracted URLs in JSON format'
)
download_grp = parser.add_argument_group('Download options')
download_grp.add_argument(
'-n', '--no-merge', action='store_true', default=False,
help='Do not merge video parts'
)
download_grp.add_argument(
'--no-caption', action='store_true',
help='Do not download captions (subtitles, lyrics, danmaku, ...)'
)
download_grp.add_argument(
'-f', '--force', action='store_true', default=False,
help='Force overwriting existing files'
)
download_grp.add_argument(
'-F', '--format', metavar='STREAM_ID',
help='Set video format to STREAM_ID'
)
download_grp.add_argument(
'-O', '--output-filename', metavar='FILE', help='Set output filename'
)
download_grp.add_argument(
'-o', '--output-dir', metavar='DIR', default='.',
help='Set output directory'
)
download_grp.add_argument(
'-p', '--player', metavar='PLAYER',
help='Stream extracted URL to a PLAYER'
)
download_grp.add_argument(
'-c', '--cookies', metavar='COOKIES_FILE',
help='Load cookies.txt or cookies.sqlite'
)
download_grp.add_argument(
'-t', '--timeout', metavar='SECONDS', type=int, default=600,
help='Set socket timeout'
)
download_grp.add_argument(
'-d', '--debug', action='store_true',
help='Show traceback and other debug info'
)
download_grp.add_argument(
'-I', '--input-file', metavar='FILE', type=argparse.FileType('r'),
help='Read non-playlist URLs from FILE'
)
download_grp.add_argument(
'-P', '--password', help='Set video visit password to PASSWORD'
)
download_grp.add_argument(
'-l', '--playlist', action='store_true',
help='Prefer to download a playlist'
)
download_grp.add_argument(
'-a', '--auto-rename', action='store_true', default=False,
help='Auto rename same name different files'
)
proxy_grp = parser.add_argument_group('Proxy options')
proxy_grp = proxy_grp.add_mutually_exclusive_group()
proxy_grp.add_argument(
'-x', '--http-proxy', metavar='HOST:PORT',
help='Use an HTTP proxy for downloading'
)
proxy_grp.add_argument(
'-y', '--extractor-proxy', metavar='HOST:PORT',
help='Use an HTTP proxy for extracting only'
)
proxy_grp.add_argument(
'--no-proxy', action='store_true', help='Never use a proxy'
)
proxy_grp.add_argument(
'-s', '--socks-proxy', metavar='HOST:PORT',
        help='Use a SOCKS5 proxy for downloading'
)
download_grp.add_argument('--stream', help=argparse.SUPPRESS)
download_grp.add_argument('--itag', help=argparse.SUPPRESS)
parser.add_argument('URL', nargs='*', help=argparse.SUPPRESS)
args = parser.parse_args()
if args.help:
print_version()
parser.print_help()
sys.exit()
if args.version:
print_version()
sys.exit()
if args.debug:
# Set level of root logger to DEBUG
logging.getLogger().setLevel(logging.DEBUG)
global force
global dry_run
global json_output
global player
global extractor_proxy
global output_filename
global auto_rename
output_filename = args.output_filename
extractor_proxy = args.extractor_proxy
info_only = args.info
if args.force:
force = True
if args.auto_rename:
auto_rename = True
if args.url:
dry_run = True
if args.json:
json_output = True
        # to fix extractors that do not use VideoExtractor
dry_run = True
info_only = False
if args.cookies:
load_cookies(args.cookies)
caption = True
stream_id = args.format or args.stream or args.itag
if args.no_caption:
caption = False
if args.player:
player = args.player
caption = False
if args.no_proxy:
set_http_proxy('')
else:
set_http_proxy(args.http_proxy)
if args.socks_proxy:
set_socks_proxy(args.socks_proxy)
URLs = []
if args.input_file:
logging.debug('you are trying to load urls from %s', args.input_file)
if args.playlist:
log.e(
"reading playlist from a file is unsupported "
"and won't make your life easier"
)
sys.exit(2)
URLs.extend(args.input_file.read().splitlines())
args.input_file.close()
URLs.extend(args.URL)
if not URLs:
parser.print_help()
sys.exit()
socket.setdefaulttimeout(args.timeout)
try:
extra = {}
if extractor_proxy:
extra['extractor_proxy'] = extractor_proxy
if stream_id:
extra['stream_id'] = stream_id
download_main(
download, download_playlist,
URLs, args.playlist,
output_dir=args.output_dir, merge=not args.no_merge,
info_only=info_only, json_output=json_output, caption=caption,
password=args.password,
**extra
)
except KeyboardInterrupt:
if args.debug:
raise
else:
sys.exit(1)
except UnicodeEncodeError:
if args.debug:
raise
log.e(
'[error] oops, the current environment does not seem to support '
'Unicode.'
)
log.e('please set it to a UTF-8-aware locale first,')
log.e(
'so as to save the video (with some Unicode characters) correctly.'
)
log.e('you can do it like this:')
log.e(' (Windows) % chcp 65001 ')
log.e(' (Linux) $ LC_CTYPE=en_US.UTF-8')
sys.exit(1)
except Exception:
if not args.debug:
log.e('[error] oops, something went wrong.')
log.e(
'don\'t panic, c\'est la vie. please try the following steps:'
)
log.e(' (1) Rule out any network problem.')
log.e(' (2) Make sure you-get is up-to-date.')
log.e(' (3) Check if the issue is already known, on')
log.e(' https://github.com/soimort/you-get/wiki/Known-Bugs')
log.e(' https://github.com/soimort/you-get/issues')
log.e(' (4) Run the command with \'--debug\' option,')
log.e(' and report this issue with the full output.')
else:
print_version()
log.i(args)
raise
sys.exit(1)
def google_search(url):
keywords = r1(r'https?://(.*)', url)
url = 'https://www.google.com/search?tbm=vid&q=%s' % parse.quote(keywords)
page = get_content(url, headers=fake_headers)
videos = re.findall(
r'<a href="(https?://[^"]+)" onmousedown="[^"]+">([^<]+)<', page
)
vdurs = re.findall(r'<span class="vdur _dwc">([^<]+)<', page)
durs = [r1(r'(\d+:\d+)', unescape_html(dur)) for dur in vdurs]
print('Google Videos search:')
for v in zip(videos, durs):
print('- video: {} [{}]'.format(
unescape_html(v[0][1]),
v[1] if v[1] else '?'
))
print('# you-get %s' % log.sprint(v[0][0], log.UNDERLINE))
print()
print('Best matched result:')
    return videos[0][0]
def url_to_module(url):
try:
video_host = r1(r'https?://([^/]+)/', url)
video_url = r1(r'https?://[^/]+(.*)', url)
assert video_host and video_url
except AssertionError:
url = google_search(url)
video_host = r1(r'https?://([^/]+)/', url)
video_url = r1(r'https?://[^/]+(.*)', url)
if video_host.endswith('.com.cn') or video_host.endswith('.ac.cn'):
video_host = video_host[:-3]
domain = r1(r'(\.[^.]+\.[^.]+)$', video_host) or video_host
assert domain, 'unsupported url: ' + url
# all non-ASCII code points must be quoted (percent-encoded UTF-8)
url = ''.join([ch if ord(ch) in range(128) else parse.quote(ch) for ch in url])
k = r1(r'([^.]+)', domain)
if k in SITES:
return (
import_module('.'.join(['you_get', 'extractors', SITES[k]])),
url
)
else:
import http.client
video_host = r1(r'https?://([^/]+)/', url) # .cn could be removed
if url.startswith('https://'):
conn = http.client.HTTPSConnection(video_host)
else:
conn = http.client.HTTPConnection(video_host)
conn.request('HEAD', video_url, headers=fake_headers)
res = conn.getresponse()
location = res.getheader('location')
if location and location != url and not location.startswith('/'):
return url_to_module(location)
else:
return import_module('you_get.extractors.universal'), url
def any_download(url, **kwargs):
m, url = url_to_module(url)
m.download(url, **kwargs)
def any_download_playlist(url, **kwargs):
m, url = url_to_module(url)
m.download_playlist(url, **kwargs)
def main(**kwargs):
script_main(any_download, any_download_playlist, **kwargs)
|
the-stack_0_22905 | import unittest
import tempfile
from pathlib import Path
from datetime import datetime, timedelta
from sitegen.content import ContentTag, TagCollection, PageContent, SiteInfo
from common import FakeTemplate, FakeTemplates, CollectionTestBase
CONFIG = {'site': {'url': 'http://bb.com', 'title': 'HELLO'}}
class ContentTagTests(unittest.TestCase, CollectionTestBase):
def setUp(self):
self.workdir = tempfile.TemporaryDirectory()
def tearDown(self):
self.workdir.cleanup()
def test_get_context(self):
ct = ContentTag('test')
date = datetime.now().replace(second=0, microsecond=0) + timedelta(hours=1)
ct.append_content_file(self.make_content_file('blog', 'the-entry', 'The Entry', date=date))
context = ct.get_context(CONFIG)
assert 'items' in context
assert len(context.pop('items')) == 1
assert context['page_content'] == PageContent(title='test',
description='',
canonical_url='http://bb.com/tag/test',
date=date)
assert context['site_info'] == SiteInfo(site_name='HELLO',
base_url='http://bb.com',
section='tag')
def test_date_sorting(self):
ct = ContentTag('blog')
ct.append_content_file(self.make_content_file('blog', 'top-content', 'The Entry'))
ct.append_content_file(self.make_content_file('blog', 'middle-content', 'The Entry',
date=datetime.now() - timedelta(hours=1)))
ct.append_content_file(self.make_content_file('blog', 'bottom-content', 'The Entry',
date=datetime.now() - timedelta(hours=2)))
context = ct.get_context(CONFIG)
items = context['items']
assert len(items) == 3
assert items[0].name == 'top-content.md'
assert items[1].name == 'middle-content.md'
assert items[2].name == 'bottom-content.md'
def test_render_only_draft(self):
ct = ContentTag('tech')
ct.append_content_file(self.make_content_file('blog', 'the-content', 'The Entry', draft=True))
templates = FakeTemplates([FakeTemplate('list.html')])
rendered = ct.render(CONFIG, templates, self.workdir.name)
output_path = Path(self.workdir.name) / 'tag/tech/index.html'
assert output_path.exists()
assert output_path.read_text().startswith('list.html')
def test_render(self):
ct = ContentTag('tech')
ct.append_content_file(self.make_content_file('blog', 'the-content', 'The Entry'))
templates = FakeTemplates([FakeTemplate('list.html')])
rendered = ct.render(CONFIG, templates, self.workdir.name)
output_path = Path(self.workdir.name) / 'tag/tech/index.html'
assert output_path.exists()
assert output_path.read_text().startswith('list.html')
def test_prefer_tag_template(self):
ct = ContentTag('tech')
ct.append_content_file(self.make_content_file('blog', 'the-content', 'The Entry', draft=True))
templates = FakeTemplates([FakeTemplate('list.html'), FakeTemplate('tag.html')])
ct.render(CONFIG, templates, self.workdir.name)
output_path = Path(self.workdir.name) / 'tag/tech/index.html'
assert output_path.exists()
assert output_path.read_text().startswith('tag.html')
class TagCollectionTests(unittest.TestCase, CollectionTestBase):
def setUp(self):
self.workdir = tempfile.TemporaryDirectory()
def tearDown(self):
self.workdir.cleanup()
def test_append_content_file(self):
tc = TagCollection()
cf = self.make_content_file('blog', 'the-entry', 'The Entry', tags=['hello', 'why-not'])
tc.append_content_file(cf)
assert len(tc.content_tags) == 2
for tag in ['hello', 'why-not']:
content_tag = tc.content_tags[tag]
assert content_tag.content_files[0] is cf
def test_get_context(self):
tc = TagCollection()
date = datetime.now().replace(second=0, microsecond=0) + timedelta(hours=1)
cf = self.make_content_file('blog', 'the-entry', 'The Entry', tags=['hello', 'why-not'], date=date)
tc.append_content_file(cf)
context = tc.get_context(CONFIG)
assert context.pop('items') == list(tc.content_tags.values())
assert context['page_content'] == PageContent(title='Tags',
description='',
canonical_url='http://bb.com/tag/',
date=date)
assert context['site_info'] == SiteInfo(site_name='HELLO',
base_url='http://bb.com',
section='tag')
def test_render_empty(self):
"""If there are no tags, don't do anything"""
tc = TagCollection()
templates = FakeTemplates([FakeTemplate('list.html')])
tc.render({'title': 'The Blog', 'baseurl': 'http://bb.com'}, templates, self.workdir.name)
tag_index = Path(self.workdir.name) / 'tag' / 'index.html'
assert not tag_index.exists()
def test_render(self):
tc = TagCollection()
cf = self.make_content_file('blog', 'the-entry', 'The Entry', tags=['hello', 'why-not'])
tc.append_content_file(cf)
templates = FakeTemplates([FakeTemplate('list.html')])
tc.render(CONFIG, templates, self.workdir.name)
tag_index = Path(self.workdir.name) / 'tag' / 'index.html'
assert tag_index.exists()
hello_index = Path(self.workdir.name) / 'tag' / 'hello' / 'index.html'
assert hello_index.exists()
whynot_index = Path(self.workdir.name) / 'tag' / 'why-not' / 'index.html'
assert whynot_index.exists()
|
the-stack_0_22909 |
# This code is inspired by Matthew Chan's solution to the Cart-Pole problem:
# https://gist.github.com/tuzzer/90701191b50c2e7bafca167858fcb234
import gym
env = gym.make('LunarLander-v2')
# Nop, fire left engine, main engine, right engine
ACTIONS = env.action_space.n
# Landing pad is always at coordinates (0,0). Coordinates are the first two numbers in state vector.
# Reward for moving from the top of the screen to landing pad and zero speed is about 100..140 points.
# If lander moves away from landing pad it loses that reward. Episode finishes if the lander crashes or
# comes to rest, receiving additional -100 or +100 points. Each leg ground contact is +10. Firing main
# engine is -0.3 points each frame. Solved is 200 points.
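# For reference (based on the standard Gym LunarLander-v2 observation space): the state
# vector is 8-dimensional: (x, y, vx, vy, angle, angular velocity, left leg contact,
# right leg contact). Only the first five components are discretized below.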
import numpy as np
import random
def discretize_state(state):
dstate = list(state[:5])
dstate[0] = int(0.5*(state[0]+0.7)*10/2.0) # pos x
dstate[1] = int(0.5*(state[1]+0.5)*10/2.0) # pos y
dstate[2] = int(0.5*(state[2]+1.5)*10/3.0) # vel x
dstate[3] = int(0.5*(state[3]+2)*10/3.0) # vel y
dstate[4] = int(0.5*(state[4]+3.14159)*10/(2*3.14159)) # angle
if dstate[0] >= 5: dstate[0] = 4
if dstate[1] >= 5: dstate[1] = 4
if dstate[2] >= 5: dstate[2] = 4
if dstate[3] >= 5: dstate[3] = 4
if dstate[4] >= 5: dstate[4] = 4
if dstate[0] < 0: dstate[0] = 0
if dstate[1] < 0: dstate[1] = 0
if dstate[2] < 0: dstate[2] = 0
if dstate[3] < 0: dstate[3] = 0
if dstate[4] < 0: dstate[4] = 0
return tuple(dstate)
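# Quick illustrative sanity check (not part of the original script, the sample observation
# is made up): every discretized component should land in the 0..4 bucket range that
# indexes the Q-table.
assert all(0 <= v <= 4 for v in discretize_state([0.0, 0.5, -0.3, -1.0, 0.1, 0.0, 0.0, 0.0]))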
def run(num_episodes, alpha, gamma, explore_mult):
max_rewards = []
last_reward = []
qtable = np.subtract(np.zeros((5, 5, 5, 5, 5, ACTIONS)), 100) # start all rewards at -100
explore_rate = 1.0
for episode in range(num_episodes):
s = env.reset()
state = discretize_state(s)
for step in range(10000):
# select action
if random.random() < explore_rate:
action = random.choice(range(ACTIONS))
else:
action = np.argmax(qtable[state])
(new_s, reward, done, _) = env.step(action)
new_state = discretize_state(new_s)
# update Q
best_future_q = np.amax(qtable[new_state]) # returns best possible reward from next state
prior_val = qtable[state + (action,)]
qtable[state + (action,)] = (1.0-alpha)*prior_val + alpha*(reward + gamma * best_future_q)
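            # This is the standard tabular Q-learning update:
            #   Q(s, a) <- (1 - alpha) * Q(s, a) + alpha * (reward + gamma * max_a' Q(s', a'))
            # i.e. Q(s, a) is moved a fraction alpha towards the bootstrapped target.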
state = new_state
if done or step == 9999:
last_reward.append(reward)
break
if explore_rate > 0.01:
explore_rate *= explore_mult
max_rewards.append(np.amax(qtable))
    return (max_rewards, last_reward[-50:], qtable)  # max Q per episode, outcomes of last 50 episodes, final Q-table
num_episodes = 100
for alpha in [0.05, 0.10, 0.15]:
for gamma in [0.85, 0.90, 0.95]:
(max_rewards, last_reward, _) = run(num_episodes=num_episodes, alpha=alpha, gamma=gamma, explore_mult=0.995)
print("alpha = %.2f, gamma = %.2f, mean last 50 outcomes = %.2f, q max: %.2f, q mean: %.2f" % (alpha, gamma, np.mean(last_reward), np.max(max_rewards), np.mean(max_rewards)))
(max_rewards, last_reward, qtable) = run(num_episodes=200, alpha=0.1, gamma=0.95, explore_mult=0.995)
print("mean last 50 outcomes = %.2f, q max: %.2f, q mean: %.2f" % (np.mean(last_reward), np.max(max_rewards), np.mean(max_rewards)))
np.save('qtable.npy', qtable)
# Use best qtable to play the game (no learning anymore)
import gym
import numpy as np
env = gym.make('LunarLander-v2')
qtable = np.load('qtable.npy')
for i in range(100):
s = env.reset()
state = discretize_state(s)
for step in range(10000):
env.render()
# select action
action = np.argmax(qtable[state])
(new_s, reward, done, _) = env.step(action)
new_state = discretize_state(new_s)
if done or step == 9999:
break
state = new_state
|
the-stack_0_22911 | from django.contrib.auth.models import User
from django.core.paginator import Paginator
from django.shortcuts import render
from blog.models.post import Post
NUM_OF_POSTS = 5
def home(request, username=None):
first_name = ''
last_name = ''
if username:
user = User.objects.get(username=username)
first_name = user.first_name
last_name = user.last_name
post_list = Post.objects.filter(user=user)
else:
post_list = Post.objects.all()
post_list = post_list.order_by('-pub_date')
    paginator = Paginator(post_list, NUM_OF_POSTS)  # Show NUM_OF_POSTS posts per page
page = request.GET.get('page')
posts = paginator.get_page(page)
return render(request, 'blog/home.html', {'posts': posts,
'first_name': first_name,
'last_name': last_name})
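# A hedged sketch (not taken from this project) of how this view could be wired up in
# urls.py; the module path, route patterns and names below are assumptions:
#
#   from django.urls import path
#   from blog.views.home import home
#
#   urlpatterns = [
#       path('', home, name='home'),
#       path('<str:username>/', home, name='user_home'),
#   ]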
|
the-stack_0_22912 | """
problog.nnf_formula - d-DNNF
----------------------------
Provides access to d-DNNF formulae.
..
Part of the ProbLog distribution.
Copyright 2015 KU Leuven, DTAI Research Group
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
# from problog.ground_gringo import check_evidence
import tempfile
import os
import subprocess
import time
from collections import defaultdict, Counter
from . import system_info
from .evaluator import Evaluator, EvaluatableDSP, SemiringProbability
from .errors import InconsistentEvidenceError
from .formula import LogicDAG
from .cnf_formula import CNF, CNF_ASP
from .core import transform
from .errors import CompilationError
from .util import Timer, subprocess_check_output, subprocess_check_call
from .logic import Constant
class DSharpError(CompilationError):
"""DSharp has crashed."""
def __init__(self):
msg = "DSharp has encountered an error"
if system_info["os"] == "darwin":
msg += ". This is a known issue. See KNOWN_ISSUES for details on how to resolve this problem"
CompilationError.__init__(self, msg)
class DDNNF(LogicDAG, EvaluatableDSP):
"""A d-DNNF formula."""
transform_preference = 20
# noinspection PyUnusedLocal,PyUnusedLocal,PyUnusedLocal
def __init__(self, neg_cycles=False, **kwdargs):
LogicDAG.__init__(self, auto_compact=False, **kwdargs)
# self.n_models = n_models
self.neg_cycles = neg_cycles
def _create_evaluator(self, semiring, weights, **kwargs):
return SimpleDDNNFEvaluator(self, semiring, weights, self.neg_cycles, **kwargs)
class SimpleDDNNFEvaluator(Evaluator):
"""Evaluator for d-DNNFs."""
def __init__(self, formula, semiring, weights=None, neg_cycles=False, **kwargs):
Evaluator.__init__(self, formula, semiring, weights, **kwargs)
self.cache_intermediate = {} # weights of intermediate nodes
self.cache_models = {} # weights of models
self.neg_cycles = neg_cycles
self.keytotal = {}
self.keyworlds = {}
self.models = []
self.multi_sm = {}
self.valid_choices = set()
# print(formula.to_dot())
self.multi_stable_models()
def _initialize(self, with_evidence=True):
self.weights.clear()
model_weights = self.formula.extract_weights(self.semiring, self.given_weights)
self.weights = model_weights.copy()
if with_evidence:
for ev in self.evidence():
self.set_evidence(abs(ev), ev > 0)
if self.semiring.is_zero(self._get_z()):
raise InconsistentEvidenceError(context=" during evidence evaluation")
def propagate(self):
self._initialize()
def _get_z(self):
result = self.get_root_weight()
result = self.correct_weight(result)
return result
def evaluate_evidence(self, recompute=False):
return self.semiring.result(
self._evaluate_evidence(recompute=recompute), self.formula
)
# noinspection PyUnusedLocal
def _evaluate_evidence(self, recompute=False):
self._initialize(False)
for ev in self.evidence():
self._set_value(abs(ev), ev > 0)
result = self.get_root_weight()
return result
def evaluate_fact(self, node):
return self.evaluate(node)
# Basic
# def evaluate(self, node):
# if node == 0:
# if not self.semiring.is_nsp():
# result = self.semiring.one()
# else:
# result = self.get_root_weight()
# result = self.semiring.normalize(result, self._get_z())
# elif node is None:
# result = self.semiring.zero()
# else:
# ps = self._get_weight(abs(node))
# p = self._aggregate_weights(ps)
# ns = self._get_weight(-abs(node))
# n = self._aggregate_weights(ns)
# self._set_value(abs(node), (node > 0))
# result = self.get_root_weight()
# self._reset_value(abs(node), p, n)
# if self.has_evidence() or self.semiring.is_nsp():
# # print(result, self._get_z())
# result = self.semiring.normalize(result, self._get_z())
# return self.semiring.result(result, self.formula)
# Aggregate and correct later
def evaluate(self, node):
if node == 0:
if not self.semiring.is_nsp():
result = self.semiring.one()
else:
result = self.get_root_weight()
result = self.semiring.normalize(result, self._get_z())
elif node is None:
result = self.semiring.zero()
else:
p = self._get_weight(abs(node))
n = self._get_weight(-abs(node))
self._set_value(abs(node), (node > 0))
result = self.get_root_weight()
self._reset_value(abs(node), p, n)
# if not abs(node) in self.evidence():
# if not self.has_evidence():
result = self.correct_weight(result, node)
# if self.has_evidence() or self.semiring.is_nsp() or self.pasp:
result = self.semiring.normalize(result, self._get_z())
result = self.semiring.result(result, self.formula)
return result
def check_model_evidence(self, model):
# we overcount only the models that are compatible with evidence
ok_ev = True
for e in self.evidence():
ok_ev = ok_ev and e in model
return ok_ev
def correct_weight(self, w, node=None):
"""
compute the unnormalized weight first, then for each 1:many model to which the node belongs
remove the weight of the other models that the unnormalized weight includes
"""
for pw in self.multi_sm:
if pw in self.cache_models:
w_pw = self.cache_models[pw]
else:
w_pw = self.semiring.one()
for atom in pw:
w_at = self._get_weight(atom)
w_pw = self.semiring.times(w_pw, w_at)
n = len(self.multi_sm[pw])
# consider only models that are possible w.r.t. evidence (but n is w.r.t. all anyway)
models = [m for m in self.multi_sm[pw] if self.check_model_evidence(m)]
if not self.semiring.is_zero(w_pw):
for model in models:
if node in model or node is None:
# print(">", node, model)
extra_norm = self.semiring.value(1-1/n)
extra_weight = self.semiring.times(w_pw, extra_norm)
# w-extra = 1-(extra+(1-w))
a = self.semiring.negate(w)
b = self.semiring.plus(extra_weight,a)
w = self.semiring.negate(b)
return w
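    # Worked illustration of the correction above (numbers are made up): if a total
    # choice with weight w_pw = 0.3 admits n = 3 stable models, the unnormalized
    # weight counts 0.3 once per model (0.9 in total). Subtracting
    # w_pw * (1 - 1/n) = 0.2 for each model removes the over-count and leaves 0.3,
    # i.e. each stable model effectively contributes w_pw / n = 0.1.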
def query(self, index):
if len(list(self.evidence()))==0:
root_weight = self._get_z()
inconsistent_weight = self.semiring.negate(root_weight)
true_weight = self.evaluate(index)
false_weight = self.semiring.negate(self.semiring.plus(inconsistent_weight,true_weight))
return (true_weight, false_weight, inconsistent_weight)
else:
true_weight = self.evaluate(index)
false_weight = self.semiring.negate(true_weight)
return (true_weight, false_weight, self.semiring.zero())
# self._initialize()
# weights = self.weights.copy()
# valid_mass = self.semiring.zero()
# choice = self.valid_choices.pop()
# self.valid_choices.add(choice)
# for atom in choice:
# weights[abs(atom)] = (self.semiring.zero(), self.semiring.zero())
# valid_choices_weights = {}
# for vc in self.valid_choices:
# w = self.semiring.one()
# for atom in vc:
# aw = self.weights[abs(atom)][atom<0]
# w = self.semiring.times(w,aw)
# valid_choices_weights[vc] = w
# valid_mass = self.semiring.plus(valid_mass,w)
# for atom in vc:
# val = atom<0
# if val:
# neg = weights[abs(atom)][val]
# weights[abs(atom)] = (weights[abs(atom)][val], self.semiring.plus(neg, w))
# else:
# pos = weights[abs(atom)][val]
# weights[abs(atom)] = (self.semiring.plus(pos, w), weights[abs(atom)][val])
# p = self.semiring.zero()
# for vc in self.valid_choices:
# self.weights = weights
# for atom in vc:
# if atom>0:
# self.set_weight(abs(atom), self.semiring.one(), self.semiring.zero())
# else:
# self.set_weight(abs(atom), self.semiring.zero(), self.semiring.one())
# e = self.evaluate(index)
# pvc = self.semiring.times(valid_choices_weights[vc], e)
# p = self.semiring.plus(p, pvc)
# i = self.semiring.negate(valid_mass)
# tot = self.semiring.plus(p, i)
# n = self.semiring.negate(tot)
# return (p, n, i)
# Aggregate and correct later
def _reset_value(self, index, pos, neg):
self.set_weight(index, pos, neg)
# Basic
# def get_root_weight(self):
# """
# Get the WMC of the root of this formula.
# :return: The WMC of the root of this formula (WMC of node len(self.formula)), multiplied with weight of True
# (self.weights.get(0)).
# """
# weights = self._get_weight(len(self.formula))
# result = self._aggregate_weights(weights)
# return (
# self.semiring.times(result, self.weights.get(0)[0])
# if self.weights.get(0) is not None
# else result
# )
# Aggregate and correct
def get_root_weight(self):
"""
Get the WMC of the root of this formula.
:return: The WMC of the root of this formula (WMC of node len(self.formula)), multiplied with weight of True
(self.weights.get(0)).
"""
result = self._get_weight(len(self.formula))
return (
self.semiring.times(result, self.weights.get(0)[0])
if self.weights.get(0) is not None
else result
)
# Basic
# def _get_weight(self, index):
# if index == 0:
# return [self.semiring.one()]
# elif index is None:
# return [self.semiring.zero()]
# else:
# abs_index = abs(index)
# w = self.weights.get(abs_index) # Leaf nodes
# if w is not None:
# return [w[index < 0]]
# w = self.cache_intermediate.get(abs_index) # Intermediate nodes
# if w is None:
# w = self._calculate_weight(index)
# self.cache_intermediate[abs_index] = w
# return w
# Aggregate and correct later
def _get_weight(self, index):
if index == 0:
return self.semiring.one()
elif index is None:
return self.semiring.zero()
else:
abs_index = abs(index)
w = self.weights.get(abs_index) # Leaf nodes
if w is not None:
return w[index < 0]
w = self.cache_intermediate.get(abs_index) # Intermediate nodes
if w is None:
w = self._calculate_weight(index)
self.cache_intermediate[abs_index] = w
return w
def set_weight(self, index, pos, neg):
# index = index of atom in weights, so atom2var[key] = index
self.weights[index] = (pos, neg)
self.cache_intermediate.clear()
self.cache_models.clear()
def set_evidence(self, index, value):
curr_pos_weight, curr_neg_weight = self.weights.get(index)
pos, neg = self.semiring.to_evidence(
curr_pos_weight, curr_neg_weight, sign=value
)
if (value and self.semiring.is_zero(curr_pos_weight)) or (
not value and self.semiring.is_zero(curr_neg_weight)
):
raise InconsistentEvidenceError(self._deref_node(index))
self.set_weight(index, pos, neg)
def _deref_node(self, index):
return self.formula.get_node(index).name
# Aggregate and correct later
def _set_value(self, index, value):
"""Set value for given node.
:param index: index of node
:param value: value
"""
if value:
pos = self._get_weight(index)
self.set_weight(index, pos, self.semiring.zero())
else:
neg = self._get_weight(-index)
self.set_weight(index, self.semiring.zero(), neg)
# Basic
# def _set_value(self, index, value):
# """Set value for given node.
# :param index: index of node
# :param value: value
# """
# if value:
# poss = self._get_weight(index)
# pos = self._aggregate_weights(poss)
# self.set_weight(index, pos, self.semiring.zero())
# else:
# negs = self._get_weight(-index)
# neg = self._aggregate_weights(negs)
# self.set_weight(index, self.semiring.zero(), neg)
# # Basic
# def _aggregate_weights(self, weights):
# result = self.semiring.zero()
# for w in weights:
# result = self.semiring.plus(result, w)
# return result
# Basic: keep 0 worlds
# def _calculate_weight(self, key):
# assert key != 0
# assert key is not None
# # assert(key > 0)
# node = self.formula.get_node(abs(key))
# ntype = type(node).__name__
# if ntype == "atom":
# return [self.semiring.one()]
# else:
# assert key > 0
# childprobs = [self._get_weight(c) for c in node.children]
# # print(key, childprobs, len(self.multi_sm))
# if ntype == "conj":
# if len(self.multi_sm) == 0: # no multiple stable models: aggregate without normalization
# c = self.semiring.one()
# for p in childprobs:
# c = self.semiring.times(c, p[0])
# return [c]
# else:
# w_conj = list(self.wproduct(childprobs))
# n_children = len(w_conj)
# if key in self.keyworlds: # if we have to normalize something
# worlds = self.keyworlds[key]
# for c in range(0, n_children): # follow the list
# pw = frozenset(worlds[c])
# n = self.multi_sm.get(pw,1) # get normalization constant
# if n!=1 and not self.semiring.is_zero(w_conj[c]):
# norm = self.semiring.value(1/n)
# w_conj[c] = self.semiring.times(w_conj[c],norm) # replace with normalized
# return w_conj
# elif ntype == "disj":
# if len(self.multi_sm) == 0:
# d = self.semiring.zero()
# for p in childprobs:
# d = self.semiring.plus(d, p[0])
# return [d]
# else:
# cp_disj = []
# for weights in childprobs:
# cp_disj += [w for w in weights]
# return cp_disj
# else:
# raise TypeError("Unexpected node type: '%s'." % ntype)
# Aggregate and correct later
def _calculate_weight(self, key):
assert key != 0
assert key is not None
# assert(key > 0)
node = self.formula.get_node(abs(key))
ntype = type(node).__name__
if ntype == "atom":
return self.semiring.one()
else:
assert key > 0
childprobs = [self._get_weight(c) for c in node.children]
# print(key, list(zip(node.children, childprobs)))
if ntype == "conj":
p = self.semiring.one()
for c in childprobs:
p = self.semiring.times(p, c)
return p
elif ntype == "disj":
p = self.semiring.zero()
for c in childprobs:
p = self.semiring.plus(p, c)
return p
else:
raise TypeError("Unexpected node type: '%s'." % ntype)
# def get_worlds(self, key):
# if key == 0 or key is None:
# return [[]]
# node = self.formula.get_node(abs(key))
# ntype = type(node).__name__
# if ntype == 'atom':
# # keep track of logical and probabilistic atoms
# if abs(key) in self.labelled or abs(key) in self.choices:
# return [[key]]
# else: #ignore extra stuff from compiler
# return [[]]
# else:
# assert key > 0
# childworlds = [self.get_worlds(c) for c in node.children]
# # print("cws:", key, childworlds)
# if ntype == 'conj':
# cw_conj = list(self.product(childworlds))
# # print("cj:", key, cw_conj)
# for i, w in enumerate(cw_conj): # if the conjunction corresponds to some pw
# if self.choices.issubset(self.chosen(w)): # and we made all probabilistic choices
# cw_conj[i] = [] # forget about it when handing list to the partent
# pw = [id for id in w if abs(id) in self.choices]
# fw = frozenset(pw)
# if key in self.keyworlds: # remember that on this node we might need some normalization
# self.keyworlds[key].append(fw)
# else:
# self.keyworlds[key] = [fw]
# return cw_conj # this contains partial worlds
# elif ntype == 'disj':
# disj = []
# for cws in childworlds:
# disj += [w for w in cws if self.partial_choice(w)] # just flatten or
# # print("dws:", disj)
# return disj
# else:
# raise TypeError("Unexpected node type: '%s'." % ntype)
# Aggregate later
# def get_worlds(self, key):
# if key == 0 or key is None:
# return [[]]
# node = self.formula.get_node(abs(key))
# ntype = type(node).__name__
# if ntype == 'atom':
# # keep track of logical and probabilistic atoms
# # if abs(key) in self.labelled or abs(key) in self.choices:
# # return [[key]]
# # else: #ignore extra stuff from compiler
# # return [[]]
# return [[key]]
# else:
# assert key > 0
# childworlds = [self.get_worlds(c) for c in node.children]
# # print("cws:", key, childworlds)
# if ntype == 'conj':
# cw_conj = list(self.product(childworlds))
# # print("cj:", key, cw_conj)
# return cw_conj # this contains partial worlds
# elif ntype == 'disj':
# disj = []
# for cws in childworlds:
# disj += [w for w in cws] # just flatten or
# # print("dws:", disj)
# return disj
# else:
# raise TypeError("Unexpected node type: '%s'." % ntype)
# Aggregate later
def get_worlds(self, key):
if key == 0 or key is None:
return ((),)
node = self.formula.get_node(abs(key))
ntype = type(node).__name__
if ntype == 'atom':
return ((key, ), )
else:
assert key > 0
childworlds = [self.get_worlds(c) for c in node.children]
# print("cws:", key, childworlds)
if ntype == 'conj':
cw_conj = tuple(self.tproduct(childworlds))
# print("cj:", key, len(cw_conj), [len(w) for w in cw_conj])
return cw_conj # this contains partial worlds
elif ntype == 'disj':
# disj = childworlds.flatten()
disj = sum(childworlds, ())
# print("dws:", disj)
return disj
else:
raise TypeError("Unexpected node type: '%s'." % ntype)
def tproduct(self, ar_list):
if not ar_list:
yield ()
else:
for a in ar_list[0]:
for prod in self.tproduct(ar_list[1:]):
yield a+prod
def product(self, ar_list):
if not ar_list:
yield []
else:
for a in ar_list[0]:
for prod in self.product(ar_list[1:]):
yield a+prod
def wproduct(self, ar_list):
if not ar_list:
yield self.semiring.one()
else:
for w in ar_list[0]:
for prod in self.wproduct(ar_list[1:]):
yield self.semiring.times(w, prod)
def subset_diff(self,a,b):
return a.issubset(b) and a != b
def chosen(self, world):
return set([abs(id) for id in world])
def partial_choice(self, world):
chosen_facts = self.chosen(world) & self.choices
return self.subset_diff(chosen_facts, self.choices)
# def pwproduct(self, ar_list):
# if not ar_list:
# yield ([], self.semiring.one())
# else:
# for w, p in ar_list[0]:
# for wprod, pprod in self.pwproduct(ar_list[1:]):
# yield (w+wprod, self.semiring.times(p, pprod))
# Basic
# def multi_stable_models(self):
# self.labelled = [id for _, id, _ in self.formula.labeled()] # logical and probabilistic atoms
# weights = self.formula.get_weights()
# self.choices = set([key for key in weights if not isinstance(weights[key], bool)])
# root = len(self.formula._nodes)
# # print(weights)
# # print(self.labelled)
# # print(self.choices)
# ws = self.get_worlds(root)
# n_models = len(ws)
# worlds = [w for ws in self.keyworlds.values() for w in ws]
# print(worlds)
# self.multi_sm = Counter(worlds)
# # if the number of models is a multiple of the total number from the counter
# # then there must be some non-probabilistic choice in each world
# # then normalize each world w.r.t. that number
# n_pws = sum(self.multi_sm.values())
# n_logic_choices = n_models / n_pws
# self.multi_sm = {k: c*n_logic_choices for k, c in self.multi_sm.items() if c>1 or n_logic_choices>1}
def multi_stable_models(self):
self.labelled = [id for _, id, _ in self.formula.labeled()] # logical and probabilistic atoms
weights = self.formula.get_weights()
self.choices = set([key for key in weights if not isinstance(weights[key], bool)])
# print(len(self.choices),len(self.formula))
if self.neg_cycles:
root = len(self.formula._nodes)
# print(weights)
# print(self.labelled)
# print(self.choices)
start = time.time()
self.models = self.get_worlds(root)
for model in self.models:
choices = frozenset([atom for atom in model if abs(atom) in self.choices])
self.valid_choices.add(choices)
if choices in self.multi_sm:
self.multi_sm[choices].append(model)
else:
self.multi_sm[choices] = [model]
# if the number of models is a multiple of the total number from the counter
# then there must be some non-probabilistic choice in each world
# then normalize each world w.r.t. that number
# n_pws = sum(self.multi_sm.values())
# n_pws = len(self.multi_sm)
# self.n_logic_choices = n_models / n_pws
self.multi_sm = {k:self.multi_sm[k] for k in self.multi_sm if len(self.multi_sm[k])>1}
# print(self.keyworlds)
end = time.time()
print(f"Enumeration: {round(end-start,3)}s")
# print(self.multi_sm.values())
class Compiler(object):
"""Interface to CNF to d-DNNF compiler tool."""
__compilers = {}
@classmethod
def get_default(cls):
"""Get default compiler for this system."""
if system_info.get("c2d", False):
return _compile_with_c2d
else:
return _compile_with_dsharp
@classmethod
def get(cls, name):
"""Get compiler by name (or default if name not found).
:param name: name of the compiler
:returns: function used to call compiler
"""
result = cls.__compilers.get(name)
if result is None:
result = cls.get_default()
return result
@classmethod
def add(cls, name, func):
"""Add a compiler.
:param name: name of the compiler
:param func: function used to call the compiler
"""
cls.__compilers[name] = func
# if system_info.get("c2d", False):
# noinspection PyUnusedLocal
# @transform(CNF_ASP, DDNNF)
# def _compile_with_c2d(cnf, nnf=None, smooth=True, **kwdargs):
# fd, cnf_file = tempfile.mkstemp(".cnf")
# os.close(fd)
# nnf_file = cnf_file + ".nnf"
# if smooth:
# smoothl = ["-smooth_all"]
# else:
# smoothl = []
# cmd = ["c2d"] + smoothl + ["-reduce", "-in", cnf_file]
# try:
# os.remove(cnf_file)
# except OSError:
# pass
# try:
# os.remove(nnf_file)
# except OSError:
# pass
# return _compile(cnf, cmd, cnf_file, nnf_file)
# Compiler.add("c2d", _compile_with_c2d)
# noinspection PyUnusedLocal
# @transform(CNF, DDNNF)
# @transform(CNF_ASP, DDNNF)
# def _compile_with_dsharp(cnf, nnf=None, smooth=True, **kwdargs):
# result = None
# with Timer("DSharp compilation"):
# fd1, cnf_file = tempfile.mkstemp(".cnf")
# fd2, nnf_file = tempfile.mkstemp(".nnf")
# os.close(fd1)
# os.close(fd2)
# if smooth:
# smoothl = ["-smoothNNF"]
# else:
# smoothl = []
# cmd = ["dsharp", "-Fnnf", nnf_file] + smoothl + ["-disableAllLits", cnf_file] #
# try:
# result = _compile(cnf, cmd, cnf_file, nnf_file)
# except subprocess.CalledProcessError:
# raise DSharpError()
# try:
# os.remove(cnf_file)
# except OSError:
# pass
# try:
# os.remove(nnf_file)
# except OSError:
# pass
# return result
# Compiler.add("dsharp", _compile_with_dsharp)
# noinspection PyUnusedLocal
@transform(CNF_ASP, DDNNF)
def _compile_with_dsharp_asp(cnf, nnf=None, smooth=True, **kwdargs):
result = None
with Timer('DSharp compilation'):
fd1, cnf_file = tempfile.mkstemp('.cnf')
fd2, nnf_file = tempfile.mkstemp('.nnf')
os.close(fd1)
os.close(fd2)
if smooth:
smoothl = '-smoothNNF'
else:
smoothl = ''
# cmd = ['dsharp_with_unfounded', '-noIBCP', '-evidencePropagated', '-noPP', '-Fnnf', nnf_file, smoothl, '-disableAllLits', cnf_file]
# cmd = ['dsharp_with_unfounded', '-noIBCP', '-noPP', '-Fnnf', nnf_file, smoothl, '-disableAllLits', cnf_file]
cmd = ['dsharp_with_unfounded', '-noIBCP', '-noPP', '-Fnnf', nnf_file, '-smoothNNF', '-disableAllLits', cnf_file]
try:
result = _compile(cnf, cmd, cnf_file, nnf_file)
except subprocess.CalledProcessError:
raise DSharpError()
try:
os.remove(cnf_file)
except OSError:
pass
try:
os.remove(nnf_file)
except OSError:
pass
return result
Compiler.add('dsharp_asp', _compile_with_dsharp_asp)
def _compile(cnf, cmd, cnf_file, nnf_file):
names = cnf.get_names_with_label()
if cnf.is_trivial():
nnf = DDNNF()
weights = cnf.get_weights()
for i in range(1, cnf.atomcount + 1):
nnf.add_atom(i, weights.get(i))
or_nodes = []
for i in range(1, cnf.atomcount + 1):
or_nodes.append(nnf.add_or((i, -i)))
if or_nodes:
nnf.add_and(or_nodes)
for name, node, label in names:
nnf.add_name(name, node, label)
for c in cnf.constraints():
nnf.add_constraint(c.copy())
return nnf
else:
with open(cnf_file, "w") as f:
f.write(cnf.to_dimacs())
attempts_left = 1
success = False
while attempts_left and not success:
try:
start = time.time()
# out = subprocess_check_output(cmd)
# print(out)
with open(os.devnull, "w") as OUT_NULL:
subprocess_check_call(cmd, stdout=OUT_NULL)
end = time.time()
print(f"Compilation: {round(end-start,3)}s")
# i = out.find("# of solutions:")
# j = out.find("#SAT")
# n_models = float(out[i+17:j])
success = True
except subprocess.CalledProcessError as err:
attempts_left -= 1
if attempts_left == 0:
raise err
return _load_nnf(nnf_file, cnf)
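# For reference, the .nnf format written by c2d/DSharp and parsed below: the header
# line is 'nnf v e n' (node, edge and variable counts); 'L l' defines a literal node
# for literal l; 'A c i1 ... ic' an AND node over c previously defined nodes; and
# 'O j c i1 ... ic' an OR node, where j is the decision variable (0 if none). Node
# indices refer to earlier L/A/O lines, counted from 0.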
def _load_nnf(filename, cnf):
nnf = DDNNF(cnf.neg_cycles, keep_all=True)
weights = cnf.get_weights()
names_inv = defaultdict(list)
for name, node, label in cnf.get_names_with_label():
names_inv[node].append((name, label))
with open(filename) as f:
line2node = {}
rename = {}
lnum = 0
for line in f:
line = line.strip().split()
if line[0] == "nnf":
pass
elif line[0] == "L":
name = int(line[1])
prob = weights.get(abs(name), True)
node = nnf.add_atom(abs(name), prob)
rename[abs(name)] = node
if name < 0:
node = -node
line2node[lnum] = node
if name in names_inv:
for actual_name, label in names_inv[name]:
nnf.add_name(actual_name, node, label)
del names_inv[name]
lnum += 1
elif line[0] == "A":
children = map(lambda x: line2node[int(x)], line[2:])
line2node[lnum] = nnf.add_and(children)
lnum += 1
elif line[0] == "O":
children = map(lambda x: line2node[int(x)], line[3:])
line2node[lnum] = nnf.add_or(children)
lnum += 1
else:
print("Unknown line type")
for name in names_inv:
for actual_name, label in names_inv[name]:
if name == 0:
nnf.add_name(actual_name, len(nnf), label)
else:
nnf.add_name(actual_name, None, label)
for c in cnf.constraints():
nnf.add_constraint(c.copy(rename))
return nnf
|
the-stack_0_22913 | __author__ = 'martinez'
# fall back to the package-qualified modules, binding them under the short names used below
try:
    import pexceptions
except ImportError:
    import pyseg.pexceptions as pexceptions
import numpy as np
try:
    import disperse_io
except ImportError:
    import pyseg.disperse_io as disperse_io
from pyto.io import ImageIO
from abc import *
##########################################################################################
# Class for holding the Geometry of a VertexMCF
#
#
class GeometryMCF:
# coords: array (shape=Nx3) with the coordinates of this geometry in the original tomogram
    # densities: array (shape=N) with the densities at these coordinates
def __init__(self, coords, densities):
self.__image = None
self.__mask = None
self.__size = None
self.__resolution = 1 # nm / voxel
self.__offset = None
self.__total_density = None
self.__avg_density = None
self.__density = None
self.__build_image(coords, densities)
########## Implemented functionality area
# In nm/voxel, by default 1
def set_resolution(self, resolution=1):
self.__resolution = resolution
def get_resolution(self):
return self.__resolution
# Size of the subvol which holds the geometry
def get_size(self):
return self.__size
# Offset of the subvol which holds the geometry
def get_offset(self):
return self.__offset
# Return density mean
def get_mean(self):
return np.mean(self.__density)
# Return the standard deviation
def get_std(self):
return np.std(self.__density)
    # Top-left-front and bottom-down-back corners as a 6-element tuple
def get_bound(self):
return (self.__offset[0],
self.__offset[1],
self.__offset[2],
self.__offset[0] + self.__size[0],
self.__offset[1] + self.__size[1],
self.__offset[2] + self.__size[2])
# lbl: if not None all voxels are set to lbl
def get_numpy_image(self, lbl=None):
if lbl is None:
return self.__image
else:
return self.__mask * lbl
# lbl: if not None all voxels are set to lbl
def get_numpy_mask(self, lbl=None):
if lbl is None:
return self.__mask
else:
return self.__mask * lbl
# Return geometry volume in nm
def get_volume(self):
dv = self.__resolution * self.__resolution * self.__resolution
return self.__mask.sum() * dv
# Prints the geometry into a numpy volume
# vol: numpy array big enough for holding the geometry
# lbl: label used for printing, if None (default) density is printed
# th_den: number of sigmas above (+) or below vertex geometry density mean for thresholding,
# if None no threshold is applied
def print_in_numpy(self, vol, lbl=None, th_den=None):
o = self.__offset
s = self.__size
w = o + s
if (w[0] > vol.shape[0]) or (w[1] > vol.shape[1]) or (w[2] > vol.shape[2]):
error_msg = 'The volume cannot hold the geometry.'
raise pexceptions.PySegInputWarning(expr='print_in_numpy (Geometry)', msg=error_msg)
if th_den is not None:
mean = self.get_mean()
std = self.get_std()
self.__mask[self.__image > (mean + th_den*std)] = False
subvol = self.get_numpy_image(lbl)
mask_arr = self.get_array_mask()
vol[mask_arr[:, 0], mask_arr[:, 1], mask_arr[:, 2]] = subvol[self.__mask]
def get_mask_sum(self):
return self.__mask.sum()
# Return an array which indexes mask foreground voxels
# mode: 1 (default) in manifold coordinates system, otherwise in local geometry system
# th_den: number of sigmas above (+) or below vertex geometry density mean for thresholding,
# if None no threshold is applied
def get_array_mask(self, mode=1, th_den=None):
# Building the meshgrid
if mode == 1:
x_a = np.arange(self.__offset[0], self.__offset[0]+self.__size[0])
y_a = np.arange(self.__offset[1], self.__offset[1]+self.__size[1])
z_a = np.arange(self.__offset[2], self.__offset[2]+self.__size[2])
else:
x_a = np.arange(0, self.__size[0])
y_a = np.arange(0, self.__size[1])
z_a = np.arange(0, self.__size[2])
mg_y, mg_x, mg_z = np.meshgrid(y_a, x_a, z_a)
if th_den is None:
hold_mask = self.__mask
else:
mean = self.get_mean()
std = self.get_std()
hold_mask = np.copy(self.__mask)
hold_mask[self.__image > (mean + th_den*std)] = False
# Building coordinates array
x = mg_x[hold_mask]
y = mg_y[hold_mask]
z = mg_z[hold_mask]
if (len(x.shape) != 1) or (x.shape != y.shape) or (x.shape != z.shape):
error_msg = 'Unexpected state.'
raise pexceptions.PySegTransitionError(expr='get_array_mask (GeometryMCF)', msg=error_msg)
mask_array = np.zeros(shape=(x.size, 3), dtype=np.int)
mask_array[:, 0] = x.astype(np.int)
mask_array[:, 1] = y.astype(np.int)
mask_array[:, 2] = z.astype(np.int)
return mask_array
def get_total_density(self):
if self.__total_density is None:
self.__total_density = np.sum(self.__density)
return self.__total_density
# It only works if input density map is in range [0, 1]
def get_total_density_inv(self):
return np.sum(1 - self.__density)
    # If this value has already been computed, this function is faster than recomputing the mean
    def get_avg_density(self):
        if self.__avg_density is None:
            self.__avg_density = np.mean(self.__density)
        return self.__avg_density
# Return all densities in one dimensional array
def get_densities(self):
return self.__density
# Extend the geometry with another
# geom: this geometry is added to the current geometry
def extend(self, geom):
if geom.get_resolution() != self.__resolution:
            error_msg = 'Input geometry resolution does not match current geometry resolution.'
raise pexceptions.PySegTransitionError(expr='extend (GeometryMCF)', msg=error_msg)
# Compute new geometry size and offset
hold_offset = np.zeros(shape=3, dtype=np.int)
off_img_s = np.zeros(shape=3, dtype=np.int)
off_img_g = np.zeros(shape=3, dtype=np.int)
if self.__offset[0] < geom.__offset[0]:
hold_offset[0] = self.__offset[0]
off_img_g[0] = geom.__offset[0] - self.__offset[0]
else:
hold_offset[0] = geom.__offset[0]
off_img_s[0] = self.__offset[0] - geom.__offset[0]
if self.__offset[1] < geom.__offset[1]:
hold_offset[1] = self.__offset[1]
off_img_g[1] = geom.__offset[1] - self.__offset[1]
else:
hold_offset[1] = geom.__offset[1]
off_img_s[1] = self.__offset[1] - geom.__offset[1]
if self.__offset[2] < geom.__offset[2]:
hold_offset[2] = self.__offset[2]
off_img_g[2] = geom.__offset[2] - self.__offset[2]
else:
hold_offset[2] = geom.__offset[2]
off_img_s[2] = self.__offset[2] - geom.__offset[2]
hold_size = np.zeros(shape=3, dtype=np.int)
hold_s = self.__offset + self.__size
hold_g = geom.__offset + geom.__size
if hold_s[0] > hold_g[0]:
hold_size[0] = hold_s[0]
else:
hold_size[0] = hold_g[0]
if hold_s[1] > hold_g[1]:
hold_size[1] = hold_s[1]
else:
hold_size[1] = hold_g[1]
if hold_s[2] > hold_g[2]:
hold_size[2] = hold_s[2]
else:
hold_size[2] = hold_g[2]
hold_size -= hold_offset
# Create the extended container arrays
hold_density = np.concatenate((self.__density, geom.__density))
hold_image = np.zeros(shape=hold_size, dtype=self.__image.dtype)
hold_mask = np.zeros(shape=hold_size, dtype=self.__mask.dtype)
xsl, ysl, zsl = off_img_s[0], off_img_s[1], off_img_s[2]
xsh, ysh, zsh = off_img_s[0]+self.__size[0], off_img_s[1]+self.__size[1], \
off_img_s[2]+self.__size[2]
hold_mask[xsl:xsh, ysl:ysh, zsl:zsh] = self.__mask
hold_image[hold_mask] = self.__image[self.__mask]
hold_mask_2 = np.zeros(shape=hold_size, dtype=geom.__mask.dtype)
xgl, ygl, zgl = off_img_g[0], off_img_g[1], off_img_g[2]
xgh, ygh, zgh = off_img_g[0]+geom.__size[0], off_img_g[1]+geom.__size[1], \
off_img_g[2]+geom.__size[2]
hold_mask_2[xgl:xgh, ygl:ygh, zgl:zgh] = geom.__mask
hold_image[hold_mask_2] = geom.__image[geom.__mask]
# Update object state
self.__offset = hold_offset
self.__size = hold_size
self.__density = hold_density
self.__image = hold_image
self.__mask = hold_mask + hold_mask_2
self.__avg_density = None
self.__total_density = None
###### Internal methods
def __build_image(self, coords, densities):
# Get subvol borders
self.__offset = np.min(coords, axis=0)
self.__size = np.max(coords, axis=0)
self.__size = self.__size - self.__offset + 1
self.__density = densities
# Creates image and mask
self.__mask = np.zeros(shape=self.__size, dtype=np.bool)
self.__image = np.zeros(shape=self.__size, dtype=densities.dtype)
# Fill up image and mask
self.__total_density = 0
for i in range(len(densities)):
x, y, z = coords[i]
x, y, z = (x, y, z) - self.__offset
self.__mask[x, y, z] = True
den = densities[i]
self.__image[x, y, z] = den
self.__total_density += den
self.__avg_density = self.__total_density / len(densities)
##########################################################################################
# Abstract class serving as an interface to geometries
#
#
class Geometry(metaclass=ABCMeta):
# For Abstract Base Classes in python
def __init__(self, manifold, density):
if (not isinstance(manifold, np.ndarray)) and (not isinstance(density, np.ndarray)):
error_msg = 'Booth manifold and density must be np.ndarray objects.'
raise pexceptions.PySegInputError(expr='__init___ (Geometry)', msg=error_msg)
self.__manifold = manifold
self.__density = density
self.__image = None
self.__mask = None
self.__size = None
self.__resolution = 1 # nm / voxel
self.__offset = None
self.__build_image()
self.__total_density = None
########## Implemented functionality area
# In nm/voxel, by default 1
def set_resolution(self, resolution=1):
self.__resolution = resolution
def get_manifold(self):
return self.__manifold
def get_density(self):
return self.__density
# Size of the subvol which holds the geometry
def get_size(self):
return self.__size
# Offset of the subvol which holds the geometry
def get_offset(self):
return self.__offset
    # Top-left-front and bottom-down-back corners as a 6-element tuple
def get_bound(self):
return (self.__offset[0],
self.__offset[1],
self.__offset[2],
self.__offset[0] + self.__size[0],
self.__offset[1] + self.__size[1],
self.__offset[2] + self.__size[2])
# lbl: if not None all voxels are set to lbl
def get_numpy_image(self, lbl=None):
if lbl is None:
return self.__image
else:
return self.__mask * lbl
# lbl: if not None all voxels are set to lbl
    def get_numpy_mask(self, lbl=None):
        if lbl is None:
            return self.__mask
        else:
            return self.__mask * lbl
# Return an array which indexes mask foreground voxels
# mode: 1 (default) in manifold coordinates system, otherwise in local geometry system
def get_array_mask(self, mode=1):
# Building the meshgrid
if mode == 1:
x_a = np.arange(self.__offset[0], self.__offset[0]+self.__size[0])
y_a = np.arange(self.__offset[1], self.__offset[1]+self.__size[1])
z_a = np.arange(self.__offset[2], self.__offset[2]+self.__size[2])
else:
x_a = np.arange(0, self.__size[0])
y_a = np.arange(0, self.__size[1])
z_a = np.arange(0, self.__size[2])
mg_y, mg_x, mg_z = np.meshgrid(y_a, x_a, z_a)
        # Building coordinates array
x = mg_x[self.__mask]
y = mg_y[self.__mask]
z = mg_z[self.__mask]
if (len(x.shape) != 1) or (x.shape != y.shape) or (x.shape != z.shape):
error_msg = 'Unexpected state.'
raise pexceptions.PySegTransitionError(expr='get_array_mask (Geometry)', msg=error_msg)
        mask_array = np.zeros(shape=(x.size, 3), dtype=int)
        mask_array[:, 0] = x.astype(int)
        mask_array[:, 1] = y.astype(int)
        mask_array[:, 2] = z.astype(int)
return mask_array
# Eliminates a voxel in the geometry
def delete_voxel(self, x, y, z):
xi = int(x)
yi = int(y)
zi = int(z)
        self.__mask[xi, yi, zi] = False
# lbl: if not None all voxels are set to lbl
def get_vtk_image(self, lbl=None):
if lbl is None:
            return disperse_io.numpy_to_vti(self.__image, self.__offset, [self.__resolution] * 3)
        else:
            return disperse_io.numpy_to_vti(self.__mask*lbl, self.__offset, [self.__resolution] * 3)
# lbl: if not None all voxels are set to lbl
def save_mrc_image(self, file_name, lbl=None):
mrc_image = ImageIO()
if lbl is None:
mrc_image.setData(self.__image)
else:
mrc_image.setData(self.__image * lbl)
mrc_image.writeMRC(file=file_name, length=self.__resolution*self.__size, nstart=self.__offset)
# Return density mean
def get_mean(self):
return np.mean(self.__density[self.__mask])
# Return the standard deviation
def get_std(self):
return np.std(self.__density[self.__mask])
    # Apply an external mask to the geometry. The mask must be a numpy array big enough for embedding
# the geometry with format: 1-fg, 0-bg
def apply_ext_mask(self, mask):
# Cropping the mask
subvol = mask[self.__offset[0]:(self.__offset[0]+self.__size[0]),
self.__offset[1]:(self.__offset[1]+self.__size[1]),
self.__offset[2]:(self.__offset[2]+self.__size[2])]
self.__mask = self.__mask * subvol
self.__image = self.__image * subvol
# Prints the geometry into a numpy volume
# vol: numpy array big enough for holding the geometry
# lbl: label used for printing, if None (default) density is printed
    # th_den: number of sigmas above (+) or below (-) the geometry density mean for thresholding,
# if None no threshold is applied
def print_in_numpy(self, vol, lbl=None, th_den=None):
o = self.__offset
s = self.__size
w = o + s
if (w[0] > vol.shape[0]) or (w[1] > vol.shape[1]) or (w[2] > vol.shape[2]):
error_msg = 'The volume cannot hold the geometry.'
raise pexceptions.PySegInputWarning(expr='print_in_numpy (Geometry)', msg=error_msg)
if th_den is not None:
mean = self.get_mean()
std = self.get_std()
self.__mask[self.__density[o[0]:w[0], o[1]:w[1], o[2]:w[2]] > (mean + th_den*std)] = False
subvol = self.get_numpy_image(lbl)
mask_arr = self.get_array_mask()
vol[mask_arr[:, 0], mask_arr[:, 1], mask_arr[:, 2]] = subvol[self.__mask]
def get_total_density(self):
if self.__total_density is None:
self.__total_density = np.sum(self.__image[self.__mask])
return self.__total_density
###### Abstract methods
@abstractmethod
def __build_image(self):
raise NotImplementedError(
'__build_image() (Geometry). Abstract method, it requires an implementation.')
##########################################################################################
# Class for holding the three-dimensional geometry of a single vertex
#
#
class PointGeometry(Geometry):
    # pcoord: coordinates (x,y,z) of the point within the manifold
# manifold: image with the manifold (Numpy ndarray)
# density: image with the density (Numpy ndarray)
def __init__(self, pcoord, manifold, density):
if len(pcoord) != 3:
error_msg = 'Input coordinates must be a 3D vector.'
raise pexceptions.PySegInputError(expr='__init___ (PointGeometry)', msg=error_msg)
self.__seed_coord = pcoord
super(PointGeometry, self).__init__(manifold, density)
######### Set/Get functions area
######### External area functionality
########### Internal Functionality area
# Build the 3D sub_image with the mask and the density for the geometry which surrounds a point
def _Geometry__build_image(self):
# Compute size
lbl = self._Geometry__manifold[int(np.floor(self.__seed_coord[0])),
int(np.floor(self.__seed_coord[1])),
int(np.floor(self.__seed_coord[2]))]
idx = np.where(self._Geometry__manifold == lbl)
xmin = np.min(idx[0])
ymin = np.min(idx[1])
zmin = np.min(idx[2])
xmax = np.max(idx[0])
ymax = np.max(idx[1])
zmax = np.max(idx[2])
self._Geometry__offset = np.asarray([xmin, ymin, zmin])
self._Geometry__size = np.asarray([xmax-xmin+1, ymax-ymin+1, zmax-zmin+1])
for i in range(self._Geometry__size.shape[0]):
if (self._Geometry__size[i] < 0) or\
(self._Geometry__size[i] > self._Geometry__manifold.shape[i]):
error_msg = 'Dimension lower than zero or bigger than input manifold.'
raise pexceptions.PySegTransitionError(expr='__build_image (PointGeometry)', msg=error_msg)
# Get image and mask
xmax1 = xmax + 1
ymax1 = ymax + 1
zmax1 = zmax + 1
self._Geometry__mask = self._Geometry__manifold[xmin:xmax1, ymin:ymax1, zmin:zmax1] == lbl
self._Geometry__image = self._Geometry__density[xmin:xmax1, ymin:ymax1, zmin:zmax1] \
* self._Geometry__mask
##########################################################################################
# Class for holding the three-dimensional geometry of an Arc
#
#
class ArcGeometry(Geometry):
    # pcoords: array of coordinates (x,y,z) of the points within the manifold
# manifold: image with the manifold (Numpy ndarray)
# density: image with the density (Numpy ndarray)
def __init__(self, pcoords, manifold, density):
if (len(pcoords.shape) != 2) or (pcoords.shape[1] != 3):
            error_msg = 'Input coordinates must be a numpy array of 3D vectors.'
raise pexceptions.PySegInputError(expr='__init___ (ArcGeometry)', msg=error_msg)
self.__seed_coords = pcoords
super(ArcGeometry, self).__init__(manifold, density)
######### Set/Get functions area
######### External area functionality
########### Internal Functionality area
    # Build the 3D sub_image with the mask and the density for the geometry which surrounds an array
    # of points connected as a string
def _Geometry__build_image(self):
        # Compute the bounding box enclosing the manifold regions of all seed points
        xmin = self._Geometry__manifold.shape[0]
        ymin = self._Geometry__manifold.shape[1]
        zmin = self._Geometry__manifold.shape[2]
        xmax = 0
        ymax = 0
        zmax = 0
        lbls = np.zeros(shape=self.__seed_coords.shape[0], dtype=int)
        idxs = list()
        for i in range(self.__seed_coords.shape[0]):
            lbls[i] = self._Geometry__manifold[int(round(self.__seed_coords[i][0])),
                                               int(round(self.__seed_coords[i][1])),
                                               int(round(self.__seed_coords[i][2]))]
            idx = np.where(self._Geometry__manifold == lbls[i])
            idxs.append(idx)
            hold = idx[0].min()
            if hold < xmin: xmin = hold
            hold = idx[1].min()
            if hold < ymin: ymin = hold
            hold = idx[2].min()
            if hold < zmin: zmin = hold
            hold = idx[0].max()
            if hold > xmax: xmax = hold
            hold = idx[1].max()
            if hold > ymax: ymax = hold
            hold = idx[2].max()
            if hold > zmax: zmax = hold
        self._Geometry__offset = np.asarray((xmin, ymin, zmin))
        self._Geometry__size = np.asarray((xmax, ymax, zmax)) - self._Geometry__offset + 1
        # Create the image and mask in local (subvolume) coordinates
        self._Geometry__mask = np.zeros(shape=self._Geometry__size, dtype=bool)
        self._Geometry__image = np.zeros(shape=self._Geometry__size,
                                         dtype=self._Geometry__density.dtype)
        for i in range(self.__seed_coords.shape[0]):
            loc = (idxs[i][0] - xmin, idxs[i][1] - ymin, idxs[i][2] - zmin)
            self._Geometry__mask[loc] = True
            self._Geometry__image[loc] = self._Geometry__density[idxs[i]]
|
the-stack_0_22919 | __author__ = 'Aaron Yang'
__email__ = '[email protected]'
__date__ = '2/26/2021 7:42 PM'
# arr = [1, 3, 2, 4, 6, 100]
arr = [1, 3, 2, 4, 6, 5]
dp = [-1] * len(arr)
dp[-1] = arr[-1]
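# fun(arr, i) returns the best sum reachable from index i: either take arr[i] and jump
# arr[i] positions forward, or skip to i + 1; dp memoizes the results (-1 = not computed).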
def fun(arr, i):
if i >= len(arr):
return 0
if dp[i] != -1:
return dp[i]
dp[i] = max(arr[i] + fun(arr, i + arr[i]), fun(arr, i + 1))
return dp[i]
res = fun(arr, 0)
print(dp)
|
the-stack_0_22920 | from hemlock import Embedded, Page, Label, likert
from sqlalchemy_mutable import partial
import os
from random import shuffle
try:
from yaml import load, CLoader as Loader
except:
from yaml import load, Loader
dir_path = os.path.dirname(os.path.realpath(__file__))
versions = {
'IPIP-50': load(
open(os.path.join(dir_path, 'ipip50.yaml')), Loader=Loader
),
'TIPI': load(
open(os.path.join(dir_path, 'tipi.yaml')), Loader=Loader
),
'BFI-10': load(
open(os.path.join(dir_path, 'bfi10.yaml')), Loader=Loader
)
}
instructions_label = 'How much do you agree with the following statements?'
trait_abbreviations = {
'E': 'Extraversion',
'A': 'Agreeableness',
'C': 'Conscientiousness',
'N': 'Neuroticism',
'O': 'Openness'
}
def big5(
*items, version='IPIP-50', page=False, choices=5,
include_instructions=True, shuffle_items=False, record_index=False
):
"""
Create a big 5 personality questionnaire.
Parameters
----------
\*items :
Names of big 5 items to include. If no items are specified, this
function returns all big 5 items in the given version.
version : str, default='IPIP-50'
Version of the big 5 questionnaire. Currently supported are
        `'IPIP-50'` (50-item version from the International Personality Item
Pool), `'TIPI'` (Ten-Item Personality Inventory), and `'BFI-10'`
(10-item Big 5 Inventory).
page : bool, default=False
Indicates that this function should return a page with the big 5
items. Otherwise, return a list of questions.
choices : int or list
Passed to `hemlock.likert`. 5, 7, and 9 mean 5-, 7-, and 9-point
Likert scales. Alternatively, pass a list of strings.
include_instructions : bool, default=True
Indicates that an instructions label should be included before the
items.
shuffle_items : bool, default=False
Indicates that items should be shuffled.
record_index : bool, default=False
Indicates to record the index of the big 5 items as they appear on the
page. Only applies of `page` is `True`.
Returns
-------
big5_questionnaire : hemlock.Page or list of hemlock.Question
If `page` is `True`, this function returns a page containing the
        requested big 5 items. Otherwise, it returns a list of questions.
"""
def gen_question(item):
# generates a question for a given big 5 item
_, label, ascending = item_bank[item]
return likert(
label,
choices=choices, reversed=not ascending,
var='Big5'+item, data_rows=-1, record_index=record_index,
)
item_bank = _get_item_bank(version)
if not items:
items = item_bank.keys()
questions = [gen_question(item) for item in items]
if shuffle_items:
shuffle(questions)
if include_instructions:
questions.insert(0, Label(instructions_label))
if page:
return Page(
*questions,
name='Big5',
timer=('Big5Time', -1),
submit=partial(_record_score, item_bank)
)
return questions
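# Usage sketch (illustrative only; assumes a hemlock survey where the returned page is
# attached to a branch, and variable names here are hypothetical):
#
#   personality_page = big5(version='TIPI', page=True, shuffle_items=True)
#   trait_questions = big5_traits('E', 'N', version='BFI-10', include_instructions=False)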
def _record_score(page, item_bank):
# records the aggregate score for each personality trait
def compute_scores():
scores = {}
questions = [q for q in page.questions if q.var and (q.data != '')]
for q in questions:
trait = item_bank[q.var[len('Big5'):]][0]
if trait not in scores:
scores[trait] = []
scores[trait].append(q.data)
return {
trait_abbreviations[trait]: sum(score)/len(score)
for trait, score in scores.items()
}
scores = compute_scores()
page.embedded = [
Embedded(trait, score, data_rows=-1)
for trait, score in scores.items()
]
g = page.part.g
if 'Big5' not in g:
g['Big5'] = {}
g['Big5'].update(scores)
def big5_traits(*traits, version='IPIP-50', **kwargs):
"""
Create a big 5 personality questionnaire for specified personality traits.
Parameters
----------
\*traits :
Strings of requested traits, `'E'` for extraversion, `'A'` for
agreeableness, `'C'` for conscientiousness, `'N'` for neuroticism,
`'O'` for openness.
version : str, default='IPIP-50'
Version of the big 5 questionnaire.
\*\*kwargs :
Keyword arguments are passed to `big5`.
Returns
-------
big5_questionnaire : hemlock.Page or list of hemlock.Question
If `page` is `True`, this function returns a page containing the
        requested big 5 items. Otherwise, it returns a list of questions.
"""
assert all([trait in 'EACNO' for trait in traits]), 'Traits must be one of "E", "A", "C", "N", "O"'
item_bank = _get_item_bank(version)
item_bank = {
key: val for key, val in item_bank.items() if val[0] in traits
}
obj = big5(version=item_bank, **kwargs)
if isinstance(obj, Page):
obj.name = 'Big5 '+' '.join(traits)
obj.timer.var = 'Big5'+''.join(traits)+'Timer'
return obj
def _get_item_bank(version):
"""
Get big 5 items.
Returns a dict mapping variable names to a (trait, label, ascending)
tuple. Trait is one of 'E', 'A', 'C', 'N', 'O'. Label is the question
label. Ascending indicates that score is ascending in choices (i.e.,
opposite of reverse coding).
"""
    assert isinstance(version, (str, dict)), 'version must be str or dict'
if isinstance(version, str):
assert version in versions.keys(), "When passing version as str, must be one of "+str(versions.keys())
return versions[version]
return version |
the-stack_0_22921 | #!/usr/bin/env python
# encoding: utf-8
r"""
Module containg the classic Clawpack solvers
This module contains the pure and wrapped classic clawpack solvers. All
clawpack solvers inherit from the :class:`ClawSolver` superclass which in turn
inherits from the :class:`~pyclaw.evolve.solver.Solver` superclass. As such,
the only solver classes that should be directly used should be the
dimensionally dependent ones such as :class:`ClawSolver1D`.
:Authors:
Kyle T. Mandli (2008-09-11) Initial version
"""
# ============================================================================
# Copyright (C) 2008 Kyle T. Mandli <[email protected]>
#
# Distributed under the terms of the Berkeley Software Distribution (BSD)
# license
# http://www.opensource.org/licenses/
# ============================================================================
import numpy as np
from pyclaw.evolve.solver import Solver
from . import limiters
# ========================================================================
# User-defined routines
# ========================================================================
def start_step(solver, solutions):
r"""
Dummy routine called before each step
Replace this routine if you want to do something before each time step.
"""
pass
def src(solver, solutions, t, dt):
r"""
Dummy routine called to calculate a source term
Replace this routine if you want to include a source term.
"""
pass
# ============================================================================
# Generic Clawpack solver class
# ============================================================================
class ClawSolver(Solver):
r"""
Generic classic Clawpack solver
All Clawpack solvers inherit from this base class.
.. attribute:: mthlim
Limiter to be used on each wave. ``Default = [1]``
.. attribute:: order
Order of the solver, either 1 for first order or 2 for second order
corrections. ``Default = 2``
.. attribute:: src_split
Whether to use a source splitting method, 0 for none, 1 for first
order Godunov splitting and 2 for second order Strang splitting.
``Default = 0``
.. attribute:: fwave
Whether to split the flux into waves, requires that the Riemann solver
performs the splitting. ``Default = False``
.. attribute:: src
Source term function. Default is the stub function.
.. attribute:: start_step
Function called before each time step is taken. Default is the stub
function
:Initialization:
Input:
- *data* - (:class:`~pyclaw.data.Data`) Data object, the solver will look
for the named variables to instantiate itself.
Output:
- (:class:`ClawSolver`) - Initialized clawpack solver
:Version: 1.0 (2009-06-01)
"""
# ========== Generic Init Routine ========================================
def __init__(self, data=None):
r"""
See :class:`ClawSolver` for full documentation.
"""
# Required attributes for this solver
for attr in ["mthlim", "order", "src_split", "fwave", "src", "start_step"]:
self._required_attrs.append(attr)
# Default required attributes
self._default_attr_values["mthlim"] = [1]
self._default_attr_values["order"] = 2
self._default_attr_values["src_split"] = 0
self._default_attr_values["fwave"] = False
self._default_attr_values["src"] = src
self._default_attr_values["start_step"] = start_step
# Call general initialization function
super(ClawSolver, self).__init__(data)
# ========== Setup Routine ===============================================
def setup(self):
r"""
Called before any set of time steps.
This routine will be called once before the solver is used via the
:class:`~pyclaw.controller.Controller`. In the case of
:class:`ClawSolver` we make sure that the :attr:`mthlim` is a list.
"""
# Change mthlim to be an array regardless of how long it is
if not isinstance(self.mthlim, list) and self.mthlim is not None:
self.mthlim = [self.mthlim]
# ========== Riemann solver library routines =============================
def list_riemann_solvers(self):
r"""
List available Riemann solvers
This routine returns a list of available Riemann solvers which is
constructed in the Riemann solver package (:ref:`pyclaw_rp`). In this
case it lists all Riemann solvers.
:Output:
- (list) - List of Riemann solver names valid to be used with
:meth:`set_riemann_solver`
.. note::
These Riemann solvers are currently only accessible to the python
time stepping routines.
"""
rp_solver_list = []
# Construct list from each dimension list
for rp_solver in rp_solver_list_1d:
rp_solver_list.append("%s_1d" % rp_solver)
for rp_solver in rp_solver_list_2d:
rp_solver_list.append("%s_2d" % rp_solver)
for rp_solver in rp_solver_list_3d:
rp_solver_list.append("%s_3d" % rp_solver)
return rp_solver_list
def set_riemann_solver(self, solver_name):
r"""
Assigns the library solver solver_name as the Riemann solver.
:Input:
- *solver_name* - (string) Name of the solver to be used, raises a
NameError if the solver does not exist.
"""
raise Exception(
"Cannot set a Riemann solver with this class,"
+ " use one of the derived classes."
)
# ========== Time stepping routines ======================================
def step(self, solutions):
r"""
Evolve solutions one time step
This routine encodes the generic order in a full time step in this
order:
1. The :meth:`start_step` function is called
2. A half step on the source term :func:`src` if Strang splitting is
being used (:attr:`src_split` = 2)
3. A step on the homogeneous problem :math:`q_t + f(q)_x = 0` is taken
4. A second half step or a full step is taken on the source term
:func:`src` depending on whether Strang splitting was used
(:attr:`src_split` = 2) or Godunov splitting
(:attr:`src_split` = 1)
This routine is called from the method evolve_to_time defined in the
pyclaw.evolve.solver.Solver superclass.
:Input:
- *solutions* - (:class:`~pyclaw.solution.Solution`) Dictionary of
solutions to be evolved
:Output:
- (bool) - True if full step succeeded, False otherwise
"""
# Call b4step, pyclaw should be subclassed if this is needed
self.start_step(self, solutions)
# Source term splitting, pyclaw should be subclassed if this
# is needed
if self.src_split == 2:
self.src(self, solutions, solutions["n"].t, self.dt / 2.0)
# Take a step on the homogeneous problem
self.homogeneous_step(solutions)
# Check here if we violated the CFL condition, if we did, return
# immediately to evolve_to_time and let it deal with picking a new
# dt
if self.cfl >= self.cfl_max:
return False
# Strang splitting
if self.src_split == 2:
self.src(self, solutions, solutions["n"].t + self.dt / 2.0, self.dt / 2.0)
# Godunov Splitting
if self.src_split == 1:
self.src(self, solutions, solutions["n"].t, self.dt)
return True
def homogeneous_step(self, solutions):
r"""
Take one homogeneous step on the solutions
This is a dummy routine and must be overridden.
"""
raise Exception("Dummy routine, please override!")
# ============================================================================
# ClawPack 1d Solver Class
# ============================================================================
class ClawSolver1D(ClawSolver):
r"""
Clawpack evolution routine in 1D
This class represents the 1d clawpack solver on a single grid. Note that
there are routines here for interfacing with the fortran time stepping
routines and the python time stepping routines. The ones used are
dependent on the argument given to the initialization of the solver
(defaults to python).
.. attribute:: rp
Riemann solver function.
:Initialization:
Input:
- *data* - (:class:`~pyclaw.data.Data`) An instance of a Data object whose
parameters can be used to initialize this solver
Output:
- (:class:`ClawSolver1D`) - Initialized 1d clawpack solver
:Authors:
Kyle T. Mandli (2008-09-11) Initial version
"""
def __init__(self, data=None):
r"""
Create 1d Clawpack solver
See :class:`ClawSolver1D` for more info.
"""
# Add the functions as required attributes
self._required_attrs.append("rp")
self._default_attr_values["rp"] = None
# Import Riemann solvers
exec("import pyclaw.evolve.rp as rp", globals())
super(ClawSolver1D, self).__init__(data)
# ========== Riemann solver library routines =============================
def list_riemann_solvers(self):
r"""
List available Riemann solvers
This routine returns a list of available Riemann solvers which is
constructed in the Riemann solver package (_pyclaw_rp). In this case
it lists only the 1D Riemann solvers.
:Output:
- (list) - List of Riemann solver names valid to be used with
:meth:`set_riemann_solver`
.. note::
These Riemann solvers are currently only accessible to the python
time stepping routines.
"""
return rp.rp_solver_list_1d
def set_riemann_solver(self, solver_name):
r"""
Assigns the library solver solver_name as the Riemann solver.
:Input:
- *solver_name* - (string) Name of the solver to be used, raises a
``NameError`` if the solver does not exist.
"""
import logging
if solver_name in rp.rp_solver_list_1d:
exec("self.rp = rp.rp_%s_1d" % solver_name)
else:
logger = logging.getLogger("solver")
error_msg = "Could not find Riemann solver with name %s" % solver_name
logger.warning(error_msg)
raise NameError(error_msg)
# ========== Python Homogeneous Step =====================================
def homogeneous_step(self, solutions):
r"""
Take one time step on the homogeneous hyperbolic system
Takes one time step of size dt on the hyperbolic system defined in the
appropriate Riemann solver rp.
:Input:
- *solutions* - (:class:`~pyclaw.solution.Solution`) Solution that
will be evolved
:Version: 1.0 (2009-07-01)
"""
# Grid we will be working on
grid = solutions["n"].grids[0]
# Number of equations
meqn = solutions["n"].meqn
# Limiter to use in the pth family
limiter = np.array(self.mthlim, ndmin=1)
# Q with appended boundary conditions
q = grid.qbc()
# Flux vector
f = np.empty((2 * grid.mbc + grid.n[0], meqn))
dtdx = np.zeros((2 * grid.mbc + grid.n[0]))
# Find local value for dt/dx
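        # (grid.capa is the capacity function, used e.g. for mapped/curvilinear grids)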
if grid.capa is not None:
dtdx = self.dt / (grid.d[0] * grid.capa)
else:
dtdx += self.dt / grid.d[0]
# Solve Riemann problem at each interface
q_l = q[:-1, :]
q_r = q[1:, :]
if grid.aux is not None:
aux_l = grid.aux[:-1, :]
aux_r = grid.aux[1:, :]
else:
aux_l = None
aux_r = None
wave, s, amdq, apdq = self.rp(q_l, q_r, aux_l, aux_r, grid.aux_global)
# Update loop limits, these are the limits for the Riemann solver
# locations, which then update a grid cell value
# We include the Riemann problem just outside of the grid so we can
# do proper limiting at the grid edges
# LL | | UL
# | LL | | | | ... | | | UL | |
# | |
LL = grid.mbc - 1
UL = grid.mbc + grid.n[0] + 1
# Update q for Godunov update
for m in range(meqn):
q[LL:UL, m] -= dtdx[LL:UL] * apdq[LL - 1 : UL - 1, m]
q[LL - 1 : UL - 1, m] -= dtdx[LL - 1 : UL - 1] * amdq[LL - 1 : UL - 1, m]
# Compute maximum wave speed
self.cfl = 0.0
for mw in range(wave.shape[2]):
smax1 = max(dtdx[LL:UL] * s[LL - 1 : UL - 1, mw])
smax2 = max(-dtdx[LL - 1 : UL - 1] * s[LL - 1 : UL - 1, mw])
self.cfl = max(self.cfl, smax1, smax2)
# If we are doing slope limiting we have more work to do
if self.order == 2:
# Initialize flux corrections
f = np.zeros((grid.n[0] + 2 * grid.mbc, meqn))
# Apply Limiters to waves
if (limiter > 0).any():
wave = limiters.limit(grid.meqn, wave, s, limiter, dtdx)
# Compute correction fluxes for second order q_{xx} terms
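            # (standard second-order correction: F~ = 1/2 * |s| * (1 - |s|*dt/dx) * wave,
            #  summed over waves; with fwave the |s| factor is replaced by sign(s))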
dtdxave = 0.5 * (dtdx[LL - 1 : UL - 1] + dtdx[LL:UL])
if self.fwave:
for mw in range(wave.shape[2]):
sabs = np.abs(s[LL - 1 : UL - 1, mw])
om = 1.0 - sabs * dtdxave[: UL - LL]
ssign = np.sign(s[LL - 1 : UL - 1, mw])
for m in range(meqn):
f[LL:UL, m] += 0.5 * ssign * om * wave[LL - 1 : UL - 1, m, mw]
else:
for mw in range(wave.shape[2]):
sabs = np.abs(s[LL - 1 : UL - 1, mw])
om = 1.0 - sabs * dtdxave[: UL - LL]
for m in range(meqn):
f[LL:UL, m] += 0.5 * sabs * om * wave[LL - 1 : UL - 1, m, mw]
# Update q by differencing correction fluxes
for m in range(meqn):
q[LL : UL - 1, m] -= dtdx[LL : UL - 1] * (
f[LL + 1 : UL, m] - f[LL : UL - 1, m]
)
# Reset q update
grid.q = q[grid.mbc : -grid.mbc][:]
|
the-stack_0_22922 | #!/usr/bin/env python
import sys
import time
import signal
import math
import rospy
from sensor_msgs.msg import JointState
from std_msgs.msg import Bool
from std_msgs.msg import UInt8
from std_msgs.msg import Float64
class Joint:
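    # Thin wrapper around the ROS publishers that drive a single TR2 joint:
    # stop/mode flags plus position, velocity, and effort set-points.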
_tr2 = None
_id = ""
_state = None
_pub_pos = None
_pub_effort = None
_pub_stop = None
_pub_mode = None
def __init__(self, t, i):
self._tr2 = t
self._id = i
        _topic = "/tr2/joints/" + self._id
self._pub_stop = rospy.Publisher(_topic + "/stop", Bool, queue_size=10)
self._pub_mode = rospy.Publisher(_topic + "/mode", UInt8, queue_size=10)
self._pub_pos = rospy.Publisher(_topic + "/control/position", Float64, queue_size=10)
self._pub_vel = rospy.Publisher(_topic + "/control/velocity", Float64, queue_size=10)
self._pub_effort = rospy.Publisher(_topic + "/control/effort", Float64, queue_size=10)
def state(self):
return self._state
def release(self):
self._pub_stop.publish(0)
def actuate(self, m, motorDuration = 250):
if m > 1.0:
m = 1.0
elif m < -1.0:
m = -1.0
self._pub_effort.publish(m * 100.0)
def setPosition(self, p):
self._pub_pos.publish(p)
def setVelocity(self, v):
self._pub_vel.publish(v)
def stop(self):
self._pub_stop.publish(1)
def setMode(self, mode):
m = 0
if (mode == TR2.mode_backdrive):
m = 1
if (mode == TR2.mode_servo):
m = 2
self._pub_mode.publish(m)
|
the-stack_0_22923 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple, defaultdict
from distutils.util import strtobool
import itertools as it
import operator as op
import os
from absl import logging
import numpy as onp
import six
from six.moves import xrange
from ..config import flags
from .. import core
from .. import ad_util
from .. import tree_util
from .. import dtypes
from .. import linear_util as lu
from ..abstract_arrays import (ConcreteArray, ShapedArray, AbstractToken,
make_shaped_array, array_types, raise_to_shaped,
abstract_token)
from ..core import valid_jaxtype, Literal
from ..util import (partial, partialmethod, cache, safe_map, prod, unzip2,
memoize)
from ..lib import xla_bridge as xb
from ..lib import xla_client as xc
from . import partial_eval as pe
from . import ad
FLAGS = flags.FLAGS
flags.DEFINE_bool('jax_debug_nans',
strtobool(os.getenv('JAX_DEBUG_NANS', "False")),
'Add nan checks to every operation.')
flags.DEFINE_bool('jax_log_compiles',
strtobool(os.getenv('JAX_LOG_COMPILES', "False")),
'Print a message each time a `jit` computation is compiled.')
def _map(f, *xs): return tuple(map(f, *xs))
def identity(x): return x
### handlers
xb.register_constant_handler(core.Unit, lambda c, *_: c.Tuple())
def aval_to_xla_shape(aval):
try:
return xla_shape_handlers[type(aval)](aval)
except KeyError:
raise TypeError("No xla_shape_handler for type: {}".format(type(aval)))
xla_shape_handlers = {}
xla_shape_handlers[core.AbstractUnit] = lambda _: xc.Shape.tuple_shape(())
xla_shape_handlers[ShapedArray] = lambda a: xc.Shape.array_shape(a.dtype, a.shape)
xla_shape_handlers[ConcreteArray] = lambda a: xc.Shape.array_shape(a.dtype, a.shape)
def aval_to_result_handler(aval):
try:
return xla_result_handlers[type(aval)](aval)
except KeyError:
raise TypeError("No xla_result_handler for type: {}".format(type(aval)))
xla_result_handlers = {}
xla_result_handlers[core.AbstractUnit] = lambda _: lambda _: core.unit
def array_result_handler(aval): return partial(DeviceArray, raise_to_shaped(aval))
xla_result_handlers[ShapedArray] = array_result_handler
xla_result_handlers[ConcreteArray] = array_result_handler
def device_put(x, device=None):
x = canonicalize_dtype(x)
try:
return device_put_handlers[type(x)](x, device)
except KeyError:
raise TypeError("No device_put handler for type: {}".format(type(x)))
device_put_handlers = {}
device_put_handlers[core.Unit] = \
lambda _, device: xc.Buffer.from_pyval(
(), device, backend=xb.get_device_backend(device))
def _device_put_array(x, device):
return xc.Buffer.from_pyval(x, device, backend=xb.get_device_backend(device))
for _t in array_types:
device_put_handlers[_t] = _device_put_array
def _device_put_scalar(x, device):
return xc.Buffer.from_pyval(dtypes.coerce_to_array(x), device,
backend=xb.get_device_backend(device))
for _t in dtypes.python_scalar_dtypes.keys():
  device_put_handlers[_t] = _device_put_scalar
# TODO(mattjj): try to remove this canonicalize_dtype stuff
def canonicalize_dtype(x):
typ = type(x)
handler = canonicalize_dtype_handlers.get(typ)
if handler: return handler(x)
for typ in typ.mro():
handler = canonicalize_dtype_handlers.get(typ)
if handler: return handler(x)
raise TypeError("No canonicalize_dtype handler for type: {}".format(type(x)))
canonicalize_dtype_handlers = {}
canonicalize_dtype_handlers[core.Unit] = identity
def _canonicalize_ndarray_dtype(x):
return onp.asarray(x, dtypes.canonicalize_dtype(dtypes.result_type(x)))
for _t in array_types:
canonicalize_dtype_handlers[_t] = _canonicalize_ndarray_dtype
def _canonicalize_python_scalar_dtype(typ, x):
return onp.asarray(
x, dtypes.canonicalize_dtype(dtypes.python_scalar_dtypes[typ]))
for _t in dtypes.python_scalar_dtypes.keys():
canonicalize_dtype_handlers[_t] = partial(_canonicalize_python_scalar_dtype, _t)
def abstractify(x):
typ = type(x)
aval_fn = pytype_aval_mappings.get(typ)
if aval_fn: return aval_fn(x)
for typ in typ.mro():
aval_fn = pytype_aval_mappings.get(typ)
if aval_fn: return aval_fn(x)
raise TypeError("No abstraction handler for type: {}".format(type(x)))
pytype_aval_mappings = {}
pytype_aval_mappings[core.Unit] = lambda _: core.abstract_unit
for _t in array_types:
pytype_aval_mappings[_t] = make_shaped_array
def _make_abstract_python_scalar(typ, _):
return ShapedArray((), dtypes.python_scalar_dtypes[typ], weak_type=True)
for _t in dtypes.python_scalar_dtypes.keys():
pytype_aval_mappings[_t] = partial(_make_abstract_python_scalar, _t)
### op-by-op execution
def apply_primitive(prim, *args, **params):
"""Impl rule that compiles and runs a single primitive 'prim' using XLA."""
abstract_args = map(abstractify, args)
compiled_fun = xla_primitive_callable(prim, *abstract_args, **params)
return compiled_fun(*args)
@cache()
def xla_primitive_callable(prim, *abstract_args, **params):
backend = params.get('backend', None)
aval_out = prim.abstract_eval(*abstract_args, **params)
if prim.multiple_results:
handlers = tuple(map(aval_to_result_handler, aval_out))
handle_result = lambda xs: tuple(h(x) for h, x in zip(handlers, xs.destructure()))
else:
handle_result = aval_to_result_handler(aval_out)
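  # Pass long argument lists as a single XLA tuple to stay under backend parameter limits.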
tuple_args = len(abstract_args) > 100
built_c = primitive_computation(prim, tuple_args, *abstract_args, **params)
compiled = built_c.Compile(compile_options=xb.get_compile_options(),
backend=xb.get_backend(backend))
return partial(_execute_compiled_primitive, prim, compiled, backend,
tuple_args, handle_result)
@cache()
def primitive_computation(prim, tuple_args, *avals, **params):
c = xb.make_computation_builder("primitive_computation_{}".format(prim.name))
c.SetOpMetadata(xc.OpMetadata(op_type=prim.name, op_name=str(params)))
backend = params.pop("backend", None)
platform = xb.get_backend(backend).platform
xla_args = _xla_callable_args(c, avals, tuple_args)
if prim in backend_specific_translations[platform]:
rule = backend_specific_translations[platform][prim]
rule(c, *xla_args, **params) # return val set as a side-effect on c
elif prim in translations:
rule = translations[prim]
rule(c, *xla_args, **params) # return val set as a side-effect on c
elif prim in reduction_translations:
rule = reduction_translations[prim]
rule(c, *xla_args, backend=backend, **params) # return val set as a side-effect on c
elif prim in initial_style_translations:
rule = initial_style_translations[prim]
rule(c, AxisEnv(), *xla_args, backend=backend, **params) # side-effect on c
else:
raise NotImplementedError("XLA translation rule for {} not found".format(prim))
c.ClearOpMetadata()
try:
return c.Build()
except RuntimeError as e:
msg = (" ".join(map(str, e.args)) + "\n"
"This is a bug in JAX's shape-checking rules; please report it!\n"
"https://github.com/google/jax/issues\n")
raise RuntimeError(msg)
def primitive_subcomputation(prim, *avals, **params):
return primitive_computation(prim, False, *avals, **params)
def _execute_compiled_primitive(prim, compiled, backend, tuple_args,
result_handler, *args):
device, = compiled.local_devices()
input_bufs = [device_put(x, device) for x in args if x is not token]
if tuple_args:
input_bufs = [make_tuple(input_bufs, device, backend)]
out_buf = compiled.Execute(input_bufs)
if FLAGS.jax_debug_nans:
check_nans(prim, out_buf.destructure() if prim.multiple_results else out_buf)
return result_handler(out_buf)
def check_nans(prim, bufs):
if prim.multiple_results:
for buf in bufs:
_check_nans(prim.name, buf.shape(), buf)
else:
_check_nans(prim.name, bufs.shape(), bufs)
def _check_nans(name, xla_shape, buf):
if xla_shape.is_tuple():
assert not xla_shape.tuple_shapes()
else:
if dtypes.issubdtype(xla_shape.element_type(), onp.floating):
if onp.any(onp.isnan(buf.to_py())):
msg = "invalid value (nan) encountered in {}"
raise FloatingPointError(msg.format(name))
### compiling jaxprs
def prefetch(x):
if isinstance(x, DeviceArray):
x.copy_to_host_async()
return x
def jaxpr_literals(jaxpr):
return it.chain.from_iterable(eqn_literals(eqn) for eqn in jaxpr.eqns)
def eqn_literals(eqn):
if eqn.bound_subjaxprs:
(subjaxpr, _, _), = eqn.bound_subjaxprs
for literal in jaxpr_literals(subjaxpr):
yield literal
if eqn.primitive in initial_style_translations:
for param in eqn.params.values():
if type(param) in (core.Jaxpr, core.TypedJaxpr):
subjaxpr = param if type(param) is core.Jaxpr else param.jaxpr
for literal in jaxpr_literals(subjaxpr):
yield literal
for v in eqn.invars:
if type(v) is core.Literal:
yield v.val
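# Translate a jaxpr into XLA operations inside the computation builder `c`: each equation
# is dispatched to the matching translation table (backend-specific, plain, reduction,
# initial-style, parallel, or call translations) and the XLA nodes corresponding to
# jaxpr.outvars are returned.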
def jaxpr_subcomp(c, jaxpr, backend, axis_env, consts, freevars, *args):
platform = xb.get_backend(backend).platform
def read(v):
if type(v) is Literal:
return c.Constant(canonicalize_dtype(v.val))
else:
return env[v]
def write(v, node):
assert node is not None
env[v] = node
env = {}
write(core.unitvar, c.Tuple())
_map(write, jaxpr.constvars, consts)
_map(write, jaxpr.freevars, freevars)
_map(write, jaxpr.invars, args)
for eqn in jaxpr.eqns:
c.SetOpMetadata(xc.OpMetadata( op_type=eqn.primitive.name, op_name=str(eqn)))
in_nodes = list(map(read, eqn.invars))
if eqn.primitive in backend_specific_translations[platform]:
rule = backend_specific_translations[platform][eqn.primitive]
ans = rule(c, *in_nodes, **eqn.params)
elif eqn.primitive in translations:
ans = translations[eqn.primitive](c, *in_nodes, **eqn.params)
elif eqn.primitive in reduction_translations:
new_params = check_backend_params(eqn.params, backend)
ans = reduction_translations[eqn.primitive](c, *in_nodes, backend=backend, **new_params)
elif eqn.primitive in initial_style_translations:
new_params = check_backend_params(eqn.params, backend)
rule = initial_style_translations[eqn.primitive]
ans = rule(c, axis_env, *in_nodes, backend=backend, **new_params)
elif eqn.primitive in parallel_translations:
new_params = check_backend_params(eqn.params, backend)
replica_groups = axis_groups(axis_env, new_params['axis_name'])
new_params = {k: new_params[k] for k in new_params if k != 'axis_name'}
rule = parallel_translations[eqn.primitive]
ans = rule(c, *in_nodes, replica_groups=replica_groups, backend=backend, **new_params)
elif eqn.primitive in call_translations:
new_params = check_backend_params(eqn.params, backend)
(subjaxpr, const_bindings, freevar_bindings), = eqn.bound_subjaxprs
const_nodes = _map(read, const_bindings)
freevar_nodes = _map(read, freevar_bindings)
rule = call_translations[eqn.primitive]
ans = rule(c, subjaxpr, axis_env, const_nodes, freevar_nodes, in_nodes,
backend=backend, **new_params)
else:
msg = "XLA translation rule for primitive '{}' not found"
raise NotImplementedError(msg.format(eqn.primitive.name))
c.GetShape(ans) # force xla to do shape error checking
out_nodes = xla_destructure(c, ans) if eqn.primitive.multiple_results else [ans]
c.ClearOpMetadata()
_map(write, eqn.outvars, out_nodes)
return _map(read, jaxpr.outvars)
def xla_destructure(c, ans):
num_elements = len(c.GetShape(ans).tuple_shapes())
return [c.GetTupleElement(ans, i) for i in range(num_elements)]
def check_backend_params(params, outer_backend):
# For nested calls, the outermost call sets the backend for all inner calls;
# it's an error if the inner call has a conflicting explicit backend spec.
inner_backend = params.get('backend', None)
if inner_backend and inner_backend != outer_backend:
msg = (
"Outer-jit backend specification {} must match explicit inner-jit "
"backend specification {}.")
raise ValueError(msg.format(outer_backend, inner_backend))
return {k: params[k] for k in params if k != 'backend'}
class AxisEnv(object):
def __init__(self, nreps=1, names=None, sizes=None, devices=None):
self.nreps = nreps
self.names = names if names else []
self.sizes = sizes if sizes else []
self.devices = devices
def extend_axis_env(env, name, size):
return AxisEnv(env.nreps, env.names + [name], env.sizes + [size], env.devices)
def axis_read(axis_env, axis_name):
return max(i for i, name in enumerate(axis_env.names) if name == axis_name)
def axis_groups(axis_env, name):
if isinstance(name, (list, tuple)):
mesh_axes = tuple(map(partial(axis_read, axis_env), name))
else:
mesh_axes = (axis_read(axis_env, name),)
return _axis_groups(axis_env.nreps, axis_env.sizes, mesh_axes)
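# _axis_groups builds the replica groups for a collective: replicas are laid out on a mesh
# of shape mesh_spec (plus a trailing factor for any leftover replication), and each group
# collects the replica ids that vary along mesh_axes. For example,
# _axis_groups(8, [4, 2], (0,)) == ((0, 2, 4, 6), (1, 3, 5, 7)).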
def _axis_groups(nrep, mesh_spec, mesh_axes):
trailing_size, ragged = divmod(nrep, prod(mesh_spec))
assert not ragged
full_spec = list(mesh_spec) + [trailing_size]
iota = onp.arange(prod(full_spec)).reshape(full_spec)
groups = onp.reshape(
onp.moveaxis(iota, mesh_axes, onp.arange(len(mesh_axes))),
(prod(onp.take(full_spec, mesh_axes)), -1))
return tuple(map(tuple, groups.T))
def jaxpr_replicas(jaxpr):
return max(it.chain([1], (eqn_replicas(eqn) for eqn in jaxpr.eqns)))
def eqn_replicas(eqn):
if eqn.bound_subjaxprs:
(subjaxpr, _, _), = eqn.bound_subjaxprs
return eqn.params.get('axis_size', 1) * jaxpr_replicas(subjaxpr)
elif eqn.primitive in initial_style_translations:
nums = (jaxpr_replicas(param if type(param) is core.Jaxpr else param.jaxpr)
for param in eqn.params.values()
if type(param) in (core.Jaxpr, core.TypedJaxpr))
return max(it.chain([1], nums))
else:
return 1
# TODO(mattjj,skyewm): the functions here are utilities for checking if
# not-yet-supported features are used with multi-host programming
def jaxpr_has_pmap(jaxpr):
return any(eqn_has_pmap(eqn) for eqn in jaxpr.eqns)
def eqn_has_pmap(eqn):
if eqn.bound_subjaxprs:
(subjaxpr, _, _), = eqn.bound_subjaxprs
return jaxpr_has_pmap(subjaxpr)
elif eqn.primitive in initial_style_translations:
return any(jaxpr_has_pmap(param if type(param) is core.Jaxpr else param.jaxpr)
for param in eqn.params.values()
if type(param) in (core.Jaxpr, core.TypedJaxpr))
else:
return 'pmap' in eqn.primitive.name
def jaxpr_collectives(jaxpr):
return it.chain.from_iterable(eqn_collectives(eqn) for eqn in jaxpr.eqns)
def eqn_collectives(eqn):
if eqn.bound_subjaxprs:
(subjaxpr, _, _), = eqn.bound_subjaxprs
for c in jaxpr_collectives(subjaxpr):
yield c
elif eqn.primitive in initial_style_translations:
for param in eqn.params.values():
if type(param) is core.Jaxpr:
for c in jaxpr_collectives(param):
yield c
elif type(param) is core.TypedJaxpr:
for c in jaxpr_collectives(param.jaxpr):
yield c
else:
if eqn.primitive in parallel_translations:
yield eqn.primitive
### xla_call underlying jit
def _xla_call_impl(fun, *args, **params):
device = params['device']
backend = params.get('backend', None)
compiled_fun = _xla_callable(fun, device, backend, *map(abstractify, args))
try:
return compiled_fun(*args)
except FloatingPointError:
print("Invalid value encountered in the output of a jit function. "
"Calling the de-optimized version.")
return fun.call_wrapped(*args) # probably won't return
@lu.cache
def _xla_callable(fun, device, backend, *abstract_args):
pvals = [pe.PartialVal((aval, core.unit)) for aval in abstract_args]
with core.new_master(pe.StagingJaxprTrace, True) as master:
jaxpr, (pvals, consts, env) = pe.trace_to_subjaxpr(fun, master, False).call_wrapped(pvals)
assert not env # no subtraces here
del master, env
_map(prefetch, it.chain(consts, jaxpr_literals(jaxpr)))
result_handlers = tuple(map(_pval_to_result_handler, pvals))
# Computations that only produce constants and/or only rearrange their inputs,
# which are often produced from partial evaluation, don't need compilation,
# and don't need to force their (potentially lazy) arguments.
if not jaxpr.eqns:
device = _get_device(device, backend)
return partial(_execute_trivial, jaxpr, device, consts, result_handlers)
log_priority = logging.WARNING if FLAGS.jax_log_compiles else logging.DEBUG
logging.log(log_priority,
"Compiling {} for args {}.".format(fun.__name__, abstract_args))
nreps = jaxpr_replicas(jaxpr)
if nreps > xb.device_count(backend):
msg = ("compiling computation that requires {} replicas, but only {} XLA "
"devices are available")
raise ValueError(msg.format(nreps, xb.device_count(backend)))
axis_env = AxisEnv(nreps, [], [])
if xb.host_count() > 1 and (nreps > 1 or jaxpr_has_pmap(jaxpr)):
raise NotImplementedError(
"jit of multi-host pmap not implemented (and jit-of-pmap can cause "
"extra data movement anyway, so maybe you don't want it after all).")
tuple_args = len(abstract_args) > 100 # pass long arg lists as tuple for TPU
c = xb.make_computation_builder("jit_{}".format(fun.__name__))
xla_consts = _map(c.Constant, consts)
xla_args = _xla_callable_args(c, abstract_args, tuple_args)
out_nodes = jaxpr_subcomp(c, jaxpr, backend, axis_env, xla_consts, (), *xla_args)
built = c.Build(c.Tuple(*out_nodes))
if device is not None and nreps > 1:
raise ValueError("can't specify device assignment for jit-of-pmap")
options = xb.get_compile_options(
num_replicas=nreps, device_assignment=(device.id,) if device else None)
compiled = built.Compile(compile_options=options, backend=xb.get_backend(backend))
if nreps == 1:
return partial(_execute_compiled, compiled, backend, result_handlers, tuple_args)
else:
return partial(_execute_replicated, compiled, backend, result_handlers, tuple_args)
def _xla_callable_args(c, avals, tuple_args):
if not tuple_args:
xla_args = [c.ParameterWithShape(aval_to_xla_shape(a))
if a is not abstract_token else c.CreateToken() for a in avals]
return xla_args
else:
tuple_param = c.ParameterWithShape(xc.Shape.tuple_shape(
[aval_to_xla_shape(a) for a in avals if a is not abstract_token]))
xla_inputs = iter(xla_destructure(c, tuple_param))
xla_args = [next(xla_inputs) if a is not abstract_token else c.CreateToken()
for a in avals]
assert next(xla_inputs, None) is None
return xla_args
def _pval_to_result_handler(pval):
pv, const = pval
if pv is None:
return lambda _: const
else:
return aval_to_result_handler(pv)
def _execute_compiled(compiled, backend, handlers, tuple_args, *args):
device, = compiled.local_devices()
input_bufs = [device_put(x, device) for x in args if x is not token]
if tuple_args:
input_bufs = [make_tuple(input_bufs, device, backend)]
out_bufs = compiled.Execute(input_bufs).destructure()
if FLAGS.jax_debug_nans: check_nans(xla_call_p, out_bufs)
return [handler(out_buf) for handler, out_buf in zip(handlers, out_bufs)]
def _execute_replicated(compiled, backend, handlers, tuple_args, *args):
input_bufs = [
[device_put(x, device) for x in args if x is not token]
for device in compiled.local_devices()]
if tuple_args:
input_bufs = [[make_tuple(bufs, device, backend)] for bufs, device in
zip(input_bufs, compiled.local_devices())]
out_bufs = compiled.ExecutePerReplica(input_bufs)[0].destructure()
if FLAGS.jax_debug_nans: check_nans(xla_call_p, out_bufs)
return [handler(out_buf) for handler, out_buf in zip(handlers, out_bufs)]
def _execute_trivial(jaxpr, device, consts, handlers, *args):
env = {core.unitvar : core.unit}
_map(env.setdefault, jaxpr.invars, args)
_map(env.setdefault, jaxpr.constvars, consts)
outs = [canonicalize_dtype(v.val) if type(v) is Literal else env[v]
for v in jaxpr.outvars]
return [x if type(x) is DeviceArray else handler(device_put(x, device))
for handler, x in zip(handlers, outs)]
def make_tuple(bufs, device, backend):
return xb.get_backend(backend).make_tuple(bufs, device)
@memoize
def _get_device(device, backend):
# TODO(mattjj): after jaxlib update, avoid compile here, just to get device
c = xb.make_computation_builder("get_device")
built = c.Build(c.Tuple())
options = xb.get_compile_options(
num_replicas=1, device_assignment=(device.id,) if device else None)
compiled = built.Compile(compile_options=options, backend=xb.get_backend(backend))
out, = compiled.local_devices()
return out
xla_call_p = core.Primitive('xla_call')
xla_call_p.multiple_results = True
xla_call = partial(core.call_bind, xla_call_p)
xla_call_p.def_custom_bind(xla_call)
xla_call_p.def_impl(_xla_call_impl)
def _xla_call_translation_rule(c, jaxpr, axis_env, const_nodes, freevar_nodes,
in_nodes, device=None, backend=None):
del device # Ignored.
subc = xb.make_computation_builder("jaxpr_subcomputation") # TODO(mattjj): name
consts = [subc.ParameterWithShape(c.GetShape(n)) for n in const_nodes]
freevars = [subc.ParameterWithShape(c.GetShape(n)) for n in freevar_nodes]
args = [subc.ParameterWithShape(c.GetShape(n)) for n in in_nodes]
out_nodes = jaxpr_subcomp(subc, jaxpr, backend, axis_env, consts, freevars, *args)
subc = subc.Build(subc.Tuple(*out_nodes))
return c.Call(subc, list(const_nodes) + list(freevar_nodes) + list(in_nodes))
ad.primitive_transposes[xla_call_p] = partial(ad.call_transpose, xla_call_p)
### translation tables
translations = {}
reduction_translations = {}
parallel_translations = {}
initial_style_translations = {}
call_translations = {}
backend_specific_translations = defaultdict(dict)
translations[core.identity_p] = lambda c, x: x
call_translations[xla_call_p] = _xla_call_translation_rule
def zeros_like_translation_rule(c, x):
shape = c.GetShape(x)
if shape.is_tuple():
assert not shape.tuple_shapes()
return c.Tuple()
else:
zero = c.Constant(onp.array(0, shape.element_type()))
return c.Broadcast(zero, shape.dimensions())
translations[ad_util.zeros_like_p] = zeros_like_translation_rule
def add_jaxvals_translation_rule(c, x, y):
shape = c.GetShape(x)
if shape.is_tuple():
assert not shape.tuple_shapes()
return x
else:
return c.Add(x, y)
translations[ad_util.add_jaxvals_p] = add_jaxvals_translation_rule
def lower_fun(fun, instantiate=False, initial_style=False):
"""Build a translation rule for a traceable function."""
def f(c, *args, **params):
backend = params.pop('backend', None)
if initial_style:
axis_env, xla_args = args[0], args[1:]
else:
axis_env, xla_args = AxisEnv(), args
xla_shapes = tuple(map(c.GetShape, xla_args))
avals = map(_aval_from_xla_shape, xla_shapes)
pvals = [pe.PartialVal((a, core.unit)) for a in avals]
jaxpr, _, consts = pe.trace_to_jaxpr(
lu.wrap_init(fun, params), pvals, instantiate=True)
consts = _map(c.Constant, consts)
outs = jaxpr_subcomp(c, jaxpr, backend, axis_env, consts, (), *xla_args)
return c.Tuple(*outs)
return f
def _aval_from_xla_shape(xla_shape):
if xla_shape.is_tuple() and not xla_shape.tuple_shapes():
return core.abstract_unit
else:
return ShapedArray(xla_shape.dimensions(), xla_shape.element_type())
### device-persistent data
class Token(object): pass
token = Token()
pytype_aval_mappings[Token] = lambda _: abstract_token
core.pytype_aval_mappings[Token] = lambda _: abstract_token
xla_shape_handlers[AbstractToken] = lambda _: xc.Shape.token_shape()
xla_result_handlers[AbstractToken] = lambda _: lambda _: token
canonicalize_dtype_handlers[Token] = identity
class DeviceValue(object):
"""A DeviceValue represents a value backed by device memory."""
__slots__ = ["aval", "device_buffer", "__weakref__"]
def __init__(self, aval, device_buffer):
self.aval = aval
self.device_buffer = device_buffer
def _check_if_deleted(self):
if self.device_buffer is None:
raise ValueError("DeviceValue has been deleted.")
def block_until_ready(self):
"""Blocks the caller until the buffer's value has been computed on device.
This method is mostly useful for timing microbenchmarks that wish to
time how long a computation takes, without transferring the result back
to the host.
Returns the buffer object (`self`).
"""
self._check_if_deleted()
self.device_buffer.block_host_until_ready()
return self
def _forward_method(attrname, self, fun, *args):
return fun(getattr(self, attrname), *args)
_forward_to_value = partial(_forward_method, "_value")
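# _forward_to_value(self, fun, *args) applies `fun` to the materialized numpy value of a
# DeviceArray; partialmethod(_forward_to_value, fun) is used below to forward dunder
# methods (str, int, float, ...) to that host-side value.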
class DeviceArray(DeviceValue):
"""A DeviceArray is an ndarray backed by a single device memory buffer."""
# We don't subclass ndarray because that would open up a host of issues,
# but lax_numpy.py overrides isinstance behavior and attaches ndarray methods.
__slots__ = ["_npy_value"]
__array_priority__ = 100
def __init__(self, aval, device_buffer):
self.aval = aval
self.device_buffer = device_buffer
self._npy_value = None
if not core.skip_checks:
assert type(aval) is ShapedArray
npy_value = self._value
assert npy_value.dtype == aval.dtype and npy_value.shape == aval.shape
@property
def _value(self):
self._check_if_deleted()
if self._npy_value is None:
self._npy_value = self.device_buffer.to_py()
self._npy_value.flags.writeable = False
return self._npy_value
@property
def shape(self):
return self.aval.shape
@property
def dtype(self):
return self.aval.dtype
@property
def size(self):
return prod(self.aval.shape)
@property
def ndim(self):
return len(self.aval.shape)
def copy(self):
"""Returns an ndarray (backed by host memory, not device memory)."""
return onp.asarray(self)
def copy_to_host_async(self):
"""Requests a copy of the buffer to the host."""
self._check_if_deleted()
if self._npy_value is None:
self.device_buffer.copy_to_host_async()
def delete(self):
"""Deletes the device array and any cached copy on the host.
It is an error to access the contents of a `DeviceArray` after it has
been deleted.
Use of this method is optional; device buffers will be reclaimed
automatically by Python when a DeviceArray object is garbage collected.
However, it is sometimes useful to have more explicit control over the
time of deletion.
"""
self.device_buffer.delete()
self.device_buffer = None
self._npy_value = None
def __repr__(self):
line_width = onp.get_printoptions()['linewidth']
prefix = '{}('.format(self.__class__.__name__)
s = onp.array2string(self._value, prefix=prefix, suffix=',',
separator=', ', max_line_width=line_width)
dtype_str = 'dtype={})'.format(self.dtype.name)
last_line_len = len(s) - s.rfind('\n') + 1
sep = ' '
if last_line_len + len(dtype_str) + 1 > line_width:
sep = ' ' * len(prefix)
return "{}{},{}{}".format(prefix, s, sep, dtype_str)
def item(self):
if dtypes.issubdtype(self.dtype, onp.complexfloating):
return complex(self)
elif dtypes.issubdtype(self.dtype, onp.floating):
return float(self)
elif dtypes.issubdtype(self.dtype, onp.integer):
return int(self)
elif dtypes.issubdtype(self.dtype, onp.bool_):
return bool(self)
else:
raise TypeError(self.dtype)
def __len__(self):
try:
return self.aval.shape[0]
except IndexError:
raise TypeError("len() of unsized object") # same as numpy error
def __iter__(self):
if self.ndim == 0:
raise TypeError("iteration over a 0-d array") # same as numpy error
else:
return self._value.__iter__()
def __reversed__(self):
if self.ndim == 0:
raise TypeError("iteration over a 0-d array")
else:
return reversed(self._value)
def __format__(self, format_spec):
# Simulates behavior of https://github.com/numpy/numpy/pull/9883
if self.ndim == 0:
return format(self._value[()], format_spec)
else:
return format(self._value, format_spec)
def __array__(self, dtype=None, context=None):
return onp.asarray(self._value, dtype=dtype)
__str__ = partialmethod(_forward_to_value, str)
__bool__ = __nonzero__ = partialmethod(_forward_to_value, bool)
__float__ = partialmethod(_forward_to_value, float)
__int__ = partialmethod(_forward_to_value, int)
if six.PY2:
__long__ = partialmethod(_forward_to_value, long) # noqa: F821
__complex__ = partialmethod(_forward_to_value, complex)
__hex__ = partialmethod(_forward_to_value, hex)
__oct__ = partialmethod(_forward_to_value, oct)
__index__ = partialmethod(_forward_to_value, op.index)
# pickle saves and loads just like an ndarray
__reduce__ = partialmethod(_forward_to_value, op.methodcaller("__reduce__"))
# clobbered when jax.numpy is imported, but useful in tests
def __eq__(self, other): return self._value == other
def __hash__(self):
raise TypeError("JAX DeviceArray, like numpy.ndarray, is not hashable.")
core.literalable_types.add(DeviceArray)
core.pytype_aval_mappings[DeviceArray] = ConcreteArray
pytype_aval_mappings[DeviceArray] = lambda x: x.aval
canonicalize_dtype_handlers[DeviceArray] = identity
def _device_array_constant_handler(c, val, canonicalize_types=True):
return c.Constant(onp.asarray(val), canonicalize_types=canonicalize_types)
xb.register_constant_handler(DeviceArray, _device_array_constant_handler)
def _device_put_device_array(x, device):
# TODO(skye): we're assuming the DeviceBuffers without "platform" are
# XrtBuffers. Figure out a less risky way to deal with XrtBuffers.
if (not hasattr(x.device_buffer, "platform") or
xb.get_device_backend(device).platform == x.device_buffer.platform()):
if device is None or x.device_buffer.device() == device:
return x.device_buffer
else:
return x.device_buffer.copy_to_device(device)
else:
# Buffers from different XLA backends are passed through the host.
return xc.Buffer.from_pyval(x, device, backend=xb.get_device_backend(device))
device_put_handlers[DeviceArray] = _device_put_device_array
def _device_put_impl(x, device=None):
try:
a = abstractify(x)
except TypeError:
raise TypeError("Argument '{}' of type {} is not a valid JAX type"
.format(x, type(x)))
handler = aval_to_result_handler(a)
return handler(device_put(x, device))
device_put_p = core.Primitive('device_put')
device_put_p.def_impl(_device_put_impl)
pe.custom_partial_eval_rules[device_put_p] = lambda trace, x, **params: x
ad.deflinear(device_put_p, lambda cotangent, **kwargs: [cotangent])
def _remat_translation_rule(c, jaxpr, axis_env, const_nodes, freevar_nodes, in_nodes,
backend=None, device=None, concrete=None):
# This looks a lot like _xla_call_translation_rule, except for a widget we use
# to foil CSE.
del device, concrete # Unused.
subc = xb.make_computation_builder("remat_call_subcomputation")
consts = [subc.ParameterWithShape(c.GetShape(n)) for n in const_nodes]
freevars = [subc.ParameterWithShape(c.GetShape(n)) for n in freevar_nodes]
args = [subc.ParameterWithShape(c.GetShape(n)) for n in in_nodes]
args = [_foil_cse(subc, x) for x in args]
out_nodes = jaxpr_subcomp(subc, jaxpr, backend, axis_env, consts, freevars, *args)
subc = subc.Build(subc.Tuple(*out_nodes))
return c.Call(subc, list(const_nodes) + list(freevar_nodes) + list(in_nodes))
call_translations[pe.remat_call_p] = _remat_translation_rule
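# _foil_cse wraps a value in a Select whose predicate (a random normal compared against the
# float32 max) is always true at run time but opaque to the compiler, preventing XLA from
# common-subexpression-eliminating the rematerialized values away.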
def _foil_cse(c, x):
xla_shape = c.GetShape(x)
if xla_shape.is_tuple():
assert not xla_shape.tuple_shapes()
return x
else:
rng = c.RngNormal(c.Constant(onp.array(0, dtype=onp.float32)),
c.Constant(onp.array(1, dtype=onp.float32)),
[])
pred = c.Lt(rng, c.Constant(onp.finfo(onp.float32).max))
shape, dtype = xla_shape.dimensions(), xla_shape.numpy_dtype()
zero = c.Broadcast(c.Constant(onp.array(0, dtype=dtype)), shape)
return c.Select(pred, x, zero)
### lazy constants
class DeviceConstant(DeviceArray):
def copy_to_host_async(self): pass
@staticmethod
def constant_handler(c, constant_instance, canonicalize_types=True):
assert False
def _instantiate_device_constant(const, device=None, backend=None, cutoff=1e6):
# dispatch an XLA Computation to build the constant on the device if it's
# large, or alternatively build it on the host and transfer it if it's small
assert isinstance(const, DeviceConstant)
if const.size > cutoff:
c = xb.make_computation_builder("constant_instantiating_computation")
xla_const = const.constant_handler(c, const)
device_assignment = (device.id,) if device else None
opts = xb.get_compile_options(device_assignment=device_assignment)
compiled = c.Build(xla_const).Compile((), opts, backend=xb.get_backend(backend))
return compiled.Execute(())
else:
return xc.Buffer.from_pyval(onp.asarray(const), device,
backend=xb.get_backend(backend))
|
the-stack_0_22924 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Uniform distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution # pylint: disable=line-too-long
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util # pylint: disable=line-too-long
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
class Uniform(distribution.ContinuousDistribution):
"""Uniform distribution with `a` and `b` parameters.
The PDF of this distribution is constant between [`a`, `b`], and 0 elsewhere.
"""
def __init__(self, a=0.0, b=1.0, name="Uniform"):
"""Construct Uniform distributions with `a` and `b`.
The parameters `a` and `b` must be shaped in a way that supports
broadcasting (e.g. `b - a` is a valid operation).
Here are examples without broadcasting:
```python
# Without broadcasting
u1 = Uniform(3.0, 4.0) # a single uniform distribution [3, 4]
u2 = Uniform([1.0, 2.0], [3.0, 4.0]) # 2 distributions [1, 3], [2, 4]
u3 = Uniform([[1.0, 2.0],
[3.0, 4.0]],
[[1.5, 2.5],
[3.5, 4.5]]) # 4 distributions
```
And with broadcasting:
```python
u1 = Uniform(3.0, [5.0, 6.0, 7.0]) # 3 distributions
```
Args:
a: `float` or `double` tensor, the minimum endpoint.
b: `float` or `double` tensor, the maximum endpoint. Must be > `a`.
name: The name to prefix Ops created by this distribution class.
Raises:
InvalidArgumentError: if `a >= b`.
"""
with ops.op_scope([a, b], name):
with ops.control_dependencies([check_ops.assert_less(a, b)]):
a = array_ops.identity(a, name="a")
b = array_ops.identity(b, name="b")
self._a = a
self._b = b
self._name = name
self._batch_shape = self._ones().get_shape()
self._event_shape = tensor_shape.TensorShape([])
contrib_tensor_util.assert_same_float_dtype((a, b))
@property
def name(self):
return self._name
@property
def dtype(self):
return self.a.dtype
def batch_shape(self, name="batch_shape"):
with ops.name_scope(self.name):
with ops.op_scope([], name):
return array_ops.shape(self._ones())
def get_batch_shape(self):
return self._batch_shape
def event_shape(self, name="event_shape"):
with ops.name_scope(self.name):
with ops.op_scope([], name):
return constant_op.constant([], dtype=dtypes.int32)
def get_event_shape(self):
return self._event_shape
@property
def a(self):
return self._a
@property
def b(self):
return self._b
def pdf(self, x, name="pdf"):
"""The PDF of observations in `x` under these Uniform distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `a` and `b`.
name: The name to give this op.
Returns:
pdf: tensor of dtype `dtype`, the pdf values of `x`. If `x` is `nan`, will
return `nan`.
"""
with ops.name_scope(self.name):
with ops.op_scope([self.a, self.b, x], name):
x = ops.convert_to_tensor(x, name="x")
if x.dtype != self.dtype:
raise TypeError("Input x dtype does not match dtype: %s vs. %s" %
(x.dtype, self.dtype))
broadcasted_x = x * self._ones()
return math_ops.select(
math_ops.is_nan(broadcasted_x), broadcasted_x, math_ops.select(
math_ops.logical_or(broadcasted_x < self.a,
broadcasted_x > self.b),
array_ops.zeros_like(broadcasted_x),
(1.0 / self.range()) * array_ops.ones_like(broadcasted_x)))
def log_pdf(self, x, name="log_pdf"):
return super(Uniform, self).log_pdf(x, name)
def cdf(self, x, name="cdf"):
"""CDF of observations in `x` under these Uniform distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `a` and `b`.
name: The name to give this op.
Returns:
cdf: tensor of dtype `dtype`, the CDFs of `x`. If `x` is `nan`, will
return `nan`.
"""
with ops.name_scope(self.name):
with ops.op_scope([self.a, self.b, x], name):
x = ops.convert_to_tensor(x, name="x")
if x.dtype != self.dtype:
raise TypeError("Input x dtype does not match dtype: %s vs. %s" %
(x.dtype, self.dtype))
broadcasted_x = x * self._ones()
zeros = array_ops.zeros_like(x + self.a + self.b, dtype=self.dtype)
ones = array_ops.ones_like(x + self.a + self.b, dtype=self.dtype)
result_if_not_big = math_ops.select(
x < self.a, zeros, (broadcasted_x - self.a) / self.range())
return math_ops.select(x >= self.b, ones, result_if_not_big)
def log_cdf(self, x, name="log_cdf"):
with ops.name_scope(self.name):
with ops.op_scope([self.a, self.b, x], name):
x = ops.convert_to_tensor(x, name="x")
return math_ops.log(self.cdf(x))
def entropy(self, name="entropy"):
"""The entropy of Uniform distribution(s).
Args:
name: The name to give this op.
Returns:
entropy: tensor of dtype `dtype`, the entropy.
"""
with ops.name_scope(self.name):
with ops.op_scope([self.a, self.b, self.range()], name):
return math_ops.log(self.range())
def sample(self, n, seed=None, name="sample"):
"""Sample `n` observations from the Uniform Distributions.
Args:
n: `Scalar`, type int32, the number of observations to sample.
seed: Python integer, the random seed.
name: The name to give this op.
Returns:
samples: a `Tensor` of shape `(n,) + self.batch_shape + self.event_shape`
with values of type `self.dtype`.
"""
with ops.name_scope(self.name):
with ops.op_scope([self.a, self.b, n], name):
n = ops.convert_to_tensor(n, name="n")
n_val = tensor_util.constant_value(n)
shape = array_ops.concat(0, [array_ops.pack([n]), self.batch_shape()])
samples = random_ops.random_uniform(shape=shape,
dtype=self.dtype,
seed=seed)
# Provide some hints to shape inference
inferred_shape = tensor_shape.vector(n_val).concatenate(
self.get_batch_shape())
samples.set_shape(inferred_shape)
return (array_ops.expand_dims(self.a, 0) + array_ops.expand_dims(
self.range(), 0) * samples)
def mean(self, name="mean"):
with ops.name_scope(self.name):
with ops.op_scope([self._a, self._b], name):
return (self.a + self.b) / 2
def variance(self, name="variance"):
with ops.name_scope(self.name):
with ops.op_scope([self.range()], name):
return math_ops.square(self.range()) / 12.
def std(self, name="std"):
with ops.name_scope(self.name):
with ops.op_scope([self.range()], name):
return self.range() / math_ops.sqrt(12.)
def range(self, name="range"):
"""`b - a`."""
with ops.name_scope(self.name):
with ops.op_scope([self.a, self.b], name):
return self.b - self.a
@property
def is_reparameterized(self):
return True
# TODO(rsepassi): Find a more efficient way of doing the broadcasting in_ones
# and _zeros.
def _ones(self):
return array_ops.ones_like(self.a + self.b)
def _zeros(self):
return array_ops.zeros_like(self.a + self.b)
|
the-stack_0_22926 | import collections
from datetime import timedelta
from io import StringIO
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import needs_i8_conversion
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
Interval,
IntervalIndex,
Series,
Timedelta,
TimedeltaIndex,
)
import pandas._testing as tm
from pandas.tests.base.common import allow_na_ops
def test_value_counts(index_or_series_obj):
obj = index_or_series_obj
obj = np.repeat(obj, range(1, len(obj) + 1))
result = obj.value_counts()
counter = collections.Counter(obj)
expected = Series(dict(counter.most_common()), dtype=np.int64, name=obj.name)
expected.index = expected.index.astype(obj.dtype)
if isinstance(obj, pd.MultiIndex):
expected.index = Index(expected.index)
# TODO: Order of entries with the same count is inconsistent on CI (gh-32449)
if obj.duplicated().any():
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_value_counts_null(null_obj, index_or_series_obj):
orig = index_or_series_obj
obj = orig.copy()
if not allow_na_ops(obj):
pytest.skip("type doesn't allow for NA operations")
elif len(obj) < 1:
pytest.skip("Test doesn't make sense on empty data")
elif isinstance(orig, pd.MultiIndex):
pytest.skip(f"MultiIndex can't hold '{null_obj}'")
values = obj.values
if needs_i8_conversion(obj.dtype):
values[0:2] = iNaT
else:
values[0:2] = null_obj
klass = type(obj)
repeated_values = np.repeat(values, range(1, len(values) + 1))
obj = klass(repeated_values, dtype=obj.dtype)
# because np.nan == np.nan is False, but None == None is True
# np.nan would be duplicated, whereas None wouldn't
counter = collections.Counter(obj.dropna())
expected = Series(dict(counter.most_common()), dtype=np.int64)
expected.index = expected.index.astype(obj.dtype)
result = obj.value_counts()
if obj.duplicated().any():
# TODO:
# Order of entries with the same count is inconsistent on CI (gh-32449)
expected = expected.sort_index()
result = result.sort_index()
tm.assert_series_equal(result, expected)
# can't use expected[null_obj] = 3 as
# IntervalIndex doesn't allow assignment
new_entry = Series({np.nan: 3}, dtype=np.int64)
expected = expected.append(new_entry)
result = obj.value_counts(dropna=False)
if obj.duplicated().any():
# TODO:
# Order of entries with the same count is inconsistent on CI (gh-32449)
expected = expected.sort_index()
result = result.sort_index()
tm.assert_series_equal(result, expected)
def test_value_counts_inferred(index_or_series):
klass = index_or_series
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=["b", "a", "d", "c"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(np.unique(np.array(s_values, dtype=np.object_)))
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.unique(np.array(s_values, dtype=np.object_))
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 4
# don't sort, have to sort after the fact as not sorting is
# platform-dep
hist = s.value_counts(sort=False).sort_values()
expected = Series([3, 1, 4, 2], index=list("acbd")).sort_values()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list("cdab"))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([0.4, 0.3, 0.2, 0.1], index=["b", "a", "d", "c"])
tm.assert_series_equal(hist, expected)
def test_value_counts_bins(index_or_series):
klass = index_or_series
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
# bins
msg = "bins argument only works with numeric data"
with pytest.raises(TypeError, match=msg):
s.value_counts(bins=1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({Interval(0.997, 3.0): 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({Interval(0.997, 3.0): 1.0})
tm.assert_series_equal(res1n, exp1n)
if isinstance(s1, Index):
tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))
else:
exp = np.array([1, 2, 3], dtype=np.int64)
tm.assert_numpy_array_equal(s1.unique(), exp)
assert s1.nunique() == 3
# these return the same
res4 = s1.value_counts(bins=4, dropna=True)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 1, 3, 2]))
tm.assert_series_equal(res4, exp4)
res4 = s1.value_counts(bins=4, dropna=False)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 1, 3, 2]))
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series([0.5, 0.25, 0.25, 0], index=intervals.take([0, 1, 3, 2]))
tm.assert_series_equal(res4n, exp4n)
# handle NA's properly
s_values = ["a", "b", "b", "b", np.nan, np.nan, "d", "d", "a", "a", "b"]
s = klass(s_values)
expected = Series([4, 3, 2], index=["b", "a", "d"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(["a", "b", np.nan, "d"])
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.array(["a", "b", np.nan, "d"], dtype=object)
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 3
s = klass({}) if klass is dict else klass({}, dtype=object)
expected = Series([], dtype=np.int64)
tm.assert_series_equal(s.value_counts(), expected, check_index_type=False)
# returned dtype differs depending on original
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), Index([]), exact=False)
else:
tm.assert_numpy_array_equal(s.unique(), np.array([]), check_dtype=False)
assert s.nunique() == 0
def test_value_counts_datetime64(index_or_series):
klass = index_or_series
# GH 3002, datetime64[ns]
# don't test names though
txt = "\n".join(
[
"xxyyzz20100101PIE",
"xxyyzz20100101GUM",
"xxyyzz20100101EGG",
"xxyyww20090101EGG",
"foofoo20080909PIE",
"foofoo20080909GUM",
]
)
f = StringIO(txt)
df = pd.read_fwf(
f, widths=[6, 8, 3], names=["person_id", "dt", "food"], parse_dates=["dt"]
)
s = klass(df["dt"].copy())
s.name = None
idx = pd.to_datetime(
["2010-01-01 00:00:00", "2008-09-09 00:00:00", "2009-01-01 00:00:00"]
)
expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np_array_datetime64_compat(
["2010-01-01 00:00:00", "2009-01-01 00:00:00", "2008-09-09 00:00:00"],
dtype="datetime64[ns]",
)
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), DatetimeIndex(expected))
else:
tm.assert_numpy_array_equal(s.unique(), expected)
assert s.nunique() == 3
# with NaT
s = df["dt"].copy()
s = klass(list(s.values) + [pd.NaT])
result = s.value_counts()
assert result.index.dtype == "datetime64[ns]"
tm.assert_series_equal(result, expected_s)
result = s.value_counts(dropna=False)
# GH 35922. NaN-like now sorts to the beginning of duplicate counts
idx = pd.to_datetime(
["2010-01-01 00:00:00", "2008-09-09 00:00:00", pd.NaT, "2009-01-01 00:00:00"]
)
expected_s = Series([3, 2, 1, 1], index=idx)
tm.assert_series_equal(result, expected_s)
unique = s.unique()
assert unique.dtype == "datetime64[ns]"
# numpy_array_equal cannot compare pd.NaT
if isinstance(s, Index):
exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT])
tm.assert_index_equal(unique, exp_idx)
else:
tm.assert_numpy_array_equal(unique[:3], expected)
assert pd.isna(unique[3])
assert s.nunique() == 3
assert s.nunique(dropna=False) == 4
# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
td = klass(td, name="dt")
result = td.value_counts()
expected_s = Series([6], index=[Timedelta("1day")], name="dt")
tm.assert_series_equal(result, expected_s)
expected = TimedeltaIndex(["1 days"], name="dt")
if isinstance(td, Index):
tm.assert_index_equal(td.unique(), expected)
else:
tm.assert_numpy_array_equal(td.unique(), expected.values)
td2 = timedelta(1) + (df.dt - df.dt)
td2 = klass(td2, name="dt")
result2 = td2.value_counts()
tm.assert_series_equal(result2, expected_s)
@pytest.mark.parametrize("dropna", [True, False])
def test_value_counts_with_nan(dropna, index_or_series):
# GH31944
klass = index_or_series
values = [True, pd.NA, np.nan]
s = klass(values)
res = s.value_counts(dropna=dropna)
if dropna is True:
expected = Series([1], index=[True])
else:
expected = Series([2, 1], index=[pd.NA, True])
tm.assert_series_equal(res, expected)
|
the-stack_0_22928 | from src.util import Color
from src.settings import Settings, ColumnConfig
class Cell(object):
def __init__(self, column_config, item):
self.justify = column_config.justify
self.format = column_config.fmt
item_key = item[0] if isinstance(item, (list, tuple)) else item
column_colors = Settings.CELL_COLORS.get(column_config.name) or {}
self._primary_color = column_colors.get(item_key)
self._backup_color = column_colors.get(None)
replacements = Settings.CELL_REPLACEMENTS.get(column_config.name) or {}
self.item = self._render_cell_value(replacements.get(item_key, item))
self.width = max(len(l) for l in self.item)
self.height = len(self.item)
def render(self, width, line_num):
line = self.item[line_num] if line_num < self.height else ''
padding = ' ' * (width - len(line))
return (
(padding if self.justify == ColumnConfig.RIGHT else '')
+ (self._colorize(line) if line else '')
+ (padding if self.justify == ColumnConfig.LEFT else '')
)
def _render_cell_value(self, item):
if isinstance(item, bool):
return [Settings.TRUE if item else Settings.FALSE]
elif isinstance(item, (int, float)):
return [self.format.format(item)]
elif isinstance(item, (list, set)):
return [','.join(item)]
else:
return str(item).split('\n')
def _colorize(self, line):
color = self._primary_color or self._backup_color
if not color:
return line
if isinstance(color, str):
return Color.paint(color, line)
else:
fg, bg = color
return Color.paint(fg, bg, line)
class CompositeCell(object):
PADDING = 1
def __init__(self, column_config, items):
self.justify = column_config.justify
self.cells = []
self.width = 0
self.height = 1
for idx, item in enumerate(items):
name = column_config.name[idx]
config = ColumnConfig(name)
if item is None:
continue
cell = Cell(config, item)
self.cells.append(cell)
if cell.width > 0:
self.width += (cell.width + self.PADDING)
self.height = max(self.height, cell.height)
self.width = max(self.width - self.PADDING, 0)
def render(self, width, line_num):
line_parts = []
line_width = 0
for cell in self.cells:
cell_render = cell.render(0, line_num)
if cell_render:
line_parts.append(cell_render)
line_width += cell.width
line = (' ' * self.PADDING).join(line_parts)
line_width += max((len(line_parts) * self.PADDING - 1), 0)
padding = ' ' * (width - line_width)
return (
(padding if self.justify == ColumnConfig.RIGHT else '')
+ line
+ (padding if self.justify == ColumnConfig.LEFT else '')
)
class HeaderCell(Cell):
def __init__(self, column_config):
super().__init__(column_config, column_config.display_name)
self._primary_color = Settings.THEME_COLOR
def _render_cell_value(self, item):
return [item]
class EmptyCell(Cell):
height = 1
width = 0
@classmethod
def render(cls, width, line_num):
return ' ' * width
class DividerCell(EmptyCell):
@classmethod
def render(cls, width, line_num):
return Color.paint(Settings.THEME_COLOR, '-' * width)
|
the-stack_0_22929 | import numpy as np
import torch
import torch.optim as optim
import tqdm
import time
import os
from collections import defaultdict
from tensorboardX import SummaryWriter
from model_training.common.losses import get_loss
from model_training.common.metrics import get_metric
class Trainer:
def __init__(self, model, config, train_dl, val_dl, device):
self.model = model
self.config = config
self.train_dl = train_dl
self.val_dl = val_dl
self.device = device
if not os.path.exists(config["log_path"]):
os.mkdir(config["log_path"])
def train(self):
self._init_params()
for epoch in range(self.epochs):
train_loss = self._run_epoch(epoch)
val_loss, metrics = self._validate()
self.scheduler.step(val_loss)
self._set_checkpoint(val_loss)
print(f"\nEpoch: {epoch}; train loss = {train_loss}; validation loss = {val_loss}")
self._write_to_tensorboard(epoch, train_loss, val_loss, metrics)
def _save_checkpoint(self, file_prefix):
torch.save(
{
'model': self.model.state_dict()
},
os.path.join(self.log_path, '{}.h5'.format(file_prefix)))
def _set_checkpoint(self, val_loss):
""" Saves model weights in the last checkpoint.
Also, model is saved as the best model if model has the best loss
"""
if val_loss < self.best_loss:
self.best_loss = val_loss
self._save_checkpoint(file_prefix='best')
self._save_checkpoint(file_prefix='last')
def _init_params(self):
self.epochs = self.config["num_epochs"]
self.criterion = get_loss(self.config['loss'])
self.optimizer = self._get_optimizer()
self.scheduler = self._get_scheduler()
self.metrics = {metric_name: get_metric(metric_name, device=self.device) for metric_name in
self.config["metrics"]}
self.log_path = os.path.join(self.config['log_path'], f'train-{time.time()}')
os.mkdir(self.log_path)
self.writer = SummaryWriter(self.log_path)
self.best_loss = float("inf")
self.model.to(self.device)
def _run_epoch(self, epoch):
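        """Run one pass over ``self.train_dl`` and return the mean training loss."""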
self.model.train()
losses = []
lr = self.optimizer.param_groups[0]['lr']
status_bar = tqdm.tqdm(total=len(self.train_dl))
status_bar.set_description(f'Epoch {epoch}, lr {lr}')
for X, y in self.train_dl:
self.model.zero_grad()
X, y = X.to(self.device), y.to(self.device)
y_pred, _ = self.model(X)
loss = self.criterion(y_pred, y)
loss.backward()
self.optimizer.step()
losses.append(loss.item())
status_bar.update()
status_bar.set_postfix(loss=losses[-1])
status_bar.close()
return np.mean(losses)
def _validate(self):
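        """Evaluate on ``self.val_dl``; return the mean loss and a dict of averaged metrics."""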
self.model.eval()
losses, metrics = [], defaultdict(list)
status_bar = tqdm.tqdm(total=len(self.val_dl))
with torch.no_grad():
for X, y in self.val_dl:
X, y = X.to(self.device), y.to(self.device)
y_pred, _ = self.model(X)
loss = self.criterion(y_pred, y)
losses.append(loss.item())
for metric_name in self.metrics:
metrics[metric_name].append(self.metrics[metric_name](y_pred, y))
status_bar.update()
status_bar.set_postfix(loss=losses[-1])
status_bar.close()
return np.mean(losses), dict(zip(metrics.keys(), map(np.mean, metrics.values())))
def _get_scheduler(self):
""" Creates scheduler for a given optimizer from Trainer config
Returns:
torch.optim.lr_scheduler._LRScheduler: optimizer scheduler
"""
scheduler_config = self.config['scheduler']
if scheduler_config['name'] == 'plateau':
scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer,
mode=scheduler_config['mode'],
patience=scheduler_config['patience'],
factor=scheduler_config['factor'],
min_lr=scheduler_config['min_lr'])
else:
raise ValueError(f"Scheduler [{scheduler_config['name']}] not recognized.")
return scheduler
def _get_optimizer(self):
""" Creates model optimizer from Trainer config
Returns:
torch.optim.optimizer.Optimizer: model optimizer
"""
optimizer_config = self.config['optimizer']
params = self._get_params()
if optimizer_config['name'] == 'adam':
optimizer = optim.Adam(params, lr=optimizer_config['lr'],
weight_decay=optimizer_config.get('weight_decay', 0))
elif optimizer_config['name'] == 'sgd':
optimizer = optim.SGD(params,
lr=optimizer_config['lr'],
momentum=optimizer_config.get('momentum', 0),
weight_decay=optimizer_config.get('weight_decay', 0))
else:
raise ValueError(f"Optimizer [{optimizer_config['name']}] not recognized.")
return optimizer
def _write_to_tensorboard(self, epoch, train_loss, val_loss, val_metrics):
        for scalar_prefix, loss in zip(('Train', 'Validation'), (train_loss, val_loss)):
self.writer.add_scalar(f'{scalar_prefix}_Loss', loss, epoch)
for metric_name in val_metrics:
self.writer.add_scalar(f'Validation_{metric_name}', val_metrics[metric_name], epoch)
def _get_params(self):
return self.model.parameters()
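# Illustrative usage sketch (the model/dataloader names are placeholders, not part
# of this module; the config keys follow the accesses above):
#
#   config = {"num_epochs": 10, "loss": {...}, "metrics": ["accuracy"],
#             "optimizer": {"name": "adam", "lr": 1e-3},
#             "scheduler": {"name": "plateau", "mode": "min", "patience": 3,
#                           "factor": 0.5, "min_lr": 1e-6},
#             "log_path": "./logs"}
#   Trainer(model, config, train_dl, val_dl, device="cuda").train()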
|
the-stack_0_22930 | #-----------------------------------------------------------------------------
# Copyright (c) 2013-2021, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
"""
Logging module for PyInstaller
"""
__all__ = ['getLogger', 'INFO', 'WARN', 'DEBUG', 'TRACE', 'ERROR', 'FATAL']
import logging
from logging import getLogger, INFO, WARN, DEBUG, ERROR, FATAL
TRACE = logging.TRACE = DEBUG - 5
logging.addLevelName(TRACE, 'TRACE')
FORMAT = '%(relativeCreated)d %(levelname)s: %(message)s'
logging.basicConfig(format=FORMAT, level=logging.INFO)
logger = getLogger('PyInstaller')
def __add_options(parser):
levels = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL')
parser.add_argument('--log-level',
choices=levels, metavar="LEVEL",
default='INFO',
dest='loglevel',
help=('Amount of detail in build-time console messages. '
'LEVEL may be one of %s (default: %%(default)s).'
% ', '.join(levels))
)
def __process_options(parser, opts):
try:
level = getattr(logging, opts.loglevel.upper())
except AttributeError:
parser.error('Unknown log level `%s`' % opts.loglevel)
else:
logger.setLevel(level)
|
the-stack_0_22931 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# -*- encoding: utf8 -*-
# -*- decoding: utf-8 -*-
import rospy
from geometry_msgs.msg import Vector3, Twist, PoseWithCovarianceStamped
from nav_msgs.msg import Odometry
from tf.transformations import euler_from_quaternion, quaternion_from_euler
import math
import numpy as np
from math import pi
from filter.kalman_filter import KalmanFilter, ExtendedKalmanFilter
from global_odom import GlobalOdom
from tf_broadcaster import TfBroadcaster
class Initialization:
def __init__(self, n_sample=30):
sub_uwb = rospy.Subscriber("/robot_pose/uwb", Odometry, self.callback_uwb, n_sample)
self.n_sample = n_sample
self.uwb = [0.0, 0.0]
self.uwb_buf = []
self.buf_x = []
self.buf_y = []
# Calculate initial pose
p1 = self.get_sample_mean()
self.forward()
# -- Line fitting -- #
k, m = np.polyfit(self.buf_x, self.buf_y, 1)
p1_fit = [self.buf_x[0], self.buf_x[0]*k + m]
p2_fit = [self.buf_x[-1], self.buf_x[-1]*k + m]
t_fit = math.atan2(p2_fit[1]-p1_fit[1], p2_fit[0]-p1_fit[0])
# ------------------ #
p2 = self.get_sample_mean()
t = math.atan2(p2[1]-p1[1], p2[0]-p1[0])
print(np.degrees(t), np.degrees(t_fit))
print("INFO - initial pose", p2[0], p2[1], np.degrees(t))
self.pose = p2
self.t = t # radian
sub_uwb.unregister()
def callback_uwb(self, msg, n_sample):
pose = msg.pose.pose.position
self.uwb[0] = pose.x
self.uwb[1] = pose.y
if len(self.uwb_buf) < n_sample:
self.uwb_buf.append([pose.x, pose.y, pose.z])
else:
self.uwb_buf.pop(0)
self.uwb_buf.append([pose.x, pose.y, pose.z])
if len(self.buf_x) < 50:
self.buf_x.append(pose.x)
self.buf_y.append(pose.y)
else:
self.buf_x.pop(0)
self.buf_y.pop(0)
self.buf_x.append(pose.x)
self.buf_y.append(pose.y)
def get_sample_mean(self):
self.uwb_buf = []
while (len(self.uwb_buf) < self.n_sample):
if rospy.is_shutdown():
break
mean = np.mean(self.uwb_buf, axis=0)
return mean
def forward(self, vx=0.2, t=5):
pub_move = rospy.Publisher("/cmd_vel", Twist, queue_size=10)
rospy.sleep(0.5)
twist = Twist()
twist.linear.x = vx
pub_move.publish(twist)
rospy.sleep(t)
twist.linear.x = 0.0
pub_move.publish(twist)
rospy.sleep(0.5)
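# Localization fuses wheel odometry (EKF prediction), UWB position fixes (KF update
# on x/y) and AMCL pose estimates (EKF update on x/y/theta) into a single
# [x, y, theta] state, republished on /robot_pose/kf.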
class Localization:
def __init__(self, init_pose, init_t):
self.br = TfBroadcaster()
self.uwb_kf = KalmanFilter(2)
self.amcl_kf = ExtendedKalmanFilter(3)
self.amcl_covariance_is_high = False
self.x = np.matrix([
[init_pose[0]],
[init_pose[1]],
[init_t]
])
self.P = np.diag([0.1, 0.1, 0.1])
self.buf_pose = []
self.t = 0.0
self.t_threshold = 10.0
rospy.Subscriber("/robot_pose/uwb", Odometry, self.callback_uwb)
# rospy.Subscriber("/robot_pose/optitrack", Vector3, self.callback_opti)
### Turn on TF broadcast
rospy.Subscriber("/odom", Odometry, self.callback_odom)
### Turn on AMCL
rospy.Subscriber("/amcl_pose", PoseWithCovarianceStamped, self.callback_amcl)
self.pub_kf = rospy.Publisher("/robot_pose/kf", Vector3, queue_size=1000)
self.pub_initialpose = rospy.Publisher("/initialpose", PoseWithCovarianceStamped, queue_size=1000)
self.pub_initial_pose(init_pose[0], init_pose[1], init_t)
self.pub_amcl_cov = rospy.Publisher("/robot_pose/amcl_cov", Vector3, queue_size=1000)
def pub_initial_pose(self, x, y, t):
q = quaternion_from_euler(0.0, 0.0, t)
msg = PoseWithCovarianceStamped()
msg.pose.pose.position.x = x
msg.pose.pose.position.y = y
msg.pose.pose.orientation.x = q[0]
msg.pose.pose.orientation.y = q[1]
msg.pose.pose.orientation.z = q[2]
msg.pose.pose.orientation.w = q[3]
msg.pose.covariance = [
0.1, 0.01, 0.0, 0.0, 0.0, 0.0,
0.01, 0.1, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.1
]
rospy.sleep(0.5)
self.pub_initialpose.publish(msg)
print("send initial_pose", np.degrees(t))
def estimate(self, odom_v, odom_cov):
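        """Prediction step: rotate the odometry displacement into the world frame and propagate the state and covariance through the EKF."""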
t = self.x[2][0]
dist = np.sqrt(odom_v[0] ** 2 + odom_v[1] ** 2)
dx = dist * math.cos(t)
dy = dist * math.sin(t)
dt = np.radians(odom_v[2])
u = np.matrix([
[dx],
[dy],
[dt]
])
Q = np.diag([odom_cov[0], odom_cov[1], odom_cov[2]]) # State covariance
self.x, self.P = self.amcl_kf.estimate(self.x, self.P, u, Q)
t = self.x[2][0]
if t > pi:
t = t - 2*pi
elif t < -pi:
t = 2*pi + t
self.x[2][0] = t
msg_kf = Vector3(self.x.item(0), self.x.item(1), self.x.item(2))
self.pub_kf.publish(msg_kf)
def callback_opti(self, msg):
pass
def callback_odom(self, msg):
time = msg.header.stamp
msg_kf = Vector3(self.x.item(0), self.x.item(1), self.x.item(2))
self.br.send(msg_kf, time)
def callback_uwb(self, msg):
pose = msg.pose.pose.position
z = np.matrix([
[pose.x],
[pose.y]
])
self.buf_pose = [pose.x, pose.y, pose.z]
dist_z = abs(self.buf_pose[2] - 1.18)
# dist_z = 0.001
# dist_z = 0.01
# dist_z = 0.1
x = np.matrix([
[self.x.item(0)],
[self.x.item(1)]
])
        P = np.diag([self.P.item(0), self.P.item(4)])  # x and y variances from the 3x3 state covariance
# Todo
R = np.diag([dist_z, dist_z])
# R = np.diag([0.01, 0.01])
x, P = self.uwb_kf.update(x, P, z, R)
self.x[0][0] = x[0][0]
self.x[1][0] = x[1][0]
self.P[0][0] = P[0][0]
        self.P[1][1] = P[1][1]
def callback_amcl(self, msg):
if len(self.buf_pose) > 0:
pose = msg.pose.pose.position
q = msg.pose.pose.orientation
angle = euler_from_quaternion([q.x, q.y, q.z, q.w]) # radians
t = angle[2]
cov_x = msg.pose.covariance[0]
cov_y = msg.pose.covariance[7]
cov_t = msg.pose.covariance[35]
dist_kf_x = (self.buf_pose[0] - pose.x)
dist_kf_y = (self.buf_pose[1] - pose.y)
dist_cov = cov_x + cov_y
dist_z = abs(self.buf_pose[2] - 1.18)
msg_cov = Vector3(cov_x, dist_z, dist_z ** 2)
self.pub_amcl_cov.publish(msg_cov)
if dist_kf_x < cov_x or dist_kf_y < cov_y:
# Initiate AMCL pose when its covariance is high
self.pub_initial_pose(self.buf_pose[0], self.buf_pose[1], self.x.item(2))
self.amcl_covariance_is_high = False
else:
z = np.matrix([
[pose.x],
[pose.y],
[t]
])
R = np.diag([cov_x, cov_y, cov_t])
self.x, self.P = self.amcl_kf.update(self.x, self.P, z, R)
if __name__ == '__main__':
print("Node: Localize with UWB")
rospy.init_node('localize_uwb', anonymous=True)
rate = rospy.Rate(10)
init = Initialization()
odom = GlobalOdom(init.pose, init.t)
localize = Localization(init.pose, init.t)
while True and not rospy.is_shutdown():
odom.update()
odom.pub()
# print("dt", odom.v[2])
localize.estimate(odom.encoder_v, odom.cov)
# if(odom.v[0] > 0 or odom.v[1] > 0):
# localize.update_t()
rate.sleep() |
the-stack_0_22932 | # -*- coding: utf-8 -*-
import uuid
import signal
from threading import Thread, Event, Timer
from typing import List
from importlib import import_module
from asterisk_mirror.config import AsteriskConfig
from asterisk_mirror.stepper import Stepper
from asterisk_mirror.logics import MorseLogic, YearLogic, FlucLogic
# innner methods
def _merge_dict(source: dict, destination: dict):
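    """Recursively merge ``source`` into ``destination`` (in place) and return it."""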
for key, value in source.items():
if isinstance(value, dict):
node = destination.setdefault(key, {})
_merge_dict(value, node)
else:
destination[key] = value
return destination
# AsteriskMirror
class AsteriskMirror:
def __init__(self):
# configurations
config = AsteriskConfig()
self.stop_event = Event()
self.main_thread = None
self.timer_thread = None
self.stepper = Stepper([config.get('System.step_pin', int), config.get('System.direction_pin', int), config.get('System.enable_pin', int)])
self.transition = config.get('System.transition', int)
self.logics = []
self.logic_index = -1
# load and append logics
module = import_module('asterisk_mirror.logics')
for logic_str in config.get('System.logics').split(','):
logic_cls = getattr(module, logic_str.strip())
logic = logic_cls(self.stepper)
self.logics.append(logic)
print("AsteriskMirror [", "transition:", self.transition, "]")
def start(self):
if self.main_thread is not None:
print("AsteriskMirror: already started.")
return
print ("AsteriskMirror: starting...")
# renew threads
if self.timer_thread is not None:
self.stop_event.set()
self.timer_thread = Thread(target=self.timer_run)
self.stop_event.clear()
self.main_thread = Thread(target=self.run)
# start threads
self.main_thread.start()
self.timer_thread.start()
def stop(self):
print("AsteriskMirror: stopping...")
self.stop_event.set()
self.stepper.exit()
self.timer_thread = None
self.main_thread = None
def timer_run(self):
while not self.stop_event.is_set():
# set a new index of logics
self.logic_index = (self.logic_index+1)%len(self.logics)
print("AsteriskMirror: changes logic:", self.logics[self.logic_index])
# interrupt stepper thread and main thread
self.stepper.interrupt()
self.stop_event.wait(self.transition)
def run(self):
#print("AsteriskMirror.run starting...")
while not self.stop_event.is_set():
if self.logic_index >= 0 and len(self.logics) > 0:
logic = self.logics[self.logic_index]
logic.run()
else:
# wait until a right logic-index will be set
self.stop_event.wait(1)
# main
def main():
AsteriskConfig()
mirror = AsteriskMirror()
mirror.start()
# handles SIGINT(ctrl-c) and SIGTERM
def handler(signal, frame):
mirror.stop()
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
if __name__ == '__main__':
main() |
the-stack_0_22933 | from bfieldtools import sphtools
import pytest
import numpy as np
from numpy.testing import assert_allclose
def test_coord_changes():
point = np.array([[0.4, 0.4, 1]])
sphpoint = sphtools.cartesian2spherical(point)
point2 = sphtools.spherical2cartesian(sphpoint)
assert_allclose(point, point2)
def test_sph_eval():
"""
Simply test that the function evaluate, assert shapes but nothing more
"""
l = 4
x = np.array([0.4, 0.4, 1])
theta = np.array([0.4, 0.4, 1])
phi = np.array([0.4, 0.4, 1])
for m in [-2, 0, 2]:
m = 2
assert sphtools.lpmn_em(l, m, x).shape == (3,)
assert sphtools.derlpmn_em(l, m, x).shape == (3,)
assert sphtools.xlm(l, m, theta).shape == (3,)
assert sphtools.ylm(l, m, theta, phi).shape == (3,)
assert sphtools.derxlm(l, m, theta).shape == (3,)
assert sphtools.sinxlm(l, m, theta).shape == (3,)
assert sphtools.dthylm(l, m, theta, phi).shape == (3,)
assert sphtools.dphiylm(l, m, theta, phi).shape == (3,)
assert sphtools.Plm(l, m, theta, phi).shape == (3, 3)
assert sphtools.Blm(l, m, theta, phi).shape == (3, 3)
assert sphtools.Wlm(l, m, theta, phi).shape == (3, 3)
assert sphtools.Vlm(l, m, theta, phi).shape == (3, 3)
def test_potential():
p = np.array([[1, 0, 0], [-1, 0, 0]])
acoeffs = np.array([1, 0, 0])
bcoeffs = np.array([0, 0, 0])
lmax = 1
U = sphtools.potential(p, acoeffs, bcoeffs, lmax)
assert U[0] == -U[-1]
def test_field():
p = np.array([[1, 0, 0], [-1, 2, 0]])
# make a homogeneous field
acoeffs = np.array([0, 0, 0, 0, 0, 0, 0, 0])
bcoeffs = np.array([1, 0, 0, 0, 0, 0, 0, 0])
lmax = 2
B = sphtools.field(p, acoeffs, bcoeffs, lmax)
assert_allclose(B[0], B[1], atol=1e-16)
basis = sphtools.basis_fields(p, lmax)
B_b = basis[0] @ acoeffs + basis[1] @ bcoeffs
assert_allclose(B_b[0], B_b[1], atol=1e-16)
assert_allclose(B, B_b, atol=1e-16)
p = np.array([[0, 1, 0], [100, 2, 500]])
# make a homogeneous field
acoeffs = np.array([0, 0, 0, 0, 0, 0, 0, 0])
bcoeffs = np.array([0, 1, 0, 0, 0, 0, 0, 0])
lmax = 2
B = sphtools.field(p, acoeffs, bcoeffs, lmax)
assert_allclose(B[0], B[1], atol=1e-16)
basis = sphtools.basis_fields(p, lmax)
B_b = basis[0] @ acoeffs + basis[1] @ bcoeffs
assert_allclose(B_b[0], B_b[1], atol=1e-16)
assert_allclose(B, B_b, atol=1e-16)
def test_innerproduct():
sph = sphtools.SphBasis(40)
atol = 1e-12
Ylm1 = sphtools.ylm(2, 1, sph.sqp[:, 1], sph.sqp[:, 2])
Ylm2 = sphtools.ylm(2, 2, sph.sqp[:, 1], sph.sqp[:, 2])
assert_allclose(sph.innerproduct(Ylm1[:, None], Ylm1[:, None]), 1, atol=atol)
assert_allclose(sph.innerproduct(Ylm2[:, None], Ylm2[:, None]), 1, atol=atol)
assert_allclose(sph.innerproduct(Ylm1[:, None], Ylm2[:, None]), 0, atol=atol)
Vlm1 = sphtools.Vlm(2, 1, sph.sqp[:, 1], sph.sqp[:, 2])
Vlm2 = sphtools.Vlm(5, 2, sph.sqp[:, 1], sph.sqp[:, 2])
assert_allclose(sph.innerproduct(Vlm1, Vlm1), 1, atol=atol)
assert_allclose(sph.innerproduct(Vlm2, Vlm2), 1, atol=atol)
assert_allclose(sph.innerproduct(Vlm1, Vlm2), 0, atol=atol)
Wlm1 = sphtools.Wlm(3, 1, sph.sqp[:, 1], sph.sqp[:, 2])
Wlm2 = sphtools.Wlm(5, 2, sph.sqp[:, 1], sph.sqp[:, 2])
assert_allclose(sph.innerproduct(Wlm1, Wlm1), 1, atol=atol)
assert_allclose(sph.innerproduct(Wlm2, Wlm2), 1, atol=atol)
assert_allclose(sph.innerproduct(Wlm1, Wlm2), 0, atol=atol)
Xlm1 = sphtools.Xlm(3, 1, sph.sqp[:, 1], sph.sqp[:, 2])
Xlm2 = sphtools.Xlm(4, 2, sph.sqp[:, 1], sph.sqp[:, 2])
assert_allclose(sph.innerproduct(Xlm1, Xlm1), 1, atol=atol)
assert_allclose(sph.innerproduct(Xlm2, Xlm2), 1, atol=atol)
assert_allclose(sph.innerproduct(Xlm1, Xlm2), 0, atol=atol)
assert_allclose(sph.innerproduct(Xlm1, Vlm1), 0, atol=atol)
assert_allclose(sph.innerproduct(Xlm1, Vlm2), 0, atol=atol)
assert_allclose(sph.innerproduct(Xlm2, Vlm1), 0, atol=atol)
assert_allclose(sph.innerproduct(Xlm2, Vlm2), 0, atol=atol)
assert_allclose(sph.innerproduct(Wlm1, Vlm1), 0, atol=atol)
assert_allclose(sph.innerproduct(Wlm1, Vlm2), 0, atol=atol)
assert_allclose(sph.innerproduct(Wlm2, Vlm1), 0, atol=atol)
assert_allclose(sph.innerproduct(Wlm2, Vlm2), 0, atol=atol)
def test_mesh_coupling():
"""
Test compute_sphcoeffs_mesh with sphere
"""
from bfieldtools.sphtools import compute_sphcoeffs_mesh
from bfieldtools.utils import load_example_mesh
from bfieldtools.sphtools import basis_potentials, basis_fields
from bfieldtools.sphtools import ylm, cartesian2spherical
from bfieldtools.mesh_calculus import mass_matrix
mesh = load_example_mesh("unit_sphere")
mesh.vertices *= 1 / np.sqrt(mesh.area / (4 * np.pi)) # Scale to unit radius
R = 2
mesh.vertices *= R # Scale to R
c = compute_sphcoeffs_mesh(mesh, 3)
# Test potential
sp = cartesian2spherical(mesh.vertices)
M = mass_matrix(mesh, lumped=True)
u1, u2 = basis_potentials(mesh.vertices, 3)
diff1 = []
diff2 = []
for ll in range(1, 4):
for m in range(-ll, ll + 1):
s = ylm(ll, m, sp[:, 1], sp[:, 2])
p = u1 @ c[0] @ s
# p should be p= ll/(2*ll+1)s, test this
coeff = s @ M @ p / (s @ M @ s)
diff1.append(coeff - ll / (2 * ll + 1))
p = u2 @ c[1] @ s
# p should be p= -(ll+1)/(2*ll+1)s, test this
coeff = s @ M @ p / (s @ M @ s)
diff2.append(coeff + (ll + 1) / (2 * ll + 1))
# The integration accuracy is quite low so set the tolerance high
assert np.allclose(diff1, 0, atol=1e-2)
assert np.allclose(diff2, 0, atol=1e-2)
# Test field
b1, b2 = basis_fields(mesh.vertices, 3)
b1 = np.einsum("ijk,ij->ik", b1, mesh.vertex_normals)
b2 = np.einsum("ijk,ij->ik", b2, mesh.vertex_normals)
diff1 = []
diff2 = []
mu0 = 4 * np.pi * 1e-7
for ll in range(1, 4):
for m in range(-ll, ll + 1):
s = ylm(ll, m, sp[:, 1], sp[:, 2])
p = b1 @ c[0] @ s
# p should be p= mu0*(ll+1)*ll/(2*ll+1)/R s, test this
coeff = s @ M @ p / (s @ M @ s)
diff1.append(coeff / mu0 - (ll + 1) * ll / (2 * ll + 1) / R)
p = b2 @ c[1] @ s
# p should be p= mu0*(ll+1)*ll/(2*ll+1)/R s, test this
coeff = s @ M @ p / (s @ M @ s)
diff2.append(coeff / mu0 - (ll + 1) * ll / (2 * ll + 1) / R)
assert np.allclose(diff1, 0, atol=1e-2)
assert np.allclose(diff2, 0, atol=1e-2)
|
the-stack_0_22934 | '''
A large television network wants to run a poll among its viewers to find out who
was the best player after each match. To do so, a program must be developed, to be
used by the phone operators, to tally the votes. Your team has been hired to
develop this program using the C++ programming language. To record each vote, the
operator types a number between 1 and 23, corresponding to the player's shirt
number. A player number equal to zero indicates that voting has ended. If an
invalid number is typed, the program must ignore it, show a brief warning message,
and ask for another number. Once voting has ended, the program must display:
The total number of votes tallied;
The numbers, and respective vote counts, of every player who received votes;
The percentage of votes for each of these players;
The number of the player chosen as the best player of the match, together with his
vote count and the percentage of votes given to him.
Note that invalid votes and the final zero must not be counted as votes. The result
is listed in order of player number. The program must use arrays. The percentage of
each player must be computed by a function. This function receives two parameters:
a player's vote count and the total number of votes. It computes the percentage and
returns the computed value. A sample screen follows; the layout of the information
should be as close as possible to the example. The data are fictitious and may
change on each run. At the end, the program must also write the voting results to a
text file on disk, following the same layout shown on screen.
Poll: Who was the best player?
Player number (0=end): 9
Player number (0=end): 10
Player number (0=end): 9
Player number (0=end): 10
Player number (0=end): 11
Player number (0=end): 10
Player number (0=end): 50
Enter a value between 1 and 23, or 0 to quit!
Player number (0=end): 9
Player number (0=end): 9
Player number (0=end): 0
Voting results:
8 votes were tallied.
Player   Votes      %
9        4        50.0%
10       3        37.5%
11       1        12.5%
The best player was number 9, with 4 votes, corresponding to 50% of the total votes.
'''
########## PROGRAM ##########
def cabecalho():
print ("Enquete: Quem foi o melhor jogador? \n")
print ("Informe um valor entre 1 e 23 ou 0 para sair! \n")
Resultado = open('Resultado da Pesquisa.txt', 'w')
Resultado.write("Enquete: Quem foi o melhor jogador? \n")
Resultado.write("\n")
Resultado.write("Informe um valor entre 1 e 23 ou 0 para sair! \n")
cabecalho()
votos = []
jogador = 1
joga = []
maior = []
maior_porc = []
maior_joga = 0  # highest vote count seen so far (0 so that a single vote can still win)
maior_i = 1     # jersey number of the current leading player
while jogador != 0:
jogador = int(input("Número do jogador (0=fim): "))
    if 1 <= jogador <= 23:  # count only valid jersey numbers (1-23)
joga.append(jogador)
if jogador < 0 or jogador > 23:
print("Informe um valor entre 1 e 23 ou 0 para sair!")
print('\n')
for i in range(1,24):
votos.append(joga.count(i))
if joga.count(i) > maior_joga:
maior_joga = joga.count(i)
maior_i = i
Resultado.write('\n')
Resultado.write('===================================================== \n')
Resultado.write('================ RESULTADO DOS VOTOS ================ \n')
Resultado.write('===================================================== \n')
Resultado.write('\n')
Resultado.write("Foram computados %i votos. \n" %len(joga))
Resultado.write('\n')
Resultado.write('Jogador \t\t Votos \t\t' ' % \n')
for i in range(1,24):
if joga.count(i) > 0 and joga.count(i)/sum(votos) * 100 > 0:
        # write one row of the results table: jersey number, votes and percentage
Resultado.write(' ')
Resultado.write(str(i))
Resultado.write('\t\t\t ')
Resultado.write(str(joga.count(i)))
Resultado.write('\t\t')
Resultado.write(str('{:>5.2f}'.format(joga.count(i)/sum(votos) * 100)))
Resultado.write('\n')
maior_porc.append(joga.count(i)/sum(votos) * 100)
maior.append(maior_i)
maior.append(max(votos))
maior.append(max(maior_porc))
Resultado.write("\n")
Resultado.write("O melhor jogador foi o número %i, com %i votos, correspondendo a %2.2f%% do total de votos."%(maior[0], maior[1], maior[2]))
Resultado.close()
##### PRINT THE RESULTS FILE STARTING FROM THE DESIRED LINE #####
Resultado = open('Resultado da Pesquisa.txt', 'r')
texto = Resultado.readlines()
for linha in range(1):
for i in range(4, len(texto)):
print("%s"%texto[i], end = '')
Resultado.close()
|
the-stack_0_22936 | from collections import defaultdict
class TaskWarriorSettings:
def __init__(self, taskrc_path):
self._raw_data = {}
self._uda_keys = defaultdict(list)
self._parse_file(taskrc_path)
self.udas = [
UserDefinedAttribute(self, name, keys)
for name, keys in self._uda_keys.items()
]
def _parse_file(self, path):
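        """Parse a taskrc file line by line, following ``include`` directives recursively."""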
with open(path, 'r') as f:
for line in f.readlines():
line = line.strip()
if line.startswith('include'):
path = line[8:]
self._parse_file(path)
if '=' in line:
self._parse_setting(line)
def _parse_setting(self, line):
split_line = line.split('=', maxsplit=1)
key_str, value = split_line
# convert value lists
if ',' in value:
value = value.split(',')
# save the raw key/value
self._raw_data[key_str] = value
# organize UDAs
if key_str.startswith('uda'):
_, uda_name, _ = key_str.split('.', maxsplit=2)
self._uda_keys[uda_name].append(key_str)
def get(self, key):
return self._raw_data.get(key)
class UserDefinedAttribute:
def __init__(self, taskrc, name, keys):
self.name = name
self.default = ''
self.label = ''
self.values = []
for key in keys:
value = taskrc.get(key)
attr = key.split('.')[-1]
setattr(self, attr, value)
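# Minimal usage sketch (the taskrc path below is a placeholder, not part of this
# module):
#
#   settings = TaskWarriorSettings('/home/user/.taskrc')
#   for uda in settings.udas:
#       print(uda.name, uda.label, uda.default, uda.values)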
|
the-stack_0_22937 | #!/usr/bin/env python
#####################################
# Installation module for brutex
#####################################
# AUTHOR OF MODULE NAME
AUTHOR="David Kennedy (ReL1K)"
# DESCRIPTION OF THE MODULE
DESCRIPTION="This module will install/update BruteX - a brute forcing tool"
# INSTALL TYPE GIT, SVN, FILE DOWNLOAD
# OPTIONS = GIT, SVN, FILE
INSTALL_TYPE="GIT"
# LOCATION OF THE FILE OR GIT/SVN REPOSITORY
REPOSITORY_LOCATION="https://github.com/1N3/BruteX"
# WHERE DO YOU WANT TO INSTALL IT
INSTALL_LOCATION="brutex"
# DEPENDS FOR DEBIAN INSTALLS
DEBIAN="git,snmp"
# DEPENDS FOR FEDORA INSTALLS
FEDORA="git,net-snmp"
# COMMANDS TO RUN AFTER
AFTER_COMMANDS=""
# THIS WILL CREATE AN AUTOMATIC LAUNCHER FOR THE TOOL
LAUNCHER="brutex"
|
the-stack_0_22938 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
Spack allows very fine-grained control over how packages are installed and
over how they are built and configured. To make this easy, it has its own
syntax for declaring a dependence. We call a descriptor of a particular
package configuration a "spec".
The syntax looks like this:
.. code-block:: sh
$ spack install mpileaks ^openmpi @1.2:1.4 +debug %intel @12.1 =bgqos_0
0 1 2 3 4 5 6
The first part of this is the command, 'spack install'. The rest of the
line is a spec for a particular installation of the mpileaks package.
0. The package to install
1. A dependency of the package, prefixed by ^
2. A version descriptor for the package. This can either be a specific
version, like "1.2", or it can be a range of versions, e.g. "1.2:1.4".
If multiple specific versions or multiple ranges are acceptable, they
can be separated by commas, e.g. if a package will only build with
versions 1.0, 1.2-1.4, and 1.6-1.8 of mavpich, you could say:
depends_on("[email protected],1.2:1.4,1.6:1.8")
3. A compile-time variant of the package. If you need openmpi to be
built in debug mode for your package to work, you can require it by
adding +debug to the openmpi spec when you depend on it. If you do
NOT want the debug option to be enabled, then replace this with -debug.
4. The name of the compiler to build with.
5. The versions of the compiler to build with. Note that the identifier
for a compiler version is the same '@' that is used for a package version.
A version list denoted by '@' is associated with the compiler only if
       it comes immediately after the compiler name. Otherwise it will be
associated with the current package spec.
6. The architecture to build with. This is needed on machines where
cross-compilation is required
Here is the EBNF grammar for a spec::
spec-list = { spec [ dep-list ] }
dep_list = { ^ spec }
spec = id [ options ]
options = { @version-list | +variant | -variant | ~variant |
%compiler | arch=architecture | [ flag ]=value}
flag = { cflags | cxxflags | fcflags | fflags | cppflags |
ldflags | ldlibs }
variant = id
architecture = id
compiler = id [ version-list ]
version-list = version [ { , version } ]
version = id | id: | :id | id:id
id = [A-Za-z0-9_][A-Za-z0-9_.-]*
Identifiers using the <name>=<value> command, such as architectures and
compiler flags, require a space before the name.
There is one context-sensitive part: ids in versions may contain '.', while
other ids may not.
There is one ambiguity: since '-' is allowed in an id, you need to put
whitespace space before -variant for it to be tokenized properly. You can
either use whitespace, or you can just use ~variant since it means the same
thing. Spack uses ~variant in directory names and in the canonical form of
specs to avoid ambiguity. Both are provided because ~ can cause shell
expansion when it is the first character in an id typed on the command line.
"""
import base64
import sys
import collections
import ctypes
import hashlib
import itertools
import os
import re
import six
from operator import attrgetter
from llnl.util.filesystem import find_headers, find_libraries, is_exe
from llnl.util.lang import key_ordering, HashableMap, ObjectWrapper, dedupe
from llnl.util.lang import check_kwargs, memoized
from llnl.util.tty.color import cwrite, colorize, cescape, get_color_when
import llnl.util.tty as tty
import spack.paths
import spack.architecture
import spack.compiler
import spack.compilers as compilers
import spack.dependency as dp
import spack.error
import spack.hash_types as ht
import spack.parse
import spack.repo
import spack.store
import spack.util.spack_json as sjson
import spack.util.spack_yaml as syaml
from spack.util.module_cmd import get_path_from_module, load_module
from spack.error import NoLibrariesError, NoHeadersError
from spack.error import SpecError, UnsatisfiableSpecError
from spack.provider_index import ProviderIndex
from spack.util.crypto import prefix_bits
from spack.util.executable import Executable
from spack.util.prefix import Prefix
from spack.util.spack_yaml import syaml_dict
from spack.util.string import comma_or
from spack.variant import MultiValuedVariant, AbstractVariant
from spack.variant import BoolValuedVariant, substitute_abstract_variants
from spack.variant import VariantMap, UnknownVariantError
from spack.variant import DuplicateVariantError
from spack.variant import UnsatisfiableVariantSpecError
from spack.version import VersionList, VersionRange, Version, ver
from ruamel.yaml.error import MarkedYAMLError
__all__ = [
'Spec',
'parse',
'SpecError',
'SpecParseError',
'DuplicateDependencyError',
'DuplicateVariantError',
'DuplicateCompilerSpecError',
'UnsupportedCompilerError',
'UnknownVariantError',
'DuplicateArchitectureError',
'InconsistentSpecError',
'InvalidDependencyError',
'NoProviderError',
'MultipleProviderError',
'UnsatisfiableSpecError',
'UnsatisfiableSpecNameError',
'UnsatisfiableVersionSpecError',
'UnsatisfiableCompilerSpecError',
'UnsatisfiableVariantSpecError',
'UnsatisfiableCompilerFlagSpecError',
'UnsatisfiableArchitectureSpecError',
'UnsatisfiableProviderSpecError',
'UnsatisfiableDependencySpecError',
'AmbiguousHashError',
'InvalidHashError',
'NoSuchHashError',
'RedundantSpecError']
#: Valid pattern for an identifier in Spack
identifier_re = r'\w[\w-]*'
compiler_color = '@g' #: color for highlighting compilers
version_color = '@c' #: color for highlighting versions
architecture_color = '@m' #: color for highlighting architectures
enabled_variant_color = '@B' #: color for highlighting enabled variants
disabled_variant_color = '@r' #: color for highlighting disabled variants
dependency_color = '@.' #: color for highlighting dependencies
hash_color = '@K' #: color for highlighting package hashes
#: This map determines the coloring of specs when using color output.
#: We make the fields different colors to enhance readability.
#: See llnl.util.tty.color for descriptions of the color codes.
color_formats = {'%': compiler_color,
'@': version_color,
'=': architecture_color,
'+': enabled_variant_color,
'~': disabled_variant_color,
'^': dependency_color,
'#': hash_color}
#: Regex used for splitting by spec field separators.
#: These need to be escaped to avoid metacharacters in
#: ``color_formats.keys()``.
_separators = '[\\%s]' % '\\'.join(color_formats.keys())
#: Versionlist constant so we don't have to build a list
#: every time we call str()
_any_version = VersionList([':'])
#: Max integer helps avoid passing too large a value to cyaml.
maxint = 2 ** (ctypes.sizeof(ctypes.c_int) * 8 - 1) - 1
default_format = '{name}{@version}'
default_format += '{%compiler.name}{@compiler.version}{compiler_flags}'
default_format += '{variants}{arch=architecture}'
def colorize_spec(spec):
"""Returns a spec colorized according to the colors specified in
color_formats."""
class insert_color:
def __init__(self):
self.last = None
def __call__(self, match):
# ignore compiler versions (color same as compiler)
sep = match.group(0)
if self.last == '%' and sep == '@':
return cescape(sep)
self.last = sep
return '%s%s' % (color_formats[sep], cescape(sep))
return colorize(re.sub(_separators, insert_color(), str(spec)) + '@.')
@key_ordering
class ArchSpec(object):
def __init__(self, spec_or_platform_tuple=(None, None, None)):
""" Architecture specification a package should be built with.
Each ArchSpec is comprised of three elements: a platform (e.g. Linux),
an OS (e.g. RHEL6), and a target (e.g. x86_64).
Args:
spec_or_platform_tuple (ArchSpec or str or tuple): if an ArchSpec
is passed it will be duplicated into the new instance.
Otherwise information on platform, OS and target should be
passed in either as a spec string or as a tuple.
"""
# If another instance of ArchSpec was passed, duplicate it
if isinstance(spec_or_platform_tuple, ArchSpec):
self._dup(spec_or_platform_tuple)
return
# If the argument to __init__ is a spec string, parse it
# and construct an ArchSpec
def _string_or_none(s):
if s and s != 'None':
return str(s)
return None
if isinstance(spec_or_platform_tuple, six.string_types):
spec_fields = spec_or_platform_tuple.split("-")
msg = "invalid arch spec [{0}]"
assert len(spec_fields) == 3, msg.format(spec_or_platform_tuple)
platform, operating_system, target = spec_fields
platform_tuple = _string_or_none(platform),\
_string_or_none(operating_system), target
if isinstance(spec_or_platform_tuple, tuple):
platform, operating_system, target = spec_or_platform_tuple
platform_tuple = _string_or_none(platform), \
_string_or_none(operating_system), target
msg = "invalid arch spec tuple [{0}]"
assert len(platform_tuple) == 3, msg.format(platform_tuple)
self.platform, self.os, self.target = platform_tuple
def _autospec(self, spec_like):
if isinstance(spec_like, ArchSpec):
return spec_like
return ArchSpec(spec_like)
def _cmp_key(self):
return self.platform, self.os, self.target
def _dup(self, other):
self.platform = other.platform
self.os = other.os
self.target = other.target
@property
def platform(self):
"""The platform of the architecture."""
return self._platform
@platform.setter
def platform(self, value):
# The platform of the architecture spec will be verified as a
# supported Spack platform before it's set to ensure all specs
# refer to valid platforms.
value = str(value) if value is not None else None
self._platform = value
@property
def os(self):
"""The OS of this ArchSpec."""
return self._os
@os.setter
def os(self, value):
# The OS of the architecture spec will update the platform field
# if the OS is set to one of the reserved OS types so that the
# default OS type can be resolved. Since the reserved OS
# information is only available for the host machine, the platform
# will assumed to be the host machine's platform.
value = str(value) if value is not None else None
if value in spack.architecture.Platform.reserved_oss:
curr_platform = str(spack.architecture.platform())
self.platform = self.platform or curr_platform
if self.platform != curr_platform:
raise ValueError(
"Can't set arch spec OS to reserved value '%s' when the "
"arch platform (%s) isn't the current platform (%s)" %
(value, self.platform, curr_platform))
spec_platform = spack.architecture.get_platform(self.platform)
value = str(spec_platform.operating_system(value))
self._os = value
@property
def target(self):
"""The target of the architecture."""
return self._target
@target.setter
def target(self, value):
# The target of the architecture spec will update the platform field
# if the target is set to one of the reserved target types so that
# the default target type can be resolved. Since the reserved target
# information is only available for the host machine, the platform
        # will be assumed to be the host machine's platform.
def target_or_none(t):
if isinstance(t, spack.architecture.Target):
return t
if t and t != 'None':
return spack.architecture.Target(t)
return None
value = target_or_none(value)
if str(value) in spack.architecture.Platform.reserved_targets:
curr_platform = str(spack.architecture.platform())
self.platform = self.platform or curr_platform
if self.platform != curr_platform:
raise ValueError(
"Can't set arch spec target to reserved value '%s' when "
"the arch platform (%s) isn't the current platform (%s)" %
(value, self.platform, curr_platform))
spec_platform = spack.architecture.get_platform(self.platform)
value = spec_platform.target(value)
self._target = value
def satisfies(self, other, strict=False):
"""Predicate to check if this spec satisfies a constraint.
Args:
other (ArchSpec or str): constraint on the current instance
strict (bool): if ``False`` the function checks if the current
instance *might* eventually satisfy the constraint. If
                ``True`` it checks if the constraint is satisfied right now.
Returns:
True if the constraint is satisfied, False otherwise.
"""
other = self._autospec(other)
# Check platform and os
for attribute in ('platform', 'os'):
other_attribute = getattr(other, attribute)
self_attribute = getattr(self, attribute)
if strict or self.concrete:
if other_attribute and self_attribute != other_attribute:
return False
else:
if other_attribute and self_attribute and \
self_attribute != other_attribute:
return False
# Check target
return self._satisfies_target(other.target, strict=strict)
def _satisfies_target(self, other_target, strict):
self_target = self.target
need_to_check = bool(other_target) if strict or self.concrete \
else bool(other_target and self_target)
# If there's no need to check we are fine
if not need_to_check:
return True
for target_range in str(other_target).split(','):
t_min, sep, t_max = target_range.partition(':')
# Checking against a single specific target
if not sep and self_target == t_min:
return True
if not sep and self_target != t_min:
return False
# Check against a range
min_ok = self_target.microarchitecture >= t_min if t_min else True
max_ok = self_target.microarchitecture <= t_max if t_max else True
if min_ok and max_ok:
return True
return False
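    # Target-range sketch (illustrative only; the microarchitecture names are
    # assumed examples and must be known to spack.architecture.Target):
    #
    #     a = ArchSpec('linux-ubuntu18.04-broadwell')
    #     a.satisfies('linux-ubuntu18.04-haswell:')         # True: haswell or newer
    #     a.satisfies('linux-ubuntu18.04-x86_64:haswell')   # False: newer than haswell
    #
    # A bare target name matches only that exact target, while 'min:max'
    # (either bound optional) compares microarchitectures against the range.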
def constrain(self, other):
"""Projects all architecture fields that are specified in the given
spec onto the instance spec if they're missing from the instance
spec.
This will only work if the two specs are compatible.
Args:
other (ArchSpec or str): constraints to be added
Returns:
True if the current instance was constrained, False otherwise.
"""
other = self._autospec(other)
if not self.satisfies(other):
raise UnsatisfiableArchitectureSpecError(self, other)
constrained = False
for attr in ('platform', 'os', 'target'):
svalue, ovalue = getattr(self, attr), getattr(other, attr)
if svalue is None and ovalue is not None:
setattr(self, attr, ovalue)
constrained = True
return constrained
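    # Constrain sketch (illustrative only; platform and OS names are assumed):
    #
    #     a = ArchSpec((None, None, 'x86_64'))
    #     a.constrain(ArchSpec(('linux', 'ubuntu18.04', None)))   # True
    #     str(a)                                 # 'linux-ubuntu18.04-x86_64'
    #
    # Only fields that are missing on the instance are filled in; fields the
    # instance already has are left untouched.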
def copy(self):
"""Copy the current instance and returns the clone."""
clone = ArchSpec.__new__(ArchSpec)
clone._dup(self)
return clone
@property
def concrete(self):
"""True if the spec is concrete, False otherwise"""
# return all(v for k, v in six.iteritems(self.to_cmp_dict()))
return self.platform and self.os and self.target
def to_dict(self):
d = syaml_dict([
('platform', self.platform),
('platform_os', self.os),
('target', self.target.to_dict_or_value())])
return syaml_dict([('arch', d)])
@staticmethod
def from_dict(d):
"""Import an ArchSpec from raw YAML/JSON data.
This routine implements a measure of compatibility with older
versions of Spack. Spack releases before 0.10 used a single
string with no OS or platform identifiers. We import old Spack
architectures with platform ``spack09``, OS ``unknown``, and the
old arch string as the target.
Specs from `0.10` or later have a more fleshed out architecture
descriptor with a platform, an OS, and a target.
"""
if not isinstance(d['arch'], dict):
return ArchSpec(('spack09', 'unknown', d['arch']))
d = d['arch']
operating_system = d.get('platform_os', None) or d['os']
target = spack.architecture.Target.from_dict_or_value(d['target'])
return ArchSpec((d['platform'], operating_system, target))
def __str__(self):
return "%s-%s-%s" % (self.platform, self.os, self.target)
def __repr__(self):
# TODO: this needs to be changed (repr is meant to return valid
# TODO: Python code to return an instance equivalent to the current
# TODO: one).
return str(self)
def __contains__(self, string):
return string in str(self) or string in self.target
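# Serialization sketch for ArchSpec (illustrative only; names are assumed):
#
#     arch = ArchSpec('linux-ubuntu18.04-x86_64')
#     data = arch.to_dict()            # {'arch': {'platform': 'linux', ...}}
#     same = ArchSpec.from_dict(data)  # reconstructs an equal ArchSpec
#
# ``from_dict`` also accepts the pre-0.10 format where 'arch' was a plain
# string; those come back as ArchSpec(('spack09', 'unknown', <old string>)).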
@key_ordering
class CompilerSpec(object):
"""The CompilerSpec field represents the compiler or range of compiler
versions that a package should be built with. CompilerSpecs have a
name and a version list. """
def __init__(self, *args):
nargs = len(args)
if nargs == 1:
arg = args[0]
# If there is one argument, it's either another CompilerSpec
# to copy or a string to parse
if isinstance(arg, six.string_types):
c = SpecParser().parse_compiler(arg)
self.name = c.name
self.versions = c.versions
elif isinstance(arg, CompilerSpec):
self.name = arg.name
self.versions = arg.versions.copy()
else:
raise TypeError(
"Can only build CompilerSpec from string or " +
"CompilerSpec. Found %s" % type(arg))
elif nargs == 2:
name, version = args
self.name = name
self.versions = VersionList()
self.versions.add(ver(version))
else:
raise TypeError(
"__init__ takes 1 or 2 arguments. (%d given)" % nargs)
def _add_version(self, version):
self.versions.add(version)
def _autospec(self, compiler_spec_like):
if isinstance(compiler_spec_like, CompilerSpec):
return compiler_spec_like
return CompilerSpec(compiler_spec_like)
def satisfies(self, other, strict=False):
other = self._autospec(other)
return (self.name == other.name and
self.versions.satisfies(other.versions, strict=strict))
def constrain(self, other):
"""Intersect self's versions with other.
Return whether the CompilerSpec changed.
"""
other = self._autospec(other)
# ensure that other will actually constrain this spec.
if not other.satisfies(self):
raise UnsatisfiableCompilerSpecError(other, self)
return self.versions.intersect(other.versions)
@property
def concrete(self):
"""A CompilerSpec is concrete if its versions are concrete and there
is an available compiler with the right version."""
return self.versions.concrete
@property
def version(self):
if not self.concrete:
raise SpecError("Spec is not concrete: " + str(self))
return self.versions[0]
def copy(self):
clone = CompilerSpec.__new__(CompilerSpec)
clone.name = self.name
clone.versions = self.versions.copy()
return clone
def _cmp_key(self):
return (self.name, self.versions)
def to_dict(self):
d = syaml_dict([('name', self.name)])
d.update(self.versions.to_dict())
return syaml_dict([('compiler', d)])
@staticmethod
def from_dict(d):
d = d['compiler']
return CompilerSpec(d['name'], VersionList.from_dict(d))
def __str__(self):
out = self.name
if self.versions and self.versions != _any_version:
vlist = ",".join(str(v) for v in self.versions)
out += "@%s" % vlist
return out
def __repr__(self):
return str(self)
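# CompilerSpec sketch (illustrative only; the compiler name and versions are
# assumed examples):
#
#     cs = CompilerSpec('[email protected]')          # or CompilerSpec('gcc', '9.3.0')
#     cs.satisfies('gcc@9:')                # True: 9.3.0 lies in the 9: range
#     cs.constrain(CompilerSpec('gcc@9:10'))   # intersects the version lists
#
# ``constrain`` raises UnsatisfiableCompilerSpecError when the other spec
# cannot satisfy this one, e.g. for a different compiler name.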
@key_ordering
class DependencySpec(object):
"""DependencySpecs connect two nodes in the DAG, and contain deptypes.
Dependencies can be one (or more) of several types:
- build: needs to be in the PATH at build time.
- link: is linked to and added to compiler flags.
- run: needs to be in the PATH for the package to run.
Fields:
- spec: Spec depended on by parent.
- parent: Spec that depends on `spec`.
- deptypes: list of strings, representing dependency relationships.
"""
def __init__(self, parent, spec, deptypes):
self.parent = parent
self.spec = spec
self.deptypes = tuple(sorted(set(deptypes)))
def update_deptypes(self, deptypes):
deptypes = set(deptypes)
deptypes.update(self.deptypes)
deptypes = tuple(sorted(deptypes))
changed = self.deptypes != deptypes
self.deptypes = deptypes
return changed
def copy(self):
return DependencySpec(self.parent, self.spec, self.deptypes)
def _cmp_key(self):
return (self.parent.name if self.parent else None,
self.spec.name if self.spec else None,
self.deptypes)
def __str__(self):
return "%s %s--> %s" % (self.parent.name if self.parent else None,
self.deptypes,
self.spec.name if self.spec else None)
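# DependencySpec sketch (illustrative only; ``foo_spec`` and ``bar_spec`` are
# assumed Spec objects):
#
#     edge = DependencySpec(foo_spec, bar_spec, ('link', 'build'))
#     edge.deptypes                     # ('build', 'link'): sorted, de-duplicated
#     edge.update_deptypes(('run',))    # True; deptypes now ('build', 'link', 'run')
#
# Storing deptypes as a sorted tuple keeps edges comparable and hashable no
# matter the order in which the types were declared.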
_valid_compiler_flags = [
'cflags', 'cxxflags', 'fflags', 'ldflags', 'ldlibs', 'cppflags']
class FlagMap(HashableMap):
def __init__(self, spec):
super(FlagMap, self).__init__()
self.spec = spec
def satisfies(self, other, strict=False):
if strict or (self.spec and self.spec._concrete):
return all(f in self and set(self[f]) == set(other[f])
for f in other)
else:
return all(set(self[f]) == set(other[f])
for f in other if (other[f] != [] and f in self))
def constrain(self, other):
"""Add all flags in other that aren't in self to self.
Return whether the spec changed.
"""
if other.spec and other.spec._concrete:
for k in self:
if k not in other:
raise UnsatisfiableCompilerFlagSpecError(
self[k], '<absent>')
changed = False
for k in other:
if k in self and not set(self[k]) <= set(other[k]):
raise UnsatisfiableCompilerFlagSpecError(
' '.join(f for f in self[k]),
' '.join(f for f in other[k]))
elif k not in self:
self[k] = other[k]
changed = True
return changed
@staticmethod
def valid_compiler_flags():
return _valid_compiler_flags
def copy(self):
clone = FlagMap(None)
for name, value in self.items():
clone[name] = value
return clone
def _cmp_key(self):
return tuple((k, tuple(v)) for k, v in sorted(six.iteritems(self)))
def __str__(self):
sorted_keys = [k for k in sorted(self.keys()) if self[k] != []]
cond_symbol = ' ' if len(sorted_keys) > 0 else ''
return cond_symbol + ' '.join(
str(key) + '=\"' + ' '.join(
str(f) for f in self[key]) + '\"'
for key in sorted_keys) + cond_symbol
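# FlagMap sketch (illustrative only; the flag values are assumed examples):
#
#     mine, other = FlagMap(None), FlagMap(None)
#     mine['cflags'] = ['-O2']
#     other['cflags'] = ['-O2']
#     other['ldflags'] = ['-lm']
#     mine.constrain(other)     # True: the missing 'ldflags' entry is copied
#     str(mine)                 # ' cflags="-O2" ldflags="-lm" '
#
# When ``other`` belongs to a concrete spec, flags present here but absent
# from ``other`` raise UnsatisfiableCompilerFlagSpecError instead.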
class DependencyMap(HashableMap):
"""Each spec has a DependencyMap containing specs for its dependencies.
The DependencyMap is keyed by name. """
def __str__(self):
return "{deps: %s}" % ', '.join(str(d) for d in sorted(self.values()))
def _command_default_handler(descriptor, spec, cls):
"""Default handler when looking for the 'command' attribute.
Tries to search for ``spec.name`` in the ``spec.prefix.bin`` directory.
Parameters:
descriptor (ForwardQueryToPackage): descriptor that triggered the call
spec (Spec): spec that is being queried
cls (type(spec)): type of spec, to match the signature of the
descriptor ``__get__`` method
Returns:
Executable: An executable of the command
Raises:
RuntimeError: If the command is not found
"""
path = os.path.join(spec.prefix.bin, spec.name)
if is_exe(path):
return Executable(path)
else:
msg = 'Unable to locate {0} command in {1}'
raise RuntimeError(msg.format(spec.name, spec.prefix.bin))
def _headers_default_handler(descriptor, spec, cls):
"""Default handler when looking for the 'headers' attribute.
Tries to search for ``*.h`` files recursively starting from
``spec.prefix.include``.
Parameters:
descriptor (ForwardQueryToPackage): descriptor that triggered the call
spec (Spec): spec that is being queried
cls (type(spec)): type of spec, to match the signature of the
descriptor ``__get__`` method
Returns:
HeaderList: The headers in ``prefix.include``
Raises:
NoHeadersError: If no headers are found
"""
headers = find_headers('*', root=spec.prefix.include, recursive=True)
if headers:
return headers
else:
msg = 'Unable to locate {0} headers in {1}'
raise NoHeadersError(msg.format(spec.name, spec.prefix.include))
def _libs_default_handler(descriptor, spec, cls):
"""Default handler when looking for the 'libs' attribute.
Tries to search for ``lib{spec.name}`` recursively starting from
``spec.prefix``. If ``spec.name`` starts with ``lib``, searches for
``{spec.name}`` instead.
Parameters:
descriptor (ForwardQueryToPackage): descriptor that triggered the call
spec (Spec): spec that is being queried
cls (type(spec)): type of spec, to match the signature of the
descriptor ``__get__`` method
Returns:
LibraryList: The libraries found
Raises:
NoLibrariesError: If no libraries are found
"""
# Variable 'name' is passed to function 'find_libraries', which supports
# glob characters. For example, we have a package with a name 'abc-abc'.
# Now, we don't know if the original name of the package is 'abc_abc'
# (and it generates a library 'libabc_abc.so') or 'abc-abc' (and it
# generates a library 'libabc-abc.so'). So, we tell the function
# 'find_libraries' to give us anything that matches 'libabc?abc' and it
# gives us either 'libabc-abc.so' or 'libabc_abc.so' (or an error)
# depending on which one exists (there is a possibility, of course, to
    # get something like 'libabcXabc.so', but for now we consider this
# unlikely).
name = spec.name.replace('-', '?')
# Avoid double 'lib' for packages whose names already start with lib
if not name.startswith('lib'):
name = 'lib' + name
# If '+shared' search only for shared library; if '~shared' search only for
# static library; otherwise, first search for shared and then for static.
search_shared = [True] if ('+shared' in spec) else \
([False] if ('~shared' in spec) else [True, False])
for shared in search_shared:
libs = find_libraries(name, spec.prefix, shared=shared, recursive=True)
if libs:
return libs
msg = 'Unable to recursively locate {0} libraries in {1}'
raise NoLibrariesError(msg.format(spec.name, spec.prefix))
class ForwardQueryToPackage(object):
"""Descriptor used to forward queries from Spec to Package"""
def __init__(self, attribute_name, default_handler=None):
"""Create a new descriptor.
Parameters:
attribute_name (str): name of the attribute to be
searched for in the Package instance
default_handler (callable, optional): default function to be
called if the attribute was not found in the Package
instance
"""
self.attribute_name = attribute_name
self.default = default_handler
def __get__(self, instance, cls):
"""Retrieves the property from Package using a well defined chain
of responsibility.
The order of call is:
1. if the query was through the name of a virtual package try to
search for the attribute `{virtual_name}_{attribute_name}`
in Package
2. try to search for attribute `{attribute_name}` in Package
3. try to call the default handler
The first call that produces a value will stop the chain.
If no call can handle the request then AttributeError is raised with a
message indicating that no relevant attribute exists.
If a call returns None, an AttributeError is raised with a message
indicating a query failure, e.g. that library files were not found in a
'libs' query.
"""
pkg = instance.package
try:
query = instance.last_query
except AttributeError:
# There has been no query yet: this means
# a spec is trying to access its own attributes
_ = instance[instance.name] # NOQA: ignore=F841
query = instance.last_query
callbacks_chain = []
# First in the chain : specialized attribute for virtual packages
if query.isvirtual:
specialized_name = '{0}_{1}'.format(
query.name, self.attribute_name
)
callbacks_chain.append(lambda: getattr(pkg, specialized_name))
# Try to get the generic method from Package
callbacks_chain.append(lambda: getattr(pkg, self.attribute_name))
# Final resort : default callback
if self.default is not None:
callbacks_chain.append(lambda: self.default(self, instance, cls))
# Trigger the callbacks in order, the first one producing a
# value wins
value = None
message = None
for f in callbacks_chain:
try:
value = f()
# A callback can return None to trigger an error indicating
# that the query failed.
if value is None:
msg = "Query of package '{name}' for '{attrib}' failed\n"
msg += "\tprefix : {spec.prefix}\n"
msg += "\tspec : {spec}\n"
msg += "\tqueried as : {query.name}\n"
msg += "\textra parameters : {query.extra_parameters}"
message = msg.format(
name=pkg.name, attrib=self.attribute_name,
spec=instance, query=instance.last_query)
else:
return value
break
except AttributeError:
pass
# value is 'None'
if message is not None:
# Here we can use another type of exception. If we do that, the
# unit test 'test_getitem_exceptional_paths' in the file
# lib/spack/spack/test/spec_dag.py will need to be updated to match
# the type.
raise AttributeError(message)
# 'None' value at this point means that there are no appropriate
# properties defined and no default handler, or that all callbacks
# raised AttributeError. In this case, we raise AttributeError with an
# appropriate message.
fmt = '\'{name}\' package has no relevant attribute \'{query}\'\n'
fmt += '\tspec : \'{spec}\'\n'
fmt += '\tqueried as : \'{spec.last_query.name}\'\n'
fmt += '\textra parameters : \'{spec.last_query.extra_parameters}\'\n'
message = fmt.format(
name=pkg.name,
query=self.attribute_name,
spec=instance
)
raise AttributeError(message)
def __set__(self, instance, value):
cls_name = type(instance).__name__
msg = "'{0}' object attribute '{1}' is read-only"
raise AttributeError(msg.format(cls_name, self.attribute_name))
class SpecBuildInterface(ObjectWrapper):
command = ForwardQueryToPackage(
'command',
default_handler=_command_default_handler
)
headers = ForwardQueryToPackage(
'headers',
default_handler=_headers_default_handler
)
libs = ForwardQueryToPackage(
'libs',
default_handler=_libs_default_handler
)
def __init__(self, spec, name, query_parameters):
super(SpecBuildInterface, self).__init__(spec)
# Represents a query state in a BuildInterface object
QueryState = collections.namedtuple(
'QueryState', ['name', 'extra_parameters', 'isvirtual']
)
is_virtual = Spec.is_virtual(name)
self.last_query = QueryState(
name=name,
extra_parameters=query_parameters,
isvirtual=is_virtual
)
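# Query-forwarding sketch (illustrative only; 'mpich' and 'mpi' are assumed
# package/virtual names available in the repo):
#
#     spec = Spec('mpich')
#     spec.concretize()
#     spec['mpi'].libs       # tries pkg.mpi_libs, then pkg.libs, then the
#                            # default find_libraries()-based handler
#     spec['mpich'].command  # same chain for 'command', ending at prefix.bin
#
# Each ``spec[name]`` lookup hands back a SpecBuildInterface wrapper whose
# ``last_query`` records the name, extra parameters and virtuality of the
# query, which ForwardQueryToPackage then uses to pick the right attribute.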
@key_ordering
class Spec(object):
#: Cache for spec's prefix, computed lazily in the corresponding property
_prefix = None
def __init__(self, spec_like=None,
normal=False, concrete=False, external_path=None,
external_module=None, full_hash=None):
"""Create a new Spec.
Arguments:
spec_like (optional string): if not provided, we initialize
an anonymous Spec that matches any Spec object; if
provided we parse this as a Spec string.
        Keyword arguments:
            normal (bool): whether to mark the new Spec as already normalized.
            concrete (bool): whether to mark the new Spec as already concrete.
            external_path (str, optional): prefix of an external installation
                that this Spec describes.
            external_module (str, optional): module that provides this Spec as
                an external package.
            full_hash (str, optional): full hash of the Spec, if already known
                (e.g. when reading specs back from a file).
        """
# Copy if spec_like is a Spec.
if isinstance(spec_like, Spec):
self._dup(spec_like)
return
# init an empty spec that matches anything.
self.name = None
self.versions = VersionList(':')
self.variants = VariantMap(self)
self.architecture = None
self.compiler = None
self.external_path = None
self.external_module = None
self.compiler_flags = FlagMap(self)
self._dependents = DependencyMap()
self._dependencies = DependencyMap()
self.namespace = None
self._hash = None
self._build_hash = None
self._cmp_key_cache = None
self._package = None
# Most of these are internal implementation details that can be
# set by internal Spack calls in the constructor.
#
# For example, Specs are by default not assumed to be normal, but
        # in some cases we've read them from a file and want to assume they
        # are normal. This allows us to manipulate specs that Spack doesn't
# have package.py files for.
self._normal = normal
self._concrete = concrete
self.external_path = external_path
self.external_module = external_module
self._full_hash = full_hash
if isinstance(spec_like, six.string_types):
spec_list = SpecParser(self).parse(spec_like)
if len(spec_list) > 1:
raise ValueError("More than one spec in string: " + spec_like)
if len(spec_list) < 1:
raise ValueError("String contains no specs: " + spec_like)
elif spec_like is not None:
raise TypeError("Can't make spec out of %s" % type(spec_like))
@property
def external(self):
return bool(self.external_path) or bool(self.external_module)
def get_dependency(self, name):
dep = self._dependencies.get(name)
if dep is not None:
return dep
raise InvalidDependencyError(self.name, name)
def _find_deps(self, where, deptype):
deptype = dp.canonical_deptype(deptype)
return [dep for dep in where.values()
if deptype and (not dep.deptypes or
any(d in deptype for d in dep.deptypes))]
def dependencies(self, deptype='all'):
return [d.spec
for d in self._find_deps(self._dependencies, deptype)]
def dependents(self, deptype='all'):
return [d.parent
for d in self._find_deps(self._dependents, deptype)]
def dependencies_dict(self, deptype='all'):
return dict((d.spec.name, d)
for d in self._find_deps(self._dependencies, deptype))
def dependents_dict(self, deptype='all'):
return dict((d.parent.name, d)
for d in self._find_deps(self._dependents, deptype))
#
# Private routines here are called by the parser when building a spec.
#
def _add_version(self, version):
"""Called by the parser to add an allowable version."""
self.versions.add(version)
def _add_flag(self, name, value):
"""Called by the parser to add a known flag.
Known flags currently include "arch"
"""
valid_flags = FlagMap.valid_compiler_flags()
if name == 'arch' or name == 'architecture':
parts = tuple(value.split('-'))
plat, os, tgt = parts if len(parts) == 3 else (None, None, value)
self._set_architecture(platform=plat, os=os, target=tgt)
elif name == 'platform':
self._set_architecture(platform=value)
elif name == 'os' or name == 'operating_system':
self._set_architecture(os=value)
elif name == 'target':
self._set_architecture(target=value)
elif name in valid_flags:
assert(self.compiler_flags is not None)
self.compiler_flags[name] = spack.compiler.tokenize_flags(value)
else:
# FIXME:
# All other flags represent variants. 'foo=true' and 'foo=false'
# map to '+foo' and '~foo' respectively. As such they need a
# BoolValuedVariant instance.
if str(value).upper() == 'TRUE' or str(value).upper() == 'FALSE':
self.variants[name] = BoolValuedVariant(name, value)
else:
self.variants[name] = AbstractVariant(name, value)
def _set_architecture(self, **kwargs):
"""Called by the parser to set the architecture."""
arch_attrs = ['platform', 'os', 'target']
if self.architecture and self.architecture.concrete:
raise DuplicateArchitectureError(
"Spec for '%s' cannot have two architectures." % self.name)
if not self.architecture:
new_vals = tuple(kwargs.get(arg, None) for arg in arch_attrs)
self.architecture = ArchSpec(new_vals)
else:
new_attrvals = [(a, v) for a, v in six.iteritems(kwargs)
if a in arch_attrs]
for new_attr, new_value in new_attrvals:
if getattr(self.architecture, new_attr):
raise DuplicateArchitectureError(
"Spec for '%s' cannot have two '%s' specified "
"for its architecture" % (self.name, new_attr))
else:
setattr(self.architecture, new_attr, new_value)
def _set_compiler(self, compiler):
"""Called by the parser to set the compiler."""
if self.compiler:
raise DuplicateCompilerSpecError(
"Spec for '%s' cannot have two compilers." % self.name)
self.compiler = compiler
def _add_dependency(self, spec, deptypes):
"""Called by the parser to add another spec as a dependency."""
if spec.name in self._dependencies:
raise DuplicateDependencyError(
"Cannot depend on '%s' twice" % spec)
# create an edge and add to parent and child
dspec = DependencySpec(self, spec, deptypes)
self._dependencies[spec.name] = dspec
spec._dependents[self.name] = dspec
#
# Public interface
#
@property
def fullname(self):
return (
('%s.%s' % (self.namespace, self.name)) if self.namespace else
(self.name if self.name else ''))
@property
def root(self):
"""Follow dependent links and find the root of this spec's DAG.
Spack specs have a single root (the package being installed).
"""
if not self._dependents:
return self
return next(iter(self._dependents.values())).parent.root
@property
def package(self):
if not self._package:
self._package = spack.repo.get(self)
return self._package
@property
def package_class(self):
"""Internal package call gets only the class object for a package.
Use this to just get package metadata.
"""
return spack.repo.path.get_pkg_class(self.fullname)
@property
def virtual(self):
"""Right now, a spec is virtual if no package exists with its name.
TODO: revisit this -- might need to use a separate namespace and
be more explicit about this.
            Possible idea: just use convention and make virtual deps all
caps, e.g., MPI vs mpi.
"""
return Spec.is_virtual(self.name)
@staticmethod
def is_virtual(name):
"""Test if a name is virtual without requiring a Spec."""
return (name is not None) and (not spack.repo.path.exists(name))
@property
def concrete(self):
"""A spec is concrete if it describes a single build of a package.
More formally, a spec is concrete if concretize() has been called
on it and it has been marked `_concrete`.
Concrete specs either can be or have been built. All constraints
have been resolved, optional dependencies have been added or
removed, a compiler has been chosen, and all variants have
values.
"""
return self._concrete
def traverse(self, **kwargs):
direction = kwargs.get('direction', 'children')
depth = kwargs.get('depth', False)
get_spec = lambda s: s.spec
if direction == 'parents':
get_spec = lambda s: s.parent
if depth:
for d, dspec in self.traverse_edges(**kwargs):
yield d, get_spec(dspec)
else:
for dspec in self.traverse_edges(**kwargs):
yield get_spec(dspec)
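    # Traversal sketch (illustrative only; the deptype selection is an
    # assumed example):
    #
    #     for node in spec.traverse(order='post', deptype=('link', 'run')):
    #         print(node.name)                 # dependencies before dependents
    #     for depth, node in spec.traverse(depth=True):
    #         print('  ' * depth + node.name)  # simple indented tree
    #
    # All keyword options are documented on ``traverse_edges`` below.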
def traverse_edges(self, visited=None, d=0, deptype='all',
dep_spec=None, **kwargs):
"""Generic traversal of the DAG represented by this spec.
This will yield each node in the spec. Options:
order [=pre|post]
Order to traverse spec nodes. Defaults to preorder traversal.
Options are:
'pre': Pre-order traversal; each node is yielded before its
children in the dependency DAG.
'post': Post-order traversal; each node is yielded after its
children in the dependency DAG.
cover [=nodes|edges|paths]
Determines how extensively to cover the dag. Possible values:
'nodes': Visit each node in the dag only once. Every node
yielded by this function will be unique.
'edges': If a node has been visited once but is reached along a
new path from the root, yield it but do not descend
into it. This traverses each 'edge' in the DAG once.
'paths': Explore every unique path reachable from the root.
This descends into visited subtrees and will yield
nodes twice if they're reachable by multiple paths.
depth [=False]
Defaults to False. When True, yields not just nodes in the
spec, but also their depth from the root in a (depth, node)
tuple.
key [=id]
Allow a custom key function to track the identity of nodes
in the traversal.
root [=True]
            If False, this won't yield the root node, just its descendants.
direction [=children|parents]
If 'children', does a traversal of this spec's children. If
'parents', traverses upwards in the DAG towards the root.
"""
# get initial values for kwargs
depth = kwargs.get('depth', False)
key_fun = kwargs.get('key', id)
if isinstance(key_fun, six.string_types):
key_fun = attrgetter(key_fun)
yield_root = kwargs.get('root', True)
cover = kwargs.get('cover', 'nodes')
direction = kwargs.get('direction', 'children')
order = kwargs.get('order', 'pre')
deptype = dp.canonical_deptype(deptype)
# Make sure kwargs have legal values; raise ValueError if not.
def validate(name, val, allowed_values):
if val not in allowed_values:
raise ValueError("Invalid value for %s: %s. Choices are %s"
% (name, val, ",".join(allowed_values)))
validate('cover', cover, ('nodes', 'edges', 'paths'))
validate('direction', direction, ('children', 'parents'))
validate('order', order, ('pre', 'post'))
if visited is None:
visited = set()
key = key_fun(self)
# Node traversal does not yield visited nodes.
if key in visited and cover == 'nodes':
return
def return_val(dspec):
if not dspec:
# make a fake dspec for the root.
if direction == 'parents':
dspec = DependencySpec(self, None, ())
else:
dspec = DependencySpec(None, self, ())
return (d, dspec) if depth else dspec
yield_me = yield_root or d > 0
# Preorder traversal yields before successors
if yield_me and order == 'pre':
yield return_val(dep_spec)
# Edge traversal yields but skips children of visited nodes
if not (key in visited and cover == 'edges'):
visited.add(key)
# This code determines direction and yields the children/parents
if direction == 'children':
where = self._dependencies
succ = lambda dspec: dspec.spec
elif direction == 'parents':
where = self._dependents
succ = lambda dspec: dspec.parent
else:
raise ValueError('Invalid traversal direction: %s' % direction)
for name, dspec in sorted(where.items()):
dt = dspec.deptypes
if dt and not any(d in deptype for d in dt):
continue
for child in succ(dspec).traverse_edges(
visited, d + 1, deptype, dspec, **kwargs):
yield child
# Postorder traversal yields after successors
if yield_me and order == 'post':
yield return_val(dep_spec)
@property
def short_spec(self):
"""Returns a version of the spec with the dependencies hashed
instead of completely enumerated."""
spec_format = '{name}{@version}{%compiler}'
spec_format += '{variants}{arch=architecture}{/hash:7}'
return self.format(spec_format)
@property
def cshort_spec(self):
"""Returns an auto-colorized version of ``self.short_spec``."""
spec_format = '{name}{@version}{%compiler}'
spec_format += '{variants}{arch=architecture}{/hash:7}'
return self.cformat(spec_format)
@property
def prefix(self):
if not self._concrete:
raise SpecError("Spec is not concrete: " + str(self))
if self._prefix is None:
upstream, record = spack.store.db.query_by_spec_hash(
self.dag_hash())
if record and record.path:
self.prefix = record.path
else:
self.prefix = spack.store.layout.path_for_spec(self)
return self._prefix
@prefix.setter
def prefix(self, value):
self._prefix = Prefix(value)
def _spec_hash(self, hash):
"""Utility method for computing different types of Spec hashes.
Arguments:
hash (SpecHashDescriptor): type of hash to generate.
"""
        # TODO: currently we strip build dependencies by default. Rethink
# this when we move to using package hashing on all specs.
yaml_text = syaml.dump(self.to_node_dict(hash=hash),
default_flow_style=True, width=maxint)
sha = hashlib.sha1(yaml_text.encode('utf-8'))
b32_hash = base64.b32encode(sha.digest()).lower()
if sys.version_info[0] >= 3:
b32_hash = b32_hash.decode('utf-8')
return b32_hash
def _cached_hash(self, hash, length=None):
"""Helper function for storing a cached hash on the spec.
        This will run _spec_hash() with the given hash descriptor, and if
        this spec is concrete, it will cache the value in the attribute
        named by the descriptor (``hash.attr``) on this spec.
Arguments:
hash (SpecHashDescriptor): type of hash to generate.
"""
if not hash.attr:
return self._spec_hash(hash)[:length]
hash_string = getattr(self, hash.attr, None)
if hash_string:
return hash_string[:length]
else:
hash_string = self._spec_hash(hash)
if self.concrete:
setattr(self, hash.attr, hash_string)
return hash_string[:length]
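    # Hashing sketch (illustrative only; 'zlib' is an assumed package name):
    #
    #     spec = Spec('zlib')
    #     spec.concretize()
    #     spec.dag_hash()      # 32-character base32 hash, cached on the spec
    #     spec.dag_hash(7)     # truncated prefix, convenient for display
    #
    # Hashes are cached only once the spec is concrete; abstract specs are
    # re-hashed on every call.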
def dag_hash(self, length=None):
"""This is Spack's default hash, used to identify installations.
At the moment, it excludes build dependencies to avoid rebuilding
packages whenever build dependency versions change. We will
revise this to include more detailed provenance when the
        concretizer can more aggressively reuse installed dependencies.
"""
return self._cached_hash(ht.dag_hash, length)
def build_hash(self, length=None):
"""Hash used to store specs in environments.
This hash includes build dependencies, and we need to preserve
them to be able to rebuild an entire environment for a user.
"""
return self._cached_hash(ht.build_hash, length)
def full_hash(self, length=None):
"""Hash to determine when to rebuild packages in the build pipeline.
This hash includes the package hash, so that we know when package
        files have changed between builds. It does not currently include
build dependencies, though it likely should.
TODO: investigate whether to include build deps here.
"""
return self._cached_hash(ht.full_hash, length)
def dag_hash_bit_prefix(self, bits):
"""Get the first <bits> bits of the DAG hash as an integer type."""
return base32_prefix_bits(self.dag_hash(), bits)
def to_node_dict(self, hash=ht.dag_hash):
"""Create a dictionary representing the state of this Spec.
``to_node_dict`` creates the content that is eventually hashed by
Spack to create identifiers like the DAG hash (see
``dag_hash()``). Example result of ``to_node_dict`` for the
``sqlite`` package::
{
'sqlite': {
'version': '3.28.0',
'arch': {
'platform': 'darwin',
'platform_os': 'mojave',
'target': 'x86_64',
},
'compiler': {
'name': 'clang',
'version': '10.0.0-apple',
},
'namespace': 'builtin',
'parameters': {
'fts': 'true',
'functions': 'false',
'cflags': [],
'cppflags': [],
'cxxflags': [],
'fflags': [],
'ldflags': [],
'ldlibs': [],
},
'dependencies': {
'readline': {
'hash': 'zvaa4lhlhilypw5quj3akyd3apbq5gap',
'type': ['build', 'link'],
}
},
}
}
Note that the dictionary returned does *not* include the hash of
the *root* of the spec, though it does include hashes for each
dependency, and (optionally) the package file corresponding to
each node.
See ``to_dict()`` for a "complete" spec hash, with hashes for
each node and nodes for each dependency (instead of just their
hashes).
Arguments:
            hash (SpecHashDescriptor): type of hash to generate.
"""
d = syaml_dict()
if self.versions:
d.update(self.versions.to_dict())
if self.architecture:
d.update(self.architecture.to_dict())
if self.compiler:
d.update(self.compiler.to_dict())
if self.namespace:
d['namespace'] = self.namespace
params = syaml_dict(
sorted(
v.yaml_entry() for _, v in self.variants.items()
)
)
params.update(sorted(self.compiler_flags.items()))
if params:
d['parameters'] = params
if self.external:
d['external'] = {
'path': self.external_path,
'module': self.external_module
}
if not self._concrete:
d['concrete'] = False
if 'patches' in self.variants:
variant = self.variants['patches']
if hasattr(variant, '_patches_in_order_of_appearance'):
d['patches'] = variant._patches_in_order_of_appearance
if hash.package_hash:
d['package_hash'] = self.package.content_hash()
deps = self.dependencies_dict(deptype=hash.deptype)
if deps:
d['dependencies'] = syaml_dict([
(name,
syaml_dict([
('hash', dspec.spec._cached_hash(hash)),
('type', sorted(str(s) for s in dspec.deptypes))])
) for name, dspec in sorted(deps.items())
])
return syaml_dict([(self.name, d)])
def to_dict(self, hash=ht.dag_hash):
"""Create a dictionary suitable for writing this spec to YAML or JSON.
        This dictionary is like the one that is ultimately written to a
``spec.yaml`` file in each Spack installation directory. For
example, for sqlite::
{
'spec': [
{
'sqlite': {
'version': '3.28.0',
'arch': {
'platform': 'darwin',
'platform_os': 'mojave',
'target': 'x86_64',
},
'compiler': {
'name': 'clang',
'version': '10.0.0-apple',
},
'namespace': 'builtin',
'parameters': {
'fts': 'true',
'functions': 'false',
'cflags': [],
'cppflags': [],
'cxxflags': [],
'fflags': [],
'ldflags': [],
'ldlibs': [],
},
'dependencies': {
'readline': {
'hash': 'zvaa4lhlhilypw5quj3akyd3apbq5gap',
'type': ['build', 'link'],
}
},
'hash': '722dzmgymxyxd6ovjvh4742kcetkqtfs'
}
},
# ... more node dicts for readline and its dependencies ...
]
}
Note that this dictionary starts with the 'spec' key, and what
follows is a list starting with the root spec, followed by its
dependencies in preorder. Each node in the list also has a
'hash' key that contains the hash of the node *without* the hash
field included.
In the example, the package content hash is not included in the
spec, but if ``package_hash`` were true there would be an
additional field on each node called ``package_hash``.
``from_dict()`` can be used to read back in a spec that has been
converted to a dictionary, serialized, and read back in.
        Arguments:
            hash (SpecHashDescriptor): type of hash to generate; its
                ``deptype`` selects which dependency types to traverse and
                its ``package_hash`` flag controls whether package content
                hashes are included on each node.
"""
node_list = []
for s in self.traverse(order='pre', deptype=hash.deptype):
node = s.to_node_dict(hash)
node[s.name]['hash'] = s.dag_hash()
if 'build' in hash.deptype:
node[s.name]['build_hash'] = s.build_hash()
node_list.append(node)
return syaml_dict([('spec', node_list)])
def to_record_dict(self):
"""Return a "flat" dictionary with name and hash as top-level keys.
This is similar to ``to_node_dict()``, but the name and the hash
        are "flattened" into the dictionary for easier parsing by tools
        like ``jq``. Instead of being keyed by name or hash, the
        dictionary has "name" and "hash" fields, e.g.::
{
"name": "openssl"
"hash": "3ws7bsihwbn44ghf6ep4s6h4y2o6eznv"
"version": "3.28.0",
"arch": {
...
}
But is otherwise the same as ``to_node_dict()``.
"""
dictionary = syaml_dict()
dictionary["name"] = self.name
dictionary["hash"] = self.dag_hash()
dictionary.update(self.to_node_dict()[self.name])
return dictionary
def to_yaml(self, stream=None, hash=ht.dag_hash):
return syaml.dump(
self.to_dict(hash), stream=stream, default_flow_style=False)
def to_json(self, stream=None, hash=ht.dag_hash):
return sjson.dump(self.to_dict(hash), stream)
@staticmethod
def from_node_dict(node):
name = next(iter(node))
node = node[name]
spec = Spec(name, full_hash=node.get('full_hash', None))
spec.namespace = node.get('namespace', None)
spec._hash = node.get('hash', None)
spec._build_hash = node.get('build_hash', None)
if 'version' in node or 'versions' in node:
spec.versions = VersionList.from_dict(node)
if 'arch' in node:
spec.architecture = ArchSpec.from_dict(node)
if 'compiler' in node:
spec.compiler = CompilerSpec.from_dict(node)
else:
spec.compiler = None
if 'parameters' in node:
for name, value in node['parameters'].items():
if name in _valid_compiler_flags:
spec.compiler_flags[name] = value
else:
spec.variants[name] = MultiValuedVariant.from_node_dict(
name, value)
elif 'variants' in node:
for name, value in node['variants'].items():
spec.variants[name] = MultiValuedVariant.from_node_dict(
name, value
)
for name in FlagMap.valid_compiler_flags():
spec.compiler_flags[name] = []
if 'external' in node:
spec.external_path = None
spec.external_module = None
# This conditional is needed because sometimes this function is
# called with a node already constructed that contains a 'versions'
# and 'external' field. Related to virtual packages provider
# indexes.
if node['external']:
spec.external_path = node['external']['path']
spec.external_module = node['external']['module']
if spec.external_module is False:
spec.external_module = None
else:
spec.external_path = None
spec.external_module = None
# specs read in are concrete unless marked abstract
spec._concrete = node.get('concrete', True)
if 'patches' in node:
patches = node['patches']
if len(patches) > 0:
mvar = spec.variants.setdefault(
'patches', MultiValuedVariant('patches', ())
)
mvar.value = patches
# FIXME: Monkey patches mvar to store patches order
mvar._patches_in_order_of_appearance = patches
# Don't read dependencies here; from_node_dict() is used by
# from_yaml() to read the root *and* each dependency spec.
return spec
@staticmethod
def dependencies_from_node_dict(node):
name = next(iter(node))
node = node[name]
if 'dependencies' not in node:
return
for t in Spec.read_yaml_dep_specs(node['dependencies']):
yield t
@staticmethod
def read_yaml_dep_specs(dependency_dict):
"""Read the DependencySpec portion of a YAML-formatted Spec.
This needs to be backward-compatible with older spack spec
formats so that reindex will work on old specs/databases.
"""
for dep_name, elt in dependency_dict.items():
if isinstance(elt, six.string_types):
# original format, elt is just the dependency hash.
dag_hash, deptypes = elt, ['build', 'link']
elif isinstance(elt, tuple):
# original deptypes format: (used tuples, not future-proof)
dag_hash, deptypes = elt
elif isinstance(elt, dict):
# new format: elements of dependency spec are keyed.
dag_hash, deptypes = elt['hash'], elt['type']
else:
raise SpecError("Couldn't parse dependency types in spec.")
yield dep_name, dag_hash, list(deptypes)
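    # The three on-disk encodings accepted above (hash values are made up):
    #
    #     {'readline': 'zvaa4lhl...'}                          # oldest: hash only
    #     {'readline': ('zvaa4lhl...', ('build', 'link'))}     # old tuple format
    #     {'readline': {'hash': 'zvaa4lhl...',
    #                   'type': ['build', 'link']}}            # current format
    #
    # Each entry is yielded as a (name, dag_hash, deptypes_list) triple; the
    # oldest format defaults to ['build', 'link'] dependency types.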
@staticmethod
def from_literal(spec_dict, normal=True):
"""Builds a Spec from a dictionary containing the spec literal.
The dictionary must have a single top level key, representing the root,
and as many secondary level keys as needed in the spec.
The keys can be either a string or a Spec or a tuple containing the
Spec and the dependency types.
Args:
spec_dict (dict): the dictionary containing the spec literal
normal (bool): if True the same key appearing at different levels
of the ``spec_dict`` will map to the same object in memory.
Examples:
A simple spec ``foo`` with no dependencies:
.. code-block:: python
{'foo': None}
A spec ``foo`` with a ``(build, link)`` dependency ``bar``:
.. code-block:: python
{'foo':
{'bar:build,link': None}}
A spec with a diamond dependency and various build types:
.. code-block:: python
{'dt-diamond': {
'dt-diamond-left:build,link': {
'dt-diamond-bottom:build': None
},
'dt-diamond-right:build,link': {
'dt-diamond-bottom:build,link,run': None
}
}}
The same spec with a double copy of ``dt-diamond-bottom`` and
no diamond structure:
.. code-block:: python
{'dt-diamond': {
'dt-diamond-left:build,link': {
'dt-diamond-bottom:build': None
},
'dt-diamond-right:build,link': {
'dt-diamond-bottom:build,link,run': None
}
}, normal=False}
Constructing a spec using a Spec object as key:
.. code-block:: python
mpich = Spec('mpich')
libelf = Spec('[email protected]')
expected_normalized = Spec.from_literal({
'mpileaks': {
'callpath': {
'dyninst': {
'libdwarf': {libelf: None},
libelf: None
},
mpich: None
},
mpich: None
},
})
"""
# Maps a literal to a Spec, to be sure we are reusing the same object
spec_cache = LazySpecCache()
def spec_builder(d):
# The invariant is that the top level dictionary must have
# only one key
assert len(d) == 1
# Construct the top-level spec
spec_like, dep_like = next(iter(d.items()))
            # If the requirement was for unique nodes (default)
# then re-use keys from the local cache. Otherwise build
# a new node every time.
if not isinstance(spec_like, Spec):
spec = spec_cache[spec_like] if normal else Spec(spec_like)
else:
spec = spec_like
if dep_like is None:
return spec
def name_and_dependency_types(s):
"""Given a key in the dictionary containing the literal,
extracts the name of the spec and its dependency types.
Args:
s (str): key in the dictionary containing the literal
"""
t = s.split(':')
if len(t) > 2:
msg = 'more than one ":" separator in key "{0}"'
raise KeyError(msg.format(s))
n = t[0]
if len(t) == 2:
dtypes = tuple(dt.strip() for dt in t[1].split(','))
else:
dtypes = ()
return n, dtypes
def spec_and_dependency_types(s):
"""Given a non-string key in the literal, extracts the spec
and its dependency types.
Args:
s (spec or tuple): either a Spec object or a tuple
composed of a Spec object and a string with the
dependency types
"""
if isinstance(s, Spec):
return s, ()
spec_obj, dtypes = s
return spec_obj, tuple(dt.strip() for dt in dtypes.split(','))
# Recurse on dependencies
for s, s_dependencies in dep_like.items():
if isinstance(s, six.string_types):
dag_node, dependency_types = name_and_dependency_types(s)
else:
dag_node, dependency_types = spec_and_dependency_types(s)
dependency_spec = spec_builder({dag_node: s_dependencies})
spec._add_dependency(dependency_spec, dependency_types)
return spec
return spec_builder(spec_dict)
@staticmethod
def from_dict(data):
"""Construct a spec from YAML.
Parameters:
data -- a nested dict/list data structure read from YAML or JSON.
"""
nodes = data['spec']
# Read nodes out of list. Root spec is the first element;
# dependencies are the following elements.
dep_list = [Spec.from_node_dict(node) for node in nodes]
if not dep_list:
raise SpecError("YAML spec contains no nodes.")
deps = dict((spec.name, spec) for spec in dep_list)
spec = dep_list[0]
for node in nodes:
# get dependency dict from the node.
name = next(iter(node))
if 'dependencies' not in node[name]:
continue
yaml_deps = node[name]['dependencies']
for dname, dhash, dtypes in Spec.read_yaml_dep_specs(yaml_deps):
# Fill in dependencies by looking them up by name in deps dict
deps[name]._dependencies[dname] = DependencySpec(
deps[name], deps[dname], dtypes)
return spec
@staticmethod
def from_yaml(stream):
"""Construct a spec from YAML.
Parameters:
stream -- string or file object to read from.
"""
try:
data = syaml.load(stream)
return Spec.from_dict(data)
except MarkedYAMLError as e:
raise syaml.SpackYAMLError("error parsing YAML spec:", str(e))
@staticmethod
def from_json(stream):
"""Construct a spec from JSON.
Parameters:
stream -- string or file object to read from.
"""
try:
data = sjson.load(stream)
return Spec.from_dict(data)
except Exception as e:
tty.debug(e)
raise sjson.SpackJSONError("error parsing JSON spec:", str(e))
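    # Serialization round-trip sketch (illustrative only; 'zlib' is an assumed
    # package name):
    #
    #     spec = Spec('zlib')
    #     spec.concretize()
    #     text = spec.to_yaml()           # or spec.to_json()
    #     again = Spec.from_yaml(text)    # or Spec.from_json(text)
    #
    # The reread spec carries the same nodes, hashes and dependency types,
    # reconstructed purely from the stored data rather than from package.py.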
def _concretize_helper(self, concretizer, presets=None, visited=None):
"""Recursive helper function for concretize().
This concretizes everything bottom-up. As things are
concretized, they're added to the presets, and ancestors
will prefer the settings of their children.
"""
if presets is None:
presets = {}
if visited is None:
visited = set()
if self.name in visited:
return False
if self.concrete:
visited.add(self.name)
return False
changed = False
# Concretize deps first -- this is a bottom-up process.
for name in sorted(self._dependencies.keys()):
changed |= self._dependencies[name].spec._concretize_helper(
concretizer, presets, visited
)
if self.name in presets:
changed |= self.constrain(presets[self.name])
else:
# Concretize virtual dependencies last. Because they're added
# to presets below, their constraints will all be merged, but we'll
# still need to select a concrete package later.
if not self.virtual:
changed |= any(
(concretizer.concretize_architecture(self),
concretizer.concretize_compiler(self),
concretizer.adjust_target(self),
# flags must be concretized after compiler
concretizer.concretize_compiler_flags(self),
concretizer.concretize_version(self),
concretizer.concretize_variants(self)))
presets[self.name] = self
visited.add(self.name)
return changed
def _replace_with(self, concrete):
"""Replace this virtual spec with a concrete spec."""
assert(self.virtual)
for name, dep_spec in self._dependents.items():
dependent = dep_spec.parent
deptypes = dep_spec.deptypes
# remove self from all dependents, unless it is already removed
if self.name in dependent._dependencies:
del dependent._dependencies[self.name]
# add the replacement, unless it is already a dep of dependent.
if concrete.name not in dependent._dependencies:
dependent._add_dependency(concrete, deptypes)
def _expand_virtual_packages(self, concretizer):
"""Find virtual packages in this spec, replace them with providers,
and normalize again to include the provider's (potentially virtual)
dependencies. Repeat until there are no virtual deps.
Precondition: spec is normalized.
.. todo::
If a provider depends on something that conflicts with
other dependencies in the spec being expanded, this can
produce a conflicting spec. For example, if mpich depends
            on hwloc@:1.3 but something in the spec needs [email protected]:,
then we should choose an MPI other than mpich. Cases like
            this are infrequent, but we should handle them before they
            become a problem.
"""
# Make an index of stuff this spec already provides
self_index = ProviderIndex(self.traverse(), restrict=True)
changed = False
done = False
while not done:
done = True
for spec in list(self.traverse()):
replacement = None
if spec.external:
continue
if spec.virtual:
replacement = self._find_provider(spec, self_index)
if replacement:
# TODO: may break if in-place on self but
# shouldn't happen if root is traversed first.
spec._replace_with(replacement)
done = False
break
if not replacement:
# Get a list of possible replacements in order of
# preference.
candidates = concretizer.choose_virtual_or_external(spec)
# Try the replacements in order, skipping any that cause
# satisfiability problems.
for replacement in candidates:
if replacement is spec:
break
# Replace spec with the candidate and normalize
copy = self.copy()
copy[spec.name]._dup(replacement, deps=False)
try:
# If there are duplicate providers or duplicate
# provider deps, consolidate them and merge
# constraints.
copy.normalize(force=True)
break
except SpecError:
# On error, we'll try the next replacement.
continue
# If replacement is external then trim the dependencies
if replacement.external:
if (spec._dependencies):
changed = True
spec._dependencies = DependencyMap()
replacement._dependencies = DependencyMap()
replacement.architecture = self.architecture
# TODO: could this and the stuff in _dup be cleaned up?
def feq(cfield, sfield):
return (not cfield) or (cfield == sfield)
if replacement is spec or (
feq(replacement.name, spec.name) and
feq(replacement.versions, spec.versions) and
feq(replacement.compiler, spec.compiler) and
feq(replacement.architecture, spec.architecture) and
feq(replacement._dependencies, spec._dependencies) and
feq(replacement.variants, spec.variants) and
feq(replacement.external_path,
spec.external_path) and
feq(replacement.external_module,
spec.external_module)):
continue
# Refine this spec to the candidate. This uses
# replace_with AND dup so that it can work in
# place. TODO: make this more efficient.
if spec.virtual:
spec._replace_with(replacement)
changed = True
if spec._dup(replacement, deps=False, cleardeps=False):
changed = True
spec._dependencies.owner = spec
self_index.update(spec)
done = False
break
return changed
def concretize(self, tests=False):
"""A spec is concrete if it describes one build of a package uniquely.
This will ensure that this spec is concrete.
Args:
tests (list or bool): list of packages that will need test
dependencies, or True/False for test all/none
If this spec could describe more than one version, variant, or build
of a package, this will add constraints to make it concrete.
Some rigorous validation and checks are also performed on the spec.
Concretizing ensures that it is self-consistent and that it's
consistent with requirements of its packages. See flatten() and
normalize() for more details on this.
"""
if not self.name:
raise SpecError("Attempting to concretize anonymous spec")
if self._concrete:
return
changed = True
force = False
user_spec_deps = self.flat_dependencies(copy=False)
import spack.concretize
concretizer = spack.concretize.Concretizer(self.copy())
while changed:
changes = (self.normalize(force, tests=tests,
user_spec_deps=user_spec_deps),
self._expand_virtual_packages(concretizer),
self._concretize_helper(concretizer))
changed = any(changes)
force = True
visited_user_specs = set()
for dep in self.traverse():
visited_user_specs.add(dep.name)
visited_user_specs.update(x.name for x in dep.package.provided)
extra = set(user_spec_deps.keys()).difference(visited_user_specs)
if extra:
raise InvalidDependencyError(self.name, extra)
# This dictionary will store object IDs rather than Specs as keys
# since the Spec __hash__ will change as patches are added to them
spec_to_patches = {}
for s in self.traverse():
# After concretizing, assign namespaces to anything left.
# Note that this doesn't count as a "change". The repository
# configuration is constant throughout a spack run, and
# normalize and concretize evaluate Packages using Repo.get(),
# which respects precedence. So, a namespace assignment isn't
# changing how a package name would have been interpreted and
# we can do it as late as possible to allow as much
# compatibility across repositories as possible.
if s.namespace is None:
s.namespace = spack.repo.path.repo_for_pkg(s.name).namespace
if s.concrete:
continue
# Add any patches from the package to the spec.
patches = []
for cond, patch_list in s.package_class.patches.items():
if s.satisfies(cond):
for patch in patch_list:
patches.append(patch)
if patches:
spec_to_patches[id(s)] = patches
# Also record all patches required on dependencies by
# depends_on(..., patch=...)
for dspec in self.traverse_edges(deptype=all,
cover='edges', root=False):
pkg_deps = dspec.parent.package_class.dependencies
if dspec.spec.name not in pkg_deps:
continue
if dspec.spec.concrete:
continue
patches = []
for cond, dependency in pkg_deps[dspec.spec.name].items():
if dspec.parent.satisfies(cond):
for pcond, patch_list in dependency.patches.items():
if dspec.spec.satisfies(pcond):
for patch in patch_list:
patches.append(patch)
if patches:
all_patches = spec_to_patches.setdefault(id(dspec.spec), [])
all_patches.extend(patches)
for spec in self.traverse():
if id(spec) not in spec_to_patches:
continue
patches = list(dedupe(spec_to_patches[id(spec)]))
mvar = spec.variants.setdefault(
'patches', MultiValuedVariant('patches', ())
)
mvar.value = tuple(p.sha256 for p in patches)
# FIXME: Monkey patches mvar to store patches order
full_order_keys = list(tuple(p.ordering_key) + (p.sha256,) for p
in patches)
ordered_hashes = sorted(full_order_keys)
tty.debug("Ordered hashes [{0}]: ".format(spec.name) +
', '.join('/'.join(str(e) for e in t)
for t in ordered_hashes))
mvar._patches_in_order_of_appearance = list(
t[-1] for t in ordered_hashes)
for s in self.traverse():
if s.external_module and not s.external_path:
compiler = spack.compilers.compiler_for_spec(
s.compiler, s.architecture)
for mod in compiler.modules:
load_module(mod)
s.external_path = get_path_from_module(s.external_module)
# Mark everything in the spec as concrete, as well.
self._mark_concrete()
# Now that the spec is concrete we should check if
# there are declared conflicts
#
# TODO: this needs rethinking, as currently we can only express
# TODO: internal configuration conflicts within one package.
matches = []
for x in self.traverse():
for conflict_spec, when_list in x.package_class.conflicts.items():
if x.satisfies(conflict_spec, strict=True):
for when_spec, msg in when_list:
if x.satisfies(when_spec, strict=True):
when = when_spec.copy()
when.name = x.name
matches.append((x, conflict_spec, when, msg))
if matches:
raise ConflictsInSpecError(self, matches)
# Check if we can produce an optimized binary (will throw if
# there are declared inconsistencies)
self.architecture.target.optimization_flags(self.compiler)
def _mark_concrete(self, value=True):
"""Mark this spec and its dependencies as concrete.
Only for internal use -- client code should use "concretize"
unless there is a need to force a spec to be concrete.
"""
for s in self.traverse():
if (not value) and s.concrete and s.package.installed:
continue
s._normal = value
s._concrete = value
def concretized(self):
"""This is a non-destructive version of concretize(). First clones,
then returns a concrete version of this package without modifying
this package. """
clone = self.copy(caches=False)
clone.concretize()
return clone
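    # Concretization sketch (illustrative only; the spec string is an assumed
    # example):
    #
    #     abstract = Spec('hdf5+mpi')
    #     concrete = abstract.concretized()   # leaves ``abstract`` untouched
    #     abstract.concretize()               # in-place alternative
    #
    # Afterwards the spec has a version, compiler, architecture and a value
    # for every variant, and ``spec.concrete`` is True.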
def flat_dependencies(self, **kwargs):
"""Return a DependencyMap containing all of this spec's
dependencies with their constraints merged.
If copy is True, returns merged copies of its dependencies
without modifying the spec it's called on.
If copy is False, clears this spec's dependencies and
returns them. This disconnects all dependency links including
transitive dependencies, except for concrete specs: if a spec
is concrete it will not be disconnected from its dependencies
(although a non-concrete spec with concrete dependencies will
be disconnected from those dependencies).
"""
copy = kwargs.get('copy', True)
flat_deps = {}
try:
deptree = self.traverse(root=False)
for spec in deptree:
if spec.name not in flat_deps:
if copy:
spec = spec.copy(deps=False)
flat_deps[spec.name] = spec
else:
flat_deps[spec.name].constrain(spec)
if not copy:
for spec in flat_deps.values():
if not spec.concrete:
spec._dependencies.clear()
spec._dependents.clear()
self._dependencies.clear()
return flat_deps
except UnsatisfiableSpecError as e:
# Here, the DAG contains two instances of the same package
# with inconsistent constraints. Users cannot produce
# inconsistent specs like this on the command line: the
# parser doesn't allow it. Spack must be broken!
raise InconsistentSpecError("Invalid Spec DAG: %s" % e.message)
def index(self, deptype='all'):
"""Return DependencyMap that points to all the dependencies in this
spec."""
dm = DependencyMap()
for spec in self.traverse(deptype=deptype):
dm[spec.name] = spec
return dm
def _evaluate_dependency_conditions(self, name):
"""Evaluate all the conditions on a dependency with this name.
Args:
name (str): name of dependency to evaluate conditions on.
Returns:
(Dependency): new Dependency object combining all constraints.
If the package depends on <name> in the current spec
configuration, return the constrained dependency and
corresponding dependency types.
If no conditions are True (and we don't depend on it), return
``None``.
"""
conditions = self.package_class.dependencies[name]
substitute_abstract_variants(self)
# evaluate when specs to figure out constraints on the dependency.
dep = None
for when_spec, dependency in conditions.items():
if self.satisfies(when_spec, strict=True):
if dep is None:
dep = dp.Dependency(self.name, Spec(name), type=())
try:
dep.merge(dependency)
except UnsatisfiableSpecError as e:
e.message = (
"Conflicting conditional dependencies for spec"
"\n\n\t{0}\n\n"
"Cannot merge constraint"
"\n\n\t{1}\n\n"
"into"
"\n\n\t{2}"
.format(self, dependency.spec, dep.spec))
raise e
return dep
def _find_provider(self, vdep, provider_index):
"""Find provider for a virtual spec in the provider index.
Raise an exception if there is a conflicting virtual
dependency already in this spec.
"""
assert(vdep.virtual)
# note that this defensively copies.
providers = provider_index.providers_for(vdep)
# If there is a provider for the vpkg, then use that instead of
# the virtual package.
if providers:
# Remove duplicate providers that can concretize to the same
# result.
for provider in providers:
for spec in providers:
if spec is not provider and provider.satisfies(spec):
providers.remove(spec)
# Can't have multiple providers for the same thing in one spec.
if len(providers) > 1:
raise MultipleProviderError(vdep, providers)
return providers[0]
else:
# The user might have required something insufficient for
# pkg_dep -- so we'll get a conflict. e.g., user asked for
# mpi@:1.1 but some package required [email protected]:.
required = provider_index.providers_for(vdep.name)
if len(required) > 1:
raise MultipleProviderError(vdep, required)
elif required:
raise UnsatisfiableProviderSpecError(required[0], vdep)
def _merge_dependency(
self, dependency, visited, spec_deps, provider_index, tests):
"""Merge dependency information from a Package into this Spec.
Args:
dependency (Dependency): dependency metadata from a package;
this is typically the result of merging *all* matching
dependency constraints from the package.
visited (set): set of dependency nodes already visited by
``normalize()``.
spec_deps (dict): ``dict`` of all dependencies from the spec
being normalized.
provider_index (dict): ``provider_index`` of virtual dep
providers in the ``Spec`` as normalized so far.
NOTE: Caller should assume that this routine owns the
``dependency`` parameter, i.e., it needs to be a copy of any
internal structures.
This is the core of ``normalize()``. There are some basic steps:
* If dep is virtual, evaluate whether it corresponds to an
existing concrete dependency, and merge if so.
* If it's real and it provides some virtual dep, see if it provides
what some virtual dependency wants and merge if so.
* Finally, if none of the above, merge dependency and its
constraints into this spec.
This method returns True if the spec was changed, False otherwise.
"""
changed = False
dep = dependency.spec
# If it's a virtual dependency, try to find an existing
# provider in the spec, and merge that.
if dep.virtual:
visited.add(dep.name)
provider = self._find_provider(dep, provider_index)
if provider:
dep = provider
else:
index = ProviderIndex([dep], restrict=True)
items = list(spec_deps.items())
for name, vspec in items:
if not vspec.virtual:
continue
if index.providers_for(vspec):
vspec._replace_with(dep)
del spec_deps[vspec.name]
changed = True
else:
required = index.providers_for(vspec.name)
if required:
raise UnsatisfiableProviderSpecError(required[0], dep)
provider_index.update(dep)
# If the spec isn't already in the set of dependencies, add it.
# Note: dep is always owned by this method. If it's from the
# caller, it's a copy from _evaluate_dependency_conditions. If it
# comes from a vdep, it's a defensive copy from _find_provider.
if dep.name not in spec_deps:
if self.concrete:
return False
spec_deps[dep.name] = dep
changed = True
else:
# merge package/vdep information into spec
try:
changed |= spec_deps[dep.name].constrain(dep)
except UnsatisfiableSpecError as e:
fmt = 'An unsatisfiable {0}'.format(e.constraint_type)
fmt += ' constraint has been detected for spec:'
fmt += '\n\n{0}\n\n'.format(spec_deps[dep.name].tree(indent=4))
fmt += 'while trying to concretize the partial spec:'
fmt += '\n\n{0}\n\n'.format(self.tree(indent=4))
fmt += '{0} requires {1} {2} {3}, but spec asked for {4}'
e.message = fmt.format(
self.name,
dep.name,
e.constraint_type,
e.required,
e.provided)
raise
# Add merged spec to my deps and recurse
spec_dependency = spec_deps[dep.name]
if dep.name not in self._dependencies:
self._add_dependency(spec_dependency, dependency.type)
changed |= spec_dependency._normalize_helper(
visited, spec_deps, provider_index, tests)
return changed
def _normalize_helper(self, visited, spec_deps, provider_index, tests):
"""Recursive helper function for _normalize."""
if self.name in visited:
return False
visited.add(self.name)
# If we descend into a virtual spec, there's nothing more
# to normalize. Concretize will finish resolving it later.
if self.virtual or self.external:
return False
# Avoid recursively adding constraints for already-installed packages:
# these may include build dependencies which are not needed for this
# install (since this package is already installed).
if self.concrete and self.package.installed:
return False
# Combine constraints from package deps with constraints from
# the spec, until nothing changes.
any_change = False
changed = True
while changed:
changed = False
for dep_name in self.package_class.dependencies:
# Do we depend on dep_name? If so pkg_dep is not None.
dep = self._evaluate_dependency_conditions(dep_name)
# If dep is a needed dependency, merge it.
if dep:
merge = (
# caller requested test dependencies
tests is True or (tests and self.name in tests) or
# this is not a test-only dependency
dep.type - set(['test']))
if merge:
changed |= self._merge_dependency(
dep, visited, spec_deps, provider_index, tests)
any_change |= changed
return any_change
def normalize(self, force=False, tests=False, user_spec_deps=None):
"""When specs are parsed, any dependencies specified are hanging off
the root, and ONLY the ones that were explicitly provided are there.
Normalization turns a partial flat spec into a DAG, where:
1. Known dependencies of the root package are in the DAG.
2. Each node's dependencies dict only contains its known direct
deps.
3. There is only ONE unique spec for each package in the DAG.
* This includes virtual packages. If there is a non-virtual
package that provides a virtual package that is in the spec,
then we replace the virtual package with the non-virtual one.
TODO: normalize should probably implement some form of cycle
detection, to ensure that the spec is actually a DAG.
"""
if not self.name:
raise SpecError("Attempting to normalize anonymous spec")
# Set _normal and _concrete to False when forced
if force:
self._mark_concrete(False)
if self._normal:
return False
# Ensure first that all packages & compilers in the DAG exist.
self.validate_or_raise()
# Clear the DAG and collect all dependencies in the DAG, which will be
# reapplied as constraints. All dependencies collected this way will
# have been created by a previous execution of 'normalize'.
# A dependency extracted here will only be reintegrated if it is
# discovered to apply according to _normalize_helper, so
# user-specified dependencies are recorded separately in case they
# refer to specs which take several normalization passes to
# materialize.
all_spec_deps = self.flat_dependencies(copy=False)
if user_spec_deps:
for name, spec in user_spec_deps.items():
if name not in all_spec_deps:
all_spec_deps[name] = spec
else:
all_spec_deps[name].constrain(spec)
# Initialize index of virtual dependency providers if
# concretize didn't pass us one already
provider_index = ProviderIndex(
[s for s in all_spec_deps.values()], restrict=True)
# traverse the package DAG and fill out dependencies according
# to package files & their 'when' specs
visited = set()
any_change = self._normalize_helper(
visited, all_spec_deps, provider_index, tests)
# Mark the spec as normal once done.
self._normal = True
return any_change
def normalized(self):
"""
Return a normalized copy of this spec without modifying this spec.
"""
clone = self.copy()
clone.normalize()
return clone
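# Illustrative sketch of normalize()/normalized(): turning a flat parsed spec
# into a DAG of known dependencies. 'hdf5' is an assumed package name.
#
#     flat = Spec('hdf5+mpi')
#     dag = flat.normalized()              # copy; 'flat' is untouched
#     changed = flat.normalize()           # in place; True if the DAG changed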
def validate_or_raise(self):
"""Checks that names and values in this spec are real. If they're not,
it will raise an appropriate exception.
"""
# FIXME: this function should be lazy, and collect all the errors
# FIXME: before raising the exceptions, instead of being greedy and
# FIXME: raise just the first one encountered
for spec in self.traverse():
# raise an UnknownPackageError if the spec's package isn't real.
if (not spec.virtual) and spec.name:
spack.repo.get(spec.fullname)
# validate compiler in addition to the package name.
if spec.compiler:
if not compilers.supported(spec.compiler):
raise UnsupportedCompilerError(spec.compiler.name)
# Ensure correctness of variants (if the spec is not virtual)
if not spec.virtual:
pkg_cls = spec.package_class
pkg_variants = pkg_cls.variants
# reserved names are variants that may be set on any package
# but are not necessarily recorded by the package's class
not_existing = set(spec.variants) - (
set(pkg_variants) | set(spack.directives.reserved_names))
if not_existing:
raise UnknownVariantError(spec.name, not_existing)
substitute_abstract_variants(spec)
def constrain(self, other, deps=True):
"""Merge the constraints of other with self.
Returns True if the spec changed as a result, False if not.
"""
# If we are trying to constrain a concrete spec, either the spec
# already satisfies the constraint (and the method returns False)
# or it raises an exception
if self.concrete:
if self.satisfies(other):
return False
else:
raise UnsatisfiableSpecError(
self, other, 'constrain a concrete spec'
)
other = self._autospec(other)
if not (self.name == other.name or
(not self.name) or
(not other.name)):
raise UnsatisfiableSpecNameError(self.name, other.name)
if (other.namespace is not None and
self.namespace is not None and
other.namespace != self.namespace):
raise UnsatisfiableSpecNameError(self.fullname, other.fullname)
if not self.versions.overlaps(other.versions):
raise UnsatisfiableVersionSpecError(self.versions, other.versions)
for v in [x for x in other.variants if x in self.variants]:
if not self.variants[v].compatible(other.variants[v]):
raise UnsatisfiableVariantSpecError(
self.variants[v], other.variants[v]
)
# TODO: Check out the logic here
sarch, oarch = self.architecture, other.architecture
if sarch is not None and oarch is not None:
if sarch.platform is not None and oarch.platform is not None:
if sarch.platform != oarch.platform:
raise UnsatisfiableArchitectureSpecError(sarch, oarch)
if sarch.os is not None and oarch.os is not None:
if sarch.os != oarch.os:
raise UnsatisfiableArchitectureSpecError(sarch, oarch)
if sarch.target is not None and oarch.target is not None:
if sarch.target != oarch.target:
raise UnsatisfiableArchitectureSpecError(sarch, oarch)
changed = False
if self.compiler is not None and other.compiler is not None:
changed |= self.compiler.constrain(other.compiler)
elif self.compiler is None:
changed |= (self.compiler != other.compiler)
self.compiler = other.compiler
changed |= self.versions.intersect(other.versions)
changed |= self.variants.constrain(other.variants)
changed |= self.compiler_flags.constrain(other.compiler_flags)
old = str(self.architecture)
sarch, oarch = self.architecture, other.architecture
if sarch is None or other.architecture is None:
self.architecture = sarch or oarch
else:
if sarch.platform is None or oarch.platform is None:
self.architecture.platform = sarch.platform or oarch.platform
if sarch.os is None or oarch.os is None:
sarch.os = sarch.os or oarch.os
if sarch.target is None or oarch.target is None:
sarch.target = sarch.target or oarch.target
changed |= (str(self.architecture) != old)
if deps:
changed |= self._constrain_dependencies(other)
return changed
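# Illustrative sketch of constrain(): merging another spec's constraints into
# this one. Package, version, and variant names are assumptions.
#
#     s = Spec('mpich')
#     s.constrain('mpich@3.0.4+debug')     # -> True (the spec changed)
#     s.constrain('mpich@3.0.4')           # -> False (nothing new to add)
#     # Constraining with a non-overlapping version range is expected to raise
#     # UnsatisfiableVersionSpecError instead of returning.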
def _constrain_dependencies(self, other):
"""Apply constraints of other spec's dependencies to this spec."""
other = self._autospec(other)
if not other._dependencies:
return False
# TODO: might want more detail than this, e.g. specific deps
# in violation. if this becomes a priority get rid of this
# check and be more specific about what's wrong.
if not other.satisfies_dependencies(self):
raise UnsatisfiableDependencySpecError(other, self)
# Handle common first-order constraints directly
changed = False
for name in self.common_dependencies(other):
changed |= self[name].constrain(other[name], deps=False)
if name in self._dependencies:
changed |= self._dependencies[name].update_deptypes(
other._dependencies[name].deptypes)
# Update with additional constraints from other spec
for name in other.dep_difference(self):
dep_spec_copy = other.get_dependency(name)
dep_copy = dep_spec_copy.spec
deptypes = dep_spec_copy.deptypes
self._add_dependency(dep_copy.copy(), deptypes)
changed = True
return changed
def common_dependencies(self, other):
"""Return names of dependencies that self an other have in common."""
common = set(
s.name for s in self.traverse(root=False))
common.intersection_update(
s.name for s in other.traverse(root=False))
return common
def constrained(self, other, deps=True):
"""Return a constrained copy without modifying this spec."""
clone = self.copy(deps=deps)
clone.constrain(other, deps)
return clone
def dep_difference(self, other):
"""Returns dependencies in self that are not in other."""
mine = set(s.name for s in self.traverse(root=False))
mine.difference_update(
s.name for s in other.traverse(root=False))
return mine
def _autospec(self, spec_like):
"""
Used to convert arguments to specs. If spec_like is a spec, returns
it. If it's a string, tries to parse a string. If that fails, tries
to parse a local spec from it (i.e. name is assumed to be self's name).
"""
if isinstance(spec_like, Spec):
return spec_like
return Spec(spec_like)
def satisfies(self, other, deps=True, strict=False, strict_deps=False):
"""Determine if this spec satisfies all constraints of another.
There are two senses for satisfies:
* `loose` (default): the absence of a constraint in self
implies that it *could* be satisfied by other, so we only
check that there are no conflicts with other for
constraints that this spec actually has.
* `strict`: strict means that we *must* meet all the
constraints specified on other.
"""
other = self._autospec(other)
# The only way to satisfy a concrete spec is to match its hash exactly.
if other.concrete:
return self.concrete and self.dag_hash() == other.dag_hash()
# A concrete provider can satisfy a virtual dependency.
if not self.virtual and other.virtual:
try:
pkg = spack.repo.get(self.fullname)
except spack.repo.UnknownEntityError:
# If we can't get package info on this spec, don't treat
# it as a provider of this vdep.
return False
if pkg.provides(other.name):
for provided, when_specs in pkg.provided.items():
if any(self.satisfies(when_spec, deps=False, strict=strict)
for when_spec in when_specs):
if provided.satisfies(other):
return True
return False
# Otherwise, first thing we care about is whether the name matches
if self.name != other.name and self.name and other.name:
return False
# namespaces either match, or other doesn't require one.
if (other.namespace is not None and
self.namespace is not None and
self.namespace != other.namespace):
return False
if self.versions and other.versions:
if not self.versions.satisfies(other.versions, strict=strict):
return False
elif strict and (self.versions or other.versions):
return False
# None indicates no constraints when not strict.
if self.compiler and other.compiler:
if not self.compiler.satisfies(other.compiler, strict=strict):
return False
elif strict and (other.compiler and not self.compiler):
return False
var_strict = strict
if (not self.name) or (not other.name):
var_strict = True
if not self.variants.satisfies(other.variants, strict=var_strict):
return False
# Architecture satisfaction is currently just string equality.
# If not strict, None means unconstrained.
if self.architecture and other.architecture:
if not self.architecture.satisfies(other.architecture, strict):
return False
elif strict and (other.architecture and not self.architecture):
return False
if not self.compiler_flags.satisfies(
other.compiler_flags,
strict=strict):
return False
# If we need to descend into dependencies, do it, otherwise we're done.
if deps:
deps_strict = strict
if self._concrete and not other.name:
# We're dealing with existing specs
deps_strict = True
return self.satisfies_dependencies(other, strict=deps_strict)
else:
return True
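# Illustrative sketch of the two satisfies() senses described above. Package
# names and versions are assumptions.
#
#     Spec('mpich@3.2.1').satisfies('mpich@3:')   # True:  3.2.1 is in range 3:
#     Spec('mpich@2.1').satisfies('mpich@3:')     # False: versions conflict
#     # With strict=True, a constraint missing from self (e.g. no version at
#     # all) is no longer treated as "could still be satisfied".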
def satisfies_dependencies(self, other, strict=False):
"""
This checks constraints on common dependencies against each other.
"""
other = self._autospec(other)
# If there are no constraints to satisfy, we're done.
if not other._dependencies:
return True
if strict:
# if we have no dependencies, we can't satisfy any constraints.
if not self._dependencies:
return False
selfdeps = self.traverse(root=False)
otherdeps = other.traverse(root=False)
if not all(any(d.satisfies(dep, strict=True) for d in selfdeps)
for dep in otherdeps):
return False
elif not self._dependencies:
# if not strict, this spec *could* eventually satisfy the
# constraints on other.
return True
# Handle first-order constraints directly
for name in self.common_dependencies(other):
if not self[name].satisfies(other[name], deps=False):
return False
# For virtual dependencies, we need to dig a little deeper.
self_index = ProviderIndex(self.traverse(), restrict=True)
other_index = ProviderIndex(other.traverse(), restrict=True)
# This handles cases where there are already providers for both vpkgs
if not self_index.satisfies(other_index):
return False
# These two loops handle cases where there is an overly restrictive
# vpkg in one spec for a provider in the other (e.g., mpi@3: is not
# compatible with mpich2)
for spec in self.virtual_dependencies():
if (spec.name in other_index and
not other_index.providers_for(spec)):
return False
for spec in other.virtual_dependencies():
if spec.name in self_index and not self_index.providers_for(spec):
return False
return True
def virtual_dependencies(self):
"""Return list of any virtual deps in this spec."""
return [spec for spec in self.traverse() if spec.virtual]
@property
@memoized
def patches(self):
"""Return patch objects for any patch sha256 sums on this Spec.
This is for use after concretization to iterate over any patches
associated with this spec.
TODO: this only checks in the package; it doesn't resurrect old
patches from install directories, but it probably should.
"""
if not self.concrete:
raise SpecError("Spec is not concrete: " + str(self))
if 'patches' not in self.variants:
return []
# FIXME: _patches_in_order_of_appearance is attached after
# FIXME: concretization to store the order of patches somewhere.
# FIXME: Needs to be refactored in a cleaner way.
# translate patch sha256sums to patch objects by consulting the index
patches = []
for sha256 in self.variants['patches']._patches_in_order_of_appearance:
index = spack.repo.path.patch_index
patch = index.patch_for_package(sha256, self.package)
patches.append(patch)
return patches
def _dup(self, other, deps=True, cleardeps=True, caches=None):
"""Copy the spec other into self. This is an overwriting
copy. It does not copy any dependents (parents), but by default
copies dependencies.
To duplicate an entire DAG, call _dup() on the root of the DAG.
Args:
other (Spec): spec to be copied onto ``self``
deps (bool or Sequence): if True, copies all of the dependencies. If
False, copies no dependencies. If a sequence of dependency types,
copies only dependencies of those types.
cleardeps (bool): if True clears the dependencies of ``self``,
before possibly copying the dependencies of ``other`` onto
``self``
caches (bool or None): preserve cached fields such as
``_normal``, ``_concrete``, and ``_cmp_key_cache``. By
default this is ``False`` if DAG structure would be
changed by the copy, ``True`` if it's an exact copy.
Returns:
True if ``self`` changed because of the copy operation,
False otherwise.
"""
# We don't count dependencies as changes here
changed = True
if hasattr(self, 'name'):
changed = (self.name != other.name and
self.versions != other.versions and
self.architecture != other.architecture and
self.compiler != other.compiler and
self.variants != other.variants and
self._normal != other._normal and
self.concrete != other.concrete and
self.external_path != other.external_path and
self.external_module != other.external_module and
self.compiler_flags != other.compiler_flags)
self._package = None
# Local node attributes get copied first.
self.name = other.name
self.versions = other.versions.copy()
self.architecture = other.architecture.copy() if other.architecture \
else None
self.compiler = other.compiler.copy() if other.compiler else None
if cleardeps:
self._dependents = DependencyMap()
self._dependencies = DependencyMap()
self.compiler_flags = other.compiler_flags.copy()
self.compiler_flags.spec = self
self.variants = other.variants.copy()
# FIXME: we manage _patches_in_order_of_appearance specially here
# to keep it from leaking out of spec.py, but we should figure
# out how to handle it more elegantly in the Variant classes.
for k, v in other.variants.items():
patches = getattr(v, '_patches_in_order_of_appearance', None)
if patches:
self.variants[k]._patches_in_order_of_appearance = patches
self.variants.spec = self
self.external_path = other.external_path
self.external_module = other.external_module
self.namespace = other.namespace
# Cached fields are results of expensive operations.
# If we preserved the original structure, we can copy them
# safely. If not, they need to be recomputed.
if caches is None:
caches = (deps is True or deps == dp.all_deptypes)
# If we copy dependencies, preserve DAG structure in the new spec
if deps:
# If caller restricted deptypes to be copied, adjust that here.
# By default, just copy all deptypes
deptypes = dp.all_deptypes
if isinstance(deps, (tuple, list)):
deptypes = deps
self._dup_deps(other, deptypes, caches)
self._concrete = other._concrete
if caches:
self._hash = other._hash
self._build_hash = other._build_hash
self._cmp_key_cache = other._cmp_key_cache
self._normal = other._normal
self._full_hash = other._full_hash
else:
self._hash = None
self._build_hash = None
self._cmp_key_cache = None
self._normal = False
self._full_hash = None
return changed
def _dup_deps(self, other, deptypes, caches):
new_specs = {self.name: self}
for dspec in other.traverse_edges(cover='edges',
root=False):
if (dspec.deptypes and
not any(d in deptypes for d in dspec.deptypes)):
continue
if dspec.parent.name not in new_specs:
new_specs[dspec.parent.name] = dspec.parent.copy(
deps=False, caches=caches)
if dspec.spec.name not in new_specs:
new_specs[dspec.spec.name] = dspec.spec.copy(
deps=False, caches=caches)
new_specs[dspec.parent.name]._add_dependency(
new_specs[dspec.spec.name], dspec.deptypes)
def copy(self, deps=True, **kwargs):
"""Make a copy of this spec.
Args:
deps (bool or tuple): Defaults to True. If boolean, controls
whether dependencies are copied (copied if True). If a
tuple is provided, *only* dependencies of types matching
those in the tuple are copied.
kwargs: additional arguments for internal use (passed to ``_dup``).
Returns:
A copy of this spec.
Examples:
Deep copy with dependencies::
spec.copy()
spec.copy(deps=True)
Shallow copy (no dependencies)::
spec.copy(deps=False)
Only build and run dependencies::
spec.copy(deps=('build', 'run'))
"""
clone = Spec.__new__(Spec)
clone._dup(self, deps=deps, **kwargs)
return clone
@property
def version(self):
if not self.versions.concrete:
raise SpecError("Spec version is not concrete: " + str(self))
return self.versions[0]
def __getitem__(self, name):
"""Get a dependency from the spec by its name. This call implicitly
sets a query state in the package being retrieved. The behavior of
packages may be influenced by additional query parameters that are
passed after a colon symbol.
Note that if a virtual package is queried, a copy of the Spec is
returned, while for a non-virtual package a reference is returned.
"""
query_parameters = name.split(':')
if len(query_parameters) > 2:
msg = 'key has more than one \':\' symbol.'
msg += ' At most one is admitted.'
raise KeyError(msg)
name, query_parameters = query_parameters[0], query_parameters[1:]
if query_parameters:
# We have extra query parameters, which are comma separated
# values
csv = query_parameters.pop().strip()
query_parameters = re.split(r'\s*,\s*', csv)
try:
value = next(
itertools.chain(
# Regular specs
(x for x in self.traverse() if x.name == name),
(x for x in self.traverse()
if (not x.virtual) and x.package.provides(name))
)
)
except StopIteration:
raise KeyError("No spec with name %s in %s" % (name, self))
if self._concrete:
return SpecBuildInterface(value, name, query_parameters)
return value
def __contains__(self, spec):
"""True if this spec or some dependency satisfies the spec.
Note: If ``spec`` is anonymous, we ONLY check whether the root
satisfies it, NOT dependencies. This is because most anonymous
specs (e.g., ``@1.2``) don't make sense when applied across an
entire DAG -- we limit them to the root.
"""
spec = self._autospec(spec)
# if anonymous or same name, we only have to look at the root
if not spec.name or spec.name == self.name:
return self.satisfies(spec)
else:
return any(s.satisfies(spec) for s in self.traverse(root=False))
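# Illustrative sketch of item access and containment on a DAG. Package names
# are assumptions; 'mpi' stands for a virtual dependency provided by mpich.
#
#     spec = Spec('hdf5+mpi ^mpich').concretized()
#     'zlib' in spec                # True if zlib ended up in the DAG
#     spec['mpich'].version         # concrete version of that dependency
#     spec['mpi']                   # a virtual name resolves to its provider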
def sorted_deps(self):
"""Return a list of all dependencies sorted by name."""
deps = self.flat_dependencies()
return tuple(deps[name] for name in sorted(deps))
def _eq_dag(self, other, vs, vo, deptypes):
"""Recursive helper for eq_dag and ne_dag. Does the actual DAG
traversal."""
vs.add(id(self))
vo.add(id(other))
if self.ne_node(other):
return False
if len(self._dependencies) != len(other._dependencies):
return False
ssorted = [self._dependencies[name]
for name in sorted(self._dependencies)]
osorted = [other._dependencies[name]
for name in sorted(other._dependencies)]
for s_dspec, o_dspec in zip(ssorted, osorted):
if deptypes and s_dspec.deptypes != o_dspec.deptypes:
return False
s, o = s_dspec.spec, o_dspec.spec
visited_s = id(s) in vs
visited_o = id(o) in vo
# Check for duplicate or non-equal dependencies
if visited_s != visited_o:
return False
# Skip visited nodes
if visited_s or visited_o:
continue
# Recursive check for equality
if not s._eq_dag(o, vs, vo, deptypes):
return False
return True
def eq_dag(self, other, deptypes=True):
"""True if the full dependency DAGs of specs are equal."""
return self._eq_dag(other, set(), set(), deptypes)
def ne_dag(self, other, deptypes=True):
"""True if the full dependency DAGs of specs are not equal."""
return not self.eq_dag(other, deptypes)
def _cmp_node(self):
"""Comparison key for just *this node* and not its deps."""
return (self.name,
self.namespace,
tuple(self.versions),
self.variants,
self.architecture,
self.compiler,
self.compiler_flags)
def eq_node(self, other):
"""Equality with another spec, not including dependencies."""
return self._cmp_node() == other._cmp_node()
def ne_node(self, other):
"""Inequality with another spec, not including dependencies."""
return self._cmp_node() != other._cmp_node()
def _cmp_key(self):
"""This returns a key for the spec *including* DAG structure.
The key is the concatenation of:
1. A tuple describing this node in the DAG.
2. The hash of each of this node's dependencies' cmp_keys.
"""
if self._cmp_key_cache:
return self._cmp_key_cache
dep_tuple = tuple(
(d.spec.name, hash(d.spec), tuple(sorted(d.deptypes)))
for name, d in sorted(self._dependencies.items()))
key = (self._cmp_node(), dep_tuple)
if self._concrete:
self._cmp_key_cache = key
return key
def colorized(self):
return colorize_spec(self)
def format(self, format_string=default_format, **kwargs):
r"""Prints out particular pieces of a spec, depending on what is
in the format string.
Using the ``{attribute}`` syntax, any field of the spec can be
selected. Those attributes can be recursive. For example,
``s.format('{compiler.version}')`` will print the version of the
compiler.
Commonly used attributes of the Spec for format strings include::
name
version
compiler
compiler.name
compiler.version
compiler_flags
variants
architecture
architecture.platform
architecture.os
architecture.target
prefix
Some additional special-case properties can be added::
hash[:len] The DAG hash with optional length argument
spack_root The spack root directory
spack_install The spack install directory
The ``^`` sigil can be used to access dependencies by name.
``s.format('{^mpi.name}')`` will print the name of the MPI
implementation in the spec.
The ``@``, ``%``, ``arch=``, and ``/`` sigils
can be used to include the sigil with the printed
string. These sigils may only be used with the appropriate
attributes, listed below::
@ ``{@version}``, ``{@compiler.version}``
% ``{%compiler}``, ``{%compiler.name}``
arch= ``{arch=architecture}``
/ ``{/hash}``, ``{/hash:7}``, etc
The ``@`` sigil may also be used for any other property named
``version``. Sigils printed with the attribute string are only
printed if the attribute string is non-empty, and are colored
according to the color of the attribute.
Sigils are not used for printing variants. Variants listed by
name naturally print with their sigil. For example,
``spec.format('{variants.debug}')`` would print either
``+debug`` or ``~debug`` depending on the value of the
variant. Non-boolean variants print as ``name=value``. To
print variant names or values independently, use
``spec.format('{variants.<name>.name}')`` or
``spec.format('{variants.<name>.value}')``.
Spec format strings use ``\`` as the escape character. Use
``\{`` and ``\}`` for literal braces, and ``\\`` for the
literal ``\`` character. Also use ``\$`` for the literal ``$``
to differentiate from previous, deprecated format string
syntax.
The previous format strings are deprecated. They can still be
accessed by the ``old_format`` method. The ``format`` method
will call ``old_format`` if the character ``$`` appears
unescaped in the format string.
Args:
format_string (str): string containing the format to be expanded
Keyword Args:
color (bool): True if returned string is colored
transform (dict): maps full-string formats to a callable \
that accepts a string and returns another one
"""
# If we have an unescaped $ sigil, use the deprecated format strings
if re.search(r'[^\\]*\$', format_string):
return self.old_format(format_string, **kwargs)
color = kwargs.get('color', False)
transform = kwargs.get('transform', {})
out = six.StringIO()
def write(s, c=None):
f = cescape(s)
if c is not None:
f = color_formats[c] + f + '@.'
cwrite(f, stream=out, color=color)
def write_attribute(spec, attribute, color):
current = spec
if attribute.startswith('^'):
attribute = attribute[1:]
dep, attribute = attribute.split('.', 1)
current = self[dep]
if attribute == '':
raise SpecFormatStringError(
'Format string attributes must be non-empty')
attribute = attribute.lower()
sig = ''
if attribute[0] in '@%/':
# color sigils that are inside braces
sig = attribute[0]
attribute = attribute[1:]
elif attribute.startswith('arch='):
sig = ' arch=' # include space as separator
attribute = attribute[5:]
parts = attribute.split('.')
assert parts
# check that the sigil is valid for the attribute.
if sig == '@' and parts[-1] not in ('versions', 'version'):
raise SpecFormatSigilError(sig, 'versions', attribute)
elif sig == '%' and attribute not in ('compiler', 'compiler.name'):
raise SpecFormatSigilError(sig, 'compilers', attribute)
elif sig == '/' and not re.match(r'hash(:\d+)?$', attribute):
raise SpecFormatSigilError(sig, 'DAG hashes', attribute)
elif sig == ' arch=' and attribute not in ('architecture', 'arch'):
raise SpecFormatSigilError(sig, 'the architecture', attribute)
# find the morph function for our attribute
morph = transform.get(attribute, lambda s, x: x)
# Special cases for non-spec attributes and hashes.
# These must be the only non-dep component of the format attribute
if attribute == 'spack_root':
write(morph(spec, spack.paths.spack_root))
return
elif attribute == 'spack_install':
write(morph(spec, spack.store.layout.root))
return
elif re.match(r'hash(:\d)?', attribute):
col = '#'
if ':' in attribute:
_, length = attribute.split(':')
write(sig + morph(spec, spec.dag_hash(int(length))), col)
else:
write(sig + morph(spec, spec.dag_hash()), col)
return
# Iterate over components using getattr to get next element
for idx, part in enumerate(parts):
if not part:
raise SpecFormatStringError(
'Format string attributes must be non-empty'
)
if part.startswith('_'):
raise SpecFormatStringError(
'Attempted to format private attribute'
)
else:
if isinstance(current, VariantMap):
# subscript instead of getattr for variant names
current = current[part]
else:
# aliases
if part == 'arch':
part = 'architecture'
elif part == 'version':
# Version requires concrete spec, versions does not
# when concrete, they print the same thing
part = 'versions'
try:
current = getattr(current, part)
except AttributeError:
parent = '.'.join(parts[:idx])
m = 'Attempted to format attribute %s.' % attribute
m += ' Spec.%s has no attribute %s' % (parent, part)
raise SpecFormatStringError(m)
if isinstance(current, VersionList):
if current == _any_version:
# We don't print empty version lists
return
if callable(current):
raise SpecFormatStringError(
'Attempted to format callable object'
)
if not current:
# We're not printing anything
return
# Set color codes for various attributes
col = None
if 'variants' in parts:
col = '+'
elif 'architecture' in parts:
col = '='
elif 'compiler' in parts or 'compiler_flags' in parts:
col = '%'
elif 'version' in parts:
col = '@'
# Finally, write the output
write(sig + morph(spec, str(current)), col)
attribute = ''
in_attribute = False
escape = False
for c in format_string:
if escape:
out.write(c)
escape = False
elif c == '\\':
escape = True
elif in_attribute:
if c == '}':
write_attribute(self, attribute, color)
attribute = ''
in_attribute = False
else:
attribute += c
else:
if c == '}':
raise SpecFormatStringError(
'Encountered closing } before opening {'
)
elif c == '{':
in_attribute = True
else:
out.write(c)
if in_attribute:
raise SpecFormatStringError(
'Format string terminated while reading attribute. '
'Missing terminating }.'
)
return out.getvalue()
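# Illustrative formatting sketch using the attribute syntax documented above.
# The spec literal is an assumption.
#
#     s = Spec('hdf5@1.10.5 %gcc@9.2.0 +mpi').concretized()
#     s.format('{name}{@version}{%compiler.name}{@compiler.version}')
#     s.format('{name}-{version}-{hash:7}')
#     s.cformat('{name}{@version}')        # same, but color defaults to auto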
def old_format(self, format_string='$_$@$%@+$+$=', **kwargs):
"""
The format strings you can provide are::
$_ Package name
$. Full package name (with namespace)
$@ Version with '@' prefix
$% Compiler with '%' prefix
$%@ Compiler with '%' prefix & compiler version with '@' prefix
$%+ Compiler with '%' prefix & compiler flags prefixed by name
$%@+ Compiler, compiler version, and compiler flags with same
prefixes as above
$+ Options
$= Architecture prefixed by 'arch='
$/ 7-char prefix of DAG hash with '-' prefix
$$ $
You can also use full-string versions, which elide the prefixes::
${PACKAGE} Package name
${FULLPACKAGE} Full package name (with namespace)
${VERSION} Version
${COMPILER} Full compiler string
${COMPILERNAME} Compiler name
${COMPILERVER} Compiler version
${COMPILERFLAGS} Compiler flags
${OPTIONS} Options
${ARCHITECTURE} Architecture
${PLATFORM} Platform
${OS} Operating System
${TARGET} Target
${SHA1} Dependencies 8-char sha1 prefix
${HASH:len} DAG hash with optional length specifier
${DEP:name:OPTION} Evaluates as OPTION would for self['name']
${SPACK_ROOT} The spack root directory
${SPACK_INSTALL} The default spack install directory,
${SPACK_PREFIX}/opt
${PREFIX} The package prefix
${NAMESPACE} The package namespace
Note these are case-insensitive: for example you can specify either
``${PACKAGE}`` or ``${package}``.
Optionally you can provide a width, e.g. ``$20_`` for a 20-wide name.
Like printf, you can provide '-' for left justification, e.g.
``$-20_`` for a left-justified name.
Anything else is copied verbatim into the output stream.
Args:
format_string (str): string containing the format to be expanded
Keyword Args:
color (bool): True if returned string is colored
transform (dict): maps full-string formats to a callable \
that accepts a string and returns another one
Examples:
The following line:
.. code-block:: python
s = spec.format('$_$@$+')
translates to the name, version, and options of the package, but no
dependencies, arch, or compiler.
TODO: allow, e.g., ``$6#`` to customize short hash length
TODO: allow, e.g., ``$//`` for full hash.
"""
color = kwargs.get('color', False)
# Dictionary of transformations for named tokens
token_transforms = dict(
(k.upper(), v) for k, v in kwargs.get('transform', {}).items())
length = len(format_string)
out = six.StringIO()
named = escape = compiler = False
named_str = fmt = ''
def write(s, c=None):
f = cescape(s)
if c is not None:
f = color_formats[c] + f + '@.'
cwrite(f, stream=out, color=color)
iterator = enumerate(format_string)
for i, c in iterator:
if escape:
fmt = '%'
if c == '-':
fmt += c
i, c = next(iterator)
while c in '0123456789':
fmt += c
i, c = next(iterator)
fmt += 's'
if c == '_':
name = self.name if self.name else ''
out.write(fmt % name)
elif c == '.':
name = self.fullname if self.fullname else ''
out.write(fmt % name)
elif c == '@':
if self.versions and self.versions != _any_version:
write(fmt % (c + str(self.versions)), c)
elif c == '%':
if self.compiler:
write(fmt % (c + str(self.compiler.name)), c)
compiler = True
elif c == '+':
if self.variants:
write(fmt % str(self.variants), c)
elif c == '=':
if self.architecture and str(self.architecture):
a_str = ' arch' + c + str(self.architecture) + ' '
write(fmt % (a_str), c)
elif c == '/':
out.write('/' + fmt % (self.dag_hash(7)))
elif c == '$':
if fmt != '%s':
raise ValueError("Can't use format width with $$.")
out.write('$')
elif c == '{':
named = True
named_str = ''
escape = False
elif compiler:
if c == '@':
if (self.compiler and self.compiler.versions and
self.compiler.versions != _any_version):
write(c + str(self.compiler.versions), '%')
elif c == '+':
if self.compiler_flags:
write(fmt % str(self.compiler_flags), '%')
compiler = False
elif c == '$':
escape = True
compiler = False
else:
out.write(c)
compiler = False
elif named:
if not c == '}':
if i == length - 1:
raise ValueError("Error: unterminated ${ in format:"
"'%s'" % format_string)
named_str += c
continue
named_str = named_str.upper()
# Retrieve the token transformation from the dictionary.
#
# The default behavior is to leave the string unchanged
# (`lambda x: x` is the identity function)
transform = token_transforms.get(named_str, lambda s, x: x)
if named_str == 'PACKAGE':
name = self.name if self.name else ''
write(fmt % transform(self, name))
elif named_str == 'FULLPACKAGE':
name = self.fullname if self.fullname else ''
write(fmt % transform(self, name))
elif named_str == 'VERSION':
if self.versions and self.versions != _any_version:
write(fmt % transform(self, str(self.versions)), '@')
elif named_str == 'COMPILER':
if self.compiler:
write(fmt % transform(self, self.compiler), '%')
elif named_str == 'COMPILERNAME':
if self.compiler:
write(fmt % transform(self, self.compiler.name), '%')
elif named_str in ['COMPILERVER', 'COMPILERVERSION']:
if self.compiler:
write(
fmt % transform(self, self.compiler.versions),
'%'
)
elif named_str == 'COMPILERFLAGS':
if self.compiler:
write(
fmt % transform(self, str(self.compiler_flags)),
'%'
)
elif named_str == 'OPTIONS':
if self.variants:
write(fmt % transform(self, str(self.variants)), '+')
elif named_str in ["ARCHITECTURE", "PLATFORM", "TARGET", "OS"]:
if self.architecture and str(self.architecture):
if named_str == "ARCHITECTURE":
write(
fmt % transform(self, str(self.architecture)),
'='
)
elif named_str == "PLATFORM":
platform = str(self.architecture.platform)
write(fmt % transform(self, platform), '=')
elif named_str == "OS":
operating_sys = str(self.architecture.os)
write(fmt % transform(self, operating_sys), '=')
elif named_str == "TARGET":
target = str(self.architecture.target)
write(fmt % transform(self, target), '=')
elif named_str == 'SHA1':
if self.dependencies:
out.write(fmt % transform(self, str(self.dag_hash(7))))
elif named_str == 'SPACK_ROOT':
out.write(fmt % transform(self, spack.paths.prefix))
elif named_str == 'SPACK_INSTALL':
out.write(fmt % transform(self, spack.store.root))
elif named_str == 'PREFIX':
out.write(fmt % transform(self, self.prefix))
elif named_str.startswith('HASH'):
if named_str.startswith('HASH:'):
_, hashlen = named_str.split(':')
hashlen = int(hashlen)
else:
hashlen = None
out.write(fmt % (self.dag_hash(hashlen)))
elif named_str == 'NAMESPACE':
out.write(fmt % transform(self, self.namespace))
elif named_str.startswith('DEP:'):
_, dep_name, dep_option = named_str.lower().split(':', 2)
dep_spec = self[dep_name]
out.write(fmt % (dep_spec.format('${%s}' % dep_option)))
named = False
elif c == '$':
escape = True
if i == length - 1:
raise ValueError("Error: unterminated $ in format: '%s'"
% format_string)
else:
out.write(c)
result = out.getvalue()
return result
def cformat(self, *args, **kwargs):
"""Same as format, but color defaults to auto instead of False."""
kwargs = kwargs.copy()
kwargs.setdefault('color', None)
return self.format(*args, **kwargs)
def dep_string(self):
return ''.join(" ^" + dep.format() for dep in self.sorted_deps())
def __str__(self):
ret = self.format() + self.dep_string()
return ret.strip()
def install_status(self):
"""Helper for tree to print DB install status."""
if not self.concrete:
return None
try:
record = spack.store.db.get_record(self)
return record.installed
except KeyError:
return None
def _installed_explicitly(self):
"""Helper for tree to print DB install status."""
if not self.concrete:
return None
try:
record = spack.store.db.get_record(self)
return record.explicit
except KeyError:
return None
def tree(self, **kwargs):
"""Prints out this spec and its dependencies, tree-formatted
with indentation."""
color = kwargs.pop('color', get_color_when())
depth = kwargs.pop('depth', False)
hashes = kwargs.pop('hashes', False)
hlen = kwargs.pop('hashlen', None)
status_fn = kwargs.pop('status_fn', False)
cover = kwargs.pop('cover', 'nodes')
indent = kwargs.pop('indent', 0)
fmt = kwargs.pop('format', default_format)
prefix = kwargs.pop('prefix', None)
show_types = kwargs.pop('show_types', False)
deptypes = kwargs.pop('deptypes', 'all')
recurse_dependencies = kwargs.pop('recurse_dependencies', True)
check_kwargs(kwargs, self.tree)
out = ""
for d, dep_spec in self.traverse_edges(
order='pre', cover=cover, depth=True, deptypes=deptypes):
node = dep_spec.spec
if prefix is not None:
out += prefix(node)
out += " " * indent
if depth:
out += "%-4d" % d
if status_fn:
status = status_fn(node)
if node.package.installed_upstream:
out += colorize("@g{[^]} ", color=color)
elif status is None:
out += colorize("@K{ - } ", color=color) # not installed
elif status:
out += colorize("@g{[+]} ", color=color) # installed
else:
out += colorize("@r{[-]} ", color=color) # missing
if hashes:
out += colorize('@K{%s} ', color=color) % node.dag_hash(hlen)
if show_types:
types = set()
if cover == 'nodes':
# when only covering nodes, we merge dependency types
# from all dependents before showing them.
for name, ds in node.dependents_dict().items():
if ds.deptypes:
types.update(set(ds.deptypes))
elif dep_spec.deptypes:
# when covering edges or paths, we show dependency
# types only for the edge through which we visited
types = set(dep_spec.deptypes)
out += '['
for t in dp.all_deptypes:
out += ''.join(t[0] if t in types else ' ')
out += '] '
out += (" " * d)
if d > 0:
out += "^"
out += node.format(fmt, color=color) + "\n"
# Check if we wanted just the first line
if not recurse_dependencies:
break
return out
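# Illustrative sketch: printing an indented dependency tree with short hashes,
# depths, and dependency types. Keyword names follow the kwargs popped above.
#
#     print(spec.tree(hashes=True, hashlen=7, show_types=True, depth=True))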
def __repr__(self):
return str(self)
@property
def platform(self):
return self.architecture.platform
@property
def os(self):
return self.architecture.os
@property
def target(self):
# This property returns the underlying microarchitecture object
# to give to the attribute the appropriate comparison semantic
return self.architecture.target.microarchitecture
class LazySpecCache(collections.defaultdict):
"""Cache for Specs that uses a spec_like as key, and computes lazily
the corresponding value ``Spec(spec_like)``.
"""
def __init__(self):
super(LazySpecCache, self).__init__(Spec)
def __missing__(self, key):
value = self.default_factory(key)
self[key] = value
return value
#: These are possible token types in the spec grammar.
HASH, DEP, AT, COLON, COMMA, ON, OFF, PCT, EQ, ID, VAL, FILE = range(12)
#: Regex for fully qualified spec names. (e.g., builtin.hdf5)
spec_id_re = r'\w[\w.-]*'
class SpecLexer(spack.parse.Lexer):
"""Parses tokens that make up spack specs."""
def __init__(self):
super(SpecLexer, self).__init__([
(r'\^', lambda scanner, val: self.token(DEP, val)),
(r'\@', lambda scanner, val: self.token(AT, val)),
(r'\:', lambda scanner, val: self.token(COLON, val)),
(r'\,', lambda scanner, val: self.token(COMMA, val)),
(r'\+', lambda scanner, val: self.token(ON, val)),
(r'\-', lambda scanner, val: self.token(OFF, val)),
(r'\~', lambda scanner, val: self.token(OFF, val)),
(r'\%', lambda scanner, val: self.token(PCT, val)),
(r'\=', lambda scanner, val: self.token(EQ, val)),
# Filenames match before identifiers, so no initial filename
# component is parsed as a spec (e.g., in subdir/spec.yaml)
(r'[/\w.-]+\.yaml[^\b]*', lambda scanner, v: self.token(FILE, v)),
# Hash match after filename. No valid filename can be a hash
# (files end w/.yaml), but a hash can match a filename prefix.
(r'/', lambda scanner, val: self.token(HASH, val)),
# Identifiers match after filenames and hashes.
(spec_id_re, lambda scanner, val: self.token(ID, val)),
(r'\s+', lambda scanner, val: None)],
[EQ],
[(r'[\S].*', lambda scanner, val: self.token(VAL, val)),
(r'\s+', lambda scanner, val: None)],
[VAL])
# Lexer is always the same for every parser.
_lexer = SpecLexer()
class SpecParser(spack.parse.Parser):
def __init__(self, initial_spec=None):
"""Construct a new SpecParser.
Args:
initial_spec (Spec, optional): provide a Spec that we'll parse
directly into. This is used to avoid construction of a
superfluous Spec object in the Spec constructor.
"""
super(SpecParser, self).__init__(_lexer)
self.previous = None
self._initial = initial_spec
def do_parse(self):
specs = []
try:
while self.next:
# Try a file first, but if it doesn't succeed, keep parsing
# as from_file may backtrack and try an id.
if self.accept(FILE):
spec = self.spec_from_file()
if spec:
specs.append(spec)
continue
if self.accept(ID):
self.previous = self.token
if self.accept(EQ):
# We're parsing an anonymous spec beginning with a
# key-value pair.
if not specs:
self.push_tokens([self.previous, self.token])
self.previous = None
specs.append(self.spec(None))
else:
if specs[-1].concrete:
# Trying to add k-v pair to spec from hash
raise RedundantSpecError(specs[-1],
'key-value pair')
# We should never end up here.
# This requires starting a new spec with ID, EQ
# After another spec that is not concrete
# If the previous spec is not concrete, this is
# handled in the spec parsing loop
# If it is concrete, see the if statement above
# If there is no previous spec, we don't land in
# this else case.
self.unexpected_token()
else:
# We're parsing a new spec by name
self.previous = None
specs.append(self.spec(self.token.value))
elif self.accept(HASH):
# We're finding a spec by hash
specs.append(self.spec_by_hash())
elif self.accept(DEP):
if not specs:
# We're parsing an anonymous spec beginning with a
# dependency. Push the token to recover after creating
# anonymous spec
self.push_tokens([self.token])
specs.append(self.spec(None))
else:
dep = None
if self.accept(FILE):
# this may return None, in which case we backtrack
dep = self.spec_from_file()
if not dep and self.accept(HASH):
# We're finding a dependency by hash for an
# anonymous spec
dep = self.spec_by_hash()
dep = dep.copy(deps=('link', 'run'))
if not dep:
# We're adding a dependency to the last spec
self.expect(ID)
dep = self.spec(self.token.value)
# Raise an error if the previous spec is already
# concrete (assigned by hash)
if specs[-1]._hash:
raise RedundantSpecError(specs[-1], 'dependency')
# command line deps get empty deptypes now.
# Real deptypes are assigned later per packages.
specs[-1]._add_dependency(dep, ())
else:
# If the next token can be part of a valid anonymous spec,
# create the anonymous spec
if self.next.type in (AT, ON, OFF, PCT):
# Raise an error if the previous spec is already
# concrete (assigned by hash)
if specs and specs[-1]._hash:
raise RedundantSpecError(specs[-1],
'compiler, version, '
'or variant')
specs.append(self.spec(None))
else:
self.unexpected_token()
except spack.parse.ParseError as e:
raise SpecParseError(e)
# If the spec has an os or a target and no platform, give it
# the default platform
platform_default = spack.architecture.platform().name
for spec in specs:
for s in spec.traverse():
if s.architecture and not s.architecture.platform and \
(s.architecture.os or s.architecture.target):
s._set_architecture(platform=platform_default)
return specs
def spec_from_file(self):
"""Read a spec from a filename parsed on the input stream.
There is some care taken here to ensure that filenames are a last
resort, and that any valid package name is parsed as a name
before we consider it as a file. Specs are used in lots of places;
we don't want the parser touching the filesystem unnecessarily.
The parse logic is as follows:
1. We require that filenames end in .yaml, which means that no valid
filename can be interpreted as a hash (hashes can't have '.')
2. We avoid treating paths like /path/to/spec.yaml as hashes, or paths
like subdir/spec.yaml as ids by lexing filenames before hashes.
3. For spec names that match file and id regexes, like 'builtin.yaml',
we backtrack from spec_from_file() and treat them as spec names.
"""
path = self.token.value
# don't treat builtin.yaml, builtin.yaml-cpp, etc. as filenames
if re.match(spec_id_re + '$', path):
self.push_tokens([spack.parse.Token(ID, self.token.value)])
return None
# Special case where someone omits a space after a filename. Consider:
#
# libdwarf^/some/path/to/libelf.yamllibdwarf ^../../libelf.yaml
#
# The error is clearly an omitted space. To handle this, the FILE
# regex admits text *beyond* .yaml, and we raise a nice error for
# file names that don't end in .yaml.
if not path.endswith(".yaml"):
raise SpecFilenameError(
"Spec filename must end in .yaml: '{0}'".format(path))
# if we get here, we're *finally* interpreting path as a filename
if not os.path.exists(path):
raise NoSuchSpecFileError("No such spec file: '{0}'".format(path))
with open(path) as f:
return Spec.from_yaml(f)
def parse_compiler(self, text):
self.setup(text)
return self.compiler()
def spec_by_hash(self):
self.expect(ID)
dag_hash = self.token.value
matches = spack.store.db.get_by_hash(dag_hash)
if not matches:
raise NoSuchHashError(dag_hash)
if len(matches) != 1:
raise AmbiguousHashError(
"Multiple packages specify hash beginning '%s'."
% dag_hash, *matches)
return matches[0]
def spec(self, name):
"""Parse a spec out of the input. If a spec is supplied, initialize
and return it instead of creating a new one."""
spec_namespace = None
spec_name = None
if name:
spec_namespace, dot, spec_name = name.rpartition('.')
if not spec_namespace:
spec_namespace = None
self.check_identifier(spec_name)
if self._initial is None:
spec = Spec()
else:
# this is used by Spec.__init__
spec = self._initial
self._initial = None
spec.namespace = spec_namespace
spec.name = spec_name
while self.next:
if self.accept(AT):
vlist = self.version_list()
spec.versions = VersionList()
for version in vlist:
spec._add_version(version)
elif self.accept(ON):
name = self.variant()
spec.variants[name] = BoolValuedVariant(name, True)
elif self.accept(OFF):
name = self.variant()
spec.variants[name] = BoolValuedVariant(name, False)
elif self.accept(PCT):
spec._set_compiler(self.compiler())
elif self.accept(ID):
self.previous = self.token
if self.accept(EQ):
# We're adding a key-value pair to the spec
self.expect(VAL)
spec._add_flag(self.previous.value, self.token.value)
self.previous = None
else:
# We've found the start of a new spec. Go back to do_parse
# and read this token again.
self.push_tokens([self.token])
self.previous = None
break
elif self.accept(HASH):
# Get spec by hash and confirm it matches what we already have
hash_spec = self.spec_by_hash()
if hash_spec.satisfies(spec):
spec._dup(hash_spec)
break
else:
raise InvalidHashError(spec, hash_spec.dag_hash())
else:
break
return spec
def variant(self, name=None):
if name:
return name
else:
self.expect(ID)
self.check_identifier()
return self.token.value
def version(self):
start = None
end = None
if self.accept(ID):
start = self.token.value
if self.accept(COLON):
if self.accept(ID):
if self.next and self.next.type is EQ:
# This is a start: range followed by a key=value pair
self.push_tokens([self.token])
else:
end = self.token.value
elif start:
# No colon, but there was a version.
return Version(start)
else:
# No colon and no id: invalid version.
self.next_token_error("Invalid version specifier")
if start:
start = Version(start)
if end:
end = Version(end)
return VersionRange(start, end)
def version_list(self):
vlist = []
vlist.append(self.version())
while self.accept(COMMA):
vlist.append(self.version())
return vlist
def compiler(self):
self.expect(ID)
self.check_identifier()
compiler = CompilerSpec.__new__(CompilerSpec)
compiler.name = self.token.value
compiler.versions = VersionList()
if self.accept(AT):
vlist = self.version_list()
for version in vlist:
compiler._add_version(version)
else:
compiler.versions = VersionList(':')
return compiler
def check_identifier(self, id=None):
"""The only identifiers that can contain '.' are versions, but version
ids are context-sensitive so we have to check on a case-by-case
basis. Call this if we detect a version id where it shouldn't be.
"""
if not id:
id = self.token.value
if '.' in id:
self.last_token_error(
"{0}: Identifier cannot contain '.'".format(id))
def parse(string):
"""Returns a list of specs from an input string.
For creating one spec, see Spec() constructor.
"""
return SpecParser().parse(string)
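# Illustrative parsing sketch; the string below describes a single spec with a
# compiler and one explicit dependency (names are assumptions).
#
#     specs = parse('hdf5+mpi %gcc@9.2.0 ^zlib@1.2.11')
#     assert len(specs) == 1 and specs[0].name == 'hdf5'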
def save_dependency_spec_yamls(
root_spec_as_yaml, output_directory, dependencies=None):
"""Given a root spec (represented as a yaml object), index it with a subset
of its dependencies, and write each dependency to a separate yaml file
in the output directory. By default, all dependencies will be written
out. To choose a smaller subset of dependencies to be written, pass a
list of package names in the dependencies parameter. If a requested
dependency is not present in the root spec, SpecDependencyNotFoundError
is raised with a message naming the missing dependency."""
root_spec = Spec.from_yaml(root_spec_as_yaml)
dep_list = dependencies
if not dep_list:
dep_list = [dep.name for dep in root_spec.traverse()]
for dep_name in dep_list:
if dep_name not in root_spec:
msg = 'Dependency {0} does not exist in root spec {1}'.format(
dep_name, root_spec.name)
raise SpecDependencyNotFoundError(msg)
dep_spec = root_spec[dep_name]
yaml_path = os.path.join(output_directory, '{0}.yaml'.format(dep_name))
with open(yaml_path, 'w') as fd:
fd.write(dep_spec.to_yaml(hash=ht.build_hash))
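# Illustrative usage sketch; the paths and package names are assumptions.
#
#     with open('root.yaml') as f:
#         root_yaml = f.read()
#     save_dependency_spec_yamls(root_yaml, '/tmp/dep-yamls',
#                                dependencies=['zlib', 'mpich'])
#     # expected to write /tmp/dep-yamls/zlib.yaml and /tmp/dep-yamls/mpich.yaml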
def base32_prefix_bits(hash_string, bits):
"""Return the first <bits> bits of a base32 string as an integer."""
if bits > len(hash_string) * 5:
raise ValueError("Too many bits! Requested %d bit prefix of '%s'."
% (bits, hash_string))
hash_bytes = base64.b32decode(hash_string, casefold=True)
return prefix_bits(hash_bytes, bits)
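# Illustrative sketch: a concrete spec's base32 DAG hash can be truncated to an
# integer prefix, e.g. for bucketing (dag_hash() is assumed to return the full
# base32 string).
#
#     h = spec.dag_hash()                  # base32 string
#     bucket = base32_prefix_bits(h, 10)   # integer in [0, 1024)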
class SpecParseError(SpecError):
"""Wrapper for ParseError for when we're parsing specs."""
def __init__(self, parse_error):
super(SpecParseError, self).__init__(parse_error.message)
self.string = parse_error.string
self.pos = parse_error.pos
class DuplicateDependencyError(SpecError):
"""Raised when the same dependency occurs in a spec twice."""
class DuplicateCompilerSpecError(SpecError):
"""Raised when the same compiler occurs in a spec twice."""
class UnsupportedCompilerError(SpecError):
"""Raised when the user asks for a compiler spack doesn't know about."""
def __init__(self, compiler_name):
super(UnsupportedCompilerError, self).__init__(
"The '%s' compiler is not yet supported." % compiler_name)
class DuplicateArchitectureError(SpecError):
"""Raised when the same architecture occurs in a spec twice."""
class InconsistentSpecError(SpecError):
"""Raised when two nodes in the same spec DAG have inconsistent
constraints."""
class InvalidDependencyError(SpecError):
"""Raised when a dependency in a spec is not actually a dependency
of the package."""
def __init__(self, pkg, deps):
self.invalid_deps = deps
super(InvalidDependencyError, self).__init__(
'Package {0} does not depend on {1}'.format(pkg, comma_or(deps)))
class NoProviderError(SpecError):
"""Raised when there is no package that provides a particular
virtual dependency.
"""
def __init__(self, vpkg):
super(NoProviderError, self).__init__(
"No providers found for virtual package: '%s'" % vpkg)
self.vpkg = vpkg
class MultipleProviderError(SpecError):
"""Raised when there is no package that provides a particular
virtual dependency.
"""
def __init__(self, vpkg, providers):
"""Takes the name of the vpkg"""
super(MultipleProviderError, self).__init__(
"Multiple providers found for '%s': %s"
% (vpkg, [str(s) for s in providers]))
self.vpkg = vpkg
self.providers = providers
class UnsatisfiableSpecNameError(UnsatisfiableSpecError):
"""Raised when two specs aren't even for the same package."""
def __init__(self, provided, required):
super(UnsatisfiableSpecNameError, self).__init__(
provided, required, "name")
class UnsatisfiableVersionSpecError(UnsatisfiableSpecError):
"""Raised when a spec version conflicts with package constraints."""
def __init__(self, provided, required):
super(UnsatisfiableVersionSpecError, self).__init__(
provided, required, "version")
class UnsatisfiableCompilerSpecError(UnsatisfiableSpecError):
"""Raised when a spec comiler conflicts with package constraints."""
def __init__(self, provided, required):
super(UnsatisfiableCompilerSpecError, self).__init__(
provided, required, "compiler")
class UnsatisfiableCompilerFlagSpecError(UnsatisfiableSpecError):
"""Raised when a spec variant conflicts with package constraints."""
def __init__(self, provided, required):
super(UnsatisfiableCompilerFlagSpecError, self).__init__(
provided, required, "compiler_flags")
class UnsatisfiableArchitectureSpecError(UnsatisfiableSpecError):
"""Raised when a spec architecture conflicts with package constraints."""
def __init__(self, provided, required):
super(UnsatisfiableArchitectureSpecError, self).__init__(
provided, required, "architecture")
class UnsatisfiableProviderSpecError(UnsatisfiableSpecError):
"""Raised when a provider is supplied but constraints don't match
a vpkg requirement"""
def __init__(self, provided, required):
super(UnsatisfiableProviderSpecError, self).__init__(
provided, required, "provider")
# TODO: get rid of this and be more specific about particular incompatible
# dep constraints
class UnsatisfiableDependencySpecError(UnsatisfiableSpecError):
"""Raised when some dependency of constrained specs are incompatible"""
def __init__(self, provided, required):
super(UnsatisfiableDependencySpecError, self).__init__(
provided, required, "dependency")
class AmbiguousHashError(SpecError):
def __init__(self, msg, *specs):
spec_fmt = '{namespace}.{name}{@version}{%compiler}{compiler_flags}'
spec_fmt += '{variants}{arch=architecture}{/hash:7}'
specs_str = '\n ' + '\n '.join(spec.format(spec_fmt)
for spec in specs)
super(AmbiguousHashError, self).__init__(msg + specs_str)
class InvalidHashError(SpecError):
def __init__(self, spec, hash):
super(InvalidHashError, self).__init__(
"The spec specified by %s does not match provided spec %s"
% (hash, spec))
class NoSuchHashError(SpecError):
def __init__(self, hash):
super(NoSuchHashError, self).__init__(
"No installed spec matches the hash: '%s'"
% hash)
class SpecFilenameError(SpecError):
"""Raised when a spec file name is invalid."""
class NoSuchSpecFileError(SpecFilenameError):
"""Raised when a spec file doesn't exist."""
class RedundantSpecError(SpecError):
def __init__(self, spec, addition):
super(RedundantSpecError, self).__init__(
"Attempting to add %s to spec %s which is already concrete."
" This is likely the result of adding to a spec specified by hash."
% (addition, spec))
class SpecFormatStringError(SpecError):
"""Called for errors in Spec format strings."""
class SpecFormatSigilError(SpecFormatStringError):
"""Called for mismatched sigils and attributes in format strings"""
def __init__(self, sigil, requirement, used):
msg = 'The sigil %s may only be used for %s.' % (sigil, requirement)
msg += ' It was used with the attribute %s.' % used
super(SpecFormatSigilError, self).__init__(msg)
class ConflictsInSpecError(SpecError, RuntimeError):
def __init__(self, spec, matches):
message = 'Conflicts in concretized spec "{0}"\n'.format(
spec.short_spec
)
visited = set()
long_message = ''
match_fmt_default = '{0}. "{1}" conflicts with "{2}"\n'
match_fmt_custom = '{0}. "{1}" conflicts with "{2}" [{3}]\n'
for idx, (s, c, w, msg) in enumerate(matches):
if s not in visited:
visited.add(s)
long_message += 'List of matching conflicts for spec:\n\n'
long_message += s.tree(indent=4) + '\n'
if msg is None:
long_message += match_fmt_default.format(idx + 1, c, w)
else:
long_message += match_fmt_custom.format(idx + 1, c, w, msg)
super(ConflictsInSpecError, self).__init__(message, long_message)
class SpecDependencyNotFoundError(SpecError):
"""Raised when a failure is encountered writing the dependencies of
a spec."""
|
the-stack_0_22939 | import pytest
from django.test import RequestFactory
from stock_market_platform.users.api.views import UserViewSet
from stock_market_platform.users.models import User
pytestmark = pytest.mark.django_db
class TestUserViewSet:
def test_get_queryset(self, user: User, rf: RequestFactory):
view = UserViewSet()
request = rf.get("/fake-url/")
request.user = user
view.request = request
assert user in view.get_queryset()
def test_me(self, user: User, rf: RequestFactory):
view = UserViewSet()
request = rf.get("/fake-url/")
request.user = user
view.request = request
response = view.me(request)
assert response.data == {
"username": user.username,
"name": user.name,
"url": f"http://testserver/api/users/{user.username}/",
}
|
the-stack_0_22941 | # A tool to setup the Python registry.
class error(Exception):
pass
import sys # at least we can count on this!
def FileExists(fname):
"""Check if a file exists. Returns true or false.
"""
import os
try:
os.stat(fname)
return 1
except os.error as details:
return 0
def IsPackageDir(path, packageName, knownFileName):
"""Given a path, a ni package name, and possibly a known file name in
the root of the package, see if this path is good.
"""
import os
if knownFileName is None:
knownFileName = "."
return FileExists(os.path.join(os.path.join(path, packageName),knownFileName))
def IsDebug():
"""Return "_d" if we're running a debug version.
This is to be used within DLL names when locating them.
"""
import importlib.machinery
return '_d' if '_d.pyd' in importlib.machinery.EXTENSION_SUFFIXES else ''
def FindPackagePath(packageName, knownFileName, searchPaths):
"""Find a package.
Given a ni style package name, check the package is registered.
First place looked is the registry for an existing entry. Then
the searchPaths are searched.
"""
import regutil, os
pathLook = regutil.GetRegisteredNamedPath(packageName)
if pathLook and IsPackageDir(pathLook, packageName, knownFileName):
return pathLook, None # The currently registered one is good.
# Search down the search paths.
for pathLook in searchPaths:
if IsPackageDir(pathLook, packageName, knownFileName):
# Found it
ret = os.path.abspath(pathLook)
return ret, ret
raise error("The package %s can not be located" % packageName)
def FindHelpPath(helpFile, helpDesc, searchPaths):
# See if the current registry entry is OK
import os, win32api, win32con
try:
key = win32api.RegOpenKey(win32con.HKEY_LOCAL_MACHINE, "Software\\Microsoft\\Windows\\Help", 0, win32con.KEY_ALL_ACCESS)
try:
try:
path = win32api.RegQueryValueEx(key, helpDesc)[0]
if FileExists(os.path.join(path, helpFile)):
return os.path.abspath(path)
except win32api.error:
pass # no registry entry.
finally:
key.Close()
except win32api.error:
pass
for pathLook in searchPaths:
if FileExists(os.path.join(pathLook, helpFile)):
return os.path.abspath(pathLook)
pathLook = os.path.join(pathLook, "Help")
if FileExists(os.path.join( pathLook, helpFile)):
return os.path.abspath(pathLook)
raise error("The help file %s can not be located" % helpFile)
def FindAppPath(appName, knownFileName, searchPaths):
"""Find an application.
First place looked is the registry for an existing entry. Then
the searchPaths are searched.
"""
# Look in the first path.
import regutil, string, os
regPath = regutil.GetRegisteredNamedPath(appName)
if regPath:
pathLook = regPath.split(";")[0]
if regPath and FileExists(os.path.join(pathLook, knownFileName)):
return None # The currently registered one is good.
# Search down the search paths.
for pathLook in searchPaths:
if FileExists(os.path.join(pathLook, knownFileName)):
# Found it
return os.path.abspath(pathLook)
raise error("The file %s can not be located for application %s" % (knownFileName, appName))
def FindPythonExe(exeAlias, possibleRealNames, searchPaths):
"""Find an exe.
Returns the full path to the .exe, and a boolean indicating if the current
registered entry is OK. We don't trust the already registered version even
if it exists - it may be wrong (ie, for a different Python version)
"""
import win32api, regutil, string, os, sys
if possibleRealNames is None:
possibleRealNames = exeAlias
# Look first in Python's home.
found = os.path.join(sys.prefix, possibleRealNames)
if not FileExists(found): # for developers
if "64 bit" in sys.version:
found = os.path.join(sys.prefix, "PCBuild", "amd64", possibleRealNames)
else:
found = os.path.join(sys.prefix, "PCBuild", possibleRealNames)
if not FileExists(found):
found = LocateFileName(possibleRealNames, searchPaths)
registered_ok = 0
try:
registered = win32api.RegQueryValue(regutil.GetRootKey(), regutil.GetAppPathsKey() + "\\" + exeAlias)
registered_ok = found==registered
except win32api.error:
pass
return found, registered_ok
def QuotedFileName(fname):
"""Given a filename, return a quoted version if necessary
"""
import regutil, string
try:
fname.index(" ") # Other chars forcing quote?
return '"%s"' % fname
except ValueError:
# No space in name.
return fname
def LocateFileName(fileNamesString, searchPaths):
"""Locate a file name, anywhere on the search path.
If the file can not be located, prompt the user to find it for us
(using a common OpenFile dialog)
Raises KeyboardInterrupt if the user cancels.
"""
import regutil, string, os
fileNames = fileNamesString.split(";")
for path in searchPaths:
for fileName in fileNames:
try:
retPath = os.path.join(path, fileName)
os.stat(retPath)
break
except os.error:
retPath = None
if retPath:
break
else:
fileName = fileNames[0]
try:
import win32ui, win32con
except ImportError:
raise error("Need to locate the file %s, but the win32ui module is not available\nPlease run the program again, passing as a parameter the path to this file." % fileName)
# Display a common dialog to locate the file.
flags=win32con.OFN_FILEMUSTEXIST
ext = os.path.splitext(fileName)[1]
filter = "Files of requested type (*%s)|*%s||" % (ext,ext)
dlg = win32ui.CreateFileDialog(1,None,fileName,flags,filter,None)
dlg.SetOFNTitle("Locate " + fileName)
if dlg.DoModal() != win32con.IDOK:
raise KeyboardInterrupt("User cancelled the process")
retPath = dlg.GetPathName()
return os.path.abspath(retPath)
def LocatePath(fileName, searchPaths):
"""Like LocateFileName, but returns a directory only.
"""
import os
return os.path.abspath(os.path.split(LocateFileName(fileName, searchPaths))[0])
def LocateOptionalPath(fileName, searchPaths):
"""Like LocatePath, but returns None if the user cancels.
"""
try:
return LocatePath(fileName, searchPaths)
except KeyboardInterrupt:
return None
def LocateOptionalFileName(fileName, searchPaths = None):
"""Like LocateFileName, but returns None if the user cancels.
"""
try:
return LocateFileName(fileName, searchPaths)
except KeyboardInterrupt:
return None
def LocatePythonCore(searchPaths):
"""Locate and validate the core Python directories. Returns a list
of paths that should be used as the core (ie, un-named) portion of
the Python path.
"""
import os, regutil
currentPath = regutil.GetRegisteredNamedPath(None)
if currentPath:
presearchPaths = currentPath.split(";")
else:
presearchPaths = [os.path.abspath(".")]
libPath = None
for path in presearchPaths:
if FileExists(os.path.join(path, "os.py")):
libPath = path
break
if libPath is None and searchPaths is not None:
libPath = LocatePath("os.py", searchPaths)
if libPath is None:
raise error("The core Python library could not be located.")
corePath = None
suffix = IsDebug()
for path in presearchPaths:
if FileExists(os.path.join(path, "unicodedata%s.pyd" % suffix)):
corePath = path
break
if corePath is None and searchPaths is not None:
corePath = LocatePath("unicodedata%s.pyd" % suffix, searchPaths)
if corePath is None:
raise error("The core Python path could not be located.")
installPath = os.path.abspath(os.path.join(libPath, ".."))
return installPath, [libPath, corePath]
def FindRegisterPackage(packageName, knownFile, searchPaths, registryAppName = None):
"""Find and Register a package.
Assumes the core registry setup correctly.
In addition, if the location located by the package is already
in the **core** path, then an entry is registered, but no path.
(no other paths are checked, as the application whose path was used
may later be uninstalled. This should not happen with the core)
"""
import regutil, string
if not packageName: raise error("A package name must be supplied")
corePaths = regutil.GetRegisteredNamedPath(None).split(";")
if not searchPaths: searchPaths = corePaths
registryAppName = registryAppName or packageName
try:
pathLook, pathAdd = FindPackagePath(packageName, knownFile, searchPaths)
if pathAdd is not None:
if pathAdd in corePaths:
pathAdd = ""
regutil.RegisterNamedPath(registryAppName, pathAdd)
return pathLook
except error as details:
print("*** The %s package could not be registered - %s" % (packageName, details))
print("*** Please ensure you have passed the correct paths on the command line.")
print("*** - For packages, you should pass a path to the packages parent directory,")
print("*** - and not the package directory itself...")
def FindRegisterApp(appName, knownFiles, searchPaths):
"""Find and Register a package.
Assumes the core registry setup correctly.
"""
import regutil, string
if type(knownFiles)==type(''):
knownFiles = [knownFiles]
paths=[]
try:
for knownFile in knownFiles:
pathLook = FindAppPath(appName, knownFile, searchPaths)
if pathLook:
paths.append(pathLook)
except error as details:
print("*** ", details)
return
regutil.RegisterNamedPath(appName, ";".join(paths))
def FindRegisterPythonExe(exeAlias, searchPaths, actualFileNames = None):
"""Find and Register a Python exe (not necessarily *the* python.exe)
Assumes the core registry setup correctly.
"""
import regutil, string
fname, ok = FindPythonExe(exeAlias, actualFileNames, searchPaths)
if not ok:
regutil.RegisterPythonExe(fname, exeAlias)
return fname
def FindRegisterHelpFile(helpFile, searchPaths, helpDesc = None ):
import regutil
try:
pathLook = FindHelpPath(helpFile, helpDesc, searchPaths)
except error as details:
print("*** ", details)
return
# print "%s found at %s" % (helpFile, pathLook)
regutil.RegisterHelpFile(helpFile, pathLook, helpDesc)
def SetupCore(searchPaths):
"""Setup the core Python information in the registry.
This function makes no assumptions about the current state of sys.path.
After this function has completed, you should have access to the standard
Python library, and the standard Win32 extensions
"""
import sys
for path in searchPaths:
sys.path.append(path)
import os
import regutil, win32api,win32con
installPath, corePaths = LocatePythonCore(searchPaths)
# Register the core Pythonpath.
print(corePaths)
regutil.RegisterNamedPath(None, ';'.join(corePaths))
# Register the install path.
hKey = win32api.RegCreateKey(regutil.GetRootKey() , regutil.BuildDefaultPythonKey())
try:
# Core Paths.
win32api.RegSetValue(hKey, "InstallPath", win32con.REG_SZ, installPath)
finally:
win32api.RegCloseKey(hKey)
# Register the win32 core paths.
win32paths = os.path.abspath( os.path.split(win32api.__file__)[0]) + ";" + \
os.path.abspath( os.path.split(LocateFileName("win32con.py;win32con.pyc", sys.path ) )[0] )
# Python has builtin support for finding a "DLLs" directory, but
# not a PCBuild. Having it in the core paths means it is ignored when
# an EXE not in the Python dir is hosting us - so we add it as a named
# value
check = os.path.join(sys.prefix, "PCBuild")
if "64 bit" in sys.version:
check = os.path.join(check, "amd64")
if os.path.isdir(check):
regutil.RegisterNamedPath("PCBuild",check)
def RegisterShellInfo(searchPaths):
"""Registers key parts of the Python installation with the Windows Shell.
Assumes a valid, minimal Python installation exists
(ie, SetupCore() has been previously successfully run)
"""
import regutil, win32con
suffix = IsDebug()
# Set up a pointer to the .exe's
exePath = FindRegisterPythonExe("Python%s.exe" % suffix, searchPaths)
regutil.SetRegistryDefaultValue(".py", "Python.File", win32con.HKEY_CLASSES_ROOT)
regutil.RegisterShellCommand("Open", QuotedFileName(exePath)+" \"%1\" %*", "&Run")
regutil.SetRegistryDefaultValue("Python.File\\DefaultIcon", "%s,0" % exePath, win32con.HKEY_CLASSES_ROOT)
FindRegisterHelpFile("Python.hlp", searchPaths, "Main Python Documentation")
FindRegisterHelpFile("ActivePython.chm", searchPaths, "Main Python Documentation")
# We consider the win32 core, as it contains all the win32 api type
# stuff we need.
# FindRegisterApp("win32", ["win32con.pyc", "win32api%s.pyd" % suffix], searchPaths)
usage = """\
regsetup.py - Setup/maintain the registry for Python apps.
Run without options (but possibly with search paths) to repair a totally broken
Python registry setup. This should allow the other options to work.
Usage: %s [options ...] paths ...
-p packageName -- Find and register a package. Looks in the paths for
a sub-directory with the name of the package, and
adds a path entry for the package.
-a appName -- Unconditionally add an application name to the path.
                     A new path entry is created with the app name, and the
paths specified are added to the registry.
-c -- Add the specified paths to the core Pythonpath.
If a path appears on the core path, and a package also
needs that same path, the package will not bother
                     registering it. Therefore, by adding paths to the
core path, you can avoid packages re-registering the same path.
-m filename -- Find and register the specific file name as a module.
Do not include a path on the filename!
--shell -- Register everything with the Win95/NT shell.
--upackage name -- Unregister the package
--uapp name -- Unregister the app (identical to --upackage)
--umodule name -- Unregister the module
--description -- Print a description of the usage.
--examples -- Print examples of usage.
""" % sys.argv[0]
description="""\
If no options are processed, the program attempts to validate and set
the standard Python path to the point where the standard library is
available. This can be handy if you move Python to a new drive/sub-directory,
in which case most of the options would fail (as they need at least string.py,
os.py etc to function.)
Running without options should repair Python well enough to run with
the other options.
paths are search paths that the program will use to seek out a file.
For example, when registering the core Python, you may wish to
provide paths to non-standard places to look for the Python help files,
library files, etc.
See also the "regcheck.py" utility which will check and dump the contents
of the registry.
"""
examples="""\
Examples:
"regsetup c:\\wierd\\spot\\1 c:\\wierd\\spot\\2"
Attempts to setup the core Python. Looks in some standard places,
as well as the 2 wierd spots to locate the core Python files (eg, Python.exe,
python14.dll, the standard library and Win32 Extensions.
"regsetup -a myappname . .\subdir"
Registers a new Pythonpath entry named myappname, with "C:\\I\\AM\\HERE" and
"C:\\I\\AM\\HERE\subdir" added to the path (ie, all args are converted to
absolute paths)
"regsetup -c c:\\my\\python\\files"
Unconditionally add "c:\\my\\python\\files" to the 'core' Python path.
"regsetup -m some.pyd \\windows\\system"
Register the module some.pyd in \\windows\\system as a registered
module. This will allow some.pyd to be imported, even though the
windows system directory is not (usually!) on the Python Path.
"regsetup --umodule some"
Unregister the module "some". This means normal import rules then apply
for that module.
"""
if __name__=='__main__':
if len(sys.argv)>1 and sys.argv[1] in ['/?','-?','-help','-h']:
print(usage)
elif len(sys.argv)==1 or not sys.argv[1][0] in ['/','-']:
# No args, or useful args.
searchPath = sys.path[:]
for arg in sys.argv[1:]:
searchPath.append(arg)
# Good chance we are being run from the "regsetup.py" directory.
# Typically this will be "\somewhere\win32\Scripts" and the
# "somewhere" and "..\Lib" should also be searched.
searchPath.append("..\\Build")
searchPath.append("..\\Lib")
searchPath.append("..")
searchPath.append("..\\..")
# for developers:
# also search somewhere\lib, ..\build, and ..\..\build
searchPath.append("..\\..\\lib")
searchPath.append("..\\build")
if "64 bit" in sys.version:
searchPath.append("..\\..\\pcbuild\\amd64")
else:
searchPath.append("..\\..\\pcbuild")
print("Attempting to setup/repair the Python core")
SetupCore(searchPath)
RegisterShellInfo(searchPath)
FindRegisterHelpFile("PyWin32.chm", searchPath, "Pythonwin Reference")
# Check the registry.
print("Registration complete - checking the registry...")
import regcheck
regcheck.CheckRegistry()
else:
searchPaths = []
import getopt, string
opts, args = getopt.getopt(sys.argv[1:], 'p:a:m:c',
['shell','upackage=','uapp=','umodule=','description','examples'])
for arg in args:
searchPaths.append(arg)
for o,a in opts:
if o=='--description':
print(description)
if o=='--examples':
print(examples)
if o=='--shell':
print("Registering the Python core.")
RegisterShellInfo(searchPaths)
if o=='-p':
print("Registering package", a)
FindRegisterPackage(a,None,searchPaths)
if o in ['--upackage', '--uapp']:
import regutil
print("Unregistering application/package", a)
regutil.UnregisterNamedPath(a)
if o=='-a':
import regutil
path = ";".join(searchPaths)
print("Registering application", a,"to path",path)
regutil.RegisterNamedPath(a,path)
if o=='-c':
if not len(searchPaths):
raise error("-c option must provide at least one additional path")
import win32api, regutil
currentPaths = regutil.GetRegisteredNamedPath(None).split(";")
oldLen = len(currentPaths)
for newPath in searchPaths:
if newPath not in currentPaths:
currentPaths.append(newPath)
if len(currentPaths)!=oldLen:
print("Registering %d new core paths" % (len(currentPaths)-oldLen))
regutil.RegisterNamedPath(None,";".join(currentPaths))
else:
print("All specified paths are already registered.")
|
the-stack_0_22942 | import os
import requests
from flask import Blueprint, request, send_from_directory, current_app
import urllib.request
import json
from pypinyin import lazy_pinyin
from werkzeug.utils import secure_filename
from subprocess import Popen, PIPE
from entity.ConstantMessage import ConstantMessage
from entity.StatusCode import StatusCode
from util.Result import success, error
from util.randomFilename import random_filename
image = Blueprint('image', __name__)
server_ip = '172.16.20.190'
basepath="/home/NFSshare/docker_registry"
@image.route('getImageList', methods=['GET'])
def getImageList():
'''
    Get the list of images in the registry.
:return:
'''
try:
cmd = "http://%s:5000/v2/_catalog" % server_ip
# {"repositories":["cuda11.04-ubuntu18.04-wenet","wenet-k8s-torch","wennet-k8s-torch"]}
msg = urllib.request.urlopen(cmd).read()
objs = json.loads(msg)
# item_global = {"global": []}
result = {}
for i in objs["repositories"]:
# curl -XGET http://172.16.20.190:5000/v2/wenet-k8s-torch/tags/list
result[i] = []
# {"name":"wenet-k8s-torch","tags":["1.4","1.5","1.7","1.8","1.9","1.10","1.11","1.12","1.14","2.1","2.3"]}
cmd2 = "http://%s:5000/v2/%s/tags/list" % (server_ip, i)
msg2 = urllib.request.urlopen(cmd2).read()
imageobjs = json.loads(msg2)
if imageobjs['tags'] is not None:
for version in imageobjs['tags']:
result[i].append("%s" % (version))
# item_global['global'].append("%s:%s" % (i, version)).
none_err = []
for k in result.keys():
if not result[k]:
none_err.append(k)
if none_err:
for k in none_err:
result.pop(k)
return success(result)
except BaseException as e:
return error(None)
@image.route('getImage', methods=['GET'])
def getImage():
'''
    Look up all versions of the image with the given name.
:return:
'''
image_name= request.args.get("imageName")
result = {}
# item_global = {"global": []}
cmd = "http://%s:5000/v2/%s/tags/list" % (server_ip, image_name)
try:
response = urllib.request.urlopen(cmd)
result[image_name] = []
msg = response.read()
imageobjs = json.loads(msg)
if imageobjs['tags'] is not None:
for version in imageobjs['tags']:
result[image_name].append("%s" % (version))
if len(result[image_name]) == 0:
result = None
return success(result)
except urllib.error.HTTPError as e:
return success(None)
@image.route('pushImage', methods=['PUT'])
def pushImage():
'''
    Upload (push) an image to the registry.
:return:
'''
#####
    # Save the uploaded image tar file into the resource manager (shared storage)
f = request.files['file']
imageName = request.args.get('imageName')
imageVersion = request.args.get('imageVersion')
    if imageVersion is None:
imageVersion = 'latest'
    fileName = random_filename(secure_filename(''.join(lazy_pinyin(f.filename))))  # use the third-party library pypinyin to convert Chinese filenames to pinyin
upload_path = os.path.join(basepath, fileName)
f.save(upload_path)
p = Popen(["docker", "load", "-i", upload_path], stdout=PIPE)
stdout, stderror = p.communicate()
output = stdout.decode('UTF-8')
lines = output.strip().split(os.linesep)
results = lines[0]
if "Loaded image:" not in results:
return "errors: %s" % output.strip()
if stderror:
return "errors: %s" % stderror.decode('UTF-8').strip()
raw_image_name_full = results.split(" ")[-1].strip()
raw_image_name = raw_image_name_full.split("/")[-1]
if imageName:
image_name = "%s:5000/%s:%s" % (server_ip, imageName, imageVersion)
else:
image_name = "%s:5000/%s" % (server_ip,raw_image_name)
os.system("docker tag %s %s" % (raw_image_name_full, image_name))
# raw_name =
p = Popen(["docker", "push", image_name], stdout=PIPE)
stdout, stderror = p.communicate()
output = stdout.decode('UTF-8').strip()
# lines = output.split(os.linesep)
os.remove(upload_path)
if stderror:
return error("%s" % stderror.decode('UTF-8').strip())
return success(None)
@image.route('pullImage', methods=['GET'])
def pullImage():
'''
    Download (pull) an image from the registry.
:return:
'''
imageName = request.args.get('imageName')
imageVersion = request.args.get('imageVersion')
image_path = "%s:5000/%s:%s" % (server_ip, imageName, imageVersion)
p = Popen(["docker", "pull", image_path],
stdout=PIPE)
stdout, stderror = p.communicate()
if stderror:
return error(error("errors: %s" % stderror.decode('UTF-8').strip()))
download_filename = "%s:%s.tar" % (imageName, imageVersion)
download_filename_full = os.path.join(basepath, download_filename)
p2 = Popen(["docker", "save", image_path, "-o", download_filename_full], stdout=PIPE)
stdout2, stderror2 = p2.communicate()
if stderror2:
return error("errors: %s" % stderror.decode('UTF-8').strip())
if os.path.exists(download_filename_full):
def generate():
with open(download_filename_full,'rb') as f:
yield from f
os.remove(download_filename_full)
r = current_app.response_class(generate(), mimetype='"application/x-tar"')
r.headers.set('Content-Disposition', 'attachment', filename=imageName+imageVersion+'.tar')
return r
return error(ConstantMessage.DOWNLOAD_ERROR)
@image.route('deleteImage', methods=['DELETE'])
def deleteImage():
'''
    Delete an image from the registry.
:return:
'''
imageName = request.args.get('imageName')
imageVersion = request.args.get('imageVersion')
repo_port = 5000
head = {'Accept': 'application/vnd.docker.distribution.manifest.v2+json'}
url_base = "http://" + server_ip + ":" + str(repo_port) + "/v2/"
url = url_base + imageName + "/manifests/" + imageVersion
res_for_manifest = requests.get(url, headers=head, verify=False)
manifest = res_for_manifest.headers['Docker-Content-Digest']
delete_operation_url = url_base + imageName + "/manifests/" + manifest
delete_result = requests.delete(delete_operation_url, verify=False)
if delete_result.status_code == 202:
return success(None)
# "%d: delete %s success......" % (int(delete_result.status_code), image_name + ':' + version)
else:
return error(None)
# "%d: delete %s fail......" % (int(delete_result.status_code), image_name + ':' + version) |
the-stack_0_22943 | from django.conf import settings
from django.conf.urls.static import static
from django.urls import path
from .views import ProjectDetailView, ProjectListView, ProjectCreateView, ProjectUpdateView
from . import views
urlpatterns = [
path('', views.home, name='home'),
path('project/<int:pk>/', ProjectDetailView.as_view(), name='project-detail'),
path('project/new/', ProjectCreateView.as_view(), name='project-create'),
path('project/<int:pk>/update/', ProjectUpdateView.as_view(), name='project-update'),
path('api/profiles/', views.ProfileList.as_view()),
path('api/projects/', views.ProjectList.as_view()),
    path('vote/<project>/<int:pk>/', views.vote, name='vote'),
path('search/',views.search_project, name='search_project'),
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT) |
the-stack_0_22945 | # Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from dask.distributed import wait, default_client
from cugraph.dask.common.input_utils import get_local_data
from cugraph.raft.dask.common.comms import worker_state
from cugraph.opg.link_analysis import mg_pagerank_wrapper as mg_pagerank
import warnings
def call_pagerank(sID, data, local_data, alpha, max_iter,
tol, personalization, nstart):
sessionstate = worker_state(sID)
return mg_pagerank.mg_pagerank(data[0],
local_data,
sessionstate['wid'],
sessionstate['handle'],
alpha,
max_iter,
tol,
personalization,
nstart)
def pagerank(input_graph,
alpha=0.85,
personalization=None,
max_iter=100,
tol=1.0e-5,
nstart=None,
load_balance=True):
"""
Find the PageRank values for each vertex in a graph using multiple GPUs.
cuGraph computes an approximation of the Pagerank using the power method.
The input graph must contain edge list as dask-cudf dataframe with
one partition per GPU.
Parameters
----------
graph : cugraph.DiGraph
cuGraph graph descriptor, should contain the connectivity information
        as a dask-cudf edge list dataframe (edge weights are not used for this
        algorithm). Undirected graphs are not currently supported.
alpha : float
The damping factor alpha represents the probability to follow an
outgoing edge, standard value is 0.85.
Thus, 1.0-alpha is the probability to “teleport” to a random vertex.
Alpha should be greater than 0.0 and strictly lower than 1.0.
max_iter : int
The maximum number of iterations before an answer is returned.
If this value is lower or equal to 0 cuGraph will use the default
value, which is 30.
tolerance : float
Currently not supported. Set to default value 1.0e-5.
personalization : cudf.Dataframe
        GPU Dataframe containing the personalization information.
Currently not supported.
nstart : cudf.Dataframe
GPU Dataframe containing the initial guess for pagerank.
Currently not supported.
Returns
-------
PageRank : cudf.DataFrame
GPU data frame containing two cudf.Series of size V: the vertex
identifiers and the corresponding PageRank values.
df['vertex'] : cudf.Series
Contains the vertex identifiers
df['pagerank'] : cudf.Series
Contains the PageRank score
Examples
--------
>>> import cugraph.dask as dcg
>>> chunksize = dcg.get_chunksize(input_data_path)
>>> ddf = dask_cudf.read_csv(input_data_path, chunksize=chunksize,
delimiter=' ',
names=['src', 'dst', 'value'],
dtype=['int32', 'int32', 'float32'])
>>> dg = cugraph.DiGraph()
>>> dg.from_dask_cudf_edgelist(ddf)
>>> pr = dcg.pagerank(dg)
"""
if tol != 1.0e-5:
warnings.warn("Tolerance is currently not supported. \
Setting it to default 1.0e-5")
tol = 1.0e-5
if personalization is not None or nstart is not None:
warnings.warn("personalization and nstart currently not \
supported. Setting them to None")
personalization = None
nstart = None
client = default_client()
if(input_graph.local_data is not None and
input_graph.local_data['by'] == 'dst'):
data = input_graph.local_data['data']
comms = input_graph.local_data['comms']
else:
data, comms = get_local_data(input_graph, by='dst')
result = dict([(data.worker_info[wf[0]]["rank"],
client.submit(
call_pagerank,
comms.sessionId,
wf[1],
data.local_data,
alpha,
max_iter,
tol,
personalization,
nstart,
workers=[wf[0]]))
for idx, wf in enumerate(data.worker_to_parts.items())])
wait(result)
return result[0].result()
|
the-stack_0_22947 | #
# @lc app=leetcode id=19 lang=python3
#
# [19] Remove Nth Node From End of List
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def removeNthFromEnd(self, head: "ListNode", n: int) -> "ListNode":  # type: ignore
dummy = ListNode() # type:ignore
dummy.next = head
fast, prev = head, dummy
for _ in range(n):
fast = fast.next
while fast:
fast = fast.next
prev = prev.next
prev.next = prev.next.next
return dummy.next
# @lc code=end
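# Editorial demo (the minimal ListNode below mirrors the one LeetCode provides
# at runtime; it is defined here only so the two-pointer removal can be run
# standalone).
if __name__ == "__main__":
    class ListNode:
        def __init__(self, val=0, next=None):
            self.val = val
            self.next = next

    head = None
    for v in (5, 4, 3, 2, 1):
        head = ListNode(v, head)  # builds 1 -> 2 -> 3 -> 4 -> 5
    head = Solution().removeNthFromEnd(head, 2)
    values = []
    while head:
        values.append(head.val)
        head = head.next
    print(values)  # [1, 2, 3, 5]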
|
the-stack_0_22949 | import importlib
from typing import Dict, List
from sqlalchemy import MetaData, cast, column, insert, select
from sqlalchemy.sql.schema import Table as SqlaTable
from astro.sql.operators.sql_decorator import SqlDecoratedOperator
from astro.sql.table import Table
from astro.utils.schema_util import (
get_error_string_for_multiple_dbs,
get_table_name,
tables_from_same_db,
)
from astro.utils.table_handler import TableHandler
from astro.utils.task_id_helper import get_unique_task_id
class SqlAppendOperator(SqlDecoratedOperator, TableHandler):
template_fields = ("main_table", "append_table")
def __init__(
self,
append_table: Table,
main_table: Table,
columns: List[str] = [],
casted_columns: dict = {},
**kwargs,
):
self.append_table = append_table
self.main_table = main_table
self.sql = ""
self.columns = columns
self.casted_columns = casted_columns
task_id = get_unique_task_id("append_table")
def null_function():
pass
super().__init__(
raw_sql=True,
parameters={},
task_id=kwargs.get("task_id") or task_id,
op_args=(),
python_callable=null_function,
handler=lambda x: None,
**kwargs,
)
def execute(self, context: Dict):
if not tables_from_same_db([self.append_table, self.main_table]):
raise ValueError(
get_error_string_for_multiple_dbs([self.append_table, self.main_table])
)
self.main_table.conn_id = self.main_table.conn_id or self.append_table.conn_id
self.conn_id = self.main_table.conn_id or self.append_table.conn_id
self.database = self.main_table.database or self.append_table.database
self.warehouse = self.main_table.warehouse or self.append_table.warehouse
self.schema = self.main_table.schema or self.append_table.schema
self.sql = self.append(
main_table=self.main_table,
append_table=self.append_table,
columns=self.columns,
casted_columns=self.casted_columns,
conn_id=self.main_table.conn_id,
)
super().execute(context)
return self.main_table
def append(
self, main_table: Table, columns, casted_columns, append_table: Table, conn_id
):
engine = self.get_sql_alchemy_engine()
if self.conn_type in ["postgres", "postgresql"]:
metadata = MetaData(schema=self.schema)
else:
metadata = MetaData()
        # TODO: fix the BigQuery and Postgres table reflection issue.
main_table_sqla = SqlaTable(
get_table_name(main_table), metadata, autoload_with=engine
)
append_table_sqla = SqlaTable(
get_table_name(append_table), metadata, autoload_with=engine
)
column_names = [column(c) for c in columns]
sqlalchemy = importlib.import_module("sqlalchemy")
casted_fields = [
cast(column(k), getattr(sqlalchemy, v)) for k, v in casted_columns.items()
]
main_columns = [k for k, v in casted_columns.items()]
main_columns.extend([c for c in columns])
if len(column_names) + len(casted_fields) == 0:
column_names = [column(c) for c in append_table_sqla.c.keys()]
main_columns = column_names
column_names.extend(casted_fields)
sel = select(column_names).select_from(append_table_sqla)
return insert(main_table_sqla).from_select(main_columns, sel)
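# Editorial sketch (standalone, SQLite in-memory, SQLAlchemy 1.4-style select):
# the same INSERT ... FROM SELECT pattern that append() builds above, without
# the Airflow/astro plumbing. Table and column names here are illustrative.
def _insert_from_select_sketch():
    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy import Table as PlainTable
    engine = create_engine("sqlite://")
    metadata = MetaData()
    main = PlainTable("main_t", metadata,
                      Column("id", Integer), Column("name", String))
    extra = PlainTable("extra_t", metadata,
                       Column("id", Integer), Column("name", String))
    metadata.create_all(engine)
    with engine.begin() as conn:
        # seed the source table, then copy its rows into the target in one statement
        conn.execute(insert(extra), [{"id": 1, "name": "a"},
                                     {"id": 2, "name": "b"}])
        sel = select(extra.c.id, extra.c.name)
        conn.execute(insert(main).from_select(["id", "name"], sel))
        return conn.execute(select(main)).fetchall()  # [(1, 'a'), (2, 'b')]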
|
the-stack_0_22950 | """Test built-in blueprints."""
import asyncio
import contextlib
from datetime import timedelta
import pathlib
from unittest.mock import patch
from homeassistant.components import automation
from homeassistant.components.blueprint import models
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util, yaml
from tests.common import async_fire_time_changed, async_mock_service
BUILTIN_BLUEPRINT_FOLDER = pathlib.Path(automation.__file__).parent / "blueprints"
@contextlib.contextmanager
def patch_blueprint(blueprint_path: str, data_path):
"""Patch blueprint loading from a different source."""
orig_load = models.DomainBlueprints._load_blueprint
@callback
def mock_load_blueprint(self, path):
if path != blueprint_path:
assert False, f"Unexpected blueprint {path}"
return orig_load(self, path)
return models.Blueprint(
yaml.load_yaml(data_path), expected_domain=self.domain, path=path
)
with patch(
"homeassistant.components.blueprint.models.DomainBlueprints._load_blueprint",
mock_load_blueprint,
):
yield
async def test_notify_leaving_zone(hass):
"""Test notifying leaving a zone blueprint."""
def set_person_state(state, extra={}):
hass.states.async_set(
"person.test_person", state, {"friendly_name": "Paulus", **extra}
)
set_person_state("School")
assert await async_setup_component(
hass, "zone", {"zone": {"name": "School", "latitude": 1, "longitude": 2}}
)
with patch_blueprint(
"notify_leaving_zone.yaml",
BUILTIN_BLUEPRINT_FOLDER / "notify_leaving_zone.yaml",
):
assert await async_setup_component(
hass,
"automation",
{
"automation": {
"use_blueprint": {
"path": "notify_leaving_zone.yaml",
"input": {
"person_entity": "person.test_person",
"zone_entity": "zone.school",
"notify_device": "abcdefgh",
},
}
}
},
)
with patch(
"homeassistant.components.mobile_app.device_action.async_call_action_from_config"
) as mock_call_action:
# Leaving zone to no zone
set_person_state("not_home")
await hass.async_block_till_done()
assert len(mock_call_action.mock_calls) == 1
_hass, config, variables, _context = mock_call_action.mock_calls[0][1]
message_tpl = config.pop("message")
assert config == {
"domain": "mobile_app",
"type": "notify",
"device_id": "abcdefgh",
}
message_tpl.hass = hass
assert message_tpl.async_render(variables) == "Paulus has left School"
# Should not increase when we go to another zone
set_person_state("bla")
await hass.async_block_till_done()
assert len(mock_call_action.mock_calls) == 1
# Should not increase when we go into the zone
set_person_state("School")
await hass.async_block_till_done()
assert len(mock_call_action.mock_calls) == 1
# Should not increase when we move in the zone
set_person_state("School", {"extra_key": "triggers change with same state"})
await hass.async_block_till_done()
assert len(mock_call_action.mock_calls) == 1
# Should increase when leaving zone for another zone
set_person_state("Just Outside School")
await hass.async_block_till_done()
assert len(mock_call_action.mock_calls) == 2
# Verify trigger works
await hass.services.async_call(
"automation",
"trigger",
{"entity_id": "automation.automation_0"},
blocking=True,
)
assert len(mock_call_action.mock_calls) == 3
async def test_motion_light(hass):
"""Test motion light blueprint."""
hass.states.async_set("binary_sensor.kitchen", "off")
with patch_blueprint(
"motion_light.yaml",
BUILTIN_BLUEPRINT_FOLDER / "motion_light.yaml",
):
assert await async_setup_component(
hass,
"automation",
{
"automation": {
"use_blueprint": {
"path": "motion_light.yaml",
"input": {
"light_target": {"entity_id": "light.kitchen"},
"motion_entity": "binary_sensor.kitchen",
},
}
}
},
)
turn_on_calls = async_mock_service(hass, "light", "turn_on")
turn_off_calls = async_mock_service(hass, "light", "turn_off")
# Turn on motion
hass.states.async_set("binary_sensor.kitchen", "on")
# Can't block till done because delay is active
# So wait 5 event loop iterations to process script
for _ in range(5):
await asyncio.sleep(0)
assert len(turn_on_calls) == 1
# Test light doesn't turn off if motion stays
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=200))
for _ in range(5):
await asyncio.sleep(0)
assert len(turn_off_calls) == 0
    # Test light turns off 120s after last motion
hass.states.async_set("binary_sensor.kitchen", "off")
for _ in range(5):
await asyncio.sleep(0)
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=120))
await hass.async_block_till_done()
assert len(turn_off_calls) == 1
# Test restarting the script
hass.states.async_set("binary_sensor.kitchen", "on")
for _ in range(5):
await asyncio.sleep(0)
assert len(turn_on_calls) == 2
assert len(turn_off_calls) == 1
hass.states.async_set("binary_sensor.kitchen", "off")
for _ in range(5):
await asyncio.sleep(0)
hass.states.async_set("binary_sensor.kitchen", "on")
for _ in range(15):
await asyncio.sleep(0)
assert len(turn_on_calls) == 3
assert len(turn_off_calls) == 1
# Verify trigger works
await hass.services.async_call(
"automation",
"trigger",
{"entity_id": "automation.automation_0"},
)
for _ in range(25):
await asyncio.sleep(0)
assert len(turn_on_calls) == 4
|
the-stack_0_22951 |
import sys
import os
import csv
import hashlib
import ntpath
import logging
from pathlib import Path,PureWindowsPath,PurePosixPath, PurePath
from PyQt5 import QtWidgets
from PyQt5 import QtSql, QtGui, QtCore
from .importTableModel import *
'''
This class manages imports: the first step is to import
FITS files and their headers by scanning directories. Then we
can add information such as FWHM, noise, etc. by importing
a CSV file exported by SubFrameSelector in Pixinsight.
There are read and save methods for both operations
(FITS import and CSV import).
The models (both FITS and CSV) are managed by
ImportTableModel.
'''
class ImportCsvTab():
logger = logging.getLogger(__name__)
def __init__(self, mainW, app):
super().__init__()
self.mainW = mainW
self.app = app
# GUI logging handler
self.handler = h = QtHandler(self.printLogMessage)
fs = "%(asctime)s %(levelname)-8s %(message)s"
formatter = logging.Formatter(fs)
h.setFormatter(formatter)
h.setLevel(logging.INFO)
self.logger.addHandler(h)
# Slot for printing log message in the main thread
def printLogMessage(self, msg, record):
color = self.app.COLORS.get(record.levelno, "black")
s = '<pre><font color="%s">%s</font></pre>' % (color, msg)
self.mainW.ui.plainTextEditLogCsv.appendHtml(s)
def importCsvFile(self):
filename, _ = QtWidgets.QFileDialog.getOpenFileName(
self.mainW,
'Select a CSV file to open…',
QtCore.QDir.homePath(),
'CSV Files (*.csv) ;; All Files (*)'
)
if filename:
with open(filename) as fh:
csvreader = csv.reader(fh)
dataTemp = list(csvreader)
self.mainW.ui.lineEditCsv.setText(filename)
self.logger.info(f"Reading {filename}")
else:
return
csvList = self.app.filterDictToList('pix_csv')
# First rows of the CSV file don't contain images.
checkCsv = False
self._data = []
self._headers = []
i = 1
for row, val in enumerate(dataTemp):
if dataTemp[row][0] == 'Subframe Scale':
subframeScale = str(dataTemp[row][1])
if dataTemp[row][0] == 'Camera Gain':
cameraGain = str(dataTemp[row][1])
if dataTemp[row][0] == 'Camera Resolution':
cameraResolution = str(dataTemp[row][1])
if dataTemp[row][0] == 'Scale Unit':
scaleUnit = str(dataTemp[row][1])
if dataTemp[row][0] == 'Data Unit':
dataUnit = str(dataTemp[row][1])
if checkCsv == True:
dataTemp[row].insert(0, subframeScale)
dataTemp[row].insert(1, cameraGain)
dataTemp[row].insert(2, cameraResolution)
dataTemp[row].insert(3, scaleUnit)
dataTemp[row].insert(4, dataUnit)
self.logger.info(f"Row n {i}")
i+=1
# Items (columns) for each row
filteredRow = []
for col in range(len(val)):
if str(col) in csvList:
item = dataTemp[row][col]
# Check if the file (hash) exists in the database
if col == 8:
filenameMatch = ntpath.splitext(
ntpath.basename(item))[0]
'''
TO BE COMPLETED:
filenameMatch = ntpath.splitext(
ntpath.basename(item))[0]
sqlStatementF = "SELECT file FROM images where file like '%"+filenameMatch+"'%"
rF = self.app.db.exec(sqlStatementF)
rF.next()
if rF.value(0):
print("trovato")
else:
print("FITS file not found")
'''
hashItem = self.hashFile(item)
pathFrom =self.mainW.ui.lineEditPathConversionFrom.text()
pathTo = self.mainW.ui.lineEditPathConversionTo.text()
if len(pathFrom)>0:
pathFrom=pathFrom.replace('\\','/')
pathTo=pathTo.replace('\\','/')
item = item.replace(pathFrom, pathTo)
item =str(PurePath(item))
hashItem = self.hashFile(item)
sqlStatement = "SELECT hash FROM images where hash = '"+hashItem+"'"
r = self.app.db.exec(sqlStatement)
r.next()
if r.value(0):
strStatus = "File found"
self.logger.info(
f"File {filenameMatch} found in the database")
else:
strStatus = "File not found"
self.logger.error(
f"File {filenameMatch} not found in the database")
filteredRow.insert(col, str(item))
# Append info about matching fits file found/not found
filteredRow.insert(len(filteredRow), strStatus)
self._data.append(filteredRow)
# Headers row are read after 'index' in csv file.
if dataTemp[row][0] == 'Index':
self._headers = self.app.filterDictToList(
'pix_csv', 'description')
self._headers.append('Status')
checkCsv = True
if checkCsv == True:
self.model = ImportTableModel(self._data, self._headers)
self.mainW.ui.tableViewImportCsv.setModel(self.model)
self.mainW.ui.tableViewImportCsv.setSortingEnabled(False)
else:
self.logger.error("Invalid CSV format")
def deleteRows(self):
indexes = self.mainW.ui.tableViewImportCsv.selectedIndexes()
countRows = self.mainW.ui.tableViewImportCsv.selectionModel().selectedRows()
if indexes:
            ret = QtWidgets.QMessageBox.question(
                None,
                "Delete rows",
                "Are you sure you want to delete the selected rows?",
                QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
            )
            if ret == QtWidgets.QMessageBox.Yes:
self.model.removeRows(indexes[0].row(), countRows, None)
def hashFile(self, fileName):
try:
fileName = PurePath(fileName)
with open(fileName, 'rb') as afile:
hasher = hashlib.md5()
buf = afile.read(self.app.BLOCKSIZE)
for i in range(5):
hasher.update(buf)
buf = afile.read(self.app.BLOCKSIZE)
hash = hasher.hexdigest()
return hash
except Exception as e:
self.logger.error(f"Hash file: fits not found: {fileName}")
return ""
# Data from PI csv (FWHM, Noise etc) are imported in db
def saveCsv(self):
fieldUpdate = self.app.filterDictToList('pix_csv', 'keys')
rows = self.model.rowCount(self.mainW.ui.tableViewImportCsv.rootIndex())
self.logger.info(f"Saving {rows} CSV rows")
# Each row from table view is an SQL update statement
for row in range(rows):
query = "UPDATE images SET "
csvHash = ''
for col in range(len(fieldUpdate)):
currentIndex = self.model.index(row, col)
item = self.model.data(currentIndex, QtCore.Qt.DisplayRole)
if item is not None:
query += str(fieldUpdate[col]) + "= '"+str(item)+"',"
if fieldUpdate[col] == "csvFile":
fileName = str(item)
csvHash = self.hashFile(item)
else:
query += "'',"
query = query[:-1]
query += " WHERE hash = '"+csvHash + "';"
try:
ret = self.app.db.exec(query)
error = ret.lastError().text()
if ret.lastError().number() > 0:
self.logger.error(f"{error}")
self.logger.error(f"{query}")
else:
if ret.numRowsAffected() < 1:
self.logger.error(f"{fileName} not found")
else:
self.model.setData(
self.model.index(row, 10), "OK: FITS file updated", QtCore.Qt.EditRole)
self.logger.info(f"{fileName} updated")
self.logger.debug(
f"OK: FITS file updated with query {query}")
except Exception as e:
self.logger.error(f"Update error {e}")
self.model.layoutChanged.emit()
# Force image list data reload
self.mainW.imageSourceModel.select()
while (self.mainW.imageSourceModel.canFetchMore()):
self.mainW.imageSourceModel.fetchMore()
self.mainW.filterRegExpChanged()
"""
These two classes manage the GUI log messages using an additional
log handler. Note that log messages are emitted from outside the
main thread and widgets are not thread-safe, so we cannot update
the GUI directly from inside the worker; instead we use a
signal/slot connection for that.
"""
class Signaller(QtCore.QObject):
signal = QtCore.pyqtSignal(str, logging.LogRecord)
class QtHandler(logging.Handler):
def __init__(self, slotfunc, *args, **kwargs):
super(QtHandler, self).__init__(*args, **kwargs)
self.signaller = Signaller()
self.signaller.signal.connect(slotfunc)
def emit(self, record):
s = self.format(record)
self.signaller.signal.emit(s, record)
|
the-stack_0_22954 | #!/usr/bin/env python
#
# Copyright 2012-2015 eNovance <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import subprocess
from oslo_utils import fileutils
from aodh.tests import base
class BinTestCase(base.BaseTestCase):
def setUp(self):
super(BinTestCase, self).setUp()
content = ("[database]\n"
"connection=log://localhost\n")
content = content.encode('utf-8')
self.tempfile = fileutils.write_to_tempfile(content=content,
prefix='aodh',
suffix='.conf')
def tearDown(self):
super(BinTestCase, self).tearDown()
os.remove(self.tempfile)
def test_dbsync_run(self):
subp = subprocess.Popen(['aodh-dbsync',
"--config-file=%s" % self.tempfile])
self.assertEqual(0, subp.wait())
def test_run_expirer_ttl_disabled(self):
subp = subprocess.Popen(['aodh-expirer',
'-d',
"--config-file=%s" % self.tempfile],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, __ = subp.communicate()
self.assertEqual(0, subp.poll())
self.assertIn(b"Nothing to clean, database alarm history "
b"time to live is disabled", out)
def test_run_expirer_ttl_enabled(self):
content = ("[database]\n"
"alarm_history_time_to_live=1\n"
"alarm_histories_delete_batch_size=10\n"
"connection=log://localhost\n")
content = content.encode('utf-8')
self.tempfile = fileutils.write_to_tempfile(content=content,
prefix='aodh',
suffix='.conf')
subp = subprocess.Popen(['aodh-expirer',
'-d',
"--config-file=%s" % self.tempfile],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, __ = subp.communicate()
self.assertEqual(0, subp.poll())
msg = "Dropping alarm history 10 data with TTL 1"
msg = msg.encode('utf-8')
self.assertIn(msg, out)
class BinEvaluatorTestCase(base.BaseTestCase):
def setUp(self):
super(BinEvaluatorTestCase, self).setUp()
content = ("[database]\n"
"connection=log://localhost\n")
content = content.encode('utf-8')
self.tempfile = fileutils.write_to_tempfile(content=content,
prefix='aodh',
suffix='.conf')
self.subp = None
def tearDown(self):
super(BinEvaluatorTestCase, self).tearDown()
if self.subp:
self.subp.kill()
os.remove(self.tempfile)
def test_starting_evaluator(self):
self.subp = subprocess.Popen(['aodh-evaluator',
"--config-file=%s" % self.tempfile],
stderr=subprocess.PIPE)
self.assertIsNone(self.subp.poll())
class BinNotifierTestCase(BinEvaluatorTestCase):
def test_starting_notifier(self):
self.subp = subprocess.Popen(['aodh-notifier',
"--config-file=%s" % self.tempfile],
stderr=subprocess.PIPE)
self.assertIsNone(self.subp.poll())
|
the-stack_0_22955 | # coding: utf-8
from __future__ import print_function, division, absolute_import
def user_preference_sorter(prefer_quality, prefer_format, prefer_dash=False):
def do_sort(obj):
prio = 0
if obj.type == 'dash':
prio += 50 if prefer_dash else -50
if obj.format == prefer_format:
prio += 20
# Bonus & penalty for exact matches, no score for "obj.hd == None"
if obj.hd is True and prefer_quality == "hd":
prio += 20
elif obj.hd is False and prefer_quality == "sd":
prio += 20
elif obj.hd is True and prefer_quality == "sd":
prio -= 10
elif obj.hd is False and prefer_quality == "hd":
prio -= 10
# Prefer versions with "more" audio tracks
try:
translations = len(obj.languages) - 1
prio += translations
except AttributeError:
pass
# Prefer "native" over "translated" for now (streaming)...
try:
if obj.translated:
prio -= 5
except AttributeError:
pass
return -prio
return do_sort
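# Editorial usage sketch: the attribute names below are exactly the ones
# do_sort reads; the Stream namedtuple is an assumption, not the project's
# real stream class.
if __name__ == "__main__":
    from collections import namedtuple
    Stream = namedtuple('Stream', 'type format hd languages translated')
    candidates = [
        Stream('progressive', 'mp4', False, ['de'], False),
        Stream('dash', 'mp4', True, ['de', 'en'], False),
    ]
    best = sorted(candidates,
                  key=user_preference_sorter('hd', 'mp4', prefer_dash=True))[0]
    print(best.type)  # 'dash' -- the highest-priority stream sorts first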
def maybe_json(json, attr, default):
try:
return json[attr]
except KeyError:
return default
def json_date_to_info(json, field, info):
if field not in json or not json[field] or len(json[field]) < 10:
return
try:
y, m, d = [int(x) for x in json[field][0:10].split('-')]
info['date'] = "%02d.%02d.%04d" % (d, m, y)
info['year'] = y
info['aired'] = "%04d-%02d-%02d" % (y, m, d)
info['dateadded'] = "%04d-%02d-%02d" % (y, m, d)
except ValueError:
return
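# Editorial usage sketch ('airdate' is a hypothetical field name):
#
#     info = {}
#     json_date_to_info({'airdate': '2021-03-07T20:15:00'}, 'airdate', info)
#     # info -> {'date': '07.03.2021', 'year': 2021,
#     #          'aired': '2021-03-07', 'dateadded': '2021-03-07'}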
def calc_aspect(s):
try:
aspect = [float(x) for x in s.split(':')]
if len(aspect) == 2:
return aspect[0] / aspect[1]
except ValueError:
return None
|
the-stack_0_22956 | # Copyright 2013 VMware, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
import netaddr
from neutron_lib.api.definitions import external_net as enet_apidef
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib.api.definitions import l3_ext_gw_mode
from neutron_lib import constants
from neutron_lib import context as nctx
from neutron_lib.db import api as db_api
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import testscenarios
from webob import exc
from neutron.common import utils
from neutron.db import l3_db
from neutron.db import l3_gwmode_db
from neutron.db.models import l3 as l3_models
from neutron.extensions import l3
from neutron.objects import network as net_obj
from neutron.objects import ports as port_obj
from neutron.objects import router as l3_obj
from neutron.objects import subnet as subnet_obj
from neutron.tests import base
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.extensions import test_l3
from neutron.tests.unit import testlib_api
_uuid = uuidutils.generate_uuid
FAKE_GW_PORT_ID = _uuid()
FAKE_GW_PORT_MAC = 'aa:bb:cc:dd:ee:ff'
FAKE_FIP_EXT_PORT_ID = _uuid()
FAKE_FIP_EXT_PORT_MAC = '11:22:33:44:55:66'
FAKE_FIP_INT_PORT_ID = _uuid()
FAKE_FIP_INT_PORT_MAC = 'aa:aa:aa:aa:aa:aa'
FAKE_ROUTER_PORT_ID = _uuid()
FAKE_ROUTER_PORT_MAC = 'bb:bb:bb:bb:bb:bb'
class TestExtensionManager(object):
def get_resources(self):
return l3.L3.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
# A simple class for making a concrete class out of the mixin
# for the case of a plugin that integrates l3 routing.
class TestDbIntPlugin(test_l3.TestL3NatIntPlugin,
l3_gwmode_db.L3_NAT_db_mixin):
supported_extension_aliases = [enet_apidef.ALIAS, l3_apidef.ALIAS,
l3_ext_gw_mode.ALIAS]
# A simple class for making a concrete class out of the mixin
# for the case of a l3 router service plugin
class TestDbSepPlugin(test_l3.TestL3NatServicePlugin,
l3_gwmode_db.L3_NAT_db_mixin):
supported_extension_aliases = [l3_apidef.ALIAS, l3_ext_gw_mode.ALIAS]
class TestGetEnableSnat(testscenarios.WithScenarios, base.BaseTestCase):
scenarios = [
('enabled', {'enable_snat_by_default': True}),
('disabled', {'enable_snat_by_default': False})]
def setUp(self):
super(TestGetEnableSnat, self).setUp()
self.config(enable_snat_by_default=self.enable_snat_by_default)
def _test_get_enable_snat(self, expected, info):
observed = l3_gwmode_db.L3_NAT_dbonly_mixin._get_enable_snat(info)
self.assertEqual(expected, observed)
def test_get_enable_snat_without_gw_info(self):
self._test_get_enable_snat(self.enable_snat_by_default, {})
def test_get_enable_snat_without_enable_snat(self):
info = {'network_id': _uuid()}
self._test_get_enable_snat(self.enable_snat_by_default, info)
def test_get_enable_snat_with_snat_enabled(self):
self._test_get_enable_snat(True, {'enable_snat': True})
def test_get_enable_snat_with_snat_disabled(self):
self._test_get_enable_snat(False, {'enable_snat': False})
class TestL3GwModeMixin(testlib_api.SqlTestCase):
def setUp(self):
super(TestL3GwModeMixin, self).setUp()
plugin = __name__ + '.' + TestDbIntPlugin.__name__
self.setup_coreplugin(plugin)
self.target_object = TestDbIntPlugin()
# Patch the context
ctx_patcher = mock.patch('neutron_lib.context', autospec=True)
mock_context = ctx_patcher.start()
self.context = mock_context.get_admin_context()
# This ensure also calls to elevated work in unit tests
self.context.elevated.return_value = self.context
self.context.session = db_api.get_writer_session()
# Create sample data for tests
self.ext_net_id = _uuid()
self.int_net_id = _uuid()
self.int_sub_id = _uuid()
self.tenant_id = 'the_tenant'
self.network = net_obj.Network(
self.context,
id=self.ext_net_id,
project_id=self.tenant_id,
admin_state_up=True,
status=constants.NET_STATUS_ACTIVE)
self.net_ext = net_obj.ExternalNetwork(
self.context, network_id=self.ext_net_id)
self.network.create()
self.net_ext.create()
self.router = l3_models.Router(
id=_uuid(),
name=None,
tenant_id=self.tenant_id,
admin_state_up=True,
status=constants.NET_STATUS_ACTIVE,
enable_snat=True,
gw_port_id=None)
self.context.session.add(self.router)
self.context.session.flush()
self.router_gw_port = port_obj.Port(
self.context,
id=FAKE_GW_PORT_ID,
project_id=self.tenant_id,
device_id=self.router.id,
device_owner=l3_db.DEVICE_OWNER_ROUTER_GW,
admin_state_up=True,
status=constants.PORT_STATUS_ACTIVE,
mac_address=netaddr.EUI(FAKE_GW_PORT_MAC),
network_id=self.ext_net_id)
self.router_gw_port.create()
self.router.gw_port_id = self.router_gw_port.id
self.context.session.add(self.router)
self.context.session.flush()
self.fip_ext_port = port_obj.Port(
self.context,
id=FAKE_FIP_EXT_PORT_ID,
project_id=self.tenant_id,
admin_state_up=True,
device_id=self.router.id,
device_owner=l3_db.DEVICE_OWNER_FLOATINGIP,
status=constants.PORT_STATUS_ACTIVE,
mac_address=netaddr.EUI(FAKE_FIP_EXT_PORT_MAC),
network_id=self.ext_net_id)
self.fip_ext_port.create()
self.context.session.flush()
self.int_net = net_obj.Network(
self.context,
id=self.int_net_id,
project_id=self.tenant_id,
admin_state_up=True,
status=constants.NET_STATUS_ACTIVE)
self.int_sub = subnet_obj.Subnet(self.context,
id=self.int_sub_id,
project_id=self.tenant_id,
ip_version=constants.IP_VERSION_4,
cidr=utils.AuthenticIPNetwork('3.3.3.0/24'),
gateway_ip=netaddr.IPAddress('3.3.3.1'),
network_id=self.int_net_id)
self.router_port = port_obj.Port(
self.context,
id=FAKE_ROUTER_PORT_ID,
project_id=self.tenant_id,
admin_state_up=True,
device_id=self.router.id,
device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
status=constants.PORT_STATUS_ACTIVE,
mac_address=netaddr.EUI(FAKE_ROUTER_PORT_MAC),
network_id=self.int_net_id)
self.router_port_ip_info = port_obj.IPAllocation(self.context,
port_id=self.router_port.id,
network_id=self.int_net.id,
subnet_id=self.int_sub_id,
ip_address='3.3.3.1')
self.int_net.create()
self.int_sub.create()
self.router_port.create()
self.router_port_ip_info.create()
self.context.session.flush()
self.fip_int_port = port_obj.Port(
self.context,
id=FAKE_FIP_INT_PORT_ID,
project_id=self.tenant_id,
admin_state_up=True,
device_id='something',
device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX + 'nova',
status=constants.PORT_STATUS_ACTIVE,
mac_address=netaddr.EUI(FAKE_FIP_INT_PORT_MAC),
network_id=self.int_net_id)
self.fip_int_ip_info = port_obj.IPAllocation(self.context,
port_id=self.fip_int_port.id,
network_id=self.int_net.id,
subnet_id=self.int_sub_id,
ip_address='3.3.3.3')
self.fip = l3_obj.FloatingIP(
self.context,
id=_uuid(),
floating_ip_address=netaddr.IPAddress('1.1.1.2'),
floating_network_id=self.ext_net_id,
floating_port_id=FAKE_FIP_EXT_PORT_ID,
fixed_port_id=None,
fixed_ip_address=None,
router_id=None)
self.fip_int_port.create()
self.fip_int_ip_info.create()
self.fip.create()
self.context.session.flush()
self.context.session.expire_all()
self.fip_request = {'port_id': FAKE_FIP_INT_PORT_ID,
'tenant_id': self.tenant_id}
def _get_gwports_dict(self, gw_ports):
return dict((gw_port['id'], gw_port)
for gw_port in gw_ports)
def _reset_ext_gw(self):
# Reset external gateway
self.router.gw_port_id = None
self.context.session.add(self.router)
self.context.session.flush()
def _test_update_router_gw(self, current_enable_snat, gw_info=None,
expected_enable_snat=True):
if not current_enable_snat:
previous_gw_info = {'network_id': self.ext_net_id,
'enable_snat': current_enable_snat}
self.target_object._update_router_gw_info(
self.context, self.router.id, previous_gw_info)
self.target_object._update_router_gw_info(
self.context, self.router.id, gw_info)
router = self.target_object._get_router(
self.context, self.router.id)
try:
self.assertEqual(FAKE_GW_PORT_ID,
router.gw_port.id)
self.assertEqual(netaddr.EUI(FAKE_GW_PORT_MAC),
router.gw_port.mac_address)
except AttributeError:
self.assertIsNone(router.gw_port)
self.assertEqual(expected_enable_snat, router.enable_snat)
def test_update_router_gw_with_gw_info_none(self):
self._test_update_router_gw(current_enable_snat=True)
def test_update_router_gw_without_info_and_snat_disabled_previously(self):
self._test_update_router_gw(current_enable_snat=False)
def test_update_router_gw_with_network_only(self):
info = {'network_id': self.ext_net_id}
self._test_update_router_gw(current_enable_snat=True, gw_info=info)
def test_update_router_gw_with_network_and_snat_disabled_previously(self):
info = {'network_id': self.ext_net_id}
self._test_update_router_gw(current_enable_snat=False, gw_info=info)
def test_update_router_gw_with_snat_disabled(self):
info = {'network_id': self.ext_net_id,
'enable_snat': False}
self._test_update_router_gw(
current_enable_snat=True, gw_info=info, expected_enable_snat=False)
def test_update_router_gw_with_snat_enabled(self):
info = {'network_id': self.ext_net_id,
'enable_snat': True}
self._test_update_router_gw(current_enable_snat=False, gw_info=info)
def test_make_router_dict_no_ext_gw(self):
self._reset_ext_gw()
router_dict = self.target_object._make_router_dict(self.router)
self.assertIsNone(router_dict[l3_apidef.EXTERNAL_GW_INFO])
def test_make_router_dict_with_ext_gw(self):
router_dict = self.target_object._make_router_dict(self.router)
self.assertEqual({'network_id': self.ext_net_id,
'enable_snat': True,
'external_fixed_ips': []},
router_dict[l3_apidef.EXTERNAL_GW_INFO])
def test_make_router_dict_with_ext_gw_snat_disabled(self):
self.router.enable_snat = False
router_dict = self.target_object._make_router_dict(self.router)
self.assertEqual({'network_id': self.ext_net_id,
'enable_snat': False,
'external_fixed_ips': []},
router_dict[l3_apidef.EXTERNAL_GW_INFO])
def test_build_routers_list_no_ext_gw(self):
self._reset_ext_gw()
router_dict = self.target_object._make_router_dict(self.router)
routers = self.target_object._build_routers_list(self.context,
[router_dict],
[])
self.assertEqual(1, len(routers))
router = routers[0]
self.assertIsNone(router.get('gw_port'))
self.assertIsNone(router.get('enable_snat'))
def test_build_routers_list_with_ext_gw(self):
router_dict = self.target_object._make_router_dict(self.router)
routers = self.target_object._build_routers_list(
self.context, [router_dict],
self._get_gwports_dict([self.router.gw_port]))
self.assertEqual(1, len(routers))
router = routers[0]
self.assertIsNotNone(router.get('gw_port'))
self.assertEqual(FAKE_GW_PORT_ID, router['gw_port']['id'])
self.assertTrue(router.get('enable_snat'))
def test_build_routers_list_with_ext_gw_snat_disabled(self):
self.router.enable_snat = False
router_dict = self.target_object._make_router_dict(self.router)
routers = self.target_object._build_routers_list(
self.context, [router_dict],
self._get_gwports_dict([self.router.gw_port]))
self.assertEqual(1, len(routers))
router = routers[0]
self.assertIsNotNone(router.get('gw_port'))
self.assertEqual(FAKE_GW_PORT_ID, router['gw_port']['id'])
self.assertFalse(router.get('enable_snat'))
def test_build_routers_list_with_gw_port_mismatch(self):
router_dict = self.target_object._make_router_dict(self.router)
routers = self.target_object._build_routers_list(
self.context, [router_dict], {})
self.assertEqual(1, len(routers))
router = routers[0]
self.assertIsNone(router.get('gw_port'))
self.assertIsNone(router.get('enable_snat'))
class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
test_l3.L3NatTestCaseMixin):
def setUp(self, plugin=None, svc_plugins=None, ext_mgr=None):
plugin = plugin or (
'neutron.tests.unit.extensions.test_l3_ext_gw_mode.'
'TestDbIntPlugin')
# for these tests we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
ext_mgr = ext_mgr or TestExtensionManager()
super(ExtGwModeIntTestCase, self).setUp(plugin=plugin,
ext_mgr=ext_mgr,
service_plugins=svc_plugins)
def _set_router_external_gateway(self, router_id, network_id,
snat_enabled=None,
expected_code=exc.HTTPOk.code,
neutron_context=None):
ext_gw_info = {'network_id': network_id}
# Need to set enable_snat also if snat_enabled == False
if snat_enabled is not None:
ext_gw_info['enable_snat'] = snat_enabled
return self._update('routers', router_id,
{'router': {'external_gateway_info':
ext_gw_info}},
expected_code=expected_code,
neutron_context=neutron_context)
def test_router_gateway_set_fail_after_port_create(self):
with self.router() as r, self.subnet() as s:
ext_net_id = s['subnet']['network_id']
self._set_net_external(ext_net_id)
plugin = directory.get_plugin()
with mock.patch.object(plugin, '_get_port',
side_effect=ValueError()):
self._set_router_external_gateway(r['router']['id'],
ext_net_id,
expected_code=500)
ports = [p for p in plugin.get_ports(nctx.get_admin_context())
if p['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_GW]
self.assertFalse(ports)
def test_router_gateway_set_retry(self):
with self.router() as r, self.subnet() as s:
ext_net_id = s['subnet']['network_id']
self._set_net_external(ext_net_id)
with mock.patch.object(
l3_db.L3_NAT_dbonly_mixin, '_validate_gw_info',
side_effect=[db_exc.RetryRequest(None), ext_net_id]):
self._set_router_external_gateway(r['router']['id'],
ext_net_id)
res = self._show('routers', r['router']['id'])['router']
self.assertEqual(ext_net_id,
res['external_gateway_info']['network_id'])
def test_router_create_with_gwinfo_invalid_ext_ip(self):
with self.subnet() as s:
self._set_net_external(s['subnet']['network_id'])
ext_info = {
'network_id': s['subnet']['network_id'],
'external_fixed_ips': [{'ip_address': '10.0.0.'}]
}
error_code = exc.HTTPBadRequest.code
res = self._create_router(
self.fmt, _uuid(), arg_list=('external_gateway_info',),
external_gateway_info=ext_info,
expected_code=error_code
)
msg = ("Invalid input for external_gateway_info. "
"Reason: '10.0.0.' is not a valid IP address.")
body = jsonutils.loads(res.body)
self.assertEqual(msg, body['NeutronError']['message'])
def test_router_create_show_no_ext_gwinfo(self):
name = 'router1'
tenant_id = _uuid()
expected_value = [('name', name), ('tenant_id', tenant_id),
('admin_state_up', True), ('status', 'ACTIVE'),
('external_gateway_info', None)]
with self.router(name=name, admin_state_up=True,
tenant_id=tenant_id) as router:
res = self._show('routers', router['router']['id'])
for k, v in expected_value:
self.assertEqual(res['router'][k], v)
def _test_router_create_show_ext_gwinfo(self, snat_input_value,
snat_expected_value):
name = 'router1'
tenant_id = _uuid()
with self.subnet() as s:
ext_net_id = s['subnet']['network_id']
self._set_net_external(ext_net_id)
input_value = {'network_id': ext_net_id}
if snat_input_value in (True, False):
input_value['enable_snat'] = snat_input_value
expected_value = [('name', name), ('tenant_id', tenant_id),
('admin_state_up', True), ('status', 'ACTIVE'),
('external_gateway_info',
{'network_id': ext_net_id,
'enable_snat': snat_expected_value,
'external_fixed_ips': [{
'ip_address': mock.ANY,
'subnet_id': s['subnet']['id']}]})]
with self.router(
name=name, admin_state_up=True, tenant_id=tenant_id,
external_gateway_info=input_value) as router:
res = self._show('routers', router['router']['id'])
for k, v in expected_value:
self.assertEqual(res['router'][k], v)
def test_router_create_show_ext_gwinfo_default(self):
self._test_router_create_show_ext_gwinfo(None, True)
def test_router_create_show_ext_gwinfo_with_snat_enabled(self):
self._test_router_create_show_ext_gwinfo(True, True)
def test_router_create_show_ext_gwinfo_with_snat_disabled(self):
self._test_router_create_show_ext_gwinfo(False, False)
def _test_router_update_ext_gwinfo(self, snat_input_value,
snat_expected_value=False,
expected_http_code=exc.HTTPOk.code):
with self.router() as r:
with self.subnet() as s:
try:
ext_net_id = s['subnet']['network_id']
self._set_net_external(ext_net_id)
self._set_router_external_gateway(
r['router']['id'], ext_net_id,
snat_enabled=snat_input_value,
expected_code=expected_http_code)
if expected_http_code != exc.HTTPOk.code:
return
body = self._show('routers', r['router']['id'])
res_gw_info = body['router']['external_gateway_info']
self.assertEqual(ext_net_id, res_gw_info['network_id'])
self.assertEqual(snat_expected_value,
res_gw_info['enable_snat'])
finally:
self._remove_external_gateway_from_router(
r['router']['id'], ext_net_id)
def test_router_update_ext_gwinfo_default(self):
self._test_router_update_ext_gwinfo(None, True)
def test_router_update_ext_gwinfo_with_snat_enabled(self):
self._test_router_update_ext_gwinfo(True, True)
def test_router_update_ext_gwinfo_with_snat_disabled(self):
self._test_router_update_ext_gwinfo(False, False)
def test_router_update_ext_gwinfo_with_invalid_snat_setting(self):
self._test_router_update_ext_gwinfo(
'xxx', None, expected_http_code=exc.HTTPBadRequest.code)
class ExtGwModeSepTestCase(ExtGwModeIntTestCase):
def setUp(self, plugin=None):
# Store l3 resource attribute map as it will be updated
self._l3_attribute_map_bk = {}
for item in l3_apidef.RESOURCE_ATTRIBUTE_MAP:
self._l3_attribute_map_bk[item] = (
l3_apidef.RESOURCE_ATTRIBUTE_MAP[item].copy())
plugin = plugin or (
'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin')
# the L3 service plugin
l3_plugin = ('neutron.tests.unit.extensions.test_l3_ext_gw_mode.'
'TestDbSepPlugin')
svc_plugins = {'l3_plugin_name': l3_plugin}
# for these tests we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
super(ExtGwModeSepTestCase, self).setUp(plugin=plugin,
svc_plugins=svc_plugins)
|
the-stack_0_22957 | import os
import socket
from urllib.request import _parse_proxy
from .modifier import RequestModifier
from .proxy2 import ThreadingHTTPServer
from .storage import RequestStorage
class ProxyHTTPServer(ThreadingHTTPServer):
address_family = socket.AF_INET
def __init__(self, *args, proxy_config=None, options=None, **kwargs):
# Each server instance gets its own storage
self.storage = RequestStorage(
base_dir=proxy_config.pop('request_storage_base_dir', None)
)
# Each server instance gets a request modifier
self.modifier = RequestModifier()
# The server's upstream proxy configuration (if any)
self.proxy_config = self._sanitise_proxy_config(
self._merge_with_env(proxy_config or {}))
# Additional proxy server configuration
self.options = options or {}
        # The scopes (URL patterns) that the proxy is interested in
self.scopes = []
super().__init__(*args, **kwargs)
def _merge_with_env(self, proxy_config):
"""Merge upstream proxy configuration with configuration loaded
from the environment.
"""
http_proxy = os.environ.get('http_proxy')
https_proxy = os.environ.get('https_proxy')
no_proxy = os.environ.get('no_proxy')
merged = {}
if http_proxy:
merged['http'] = http_proxy
if https_proxy:
merged['https'] = https_proxy
if no_proxy:
merged['no_proxy'] = no_proxy
merged.update(proxy_config)
return merged
def _sanitise_proxy_config(self, proxy_config):
"""Parse the proxy configuration into something more usable."""
for proxy_type in ('http', 'https'):
# Parse the upstream proxy URL into (scheme, user, password, hostport)
# for ease of access.
if proxy_config.get(proxy_type) is not None:
proxy_config[proxy_type] = _parse_proxy(
proxy_config[proxy_type])
if proxy_config:
proxy_config['no_proxy'] = [host.strip() for host in proxy_config.get('no_proxy', '').split(',')
if host]
return proxy_config
def shutdown(self):
super().shutdown()
self.storage.cleanup()
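# Illustrative sketch (not part of the original module): how _parse_proxy, which
# _sanitise_proxy_config above relies on, decomposes an upstream proxy URL, and how
# the 'no_proxy' string is split. The proxy URL, credentials and host names below
# are made-up example values. Because this module uses relative imports, the guard
# only runs when the module is executed as part of its package.
if __name__ == '__main__':
    scheme, user, password, hostport = _parse_proxy('http://user:secret@proxy.example.com:3128')
    print(scheme, user, password, hostport)   # -> http user secret proxy.example.com:3128
    no_proxy = [host.strip() for host in 'localhost, 127.0.0.1'.split(',') if host]
    print(no_proxy)                           # -> ['localhost', '127.0.0.1']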
|
the-stack_0_22958 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from scrapy.http import Request
from scrapy.exceptions import DontCloseSpider
from scrapy.spiders import Spider, CrawlSpider
from kafka import TopicPartition
from scrapy import signals
from sodo.utils.string import bytes_to_str
from sodo.utils.connection import kafka_consumer_from_settings, client_from_settings
from sodo import default_settings
from sodo.hook import HookABC
logger = logging.getLogger(__name__)
class KafkaMixin(HookABC):
    """
    Mixin class to implement reading messages from a Kafka topic.
    """
    logger = logger
topic = None
group_id = None
batch_size = None
topic_partitions = None
request_url_prefix = None
request_params = {
"method": 'GET'
}
kafka_priority_partition = 1
count = 0
def set_kafka(self, crawler=None):
if crawler is None:
raise ValueError("crawler is required")
settings = crawler.settings
if not hasattr(self, 'topic'):
self.topic = self.name
self.group_id = settings.get('SODO_KAFKA_SPIDER_GROUP_ID', self.group_id)
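        # The request URL prefix can be supplied by either setting below;
        # SODO_KAFKA_SPIDER_URL_PREFIX takes precedence when both are set.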
self.request_url_prefix = settings.get('SODO_KAFKA_SPIDER_MESSAGE_PREFIX', self.request_url_prefix)
self.request_url_prefix = settings.get('SODO_KAFKA_SPIDER_URL_PREFIX', self.request_url_prefix)
self.kafka_priority_partition = settings.getint("SODO_KAFKA_PRIORITY_PARTITION",
default_settings.SODO_KAFKA_PRIORITY_PARTITION)
if self.batch_size is None:
# set kafka batch size, CONCURRENT_REQUESTS for default.
self.batch_size = settings.getint(
'SODO_KAFKA_START_MESSAGE_BATCH_SIZE',
settings.getint('CONCURRENT_REQUESTS'),
)
self.consumer = self.set_consumer(settings)
self.logger.info("Start Pull Messages from kafka topic '%s'" % self.topic)
def set_consumer(self, settings):
raise NotImplementedError('{} set_consumer is not defined'.format(self.__class__.__name__))
def message_pre_process(self, message):
"""pre process kafka message before into KafkaTopicSpider"""
return message
def next_requests(self):
"""
Pull data from kafka and get a request to be scheduled.
"""
raise NotImplementedError('{}.parse callback is not defined'.format(self.__class__.__name__))
def make_request_from_message(self, msg):
"""
:param msg: message from kafka
:return:
"""
url = bytes_to_str(self.message_pre_process(msg))
if self.request_url_prefix is not None:
url = self.request_url_prefix + str(url)
return self.make_requests_from_url(url, meta={'topic_message': bytes_to_str(msg)})
def make_requests_from_url(self, url, meta=None):
return Request(url, meta=meta, **self.request_params)
def schedule_next_request(self):
"""Schedules a request if available """
requests = self.next_requests()
if requests is not None:
for req in requests:
if req is not None:
self.crawler.engine.crawl(req, spider=self)
# ----------------------------default signals hooks-----------------------------
def spider_idle_hook(self, *args, **kwargs):
"""Default hook:Schedules a request if available, otherwise waits."""
self.schedule_next_request()
raise DontCloseSpider
def item_scraped_hook(self, *args, **kwargs):
self.schedule_next_request()
def _set_crawler(self, crawler):
self.crawler = crawler
self.settings = crawler.settings
self.set_kafka(crawler)
self._set_signals(crawler)
self._set_kafka_topic_spider_signals(crawler)
def _set_kafka_topic_spider_signals(self, crawler):
crawler.signals.connect(self.spider_idle_hook,
signal=signals.spider_idle)
crawler.signals.connect(self.item_scraped_hook,
signal=signals.item_scraped)
class KafkaAutoCommitMixin(KafkaMixin):
def next_requests(self):
"""
Pull data from kafka and get a request to be scheduled.
"""
if self.batch_size == -1:
self.logger.info("Fetch ALL Messages from kafka topic: '%s'" % self.topic)
for message in self.consumer:
yield self.make_request_from_message(message.value)
count = 0
for message in self.consumer:
req = self.make_request_from_message(message.value)
if req:
yield req
count += 1
if not count < self.batch_size:
break
else:
self.logger.info("Request not made from message: %r", message)
if count:
self.logger.info("Read %s requests from topic : '%s'", count, self.topic)
def set_consumer(self, settings):
return kafka_consumer_from_settings(settings, self.topic,
group_id=self.group_id)
class KafkaNotAutoCommitMixin(KafkaMixin):
"""
key: 'kafka:%(module)s:%(topic_name)s:%(partition)s'
value: offset
"""
def set_consumer(self, settings):
kafka_consumer_client = kafka_consumer_from_settings(settings, group_id=self.group_id, enable_auto_commit=False)
if self.topic_partitions is None:
self.topic_partitions = kafka_consumer_client.partitions_for_topic(self.topic)
return kafka_consumer_client
def get_partition_offset(self, partition):
"""
:param partition:
:return:
"""
key = self.kafka_redis_key_format % {"module": "spider", "topic": self.topic, "partition": partition}
offset = self.redis_client.get(key)
if offset is None:
tp = TopicPartition(topic=self.topic, partition=partition)
offset = self.consumer.end_offsets([tp])[tp]
# update offset to redis
self.update_partition_offset(partition=partition, offset=offset)
return int(bytes_to_str(offset))
def get_partitions(self):
"""get consumer kafka topic partitions"""
return [partition for partition in self.topic_partitions]
def update_partition_offset(self, partition: int, offset: int):
"""
:param partition: partition
:param offset: offset
:return:
"""
key = self.kafka_redis_key_format % {"module": "spider", "topic": self.topic, "partition": partition}
self.redis_client.set(key, offset)
def set_redis_client(self, crawler=None):
if crawler is None:
crawler = getattr(self, 'crawler', None)
if crawler is None:
raise ValueError("crawler is required")
settings = crawler.settings
self.kafka_redis_key_format = settings.get("KAFKA_REDIS_KEY_FORMAT", default_settings.KAFKA_REDIS_KEY_FORMAT)
self.redis_client = client_from_settings(settings)
def start_consumer(self):
if self.batch_size == -1:
msg = self.consumer.poll(timeout_ms=200)
self.logger.info(
"Fetch ALL Messages from kafka topic: {} ".format(self.topic))
if len(msg) > 0:
for messages in msg.values():
for message in messages:
self.update_partition_offset(message.partition, message.offset)
yield self.make_request_from_message(message.value)
if self.batch_size > 0:
count = 0
msg = self.consumer.poll(timeout_ms=200, max_records=self.batch_size)
for messages in msg.values():
if len(msg) > 0:
for message in messages:
req = self.make_request_from_message(message.value)
if req:
self.update_partition_offset(message.partition, message.offset)
yield req
count += 1
if not count < self.batch_size:
break
else:
self.logger.info("Request not made from message: %r", message)
if count:
self.logger.info("Read %s requests from topic : '%s'", count, self.topic)
def next_requests(self):
tps = [TopicPartition(topic=self.topic, partition=p) for p in self.topic_partitions]
self.consumer.assign(tps)
for partition in self.topic_partitions:
offset = self.get_partition_offset(partition)
self.consumer.seek(TopicPartition(topic=self.topic, partition=partition), offset)
return self.start_consumer()
class KafkaTopicPriorityMixin(KafkaNotAutoCommitMixin):
def fetch_one_message_by_partition(self, partition, max_records=1):
current_offset = self.get_partition_offset(partition)
topic_partition = TopicPartition(topic=self.topic, partition=partition)
self.consumer.assign([topic_partition])
self.consumer.seek(topic_partition, current_offset + 1)
res = self.consumer.poll(timeout_ms=100, max_records=max_records)
self.consumer.seek(topic_partition, current_offset)
return res
def set_consumer(self, settings):
kafka_consumer_client = kafka_consumer_from_settings(settings, group_id=self.group_id, enable_auto_commit=False)
if self.topic_partitions is None:
self.topic_partitions = kafka_consumer_client.partitions_for_topic(self.topic)
return kafka_consumer_client
def next_requests(self):
if self.kafka_priority_partition not in self.get_partitions():
raise ValueError(" kafka num.partitions not greater than 1 or partition num out of range")
one_msg = self.fetch_one_message_by_partition(self.kafka_priority_partition)
if len(one_msg) > 0:
return self.start_consumer()
else:
other_partition = [partition for partition in self.get_partitions() if
partition != self.kafka_priority_partition]
self.consumer.assign(
[TopicPartition(topic=self.topic, partition=partition) for partition in other_partition])
for partition in other_partition:
partition_offset = self.get_partition_offset(partition)
self.consumer.seek(TopicPartition(topic=self.topic, partition=partition), partition_offset + 1)
return self.start_consumer()
class KafkaTopicPrioritySpider(KafkaTopicPriorityMixin, Spider):
@classmethod
def from_crawler(self, crawler, *args, **kwargs):
obj = super(KafkaTopicPrioritySpider, self).from_crawler(crawler, *args, **kwargs)
obj.set_redis_client(crawler)
return obj
def parse(self, response):
raise NotImplementedError('{}.parse callback is not defined'.format(self.__class__.__name__))
class KafkaTopicPriorityCrawlSpider(KafkaTopicPriorityMixin, CrawlSpider):
@classmethod
def from_crawler(self, crawler, *args, **kwargs):
obj = super(KafkaTopicPriorityCrawlSpider, self).from_crawler(crawler, *args, **kwargs)
obj.set_redis_client(crawler)
return obj
def parse(self, response):
raise NotImplementedError('{}.parse callback is not defined'.format(self.__class__.__name__))
class KafkaTopicAdvancedSpider(KafkaNotAutoCommitMixin, Spider):
@classmethod
def from_crawler(self, crawler, *args, **kwargs):
obj = super(KafkaTopicAdvancedSpider, self).from_crawler(crawler, *args, **kwargs)
obj.set_redis_client(crawler)
return obj
def parse(self, response):
raise NotImplementedError('{}.parse callback is not defined'.format(self.__class__.__name__))
class KafkaTopicAdvancedCrawlSpider(KafkaNotAutoCommitMixin, CrawlSpider):
@classmethod
def from_crawler(self, crawler, *args, **kwargs):
obj = super(KafkaTopicAdvancedCrawlSpider, self).from_crawler(crawler, *args, **kwargs)
obj.set_redis_client(crawler)
return obj
def parse(self, response):
raise NotImplementedError('{}.parse callback is not defined'.format(self.__class__.__name__))
class KafkaTopicSpider(KafkaAutoCommitMixin, Spider):
def parse(self, response):
raise NotImplementedError('{}.parse callback is not defined'.format(self.__class__.__name__))
class KafkaTopicCrawlSpider(KafkaAutoCommitMixin, CrawlSpider):
def parse(self, response):
raise NotImplementedError('{}.parse callback is not defined'.format(self.__class__.__name__))
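# Illustrative sketch (not part of the original module): a minimal concrete spider
# built on KafkaTopicSpider. The Kafka topic is set explicitly, each message value
# is appended to request_url_prefix to form a URL, and parse() must be supplied by
# the subclass. All names, the prefix and the parsing logic below are assumptions
# for the example, not part of the sodo API.
class ExampleKafkaTopicSpider(KafkaTopicSpider):
    name = 'example_spider'
    topic = 'example_topic'                              # Kafka topic to consume from
    group_id = 'example-consumer-group'
    request_url_prefix = 'https://example.com/items/'    # message value is appended to this
    batch_size = 16                                      # messages pulled per scheduling round
    def parse(self, response):
        # The raw Kafka message is exposed via response.meta['topic_message'].
        yield {'url': response.url, 'message': response.meta.get('topic_message')}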
|
the-stack_0_22960 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Defines the Pupper robot related constants and URDF specs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gin
import numpy as np
import puppersim.data as pd
URDF_PATH = pd.getDataPath() + "/pupper_v2a.urdf"  # or pupper_v2b with toes, but no visual meshes
NUM_MOTORS = 12
NUM_LEGS = 4
MOTORS_PER_LEG = 3
INIT_RACK_POSITION = [0, 0, 1]
INIT_POSITION = [0, 0, 0.17]
# Will be default to (0, 0, 0, 1) once the new laikago_toes_zup.urdf checked in.
INIT_ORIENTATION = [0, 0, 0, 1]
# Can be different from the motors
JOINT_NAMES = (
# front right leg
"rightFrontLegMotor",
"rightFrontUpperLegMotor",
"rightFrontLowerLegMotor",
# front left leg
"leftFrontLegMotor",
"leftFrontUpperLegMotor",
"leftFrontLowerLegMotor",
# rear right leg
"rightRearLegMotor",
"rightRearUpperLegMotor",
"rightRearLowerLegMotor",
# rear left leg
"leftRearLegMotor",
"leftRearUpperLegMotor",
"leftRearLowerLegMotor",
)
INIT_ABDUCTION_ANGLE = 0
INIT_HIP_ANGLE = 0.6
INIT_KNEE_ANGLE = -1.2
# Note this matches the Laikago SDK/control convention, but is different from
# URDF's internal joint angles which needs to be computed using the joint
# offsets and directions. The conversion formula is (sdk_joint_angle + offset) *
# joint direction.
INIT_JOINT_ANGLES = collections.OrderedDict(
zip(JOINT_NAMES,
(INIT_ABDUCTION_ANGLE, INIT_HIP_ANGLE, INIT_KNEE_ANGLE) * NUM_LEGS))
# Used to convert the robot SDK joint angles to URDF joint angles.
JOINT_DIRECTIONS = collections.OrderedDict(
zip(JOINT_NAMES, (-1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1)))
HIP_JOINT_OFFSET = 0.0
UPPER_LEG_JOINT_OFFSET = 0  # -0.6
KNEE_JOINT_OFFSET = 0  # 0.66
# Used to convert the robot SDK joint angles to URDF joint angles.
JOINT_OFFSETS = collections.OrderedDict(
zip(JOINT_NAMES,
[HIP_JOINT_OFFSET, UPPER_LEG_JOINT_OFFSET, KNEE_JOINT_OFFSET] *
NUM_LEGS))
LEG_NAMES = (
"front_right",
"front_left",
"rear_right",
"rear_left",
)
LEG_ORDER = (
"front_right",
"front_left",
"back_right",
"back_left",
)
END_EFFECTOR_NAMES = (
"rightFrontToe",
"leftFrontToe",
"rightRearToe",
"leftRearToe",
)
MOTOR_NAMES = JOINT_NAMES
MOTOR_GROUP = collections.OrderedDict((
(LEG_NAMES[0], JOINT_NAMES[0:3]),
(LEG_NAMES[1], JOINT_NAMES[3:6]),
(LEG_NAMES[2], JOINT_NAMES[6:9]),
(LEG_NAMES[3], JOINT_NAMES[9:12]),
))
# Regulates the joint angle change when in position control mode.
MAX_MOTOR_ANGLE_CHANGE_PER_STEP = 0.02
# The hip joint location in the CoM frame.
HIP_POSITIONS = collections.OrderedDict((
(LEG_NAMES[0], (0.21, -0.1157, 0)),
(LEG_NAMES[1], (0.21, 0.1157, 0)),
(LEG_NAMES[2], (-0.21, -0.1157, 0)),
(LEG_NAMES[3], (-0.21, 0.1157, 0)),
))
MOTOR_ACTION_LOWER_LIMIT = np.array([-0.18,0.1,-2.3]*4)
MOTOR_ACTION_UPPER_LIMIT = np.array([0.18,0.7,-0.6]*4)
# Add the gin constants to be used for gin binding in config. Append "PUPPER_"
# for unique binding names.
gin.constant("pupper_constants.PUPPER_NUM_MOTORS", NUM_MOTORS)
gin.constant("pupper_constants.PUPPER_URDF_PATH", URDF_PATH)
gin.constant("pupper_constants.PUPPER_INIT_POSITION", INIT_POSITION)
gin.constant("pupper_constants.PUPPER_INIT_ORIENTATION", INIT_ORIENTATION)
gin.constant("pupper_constants.PUPPER_INIT_JOINT_ANGLES", INIT_JOINT_ANGLES)
gin.constant("pupper_constants.PUPPER_JOINT_DIRECTIONS", JOINT_DIRECTIONS)
gin.constant("pupper_constants.PUPPER_JOINT_OFFSETS", JOINT_OFFSETS)
gin.constant("pupper_constants.PUPPER_MOTOR_NAMES", MOTOR_NAMES)
gin.constant("pupper_constants.PUPPER_END_EFFECTOR_NAMES", END_EFFECTOR_NAMES)
gin.constant("pupper_constants.PUPPER_MOTOR_GROUP", MOTOR_GROUP)
gin.constant("pupper_constants.MOTOR_ACTION_LOWER_LIMIT", MOTOR_ACTION_LOWER_LIMIT)
gin.constant("pupper_constants.MOTOR_ACTION_UPPER_LIMIT", MOTOR_ACTION_UPPER_LIMIT)
|
the-stack_0_22962 | from django.db import connection
from products.models import Category, Product, Price
from .get_current_object import get_no_bsr_current_category_obj, is_product, get_current_seller_obj, \
get_current_price_obj, get_bsr_category_obj
from .get_instances_kwargs import get_bsr_category_kwargs, set_kwargs, get_not_bsr_category_kwargs, \
get_product_kwargs, get_seller_kwargs, get_prices_kwargs
from user.models import Seller
def form_data(validated_jobs, objs_querysets):
product_update_list = []
product_create_list = []
product_update_fields_list = []
categories_bulk_create_list = []
categories_bulk_update_list = []
categories_update_fields_list = []
sellers_bulk_create_list = []
sellers_bulk_update_list = []
sellers_update_fields_list = []
prices_bulk_create_list = []
prices_bulk_update_list = []
prices_update_fields_list = []
parents_list = []
categories_queryset = objs_querysets['categories_queryset']
products_queryset = objs_querysets['product_queryset']
sellers_queryset = objs_querysets['sellers_queryset']
prices_queryset = objs_querysets['prices_queryset']
for jobs_data in validated_jobs:
category_data, category = form_category_data(jobs_data, categories_queryset)
categories_bulk_create_list += category_data['categories_bulk_create_list']
categories_bulk_update_list += category_data['categories_bulk_update_list']
categories_update_fields_list += category_data['categories_update_fields_list']
if category_data.get('parents_list'):
parents_list += category_data['parents_list']
product_data, product = form_product_data(jobs_data, category, products_queryset)
product_update_list += product_data['product_bulk_update_list']
product_create_list += product_data['product_bulk_create_list']
product_update_fields_list += product_data['product_bulk_update_fields_list']
seller_data = form_seller_and_price_data(jobs_data, product, sellers_queryset, prices_queryset)
sellers_bulk_create_list += seller_data['sellers_bulk_create_list']
sellers_bulk_update_list += seller_data['sellers_bulk_update_list']
sellers_update_fields_list += seller_data['sellers_update_fields_list']
prices_bulk_create_list += seller_data['prices_bulk_create_list']
prices_bulk_update_list += seller_data['prices_bulk_update_list']
prices_update_fields_list += seller_data['prices_update_fields_list']
product_update_fields_list = list(set(product_update_fields_list))
categories_update_fields_list = list(set(categories_update_fields_list))
sellers_update_fields_list = list(set(sellers_update_fields_list))
prices_update_fields_list = list(set(prices_update_fields_list))
if 'id' in product_update_fields_list:
product_update_fields_list.remove('id')
if 'id' in categories_update_fields_list:
categories_update_fields_list.remove('id')
if 'id' in sellers_update_fields_list:
sellers_update_fields_list.remove('id')
if 'id' in prices_update_fields_list:
prices_update_fields_list.remove('id')
formed_data = {
'product_update_list': product_update_list,
'product_create_list': product_create_list,
'product_update_fields_list': product_update_fields_list,
'categories_bulk_create_list': categories_bulk_create_list,
'categories_bulk_update_list': categories_bulk_update_list,
'categories_update_fields_list': categories_update_fields_list,
'sellers_bulk_create_list': sellers_bulk_create_list,
'sellers_bulk_update_list': sellers_bulk_update_list,
'sellers_update_fields_list': sellers_update_fields_list,
'prices_bulk_create_list': prices_bulk_create_list,
'prices_bulk_update_list': prices_bulk_update_list,
'prices_update_fields_list': prices_update_fields_list,
}
return formed_data
def form_category_data(product_data, categories_queryset):
if product_data.get('bsr'):
category_objs_data, category = form_bsr_category_data(product_data, categories_queryset)
else:
category_objs_data, category = form_not_bsr_category_data(product_data, categories_queryset)
category_data = {
'categories_bulk_update_list': category_objs_data['categories_bulk_update_list'],
'categories_update_fields_list': category_objs_data['categories_update_fields_list'],
'categories_bulk_create_list': category_objs_data['categories_bulk_create_list'],
}
if category_objs_data.get('parents_list'):
category_data['parents_list'] = category_objs_data['parents_list']
return category_data, category
def form_bsr_category_data(product_data, categories_list):
categories_bulk_update_list = []
categories_bulk_create_list = []
categories_update_fields_list = []
parents_list = []
category = None
parent = None
for category_data in product_data['bsr']:
category = get_bsr_category_obj(categories_list, category_data, parent=parent)
kwargs = get_bsr_category_kwargs(category_data, parent, category)
category, categories_bulk_update_list, categories_bulk_create_list = form_bsr_object_category_data(categories_bulk_update_list, categories_bulk_create_list, kwargs, category)
categories_update_fields_list = set_kwargs(categories_update_fields_list, kwargs)
parents_list.append(parent)
parent = category
category_data = {
'categories_bulk_update_list': categories_bulk_update_list,
'categories_bulk_create_list': categories_bulk_create_list,
'categories_update_fields_list': categories_update_fields_list,
'parents_list': parents_list
}
return category_data, category
def form_not_bsr_category_data(product_data, categories_list):
categories_bulk_update_list = []
categories_update_fields_list = []
categories_bulk_create_list = []
category = get_no_bsr_current_category_obj(categories_list, product_data)
kwargs = get_not_bsr_category_kwargs(product_data, category)
category, categories_bulk_update_list, categories_bulk_create_list = form_no_bsr_object_category_data(categories_bulk_update_list, categories_bulk_create_list, kwargs, )
categories_update_fields_list = set_kwargs(categories_update_fields_list, kwargs)
category_data = {
'categories_bulk_update_list': categories_bulk_update_list,
'categories_bulk_create_list': categories_bulk_create_list,
'categories_update_fields_list': categories_update_fields_list,
}
return category_data, category
def form_product_data(product_data, category, products_list):
product_bulk_update_fields_list = []
product = is_product(products_list, product_data)
kwargs = get_product_kwargs(product_data, category)
product_obj, product_bulk_create_list, product_bulk_update_list = form_object_product_data(kwargs, product)
product_bulk_update_fields_list = set_kwargs(product_bulk_update_fields_list, kwargs)
product_data = {
'product_bulk_update_list': product_bulk_update_list,
'product_bulk_create_list': product_bulk_create_list,
'product_bulk_update_fields_list': product_bulk_update_fields_list,
}
return product_data, product_obj
def form_seller_and_price_data(product_data, product, sellers_list, prices_list):
sellers_bulk_create_list = []
sellers_bulk_update_list = []
sellers_update_fields_list = []
prices_bulk_create_list = []
prices_bulk_update_list = []
prices_update_fields_list = []
for seller_price_data in product_data['seller_list']:
seller_data, seller = form_seller_data(seller_price_data, sellers_list)
sellers_bulk_update_list += seller_data['sellers_bulk_update_list']
sellers_bulk_create_list += seller_data['sellers_bulk_create_list']
sellers_update_fields_list += seller_data['sellers_update_fields_list']
prices_data = form_prices_data(seller_price_data, product, seller, prices_list)
prices_bulk_create_list += prices_data['prices_bulk_create_list']
prices_bulk_update_list += prices_data['prices_bulk_update_list']
prices_update_fields_list += prices_data['prices_update_fields_list']
seller_price_data = {
'sellers_bulk_create_list': sellers_bulk_create_list,
'sellers_bulk_update_list': sellers_bulk_update_list,
'sellers_update_fields_list': sellers_update_fields_list,
'prices_bulk_create_list': prices_bulk_create_list,
'prices_bulk_update_list': prices_bulk_update_list,
'prices_update_fields_list': prices_update_fields_list,
}
return seller_price_data
def form_seller_data(seller_price_data, sellers_list):
sellers_bulk_update_list = []
sellers_bulk_create_list = []
sellers_update_fields_list = []
# kwargs = get_seller_kwargs(seller_price_data)
seller = get_current_seller_obj(sellers_list, seller_price_data)
# seller, sellers_bulk_update_list, sellers_bulk_create_list = form_instance_data(Seller,
# sellers_bulk_update_list, kwargs,
# bulk_create_list=sellers_bulk_create_list)
# sellers_update_fields_list = check_kwargs(sellers_update_fields_list, kwargs)
seller_data = {
'sellers_bulk_update_list': sellers_bulk_update_list,
'sellers_bulk_create_list': sellers_bulk_create_list,
'sellers_update_fields_list': sellers_update_fields_list,
}
return seller_data, seller
def form_prices_data(seller_data, product, seller, prices_list):
prices_bulk_create_list = []
prices_bulk_update_list = []
prices_update_fields_list = []
# kwargs = get_prices_kwargs(seller_data, seller, product)
# price = get_current_price_obj(prices_list, seller, product)
# instance, prices_bulk_update_list, prices_bulk_create_list = form_instance_data(Price,
# prices_bulk_update_list, kwargs,
# bulk_create_list=prices_bulk_create_list)
# prices_update_fields_list = check_kwargs(prices_update_fields_list, kwargs)
prices_data = {
'prices_bulk_create_list': prices_bulk_create_list,
'prices_bulk_update_list': prices_bulk_update_list,
'prices_update_fields_list': prices_update_fields_list,
}
return prices_data
# def form_instance_data(model, bulk_update_list, kwargs_data, **kwargs):
# if kwargs_data.get('id'):
# pop_field = kwargs.get('pop_field')
# if pop_field is not None:
# kwargs_data.pop(pop_field)
# instance = model(**kwargs_data)
# bulk_update_list.append(instance)
#
# else:
# instance = model(**kwargs_data)
# if kwargs.get('save'):
# instance.save()
#
# bulk_create_list = kwargs.get('bulk_create_list')
# if bulk_create_list is not None:
# bulk_create_list.append(instance)
# return instance, bulk_update_list, bulk_create_list
#
# bulk_create_list = kwargs.get('bulk_create_list')
# if bulk_create_list is not None:
# return instance, bulk_update_list, kwargs.get('bulk_create_list'),
# return instance, bulk_update_list
def form_no_bsr_object_category_data(categories_bulk_update_list, categories_bulk_create_list, kwargs_data):
    # An existing category (kwargs contain an id) goes only to the update list;
    # otherwise a new object goes to the create list (mirrors form_object_product_data).
    if kwargs_data.get('id'):
        category_obj = Category(**kwargs_data)
        categories_bulk_update_list.append(category_obj)
        return category_obj, categories_bulk_update_list, categories_bulk_create_list
    category_obj = Category(**kwargs_data)
    categories_bulk_create_list.append(category_obj)
    return category_obj, categories_bulk_update_list, categories_bulk_create_list
def form_bsr_object_category_data(categories_bulk_update_list, categories_bulk_create_list, kwargs_data, category):
    # A category that already exists is scheduled for update; otherwise for creation.
    if category:
        category_obj = Category(**kwargs_data)
        categories_bulk_update_list.append(category_obj)
        return category_obj, categories_bulk_update_list, categories_bulk_create_list
    category_obj = Category(**kwargs_data)
    categories_bulk_create_list.append(category_obj)
    return category_obj, categories_bulk_update_list, categories_bulk_create_list
def form_object_product_data(kwargs_data, product):
product_bulk_update_list = []
product_bulk_create_list = []
if product:
product_obj = Product(**kwargs_data)
product_bulk_update_list.append(product_obj)
return product_obj, product_bulk_create_list, product_bulk_update_list
product_obj = Product(**kwargs_data)
product_bulk_create_list.append(product_obj)
return product_obj, product_bulk_create_list, product_bulk_update_list
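# Illustrative sketch (not part of the original module): one way the dictionary
# returned by form_data() could be persisted with Django's bulk operations. The
# helper name below is an assumption for the example; ordering and foreign-key
# handling (e.g. saving categories before products that reference them) is left
# to the caller.
def persist_formed_data(formed_data):
    Category.objects.bulk_create(formed_data['categories_bulk_create_list'])
    if formed_data['categories_bulk_update_list'] and formed_data['categories_update_fields_list']:
        Category.objects.bulk_update(formed_data['categories_bulk_update_list'],
                                     formed_data['categories_update_fields_list'])
    Product.objects.bulk_create(formed_data['product_create_list'])
    if formed_data['product_update_list'] and formed_data['product_update_fields_list']:
        Product.objects.bulk_update(formed_data['product_update_list'],
                                    formed_data['product_update_fields_list'])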
|
the-stack_0_22964 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
from . import packet_base
from . import packet_utils
from ryu.lib import stringify
ICMP_ECHO_REPLY = 0
ICMP_DEST_UNREACH = 3
ICMP_SRC_QUENCH = 4
ICMP_REDIRECT = 5
ICMP_ECHO_REQUEST = 8
ICMP_TIME_EXCEEDED = 11
ICMP_ECHO_REPLY_CODE = 0
ICMP_HOST_UNREACH_CODE = 1
ICMP_PORT_UNREACH_CODE = 3
ICMP_TTL_EXPIRED_CODE = 0
class icmp(packet_base.PacketBase):
"""ICMP (RFC 792) header encoder/decoder class.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|L|
============== ====================
Attribute Description
============== ====================
type Type
code Code
csum CheckSum \
(0 means automatically-calculate when encoding)
data Payload. \
Either a bytearray, or \
ryu.lib.packet.icmp.echo or \
ryu.lib.packet.icmp.dest_unreach or \
ryu.lib.packet.icmp.TimeExceeded object \
NOTE for icmp.echo: \
This includes "unused" 16 bits and the following \
"Internet Header + 64 bits of Original Data Datagram" of \
the ICMP header. \
NOTE for icmp.dest_unreach and icmp.TimeExceeded: \
This includes "unused" 8 or 24 bits and the following \
"Internet Header + leading octets of original datagram" \
of the original packet.
============== ====================
"""
_PACK_STR = '!BBH'
_MIN_LEN = struct.calcsize(_PACK_STR)
_ICMP_TYPES = {}
@staticmethod
def register_icmp_type(*args):
def _register_icmp_type(cls):
for type_ in args:
icmp._ICMP_TYPES[type_] = cls
return cls
return _register_icmp_type
def __init__(self, type_=ICMP_ECHO_REQUEST, code=0, csum=0, data=None):
super(icmp, self).__init__()
self.type = type_
self.code = code
self.csum = csum
self.data = data
@classmethod
def parser(cls, buf):
(type_, code, csum) = struct.unpack_from(cls._PACK_STR, buf)
msg = cls(type_, code, csum)
offset = cls._MIN_LEN
if len(buf) > offset:
cls_ = cls._ICMP_TYPES.get(type_, None)
if cls_:
msg.data = cls_.parser(buf, offset)
else:
msg.data = buf[offset:]
return msg, None, None
def serialize(self, payload, prev):
hdr = bytearray(struct.pack(icmp._PACK_STR, self.type,
self.code, self.csum))
if self.data is not None:
if self.type in icmp._ICMP_TYPES:
hdr += self.data.serialize()
else:
hdr += self.data
else:
self.data = echo()
hdr += self.data.serialize()
if self.csum == 0:
self.csum = packet_utils.checksum(hdr)
struct.pack_into('!H', hdr, 2, self.csum)
return hdr
def __len__(self):
return self._MIN_LEN + len(self.data)
@icmp.register_icmp_type(ICMP_ECHO_REPLY, ICMP_ECHO_REQUEST)
class echo(stringify.StringifyMixin):
"""ICMP sub encoder/decoder class for Echo and Echo Reply messages.
This is used with ryu.lib.packet.icmp.icmp for
ICMP Echo and Echo Reply messages.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|L|
============== ====================
Attribute Description
============== ====================
id Identifier
seq Sequence Number
data Internet Header + 64 bits of Original Data Datagram
============== ====================
"""
_PACK_STR = '!HH'
_MIN_LEN = struct.calcsize(_PACK_STR)
def __init__(self, id_=0, seq=0, data=None):
super(echo, self).__init__()
self.id = id_
self.seq = seq
self.data = data
@classmethod
def parser(cls, buf, offset):
(id_, seq) = struct.unpack_from(cls._PACK_STR, buf, offset)
msg = cls(id_, seq)
offset += cls._MIN_LEN
if len(buf) > offset:
msg.data = buf[offset:]
return msg
def serialize(self):
hdr = bytearray(struct.pack(echo._PACK_STR, self.id,
self.seq))
if self.data is not None:
hdr += self.data
return hdr
def __len__(self):
length = self._MIN_LEN
if self.data is not None:
length += len(self.data)
return length
@icmp.register_icmp_type(ICMP_DEST_UNREACH)
class dest_unreach(stringify.StringifyMixin):
"""ICMP sub encoder/decoder class for Destination Unreachable Message.
This is used with ryu.lib.packet.icmp.icmp for
ICMP Destination Unreachable Message.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
[RFC1191] reserves bits for the "Next-Hop MTU" field.
[RFC4884] introduced 8-bit data length attribute.
.. tabularcolumns:: |l|p{35em}|
============== =====================================================
Attribute Description
============== =====================================================
data_len data length
mtu Next-Hop MTU
NOTE: This field is required when icmp code is 4
code 4 = fragmentation needed and DF set
data Internet Header + leading octets of original datagram
============== =====================================================
"""
_PACK_STR = '!xBH'
_MIN_LEN = struct.calcsize(_PACK_STR)
def __init__(self, data_len=0, mtu=0, data=None):
super(dest_unreach, self).__init__()
if ((data_len >= 0) and (data_len <= 255)):
self.data_len = data_len
else:
raise ValueError('Specified data length (%d) is invalid.' % data_len)
self.mtu = mtu
self.data = data
@classmethod
def parser(cls, buf, offset):
(data_len, mtu) = struct.unpack_from(cls._PACK_STR,
buf, offset)
msg = cls(data_len, mtu)
offset += cls._MIN_LEN
if len(buf) > offset:
msg.data = buf[offset:]
return msg
def serialize(self):
hdr = bytearray(struct.pack(dest_unreach._PACK_STR,
self.data_len, self.mtu))
if self.data is not None:
hdr += self.data
return hdr
def __len__(self):
length = self._MIN_LEN
if self.data is not None:
length += len(self.data)
return length
@icmp.register_icmp_type(ICMP_TIME_EXCEEDED)
class TimeExceeded(stringify.StringifyMixin):
"""ICMP sub encoder/decoder class for Time Exceeded Message.
This is used with ryu.lib.packet.icmp.icmp for
ICMP Time Exceeded Message.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
[RFC4884] introduced 8-bit data length attribute.
.. tabularcolumns:: |l|L|
============== ====================
Attribute Description
============== ====================
data_len data length
data Internet Header + leading octets of original datagram
============== ====================
"""
_PACK_STR = '!xBxx'
_MIN_LEN = struct.calcsize(_PACK_STR)
    def __init__(self, data_len=0, data=None):
        super(TimeExceeded, self).__init__()
        if ((data_len >= 0) and (data_len <= 255)):
self.data_len = data_len
else:
raise ValueError('Specified data length (%d) is invalid.' % data_len)
self.data = data
@classmethod
def parser(cls, buf, offset):
(data_len, ) = struct.unpack_from(cls._PACK_STR, buf, offset)
msg = cls(data_len)
offset += cls._MIN_LEN
if len(buf) > offset:
msg.data = buf[offset:]
return msg
def serialize(self):
hdr = bytearray(struct.pack(TimeExceeded._PACK_STR, self.data_len))
if self.data is not None:
hdr += self.data
return hdr
def __len__(self):
length = self._MIN_LEN
if self.data is not None:
length += len(self.data)
return length
icmp.set_classes(icmp._ICMP_TYPES)
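# Illustrative usage sketch (not part of the original module): build an ICMP echo
# request with the classes above and parse it back. The id/seq/payload values are
# arbitrary example values. Because this module uses relative imports, the guard
# only runs when the module is executed as part of its package
# (e.g. python -m ryu.lib.packet.icmp, assuming the usual install layout).
if __name__ == '__main__':
    req = icmp(type_=ICMP_ECHO_REQUEST, code=0, csum=0,
               data=echo(id_=0x1234, seq=1, data=b'ping'))
    buf = req.serialize(payload=None, prev=None)  # csum==0, so the checksum is computed here
    parsed, _, _ = icmp.parser(bytes(buf))
    assert parsed.type == ICMP_ECHO_REQUEST
    assert parsed.data.id == 0x1234 and parsed.data.seq == 1 and parsed.data.data == b'ping'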
|
the-stack_0_22965 | import os.path
from setuptools import setup, find_packages
VERSION = "0.1.0"
HERE = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(HERE, "README.md")) as fid:
README = fid.read()
setup(
name="aws-emr-launcher",
version=VERSION,
description="Library that enables to provision EMR clusters with yaml config files (Configuration as Code)",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/HarshadRanganathan/aws-emr-launcher",
author="Harshad Ranganathan",
author_email="[email protected]",
license="MIT",
classifiers=[
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords="AWS EMR",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
install_requires=[
"boto3"
],
python_requires='>=3.7',
)
|
the-stack_0_22966 | # -*- coding: utf-8 -*-
from cadnano.proxies.cnproxy import UndoCommand
from cadnano.strand import Strand
class MergeCommand(UndoCommand):
"""
    This class takes two Strands and merges them. It should be private to
    StrandSet, since knowledge of a strandset index outside of the StrandSet
    class implies knowledge of the StrandSet implementation.
    It must be passed two different strands, and nominally one of those
    strands again as the priority_strand. The resulting "merged" strand has
    the properties of the priority_strand's oligo, and decorators are
    preserved.
    strand_low and strand_high must be presorted such that strand_low covers
    a lower index range than strand_high.
    low_strandset_idx should be known ahead of time as a result of selection.
"""
def __init__(self, strand_low: Strand,
strand_high: Strand,
priority_strand: Strand):
super(MergeCommand, self).__init__("merge strands")
# Store strands
self._strand_low = strand_low
self._strand_high = strand_high
self._s_set = s_set = priority_strand.strandSet()
# Store oligos
self._new_oligo = priority_strand.oligo().shallowCopy()
self._s_low_oligo = s_low_olg = strand_low.oligo()
self._s_high_oligo = s_high_olg = strand_high.oligo()
# self._s_set_idx = low_strandset_idx
# update the new oligo length if it's not a loop
if s_low_olg != s_high_olg:
self._new_oligo._setLength(s_low_olg.length() + s_high_olg.length(),
emit_signals=True)
# Create the new_strand by copying the priority strand to
# preserve its properties
new_idxs = strand_low.lowIdx(), strand_high.highIdx()
new_strand = strand_low.shallowCopy()
new_strand.setIdxs(new_idxs)
new_strand.setConnectionHigh(strand_high.connectionHigh())
self._new_strand = new_strand
# Update the oligo for things like its 5prime end and isCircular
self._new_oligo._strandMergeUpdate(strand_low, strand_high, new_strand)
# set the new sequence by concatenating the sequence properly
if strand_low._sequence or strand_high._sequence:
tL = strand_low.totalLength()
tH = strand_high.totalLength()
seqL = strand_low._sequence if strand_low._sequence else "".join([" " for i in range(tL)])
seqH = strand_high._sequence if strand_high._sequence else "".join([" " for i in range(tH)])
if new_strand.isForward():
new_strand._sequence = seqL + seqH
else:
new_strand._sequence = seqH + seqL
# end def
def redo(self):
ss = self._s_set
ss._document
s_low = self._strand_low
s_high = self._strand_high
new_strand = self._new_strand
# idx = self._s_set_idx
olg = self._new_oligo
l_olg = s_low.oligo()
h_olg = s_high.oligo()
fSetOligo = Strand.setOligo
# Remove old strands from the s_set (reusing idx, so order matters)
ss._removeFromStrandList(s_low, update_segments=False)
ss._removeFromStrandList(s_high, update_segments=False)
# Add the new_strand to the s_set
ss._addToStrandList(new_strand)
# update connectivity of strands
nScL = new_strand.connectionLow()
if nScL:
if (new_strand.isForward() and nScL.isForward()) or \
(not new_strand.isForward() and not nScL.isForward()):
nScL.setConnectionHigh(new_strand)
else:
nScL.setConnectionLow(new_strand)
nScH = new_strand.connectionHigh()
if nScH:
if (new_strand.isForward() and nScH.isForward()) or \
(not new_strand.isForward() and not nScH.isForward()):
nScH.setConnectionLow(new_strand)
else:
nScH.setConnectionHigh(new_strand)
# Traverse the strands via 3'conns to assign the new oligo
for strand in olg.strand5p().generator3pStrand():
fSetOligo(strand, olg, emit_signals=True) # emits strandHasNewOligoSignal
# Add new oligo and remove old oligos
olg.addToPart(ss.part(), emit_signals=True)
l_olg.removeFromPart(emit_signals=True)
if h_olg != l_olg: # check if a loop was created
h_olg.removeFromPart(emit_signals=True)
# Emit Signals related to destruction and addition
s_low.strandRemovedSignal.emit(s_low)
s_high.strandRemovedSignal.emit(s_high)
ss.strandsetStrandAddedSignal.emit(ss, new_strand)
# end def
def undo(self):
ss = self._s_set
ss._document
s_low = self._strand_low
s_high = self._strand_high
new_strand = self._new_strand
fSetOligo = Strand.setOligo
olg = self._new_oligo
l_olg = self._s_low_oligo
h_olg = self._s_high_oligo
# Remove the new_strand from the s_set
ss._removeFromStrandList(new_strand, update_segments=False)
# Add old strands to the s_set (reusing idx, so order matters)
ss._addToStrandList(s_high, update_segments=False)
ss._addToStrandList(s_low)
# update connectivity of strands
sLcL = s_low.connectionLow()
if sLcL:
if (s_low.isForward() and sLcL.isForward()) or \
(not s_low.isForward() and not sLcL.isForward()):
sLcL.setConnectionHigh(s_low)
else:
sLcL.setConnectionLow(s_low)
sHcH = s_high.connectionHigh()
if sHcH:
if (s_high.isForward() and sHcH.isForward()) or \
(not s_high.isForward() and not sHcH.isForward()):
sHcH.setConnectionLow(s_high)
else:
sHcH.setConnectionHigh(s_high)
# Traverse the strands via 3'conns to assign the old oligo
for strand in l_olg.strand5p().generator3pStrand():
fSetOligo(strand, l_olg, emit_signals=True) # emits strandHasNewOligoSignal
for strand in h_olg.strand5p().generator3pStrand():
fSetOligo(strand, h_olg, emit_signals=True) # emits strandHasNewOligoSignal
# Remove new oligo and add old oligos
olg.removeFromPart(emit_signals=True)
l_olg.addToPart(s_low.part(), emit_signals=True)
if h_olg != l_olg:
h_olg.addToPart(s_high.part(), emit_signals=True)
# Emit Signals related to destruction and addition
new_strand.strandRemovedSignal.emit(new_strand)
ss.strandsetStrandAddedSignal.emit(ss, s_low)
ss.strandsetStrandAddedSignal.emit(ss, s_high)
# end def
# end class
|
the-stack_0_22968 | # BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
"""
This module defines a versionless model for ``TList``.
"""
from __future__ import absolute_import
import struct
try:
from collections.abc import Sequence
except ImportError:
from collections import Sequence
import uproot
_tlist_format1 = struct.Struct(">i")
class Model_TList(uproot.model.Model, Sequence):
"""
A versionless :doc:`uproot.model.Model` for ``TList``.
"""
def read_members(self, chunk, cursor, context, file):
if self.is_memberwise:
raise NotImplementedError(
"""memberwise serialization of {0}
in file {1}""".format(
type(self).__name__, self.file.file_path
)
)
self._bases.append(
uproot.models.TObject.Model_TObject.read(
chunk,
cursor,
context,
file,
self._file,
self._parent,
concrete=self.concrete,
)
)
self._members["fName"] = cursor.string(chunk, context)
self._members["fSize"] = cursor.field(chunk, _tlist_format1, context)
self._starts = []
self._data = []
self._options = []
self._stops = []
for _ in uproot._util.range(self._members["fSize"]):
self._starts.append(cursor.index)
item = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self._parent
)
self._data.append(item)
self._options.append(cursor.bytestring(chunk, context))
self._stops.append(cursor.index)
def __repr__(self):
if self.class_version is None:
version = ""
else:
version = " (version {0})".format(self.class_version)
return "<{0}{1} of {2} items at 0x{3:012x}>".format(
self.classname,
version,
len(self),
id(self),
)
def __getitem__(self, where):
return self._data[where]
def __len__(self):
return len(self._data)
@property
def byte_ranges(self):
return zip(self._starts, self._stops)
def tojson(self):
return {
"_typename": "TList",
"name": "TList",
"arr": [x.tojson() for x in self._data],
"opt": [],
}
writable = True
def _to_writable_postprocess(self, original):
self._data = original._data
self._options = original._options
def _serialize(self, out, header, name, tobject_flags):
import uproot.writing._cascade
where = len(out)
for x in self._bases:
x._serialize(out, True, None, tobject_flags)
out.append(uproot.serialization.string(self._members["fName"]))
out.append(_tlist_format1.pack(self._members["fSize"]))
for datum, option in zip(self._data, self._options):
uproot.serialization._serialize_object_any(out, datum, None)
out.append(option)
if header:
num_bytes = sum(len(x) for x in out[where:])
version = 5
out.insert(where, uproot.serialization.numbytes_version(num_bytes, version))
uproot.classes["TList"] = Model_TList
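# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). Once registered in
# ``uproot.classes`` above, any ``TList`` read from a ROOT file is deserialized
# with Model_TList. The ``path`` and ``key`` arguments below are caller-supplied
# placeholders; the sequence and JSON behaviour relied on is exactly what the
# class above defines.
def _example_read_tlist(path, key):
    """Return the first item and JSON form of the TList stored under *key*.

    Illustrative helper only; not part of uproot's public API.
    """
    with uproot.open(path) as f:                 # standard uproot entry point
        tlist = f[key]                           # deserialized through Model_TList
        first = tlist[0] if len(tlist) else None # Sequence protocol defined above
        return first, tlist.tojson()             # tojson() structure defined above
# ---------------------------------------------------------------------------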
|
the-stack_0_22970 | import codecs
import os
import re
from setuptools import setup
def _get_version():
PATH_TO_INIT_PY = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "aiohttp_jinja2", "__init__.py"
)
with codecs.open(PATH_TO_INIT_PY, "r", "latin1") as fp:
try:
for line in fp.readlines():
if line:
line = line.strip()
version = re.findall(r'^__version__ = "([^"]+)"$', line, re.M)
if version:
return version[0]
except IndexError:
raise RuntimeError("Unable to determine version.")
version = _get_version()
def read(f):
return open(os.path.join(os.path.dirname(__file__), f)).read().strip()
install_requires = [
"aiohttp>=3.6.3",
"jinja2>=3.0.0",
'typing_extensions>=3.7.4; python_version<"3.8"',
]
setup(
name="aiohttp-jinja2",
version=version,
description=(
"jinja2 template renderer for aiohttp.web " "(http server for asyncio)"
),
long_description="\n\n".join((read("README.rst"), read("CHANGES.rst"))),
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Development Status :: 5 - Production/Stable",
"Topic :: Internet :: WWW/HTTP",
"Framework :: AsyncIO",
"Framework :: aiohttp",
],
author="Andrew Svetlov",
author_email="[email protected]",
url="https://github.com/aio-libs/aiohttp_jinja2/",
license="Apache 2",
packages=["aiohttp_jinja2"],
python_requires=">=3.6",
install_requires=install_requires,
include_package_data=True,
)
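# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original setup.py): the version-detection
# regex in _get_version() matches a line of the form shown below. The sample
# string is hypothetical; only the regex itself comes from the code above.
def _example_version_match(sample='__version__ = "1.5.0"'):
    found = re.findall(r'^__version__ = "([^"]+)"$', sample, re.M)
    return found[0] if found else None   # -> "1.5.0" for the sample line
# ---------------------------------------------------------------------------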
|
the-stack_0_22972 | #!/usr/bin/env python3
import platform
import os
if __name__ == '__main__':
channel = input("Enter a Telegram channel in the form of ID (number) or @channelname:\n")
api_token = input("Enter a Telegram bot API token:\n")
rtime = input("Enter how long the bot should run in seconds (0 if bot should run infinitely):\n")
stime = input("Enter the hour when story notifications should be sent:\n")
debugging_mode = input("Enter if debugging mode should be activated: true/false\n")
user_ids = []
while True:
user_id = input("Enter a user ID:\n")
user_ids.append(user_id)
confirm = input("Do you want to add another user ID? Yes/No\n")
if str(confirm) in ["no", "No", "NO", "n", "0"]:
break
script = "bin/python/requesthandler.py instabot"
user_ids_string_raw = ""
for user_id in user_ids:
user_ids_string_raw = user_ids_string_raw + str(user_id) + ";"
user_ids_string = user_ids_string_raw.rstrip(";")
if "Windows" in platform.system():
cmd = "python " + script + " " + channel + " " + api_token + " " + rtime + " " + stime + " " + debugging_mode + " " + user_ids_string
os.system(cmd)
else:
cmd = "python3 " + script + " " + channel + " " + api_token + " " + rtime + " " + stime + " " + debugging_mode + " " + user_ids_string
os.system(cmd)
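# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original script): with hypothetical answers
# (@mychannel, a bot token, rtime 0, stime 18, debugging false, user IDs 1111
# and 2222) the command assembled above becomes, on non-Windows systems:
#
#   python3 bin/python/requesthandler.py instabot @mychannel <api-token> 0 18 false 1111;2222
#
# On Windows the only difference is "python" instead of "python3".
# ---------------------------------------------------------------------------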
|