on the right, the output is to be # B x N x F (the multiplication is not along the N dimension), so we reshape # z to be B x N x E x K x F and reshape it to B x N x EKG (remember we # always reshape the last dimensions), and then make h be E x K x F x G and # reshape it to EKF x G, and then multiply y = torch.matmul(z.permute(0, 4, 1, 2, 3).reshape([B, N, E * K * F]), h.reshape([F, E * K * G]).permute(1, 0)).permute(0, 2, 1) # And permute againt to bring it from B x N x G to B x G x N. # Finally, add the bias if b is not None: y = y + b return y class GraphFilterBatch(nn.Module): """ GraphFilter Creates a (linear) layer that applies a graph filter Initialization: GraphFilter(in_features, out_features, filter_taps, edge_features=1, bias=True) Inputs: in_features (int): number of input features (each feature is a graph signal) out_features (int): number of output features (each feature is a graph signal) filter_taps (int): number of filter taps edge_features (int): number of features over each edge bias (bool): add bias vector (one bias per feature) after graph filtering Output: torch.nn.Module for a graph filtering layer (also known as graph convolutional layer). Observation: Filter taps have shape out_features x edge_features x filter_taps x in_features Add graph shift operator: GraphFilter.addGSO(GSO) Before applying the filter, we need to define the GSO that we are going to use. This allows to change the GSO while using the same filtering coefficients (as long as the number of edge features is the same; but the number of nodes can change). Inputs: GSO (torch.tensor): graph shift operator; shape: Batch edge_features x number_nodes x number_nodes Forward call: y = GraphFilter(x) Inputs: x (torch.tensor): input data; shape: batch_size x in_features x number_nodes Outputs: y (torch.tensor): output; shape: batch_size x out_features x number_nodes """ def __init__(self, F, G, K, E=1, bias=True): # K: Number of filter taps # GSOs will be added later. # This combines both weight scalars and weight vectors. # Bias will always be shared and scalar. # Initialize parent super().__init__() # Save parameters: self.F = F self.G = G self.K = K self.E = E self.S = None # No GSO assigned yet # Create parameters: self.weight = nn.parameter.Parameter(torch.Tensor(G, E, K, F)) if bias: self.bias = nn.parameter.Parameter(torch.Tensor(G, 1)) else: self.register_parameter('bias', None) # Initialize parameters self.reset_parameters() def reset_parameters(self): # Taken from _ConvNd initialization of parameters: stdv = 1. / math.sqrt(self.F * self.K) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def addGSO(self, S): # Every S has 4 dimensions. 
assert len(S.shape) == 4 # S is of shape B x E x N x N assert S.shape[1] == self.E self.N = S.shape[2] assert S.shape[3] == self.N self.S = S def forward(self, x): # x is of shape: batchSize x dimInFeatures x numberNodesIn B = x.shape[0] F = x.shape[1] Nin = x.shape[2] # And now we add the zero padding if Nin < self.N: x = torch.cat((x, torch.zeros(B, F, self.N - Nin) \ .type(x.dtype).to(x.device) ), dim=2) # Compute the filter output u = BatchLSIGF(self.weight, self.S, x, self.bias) # So far, u is of shape batchSize x dimOutFeatures x numberNodes # And we want to return a tensor of shape # batchSize x dimOutFeatures x numberNodesIn # since the nodes between numberNodesIn and numberNodes are not required if Nin < self.N: u = torch.index_select(u, 2, torch.arange(Nin).to(u.device)) return u def extra_repr(self): reprString = "in_features=%d, out_features=%d, " % ( self.F, self.G) + "filter_taps=%d, " % ( self.K) + "edge_features=%d, " % (self.E) + \ "bias=%s, " % (self.bias is not None) if self.S is not None: reprString += "GSO stored" else: reprString += "no GSO stored" return reprString class GraphFilterRNNBatch(nn.Module): """ GraphFilter Creates a (linear) layer that applies a graph filter Initialization: GraphFilter(in_features, out_features, filter_taps, edge_features=1, bias=True) Inputs: in_features (int): number of input features (each feature is a graph signal) out_features (int): number of output features (each feature is a graph signal) filter_taps (int): number of filter taps edge_features (int): number of features over each edge bias (bool): add bias vector (one bias per feature) after graph filtering Output: torch.nn.Module for a graph filtering layer (also known as graph convolutional layer). Observation: Filter taps have shape out_features x edge_features x filter_taps x in_features Add graph shift operator: GraphFilter.addGSO(GSO) Before applying the filter, we need to define the GSO that we are going to use. This allows to change the GSO while using the same filtering coefficients (as long as the number of edge features is the same; but the number of nodes can change). Inputs: GSO (torch.tensor): graph shift operator; shape: Batch edge_features x number_nodes x number_nodes Forward call: y = GraphFilter(x) Inputs: x (torch.tensor): input data; shape: batch_size x in_features x number_nodes Outputs: y (torch.tensor): output; shape: batch_size x out_features x number_nodes """ def __init__(self, G, F, H, K, E=1, bias=True): # K: Number of filter taps # GSOs will be added later. # This combines both weight scalars and weight vectors. # Bias will always be shared and scalar. # Initialize parent super().__init__() # Save parameters: self.F = F self.G = G self.H = H self.K = K self.E = E self.S = None # No GSO assigned yet # Create parameters: self.weight_A = nn.parameter.Parameter(torch.Tensor(H, E, K, G)) self.weight_B = nn.parameter.Parameter(torch.Tensor(H, E, K, H)) self.weight_D = nn.parameter.Parameter(torch.Tensor(F, E, K, H)) if bias: self.bias_A = nn.parameter.Parameter(torch.Tensor(H, 1)) self.bias_B = nn.parameter.Parameter(torch.Tensor(H, 1)) self.bias_D = nn.parameter.Parameter(torch.Tensor(G, 1)) else: self.register_parameter('bias', None) # Initialize parameters self.reset_parameters() def reset_parameters(self): # Taken from _ConvNd initialization of parameters: stdv_a = 1. / math.sqrt(self.F * self.K) self.weight_A.data.uniform_(-stdv_a, stdv_a) if self.bias_A is not None: self.bias_A.data.uniform_(-stdv_a, stdv_a) stdv_b = 1. 
/ math.sqrt(self.H * self.K) self.weight_B.data.uniform_(-stdv_b, stdv_b) if self.bias_B is not None: self.bias_B.data.uniform_(-stdv_b, stdv_b) stdv_d = 1. / math.sqrt(self.H * self.K) self.weight_U.data.uniform_(-stdv_d, stdv_d) if self.bias_U is not None: self.bias_U.data.uniform_(-stdv_d, stdv_d) def addGSO(self, S): # Every S has 4 dimensions. assert len(S.shape) == 4 # S is of shape B x E x N x N assert S.shape[1] == self.E self.N = S.shape[2] assert S.shape[3] == self.N self.S = S def updateHiddenState(self, hiddenState): self.hiddenState = hiddenState def forward(self, x, hidden_prev): # x is of shape: batchSize x dimInFeatures x numberNodesIn B = x.shape[0] F = x.shape[1] Nin = x.shape[2] # And now we add the zero padding if Nin < self.N: x = torch.cat((x, torch.zeros(B, F, self.N - Nin) \ .type(x.dtype).to(x.device) ), dim=2) # Compute the filter output u_a = BatchLSIGF(self.weight_A, self.S, x, self.bias_A) u_b = BatchLSIGF(self.weight_B, self.S, self.hiddenState, self.bias_B) sigma = nn.ReLU(inplace=True) self.hiddenStateNext = sigma(u_a + u_b) u = BatchLSIGF(self.weight_D, self.S, self.hiddenStateNext, self.bias_D) self.updateHiddenState(self.hiddenStateNext) # So far, u is of shape batchSize x dimOutFeatures x numberNodes # And we want to return a tensor of shape # batchSize x dimOutFeatures x numberNodesIn # since the nodes between numberNodesIn and numberNodes are not required if Nin < self.N: u = torch.index_select(u, 2, torch.arange(Nin).to(u.device)) return u def extra_repr(self): reprString = "in_features=%d, out_features=%d, hidden_features=%d," % ( self.G, self.F, self.H) + "filter_taps=%d, " % ( self.K) + "edge_features=%d, " % (self.E) + \ "bias=%s, " % (self.bias_D is not None) if self.S is not None: reprString += "GSO stored" else: reprString += "no GSO stored" return reprString class NoActivation(nn.Module): """ NoActivation creates an activation layer that does nothing It is for completeness, to be able to switch between linear models and nonlinear models, without altering the entire architecture model Initialization: NoActivation() Output: torch.nn.Module for an empty activation layer Forward call: y = NoActivation(x) Inputs: x (torch.tensor): input data; shape: batch_size x dim_features x number_nodes Outputs: y (torch.tensor): activated data; shape: batch_size x dim_features x number_nodes """ def __init__(self): super().__init__() def forward(self, x): return x def extra_repr(self): reprString = "No
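The GraphFilterBatch docstring above fixes the expected tensor shapes: the GSO is batch_size x edge_features x number_nodes x number_nodes and the input signal is batch_size x in_features x number_nodes. A minimal usage sketch follows, assuming the classes above are importable from a module named graphML; the module name, dimensions, and random tensors are illustrative placeholders, not part of the original code.

import torch
from graphML import GraphFilterBatch  # assumed module name for the code above

B, E, N = 4, 1, 10           # batch size, edge features, number of nodes
F_in, G_out, K = 3, 5, 2     # input features, output features, filter taps

S = torch.rand(B, E, N, N)   # batched graph shift operator (B x E x N x N)
x = torch.rand(B, F_in, N)   # batched graph signals (B x F x N)

gf = GraphFilterBatch(F_in, G_out, K, E=E, bias=True)
gf.addGSO(S)                 # the GSO must be assigned before calling forward
y = gf(x)                    # output shape: B x G_out x N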
<filename>contrib/buildbot/server.py #!/usr/bin/env python3 # # Copyright (c) 2019 The Bitcoin ABC developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from build import BuildStatus, BuildTarget from deepmerge import always_merger from flask import abort, Flask, request from functools import wraps import hashlib import hmac import logging import os from phabricator_wrapper import ( BITCOIN_ABC_PROJECT_PHID, ) import re import shelve from shieldio import RasterBadge from shlex import quote from teamcity_wrapper import TeamcityRequestException import yaml # Some keywords used by TeamCity and tcWebHook SUCCESS = "success" FAILURE = "failure" RUNNING = "running" UNRESOLVED = "UNRESOLVED" LANDBOT_BUILD_TYPE = "BitcoinAbcLandBot" with open(os.path.join(os.path.dirname(__file__), 'resources', 'teamcity-icon-16.base64'), 'rb') as icon: BADGE_TC_BASE = RasterBadge( label='TC build', logo='data:image/png;base64,{}'.format( icon.read().strip().decode('utf-8')), ) BADGE_TRAVIS_BASE = RasterBadge( label='Travis build', logo='travis' ) def create_server(tc, phab, slackbot, travis, db_file_no_ext=None, jsonEncoder=None): # Create Flask app for use as decorator app = Flask("abcbot") app.logger.setLevel(logging.INFO) # json_encoder can be overridden for testing if jsonEncoder: app.json_encoder = jsonEncoder phab.setLogger(app.logger) tc.set_logger(app.logger) travis.set_logger(app.logger) # Optionally persistable database create_server.db = { # A collection of the known build targets 'diff_targets': {}, # Build status panel data 'panel_data': {}, # Whether the last status check of master was green 'master_is_green': True, } # If db_file_no_ext is not None, attempt to restore old database state if db_file_no_ext: app.logger.info( "Loading persisted state database with base name '{}'...".format(db_file_no_ext)) try: with shelve.open(db_file_no_ext, flag='r') as db: for key in create_server.db.keys(): if key in db: create_server.db[key] = db[key] app.logger.info( "Restored key '{}' from persisted state".format(key)) except BaseException: app.logger.info( "Persisted state database with base name '{}' could not be opened. A new one will be created when written to.".format(db_file_no_ext)) app.logger.info("Done") else: app.logger.warning( "No database file specified. State will not be persisted.") def persistDatabase(fn): @wraps(fn) def decorated_function(*args, **kwargs): fn_ret = fn(*args, **kwargs) # Persist database after executed decorated function if db_file_no_ext: with shelve.open(db_file_no_ext) as db: for key in create_server.db.keys(): db[key] = create_server.db[key] app.logger.debug("Persisted current state") else: app.logger.debug( "No database file specified. Persisting state is being skipped.") return fn_ret return decorated_function # This decorator specifies an HMAC secret environment variable to use for verifying # requests for the given route. Currently, we're using Phabricator to trigger these # routes as webhooks, and a separate HMAC secret is required for each hook. # Phabricator does not support basic auth for webhooks, so HMAC must be # used instead. 
def verify_hmac(secret_env): def decorator(fn): @wraps(fn) def decorated_function(*args, **kwargs): secret = os.getenv(secret_env, None) if not secret: app.logger.info( "Error: HMAC env variable '{}' does not exist".format(secret_env)) abort(401) data = request.get_data() digest = hmac.new( secret.encode(), data, hashlib.sha256).hexdigest() hmac_header = request.headers.get( 'X-Phabricator-Webhook-Signature') if not hmac_header: abort(401) if not hmac.compare_digest( digest.encode(), hmac_header.encode()): abort(401) return fn(*args, **kwargs) return decorated_function return decorator def get_json_request_data(request): if not request.is_json: abort(415, "Expected content-type is 'application/json'") return request.get_json() @app.route("/getCurrentUser", methods=['GET']) def getCurrentUser(): return request.authorization.username if request.authorization else None @app.route("/backportCheck", methods=['POST']) @verify_hmac('HMAC_BACKPORT_CHECK') def backportCheck(): data = get_json_request_data(request) revisionId = data['object']['phid'] revisionSearchArgs = { "constraints": { "phids": [revisionId], }, } data_list = phab.differential.revision.search( **revisionSearchArgs).data assert len(data_list) == 1, "differential.revision.search({}): Expected 1 revision, got: {}".format( revisionSearchArgs, data_list) summary = data_list[0]['fields']['summary'] foundPRs = 0 multilineCodeBlockDelimiters = 0 newSummary = "" for line in summary.splitlines(keepends=True): multilineCodeBlockDelimiters += len(re.findall(r'```', line)) # Only link PRs that do not reside in code blocks if multilineCodeBlockDelimiters % 2 == 0: def replacePRWithLink(baseUrl): def repl(match): nonlocal foundPRs # This check matches identation-based code blocks (2+ spaces) # and common cases for single-line code blocks (using # both single and triple backticks) if match.string.startswith(' ') or len( re.findall(r'`', match.string[:match.start()])) % 2 > 0: # String remains unchanged return match.group(0) else: # Backport PR is linked inline foundPRs += 1 PRNum = match.group(1) remaining = '' if len(match.groups()) >= 2: remaining = match.group(2) return '[[{}/{} | PR{}]]{}'.format( baseUrl, PRNum, PRNum, remaining) return repl line = re.sub( r'PR[ #]*(\d{3}\d+)', replacePRWithLink( 'https://github.com/bitcoin/bitcoin/pull'), line) # Be less aggressive about serving libsecp256k1 links. Check # for some reference to the name first. 
if re.search('secp', line, re.IGNORECASE): line = re.sub(r'PR[ #]*(\d{2}\d?)([^\d]|$)', replacePRWithLink( 'https://github.com/bitcoin-core/secp256k1/pull'), line) newSummary += line if foundPRs > 0: phab.updateRevisionSummary(revisionId, newSummary) return SUCCESS, 200 @app.route("/build", methods=['POST']) @persistDatabase def build(): buildTypeId = request.args.get('buildTypeId', None) ref = request.args.get('ref', 'refs/heads/master') PHID = request.args.get('PHID', None) abcBuildName = request.args.get('abcBuildName', None) properties = None if abcBuildName: properties = [{ 'name': 'env.ABC_BUILD_NAME', 'value': abcBuildName, }] build_id = tc.trigger_build(buildTypeId, ref, PHID, properties)['id'] if PHID in create_server.db['diff_targets']: build_target = create_server.db['diff_targets'][PHID] else: build_target = BuildTarget(PHID) build_target.queue_build(build_id, abcBuildName) create_server.db['diff_targets'][PHID] = build_target return SUCCESS, 200 @app.route("/buildDiff", methods=['POST']) @persistDatabase def build_diff(): def get_mandatory_argument(argument): value = request.args.get(argument, None) if value is None: raise AssertionError( "Calling /buildDiff endpoint with missing mandatory argument {}:\n{}".format( argument, request.args ) ) return value staging_ref = get_mandatory_argument('stagingRef') target_phid = get_mandatory_argument('targetPHID') revision_id = get_mandatory_argument('revisionId') # Get the configuration from master config = yaml.safe_load(phab.get_file_content_from_master( "contrib/teamcity/build-configurations.yml")) # Get the list of changed files changedFiles = phab.get_revision_changed_files( revision_id=revision_id) # Get a list of the templates, if any templates = config.get("templates", {}) # Get a list of the builds that should run on diffs builds = [] for build_name, v in config.get('builds', {}).items(): # Merge the templates template_config = {} template_names = v.get("templates", []) for template_name in template_names: # Raise an error if the template does not exist if template_name not in templates: raise AssertionError( "Build {} configuration inherits from template {}, but the template does not exist.".format( build_name, template_name ) ) always_merger.merge( template_config, templates.get(template_name)) # Retrieve the full build configuration by applying the templates build_config = always_merger.merge(template_config, v) diffRegexes = build_config.get('runOnDiffRegex', None) if build_config.get('runOnDiff', False) or diffRegexes is not None: if diffRegexes: # If the regex matches at least one changed file, add this # build to the list. 
def regexesMatchAnyFile(regexes, files): for regex in regexes: for filename in files: if re.match(regex, filename): return True return False if regexesMatchAnyFile(diffRegexes, changedFiles): builds.append(build_name) else: builds.append(build_name) if target_phid in create_server.db['diff_targets']: build_target = create_server.db['diff_targets'][target_phid] else: build_target = BuildTarget(target_phid) for build_name in builds: properties = [{ 'name': 'env.ABC_BUILD_NAME', 'value': build_name, }, { 'name': 'env.ABC_REVISION', 'value': revision_id, }] build_id = tc.trigger_build( 'BitcoinABC_BitcoinAbcStaging', staging_ref, target_phid, properties)['id'] build_target.queue_build(build_id, build_name) if len(build_target.builds) > 0: create_server.db['diff_targets'][target_phid] = build_target else: phab.update_build_target_status(build_target) return SUCCESS, 200 @app.route("/land", methods=['POST']) def land(): data = get_json_request_data(request) revision = data['revision'] if not revision: return FAILURE, 400 # conduitToken is expected to be encrypted and will be decrypted by the # land bot. conduitToken = data['conduitToken'] if not conduitToken: return FAILURE, 400 committerName = data['committerName'] if not committerName: return FAILURE, 400 committerEmail = data['committerEmail'] if not committerEmail: return FAILURE, 400 properties = [{ 'name': 'env.ABC_REVISION', 'value': revision, }, { 'name': 'env.ABC_CONDUIT_TOKEN', 'value': conduitToken, }, { 'name': 'env.ABC_COMMITTER_NAME', 'value': committerName, }, { 'name': 'env.ABC_COMMITTER_EMAIL', 'value': committerEmail, }] output = tc.trigger_build( LANDBOT_BUILD_TYPE, 'refs/heads/master', UNRESOLVED, properties) if output: return output return FAILURE, 500 @app.route("/triggerCI", methods=['POST']) @verify_hmac('HMAC_TRIGGER_CI') def triggerCI(): data = get_json_request_data(request) app.logger.info("Received /triggerCI POST:\n{}".format(data)) # We expect a webhook with an edited object and a list of transactions. if "object" not in data or "transactions" not in data: return FAILURE, 400 data_object = data["object"] if "type" not in data_object or "phid" not in data_object: return FAILURE, 400 # We are searching for a specially crafted comment to trigger a CI # build. Only comments on revision should be parsed. Also if there is # no transaction, or the object is not what we expect, just return. if data_object["type"] != "DREV" or not data.get('transactions', []): return SUCCESS, 200 revision_PHID = data_object["phid"] # Retrieve the transactions details from their PHIDs transaction_PHIDs = [t["phid"] for t in data["transactions"] if "phid" in t] transactions = phab.transaction.search( objectIdentifier=revision_PHID, constraints={ "phids": transaction_PHIDs, } ).data # Extract the comments from the transaction list. Each transaction # contains a list of comments. comments = [c for t in transactions if t["type"] == "comment" for c in t["comments"]] # If there is no comment we have no interest in this webhook if not comments: return SUCCESS, 200 # Check if there is a specially crafted comment that should trigger a # CI build. Format: # @bot <build_name> [build_name ...] def get_builds_from_comment(comment): tokens = comment.split() if not tokens or tokens.pop(0) != "@bot": return [] # Escape to prevent shell injection and remove
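The verify_hmac decorator above authenticates webhooks by recomputing an HMAC-SHA256 digest of the raw request body with a shared secret and comparing it against the X-Phabricator-Webhook-Signature header. Below is a client-side sketch of a request that would pass that check; the endpoint URL, secret value, payload contents, and use of the requests library are assumptions for illustration only.

import hashlib
import hmac
import json
import requests  # assumption: available in the client environment

url = "http://localhost:8080/triggerCI"   # hypothetical endpoint
secret = b"shared-secret"                 # must match the HMAC_TRIGGER_CI env variable
payload = json.dumps({
    "object": {"type": "DREV", "phid": "PHID-DREV-example"},
    "transactions": [],
}).encode()
# Sign the raw body exactly as the server recomputes it in verify_hmac()
signature = hmac.new(secret, payload, hashlib.sha256).hexdigest()
requests.post(url, data=payload, headers={
    "Content-Type": "application/json",
    "X-Phabricator-Webhook-Signature": signature,
})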
'int'}, 'test_case_id': {'key': 'testCaseId', 'type': 'int'} } def __init__(self, child_suite_id=None, sequence_number=None, suite_id=None, test_case_id=None): super(SuiteEntry, self).__init__() self.child_suite_id = child_suite_id self.sequence_number = sequence_number self.suite_id = suite_id self.test_case_id = test_case_id class SuiteEntryUpdateModel(Model): """SuiteEntryUpdateModel. :param child_suite_id: Id of child suite in a suite :type child_suite_id: int :param sequence_number: Updated sequence number for the test case or child suite in the suite :type sequence_number: int :param test_case_id: Id of a test case in a suite :type test_case_id: int """ _attribute_map = { 'child_suite_id': {'key': 'childSuiteId', 'type': 'int'}, 'sequence_number': {'key': 'sequenceNumber', 'type': 'int'}, 'test_case_id': {'key': 'testCaseId', 'type': 'int'} } def __init__(self, child_suite_id=None, sequence_number=None, test_case_id=None): super(SuiteEntryUpdateModel, self).__init__() self.child_suite_id = child_suite_id self.sequence_number = sequence_number self.test_case_id = test_case_id class SuiteTestCase(Model): """SuiteTestCase. :param point_assignments: :type point_assignments: list of :class:`PointAssignment <test.v4_1.models.PointAssignment>` :param test_case: :type test_case: :class:`WorkItemReference <test.v4_1.models.WorkItemReference>` """ _attribute_map = { 'point_assignments': {'key': 'pointAssignments', 'type': '[PointAssignment]'}, 'test_case': {'key': 'testCase', 'type': 'WorkItemReference'} } def __init__(self, point_assignments=None, test_case=None): super(SuiteTestCase, self).__init__() self.point_assignments = point_assignments self.test_case = test_case class SuiteUpdateModel(Model): """SuiteUpdateModel. :param default_configurations: :type default_configurations: list of :class:`ShallowReference <test.v4_1.models.ShallowReference>` :param default_testers: :type default_testers: list of :class:`ShallowReference <test.v4_1.models.ShallowReference>` :param inherit_default_configurations: :type inherit_default_configurations: bool :param name: :type name: str :param parent: :type parent: :class:`ShallowReference <test.v4_1.models.ShallowReference>` :param query_string: :type query_string: str """ _attribute_map = { 'default_configurations': {'key': 'defaultConfigurations', 'type': '[ShallowReference]'}, 'default_testers': {'key': 'defaultTesters', 'type': '[ShallowReference]'}, 'inherit_default_configurations': {'key': 'inheritDefaultConfigurations', 'type': 'bool'}, 'name': {'key': 'name', 'type': 'str'}, 'parent': {'key': 'parent', 'type': 'ShallowReference'}, 'query_string': {'key': 'queryString', 'type': 'str'} } def __init__(self, default_configurations=None, default_testers=None, inherit_default_configurations=None, name=None, parent=None, query_string=None): super(SuiteUpdateModel, self).__init__() self.default_configurations = default_configurations self.default_testers = default_testers self.inherit_default_configurations = inherit_default_configurations self.name = name self.parent = parent self.query_string = query_string class TeamContext(Model): """TeamContext. :param project: The team project Id or name. Ignored if ProjectId is set. :type project: str :param project_id: The Team Project ID. Required if Project is not set. :type project_id: str :param team: The Team Id or name. Ignored if TeamId is set. 
:type team: str :param team_id: The Team Id :type team_id: str """ _attribute_map = { 'project': {'key': 'project', 'type': 'str'}, 'project_id': {'key': 'projectId', 'type': 'str'}, 'team': {'key': 'team', 'type': 'str'}, 'team_id': {'key': 'teamId', 'type': 'str'} } def __init__(self, project=None, project_id=None, team=None, team_id=None): super(TeamContext, self).__init__() self.project = project self.project_id = project_id self.team = team self.team_id = team_id class TeamProjectReference(Model): """TeamProjectReference. :param abbreviation: Project abbreviation. :type abbreviation: str :param description: The project's description (if any). :type description: str :param id: Project identifier. :type id: str :param name: Project name. :type name: str :param revision: Project revision. :type revision: long :param state: Project state. :type state: object :param url: Url to the full version of the object. :type url: str :param visibility: Project visibility. :type visibility: object """ _attribute_map = { 'abbreviation': {'key': 'abbreviation', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'revision': {'key': 'revision', 'type': 'long'}, 'state': {'key': 'state', 'type': 'object'}, 'url': {'key': 'url', 'type': 'str'}, 'visibility': {'key': 'visibility', 'type': 'object'} } def __init__(self, abbreviation=None, description=None, id=None, name=None, revision=None, state=None, url=None, visibility=None): super(TeamProjectReference, self).__init__() self.abbreviation = abbreviation self.description = description self.id = id self.name = name self.revision = revision self.state = state self.url = url self.visibility = visibility class TestAttachment(Model): """TestAttachment. :param attachment_type: :type attachment_type: object :param comment: :type comment: str :param created_date: :type created_date: datetime :param file_name: :type file_name: str :param id: :type id: int :param size: :type size: long :param url: :type url: str """ _attribute_map = { 'attachment_type': {'key': 'attachmentType', 'type': 'object'}, 'comment': {'key': 'comment', 'type': 'str'}, 'created_date': {'key': 'createdDate', 'type': 'iso-8601'}, 'file_name': {'key': 'fileName', 'type': 'str'}, 'id': {'key': 'id', 'type': 'int'}, 'size': {'key': 'size', 'type': 'long'}, 'url': {'key': 'url', 'type': 'str'} } def __init__(self, attachment_type=None, comment=None, created_date=None, file_name=None, id=None, size=None, url=None): super(TestAttachment, self).__init__() self.attachment_type = attachment_type self.comment = comment self.created_date = created_date self.file_name = file_name self.id = id self.size = size self.url = url class TestAttachmentReference(Model): """TestAttachmentReference. :param id: :type id: int :param url: :type url: str """ _attribute_map = { 'id': {'key': 'id', 'type': 'int'}, 'url': {'key': 'url', 'type': 'str'} } def __init__(self, id=None, url=None): super(TestAttachmentReference, self).__init__() self.id = id self.url = url class TestAttachmentRequestModel(Model): """TestAttachmentRequestModel. 
:param attachment_type: :type attachment_type: str :param comment: :type comment: str :param file_name: :type file_name: str :param stream: :type stream: str """ _attribute_map = { 'attachment_type': {'key': 'attachmentType', 'type': 'str'}, 'comment': {'key': 'comment', 'type': 'str'}, 'file_name': {'key': 'fileName', 'type': 'str'}, 'stream': {'key': 'stream', 'type': 'str'} } def __init__(self, attachment_type=None, comment=None, file_name=None, stream=None): super(TestAttachmentRequestModel, self).__init__() self.attachment_type = attachment_type self.comment = comment self.file_name = file_name self.stream = stream class TestCaseResult(Model): """TestCaseResult. :param afn_strip_id: :type afn_strip_id: int :param area: :type area: :class:`ShallowReference <test.v4_1.models.ShallowReference>` :param associated_bugs: :type associated_bugs: list of :class:`ShallowReference <test.v4_1.models.ShallowReference>` :param automated_test_id: :type automated_test_id: str :param automated_test_name: :type automated_test_name: str :param automated_test_storage: :type automated_test_storage: str :param automated_test_type: :type automated_test_type: str :param automated_test_type_id: :type automated_test_type_id: str :param build: :type build: :class:`ShallowReference <test.v4_1.models.ShallowReference>` :param build_reference: :type build_reference: :class:`BuildReference <test.v4_1.models.BuildReference>` :param comment: :type comment: str :param completed_date: :type completed_date: datetime :param computer_name: :type computer_name: str :param configuration: :type configuration: :class:`ShallowReference <test.v4_1.models.ShallowReference>` :param created_date: :type created_date: datetime :param custom_fields: :type custom_fields: list of :class:`CustomTestField <test.v4_1.models.CustomTestField>` :param duration_in_ms: :type duration_in_ms: float :param error_message: :type error_message: str :param failing_since: :type failing_since: :class:`FailingSince <test.v4_1.models.FailingSince>` :param failure_type: :type failure_type: str :param id: :type id: int :param iteration_details: :type iteration_details: list of :class:`TestIterationDetailsModel <test.v4_1.models.TestIterationDetailsModel>` :param last_updated_by: :type last_updated_by: :class:`IdentityRef <test.v4_1.models.IdentityRef>` :param last_updated_date: :type last_updated_date: datetime :param outcome: :type outcome: str :param owner: :type owner: :class:`IdentityRef <test.v4_1.models.IdentityRef>` :param priority: :type priority: int :param project: :type project: :class:`ShallowReference <test.v4_1.models.ShallowReference>` :param release: :type release: :class:`ShallowReference <test.v4_1.models.ShallowReference>` :param release_reference: :type release_reference: :class:`ReleaseReference <test.v4_1.models.ReleaseReference>` :param reset_count: :type reset_count: int :param resolution_state: :type resolution_state: str :param resolution_state_id: :type resolution_state_id: int :param revision: :type revision: int :param run_by: :type run_by: :class:`IdentityRef <test.v4_1.models.IdentityRef>` :param stack_trace: :type stack_trace: str :param started_date: :type started_date: datetime :param state: :type state: str :param test_case: :type test_case: :class:`ShallowReference <test.v4_1.models.ShallowReference>` :param test_case_reference_id: :type test_case_reference_id: int :param test_case_title: :type test_case_title: str :param test_plan: :type test_plan: :class:`ShallowReference <test.v4_1.models.ShallowReference>` :param 
test_point: :type test_point: :class:`ShallowReference <test.v4_1.models.ShallowReference>` :param test_run: :type test_run: :class:`ShallowReference <test.v4_1.models.ShallowReference>` :param test_suite: :type test_suite: :class:`ShallowReference <test.v4_1.models.ShallowReference>` :param url: :type url: str """ _attribute_map = { 'afn_strip_id': {'key': 'afnStripId', 'type': 'int'}, 'area': {'key': 'area', 'type': 'ShallowReference'}, 'associated_bugs': {'key': 'associatedBugs', 'type': '[ShallowReference]'}, 'automated_test_id': {'key': 'automatedTestId', 'type': 'str'}, 'automated_test_name': {'key': 'automatedTestName', 'type': 'str'}, 'automated_test_storage': {'key': 'automatedTestStorage', 'type': 'str'}, 'automated_test_type': {'key': 'automatedTestType', 'type': 'str'}, 'automated_test_type_id': {'key': 'automatedTestTypeId', 'type': 'str'}, 'build': {'key': 'build', 'type': 'ShallowReference'}, 'build_reference': {'key': 'buildReference', 'type': 'BuildReference'}, 'comment': {'key': 'comment', 'type': 'str'}, 'completed_date': {'key': 'completedDate', 'type': 'iso-8601'}, 'computer_name': {'key': 'computerName', 'type': 'str'}, 'configuration': {'key': 'configuration', 'type': 'ShallowReference'}, 'created_date': {'key': 'createdDate', 'type': 'iso-8601'}, 'custom_fields': {'key': 'customFields', 'type': '[CustomTestField]'}, 'duration_in_ms': {'key': 'durationInMs', 'type': 'float'}, 'error_message': {'key': 'errorMessage', 'type': 'str'}, 'failing_since': {'key': 'failingSince', 'type': 'FailingSince'}, 'failure_type': {'key': 'failureType', 'type': 'str'}, 'id': {'key': 'id', 'type': 'int'}, 'iteration_details': {'key': 'iterationDetails', 'type': '[TestIterationDetailsModel]'}, 'last_updated_by': {'key': 'lastUpdatedBy', 'type': 'IdentityRef'}, 'last_updated_date': {'key': 'lastUpdatedDate', 'type': 'iso-8601'}, 'outcome': {'key': 'outcome', 'type': 'str'}, 'owner': {'key': 'owner', 'type': 'IdentityRef'}, 'priority': {'key': 'priority', 'type': 'int'}, 'project': {'key': 'project', 'type': 'ShallowReference'}, 'release': {'key': 'release', 'type': 'ShallowReference'}, 'release_reference': {'key': 'releaseReference', 'type': 'ReleaseReference'}, 'reset_count': {'key': 'resetCount', 'type': 'int'}, 'resolution_state': {'key': 'resolutionState', 'type': 'str'}, 'resolution_state_id': {'key': 'resolutionStateId', 'type': 'int'}, 'revision': {'key': 'revision', 'type': 'int'}, 'run_by': {'key': 'runBy', 'type': 'IdentityRef'}, 'stack_trace': {'key': 'stackTrace', 'type': 'str'}, 'started_date': {'key': 'startedDate', 'type': 'iso-8601'}, 'state': {'key': 'state', 'type': 'str'}, 'test_case': {'key': 'testCase', 'type': 'ShallowReference'}, 'test_case_reference_id': {'key': 'testCaseReferenceId', 'type': 'int'}, 'test_case_title': {'key': 'testCaseTitle', 'type': 'str'}, 'test_plan': {'key': 'testPlan', 'type': 'ShallowReference'}, 'test_point': {'key': 'testPoint', 'type': 'ShallowReference'}, 'test_run': {'key': 'testRun', 'type': 'ShallowReference'}, 'test_suite': {'key': 'testSuite', 'type': 'ShallowReference'}, 'url': {'key': 'url', 'type': 'str'} } def __init__(self, afn_strip_id=None, area=None, associated_bugs=None, automated_test_id=None, automated_test_name=None, automated_test_storage=None, automated_test_type=None, automated_test_type_id=None, build=None, build_reference=None, comment=None, completed_date=None, computer_name=None, configuration=None, created_date=None, custom_fields=None, duration_in_ms=None, error_message=None, failing_since=None, 
failure_type=None, id=None, iteration_details=None, last_updated_by=None, last_updated_date=None, outcome=None, owner=None, priority=None, project=None, release=None, release_reference=None, reset_count=None, resolution_state=None, resolution_state_id=None, revision=None, run_by=None, stack_trace=None, started_date=None, state=None, test_case=None, test_case_reference_id=None, test_case_title=None, test_plan=None, test_point=None, test_run=None, test_suite=None, url=None): super(TestCaseResult, self).__init__() self.afn_strip_id = afn_strip_id self.area = area self.associated_bugs = associated_bugs self.automated_test_id = automated_test_id self.automated_test_name = automated_test_name self.automated_test_storage = automated_test_storage self.automated_test_type = automated_test_type
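In the model classes above, _attribute_map ties each snake_case Python attribute to the camelCase key and wire type used by the REST API. As a rough illustration of what that mapping provides, here is a hand-rolled conversion for plain string fields; this is only a sketch, not the library's actual serializer.

# Illustrative only: mimic the attribute-to-wire-key mapping for TeamContext
ctx = TeamContext(project="Fabrikam-Fiber-TFVC", team="Quality assurance")
wire = {
    meta["key"]: getattr(ctx, attr)
    for attr, meta in TeamContext._attribute_map.items()
    if getattr(ctx, attr) is not None
}
# wire == {'project': 'Fabrikam-Fiber-TFVC', 'team': 'Quality assurance'}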
#!/usr/bin/env python import os import argparse import json import re import pathlib import networkx from jinja2 import Environment, FileSystemLoader from . import __version__ SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) class JsonSchema2Popo: """Converts a JSON Schema to a Plain Old Python Object class""" PYTHON_CLASS_TEMPLATE_FNAME = "python_class.tmpl" JS_CLASS_TEMPLATE_FNAME = "js_class.tmpl" GO_STRUCT_TEMPLATE_FNAME = "go_struct.tmpl" TEMPLATES = { "python": PYTHON_CLASS_TEMPLATE_FNAME, "js": JS_CLASS_TEMPLATE_FNAME, "go": GO_STRUCT_TEMPLATE_FNAME, } J2P_TYPES = { "string": str, "integer": int, "number": float, "object": type, "array": list, "boolean": bool, "null": None, } @staticmethod def flatten(something): if isinstance(something, (list, tuple, set, range)): for sub in something: yield from JsonSchema2Popo.flatten(sub) else: yield something def __init__( self, use_types=False, constructor_type_check=False, use_slots=False, generate_definitions=True, generate_root=True, translate_properties=False, language="python", namespace_path="", package_name="", custom_template="", ): self.list_used = False self.enum_used = False search_path = SCRIPT_DIR if not custom_template else os.getcwd() self.jinja = Environment( loader=FileSystemLoader(searchpath=search_path), trim_blocks=True ) self.jinja.filters["regex_replace"] = lambda s, find, replace: re.sub( find, replace, s ) self.use_types = use_types self.use_slots = use_slots self.constructor_type_check = constructor_type_check self.generate_root = generate_root self.generate_definitions = generate_definitions self.translate_properties = translate_properties self.language = language self.namespace_path = namespace_path self.package_name = package_name self.custom_template = custom_template self.definitions = [] def load(self, json_schema_file): self.process(json.load(json_schema_file)) def get_model_dependencies(self, model): deps = set() for prop in model["properties"]: if prop["_type"]["type"] not in self.J2P_TYPES.values(): deps.add(prop["_type"]["type"]) if prop["_type"]["subtype"] not in self.J2P_TYPES.values(): deps.add(prop["_type"]["subtype"]) return list(deps) def process(self, json_schema): if "definitions" in json_schema: for _obj_name, _obj in json_schema["definitions"].items(): model = self.definition_parser(_obj_name, _obj) self.definitions.append(model) # topological ordered dependencies g = networkx.DiGraph() models_map = {} for model in self.definitions: models_map[model["name"]] = model deps = self.get_model_dependencies(model) if not deps: g.add_edge(model["name"], "") for dep in deps: g.add_edge(model["name"], dep) self.definitions = [] if self.generate_definitions: # use lexicographical topo sort so that the generation order is stable for model_name in networkx.lexicographical_topological_sort(g): if model_name in models_map: # insert to front so that the sorting is reversed self.definitions.insert(0, models_map[model_name]) # create root object if there are some properties in the root if "title" in json_schema: root_object_name = "".join( x for x in json_schema["title"].title() if x.isalpha() ) else: root_object_name = "RootObject" if self.generate_root: root_model = self.definition_parser(root_object_name, json_schema) self.definitions.append(root_model) def definition_parser(self, _obj_name, _obj, sub_model=""): model = {"name": _obj_name, "subModels": [], "parent": sub_model} if "description" in _obj: model["comment"] = _obj["description"] join_str = "._" if self.translate_properties: join_str = "." 
sub_prefix = "_" if self.translate_properties: sub_prefix = "" if "$ref" in _obj and _obj["$ref"].startswith("#/definitions/"): # References defined at a top level should be copied from what it is referencing ref_path = _obj["$ref"].split("/")[2:] ref = join_str.join(ref_path) for model in self.definitions: if model["name"] in ref_path: subModels = model["subModels"] built_path = model["name"] i = 0 while i < len(subModels) and subModels: subModel = subModels[i] i = i + 1 if "subModels" in subModel: if self.strip_sub_prefix(subModel["name"]) in ref_path: built_path = built_path + "." + subModel["name"] subModels = subModel["subModels"] model = subModel i = 0 if built_path == ref: break if ref_path[len(ref_path) - 1] == self.strip_sub_prefix( model["name"] ): model = model.copy() model["name"] = _obj_name model["parent"] = sub_model return model print("Unable to find object refs for ", "/".join(ref_path)) if "type" in _obj: model["type"] = self.type_parser(_obj) model["text_type"] = _obj["type"] if "enum" in _obj: enum = {} for i, v in enumerate(_obj["enum"]): enum[v if "javaEnumNames" not in _obj else _obj["javaEnumNames"][i]] = v model["enum"] = enum self.enum_used = True if "extends" in _obj and "$ref" in _obj["extends"]: if _obj["extends"]["$ref"].endswith(".json"): with open(_obj["extends"]["$ref"], "r") as f: ref_file = json.load(f) self.process(ref_file) model["extends"] = ref_file["title"] else: ref_path = _obj["extends"]["$ref"].split("/")[2:] ref = join_str.join(ref_path) if sub_model and sub_model.endswith(_obj_name): subs = sub_model.split(".")[-1] ref = ref[len(sub_model) - len(subs) :] model["extends"] = ref model["properties"] = [] if "properties" in _obj: for _prop_name, _prop in _obj["properties"].items(): _type = self.type_parser(_prop) _default = None _comment = None if "default" in _prop: _default = _type["type"](_prop["default"]) if _type["type"] == str: _default = "'{}'".format(_default) if "description" in _prop: _comment = _prop["description"] read_list = self.definitions[:] read_list.append(model) def find_parent(path, model): return [ (path + "." + m["name"], find_parent(path + "." 
+ m["name"], m)) for m in model["subModels"] if "subModels" in m ] potential_paths = list( JsonSchema2Popo.flatten( [find_parent(model["name"], model) for model in read_list] ) ) parent_name = sub_model + join_str + _prop_name if not sub_model: parent_name = _obj_name + join_str + _prop_name for path in potential_paths: if path.endswith(parent_name) and len(path) > len(parent_name): parent_name = path if _type["type"] == list and _type["subtype"] == type: _type["subtype"] = sub_prefix + _prop_name _type["parent"] = parent_name model["subModels"].append( self.definition_parser( sub_prefix + _prop_name, _prop["items"], sub_model=parent_name, ) ) if "$ref" in _prop and _prop["$ref"].startswith("#/definitions/"): # Properties with references should reference the existing defined classes ref = _prop["$ref"].split("/")[2:] _type = {"type": join_str.join(ref), "subtype": None} if ("type" in _prop and _prop["type"] == "object") or "enum" in _prop: _type = { "type": sub_prefix + _prop_name, "subtype": None, "parent": parent_name, } sub_mod = self.definition_parser( sub_prefix + _prop_name, _prop, sub_model=parent_name ) # Only generate sub models when the sub model actually has properties, otherwise treat is as # a dict, which is what an object is to JSON if sub_mod["properties"]: model["subModels"].append(sub_mod) else: _type = { "type": dict, "subtype": None, } if "enum" in _prop: self.enum_used = True _format = None if "format" in _prop: _format = _prop["format"] if ( _type["type"] == list and "items" in _prop and isinstance(_prop["items"], list) ): _format = _prop["items"][0]["format"] _validations = {"required": False} validation_types = [ "maximum", "minimum", "maxItems", "minItems", "minLength", "maxLength", "pattern", ] for t in validation_types: if t in _prop: _validations[t] = _prop[t] if _type["type"] == list and "items" in _prop: array_validation = _prop["items"] if t in array_validation: _validations[t] = array_validation[t] if "required" in _obj and _prop_name in _obj["required"]: _validations["required"] = True prop = { "_name": self.get_prop_name(_prop_name), "_original_name": _prop_name, "_type": _type, "_default": _default, "_format": _format, "_comment": _comment, "_validations": _validations, } model["properties"].append(prop) model["propertiesHaveComment"] = any(p["_comment"] for p in model["properties"]) return model def type_parser(self, t): _type = None _subtype = None _subformat = None if "type" in t: if t["type"] == "array" and "items" in t: self.list_used = True _type = self.J2P_TYPES[t["type"]] if isinstance(t["items"], list): if "type" in t["items"][0]: _subtype = self.J2P_TYPES[t["items"][0]["type"]] if "format" in t["items"][0]: _subformat = t["items"][0]["format"] elif ( "$ref" in t["items"][0] or "oneOf" in t["items"][0] and len(t["items"][0]["oneOf"]) == 1 ): if "$ref" in t["items"][0]: ref = t["items"][0]["$ref"] else: ref = t["items"][0]["oneOf"][0]["$ref"] _subtype = ref.split("/")[-1] elif isinstance(t["items"], dict): if "type" in t["items"]: _subtype = self.J2P_TYPES[t["items"]["type"]] if "format" in t["items"]: _subformat = t["items"]["format"] elif ( "$ref" in t["items"] or "oneOf" in t["items"] and len(t["items"]["oneOf"]) == 1 ): if "$ref" in t["items"]: ref = t["items"]["$ref"] else: ref = t["items"]["oneOf"][0]["$ref"] _subtype = ref.split("/")[-1] elif isinstance(t["type"], list): _type = self.J2P_TYPES[t["type"][0]] elif t["type"]: _type = self.J2P_TYPES[t["type"]] if ( _type == str and "media" in t and "binaryEncoding" in t["media"] and 
t["media"]["binaryEncoding"] == "base64" ): _type = bytes elif "$ref" in t: _type = t["$ref"].split("/")[-1] elif "anyOf" in t or "allOf" in t or "oneOf" in t: _type = list return {"type": _type, "subtype": _subtype, "subformat": _subformat} def write_file(self, filename): template = self.custom_template or self.TEMPLATES[self.language] self.jinja.get_template(template).stream( models=self.definitions, use_types=self.use_types, constructor_type_check=self.constructor_type_check, enum_used=self.enum_used, list_used=self.list_used, use_slots=self.use_slots, namespace_path=self.namespace_path, package_name=self.package_name, ).dump(filename) if hasattr(filename, "close"): filename.close() def get_prop_name(self, name): if not self.translate_properties: return name s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name) return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower() def strip_sub_prefix(self, name): if self.translate_properties: return name return name.lstrip("_") def init_parser(): parser = argparse.ArgumentParser( description="Converts JSON Schema to Plain Old Python Object" ) parser.add_argument( "json_schema_file", type=argparse.FileType("r", encoding="utf-8"), help="Path to JSON Schema file to load", ) parser.add_argument( "-o", "--output-file", type=argparse.FileType("w", encoding="utf-8"), help="Path to file output", default="model.py", ) parser.add_argument( "-jt", "--custom-template", help="Path to custom Jinja template file", default="", ) parser.add_argument("-t", "--use-types", action="store_true", help="Add typings") parser.add_argument( "-ct", "--constructor-type-check", action="store_true", help="Validate input types in constructor", ) parser.add_argument( "-s", "--use_slots", action="store_true", help="Generate class with __slots__." ) parser.add_argument( "--no-generate-from-definitions", action="store_false", help='Don\'t generate classes from "definitions" section of the schema.', default=True, ) parser.add_argument( "--no-generate-from-root-object", action="store_false", help="Don't generate classes from root of the schema.", default=True, ) parser.add_argument( "-tp", "--translate-properties", action="store_true", help="Translate property names into snake_case.", ) parser.add_argument( "-l", "--language", choices=JsonSchema2Popo.TEMPLATES.keys(), help="Which language to generate in", default="python", ) parser.add_argument( "--namespace-path", help="Namespace path to be prepended to the @memberOf for JSDoc (only used for JS)", ) parser.add_argument( "--package-name", help="Package name for generated code (only used for Go)", default="generated", ) parser.add_argument( "--version", action="version", version="%(prog)s
# Copyright 2013 The Swarming Authors. All rights reserved. # Use of this source code is governed under the Apache License, Version 2.0 that # can be found in the LICENSE file. """subprocess42 is the answer to life the universe and everything. It has the particularity of having a Popen implementation that can yield output as it is produced while implementing a timeout and not requiring the use of worker threads. TODO(maruel): Add VOID and TIMED_OUT support like subprocess2. """ import contextlib import logging import os import signal import time import subprocess from subprocess import CalledProcessError, PIPE, STDOUT # pylint: disable=W0611 from subprocess import call, check_output # pylint: disable=W0611 # Default maxsize argument. MAX_SIZE = 16384 if subprocess.mswindows: import msvcrt # pylint: disable=F0401 from ctypes import wintypes from ctypes import windll # Which to be received depends on how this process was called and outside the # control of this script. See Popen docstring for more details. STOP_SIGNALS = (signal.SIGBREAK, signal.SIGTERM) def ReadFile(handle, desired_bytes): """Calls kernel32.ReadFile().""" c_read = wintypes.DWORD() buff = wintypes.create_string_buffer(desired_bytes+1) windll.kernel32.ReadFile( handle, buff, desired_bytes, wintypes.byref(c_read), None) # NULL terminate it. buff[c_read.value] = '\x00' return wintypes.GetLastError(), buff.value def PeekNamedPipe(handle): """Calls kernel32.PeekNamedPipe(). Simplified version.""" c_avail = wintypes.DWORD() c_message = wintypes.DWORD() success = windll.kernel32.PeekNamedPipe( handle, None, 0, None, wintypes.byref(c_avail), wintypes.byref(c_message)) if not success: raise OSError(wintypes.GetLastError()) return c_avail.value def recv_multi_impl(conns, maxsize, timeout): """Reads from the first available pipe. It will immediately return on a closed connection, independent of timeout. Arguments: - maxsize: Maximum number of bytes to return. Defaults to MAX_SIZE. - timeout: If None, it is blocking. If 0 or above, will return None if no data is available within |timeout| seconds. Returns: tuple(int(index), str(data), bool(closed)). """ assert conns assert timeout is None or isinstance(timeout, (int, float)), timeout maxsize = max(maxsize or MAX_SIZE, 1) # TODO(maruel): Use WaitForMultipleObjects(). Python creates anonymous pipes # for proc.stdout and proc.stderr but they are implemented as named pipes on # Windows. Since named pipes are not waitable object, they can't be passed # as-is to WFMO(). So this means N times CreateEvent(), N times ReadFile() # and finally WFMO(). This requires caching the events handles in the Popen # object and remembering the pending ReadFile() calls. This will require # some re-architecture to store the relevant event handle and OVERLAPPEDIO # object in Popen or the file object. start = time.time() handles = [ (i, msvcrt.get_osfhandle(c.fileno())) for i, c in enumerate(conns) ] while True: for index, handle in handles: try: avail = min(PeekNamedPipe(handle), maxsize) if avail: return index, ReadFile(handle, avail)[1], False except OSError: # The pipe closed. return index, None, True if timeout is not None and (time.time() - start) >= timeout: return None, None, False # Polling rocks. time.sleep(0.001) else: import fcntl # pylint: disable=F0401 import select # Signals that mean this process should exit quickly. STOP_SIGNALS = (signal.SIGINT, signal.SIGTERM) def recv_multi_impl(conns, maxsize, timeout): """Reads from the first available pipe. 
It will immediately return on a closed connection, independent of timeout. Arguments: - maxsize: Maximum number of bytes to return. Defaults to MAX_SIZE. - timeout: If None, it is blocking. If 0 or above, will return None if no data is available within |timeout| seconds. Returns: tuple(int(index), str(data), bool(closed)). """ assert conns assert timeout is None or isinstance(timeout, (int, float)), timeout maxsize = max(maxsize or MAX_SIZE, 1) # select(timeout=0) will block, it has to be a value > 0. if timeout == 0: timeout = 0.001 try: r, _, _ = select.select(conns, [], [], timeout) except select.error: r = None if not r: return None, None, False conn = r[0] # Temporarily make it non-blocking. # TODO(maruel): This is not very ifficient when the caller is doing this in # a loop. Add a mechanism to have the caller handle this. flags = fcntl.fcntl(conn, fcntl.F_GETFL) if not conn.closed: # pylint: disable=E1101 fcntl.fcntl(conn, fcntl.F_SETFL, flags | os.O_NONBLOCK) try: data = conn.read(maxsize) if not data: # On posix, this means the channel closed. return conns.index(conn), None, True return conns.index(conn), data, False finally: if not conn.closed: fcntl.fcntl(conn, fcntl.F_SETFL, flags) class Popen(subprocess.Popen): """Adds timeout support on stdout and stderr. Inspired by http://code.activestate.com/recipes/440554-module-to-allow-asynchronous-subprocess-use-on-win/ Arguments: - detached: If True, the process is created in a new process group. On Windows, use CREATE_NEW_PROCESS_GROUP. On posix, use os.setpgid(0, 0). Additional members: - start: timestamp when this process started. - end: timestamp when this process exited, as seen by this process. - detached: If True, the child process was started as a detached process. - gid: process group id, if any. """ def __init__(self, args, **kwargs): assert 'creationflags' not in kwargs assert 'preexec_fn' not in kwargs self.start = time.time() self.end = None self.gid = None self.detached = kwargs.pop('detached', False) if self.detached: if subprocess.mswindows: kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP else: kwargs['preexec_fn'] = lambda: os.setpgid(0, 0) super(Popen, self).__init__(args, **kwargs) if self.detached and not subprocess.mswindows: self.gid = os.getpgid(self.pid) def duration(self): """Duration of the child process. It is greater or equal to the actual time the child process ran. It can be significantly higher than the real value if neither .wait() nor .poll() was used. """ return (self.end or time.time()) - self.start def wait(self): ret = super(Popen, self).wait() if not self.end: # communicate() uses wait() internally. self.end = time.time() return ret def poll(self): ret = super(Popen, self).poll() if ret is not None and not self.end: self.end = time.time() return ret def yield_any(self, maxsize=None, hard_timeout=None, soft_timeout=None): """Yields output until the process terminates or is killed by a timeout. Yielded values are in the form (pipename, data). Arguments: - maxsize: See recv_any(). Can be a callable function. - hard_timeout: If None, the process is never killed. If set, the process is killed after |hard_timeout| seconds. Can be a callable function. - soft_timeout: If None, the call is blocking. If set, yields None, None if no data is available within |soft_timeout| seconds. It resets itself after each yield. Can be a callable function. """ if hard_timeout is not None: # hard_timeout=0 means the process is not even given a little chance to # execute and will be immediately killed. 
if isinstance(hard_timeout, (int, float)): assert hard_timeout > 0., hard_timeout old_hard_timeout = hard_timeout hard_timeout = lambda: old_hard_timeout if soft_timeout is not None: # soft_timeout=0 effectively means that the pipe is continuously polled. if isinstance(soft_timeout, (int, float)): assert soft_timeout >= 0, soft_timeout old_soft_timeout = soft_timeout soft_timeout = lambda: old_soft_timeout else: assert callable(soft_timeout), soft_timeout last_yield = time.time() while self.poll() is None: ms = maxsize if callable(maxsize): ms = maxsize() t, data = self.recv_any( maxsize=ms, timeout=self._calc_timeout(hard_timeout, soft_timeout, last_yield)) if data or soft_timeout is not None: yield t, data last_yield = time.time() if hard_timeout and self.duration() >= hard_timeout(): break if self.poll() is None and hard_timeout: logging.debug('Kill %s %s', self.duration(), hard_timeout()) self.kill() self.wait() # Read all remaining output in the pipes. # There is 3 cases: # - pipes get closed automatically by the calling process before it exits # - pipes are closed automated by the OS # - pipes are kept open due to grand-children processes outliving the # children process. while True: ms = maxsize if callable(maxsize): ms = maxsize() # timeout=0 is mainly to handle the case where a grand-children process # outlives the process started. t, data = self.recv_any(maxsize=ms, timeout=0) if not data: break yield t, data def _calc_timeout(self, hard_timeout, soft_timeout, last_yield): """Returns the timeout to be used on the next recv_any() in yield_any(). It depends on both timeout. It returns None if no timeout is used. Otherwise it returns a value >= 0.001. It's not 0 because it's effectively polling, on linux it can peg a single core, so adding 1ms sleep does a tremendous difference. """ hard_remaining = ( None if hard_timeout is None else max(hard_timeout() - self.duration(), 0)) soft_remaining = ( None if soft_timeout is None else max(soft_timeout() - (time.time() - last_yield), 0)) if hard_remaining is None: return soft_remaining if soft_remaining is None: return hard_remaining return min(hard_remaining, soft_remaining) def recv_any(self, maxsize=None, timeout=None): """Reads from the first pipe available from stdout and stderr.
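Popen.yield_any() above streams (pipename, data) tuples as the child process produces output, and kills the child once hard_timeout elapses. A minimal usage sketch, assuming the file above is importable as subprocess42 and using an illustrative child command:

import sys
import subprocess42

proc = subprocess42.Popen(
    [sys.executable, '-c', 'print("hello")'],
    stdout=subprocess42.PIPE, stderr=subprocess42.PIPE)
# Stream output until the child exits or 30 seconds have passed.
for pipe_name, data in proc.yield_any(hard_timeout=30):
    if data:
        print('%s: %r' % (pipe_name, data))
print('exit code: %d, duration: %.3fs' % (proc.returncode, proc.duration()))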
2*x/5 + 1, domain='QQ').all_coeffs() == [S(3)/5, S(2)/5, 1] assert _eq(Poly(3*x**2/5 + 2*x/5 + 1, domain='RR').all_coeffs(), [0.6, 0.4, 1.0]) assert Poly(3.0*x**2 + 2.0*x + 1, domain='ZZ').all_coeffs() == [3, 2, 1] assert Poly(3.0*x**2 + 2.0*x + 1, domain='QQ').all_coeffs() == [3, 2, 1] assert Poly(3.0*x**2 + 2.0*x + 1, domain='RR').all_coeffs() == [3.0, 2.0, 1.0] raises(CoercionFailed, "Poly(3.1*x**2 + 2.1*x + 1, domain='ZZ')") assert Poly(3.1*x**2 + 2.1*x + 1, domain='QQ').all_coeffs() == [S(31)/10, S(21)/10, 1] assert Poly(3.1*x**2 + 2.1*x + 1, domain='RR').all_coeffs() == [3.1, 2.1, 1.0] assert Poly({(2,1): 1, (1,2): 2, (1,1): 3}, x, y) == \ Poly(x**2*y + 2*x*y**2 + 3*x*y, x, y) assert Poly(x**2 + 1, extension=I).get_domain() == QQ.algebraic_field(I) f = 3*x**5 - x**4 + x**3 - x** 2 + 65538 assert Poly(f, x, modulus=65537, symmetric=True) == \ Poly(3*x**5 - x**4 + x**3 - x** 2 + 1, x, modulus=65537, symmetric=True) assert Poly(f, x, modulus=65537, symmetric=False) == \ Poly(3*x**5 + 65536*x**4 + x**3 + 65536*x** 2 + 1, x, modulus=65537, symmetric=False) def test_Poly__args(): assert Poly(x**2 + 1).args == [x**2 + 1] def test_Poly__gens(): assert Poly((x-p)*(x-q), x).gens == (x,) assert Poly((x-p)*(x-q), p).gens == (p,) assert Poly((x-p)*(x-q), q).gens == (q,) assert Poly((x-p)*(x-q), x, p).gens == (x, p) assert Poly((x-p)*(x-q), x, q).gens == (x, q) assert Poly((x-p)*(x-q), x, p, q).gens == (x, p, q) assert Poly((x-p)*(x-q), p, x, q).gens == (p, x, q) assert Poly((x-p)*(x-q), p, q, x).gens == (p, q, x) assert Poly((x-p)*(x-q)).gens == (x, p, q) assert Poly((x-p)*(x-q), sort='x < p < q').gens == (x, p, q) assert Poly((x-p)*(x-q), sort='p < x < q').gens == (p, x, q) assert Poly((x-p)*(x-q), sort='p < q < x').gens == (p, q, x) assert Poly((x-p)*(x-q), x, p, q, sort='p < q < x').gens == (x, p, q) assert Poly((x-p)*(x-q), wrt='x').gens == (x, p, q) assert Poly((x-p)*(x-q), wrt='p').gens == (p, x, q) assert Poly((x-p)*(x-q), wrt='q').gens == (q, x, p) assert Poly((x-p)*(x-q), wrt=x).gens == (x, p, q) assert Poly((x-p)*(x-q), wrt=p).gens == (p, x, q) assert Poly((x-p)*(x-q), wrt=q).gens == (q, x, p) assert Poly((x-p)*(x-q), x, p, q, wrt='p').gens == (x, p, q) assert Poly((x-p)*(x-q), wrt='p', sort='q < x').gens == (p, q, x) assert Poly((x-p)*(x-q), wrt='q', sort='p < x').gens == (q, p, x) def test_Poly_unify(): raises(UnificationFailed, "Poly(x).unify(y)") raises(UnificationFailed, "Poly(x, x, modulus=3).unify(Poly(x, x, modulus=5))") raises(UnificationFailed, "Poly(x, x, modulus=3).unify(Poly(y, y, modulus=3))") raises(UnificationFailed, "Poly(x, x, y).unify(Poly(x, x, modulus=3))") raises(UnificationFailed, "Poly(x, x, y).unify(Poly(x, x, modulus=3))") raises(UnificationFailed, "Poly(x, x, modulus=3).unify(Poly(x, x, y))") raises(UnificationFailed, "Poly(x, x, modulus=3).unify(Poly(x, x, y))") assert Poly(x+1, x).unify(Poly(x+2, x))[2:] == (DMP([1, 1], ZZ), DMP([1, 2], ZZ)) assert Poly(x+1, x, domain='QQ').unify(Poly(x+2, x))[2:] == (DMP([1, 1], QQ), DMP([1, 2], QQ)) assert Poly(x+1, x).unify(Poly(x+2, x, domain='QQ'))[2:] == (DMP([1, 1], QQ), DMP([1, 2], QQ)) assert Poly(x+1, x).unify(Poly(x+2, x, y))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ)) assert Poly(x+1, x, domain='QQ').unify(Poly(x+2, x, y))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ)) assert Poly(x+1, x).unify(Poly(x+2, x, y, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ)) assert Poly(x+1, x, y).unify(Poly(x+2, x))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ)) assert Poly(x+1, x, y, 
domain='QQ').unify(Poly(x+2, x))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ)) assert Poly(x+1, x, y).unify(Poly(x+2, x, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ)) assert Poly(x+1, x, y).unify(Poly(x+2, x, y))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ)) assert Poly(x+1, x, y, domain='QQ').unify(Poly(x+2, x, y))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ)) assert Poly(x+1, x, y).unify(Poly(x+2, x, y, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ)) assert Poly(x+1, x).unify(Poly(x+2, y, x))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ)) assert Poly(x+1, x, domain='QQ').unify(Poly(x+2, y, x))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ)) assert Poly(x+1, x).unify(Poly(x+2, y, x, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ)) assert Poly(x+1, y, x).unify(Poly(x+2, x))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ)) assert Poly(x+1, y, x, domain='QQ').unify(Poly(x+2, x))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ)) assert Poly(x+1, y, x).unify(Poly(x+2, x, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ)) assert Poly(x+1, x, y).unify(Poly(x+2, y, x))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ)) assert Poly(x+1, x, y, domain='QQ').unify(Poly(x+2, y, x))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ)) assert Poly(x+1, x, y).unify(Poly(x+2, y, x, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ)) assert Poly(x+1, y, x).unify(Poly(x+2, x, y))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ)) assert Poly(x+1, y, x, domain='QQ').unify(Poly(x+2, x, y))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ)) assert Poly(x+1, y, x).unify(Poly(x+2, x, y, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ)) assert Poly(2*x+5, x).unify(Poly(x+2, x, modulus=3))[2:] == (GFP([2, 2], 3, ZZ), GFP([1, 2], 3, ZZ)) assert Poly(x+2, x, modulus=3).unify(Poly(2*x+5, x))[2:] == (GFP([1, 2], 3, ZZ), GFP([2, 2], 3, ZZ)) assert Poly(x+5, x, modulus=3).unify(Poly(x+7, x, modulus=3))[2:] == (GFP([1, 2], 3, ZZ), GFP([1, 1], 3, ZZ)) assert Poly(x+5, x, modulus=3, symmetric=True).unify(Poly(x+7, x, modulus=3, symmetric=False))[2:] == \ (GFP([1, 2], 3, ZZ, symmetric=True), GFP([1, 1], 3, ZZ, symmetric=True)) def test_Poly__analyze_order(): assert Poly._analyze_order({}) is None assert Poly._analyze_order({'order': 'lex'}) == monomial_lex_cmp raises(ValueError, "Poly._analyze_order({'order': 1})") def test_Poly__analyze_domain(): assert Poly._analyze_domain({}) is None assert Poly._analyze_domain({'domain': ZZ}) == ZZ assert Poly._analyze_domain({'domain': 'ZZ'}) == ZZ def test_Poly__parse_domain(): assert Poly._parse_domain(ZZ) == ZZ assert Poly._parse_domain(QQ) == QQ assert Poly._parse_domain(EX) == EX assert Poly._parse_domain(ZZ[x,y]) == ZZ[x,y] assert Poly._parse_domain('Z') == ZZ assert Poly._parse_domain('Q') == QQ assert Poly._parse_domain('ZZ') == ZZ assert Poly._parse_domain('QQ') == QQ assert Poly._parse_domain('EX') == EX raises(ValueError, "Poly._parse_domain('Z[]')") assert Poly._parse_domain('Z[x]') == ZZ[x] assert Poly._parse_domain('Q[x]') == QQ[x] assert Poly._parse_domain('ZZ[x]') == ZZ[x] assert Poly._parse_domain('QQ[x]') == QQ[x] assert Poly._parse_domain('Z[x,y]') == ZZ[x,y] assert Poly._parse_domain('Q[x,y]') == QQ[x,y] assert Poly._parse_domain('ZZ[x,y]') == ZZ[x,y] assert Poly._parse_domain('QQ[x,y]') == QQ[x,y] raises(ValueError, "Poly._parse_domain('Z()')") assert Poly._parse_domain('Z(x)') == ZZ.frac_field(x) assert Poly._parse_domain('Q(x)') == QQ.frac_field(x) assert Poly._parse_domain('ZZ(x)') == 
ZZ.frac_field(x) assert Poly._parse_domain('QQ(x)') == QQ.frac_field(x) assert Poly._parse_domain('Z(x,y)') == ZZ.frac_field(x,y) assert Poly._parse_domain('Q(x,y)') == QQ.frac_field(x,y) assert Poly._parse_domain('ZZ(x,y)') == ZZ.frac_field(x,y) assert Poly._parse_domain('QQ(x,y)') == QQ.frac_field(x,y) assert Poly._parse_domain('Q<I>') == QQ.algebraic_field(I) assert Poly._parse_domain('QQ<I>') == QQ.algebraic_field(I) assert Poly._parse_domain('Q<sqrt(2), I>') == QQ.algebraic_field(sqrt(2), I) assert Poly._parse_domain('QQ<sqrt(2), I>') == QQ.algebraic_field(sqrt(2), I) def test_Poly_get_domain(): assert Poly(2*x).get_domain() == ZZ assert Poly(2*x, domain='ZZ').get_domain() == ZZ assert Poly(2*x, domain='QQ').get_domain() == QQ assert Poly(x/2).get_domain() == QQ raises(CoercionFailed, "Poly(x/2, domain='ZZ')") assert Poly(x/2, domain='QQ').get_domain() == QQ assert Poly(0.2*x).get_domain() == RR def test_Poly_set_domain(): assert Poly(2*x + 1).set_domain(ZZ) == Poly(2*x + 1) assert Poly(2*x + 1).set_domain('ZZ') == Poly(2*x + 1) assert Poly(2*x + 1).set_domain(QQ) == Poly(2*x + 1, domain='QQ') assert Poly(2*x + 1).set_domain('QQ') == Poly(2*x + 1, domain='QQ') assert Poly(S(2)/10*x + S(1)/10).set_domain('RR') == Poly(0.2*x + 0.1) assert Poly(0.2*x + 0.1).set_domain('QQ') == Poly(S(2)/10*x + S(1)/10) raises(CoercionFailed, "Poly(x/2 + 1).set_domain(ZZ)") raises(DomainError, "Poly(x + 1, modulus=2).set_domain(QQ)") def test_Poly__analyze_modulus(): assert Poly._analyze_modulus({}) is None assert Poly._analyze_modulus({'modulus': 2}) == 2 assert Poly._analyze_modulus({'modulus': Integer(2)}) == 2 def test_Poly__parse_modulus(): assert Poly._parse_modulus(5) == 5 assert Poly._parse_modulus(Integer(5)) == 5 raises(ValueError, "Poly._parse_modulus(1)") raises(ValueError, "Poly._parse_modulus(x)") def test_Poly_get_modulus(): Poly(x**2 + 1, modulus=2).get_modulus() == 2 raises(PolynomialError, "Poly(x**2 + 1).get_modulus()") def test_Poly_set_modulus(): Poly(x**2 + 1, modulus=2).set_modulus(7) == Poly(x**2 + 1, modulus=7) Poly(x**2 + 5, modulus=7).set_modulus(2) == Poly(x**2 + 1, modulus=2) Poly(x**2 + 1).set_modulus(2) == Poly(x**2 + 1, modulus=2) raises(PolynomialError, "Poly(x/2 + 1).set_modulus(2)") def test_Poly__analyze_extension(): assert Poly._analyze_extension({}) is None assert Poly._analyze_extension({'extension': []}) is None assert Poly._analyze_extension({'extension': sqrt(2)}) == set([sqrt(2)]) assert Poly._analyze_extension({'extension': [sqrt(2),sqrt(3)]}) == set([sqrt(2),sqrt(3)]) assert Poly._analyze_extension({'extension': True}) is True assert Poly._analyze_extension({'extension': False}) is None assert Poly._analyze_extension({'extension': I}) == set([I]) assert Poly._analyze_extension({'gaussian': True}) == set([I]) raises(PolynomialError, "Poly._analyze_extension({'gaussian': True, 'extension': I})") raises(PolynomialError, "Poly._analyze_extension({'gaussian': True, 'split': True})") raises(PolynomialError, "Poly._analyze_extension({'extension': I, 'split': True})") raises(NotImplementedError, "Poly._analyze_extension({'split': True})") def test_Poly_abs(): assert Poly(-x+1, x).abs() == abs(Poly(-x+1, x)) == Poly(x+1, x) def test_Poly_neg(): assert Poly(-x+1, x).neg() == -Poly(-x+1, x) == Poly(x-1, x) def test_Poly_add(): assert Poly(0, x).add(Poly(0, x)) == Poly(0, x) assert Poly(0, x) + Poly(0, x) == Poly(0, x) assert Poly(1, x).add(Poly(0, x)) == Poly(1, x) assert Poly(1, x, y) + Poly(0, x) == Poly(1, x, y) assert Poly(0, x).add(Poly(1, x, y)) == Poly(1, x, y) 
assert Poly(0, x, y) + Poly(1, x, y) == Poly(1, x, y) assert Poly(1, x) + x == Poly(x+1, x) assert Poly(1, x) + sin(x) == 1+sin(x) def test_Poly_sub(): assert Poly(0, x).sub(Poly(0, x)) == Poly(0, x) assert Poly(0, x) - Poly(0, x) == Poly(0, x) assert Poly(1, x).sub(Poly(0, x)) == Poly(1, x) assert Poly(1, x, y) - Poly(0, x) == Poly(1, x, y) assert Poly(0, x).sub(Poly(1, x, y)) == Poly(-1, x, y) assert Poly(0, x, y) - Poly(1, x, y) == Poly(-1, x, y) assert
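The tests above revolve around how a Poly picks (or is forced into) a coefficient domain. A small self-contained illustration of that behaviour, assuming only a standard SymPy installation (none of the legacy DMP/GFP internals are needed):

# Sketch: the coefficient domain follows the coefficients unless forced,
# matching the assertions above (ZZ for ints, QQ for rationals, RR for floats).
from sympy import Poly, Rational, symbols

x = symbols('x')

print(Poly(3*x**2 + 2*x + 1).get_domain())   # ZZ
print(Poly(x/2 + 1).get_domain())            # QQ
print(Poly(0.2*x + 0.1).get_domain())        # RR

# Forcing a domain changes how the coefficients are represented.
print(Poly(3.0*x**2 + 2.0*x + 1, domain='QQ').all_coeffs())          # [3, 2, 1]
print(Poly(Rational(2, 10)*x + Rational(1, 10)).set_domain('RR'))    # 0.2*x + 0.1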
<gh_stars>10-100 """Algorithms for STRIPS learning that start from the most general operators, then specialize them based on the data.""" import itertools from typing import Dict, List, Set from predicators.src import utils from predicators.src.nsrt_learning.strips_learning import BaseSTRIPSLearner from predicators.src.settings import CFG from predicators.src.structs import ParameterizedOption, \ PartialNSRTAndDatastore, Segment, STRIPSOperator class GeneralToSpecificSTRIPSLearner(BaseSTRIPSLearner): """Base class for a general-to-specific STRIPS learner.""" def _initialize_general_pnad_for_option( self, parameterized_option: ParameterizedOption ) -> PartialNSRTAndDatastore: """Create the most general PNAD for the given option.""" # Create the parameters, which are determined solely from the option # types, since the most general operator has no add/delete effects. parameters = utils.create_new_variables(parameterized_option.types) option_spec = (parameterized_option, parameters) # In the most general operator, the side predicates contain ALL # predicates. side_predicates = self._predicates.copy() # There are no add effects or delete effects. The preconditions # are initialized to be trivial. They will be recomputed next. op = STRIPSOperator(parameterized_option.name, parameters, set(), set(), set(), side_predicates) pnad = PartialNSRTAndDatastore(op, [], option_spec) # Recompute datastore. This simply clusters by option, since the # side predicates contain all predicates, and effects are trivial. self._recompute_datastores_from_segments([pnad]) # Determine the initial preconditions via a lifted intersection. preconditions = self._induce_preconditions_via_intersection(pnad) pnad.op = pnad.op.copy_with(preconditions=preconditions) return pnad class BackchainingSTRIPSLearner(GeneralToSpecificSTRIPSLearner): """Learn STRIPS operators by backchaining.""" def _learn(self) -> List[PartialNSRTAndDatastore]: # Initialize the most general PNADs by merging self._initial_pnads. # As a result, we will have one very general PNAD per option. param_opt_to_general_pnad = {} param_opt_to_nec_pnads: Dict[ParameterizedOption, List[PartialNSRTAndDatastore]] = {} # Extract all parameterized options from the data. parameterized_options = set() for ll_traj, seg_traj in zip(self._trajectories, self._segmented_trajs): if not ll_traj.is_demo: continue for segment in seg_traj: parameterized_options.add(segment.get_option().parent) # Set up the param_opt_to_general_pnad and param_opt_to_nec_pnads # dictionaries. for param_opt in parameterized_options: pnad = self._initialize_general_pnad_for_option(param_opt) param_opt_to_general_pnad[param_opt] = pnad param_opt_to_nec_pnads[param_opt] = [] self._assert_all_data_in_exactly_one_datastore( list(param_opt_to_general_pnad.values())) prev_itr_ops: Set[STRIPSOperator] = set() # We loop until the harmless PNADs induced by our procedure # converge to a fixed point (i.e, they don't change after two # subsequent iterations). while True: # Run multiple passes of backchaining over the data until # convergence to a fixed point. Note that this process creates # operators with only parameters, preconditions, and add effects. self._backchain_multipass(param_opt_to_nec_pnads, param_opt_to_general_pnad) # Induce delete effects, side predicates and potentially # keep effects. self._induce_delete_side_keep(param_opt_to_nec_pnads) # Harmlessness should now hold, but it's slow to check. 
if CFG.backchaining_check_intermediate_harmlessness: assert self._check_harmlessness( self._get_uniquely_named_nec_pnads(param_opt_to_nec_pnads)) # Recompute datastores and preconditions for all PNADs. # Filter out PNADs that don't have datastores. cur_itr_pnads_unfiltered = [ pnad for pnads in param_opt_to_nec_pnads.values() for pnad in pnads ] self._recompute_datastores_from_segments(cur_itr_pnads_unfiltered) cur_itr_pnads_filtered = [] for pnad in cur_itr_pnads_unfiltered: if len(pnad.datastore) > 0: new_pre = self._induce_preconditions_via_intersection(pnad) # NOTE: this implicitly changes param_opt_to_nec_pnads # as well, since we're directly modifying the PNAD objects. pnad.op = pnad.op.copy_with(preconditions=new_pre) cur_itr_pnads_filtered.append(pnad) else: param_opt_to_nec_pnads[pnad.option_spec[0]].remove(pnad) del cur_itr_pnads_unfiltered # should be unused after this # Check if the PNAD set has converged. If so, break. if {pnad.op for pnad in cur_itr_pnads_filtered} == prev_itr_ops: break prev_itr_ops = {pnad.op for pnad in cur_itr_pnads_filtered} # Assign a unique name to each PNAD. final_pnads = self._get_uniquely_named_nec_pnads( param_opt_to_nec_pnads) # Assert data has been correctly partitioned amongst PNADs. self._assert_all_data_in_exactly_one_datastore(final_pnads) return final_pnads def _backchain_multipass( self, param_opt_to_nec_pnads: Dict[ParameterizedOption, List[PartialNSRTAndDatastore]], param_opt_to_general_pnad: Dict[ParameterizedOption, PartialNSRTAndDatastore] ) -> None: """Take multiple passes through the demonstrations, running self._backchain_one_pass() each time. Keep going until the PNADs reach a fixed point. Note that this process creates operators with only parameters, preconditions, and add effects. """ while True: # Before each pass, clear the poss_keep_effects # of all the PNADs. We do this because we only want the # poss_keep_effects of the final pass, where the PNADs did # not change. However, we cannot simply clear the # pnad.seg_to_keep_effects_sub because some of these # substitutions might be necessary if this happens to be # a PNAD that already has keep effects. Thus, we call a # method that handles this correctly. for pnads in param_opt_to_nec_pnads.values(): for pnad in pnads: pnad.poss_keep_effects.clear() self._clear_unnecessary_keep_effs_sub(pnad) # Run one pass of backchaining. nec_pnad_set_changed = self._backchain_one_pass( param_opt_to_nec_pnads, param_opt_to_general_pnad) if not nec_pnad_set_changed: break def _backchain_one_pass( self, param_opt_to_nec_pnads: Dict[ParameterizedOption, List[PartialNSRTAndDatastore]], param_opt_to_general_pnad: Dict[ParameterizedOption, PartialNSRTAndDatastore] ) -> bool: """Take one pass through the demonstrations in the given order. Go through each one from the end back to the start, making the PNADs more specific whenever needed. Return whether any PNAD was changed. """ # Reset all segments' necessary_add_effects so that they aren't # accidentally used from a previous iteration of backchaining. self._reset_all_segment_add_effs() nec_pnad_set_changed = False for ll_traj, seg_traj in zip(self._trajectories, self._segmented_trajs): if not ll_traj.is_demo: continue traj_goal = self._train_tasks[ll_traj.train_task_idx].goal atoms_seq = utils.segment_trajectory_to_atoms_sequence(seg_traj) assert traj_goal.issubset(atoms_seq[-1]) # This variable, necessary_image, gets updated as we # backchain. 
It always holds the set of ground atoms that # are necessary for the remainder of the plan to reach the # goal. At the start, necessary_image is simply the goal. necessary_image = set(traj_goal) for t in range(len(atoms_seq) - 2, -1, -1): segment = seg_traj[t] option = segment.get_option() # Find the necessary PNADs associated with this option. If # there are none, then use the general PNAD associated with # this option. (But make sure to use a copy of it, because we # don't want the general PNAD to get mutated when we mutate # necessary PNADs!) if len(param_opt_to_nec_pnads[option.parent]) == 0: general_pnad = param_opt_to_general_pnad[option.parent] pnads_for_option = [ PartialNSRTAndDatastore(general_pnad.op, list(general_pnad.datastore), general_pnad.option_spec) ] else: pnads_for_option = param_opt_to_nec_pnads[option.parent] # Compute the ground atoms that must be added on this timestep. # They must be a subset of the current PNAD's add effects. necessary_add_effects = necessary_image - atoms_seq[t] assert necessary_add_effects.issubset(segment.add_effects) # Update the segment's necessary_add_effects. segment.necessary_add_effects = necessary_add_effects # We start by checking if any of the PNADs associated with the # demonstrated option are able to match this transition. objects = set(segment.states[0]) pnad, var_to_obj = self._find_best_matching_pnad_and_sub( segment, objects, pnads_for_option) if pnad is not None: assert var_to_obj is not None obj_to_var = {v: k for k, v in var_to_obj.items()} assert len(var_to_obj) == len(obj_to_var) ground_op = pnad.op.ground( tuple(var_to_obj[var] for var in pnad.op.parameters)) if len(param_opt_to_nec_pnads[option.parent]) == 0: param_opt_to_nec_pnads[option.parent].append(pnad) # If we weren't able to find a substitution (i.e, the above # _find_best_matching call didn't yield a PNAD), we need to # spawn a new PNAD from the most general PNAD to cover # these necessary add effects. else: nec_pnad_set_changed = True pnad = self._spawn_new_pnad( param_opt_to_general_pnad[option.parent], segment) param_opt_to_nec_pnads[option.parent].append(pnad) # Recompute datastores for ALL PNADs associated with this # option. We need to do this because the new PNAD may now # be a better match for some transition that we previously # matched to another PNAD. self._recompute_datastores_from_segments( param_opt_to_nec_pnads[option.parent]) # Recompute all preconditions, now that we have recomputed # the datastores. While doing this, keep track of any # PNADs that get empty datastores. pnads_to_remove = [] for nec_pnad in param_opt_to_nec_pnads[option.parent]: if len(nec_pnad.datastore) > 0: pre = self._induce_preconditions_via_intersection( nec_pnad) nec_pnad.op = nec_pnad.op.copy_with( preconditions=pre) else: pnads_to_remove.append(nec_pnad) # Remove PNADs that are no longer necessary because they # have no data in their datastores. for rem_pnad in pnads_to_remove: param_opt_to_nec_pnads[option.parent].remove(rem_pnad) # After all this, the unification call that failed earlier # (leading us into the current else statement) should work. 
best_score_pnad, var_to_obj = \ self._find_best_matching_pnad_and_sub( segment, objects, param_opt_to_nec_pnads[option.parent]) assert var_to_obj is not None assert best_score_pnad == pnad obj_to_var = {v: k for k, v in var_to_obj.items()} assert len(var_to_obj) == len(obj_to_var) ground_op = pnad.op.ground( tuple(var_to_obj[var] for var in pnad.op.parameters)) # Every atom in the necessary_image that wasn't in the # ground_op's add effects is a possible keep effect. This # may add new variables, whose mappings for this segment # we keep track of in the seg_to_keep_effects_sub dict. for atom in necessary_image - ground_op.add_effects: keep_eff_sub = {} for obj in atom.objects: if obj in obj_to_var: continue new_var = utils.create_new_variables( [obj.type], obj_to_var.values())[0] obj_to_var[obj] = new_var keep_eff_sub[new_var] = obj pnad.poss_keep_effects.add(atom.lift(obj_to_var)) if segment not in pnad.seg_to_keep_effects_sub: pnad.seg_to_keep_effects_sub[segment] = {} pnad.seg_to_keep_effects_sub[segment].update(keep_eff_sub) # Update necessary_image for this timestep. It no longer # needs to include the ground add effects of this PNAD, but # must now include its ground preconditions. necessary_image -= { a.ground(var_to_obj) for a
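The necessary_image bookkeeping above boils down to set algebra over ground atoms: the atoms that must be newly achieved at a timestep are those in the image that did not already hold, and the image is then regressed through the matched operator (drop its ground add effects, add its ground preconditions). A toy sketch with strings standing in for ground atoms, purely illustrative and independent of the predicators data structures:

# Toy illustration of one backchaining step over ground atoms.
def backchain_step(necessary_image, atoms_before, ground_add_effects,
                   ground_preconditions):
    # Atoms this operator must be responsible for adding at this timestep.
    necessary_add_effects = necessary_image - atoms_before
    assert necessary_add_effects.issubset(ground_add_effects)
    # Regress the image: the operator's add effects no longer need to be
    # carried backwards, but its preconditions now do.
    new_image = (necessary_image - ground_add_effects) | ground_preconditions
    return necessary_add_effects, new_image

image = {"On(a,b)", "Clear(c)"}
before = {"Clear(c)", "Holding(a)"}
adds = {"On(a,b)", "HandEmpty()"}
pres = {"Holding(a)", "Clear(b)"}
print(backchain_step(image, before, adds, pres))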
dest, keys, *args): """ Store the intersection of sets specified by ``keys`` into a new set named ``dest``. Returns the number of keys in the new set. """ keys = list_or_args('sinterstore', keys, args) return self.execute_command('SINTERSTORE', dest, *keys) def sismember(self, name, value): "Return a boolean indicating if ``value`` is a member of set ``name``" return self.execute_command('SISMEMBER', name, value) def smembers(self, name): "Return all members of the set ``name``" return self.execute_command('SMEMBERS', name) def smove(self, src, dst, value): "Move ``value`` from set ``src`` to set ``dst`` atomically" return self.execute_command('SMOVE', src, dst, value) def spop(self, name): "Remove and return a random member of set ``name``" return self.execute_command('SPOP', name) def srandmember(self, name): "Return a random member of set ``name``" return self.execute_command('SRANDMEMBER', name) def srem(self, name, value): "Remove ``value`` from set ``name``" return self.execute_command('SREM', name, value) def sunion(self, keys, *args): "Return the union of sets specifiued by ``keys``" keys = list_or_args('sunion', keys, args) return self.execute_command('SUNION', *keys) def sunionstore(self, dest, keys, *args): """ Store the union of sets specified by ``keys`` into a new set named ``dest``. Returns the number of keys in the new set. """ keys = list_or_args('sunionstore', keys, args) return self.execute_command('SUNIONSTORE', dest, *keys) #### SORTED SET COMMANDS #### def zadd(self, name, value, score): "Add member ``value`` with score ``score`` to sorted set ``name``" return self.execute_command('ZADD', name, score, value) def zcard(self, name): "Return the number of elements in the sorted set ``name``" return self.execute_command('ZCARD', name) def zcount(self, name, min, max): return self.execute_command('ZCOUNT', name, min, max) def zincr(self, key, member, value=1): "This has been deprecated, use zincrby instead" warnings.warn(DeprecationWarning( "Redis.zincr has been deprecated, use Redis.zincrby instead" )) return self.zincrby(key, member, value) def zincrby(self, name, value, amount=1): "Increment the score of ``value`` in sorted set ``name`` by ``amount``" return self.execute_command('ZINCRBY', name, amount, value) def zinter(self, dest, keys, aggregate=None): warnings.warn(DeprecationWarning( "Redis.zinter has been deprecated, use Redis.zinterstore instead" )) return self.zinterstore(dest, keys, aggregate) def zinterstore(self, dest, keys, aggregate=None): """ Intersect multiple sorted sets specified by ``keys`` into a new sorted set, ``dest``. Scores in the destination will be aggregated based on the ``aggregate``, or SUM if none is provided. """ return self._zaggregate('ZINTERSTORE', dest, keys, aggregate) def zrange(self, name, start, end, desc=False, withscores=False): """ Return a range of values from sorted set ``name`` between ``start`` and ``end`` sorted in ascending order. ``start`` and ``end`` can be negative, indicating the end of the range. ``desc`` indicates to sort in descending order. ``withscores`` indicates to return the scores along with the values. 
The return type is a list of (value, score) pairs """ if desc: return self.zrevrange(name, start, end, withscores) pieces = ['ZRANGE', name, start, end] if withscores: pieces.append('withscores') return self.execute_command(*pieces, **{'withscores': withscores}) def zrangebyscore(self, name, min, max, start=None, num=None, withscores=False): """ Return a range of values from the sorted set ``name`` with scores between ``min`` and ``max``. If ``start`` and ``num`` are specified, then return a slice of the range. ``withscores`` indicates to return the scores along with the values. The return type is a list of (value, score) pairs """ if (start is not None and num is None) or \ (num is not None and start is None): raise RedisError("``start`` and ``num`` must both be specified") pieces = ['ZRANGEBYSCORE', name, min, max] if start is not None and num is not None: pieces.extend(['LIMIT', start, num]) if withscores: pieces.append('withscores') return self.execute_command(*pieces, **{'withscores': withscores}) def zrank(self, name, value): """ Returns a 0-based value indicating the rank of ``value`` in sorted set ``name`` """ return self.execute_command('ZRANK', name, value) def zrem(self, name, value): "Remove member ``value`` from sorted set ``name``" return self.execute_command('ZREM', name, value) def zremrangebyrank(self, name, min, max): """ Remove all elements in the sorted set ``name`` with ranks between ``min`` and ``max``. Values are 0-based, ordered from smallest score to largest. Values can be negative indicating the highest scores. Returns the number of elements removed """ return self.execute_command('ZREMRANGEBYRANK', name, min, max) def zremrangebyscore(self, name, min, max): """ Remove all elements in the sorted set ``name`` with scores between ``min`` and ``max``. Returns the number of elements removed. """ return self.execute_command('ZREMRANGEBYSCORE', name, min, max) def zrevrange(self, name, start, num, withscores=False): """ Return a range of values from sorted set ``name`` between ``start`` and ``num`` sorted in descending order. ``start`` and ``num`` can be negative, indicating the end of the range. ``withscores`` indicates to return the scores along with the values as a dictionary of value => score """ pieces = ['ZREVRANGE', name, start, num] if withscores: pieces.append('withscores') return self.execute_command(*pieces, **{'withscores': withscores}) def zrevrank(self, name, value): """ Returns a 0-based value indicating the descending rank of ``value`` in sorted set ``name`` """ return self.execute_command('ZREVRANK', name, value) def zscore(self, name, value): "Return the score of element ``value`` in sorted set ``name``" return self.execute_command('ZSCORE', name, value) def zunion(self, dest, keys, aggregate=None): warnings.warn(DeprecationWarning( "Redis.zunion has been deprecated, use Redis.zunionstore instead" )) return self.zunionstore(dest, keys, aggregate) def zunionstore(self, dest, keys, aggregate=None): """ Union multiple sorted sets specified by ``keys`` into a new sorted set, ``dest``. Scores in the destination will be aggregated based on the ``aggregate``, or SUM if none is provided. 
""" return self._zaggregate('ZUNIONSTORE', dest, keys, aggregate) def _zaggregate(self, command, dest, keys, aggregate=None): pieces = [command, dest, len(keys)] if isinstance(keys, dict): items = keys.items() keys = [i[0] for i in items] weights = [i[1] for i in items] else: weights = None pieces.extend(keys) if weights: pieces.append('WEIGHTS') pieces.extend(weights) if aggregate: pieces.append('AGGREGATE') pieces.append(aggregate) return self.execute_command(*pieces) #### HASH COMMANDS #### def hdel(self, name, key): "Delete ``key`` from hash ``name``" return self.execute_command('HDEL', name, key) def hexists(self, name, key): "Returns a boolean indicating if ``key`` exists within hash ``name``" return self.execute_command('HEXISTS', name, key) def hget(self, name, key): "Return the value of ``key`` within the hash ``name``" return self.execute_command('HGET', name, key) def hgetall(self, name): "Return a Python dict of the hash's name/value pairs" return self.execute_command('HGETALL', name) def hincrby(self, name, key, amount=1): "Increment the value of ``key`` in hash ``name`` by ``amount``" return self.execute_command('HINCRBY', name, key, amount) def hkeys(self, name): "Return the list of keys within hash ``name``" return self.execute_command('HKEYS', name) def hlen(self, name): "Return the number of elements in hash ``name``" return self.execute_command('HLEN', name) def hset(self, name, key, value): """ Set ``key`` to ``value`` within hash ``name`` Returns 1 if HSET created a new field, otherwise 0 """ return self.execute_command('HSET', name, key, value) def hsetnx(self, name, key, value): """ Set ``key`` to ``value`` within hash ``name`` if ``key`` does not exist. Returns 1 if HSETNX created a field, otherwise 0. """ return self.execute_command("HSETNX", name, key, value) def hmset(self, name, mapping): """ Sets each key in the ``mapping`` dict to its corresponding value in the hash ``name`` """ items = [] for pair in mapping.iteritems(): items.extend(pair) return self.execute_command('HMSET', name, *items) def hmget(self, name, keys): "Returns a list of values ordered identically to ``keys``" return self.execute_command('HMGET', name, *keys) def hvals(self, name): "Return the list of values within hash ``name``" return self.execute_command('HVALS', name) # channels def psubscribe(self, patterns): "Subscribe to all channels matching any pattern in ``patterns``" if isinstance(patterns, basestring): patterns = [patterns] response = self.execute_command('PSUBSCRIBE', *patterns) # this is *after* the SUBSCRIBE in order to allow for lazy and broken # connections that need to issue AUTH and SELECT commands self.subscribed = True return response def punsubscribe(self, patterns=[]): """ Unsubscribe from any channel matching any pattern in ``patterns``. If empty, unsubscribe from all channels. """ if isinstance(patterns, basestring): patterns = [patterns] return self.execute_command('PUNSUBSCRIBE', *patterns) def subscribe(self, channels): "Subscribe to ``channels``, waiting for messages to be published" if isinstance(channels, basestring): channels = [channels] response = self.execute_command('SUBSCRIBE', *channels) # this is *after* the SUBSCRIBE in order to allow for lazy and broken # connections that need to issue AUTH and SELECT commands self.subscribed = True return response def unsubscribe(self, channels=[]): """ Unsubscribe from ``channels``. 
If empty, unsubscribe from all channels """ if isinstance(channels, basestring): channels = [channels] return self.execute_command('UNSUBSCRIBE', *channels) def publish(self, channel, message): """ Publish
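A short usage sketch for the sorted-set commands defined above. Note that this (older) client takes zadd(name, value, score) rather than a mapping, and withscores=True returns (value, score) pairs; the host, port and key names below are placeholders.

# Sketch: leaderboard-style use of the sorted-set API defined above,
# assuming this module is importable as `redis` and a server is reachable.
from redis import Redis

r = Redis(host='localhost', port=6379)

r.zadd('scores', 'alice', 10)      # old-style signature: name, value, score
r.zadd('scores', 'bob', 25)
r.zincrby('scores', 'alice', 5)    # alice now has 15

# Top two entries, highest score first, with scores attached.
print(r.zrevrange('scores', 0, 1, withscores=True))

# Everyone with a score between 10 and 20 inclusive.
print(r.zrangebyscore('scores', 10, 20, withscores=True))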
""" SQuAD with Bidirectional Encoder Representations from Transformers ========================================================================================= This example shows how to implement finetune a model with pre-trained BERT parameters for SQuAD, with Gluon NLP Toolkit. @article{devlin2018bert, title={BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding}, author={<NAME> and Chang, Ming- \ Wei and Lee, Kenton and Toutanova, Kristina}, journal={arXiv preprint arXiv:1810.04805}, year={2018} } """ # coding=utf-8 # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint:disable=redefined-outer-name,logging-format-interpolation # import argparse import collections import json import logging import os import io import copy import random import time import warnings import numpy as np import mxnet as mx import gluonnlp as nlp from gluonnlp.data import SQuAD from model.qa import BertForQA from data.qa import SQuADTransform, preprocess_dataset from bert_qa_evaluate import get_F1_EM, predict, PredResult from verify import AnswerVerify, AnswerVerifyDense, AnswerVerifyThreshold np.random.seed(6) random.seed(6) mx.random.seed(6) log = logging.getLogger('gluonnlp') log.setLevel(logging.DEBUG) formatter = logging.Formatter( fmt='%(levelname)s:%(name)s:%(asctime)s %(message)s', datefmt='%H:%M:%S') parser = argparse.ArgumentParser(description='BERT QA example.' 'We fine-tune the BERT model on SQuAD dataset.') parser.add_argument('--only_predict', action='store_true', help='Whether to predict only.') parser.add_argument('--model_parameters', type=str, default=None, help='Model parameter file') parser.add_argument('--bert_model', type=str, default='bert_12_768_12', help='BERT model name. options are bert_12_768_12 and bert_24_1024_16.') parser.add_argument('--bert_dataset', type=str, default='book_corpus_wiki_en_uncased', help='BERT dataset name.' 'options are book_corpus_wiki_en_uncased and book_corpus_wiki_en_cased.') parser.add_argument('--pretrained_bert_parameters', type=str, default=None, help='Pre-trained bert model parameter file. default is None') parser.add_argument('--uncased', action='store_false', help='if not set, inputs are converted to lower case.') parser.add_argument('--output_dir', type=str, default='./output_dir', help='The output directory where the model params and predicted results will be written.' ' default is ./output_dir') parser.add_argument('--epochs', type=int, default=2, help='number of epochs, default is 2') parser.add_argument('--batch_size', type=int, default=12, help='Batch size. Number of examples per gpu in a minibatch. default is 12') parser.add_argument('--test_batch_size', type=int, default=12, help='Test batch size. 
default is 12') parser.add_argument('--optimizer', type=str, default='adam', help='optimization algorithm. default is adam, bertadam is also usable (mxnet >= 1.5.0.)') parser.add_argument('--accumulate', type=int, default=None, help='The number of batches for ' 'gradients accumulation to simulate large batch size. Default is None') parser.add_argument('--lr', type=float, default=5e-5, # 6e-5 help='Initial learning rate. default is 6e-5') parser.add_argument('--warmup_ratio', type=float, default=0.1, help='ratio of warmup steps that linearly increase learning rate from ' '0 to target learning rate. default is 0.1') parser.add_argument('--log_interval', type=int, default=50, help='report interval. default is 50') parser.add_argument('--max_seq_length', type=int, default=384, help='The maximum total input sequence length after WordPiece tokenization.' 'Sequences longer than this will be truncated, and sequences shorter ' 'than this will be padded. default is 384') parser.add_argument('--doc_stride', type=int, default=128, help='When splitting up a long document into chunks, how much stride to ' 'take between chunks. default is 128') parser.add_argument('--max_query_length', type=int, default=64, help='The maximum number of tokens for the question. Questions longer than ' 'this will be truncated to this length. default is 64') parser.add_argument('--n_rnn_layers', type=int, default=0, help='number of LSTM layers added after the BERT-output and before the dense span-classifier.') parser.add_argument('--n_best_size', type=int, default=20, # 20 help='The total number of n-best predictions to generate in the ' 'nbest_predictions.json output file. default is 20') parser.add_argument('--max_answer_length', type=int, default=30, help='The maximum length of an answer that can be generated. This is needed ' 'because the start and end predictions are not conditioned on one another.' ' default is 30') parser.add_argument('--version_2', action='store_true', help='SQuAD examples whether contain some that do not have an answer.') parser.add_argument('--gpu', type=int, default=None, help='Which gpu to use for finetuning. CPU is used if not set.') parser.add_argument('--verify_gpu', type=int, default=None, help='Which gpu to use for training the verifier. The same context with reader model is used if not set.') parser.add_argument('--sentencepiece', type=str, default=None, help='Path to the sentencepiece .model file for both tokenization and vocab.') parser.add_argument('--debug', action='store_true', help='Run the example in test mode for sanity checks') parser.add_argument('--add_query', action='store_true', default=False, help='add the embedding of query to the part of context if needed') parser.add_argument('--apply_coattention', action='store_true', default=False, help='apply coattention to BERT\' output') parser.add_argument('--apply_self_attention', action='store_true', default=False, help='apply self-attention to BERT\' output') parser.add_argument('--apply_transformer', action='store_true', default=False, help='apply transformer to BERT\' output') parser.add_argument('--null_score_diff_threshold', type=float, default=0.0, help='If null_score - best_non_null is greater than the threshold predict null.' 
'default is 0.0') parser.add_argument('--answerable_threshold', type=float, default=0.5, help='If unanswerable - between 0 and 1, 0.5 by default.') parser.add_argument('--verifier', type=int, default=None, choices=[0, 1, 2], help='the id of the verifier to use, 0 refers to the standard thresholding.') parser.add_argument('--v_epochs', type=int, default=1, help='verifier running epochs / main model epoch.') parser.add_argument('--verifier_mode', type=str, default="joint", choices=["joint", "all", "takeover"], help='the id of the verifier to use') parser.add_argument('--not_extract_sentence', action='store_false', default=False, help='extracting sentence and use [S;Q;$;A] in verifier when extract sentence, otherwise [C;Q;$;A]') parser.add_argument('--save_params', action='store_true', default=False, help='save parameters') parser.add_argument('--freeze_bert', action='store_true', default=False, help='not finetuning bert parameters, only finetuning the rest parts.') parser.add_argument('--separate_train', action='store_true', default=False, help='separate BERT and the additional layers to use different learning rate and different trainer.') parser.add_argument('--qanet_style_out', action='store_true', default=False, help='using the QANet-style output.') parser.add_argument('--bidaf_style_out', action='store_true', default=False, help='using the BiDAF-style output.') parser.add_argument('--remove_special_token', action='store_true', default=False, help='remove the special tokens from bert output by masking.') parser.add_argument('--customize_loss', action='store_true', default=False, help='custimizing the loss function when needed.') args = parser.parse_args() VERIFIER_ID = args.verifier extract_sentence = not args.not_extract_sentence # by default extracting sentences for verifiers offsets = False # args.apply_coattention # shifting the order of the embedding might potentially harm the performance # it does harm the performance # NOTICE: shifting is not a good idea since it harms the performance and AT TIMES cause corruption (EM around 1, F1 around 10) # in case that offset needs to be shifted, refer to BertForQA.shift_ndarray output_dir = args.output_dir if not os.path.exists(output_dir): os.mkdir(output_dir) fh = logging.FileHandler(os.path.join( args.output_dir, 'finetune_squad.log'), mode='w') fh.setLevel(logging.INFO) fh.setFormatter(formatter) console = logging.StreamHandler() console.setLevel(logging.INFO) console.setFormatter(formatter) log.addHandler(console) log.addHandler(fh) log.info(args) model_name = args.bert_model dataset_name = args.bert_dataset only_predict = args.only_predict model_parameters = args.model_parameters pretrained_bert_parameters = args.pretrained_bert_parameters if pretrained_bert_parameters and model_parameters: raise ValueError('Cannot provide both pre-trained BERT parameters and ' 'BertForQA model parameters.') lower = args.uncased epochs = args.epochs batch_size = args.batch_size test_batch_size = args.test_batch_size lr = args.lr ctx = mx.cpu() if args.gpu is None else mx.gpu(args.gpu) verify_ctx = ctx if args.verify_gpu is None else mx.gpu(args.verify_gpu) accumulate = args.accumulate log_interval = args.log_interval * accumulate if accumulate else args.log_interval if accumulate: log.info('Using gradient accumulation. Effective batch size = {}'. 
format(accumulate*batch_size)) optimizer = args.optimizer warmup_ratio = args.warmup_ratio if args.save_params: print("We will save the model parameters in {} at the end.".format(os.path.join(output_dir, 'net.params'))) version_2 = args.version_2 null_score_diff_threshold = args.null_score_diff_threshold answerable_threshold = args.answerable_threshold max_seq_length = args.max_seq_length doc_stride = args.doc_stride max_query_length = args.max_query_length n_best_size = args.n_best_size max_answer_length = args.max_answer_length if max_seq_length <= max_query_length + 3: raise ValueError('The max_seq_length (%d) must be greater than max_query_length ' '(%d) + 3' % (max_seq_length, max_query_length)) # vocabulary and tokenizer if args.sentencepiece: logging.info('loading vocab file from sentence piece model: %s', args.sentencepiece) if dataset_name: warnings.warn('Both --dataset_name and --sentencepiece are provided. ' 'The vocabulary will be loaded based on --sentencepiece.') vocab = nlp.vocab.BERTVocab.from_sentencepiece(args.sentencepiece) dataset_name = None else: vocab = None pretrained = not model_parameters and not pretrained_bert_parameters and not args.sentencepiece bert, vocab = nlp.model.get_model( name=model_name, dataset_name=dataset_name, vocab=vocab, pretrained=pretrained, ctx=ctx, use_pooler=False, use_decoder=False, use_classifier=False) if args.sentencepiece: tokenizer = nlp.data.BERTSPTokenizer(args.sentencepiece, vocab, lower=lower) else: tokenizer = nlp.data.BERTTokenizer(vocab=vocab, lower=lower) batchify_fn = nlp.data.batchify.Tuple( nlp.data.batchify.Stack(), nlp.data.batchify.Pad(axis=0, pad_val=vocab[vocab.padding_token]), nlp.data.batchify.Pad(axis=0, pad_val=vocab[vocab.padding_token]), nlp.data.batchify.Stack('float32'), nlp.data.batchify.Stack('float32'), nlp.data.batchify.Stack('float32')) BERT_DIM = { 'bert_12_768_12': 768, 'bert_24_1024_16': 1024 } net = BertForQA(bert=bert, \ add_query=args.add_query, \ apply_coattention=args.apply_coattention, bert_out_dim=BERT_DIM[args.bert_model],\ apply_self_attention=args.apply_self_attention, apply_transformer=args.apply_transformer, qanet_style_out=args.qanet_style_out, bidaf_style_out=args.bidaf_style_out, n_rnn_layers=args.n_rnn_layers, remove_special_token=args.remove_special_token) # print(net) # exit(0) if args.apply_coattention and (args.qanet_style_out or args.bidaf_style_out): additional_params = None else: additional_params = net.span_classifier.collect_params() if model_parameters: # load complete BertForQA parameters net.load_parameters(model_parameters, ctx=ctx, cast_dtype=True) elif pretrained_bert_parameters: # only load BertModel parameters bert.load_parameters(pretrained_bert_parameters, ctx=ctx, ignore_extra=True, cast_dtype=True) if net.span_classifier is not None: net.span_classifier.initialize(init=mx.init.Normal(0.02), ctx=ctx) elif pretrained: # only load BertModel parameters if net.span_classifier is not None: net.span_classifier.initialize(init=mx.init.Normal(0.02), ctx=ctx) else: # no checkpoint is loaded net.initialize(init=mx.init.Normal(0.02), ctx=ctx) if args.apply_coattention: net.co_attention.collect_params().initialize(ctx=ctx) net.cls_mapping.initialize(ctx=ctx) if args.qanet_style_out: net.project.collect_params().initialize(ctx=ctx) net.dropout.collect_params().initialize(ctx=ctx) net.model_encoder.collect_params().initialize(ctx=ctx) net.predict_begin.collect_params().initialize(ctx=ctx) net.predict_end.collect_params().initialize(ctx=ctx) elif args.bidaf_style_out: 
net.modeling_layer.collect_params().initialize(ctx=ctx) net.output_layer.collect_params().initialize(ctx=ctx) # the additional paramaters if we want to freeze the BERT part of the model if additional_params is not None: additional_params.update(net.co_attention.collect_params()) else: additional_params = net.co_attention.collect_params() additional_params.update(net.cls_mapping.collect_params()) if args.qanet_style_out: additional_params.update(net.project.collect_params()) additional_params.update(net.dropout.collect_params()) additional_params.update(net.model_encoder.collect_params()) additional_params.update(net.predict_begin.collect_params()) additional_params.update(net.predict_end.collect_params()) elif args.bidaf_style_out: additional_params.update(net.modeling_layer.collect_params()) additional_params.update(net.output_layer.collect_params()) if args.apply_self_attention: net.multi_head_attention.collect_params().initialize(ctx=ctx) additional_params.update(net.multi_head_attention.collect_params()) if args.apply_transformer: net.transformer.collect_params().initialize(ctx=ctx) additional_params.update(net.transformer.collect_params()) net.hybridize(static_alloc=True) loss_function = net.loss(customize_loss=args.customize_loss) loss_function.hybridize(static_alloc=True) if version_2 and VERIFIER_ID is not None: if VERIFIER_ID == 0: verifier = AnswerVerifyThreshold( tokenizer=nlp.data.BERTBasicTokenizer(lower=lower), max_answer_length=max_answer_length, n_best_size=n_best_size, max_len=max_seq_length, version_2=version_2, ctx=verify_ctx) elif VERIFIER_ID == 1: verifier = AnswerVerify( tokenizer=nlp.data.BERTBasicTokenizer(lower=lower), max_answer_length=max_answer_length, null_score_diff_threshold=null_score_diff_threshold, n_best_size=n_best_size, max_len=max_seq_length, version_2=version_2, ctx=verify_ctx, offsets=offsets, epochs=args.v_epochs, extract_sentence=extract_sentence) # debug: to be moved onto another GPU latter if space issue happens elif VERIFIER_ID == 2: verifier = AnswerVerifyDense( max_answer_length=max_answer_length, null_score_diff_threshold=null_score_diff_threshold, n_best_size=n_best_size, max_len=max_seq_length, in_units=BERT_DIM[args.bert_model], version_2=version_2, extract_sentence=extract_sentence, offsets=offsets, ctx=verify_ctx) else: print("ERROR: verifier with id {0} unknown to the model.".format(VERIFIER_ID))
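The --warmup_ratio help text above describes a learning rate that rises linearly from 0 to the target over the first fraction of steps. The training loop itself is not part of this excerpt, so the following is only a generic sketch of that schedule; the linear decay after warm-up is an illustrative assumption, not something shown in the script.

# Generic warm-up schedule sketch matching the --warmup_ratio description.
def scheduled_lr(step, num_train_steps, base_lr, warmup_ratio=0.1):
    num_warmup_steps = int(num_train_steps * warmup_ratio)
    if step < num_warmup_steps:
        # Linear warm-up from 0 to base_lr.
        return base_lr * step / max(num_warmup_steps, 1)
    # Illustrative linear decay back to 0 over the remaining steps.
    remaining = max(num_train_steps - num_warmup_steps, 1)
    return base_lr * max(num_train_steps - step, 0) / remaining

# With the defaults above (lr=5e-5, warmup_ratio=0.1) and 1000 total steps:
for s in (0, 50, 100, 500, 1000):
    print(s, scheduled_lr(s, 1000, 5e-5))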
from jira import JIRA import csv import codecs import datetime options = {'server': '*'} jira = JIRA(options, basic_auth=("*", "*")) projects = jira.projects() # list containing all projects # loop to print all of the projects for i in projects: print(i) print("") # Clears the CSV file before writing reports to it with open('test3.csv', mode='w') as file: pass all_worker_names = [] # holds the names of all workers csv_header = [] # header of the csv file print('loading worker names...') # Used to append all of workers that existed in any project into both the csv_header and all_worker_names for project in projects: print(project) all_issues = jira.search_issues('project="{}"'.format(project)) #list which contains all issues in a project # obtains worker names from each issue in each project for i in range(len(all_issues)): issue = jira.issue(all_issues[i]) worklogs = jira.worklogs(issue.key) #list of all of the worklegs in an issue for worklog in worklogs: author = worklog.author #gets the name of the worklog authors if str(author) not in all_worker_names: # avoiding repeated names from being added to the lists all_worker_names.append(str(author)) csv_header.append(str(author)) print('worker names have been fully loaded') print("") projectcount = 0 # used to indicate the number of projects that have been loaded into the file, this is so the header only gets written once print('writing reports to csv file...') # loops through each project to get reports for projectname in projects: print(projectname) all_issues = jira.search_issues('project="{}"'.format(projectname)) #list which contains all issues in a project issue_list = [] #contains the summary of the issues, the names of workloggers and the time they logged worker_name_list = [] #contains the names of the workers that have worked on the project WorkerAndTS = [] #will become a 2D list which contains the names of the workers and the times they've worked on each issue fullissuelist = [] #this list will contain the summaries of each issue as well as the total amount of hours worked on an issue by each person in the worker_name_list #this loop is used to bring down the issue names and the worklogs on each issue for i in range(len(all_issues)): issue = jira.issue(all_issues[i]) issue_list.append([issue.fields.summary]) #issue.fields.summary represents the summary of the issue each issue will be put in a 2D list so I can apppend time values to it as well fullissuelist.append([issue.fields.summary]) worklogs = jira.worklogs(issue.key) #list of all of the worklegs in an issue for worklog in worklogs: author = worklog.author #gets the name of the worklog authors time = worklog.timeSpentSeconds #gets the amount of time that has been logged by the authors issue_list[i].append(str(author)) #through each iteration, the issue_list will fill up with worklogs and issue names issue_list[i].append(str(time)) #the issue_list at this point will contain names of all issue authors, this include repeated names of the same author. 
this if statement serves as a function #to remove duplicate names from the issue_list by appending them to a new list (worker_name_list) if str(author) not in worker_name_list: worker_name_list.append(str(author)) #this function baically splits each item in the worker_name_list, so each worker gets their own nested list, this will be used in order to tie the time spent to the worker who spent it for i in range(len(worker_name_list)): WorkerAndTS.append([worker_name_list[i]]) #Looping through all of the issues again in order to add time values to the rearrangedlist for i in range(len(all_issues)): for j in range(len(WorkerAndTS)): #adds the number 0 to each list in WorkerAndTS for each issue in a project. WorkerAndTS[j].append(0) #These 0s represent the amount of hours worked on each project by the worker, based on their worklog issue = jira.issue(all_issues[i]) worklogs = jira.worklogs(issue.key) for worklog in worklogs: author = worklog.author time = worklog.timeSpentSeconds #this for loop compares the author that the main for loop is looking at against the the worker name in WorkerAndTS for counter in range(len(WorkerAndTS)): if str(author) == str(WorkerAndTS[counter][0]): WorkerAndTS[counter][i+1] += time #if the author being looked at # ties the issue to the time spent on the issue by each other for i in range(len(fullissuelist)): for j in range(len(WorkerAndTS)): fullissuelist[i].append(WorkerAndTS[j][i+1]) # This list will only hold issues that have times logged on them, this for loop appends the issues which contain issues that don't have all zeros currentlist = [] for i in range(len(fullissuelist)): zeros = 0 # tally of the amount of zeros in an issue for j in range(len(fullissuelist[i])): if fullissuelist[i][j] == 0: zeros += 1 # adds 1 to the tally when a 0 is found # compared the amount of zeros to the number of items in the nested list which contains the issue if zeros < (len(fullissuelist[i])-1): # since the fullissuelist will only contain the issue summary right now, we only need to detect whether the amount of zeros is equivalent to the length of list without the summary currentlist.append(fullissuelist[i]) # if the amount of zeros is less than the length of the list without the summary, this means a worker has logged time on the issue, this means the issue will get appended to the currentlist # aesthetic purposes, replaces the 0s in each nested list in currentlist with blank spaces for i in range(len(currentlist)): for j in range(len(currentlist[i])): if currentlist[i][j] == 0: currentlist[i][j] = "" # obtains the length of each nested list in currentlist, this will be used when appending time length_list = [] for i in range(len(currentlist)): length_list.append(len(currentlist[i])) # appends the time created and time resolved into the currentlist for i in range(len(all_issues)): issue = jira.issue(all_issues[i]) for i in range(len(currentlist)): if len(currentlist[i]) == length_list[i]: # this checks whether the length of the nested list is equal to the length of the list before the created time and resolve time were added # this is used in case some of the issues have the same names but different creation dates and resolve dates if currentlist[i][0] == issue.fields.summary:# checks to see if the summary in the nested list is the same as the one being looked at by the main loop # this section obtains the date the issue was created and puts it into a traditional day/month/year format date = "{}".format(issue.fields.created) date_obj = datetime.datetime.strptime(date, 
'%Y-%m-%dT%H:%M:%S.%f%z') year = date_obj.strftime("%Y") month = date_obj.strftime("%m") day = date_obj.strftime("%d") timestring = (day+"/"+month+"/"+year) currentlist[i].insert(1, timestring) # this section similar to the creation date formatting, it finds the date of when an issue was resolved if issue.fields.resolutiondate: date = "{}".format(issue.fields.resolutiondate) date_obj = datetime.datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%f%z') year = date_obj.strftime("%Y") month = date_obj.strftime("%m") day = date_obj.strftime("%d") timestring = (day+"/"+month+"/"+year) currentlist[i].insert(2, timestring) # as some issue have not been resolved, they won't have a resolved date, in which case their resolved date will be marked as 'none' in the csv file else: currentlist[i].insert(2, "none") # inserts the issue key into the nested list which contains the corresponding issue by matching it with the summary for i in range(len(all_issues)): issue = jira.issue(all_issues[i]) summary = issue.fields.summary for j in range(len(currentlist)): if summary == currentlist[j][0]: currentlist[j].insert(0, issue.key) # inserts the project name into the nested lists for i in range(len(currentlist)): currentlist[i].insert(0, projectname) # creates a new list which will be used to rearrange the 2D list so it can written to the csv file in the given format # csv format is | Projectid | issueid | issue_desc | CreateDate | ResDate | all worker names with their own cell | # this for loop adds the project names, the creation date and the resolve date to the new_currentlist new_current = [] for i in range(len(currentlist)): new_current.append([currentlist[i][0]]) for j in range(4): new_current[i].append(currentlist[i][j+1]) # this for loop appends the number 0 for the amount of workers in all_worker_names. This 0 represents the amount of time a worker has worked on the each project in the issues in new_currentlist for i in range(len(new_current)): for j in range(len(all_worker_names)): new_current[i].append(0) # this for loops connects the location of the worker in the worker name list with a 0
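The parallel-list bookkeeping above (worker names, per-issue zero counters, the zero-counting filter) can be expressed more directly with a dictionary keyed by author. This is an illustrative alternative sketch with made-up sample rows, not the script's actual data flow:

# Aggregate logged seconds per (issue, author), then keep only issues where
# somebody actually logged time (the role of the zero-counting loop above).
from collections import defaultdict

worklog_rows = [
    ('PROJ-1', 'alice', 3600),
    ('PROJ-1', 'bob', 1800),
    ('PROJ-2', 'alice', 0),
]

seconds_by_issue = defaultdict(lambda: defaultdict(int))
for issue_key, author, seconds in worklog_rows:
    seconds_by_issue[issue_key][author] += seconds

issues_with_work = {k: dict(v) for k, v in seconds_by_issue.items()
                    if any(v.values())}
print(issues_with_work)   # {'PROJ-1': {'alice': 3600, 'bob': 1800}}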
#!/usr/bin/env python3 # Copyright 2016-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. import configparser import contextlib import fcntl import io import json import logging import os import threading from collections import defaultdict, namedtuple import compiler from constants import PACKAGE_JSON, BUCKCONFIG, BUCKCONFIG_LOCAL from helpers import BuckitException PackageInfo = namedtuple( 'PackageInfo', ['name', 'cell_name', 'cell_alias', 'absolute_path', 'includes_info'] ) IncludesInfo = namedtuple('IncludesInfo', ['path', 'whitelist_functions']) BUCKCONFIG_HEADER = r''' # This configuration file was updated by buckit. Manual changes # should be made in the root project's .buckconfig /.buckconfig.local # file. All package's .buckconfig.local files can then be updated by # running `yarn run buckit buckconfig` ''' __file_lock_counts = defaultdict(int) __mutex = threading.RLock() @contextlib.contextmanager def __lockfile(project_root): """ On enter, locks a file in the project root, on exit unlocks it. Keeps a count of lock attempts, so can be locked recursively """ _lockfile = None try: lock_path = os.path.join(project_root, '._buckconfig.lock') logging.debug("Locking file at %s", lock_path) _lockfile = open(lock_path, 'w') count = 0 with __mutex: __file_lock_counts[project_root] += 1 count = __file_lock_counts[project_root] fcntl.lockf(_lockfile, fcntl.LOCK_EX) logging.debug( "Locked file at %s. Count is %s", lock_path, count) yield None finally: if _lockfile: with __mutex: __file_lock_counts[project_root] -= 1 lock_count = __file_lock_counts[project_root] if lock_count == 0: logging.debug("Unlocking file at %s", lock_path) fcntl.lockf(_lockfile, fcntl.LOCK_UN) logging.debug("Unlocked file at %s", lock_path) else: logging.debug("Lock count is at %s, not unlocking", lock_count) _lockfile.close() def update_config( project_root, buckconfig, new_properties, override=False, merge=None, removed_properties=None): """ Take a dictionary of {section : {key: [values] (or a string value)}} and set it in a .buckconfig style file Arguments: project_root - The path to the root project. Used for locking buckconfig - The path to the buckconfig file new_properties - A dictionary of {section: key: [values]|value}. If values is an iterable, it will be joined by spaces override - Whether or not to override existing values merge - If provided, a dictionary of section.key strings to delimiter where we should split the string by the delimiter, merge the values, and write them back out with the given delimiter removed_properties - If provided, a dictionary of {section:[properties]} to remove from the config. This is applied after updates provided in `new_properties` """ with __lockfile(project_root): __update_config( buckconfig, new_properties, override, merge, removed_properties) def __update_config( buckconfig, new_properties, override=False, merge=None, removed_properties=None): """ Take a dictionary of {section : {key: [values] (or a string value)}} and set it in a .buckconfig style file. No locking is done in this method Arguments: buckconfig - The path to the buckconfig file new_properties - A dictionary of {section: key: [values]|value}. 
If values is an iterable, it will be joined by spaces override - Whether or not to override existing values merge - If provided, a dictionary of section.key strings to delimiter where we should split the string by the delimiter, merge the values, and write them back out with the given delimiter removed_properties - If provided, a dictionary of {section:[properties]} to remove from the config. This is applied after updates provided in `new_properties` """ merge = merge or {} config = configparser.ConfigParser() if os.path.exists(buckconfig): config.read(buckconfig) logging.debug("Updating file at %s", buckconfig) for section, kvs in new_properties.items(): if not config.has_section(section): config.add_section(section) for key, value in kvs.items(): if override or not config.has_option(section, key): merge_delimiter = merge.get('{}.{}'.format(section, key)) if isinstance(value, str): str_value = value elif merge_delimiter: if config.has_section(section): existing = set( ( x.strip() for x in config.get(section, key, fallback='') .split(merge_delimiter) ) ) else: existing = set() str_value = merge_delimiter.join(existing | set(value)) else: str_value = ' '.join(value) logging.debug("Setting %s.%s to %s", section, key, str_value) config.set(section, key, str_value) else: logging.debug( "%s.%s is already set, not overriding values", section, key ) if removed_properties: for section, keys in removed_properties.items(): if config.has_section(section): for key in keys: config.remove_option(section, key) with open(buckconfig, 'w') as fout: fout.write(BUCKCONFIG_HEADER) config.write(fout) logging.debug("Updated file at %s", buckconfig) def parse_package_info(package_path): """ Try to get the package info from the package.json inside of package_path Arguments: package_path - The path to the package root that contains package.json Returns: PackageInfo object with properties from the package.json """ package_path = os.path.abspath(package_path) json_path = os.path.join(package_path, PACKAGE_JSON) try: with open(json_path, 'r') as fin: js = json.loads(fin.read()) buckit = js.get('buckit', {}) cell_name = buckit.get('cell_name', js['name']) includes = buckit.get('includes', None) if includes: includes_info = IncludesInfo( includes.get('path', None), includes.get('whitelist_functions', []) ) if not isinstance(includes_info.path, str): raise BuckitException( "buckit.includes in {} should be a string", json_path) if not isinstance(includes_info.whitelist_functions, list): raise BuckitException( "buckit.whitelist_functions in {} should be a list", json_path) else: includes_info = None return PackageInfo( js["name"], cell_name.split('/')[-1], 'yarn|{}'.format(js["name"]), package_path, includes_info ) except Exception as e: raise BuckitException( "Could not read property 'buckit.name' or 'name' from " "json file at {}: {}", json_path, e) def find_project_root(start_path, node_modules=None): """ Starting at start_path, going up, try to find the first directory with a package.json or .buckconfig, and call that the root of the project Args: start_path: The directory to start looking in node_modules: If provided, the name of the directory that yarn installs packages to. 
This can be used if .buckconfig or package.json do not exist yet Returns: The absolute path to the project root Raises: Exception: No parent project could be found """ terminal = os.path.splitdrive(start_path)[0] or '/' path = os.path.abspath(start_path) while path != terminal: logging.debug("Checking %s for package.json or .buckconfig", path) package_json = os.path.join(path, PACKAGE_JSON) package_buckconfig = os.path.join(path, BUCKCONFIG) package_node_modules = None if node_modules: package_node_modules = os.path.join(path, node_modules) package_path = path path = os.path.split(path)[0] if os.path.exists(package_buckconfig): break elif os.path.exists(package_json): try: package_info = parse_package_info(package_path) logging.debug( "Found project %s at %s", package_info.name, package_json ) break except Exception: # If we couldn't parse it, it wasn't meant to be logging.debug("Could not parse json in %s", package_json) continue elif package_node_modules and os.path.exists(package_node_modules): logging.debug( "Found node modules directory at %s", package_node_modules) break else: continue else: raise BuckitException( "Could not find a .buckconfig or package.json above {}. Stopped " "at {}", start_path, path) logging.debug("{bold}Found project root at %s{clear}", package_path) return package_path def __update_root_buckconfig(project_root, package_info, is_root_dep): """ Updates the root .buckconfig with `cell=alias` in the repositories section """ buckconfig = os.path.join(project_root, BUCKCONFIG) repos = 'repositories' project = 'project' buildfile = 'buildfile' whitelist_key = 'build_file_import_whitelist' to_set = { repos: {}, project: {}, buildfile: {}, } alias_config = '$(config repository_aliases.{})'.format( package_info.cell_alias ) to_set[repos][package_info.cell_name] = alias_config if package_info.includes_info and is_root_dep: if package_info.includes_info.whitelist_functions: to_set[project][whitelist_key] = \ package_info.includes_info.whitelist_functions new_includes = '//{alias_config}/{path}'.format( alias_config=alias_config, path=package_info.includes_info.path ) to_set[buildfile]['includes'] = new_includes __update_config( buckconfig, to_set, merge={'{}.{}'.format(project, whitelist_key): ','} ) def __update_root_buckconfig_local(project_root, package_info): """ Updates the root .buckconfig with `alias=path_relative_to_root` in the repository_aliases section """ buckconfig_local = os.path.join(project_root, BUCKCONFIG_LOCAL) relative_path = os.path.relpath(package_info.absolute_path, project_root) logging.debug("Updating root buckconfig.local at %s", buckconfig_local) to_set = { 'repository_aliases': {package_info.cell_alias: relative_path}, 'log': {'buckconfig_local_warning_enabled': 'false'}, } __update_config(buckconfig_local, to_set) def __update_packages_buckconfig_local(project_root): """ Updates all .buckconfig.local files in all directories specified in the root's repository_aliases section. 
This copies the root .buckconfig.local, and makes all of the paths relative to the cell """ buckconfig_root = os.path.join(project_root, BUCKCONFIG) root_config = configparser.ConfigParser() if os.path.exists(buckconfig_root): root_config.read(buckconfig_root) root_repositories = {} if root_config.has_section('repositories'): root_repositories = { alias.replace('$(config repository_aliases.', '').replace(')', ''): cell for cell, alias in root_config.items('repositories') } buckconfig_local = os.path.join(project_root, BUCKCONFIG_LOCAL) section = 'repository_aliases' logging.debug("Updating .buckconfig.local paths for all packages") config = configparser.ConfigParser() if not os.path.exists(buckconfig_local): logging.debug('.buckconfig at %s does not exist', buckconfig_local) return config.read(buckconfig_local) if not config.has_section(section): logging.debug('[%s] was not found in %s', section, buckconfig_local) return config_string = io.StringIO() config.write(config_string) for alias, package_path in config.items(section): # For all cells, make sure they have a copy of the .buckconfig.local. # Update its paths to have proper relative paths, rather than the ones # copied from the root package_path = os.path.join(project_root, package_path) if not os.path.exists(package_path): logging.debug("Package path %s does not exist", package_path) continue package_buckconfig_local = os.path.join(package_path, BUCKCONFIG_LOCAL) package_config = configparser.ConfigParser() config_string.seek(0) package_config.read_file(config_string) logging.debug( "Updating .buckconfig.local at %s", package_buckconfig_local ) for cell_alias, root_relative_path in package_config.items(section): relative_path = os.path.relpath( os.path.abspath(os.path.join(project_root, root_relative_path)), os.path.abspath(package_path) ) package_config.set(section, cell_alias, relative_path) # If a section ends in #buckit-<platform> or #buckit-<cell>-<platform> # copy that section into <section>#platform instead. This lets us # override specific sections for third party or
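# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the buckit module above): how the `merge`
# argument handled by __update_config is intended to behave, reduced to a
# standalone helper. Only stdlib configparser is assumed; merge_option and
# the sample section/key names are made up for this example.
import configparser
import io

def merge_option(config, section, key, new_values, delimiter):
    """Union existing delimiter-separated values with new_values and write
    the result back as a single delimiter-joined string."""
    existing = set(
        x.strip()
        for x in config.get(section, key, fallback='').split(delimiter)
        if x.strip()
    )
    config.set(section, key, delimiter.join(sorted(existing | set(new_values))))

config = configparser.ConfigParser()
config.read_string("[project]\nbuild_file_import_whitelist = os,re\n")
merge_option(config, 'project', 'build_file_import_whitelist', ['json', 'os'], ',')

out = io.StringIO()
config.write(out)
print(out.getvalue())   # build_file_import_whitelist = json,os,re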
and -stat_vox mean: <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>. & <NAME>. The average pathlength map: A diffusion MRI tractography-derived index for studying brain pathology. NeuroImage, 2011, 55, 133-141 * If using -dixel option with TDI contrast only: <NAME>., Tournier, J-D., <NAME>., <NAME>. A novel paradigm for automated segmentation of very large whole-brain probabilistic tractography data sets. In proc. ISMRM, 2011, 19, 673 * If using -dixel option with any other contrast: <NAME>., Raffelt, D., <NAME>., <NAME>. Incorporating directional information in diffusion tractography derived maps: angular track imaging (ATI). In Proc. ISMRM, 2012, 20, 1912 * If using -tod option: <NAME>., <NAME>., <NAME>., Maes, F., <NAME>., <NAME>. Track Orientation Density Imaging (TODI) and Track Orientation Distribution (TOD) based tractography. NeuroImage, 2014, 94, 312-336 * If using other contrasts / statistics: <NAME>.; <NAME>.; <NAME>. & <NAME>. A generalised framework for super-resolution track-weighted imaging. NeuroImage, 2012, 59, 2494-2503 * If using -precise mapping option: <NAME>.; <NAME>.; <NAME>. & <NAME>. SIFT: Spherical-deconvolution informed filtering of tractograms. NeuroImage, 2013, 67, 298-312 (Appendix 3) Example ------- >>> import nipype.interfaces.mrtrix3 as mrt >>> tdi = mrt.ComputeTDI() >>> tdi.inputs.in_file = 'dti.mif' >>> tdi.cmdline # doctest: +ELLIPSIS 'tckmap dti.mif tdi.mif' >>> tdi.run() # doctest: +SKIP """ _cmd = "tckmap" input_spec = ComputeTDIInputSpec output_spec = ComputeTDIOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs["out_file"] = op.abspath(self.inputs.out_file) return outputs class TCK2VTKInputSpec(CommandLineInputSpec): in_file = File( exists=True, argstr="%s", mandatory=True, position=-2, desc="input tractography" ) out_file = File( "tracks.vtk", argstr="%s", usedefault=True, position=-1, desc="output VTK file" ) reference = File( exists=True, argstr="-image %s", desc="if specified, the properties of" " this image will be used to convert track point positions from real " "(scanner) coordinates into image coordinates (in mm).", ) voxel = File( exists=True, argstr="-image %s", desc="if specified, the properties of" " this image will be used to convert track point positions from real " "(scanner) coordinates into image coordinates.", ) nthreads = traits.Int( argstr="-nthreads %d", desc="number of threads. 
if zero, the number" " of available cpus will be used", nohash=True, ) class TCK2VTKOutputSpec(TraitedSpec): out_file = File(desc="output VTK file") class TCK2VTK(MRTrix3Base): """ Convert a track file to a vtk format, cave: coordinates are in XYZ coordinates not reference Example ------- >>> import nipype.interfaces.mrtrix3 as mrt >>> vtk = mrt.TCK2VTK() >>> vtk.inputs.in_file = 'tracks.tck' >>> vtk.inputs.reference = 'b0.nii' >>> vtk.cmdline # doctest: +ELLIPSIS 'tck2vtk -image b0.nii tracks.tck tracks.vtk' >>> vtk.run() # doctest: +SKIP """ _cmd = "tck2vtk" input_spec = TCK2VTKInputSpec output_spec = TCK2VTKOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs["out_file"] = op.abspath(self.inputs.out_file) return outputs class DWIExtractInputSpec(MRTrix3BaseInputSpec): in_file = File( exists=True, argstr="%s", mandatory=True, position=-2, desc="input image" ) out_file = File(argstr="%s", mandatory=True, position=-1, desc="output image") bzero = traits.Bool(argstr="-bzero", desc="extract b=0 volumes") nobzero = traits.Bool(argstr="-no_bzero", desc="extract non b=0 volumes") singleshell = traits.Bool( argstr="-singleshell", desc="extract volumes with a specific shell" ) shell = traits.List( traits.Float, sep=",", argstr="-shell %s", desc="specify one or more gradient shells", ) class DWIExtractOutputSpec(TraitedSpec): out_file = File(exists=True, desc="output image") class DWIExtract(MRTrix3Base): """ Extract diffusion-weighted volumes, b=0 volumes, or certain shells from a DWI dataset Example ------- >>> import nipype.interfaces.mrtrix3 as mrt >>> dwiextract = mrt.DWIExtract() >>> dwiextract.inputs.in_file = 'dwi.mif' >>> dwiextract.inputs.bzero = True >>> dwiextract.inputs.out_file = 'b0vols.mif' >>> dwiextract.inputs.grad_fsl = ('bvecs', 'bvals') >>> dwiextract.cmdline # doctest: +ELLIPSIS 'dwiextract -bzero -fslgrad bvecs bvals dwi.mif b0vols.mif' >>> dwiextract.run() # doctest: +SKIP """ _cmd = "dwiextract" input_spec = DWIExtractInputSpec output_spec = DWIExtractOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs["out_file"] = op.abspath(self.inputs.out_file) return outputs class MRConvertInputSpec(MRTrix3BaseInputSpec): in_file = File( exists=True, argstr="%s", mandatory=True, position=-2, desc="input image" ) out_file = File( "dwi.mif", argstr="%s", mandatory=True, position=-1, usedefault=True, desc="output image", ) coord = traits.List( traits.Int, sep=" ", argstr="-coord %s", desc="extract data at the specified coordinates", ) vox = traits.List( traits.Float, sep=",", argstr="-vox %s", desc="change the voxel dimensions" ) axes = traits.List( traits.Int, sep=",", argstr="-axes %s", desc="specify the axes that will be used", ) scaling = traits.List( traits.Float, sep=",", argstr="-scaling %s", desc="specify the data scaling parameter", ) json_import = File( exists=True, argstr="-json_import %s", mandatory=False, desc="import data from a JSON file into header key-value pairs", ) json_export = File( exists=False, argstr="-json_export %s", mandatory=False, desc="export data from an image header key-value pairs into a JSON file", ) class MRConvertOutputSpec(TraitedSpec): out_file = File(exists=True, desc="output image") json_export = File( exists=True, desc="exported data from an image header key-value pairs in a JSON file", ) out_bvec = File(exists=True, desc="export bvec file in FSL format") out_bval = File(exists=True, desc="export bvec file in FSL format") class MRConvert(MRTrix3Base): """ Perform conversion between different 
file types and optionally extract a subset of the input image Example ------- >>> import nipype.interfaces.mrtrix3 as mrt >>> mrconvert = mrt.MRConvert() >>> mrconvert.inputs.in_file = 'dwi.nii.gz' >>> mrconvert.inputs.grad_fsl = ('bvecs', 'bvals') >>> mrconvert.cmdline # doctest: +ELLIPSIS 'mrconvert -fslgrad bvecs bvals dwi.nii.gz dwi.mif' >>> mrconvert.run() # doctest: +SKIP """ _cmd = "mrconvert" input_spec = MRConvertInputSpec output_spec = MRConvertOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs["out_file"] = op.abspath(self.inputs.out_file) if self.inputs.json_export: outputs["json_export"] = op.abspath(self.inputs.json_export) if self.inputs.out_bvec: outputs["out_bvec"] = op.abspath(self.inputs.out_bvec) if self.inputs.out_bval: outputs["out_bval"] = op.abspath(self.inputs.out_bval) return outputs class TransformFSLConvertInputSpec(MRTrix3BaseInputSpec): in_file = File( exists=True, argstr="%s", mandatory=True, position=1, desc="FLIRT input image", ) reference = File( exists=True, argstr="%s", mandatory=True, position=2, desc="FLIRT reference image", ) in_transform = File( exists=True, argstr="%s", mandatory=True, position=0, desc="FLIRT output transformation matrix", ) out_transform = File( "transform_mrtrix.txt", argstr="%s", mandatory=True, position=-1, usedefault=True, desc="output transformed affine in mrtrix3's format", ) flirt_import = traits.Bool( True, argstr="flirt_import", mandatory=True, usedefault=True, position=-2, desc="import transform from FSL's FLIRT.", ) class TransformFSLConvertOutputSpec(TraitedSpec): out_transform = File( exists=True, desc="output transformed affine in mrtrix3's format" ) class TransformFSLConvert(MRTrix3Base): """ Perform conversion between FSL's transformation matrix format to mrtrix3's. """ _cmd = "transformconvert" input_spec = TransformFSLConvertInputSpec output_spec = TransformFSLConvertOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs["out_transform"] = op.abspath(self.inputs.out_transform) return outputs class MRTransformInputSpec(MRTrix3BaseInputSpec): in_files = InputMultiPath( File(exists=True), argstr="%s", mandatory=True, position=-2, desc="Input images to be transformed", ) out_file = File( genfile=True, argstr="%s", position=-1, desc="Output image", ) invert = traits.Bool( argstr="-inverse", position=1, desc="Invert the specified transform before using it", ) linear_transform = File( exists=True, argstr="-linear %s", position=1, desc=( "Specify a linear transform to apply, in the form of a 3x4 or 4x4 ascii file. " "Note the standard reverse convention is used, " "where the transform maps points in the template image to the moving image. " "Note that the reverse convention is still assumed even if no -template image is supplied." ), ) replace_transform = traits.Bool( argstr="-replace", position=1, desc="replace the current transform by that specified, rather than applying it to the current transform", ) transformation_file = File( exists=True, argstr="-transform %s", position=1, desc="The transform to apply, in the form of a 4x4 ascii file.", ) template_image = File( exists=True, argstr="-template %s", position=1, desc="Reslice the input image to match the specified template image.", ) reference_image = File( exists=True, argstr="-reference %s", position=1, desc="in case the transform supplied maps from the input image onto a reference image, use this option to specify the reference. 
Note that this implicitly sets the -replace option.", ) flip_x = traits.Bool( argstr="-flipx", position=1, desc="assume the transform is supplied assuming a coordinate system with the x-axis reversed relative to the MRtrix convention (i.e. x increases from right to left). This is required to handle transform matrices produced by FSL's FLIRT command. This is only used in conjunction with the -reference option.", ) quiet = traits.Bool( argstr="-quiet", position=1, desc="Do not display information messages or progress status.", ) debug = traits.Bool(argstr="-debug", position=1, desc="Display debugging messages.") class MRTransformOutputSpec(TraitedSpec): out_file = File(exists=True, desc="the output image of the transformation") class MRTransform(MRTrix3Base): """ Apply spatial transformations or reslice images Example ------- >>> MRxform = MRTransform() >>> MRxform.inputs.in_files = 'anat_coreg.mif' >>> MRxform.run() # doctest: +SKIP """ _cmd = "mrtransform" input_spec = MRTransformInputSpec output_spec = MRTransformOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs["out_file"] = self.inputs.out_file if not isdefined(outputs["out_file"]): outputs["out_file"] = op.abspath(self._gen_outfilename()) else: outputs["out_file"] = op.abspath(outputs["out_file"]) return outputs def _gen_filename(self, name): if name == "out_file": return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_files[0]) return name + "_MRTransform.mif" class MRMathInputSpec(MRTrix3BaseInputSpec): in_file = File( exists=True, argstr="%s", mandatory=True, position=-3, desc="input image" ) out_file = File(argstr="%s", mandatory=True, position=-1, desc="output image") operation = traits.Enum( "mean", "median", "sum", "product", "rms", "norm", "var", "std", "min", "max", "absmax",
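# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the interface definitions above): wiring
# two of these interfaces into a small nipype workflow that averages the
# b=0 volumes of a DWI series. Node names and file names are made up; the
# DWIExtract in_file would be set to a real image before running, and the
# MRMath axis trait is assumed to behave as in upstream nipype.
import nipype.pipeline.engine as pe
import nipype.interfaces.mrtrix3 as mrt

extract_b0 = pe.Node(mrt.DWIExtract(bzero=True, out_file='b0vols.mif'),
                     name='extract_b0')
# extract_b0.inputs.in_file = 'dwi.mif'   # must point at an existing file

mean_b0 = pe.Node(mrt.MRMath(operation='mean', axis=3, out_file='b0_mean.mif'),
                  name='mean_b0')

wf = pe.Workflow(name='b0_mean')
wf.connect(extract_b0, 'out_file', mean_b0, 'in_file')
# wf.run()  # would call `dwiextract -bzero ...` followed by `mrmath ... mean ...`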
#!/usr/bin/env python3 """@@@ program: Constants.py Creation Date: cr 2017.03~ Last Update: 200416, more refactoring of constants Version: 1.0 Purpose: Contains a hodgepodge of constants, labels and indices that are often used by various programs. These are uniform, fundamental definitions that are used in all the programs but don't really have a home anywhere. Having one file that contains them all helps to keep everything regular. Comments: 180904: Constants has been updated to include the Turner Energy Rule (TER) parameters because the concepts developed to build chreval and the upgrades of the vsfold series will probably be inherited to do RNA structure prediction eventually. """ # ################################################################# # ############### General configuration CONSTANTS ############### # ############### settings used in FreeEnergy ############### # ################################################################# # vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv # These are used to assign parameters in FreeEnergy() INFINITY = 1000.0 """@ 161019wkd: I know infinity is not 1000 kcal/mol, but I need to set some upper bound in the program where generally nothing can be found by typical free energy. Generally this appears to be a large enough number to be treated as "infinity". I made it adjustable for the sake of raising the bar if necessary. 190403wkd: In an older version of the code, I still needed this for declaring values in the program. However, since the end of Dec 2018, I introduced my newer approach that includes entropy corrections in the blank regions, and this seems to eliminate the need for this artificial setting. Nevertheless, for starting search calculations, it is still convenient to have this very large positive free energy as a starting value so that almost every location resets to a more realistic value. """ kB = 0.0019872041 # [kcal/mol] (Boltzmann constant) # source: https://en.wikipedia.org/wiki/Boltzmann_constant T37C = 310.15 # [K] temperature at 37 C in Kelvin T_0C = 273.15 # [K] temperature at 0 C in Kelvin # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # ################################################################# # ################################################################# # ######################## Control labels ####################### # ################################################################# # vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv # Used for testing etc. in various modules.
In particular, it is used # by SO_dG_barrier and CtData to filter LThread data It is also used # by RNAModules and ChromatinModules blm_labels = {'B' : True, 'I' : True, 'M' : True } skip_labels = {'bgn' : True, 'end' : True, '-' : True } base_labels = {'B' : True, 'I' : True, 'M' : True, 'S' : True, 'K' : True, 'W' : True } pk_labels = {'K' : True, 'R' : True } # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # ################################################################# # ################################################################# # ############### General configuration CONSTANTS ############### # ############### used in 1D structure notation ############### # ################################################################# # vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv # used for PKs and parallel stems # this notation at least works with VARNA # used for PKs and parallel stems # this notation at least works with VARNA num2lpr = { 0 : '(', 1 : '[', 2 : '{', 3 : '<', 4 : 'A', 5 : 'B', 6 : 'C', 7 : 'D', 8 : 'E', 9 : 'F', 10 : 'G', 11 : 'H', 12 : 'I', 13 : 'J', 14 : 'K', 15 : 'L', 16 : 'M', 17 : 'N', # 18 : 'O', # doesn't work beyond N # 19 : 'P', # 20 : 'Q', # 21 : 'R', # 22 : 'S', # 23 : 'T', # 24 : 'U', # 25 : 'V', # 26 : 'W', # 27 : 'X', # 28 : 'Y', # 29 : 'Z', } # used for PKs and parallel stems # this notation at least works with VARNA num2rpr = { 0 : ')', 1 : ']', 2 : '}', 3 : '>', 4 : 'a', 5 : 'b', 6 : 'c', 7 : 'd', 8 : 'e', 9 : 'f', 10 : 'g', 11 : 'h', 12 : 'i', 13 : 'j', 14 : 'k', 15 : 'l', 16 : 'm', 17 : 'n', # 18 : 'o', # doesn't work beyond N # 19 : 'p', # 20 : 'q', # 21 : 'r', # 22 : 's', # 23 : 't', # 24 : 'u', # 25 : 'v', # 26 : 'w', # 27 : 'x', # 28 : 'y', # 29 : 'z', } lpr2num = { '(' : 0, '[' : 1, '{' : 2, '<' : 3, 'A' : 4, 'B' : 5, 'C' : 6, 'D' : 7, 'E' : 8, 'F' : 9, 'G' : 10, 'H' : 11, 'I' : 12, 'J' : 13, 'K' : 14, 'L' : 15, 'M' : 16, 'N' : 17, # 'O' : 18, # doesn't work beyond N # 'P' : 19, # 'Q' : 20, # 'R' : 21, # 'S' : 22, # 'T' : 23, # 'U' : 24, # 'V' : 25, # 'W' : 26, # 'X' : 27, # 'Y' : 28, # 'Z' : 29, } rpr2num = { ')' : 0, ']' : 1, '}' : 2, '>' : 3, 'a' : 4, 'b' : 5, 'c' : 6, 'd' : 7, 'e' : 8, 'f' : 9, 'g' : 10, 'h' : 11, 'i' : 12, 'j' : 13, 'k' : 14, 'l' : 15, 'm' : 16, 'n' : 17, # 'o' : 18, # doesn't work beyond N # 'p' : 19, # 'q' : 20, # 'r' : 21, # 's' : 22, # 't' : 23, # 'u' : 24, # 'v' : 25, # 'w' : 26, # 'x' : 27, # 'y' : 28, # 'z' : 29, } """@ the next two dictionaries can be used to increment between index i and i+1 and reference a particular character. For example, say you are using "Ee". Then lpr2num[E] = 8 / rpr2num[e] = 8, PKrndx[rpr2num[e]] = 6 and so if we increment to 7, rpr2num[PKfndx[7]], num2lpr[7] = 'F', num2rpr[7] = 'f'. 
""" PKfndx = { 0 : 1, # ['[', ']'], 1 : 3, # ['<', '>'], 2 : 4, # ['A', 'a'], 3 : 5, # ['B', 'b'], 4 : 6, # ['C', 'c'], 5 : 7, # ['D', 'd'], 6 : 8, # ['E', 'e'], 7 : 9, # ['F', 'f'], 8 : 10, # ['G', 'g'], 9 : 11, # ['H', 'h'], 10 : 12, # ['I', 'i'], 11 : 13, # ['J', 'j'], 12 : 14, # ['K', 'k'], 13 : 15, # ['L', 'l'], 14 : 16, # ['M', 'm'], 15 : 17, # ['N', 'n'], # 16 : 18, # ['O', 'o'], # doesn't work beyond N # 17 : 19, # ['P', 'p'], # 18 : 20, # ['Q', 'q'], # 19 : 21, # ['R', 'r'], # 20 : 22, # ['S', 's'], # 21 : 23, # ['T', 't'], # 22 : 24, # ['U', 'u'], # 23 : 25, # ['V', 'v'], # 24 : 26, # ['W', 'w'], # 25 : 27, # ['X', 'x'], # 26 : 28, # ['Y', 'y'], # 27 : 29, # ['Z', 'z'], } PKrndx = { 1 : 0, # ['[', ']'], 3 : 1, # ['<', '>'], 4 : 2, # ['A', 'a'], 5 : 3, # ['B', 'b'], 6 : 4, # ['C', 'c'], 7 : 5, # ['D', 'd'], 8 : 6, # ['E', 'e'], 9 : 7, # ['F', 'f'], 10 : 8, # ['G', 'g'], 11 : 9, # ['H', 'h'], 12 : 10, # ['I', 'i'], 13 : 11, # ['J', 'j'], 14 : 12, # ['K', 'k'], 15 : 13, # ['L', 'l'], 16 : 14, # ['M', 'm'], 17 : 15, # ['N', 'n'], # 18 : 16, # ['O', 'o'], # doesn't work beyond N # 19 : 17, # ['P', 'p'], # 20 : 18, # ['Q', 'q'], # 21 : 19, # ['R', 'r'], # 22 : 20, # ['S', 's'], # 23 : 21, # ['T', 't'], # 24 : 22, # ['U', 'u'], # 25 : 23, # ['V', 'v'], # 26 : 24, # ['W', 'w'], # 27 : 25, # ['X', 'x'], # 28 : 26, # ['Y', 'y'], # 29 : 27, # ['Z', 'z'], } # The final dictionary is for counting the number of items on the # stack; i.e., the stack pointer. pointer = { 0 : 0, # ['(', ')'], 1 : 0, # ['[', ']'], 2 : 0, # ['{', '}'], 3 : 0, # ['<', '>'], 4 : 0, # ['A', 'a'], 5 : 0, # ['B', 'b'], 6 : 0, # ['C', 'c'], 7 : 0, # ['D', 'd'], 8 : 0, # ['E', 'e'], 9 :
xydspec # text # primitive # labelbar # legend # cafield # sffield # vffield # rval = PlotIds() if (lst[0] is None): rval.nbase = 0 else: rval.nbase = len(lst[0]) rval.base = lst[0] if (lst[1] is None): rval.ncontour = 0 else: rval.ncontour = len(lst[1]) rval.contour = lst[1] if (lst[2] is None): rval.nvector = 0 else: rval.nvector = len(lst[2]) rval.vector = lst[2] if (lst[3] is None): rval.nstreamline = 0 else: rval.nstreamline = len(lst[3]) rval.streamline = lst[3] if (lst[4] is None): rval.nmap = 0 else: rval.nmap = len(lst[4]) rval.map = lst[4] if (lst[5] is None): rval.nxy = 0 else: rval.nxy = len(lst[5]) rval.xy = lst[5] if (lst[6] is None): rval.nxydspec = 0 else: rval.nxydspec = len(lst[6]) rval.xydspec = lst[6] if (lst[7] is None): rval.ntext = 0 else: rval.ntext = len(lst[7]) rval.text = lst[7] if (lst[8] is None): rval.nprimitive = 0 else: rval.nprimitive = len(lst[8]) rval.primitive = lst[8] if (lst[9] is None): rval.nlabelbar = 0 else: rval.nlabelbar = len(lst[9]) rval.labelbar = lst[9] if (lst[10] is None): rval.nlegend = 0 else: rval.nlegend = len(lst[10]) rval.legend = lst[10] if (lst[11] is None): rval.ncafield = 0 else: rval.ncafield = len(lst[11]) rval.cafield = lst[11] if (lst[12] is None): rval.nsffield = 0 else: rval.nsffield = len(lst[12]) rval.sffield = lst[12] if (lst[13] is None): rval.nvffield = 0 else: rval.nvffield = len(lst[13]) rval.vffield = lst[13] return rval def _pobj2lst(pobj): # # A Python list of PlotIds is in the order: # # base # contour # vector # streamline # map # xy # xydspec # text # primitive # labelbar # legend # cafield # sffield # vffield # # Converts the attributes of a PlotId object to a Python list. # if (pobj == 0): return [None,None,None,None,None,None,None,None,None,None,None,None,None,None] else: return [pobj.base,pobj.contour,pobj.vector,pobj.streamline,pobj.map, \ pobj.xy,pobj.xydspec,pobj.text,pobj.primitive,pobj.labelbar, \ pobj.legend,pobj.cafield,pobj.sffield,pobj.vffield] def _pseq2lst(pseq): # # Takes a list of Python plot objects and converts it to # a list of lists that will be converted to a list of PlotId # structures in the panel argument. # lst = [] for i in range(len(pseq)): lst.append(_pobj2lst(pseq[i])) return lst # # Set a default missing value and a flag indicating # whether a fill value was already present. # def _set_default_msg(fv,default_msg=1.e20): if fv is None: return default_msg,0 else: return fv,1 def _set_spc_res(resource_name,value): # # Change True and False values to 1 and 0 and leave all other # values unchaged. # lval = value if (value == True): lval = 1 elif (value == False): lval = 0 # # Set the special resource values. # # These resources must stay in this order! # If you add new resources, add them at the # end of the list. See also _set_spc_defaults. 
# if (resource_name == "Maximize"): set_nglRes_i(0, lval) elif (resource_name == "Draw"): set_nglRes_i(1, lval) elif (resource_name == "Frame"): set_nglRes_i(2, lval) elif (resource_name == "Scale"): set_nglRes_i(3, lval) elif (resource_name == "Debug"): set_nglRes_i(4, lval) elif (resource_name == "PaperOrientation"): if(isinstance(lval, str)): if(lval.lower() == "portrait"): set_nglRes_i(5, 0) elif(lval.lower() == "landscape"): set_nglRes_i(5, 6) elif(lval.lower() == "auto"): set_nglRes_i(5, 3) else: print("_set_spc_res: Unknown value for " + resource_name) else: set_nglRes_i(5, lval) elif (resource_name == "PaperWidth"): set_nglRes_f(6, lval) elif (resource_name == "PaperHeight"): set_nglRes_f(7, lval) elif (resource_name == "PaperMargin"): set_nglRes_f(8, lval) elif (resource_name == "PanelCenter"): set_nglRes_i(9, lval) elif (resource_name == "PanelRowSpec"): set_nglRes_i(10, lval) elif (resource_name == "PanelXWhiteSpacePercent"): set_nglRes_f(11, lval) elif (resource_name == "PanelYWhiteSpacePercent"): set_nglRes_f(12, lval) elif (resource_name == "PanelBoxes"): set_nglRes_i(13, lval) elif (resource_name == "PanelLeft"): set_nglRes_f(14, lval) elif (resource_name == "PanelRight"): set_nglRes_f(15, lval) elif (resource_name == "PanelBottom"): set_nglRes_f(16, lval) elif (resource_name == "PanelTop"): set_nglRes_f(17, lval) elif (resource_name == "PanelInvsblTop"): set_nglRes_f(18, lval) elif (resource_name == "PanelInvsblLeft"): set_nglRes_f(19, lval) elif (resource_name == "PanelInvsblRight"): set_nglRes_f(20, lval) elif (resource_name == "PanelInvsblBottom"): set_nglRes_f(21, lval) elif (resource_name == "PanelSave"): set_nglRes_i(22, lval) elif (resource_name == "SpreadColors"): set_nglRes_i(23, lval) elif (resource_name == "SpreadColorStart"): set_nglRes_i(24, lval) elif (resource_name == "SpreadColorEnd"): set_nglRes_i(25, lval) elif (resource_name == "PanelLabelBarOrientation"): set_nglRes_i(26, lval) elif (resource_name == "PanelLabelBar" and len(resource_name) == 13): set_nglRes_i(27, lval) elif (resource_name == "PanelLabelBarXF"): set_nglRes_f(28, lval) elif (resource_name == "PanelLabelBarYF"): set_nglRes_f(29, lval) elif (resource_name == "PanelLabelBarLabelFontHeightF"): set_nglRes_f(30, lval) elif (resource_name == "PanelLabelBarWidthF"): set_nglRes_f(31, lval) elif (resource_name == "PanelLabelBarHeightF"): set_nglRes_f(32, lval) elif (resource_name == "PanelLabelBarOrthogonalPosF"): set_nglRes_f(33, lval) elif (resource_name == "PanelLabelBarParallelPosF"): set_nglRes_f(34, lval) elif (resource_name == "PanelLabelBarPerimOn"): set_nglRes_i(35, lval) elif (resource_name == "PanelLabelBarAlignment"): set_nglRes_i(36, lval) elif (resource_name == "PanelLabelBarLabelAutoStride"): set_nglRes_i(37, lval) elif (resource_name == "PanelFigureStrings" and len(resource_name) == 18): set_nglRes_c(38, lval) elif (resource_name == "PanelFigureStringsCount"): set_nglRes_i(39, lval) elif (resource_name == "PanelFigureStringsJust"): set_nglRes_i(40, lval) elif (resource_name == "PanelFigureStringsOrthogonalPosF"): set_nglRes_f(41, lval) elif (resource_name == "PanelFigureStringsParallelPosF"): set_nglRes_f(42, lval) elif (resource_name == "PanelFigureStringsPerimOn"): set_nglRes_i(43, lval) elif (resource_name == "PanelFigureStringsBackgroundFillColor"): set_nglRes_i(44, lval) elif (resource_name == "PanelFigureStringsFontHeightF"): set_nglRes_f(45, lval) elif (resource_name == "AppResFileName"): set_nglRes_s(46, lval) elif (resource_name == "XAxisType"): if(isinstance(lval, str)): 
if(lval.lower() == "irregularaxis"): set_nglRes_i(47, 0) elif(lval.lower() == "linearaxis"): set_nglRes_i(47, 1) elif(lval.lower() == "logaxis"): set_nglRes_i(47, 2) else: print("_set_spc_res: Unknown value for " + resource_name) else: set_nglRes_i(47, lval) elif (resource_name == "YAxisType"): if(isinstance(lval, str)): if(lval.lower() == "irregularaxis"): set_nglRes_i(48, 0) elif(lval.lower() == "linearaxis"): set_nglRes_i(48, 1) elif(lval.lower() == "logaxis"): set_nglRes_i(48, 2) else: print("_set_spc_res: Unknown value for " + resource_name) else: set_nglRes_i(48, lval) elif (resource_name == "PointTickmarksOutward"): set_nglRes_i(49, lval) elif (resource_name == "XRefLine"): set_nglRes_f(50, lval) elif (resource_name == "YRefLine"): set_nglRes_f(51, lval) elif (resource_name == "XRefLineThicknessF"): set_nglRes_f(52, lval) elif (resource_name == "YRefLineThicknessF"): set_nglRes_f(53, lval) elif (resource_name == "XRefLineColor"): set_nglRes_i(54, lval) elif (resource_name == "YRefLineColor"): set_nglRes_i(55, lval) elif (resource_name == "MaskLambertConformal"): set_nglRes_i(56, lval) elif (resource_name == "MaskLambertConformalOutlineOn"): set_nglRes_i(57, lval) else: print("_set_spc_res: Unknown special resource ngl" + resource_name) def _check_res_value(resvalue,strvalue,intvalue): # # Function for checking a resource value that can either be of # type string or integer (like color resource values). # if( (isinstance(resvalue, str) and \ resvalue.lower() == strvalue.lower()) or \ (isinstance(resvalue, integer_types) and resvalue == intvalue)): return(True) else: return(False) def _set_tickmark_res(reslist,reslist1): # # Set tmEqualizeXYSizes to True so that tickmark lengths and font # heights are the same size on both axes. # if(("nglScale" in reslist and reslist["nglScale"] > 0) or (not ("nglScale" in reslist))): reslist1["tmEqualizeXYSizes"] = True def _set_contour_res(reslist,reslist1): # # Set some contour resources of which we either don't like the NCL # defaults, or we want to set something on behalf of the user. # if("cnFillOn" in reslist and reslist["cnFillOn"] > 0): if ( not ("cnInfoLabelOn" in reslist)): reslist1["cnInfoLabelOn"] = False if ( not ("pmLabelBarDisplayMode" in reslist) and (not ("lbLabelBarOn" in reslist) or "lbLabelBarOn" in reslist and reslist["lbLabelBarOn"] > 0)): reslist1["pmLabelBarDisplayMode"] = "Always" # # The ContourPlot object does not recognize the lbLabelBarOn resource # so we have to remove it after we've used it. # if ("lbLabelBarOn" in reslist1): del reslist1["lbLabelBarOn"] # # It used to be that nglSpreadColors was set to True by default. # # With PyNGL 1.5.0, however, this resource is somewhat obsolete, # as cnFillPalette should now be used. nglSpreadColors will only # be set back to True if nglSpreadColorStart set explicitly to # something other than 2 and/or nglSpreadColorEnd is set explicitly # to something other than -1. # if(("nglSpreadColorStart" in reslist and reslist["nglSpreadColorStart"] != 2) or ("nglSpreadColorEnd" in reslist and reslist["nglSpreadColorEnd"] != -1)): lval = 1 set_nglRes_i(23, lval) # # Check for "plural" resources that only take effect if their # corresponding "Mono" resource is set to False, and set the # Mono resource on behalf of the user. 
# if("cnLineDashPatterns" in reslist): if ( not ("cnMonoLineDashPattern" in reslist)): reslist1["cnMonoLineDashPattern"] = False if("cnLineColors" in reslist): if (not ("cnMonoLineColor" in reslist)): reslist1["cnMonoLineColor"] = False if("cnLineThicknesss" in reslist): if (not ("cnMonoLineThickness" in reslist)): reslist1["cnMonoLineThickness"] = False if("cnLevelFlags" in reslist): if (not ("cnMonoLevelFlag" in reslist)): reslist1["cnMonoLevelFlag"] = False if("cnFillPatterns" in reslist): if (not ("cnMonoFillPattern" in reslist)): reslist1["cnMonoFillPattern"] = False if("cnFillScales" in reslist): if (not ("cnMonoFillScale" in reslist)): reslist1["cnMonoFillScale"] = False if("cnLineLabelFontColors" in reslist): if (not ("cnMonoLineLabelFontColor" in reslist)): reslist1["cnMonoLineLabelFontColor"] = False # # Set some tickmark resources. # _set_tickmark_res(reslist,reslist1) def _set_vector_res(reslist,reslist1): # # Set some vector resources of which we either don't like the NCL # defaults, or we want to set something on behalf of the user. # # Vectors can be colored one of two ways, either with colored line # vectors or filled colored vectors, or wind barbs. Any one of these # would warrant a labelbar. # # Don't bother setting the the vcMonoLineArrowColor, # vcMonoFillArrowEdgeColor, vcMonoFillArrowFillColor, or # vcMonoWindBarbColor resources to False if vcLevelColors is # set, because it all depends on if vcGlyphStyle is set a certain way. # Put the responsibility on the user # if( ("vcMonoLineArrowColor" in reslist and (_check_res_value(reslist["vcMonoLineArrowColor"],"False",0) or not reslist["vcMonoLineArrowColor"])) or ("vcMonoFillArrowFillColor" in reslist and (_check_res_value(reslist["vcMonoFillArrowFillColor"],"False",0) or not reslist["vcMonoFillArrowFillColor"]))
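# ---------------------------------------------------------------------------
# Illustrative sketch (not from the PyNGL sources above): one way a caller's
# resource dictionary could be split so that "ngl"-prefixed entries are
# routed through _set_spc_res (defined earlier) while everything else is
# passed on to the HLU plot object. split_resources and the sample resource
# names are made up; the snippet assumes the surrounding module context.
def split_resources(reslist):
    special, plot = {}, {}
    for name, value in reslist.items():
        if name.startswith("ngl"):
            special[name[3:]] = value    # e.g. "nglMaximize" -> "Maximize"
        else:
            plot[name] = value
    return special, plot

user_res = {"nglMaximize": True, "nglFrame": False,
            "cnFillOn": True, "cnLinesOn": False}
special, plot = split_resources(user_res)
for name, value in special.items():
    _set_spc_res(name, value)            # special resources set globally
# `plot` would then be applied to the ContourPlot/VectorPlot/... object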
not return control flow.""" self.intermediates.append( ExpressionIntermediate.Terminal(expression) ) def pushMove(self, typed_expression): """Given a typed expression, allocate space for it on the stack and 'move' it (copy its bits, but don't inc or decref it) """ return self.push( typed_expression.expr_type, lambda other: other.expr.store(typed_expression.nonref_expr), wantsTeardown=False ) def let(self, e1, e2): v = self.functionContext.allocateLetVarname() return native_ast.Expression.Let( var=v, val=e1, within=e2(native_ast.Expression.Variable(name=v)) ) def pushReference(self, type, expression): """Push a reference to an object that's guaranteed to be alive for the duration of the expression.""" type = typeWrapper(type) varname = self.functionContext.allocateLetVarname() self.intermediates.append( ExpressionIntermediate.Simple(name=varname, expr=expression) ) return TypedExpression(self, native_ast.Expression.Variable(varname), type, True) def allocateUninitializedSlot(self, type): type = typeWrapper(type) varname = self.functionContext.allocateStackVarname() resExpr = TypedExpression( self, native_ast.Expression.StackSlot(name=varname, type=type.getNativeLayoutType()), type, True ) if not type.is_pod: with self.subcontext() as sc: type.convert_destroy(self, resExpr) self.teardowns.append( native_ast.Teardown.ByTag( tag=varname, expr=sc.result ) ) return resExpr def markUninitializedSlotInitialized(self, slot): if slot.expr_type.is_pod: return assert slot.expr.matches.StackSlot self.pushEffect(native_ast.Expression.ActivatesTeardown(slot.expr.name)) def pushStackSlot(self, nativeType): varname = self.functionContext.allocateStackVarname() return native_ast.Expression.StackSlot(name=varname, type=nativeType) def push(self, type, callback, wantsTeardown=True): """Allocate a stackvariable of type 'type' and pass it to 'callback' which should return a native_ast.Expression or TypedExpression(None) initializing it. 
""" type = typeWrapper(type) if type.is_pod: wantsTeardown = False varname = self.functionContext.allocateStackVarname() resExpr = TypedExpression( self, native_ast.Expression.StackSlot(name=varname, type=type.getNativeLayoutType()), type, True ) expr = callback(resExpr) if expr is None: expr = native_ast.nullExpr if isinstance(expr, TypedExpression): assert expr.expr_type.typeRepresentation is type(None), expr.expr_type # noqa expr = expr.expr else: assert isinstance(expr, native_ast.Expression) self.intermediates.append( ExpressionIntermediate.StackSlot( name=varname, expr=expr if not wantsTeardown else expr >> native_ast.Expression.ActivatesTeardown(varname) ) ) if wantsTeardown: with self.subcontext() as sc: type.convert_destroy(self, resExpr) self.teardowns.append( native_ast.Teardown.ByTag( tag=varname, expr=sc.result ) ) return resExpr def subcontext(self): class Scope: def __init__(scope): scope.intermediates = None scope.teardowns = None def __enter__(scope): scope.intermediates = self.intermediates scope.teardowns = self.teardowns self.intermediates = [] self.teardowns = [] scope.expr = None return scope def __exit__(scope, *args): scope.result = self.finalize(scope.expr) self.intermediates = scope.intermediates self.teardowns = scope.teardowns return Scope() def whileLoop(self, conditionExpr): if isinstance(conditionExpr, TypedExpression): conditionExpr = conditionExpr.nonref_expr class Scope: def __init__(scope): scope.intermediates = None scope.teardowns = None def __enter__(scope): scope.intermediates = self.intermediates scope.teardowns = self.teardowns self.intermediates = [] self.teardowns = [] def __exit__(scope, *args): result = self.finalize(None) self.intermediates = scope.intermediates self.teardowns = scope.teardowns self.pushEffect( native_ast.Expression.While( cond=conditionExpr, while_true=result, orelse=native_ast.nullExpr ) ) return Scope() def loop(self, countExpr): class Scope: def __init__(scope): scope.intermediates = None scope.teardowns = None def __enter__(scope): scope.counter = self.push(int, lambda counter: counter.expr.store(native_ast.const_int_expr(0))) scope.intermediates = self.intermediates scope.teardowns = self.teardowns self.intermediates = [] self.teardowns = [] return scope.counter def __exit__(scope, *args): result = self.finalize(None) self.intermediates = scope.intermediates self.teardowns = scope.teardowns self.pushEffect( native_ast.Expression.While( cond=scope.counter.nonref_expr.lt(countExpr.nonref_expr), while_true=result >> scope.counter.expr.store( scope.counter.nonref_expr.add(native_ast.const_int_expr(1)) ), orelse=native_ast.nullExpr ) ) return Scope() def switch(self, expression, targets, wantsBailout): results = {} if wantsBailout: targets = tuple(targets) + (None,) else: targets = tuple(targets) class Scope: def __init__(scope, target): scope.intermediates = [] scope.teardowns = [] scope.target = target def __enter__(scope): scope.intermediates, self.intermediates = self.intermediates, scope.intermediates scope.teardowns, self.teardowns = self.teardowns, scope.teardowns def __exit__(scope, *args): results[scope.target] = self.finalize(None) scope.intermediates, self.intermediates = self.intermediates, scope.intermediates scope.teardowns, self.teardowns = self.teardowns, scope.teardowns class MainScope: def __init__(scope): pass def __enter__(scope): return [(target, Scope(target)) for target in targets] def __exit__(scope, t, v, traceback): if t is None: expr = results.get(targets[-1], native_ast.nullExpr) for t in 
reversed(targets[:-1]): expr = native_ast.Expression.Branch( cond=expression.cast(native_ast.Int64).eq(native_ast.const_int_expr(t)), true=results.get(t, native_ast.nullExpr), false=expr ) self.pushEffect(expr) return MainScope() def ifelse(self, condition): if isinstance(condition, TypedExpression): condition = condition.toBool().nonref_expr results = {} class Scope: def __init__(scope, isTrue): scope.intermediates = [] scope.teardowns = [] scope.isTrue = isTrue def __enter__(scope): scope.intermediates, self.intermediates = self.intermediates, scope.intermediates scope.teardowns, self.teardowns = self.teardowns, scope.teardowns def __exit__(scope, *args): results[scope.isTrue] = self.finalize(None) scope.intermediates, self.intermediates = self.intermediates, scope.intermediates scope.teardowns, self.teardowns = self.teardowns, scope.teardowns class MainScope: def __init__(scope): pass def __enter__(scope): return Scope(True), Scope(False) def __exit__(scope, t, v, traceback): if t is None: true = results.get(True, native_ast.nullExpr) false = results.get(False, native_ast.nullExpr) if condition.matches.Constant: if condition.val.truth_value(): self.pushEffect(true) else: self.pushEffect(false) else: self.pushEffect( native_ast.Expression.Branch( cond=condition, true=true, false=false ) ) return MainScope() def finalize(self, expr, exceptionsTakeFrom=None): if expr is None: expr = native_ast.nullExpr elif isinstance(expr, native_ast.Expression): pass else: assert isinstance(expr, TypedExpression), type(expr) expr = expr.nonref_expr if len(self.intermediates): expr = native_ast.Expression.ApplyIntermediates(base=expr, intermediates=self.intermediates) if self.teardowns: expr = native_ast.Expression.Finally(expr=expr, teardowns=self.teardowns) if exceptionsTakeFrom and expr.couldThrow() and exceptionsTakeFrom.filename: expr = native_ast.Expression.ExceptionPropagator( expr=expr, varname=self.functionContext.allocateLetVarname(), handler=runtime_functions.add_traceback.call( native_ast.const_utf8_cstr(self.functionContext.name), native_ast.const_utf8_cstr(exceptionsTakeFrom.filename), native_ast.const_int_expr(exceptionsTakeFrom.line_number) ) >> native_ast.Expression.Throw( expr=native_ast.Expression.Constant( val=native_ast.Constant.NullPointer(value_type=native_ast.UInt8.pointer()) ) ) ) return expr def call_function_pointer(self, funcPtr, args, returnType): # force arguments to a type appropriate for argpassing native_args = [a.as_native_call_arg() for a in args if not a.expr_type.is_empty] if returnType.is_pass_by_ref: nativeFunType = native_ast.Type.Function( output=native_ast.Void, args=[returnType.getNativePassingType()] + [a.expr_type.getNativePassingType() for a in args], varargs=False, can_throw=True ) return self.push( returnType, lambda output_slot: native_ast.CallTarget.Pointer(expr=funcPtr.cast(nativeFunType.pointer())) .call(output_slot.expr, *native_args) ) else: nativeFunType = native_ast.Type.Function( output=returnType.getNativePassingType(), args=[a.expr_type.getNativePassingType() for a in args], varargs=False, can_throw=True ) return self.pushPod( returnType, native_ast.CallTarget.Pointer(expr=funcPtr.cast(nativeFunType.pointer())).call(*native_args) ) @staticmethod def mapFunctionArguments(functionOverload: FunctionOverload, args, kwargs) -> OneOf(str, ListOf(FunctionArgMapping)): """Figure out how to call 'functionOverload' with 'args' and 'kwargs'. This takes care of doing things like mapping keyword arguments, default values, etc. 
It does _not_ deal at all with types, so it's fine to use the typed-form of a non-typed function. The args in 'args/kwargs' can be any object. Args: functionOverload - a FunctionOverload we're trying to map to args - a list of positional arguments. They can be of any type. kwargs - a dict of keyword arguments. They can be of any type. Returns: A ListOf(FunctionArgMapping) mapping to the arguments of the function, in the order in which the names appear. Otherwise, a string error message. """ name = functionOverload.name outArgs = ListOf(FunctionArgMapping)() curTargetIx = 0 minPositional = functionOverload.minPositionalCount() maxPositional = functionOverload.maxPositionalCount() consumedPositionalNames = set() if minPositional == maxPositional: positionalMsg = f"{minPositional}" else: positionalMsg = f"from {minPositional} to {maxPositional}" if args and not functionOverload.args: return f"{name}() takes {positionalMsg} positional arguments but {len(args)} were given" if kwargs and not functionOverload.args: return f"{name}() got an unexpected keyword argument '{list(kwargs)[0]}'" for a in args: if curTargetIx >= len(functionOverload.args): return f"{name}() takes {positionalMsg} positional arguments but {len(args)} were given" if functionOverload.args[curTargetIx].isKwarg: return f"{name}() takes {positionalMsg} positional arguments but {len(args)} were given" if functionOverload.args[curTargetIx].isStarArg: if len(outArgs) <= curTargetIx: outArgs.append(FunctionArgMapping.StarArgs(ListOf(object)([a]))) else: outArgs[-1].value.append(a) else: consumedPositionalNames.add(functionOverload.args[curTargetIx].name) outArgs.append(FunctionArgMapping.Arg(value=a)) curTargetIx += 1 unconsumedKwargs = dict(kwargs) while len(outArgs) < len(functionOverload.args): arg = functionOverload.args[len(outArgs)] if arg.isStarArg: outArgs.append(FunctionArgMapping.StarArgs()) elif arg.isKwarg: for name in unconsumedKwargs: if name in consumedPositionalNames: return f"{name}() got multiple values for argument '{name}'" outArgs.append(FunctionArgMapping.Kwargs(value=unconsumedKwargs.items())) assert len(outArgs) == len(functionOverload.args) unconsumedKwargs = {} elif arg.name in kwargs: outArgs.append(FunctionArgMapping.Arg(value=unconsumedKwargs[arg.name])) del unconsumedKwargs[arg.name] elif arg.defaultValue is not None: outArgs.append(FunctionArgMapping.Constant(value=arg.defaultValue[0])) else: return f"{name}() missing required positional argument: {arg.name}" for argName in unconsumedKwargs: return f"{name}() got an unexpected keyword argument '{argName}'" return outArgs def buildFunctionArguments(self, functionOverload: FunctionOverload, args, kwargs): """Figure out how to call 'functionOverload' with 'args' and 'kwargs'. This takes care of doing things like mapping keyword arguments, default values, etc. It does _not_ deal at all with types, so it's fine to use the typed-form of a non-typed function. This function may generate code to construct the relevant arguments. Args: functionOverload - a FunctionOverload we're trying to map to args - a list of positional argument TypedExpression objects. kwargs - a dict of keyword argument TypedExpression objects. Returns: If we can map, a list of TypedExpression objects mapping to the argument names of the function, in the order that they appear. Otherwise, None, and an exception will have been generated. 
""" argsOrErr = self.mapFunctionArguments(functionOverload, args, kwargs) if isinstance(argsOrErr, str): self.pushException(TypeError, argsOrErr) return outArgs = [] for mappingArg in argsOrErr: if mappingArg.matches.Arg: outArgs.append(mappingArg.value) elif mappingArg.matches.Constant: outArgs.append(self.constant(mappingArg.value, allowArbitrary=True)) elif mappingArg.matches.StarArgs: outArgs.append(self.makeStarArgTuple(mappingArg.value)) elif mappingArg.matches.Kwargs: outArgs.append(self.makeKwargDict(dict(mappingArg.value))) return outArgs @staticmethod def computeOverloadSignature(functionOverload: FunctionOverload, args, kwargs): """Figure out the concrete type assignments we'd need to give to each _argument_ to call 'functionOverload' Args: functionOverload - a FunctionOverload we're trying to map to args - a list of positional argument Wrapper objects. kwargs - a dict of keyword argument Wrapper objects. Returns: If we can map, a pair (argsOut, kwargsOut) giving the updated type wrapper assignments. There will be one entry in each of argsOut/kwargsOut for each entry in the inputs, updated to reflect the required typing judgments that are applied by the overload's signature. Note that
= t.replace("Pky ", "<NAME> ") t = t.replace("Ii ", "II ") t = t.replace("Iii ", "III ") t = t.replace("Iv ", "IV ") t = t.replace("Communic ", "Communications ") t = t.replace("Postdoc ", "Postdoctoral ") t = t.replace("Tech ", "Technician ") t = t.replace("Vp ", "Vice President ") t = t.replace(" @", "/") # restore / t = t.replace(" @", "/") t = t.replace(" !", ",") # restore , t = t.replace(" #", "-") # restore - return t[:-1] # Take off the trailing space def get_position_uris(person_uri): """ Given a person_uri, return a list of the position_uris for that person. If none, return an empty list """ from vivofoundation import vivo_sparql_query position_uris = [] query = """ # Return the uri of positions for a person SELECT ?position_uri WHERE { <person_uri> vivo:relatedBy ?position_uri . ?position_uri rdf:type vivo:Position . } group by ?position_uri """ query = query.replace('person_uri', person_uri) result = vivo_sparql_query(query) try: count = len(result["results"]["bindings"]) except: count = 0 i = 0 while i < count: b = result["results"]["bindings"][i] position_uris.append(b['position_uri']['value']) i = i + 1 return position_uris def get_telephone(telephone_uri): """ Given the uri of a telephone number, return the uri, number and type """ from vivofoundation import get_triples telephone = {'telephone_uri':telephone_uri} type = "" triples = get_triples(telephone_uri) try: count = len(triples["results"]["bindings"]) except: count = 0 i = 0 while i < count: b = triples["results"]["bindings"][i] p = b['p']['value'] o = b['o']['value'] if p == "http://www.w3.org/2006/vcard/ns#telephone": telephone['telephone_number'] = o if p == "http://www.w3.org/1999/02/22-rdf-syntax-ns#type": if o.startswith('http://www.w3.org/2006/vcard'): ptype = o[32:] if type == "" or type == "Telephone" and ptype == "Fax" \ or ptype == "Telephone": type = ptype i = i + 1 telephone['telephone_type'] = type return telephone def get_name(name_uri): """ Given the uri of a vcard name entity, get all the data values associated with the entity """ from vivofoundation import get_triples name = {'name_uri':name_uri} triples = get_triples(name_uri) try: count = len(triples["results"]["bindings"]) except: count = 0 i = 0 while i < count: b = triples["results"]["bindings"][i] p = b['p']['value'] o = b['o']['value'] if p == "http://www.w3.org/2006/vcard/ns#givenName": name['given_name'] = o if p == "http://www.w3.org/2006/vcard/ns#familyName": name['family_name'] = o if p == "http://www.w3.org/2006/vcard/ns#additionalName": name['additional_name'] = o if p == "http://www.w3.org/2006/vcard/ns#honorificPrefix": name['honorific_prefix'] = o if p == "http://www.w3.org/2006/vcard/ns#honorificSuffix": name['honorific_suffix'] = o i = i + 1 return name def get_vcard(vcard_uri): """ Given the uri of a vcard, get all the data values and uris associated with the vcard """ from vivofoundation import get_triples from vivofoundation import get_vivo_value vcard = {'vcard_uri':vcard_uri} vcard['telephone_uris'] = [] vcard['email_uris'] = [] triples = get_triples(vcard_uri) try: count = len(triples["results"]["bindings"]) except: count = 0 i = 0 while i < count: b = triples["results"]["bindings"][i] p = b['p']['value'] o = b['o']['value'] if p == "http://www.w3.org/2006/vcard/ns#hasTitle": vcard['title_uri'] = o if p == "http://purl.obolibrary.org/obo/ARG_2000029": vcard['person_uri'] = o if p == "http://www.w3.org/2006/vcard/ns#hasTelephone": vcard['telephone_uris'].append(o) if p == "http://www.w3.org/2006/vcard/ns#hasName": 
vcard['name_uri'] = o if p == "http://www.w3.org/2006/vcard/ns#hasEmail": vcard['email_uris'].append(o) i = i + 1 # And now deref each of the uris to get the data values. vcard['name'] = get_name(vcard['name_uri']) if vcard.get('title_uri', None) is not None: vcard['title'] = get_vivo_value(vcard['title_uri'],'vcard:title') vcard['telephones'] = [] for telephone_uri in vcard['telephone_uris']: vcard['telephones'].append(get_telephone(telephone_uri)) del vcard['telephone_uris'] vcard['email_addresses'] = [] for email_uri in vcard['email_uris']: vcard['email_addresses'].append({ 'email_uri':email_uri, 'email_address':get_vivo_value(email_uri, "vcard:email") }) del vcard['email_uris'] return vcard def get_person(person_uri, get_contact=True): """ Given the URI of a person in VIVO, get the poerson's attributes and return a flat, keyed structure appropriate for update and other applications. To Do: Add get_grants, get_papers, etc as we had previously """ from vivofoundation import get_triples person = {'person_uri': person_uri} triples = get_triples(person_uri) try: count = len(triples["results"]["bindings"]) except: count = 0 i = 0 while i < count: b = triples["results"]["bindings"][i] p = b['p']['value'] o = b['o']['value'] if p == \ "http://vitro.mannlib.cornell.edu/ns/vitro/0.7#mostSpecificType": person['person_type'] = o if p == "http://purl.obolibrary.org/obo/ARG_2000028": person['vcard_uri'] = o if p == "http://www.w3.org/2000/01/rdf-schema#label": person['display_name'] = o if p == "http://vivo.ufl.edu/ontology/vivo-ufl/ufid": person['ufid'] = o if p == "http://vivo.ufl.edu/ontology/vivo-ufl/homeDept": person['homedept_uri'] = o if p == "http://vivo.ufl.edu/ontology/vivo-ufl/privacyFlag": person['privacy_flag'] = o if p == "http://vivo.ufl.edu/ontology/vivo-ufl/gatorlink": person['gatorlink'] = o if p == "http://vivoweb.org/ontology/core#eRACommonsId": person['eracommonsid'] = o i = i + 1 # deref the vcard if get_contact == True: person['vcard'] = get_vcard(person['vcard_uri']) return person def get_degree(degree_uri): """ Given a URI, return an object that contains the degree (educational training) it represents """ degree = {'degree_uri':degree_uri} triples = get_triples(degree_uri) try: count = len(triples["results"]["bindings"]) except: count = 0 i = 0 while i < count: b = triples["results"]["bindings"][i] p = b['p']['value'] o = b['o']['value'] if p == "http://vivoweb.org/ontology/core#majorField": degree['major_field'] = o # deref the academic degree if p == "http://vivoweb.org/ontology/core#degreeEarned": degree['earned_uri'] = o degree['degree_name'] = get_vivo_value(o, 'core:abbreviation') # deref the Institution if p == "http://vivoweb.org/ontology/core#trainingAtOrganization": degree['training_institution_uri'] = o institution = get_organization(o) if 'label' in institution: # home department might be incomplete degree['institution_name'] = institution['label'] # deref the datetime interval if p == "http://vivoweb.org/ontology/core#dateTimeInterval": datetime_interval = get_datetime_interval(o) degree['datetime_interval'] = datetime_interval if 'start_date' in datetime_interval: degree['start_date'] = datetime_interval['start_date'] if 'end_date' in datetime_interval: degree['end_date'] = datetime_interval['end_date'] i = i + 1 return degree def get_position(position_uri): """ Given a URI, return an object that contains the position it represents """ from vivofoundation import get_triples from vivofoundation import get_types from vivofoundation import get_datetime_interval 
from vivofoundation import untag_predicate position = {'position_uri':position_uri} # include position_uri triples = get_triples(position_uri) try: count = len(triples["results"]["bindings"]) except: count = 0 i = 0 while i < count: b = triples["results"]["bindings"][i] p = b['p']['value'] o = b['o']['value'] if p == "http://vivoweb.org/ontology/core#relates": # deref relates. Get the types of the referent. If its an org, # assign the uri of the relates (o) to the org_uri of the # position. Otherwise, assume its the person_uri types = get_types(o) if untag_predicate('foaf:Organization') in types: position['position_orguri'] = o else: position['person_uri'] = o if p == "http://vivo.ufl.edu/ontology/vivo-ufl/hrJobTitle": position['hr_title'] = o if p == "http://www.w3.org/2000/01/rdf-schema#label": position['position_label'] = o if o == "http://vivoweb.org/ontology/core#FacultyPosition": position['position_type'] = o if o == "http://vivoweb.org/ontology/core#Non-FacultyAcademicPosition": position['position_type'] = o if o == "http://vivoweb.org/ontology/vivo-ufl/ClinicalFacultyPosition": position['position_type'] = o if o == "http://vivoweb.org/ontology/vivo-ufl/PostDocPosition": position['position_type'] = o if o == "http://vivoweb.org/ontology/core#LibrarianPosition": position['position_type'] = o if o == "http://vivoweb.org/ontology/core#Non-AcademicPosition": position['position_type'] = o if o == "http://vivoweb.org/ontology/vivo-ufl/StudentAssistant": position['position_type'] = o if o == "http://vivoweb.org/ontology/vivo-ufl/GraduateAssistant": position['position_type'] = o if o == "http://vivoweb.org/ontology/vivo-ufl/Housestaff": position['position_type'] = o if o == "http://vivoweb.org/ontology/vivo-ufl/TemporaryFaculty": position['position_type'] = o if o == \ "http://vivoweb.org/ontology/core#FacultyAdministrativePosition": position['position_type'] = o if p == "http://vivoweb.org/ontology/core#dateTimeInterval": position['dti_uri'] = o datetime_interval = get_datetime_interval(o) position['datetime_interval'] = datetime_interval if 'start_date' in datetime_interval: position['start_date'] = datetime_interval['start_date'] if 'end_date' in datetime_interval: position['end_date'] = datetime_interval['end_date'] i = i + 1 return position def add_position(person_uri, position): """ Given a person_uri and a position dictionary containing the attributes of a position, generate the RDF necessary to create the position, associate it with the person and assign its attributes. 
""" from vivofoundation import assert_resource_property from vivofoundation import assert_data_property from vivofoundation import add_dti from vivofoundation import get_vivo_uri ardf = "" position_uri = get_vivo_uri() dti = {'start' : position.get('start_date',None), 'end': position.get('end_date',None)} [add, dti_uri] = add_dti(dti) ardf = ardf + add ardf = ardf + assert_resource_property(position_uri, 'rdf:type', position['position_type']) ardf = ardf + assert_resource_property(position_uri, 'rdfs:label', position['position_label']) ardf = ardf + assert_resource_property(position_uri, 'vivo:dateTimeInterval', dti_uri) ardf = ardf + assert_resource_property(position_uri, 'vivo:relates', person_uri) ardf = ardf + assert_resource_property(position_uri, 'vivo:relates', position['position_orguri']) return [ardf, position_uri] def add_vcard(person_uri, vcard): """ Given a person_uri and a vcard dictionary of items on the vcard, generate ther RDF necessary to create the vcard, associate it with the person, and associate attributes to the vcard. The person_uri will be associated to the vcard and the vcard may have any number of single entry entities to references. The single_entry table controls the processing of these entities. The name entity is a special case. All values are attrbuted to the name entity. The single_entry table contains some additional keys for future use Both the name table and the single entry table are easily extensible to handle additional name attributes and additional single entry entities respectively. """ from vivofoundation import assert_resource_property from vivofoundation import assert_data_property
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # pylint: disable=line-too-long, too-many-locals, too-many-statements import json import time import sys from itertools import chain from knack.log import get_logger from knack.util import CLIError from azure.appconfiguration import (ConfigurationSetting, ResourceReadOnlyError) from azure.core import MatchConditions from azure.cli.core.util import user_confirmation from azure.core.exceptions import (HttpResponseError, ResourceNotFoundError, ResourceModifiedError) from ._constants import (FeatureFlagConstants, KeyVaultConstants, SearchFilterOptions, StatusCodes) from ._models import (convert_configurationsetting_to_keyvalue, convert_keyvalue_to_configurationsetting) from ._utils import get_appconfig_data_client, prep_label_filter_for_url_encoding from ._kv_helpers import (__compare_kvs_for_restore, __read_kv_from_file, __read_features_from_file, __write_kv_and_features_to_file, __read_kv_from_config_store, __is_json_content_type, __write_kv_and_features_to_config_store, __discard_features_from_retrieved_kv, __read_kv_from_app_service, __write_kv_to_app_service, __serialize_kv_list_to_comparable_json_object, __serialize_features_from_kv_list_to_comparable_json_object, __serialize_feature_list_to_comparable_json_object, __print_features_preview, __print_preview, __print_restore_preview) from .feature import list_feature logger = get_logger(__name__) def import_config(cmd, source, name=None, connection_string=None, label=None, prefix="", # prefix to add yes=False, skip_features=False, content_type=None, auth_mode="key", endpoint=None, # from-file parameters path=None, format_=None, separator=None, depth=None, # from-configstore parameters src_name=None, src_connection_string=None, src_key=None, src_label=None, preserve_labels=False, src_auth_mode="key", src_endpoint=None, # from-appservice parameters appservice_account=None): src_features = [] dest_features = [] dest_kvs = [] source = source.lower() format_ = format_.lower() if format_ else None azconfig_client = get_appconfig_data_client(cmd, name, connection_string, auth_mode, endpoint) # fetch key values from source if source == 'file': if format_ and content_type: # JSON content type is only supported with JSON format. # Error out if user has provided JSON content type with any other format. if format_ != 'json' and __is_json_content_type(content_type): raise CLIError("Failed to import '{}' file format with '{}' content type. Please provide JSON file format to match your content type.".format(format_, content_type)) if separator: # If separator is provided, use max depth by default unless depth is specified. depth = sys.maxsize if depth is None else int(depth) else: if depth and int(depth) != 1: logger.warning("Cannot flatten hierarchical data without a separator. 
--depth argument will be ignored.") depth = 1 src_kvs = __read_kv_from_file(file_path=path, format_=format_, separator=separator, prefix_to_add=prefix, depth=depth, content_type=content_type) if not skip_features: # src_features is a list of KeyValue objects src_features = __read_features_from_file(file_path=path, format_=format_) elif source == 'appconfig': src_azconfig_client = get_appconfig_data_client(cmd, src_name, src_connection_string, src_auth_mode, src_endpoint) if label is not None and preserve_labels: raise CLIError("Import failed! Please provide only one of these arguments: '--label' or '--preserve-labels'. See 'az appconfig kv import -h' for examples.") if preserve_labels: # We need label to be the same as src_label for preview later. # This will have no effect on label while writing to config store # as we check preserve_labels again before labelling KVs. label = src_label src_kvs = __read_kv_from_config_store(src_azconfig_client, key=src_key, label=src_label if src_label else SearchFilterOptions.EMPTY_LABEL, prefix_to_add=prefix) # We need to separate KV from feature flags __discard_features_from_retrieved_kv(src_kvs) if not skip_features: # Get all Feature flags with matching label all_features = __read_kv_from_config_store(src_azconfig_client, key=FeatureFlagConstants.FEATURE_FLAG_PREFIX + '*', label=src_label if src_label else SearchFilterOptions.EMPTY_LABEL) for feature in all_features: if feature.content_type == FeatureFlagConstants.FEATURE_FLAG_CONTENT_TYPE: src_features.append(feature) elif source == 'appservice': src_kvs = __read_kv_from_app_service( cmd, appservice_account=appservice_account, prefix_to_add=prefix, content_type=content_type) # if customer needs preview & confirmation if not yes: # fetch key values from user's configstore dest_kvs = __read_kv_from_config_store(azconfig_client, key=SearchFilterOptions.ANY_KEY, label=label if label else SearchFilterOptions.EMPTY_LABEL) __discard_features_from_retrieved_kv(dest_kvs) # generate preview and wait for user confirmation need_kv_change = __print_preview( old_json=__serialize_kv_list_to_comparable_json_object(keyvalues=dest_kvs, level=source), new_json=__serialize_kv_list_to_comparable_json_object(keyvalues=src_kvs, level=source)) need_feature_change = False if src_features and not skip_features: # Append all features to dest_features list all_features = __read_kv_from_config_store(azconfig_client, key=FeatureFlagConstants.FEATURE_FLAG_PREFIX + '*', label=label if label else SearchFilterOptions.EMPTY_LABEL) for feature in all_features: if feature.content_type == FeatureFlagConstants.FEATURE_FLAG_CONTENT_TYPE: dest_features.append(feature) need_feature_change = __print_features_preview( old_json=__serialize_features_from_kv_list_to_comparable_json_object(keyvalues=dest_features), new_json=__serialize_features_from_kv_list_to_comparable_json_object(keyvalues=src_features)) if not need_kv_change and not need_feature_change: return user_confirmation("Do you want to continue? 
\n") # append all feature flags to src_kvs list src_kvs.extend(src_features) # import into configstore __write_kv_and_features_to_config_store(azconfig_client, key_values=src_kvs, label=label, preserve_labels=preserve_labels, content_type=content_type) def export_config(cmd, destination, name=None, connection_string=None, label=None, key=None, prefix="", # prefix to remove yes=False, skip_features=False, skip_keyvault=False, auth_mode="key", endpoint=None, # to-file parameters path=None, format_=None, separator=None, naming_convention='pascal', resolve_keyvault=False, # to-config-store parameters dest_name=None, dest_connection_string=None, dest_label=None, preserve_labels=False, dest_auth_mode="key", dest_endpoint=None, # to-app-service parameters appservice_account=None): src_features = [] dest_features = [] dest_kvs = [] destination = destination.lower() format_ = format_.lower() if format_ else None naming_convention = naming_convention.lower() azconfig_client = get_appconfig_data_client(cmd, name, connection_string, auth_mode, endpoint) dest_azconfig_client = None if destination == 'appconfig': if dest_label is not None and preserve_labels: raise CLIError("Export failed! Please provide only one of these arguments: '--dest-label' or '--preserve-labels'. See 'az appconfig kv export -h' for examples.") if preserve_labels: # We need dest_label to be the same as label for preview later. # This will have no effect on label while writing to config store # as we check preserve_labels again before labelling KVs. dest_label = label dest_azconfig_client = get_appconfig_data_client(cmd, dest_name, dest_connection_string, dest_auth_mode, dest_endpoint) # fetch key values from user's configstore src_kvs = __read_kv_from_config_store(azconfig_client, key=key, label=label if label else SearchFilterOptions.EMPTY_LABEL, prefix_to_remove=prefix, cli_ctx=cmd.cli_ctx if resolve_keyvault else None) if skip_keyvault: src_kvs = [keyvalue for keyvalue in src_kvs if keyvalue.content_type != KeyVaultConstants.KEYVAULT_CONTENT_TYPE] # We need to separate KV from feature flags __discard_features_from_retrieved_kv(src_kvs) if not skip_features: # Get all Feature flags with matching label if (destination == 'file' and format_ == 'properties') or destination == 'appservice': skip_features = True logger.warning("Exporting feature flags to properties file or appservice is currently not supported.") else: # src_features is a list of FeatureFlag objects src_features = list_feature(cmd, feature='*', label=label if label else SearchFilterOptions.EMPTY_LABEL, name=name, connection_string=connection_string, all_=True, auth_mode=auth_mode, endpoint=endpoint) # if customer needs preview & confirmation if not yes: if destination == 'appconfig': # dest_kvs contains features and KV that match the label dest_kvs = __read_kv_from_config_store(dest_azconfig_client, key=SearchFilterOptions.ANY_KEY, label=dest_label if dest_label else SearchFilterOptions.EMPTY_LABEL) __discard_features_from_retrieved_kv(dest_kvs) if not skip_features: # Append all features to dest_features list dest_features = list_feature(cmd, feature='*', label=dest_label if dest_label else SearchFilterOptions.EMPTY_LABEL, name=dest_name, connection_string=dest_connection_string, all_=True, auth_mode=dest_auth_mode, endpoint=dest_endpoint) elif destination == 'appservice': dest_kvs = __read_kv_from_app_service(cmd, appservice_account=appservice_account) # generate preview and wait for user confirmation need_kv_change = __print_preview( 
old_json=__serialize_kv_list_to_comparable_json_object(keyvalues=dest_kvs, level=destination), new_json=__serialize_kv_list_to_comparable_json_object(keyvalues=src_kvs, level=destination)) need_feature_change = False if src_features: need_feature_change = __print_features_preview( old_json=__serialize_feature_list_to_comparable_json_object(features=dest_features), new_json=__serialize_feature_list_to_comparable_json_object(features=src_features)) if not need_kv_change and not need_feature_change: return user_confirmation("Do you want to continue? \n") # export to destination if destination == 'file': __write_kv_and_features_to_file(file_path=path, key_values=src_kvs, features=src_features, format_=format_, separator=separator, skip_features=skip_features, naming_convention=naming_convention) elif destination == 'appconfig': __write_kv_and_features_to_config_store(dest_azconfig_client, key_values=src_kvs, features=src_features, label=dest_label, preserve_labels=preserve_labels) elif destination == 'appservice': __write_kv_to_app_service(cmd, key_values=src_kvs, appservice_account=appservice_account) def set_key(cmd, key, name=None, label=None, content_type=None, tags=None, value=None, yes=False, connection_string=None, auth_mode="key", endpoint=None): azconfig_client = get_appconfig_data_client(cmd, name, connection_string, auth_mode, endpoint) if content_type: if content_type.lower() == KeyVaultConstants.KEYVAULT_CONTENT_TYPE: logger.warning("There is a dedicated command to set key vault reference. 'appconfig kv set-keyvault -h'") elif content_type.lower() == FeatureFlagConstants.FEATURE_FLAG_CONTENT_TYPE: logger.warning("There is a dedicated command to set feature flag. 'appconfig feature set -h'") retry_times = 3 retry_interval = 1 label = label if label and label != SearchFilterOptions.EMPTY_LABEL else None for i in range(0, retry_times): retrieved_kv = None set_kv = None new_kv = None try: retrieved_kv = azconfig_client.get_configuration_setting(key=key, label=label) except ResourceNotFoundError: logger.debug("Key '%s' with label '%s' not found. A new key-value will be created.", key, label) except HttpResponseError as exception: raise CLIError("Failed to retrieve key-values from config store. " + str(exception)) if retrieved_kv is None: if content_type and __is_json_content_type(content_type): try: # Ensure that provided value is valid JSON. Error out if value is invalid JSON. value = 'null' if value is None else value json.loads(value) except ValueError: raise CLIError('Value "{}" is not a valid JSON object, which conflicts with the content type "{}".'.format(value, content_type)) set_kv = ConfigurationSetting(key=key, label=label, value=value, content_type=content_type, tags=tags) else: value = retrieved_kv.value if value is None else value content_type = retrieved_kv.content_type if content_type is None else content_type if content_type and __is_json_content_type(content_type): try: # Ensure that provided/existing value is valid JSON. Error out if value is invalid JSON. json.loads(value) except (TypeError, ValueError): raise CLIError('Value "{}" is not a valid JSON object, which conflicts with the content type "{}". 
Set the value again in valid JSON format.'.format(value, content_type)) set_kv = ConfigurationSetting(key=key, label=label, value=value, content_type=content_type, tags=retrieved_kv.tags if tags is None else tags, read_only=retrieved_kv.read_only, etag=retrieved_kv.etag) verification_kv = { "key": set_kv.key, "label": set_kv.label, "content_type": set_kv.content_type, "value": set_kv.value, "tags": set_kv.tags } entry = json.dumps(verification_kv, indent=2, sort_keys=True, ensure_ascii=False) confirmation_message = "Are you sure you want to set the key: \n" + entry + "\n" user_confirmation(confirmation_message, yes) try: if set_kv.etag is None: new_kv = azconfig_client.add_configuration_setting(set_kv) else: new_kv = azconfig_client.set_configuration_setting(set_kv, match_condition=MatchConditions.IfNotModified) return convert_configurationsetting_to_keyvalue(new_kv) except ResourceReadOnlyError: raise CLIError("Failed to update read only key-value. Unlock the key-value before updating it.") except HttpResponseError as exception: if exception.status_code == StatusCodes.PRECONDITION_FAILED: logger.debug('Retrying setting %s times with
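# --- Illustrative sketch (not part of the Azure CLI module above) ------------
# A standalone, hedged example of the etag-based optimistic-concurrency pattern
# that set_key() above relies on. The connection string is a placeholder; the
# retry count and interval mirror the CLI defaults shown above.
import time

from azure.appconfiguration import AzureAppConfigurationClient, ConfigurationSetting
from azure.core import MatchConditions
from azure.core.exceptions import HttpResponseError, ResourceNotFoundError

client = AzureAppConfigurationClient.from_connection_string("<connection-string>")  # placeholder


def set_with_retry(key, value, label=None, retries=3, interval=1):
    for attempt in range(retries):
        try:
            current = client.get_configuration_setting(key=key, label=label)
            current.value = value
            # Write only if nobody changed the setting since we read it (etag check).
            return client.set_configuration_setting(
                current, match_condition=MatchConditions.IfNotModified)
        except ResourceNotFoundError:
            # The key does not exist yet, so create it unconditionally.
            return client.add_configuration_setting(
                ConfigurationSetting(key=key, label=label, value=value))
        except HttpResponseError as exc:
            if exc.status_code == 412 and attempt < retries - 1:  # precondition failed
                time.sleep(interval)  # someone else won the race; re-read and retry
                continue
            raise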
Default is descend. Returns: sorted_boxlist: A sorted BoxList with the field in the specified order. Raises: ValueError: if specified field does not exist or is not of single dimension. ValueError: if the order is not either descend or ascend. """ if not boxlist.has_field(field): raise ValueError('Field ' + field + ' does not exist') if len(boxlist.get_field(field).shape) != 1: raise ValueError('Field ' + field + 'should be single dimension.') if order != SortOrder.DESCEND and order != SortOrder.ASCEND: raise ValueError('Invalid sort order') field_to_sort = boxlist.get_field(field) sorted_indices = np.argsort(field_to_sort) if order == SortOrder.DESCEND: sorted_indices = sorted_indices[::-1] return gather_boxlist(boxlist, sorted_indices) def non_max_suppression(boxlist, max_output_size=10000, iou_threshold=1.0, score_threshold=-10.0): """Non maximum suppression. This op greedily selects a subset of detection bounding boxes, pruning away boxes that have high IOU (intersection over union) overlap (> thresh) with already selected boxes. In each iteration, the detected bounding box with highest score in the available pool is selected. Args: boxlist: BoxList holding N boxes. Must contain a 'scores' field representing detection scores. All scores belong to the same class. max_output_size: maximum number of retained boxes iou_threshold: intersection over union threshold. score_threshold: minimum score threshold. Remove the boxes with scores less than this value. Default value is set to -10. A very low threshold to pass pretty much all the boxes, unless the user sets a different score threshold. Returns: a BoxList holding M boxes where M <= max_output_size Raises: ValueError: if 'scores' field does not exist ValueError: if threshold is not in [0, 1] ValueError: if max_output_size < 0 """ if not boxlist.has_field('scores'): raise ValueError('Field scores does not exist') if iou_threshold < 0. or iou_threshold > 1.0: raise ValueError('IOU threshold must be in [0, 1]') if max_output_size < 0: raise ValueError('max_output_size must be bigger than 0.') boxlist = filter_scores_greater_than(boxlist, score_threshold) if boxlist.num_boxes() == 0: return boxlist boxlist = sort_by_field_boxlist(boxlist, 'scores') # Prevent further computation if NMS is disabled. if iou_threshold == 1.0: if boxlist.num_boxes() > max_output_size: selected_indices = np.arange(max_output_size) return gather_boxlist(boxlist, selected_indices) else: return boxlist boxes = boxlist.get() num_boxes = boxlist.num_boxes() # is_index_valid is True only for all remaining valid boxes, is_index_valid = np.full(num_boxes, 1, dtype=bool) selected_indices = [] num_output = 0 for i in range(num_boxes): if num_output < max_output_size: if is_index_valid[i]: num_output += 1 selected_indices.append(i) is_index_valid[i] = False valid_indices = np.where(is_index_valid)[0] if valid_indices.size == 0: break intersect_over_union = iou(np.expand_dims(boxes[i, :], axis=0), boxes[valid_indices, :]) intersect_over_union = np.squeeze(intersect_over_union, axis=0) is_index_valid[valid_indices] = np.logical_and( is_index_valid[valid_indices], intersect_over_union <= iou_threshold) return gather_boxlist(boxlist, np.array(selected_indices)) def multi_class_non_max_suppression(boxlist, score_thresh, iou_thresh, max_output_size): """Multi-class version of non maximum suppression. This op greedily selects a subset of detection bounding boxes, pruning away boxes that have high IOU (intersection over union) overlap (> thresh) with already selected boxes. 
It operates independently for each class for which scores are provided (via the scores field of the input box_list), pruning boxes with score less than a provided threshold prior to applying NMS. Args: boxlist: BoxList holding N boxes. Must contain a 'scores' field representing detection scores. This scores field is a tensor that can be 1 dimensional (in the case of a single class) or 2-dimensional, which which case we assume that it takes the shape [num_boxes, num_classes]. We further assume that this rank is known statically and that scores.shape[1] is also known (i.e., the number of classes is fixed and known at graph construction time). score_thresh: scalar threshold for score (low scoring boxes are removed). iou_thresh: scalar threshold for IOU (boxes that that high IOU overlap with previously selected boxes are removed). max_output_size: maximum number of retained boxes per class. Returns: a BoxList holding M boxes with a rank-1 scores field representing corresponding scores for each box with scores sorted in decreasing order and a rank-1 classes field representing a class label for each box. Raises: ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not have a valid scores field. """ if not 0 <= iou_thresh <= 1.0: raise ValueError('thresh must be between 0 and 1') if not isinstance(boxlist, BoxList): raise ValueError('boxlist must be a BoxList') if not boxlist.has_field('scores'): raise ValueError('input boxlist must have \'scores\' field') scores = boxlist.get_field('scores') if len(scores.shape) == 1: scores = np.reshape(scores, [-1, 1]) elif len(scores.shape) == 2: if scores.shape[1] is None: raise ValueError('scores field must have statically defined second dimension') else: raise ValueError('scores field must be of rank 1 or 2') num_boxes = boxlist.num_boxes() num_scores = scores.shape[0] num_classes = scores.shape[1] if num_boxes != num_scores: raise ValueError('Incorrect scores field length: actual vs expected.') selected_boxes_list = [] for class_idx in range(num_classes): boxlist_and_class_scores = BoxList(boxlist.get()) class_scores = np.reshape(scores[0:num_scores, class_idx], [-1]) boxlist_and_class_scores.add_field('scores', class_scores) boxlist_filt = filter_scores_greater_than(boxlist_and_class_scores, score_thresh) nms_result = non_max_suppression( boxlist_filt, max_output_size=max_output_size, iou_threshold=iou_thresh, score_threshold=score_thresh) nms_result.add_field('classes', np.zeros_like(nms_result.get_field('scores')) + class_idx) selected_boxes_list.append(nms_result) selected_boxes = concatenate_boxlist(selected_boxes_list) sorted_boxes = sort_by_field_boxlist(selected_boxes, 'scores') return sorted_boxes def scale(boxlist, y_scale, x_scale): """Scale box coordinates in x and y dimensions. Args: boxlist: BoxList holding N boxes y_scale: float x_scale: float Returns: boxlist: BoxList holding N boxes """ y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1) y_min = y_scale * y_min y_max = y_scale * y_max x_min = x_scale * x_min x_max = x_scale * x_max scaled_boxlist = BoxList(np.hstack([y_min, x_min, y_max, x_max])) fields = boxlist.get_extra_fields() for field in fields: extra_field_data = boxlist.get_field(field) scaled_boxlist.add_field(field, extra_field_data) return scaled_boxlist def clip_to_window(boxlist, window, filter_nonoverlapping=True): """Clip bounding boxes to a window. 
This op clips input bounding boxes (represented by bounding box corners) to a window, optionally filtering out boxes that do not overlap at all with the window. Args: boxlist: BoxList holding M_in boxes window: a numpy array of shape [4] representing the [y_min, x_min, y_max, x_max] window to which the op should clip boxes. filter_nonoverlapping: whether to filter out boxes that do not overlap at all with the window. Returns: a BoxList holding M_out boxes where M_out <= M_in """ y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1) win_y_min = window[0] win_x_min = window[1] win_y_max = window[2] win_x_max = window[3] y_min_clipped = np.fmax(np.fmin(y_min, win_y_max), win_y_min) y_max_clipped = np.fmax(np.fmin(y_max, win_y_max), win_y_min) x_min_clipped = np.fmax(np.fmin(x_min, win_x_max), win_x_min) x_max_clipped = np.fmax(np.fmin(x_max, win_x_max), win_x_min) clipped = BoxList(np.hstack([y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped])) clipped = _copy_extra_fields(clipped, boxlist) if filter_nonoverlapping: areas = area(clipped) nonzero_area_indices = np.reshape(np.nonzero(np.greater(areas, 0.0)), [-1]).astype(np.int32) clipped = gather_boxlist(clipped, nonzero_area_indices) return clipped def prune_non_overlapping_boxes(boxlist1, boxlist2, minoverlap=0.0): """Prunes the boxes in boxlist1 that overlap less than thresh with boxlist2. For each box in boxlist1, we want its IOA to be more than minoverlap with at least one of the boxes in boxlist2. If it does not, we remove it. Args: boxlist1: BoxList holding N boxes. boxlist2: BoxList holding M boxes. minoverlap: Minimum required overlap between boxes, to count them as overlapping. Returns: A pruned boxlist with size [N', 4]. """ intersection_over_area = ioa(boxlist2, boxlist1) # [M, N] tensor intersection_over_area = np.amax(intersection_over_area, axis=0) # [N] tensor keep_bool = np.greater_equal(intersection_over_area, np.array(minoverlap)) keep_inds = np.nonzero(keep_bool)[0] new_boxlist1 = gather_boxlist(boxlist1, keep_inds) return new_boxlist1 def prune_outside_window(boxlist, window): """Prunes bounding boxes that fall outside a given window. This function prunes bounding boxes that even partially fall outside the given window. See also ClipToWindow which only prunes bounding boxes that fall completely outside the window, and clips any bounding boxes that partially overflow. Args: boxlist: a BoxList holding M_in boxes. window: a numpy array of size 4, representing [ymin, xmin, ymax, xmax] of the window. Returns: pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in. valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes in the input tensor. """ y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1) win_y_min = window[0] win_x_min = window[1] win_y_max = window[2] win_x_max = window[3] coordinate_violations = np.hstack([ np.less(y_min, win_y_min), np.less(x_min, win_x_min), np.greater(y_max, win_y_max), np.greater(x_max, win_x_max)]) valid_indices = np.reshape(np.where(np.logical_not(np.max(coordinate_violations, axis=1))), [-1]) return gather_boxlist(boxlist, valid_indices), valid_indices def concatenate_boxlist(boxlists, fields=None): """Concatenate list of BoxLists. This op concatenates a list of input BoxLists into a larger BoxList. It also handles concatenation of BoxList fields as long as the field tensor shapes are equal except for the first dimension. Args: boxlists: list
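# --- Illustrative usage sketch (not part of the box-list utilities above) ----
# A hedged example of running non_max_suppression() on a tiny, made-up set of
# boxes. BoxList is assumed to expose the constructor and the add_field(),
# get_field(), and num_boxes() methods exactly as they are used above.
import numpy as np

example_boxes = np.array([[0.0, 0.0, 1.0, 1.0],     # [y_min, x_min, y_max, x_max]
                          [0.0, 0.1, 1.0, 1.1],     # overlaps the first box heavily
                          [0.5, 0.5, 0.9, 0.9]], dtype=np.float32)
example_boxlist = BoxList(example_boxes)
example_boxlist.add_field('scores', np.array([0.9, 0.8, 0.6], dtype=np.float32))

kept = non_max_suppression(example_boxlist,
                           max_output_size=10,
                           iou_threshold=0.5,
                           score_threshold=0.0)
print(kept.num_boxes())            # 2: the second box is suppressed by the first
print(kept.get_field('scores'))    # [0.9, 0.6]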
#!/usr/bin/python
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# ltr.py - LISP EID Traceroute Client - Trace the encap/decap paths
#
# Usage: python ltr.py [-s <source-eid>] <destination-EID | DNS-name>
#
#   -s: Optional source EID.
#   <destination-EID>: required parameter, [<iid>] in front is optional
#
# This application is run on an xTR, typically an ITR or RTR, where the
# encapsulator adds to the ltr message the RLOC the ITR is encapsulating to.
# Then the decapsulator will decapsulate and swap the source and destination
# addresses to return the packet to the source-EID (running the client
# program). If the ETR is not the EID, then the packet will be re-encapsulated,
# in which case more data is added to the ltr message.
#
# ltr messages run in UDP on port 2434 (4342 backwards) and are returned
# to the client program.
#
# The LISP-Trace message takes the following path:
#
# (1) ltr sends a LISP-TRACE packet from its EID to the EID of the ETR on
#     port 2434. It builds a type=9 packet with a nonce and an empty JSON
#     field.
#
# (2) The ITR will look up the destination EID as part of its forwarding
#     logic and add RLOC information to the LISP-Trace message. The message is
#     encapsulated to the ETR.
#
# (3) The ETR (or RTR) will decap the packet and add information to the
#     LISP-Trace packet. If it is the destination EID, it will send the
#     LISP-Trace packet using itself as the source and the original source as
#     the destination.
#
# (4) The local ITR will encapsulate the packet and add RLOC information to
#     the LISP-Trace packet. It encapsulates the return packet to the ETR.
#
# (5) The ETR decapsulates the packet and sends it to the ltr client so the
#     accumulated JSON data can be displayed for the user.
#
# This functionality works on a chain of encapsulating tunnels to give the
# user the RLOCs that are used and the arrival time of the packet. It allows
# an ltr client to determine not only the path and latency of the network,
# but also whether the encapsulation paths are symmetric or asymmetric.
#
# If there is an error along the path, the node detecting the error will
# return the LISP-Trace packet to the RLOC of the originating ITR.
#
# The JSON format of a LISP-Trace packet is an array of dictionary arrays.
# The array will typically have 2 elements, one from the ltr source to the
# destination EID and one for the return path. Each dictionary array is keyed
# with "seid", "deid", and "paths". The array "paths" is the node data that
# is appended at each encapsulation hop. Note the example below:
#
# [
#   { "seid" : "[<iid>]<orig-eid>", "deid" : "[<iid>]<dest-eid>", "paths" :
#     [
#       { "node" : "ITR", "srloc" : "<source-rloc>", "drloc" : "<dest_rloc>",
#         "encap-timestamp" : "<ts>", "hostname" : "<hn>",
#         "recent-rtts" : [...], "recent-hops" : [...]
}, # { "node" : "RTR", "srloc" : "<source-rloc>", "drloc" : "<dest_rloc>", # "decap-timestamp" : "<ts>", "hostname" : "<hn>" }, # { "node" : "RTR", "srloc" : "<source-rloc>", "drloc" : "<dest_rloc>", # "encap-timestamp" : "<ts>", "hostname" : "<hn>", # "recent-rtts" : [...], "recent-hops" : [...] }, # { "node" : "ETR", "srloc" : "<source-rloc>", "drloc" : "<dest_rloc>", # "encap-timestamp" : "<ts>", "hostname" : "<hn>" }, ... # ] }, # # { "seid" : "[<iid>]<dest-eid>", "deid" : "[<iid>]<orig-eid>", "paths" : # [ # { "node" : "ITR", "srloc" : "<source-rloc>", "drloc" : "<dest_rloc>", # "encap-timestamp" : "<ts>", "hostname" : "<hn>", # "recent-rtts" : [...], "recent-hops" : [...] }, # { "node" : "RTR", "srloc" : "<source-rloc>", "drloc" : "<dest_rloc>", # "decap-timestamp" : "<ts>", "hostname" : "<hn>" }, # { "node" : "RTR", "srloc" : "<source-rloc>", "drloc" : "<dest_rloc>", # "encap-timestamp" : "<ts>", "hostname" : "<hn>", # "recent-rtts" : [...], "recent-hops" : [...] }, # { "node" : "ETR", "srloc" : "<source-rloc>", "drloc" : "<dest_rloc>", # "encap-timestamp" : "<ts>", "hostname" : "<hn>" }, ... # ] } # ] # # Environment variable LISP_LTR_PORT is used to determine if the connection to # the LISP API is done with a particular port. And if the port has a minus # sign in front of it, it will use http rather https to connect to the # lispers.net API. # #------------------------------------------------------------------------------ if 64 - 64: i11iIiiIii import sys import struct import random import socket import json import commands import time import os if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi if 73 - 73: II111iiii if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i if 48 - 48: oO0o / OOooOOo / I11i / Ii1I if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I if 46 - 46: ooOoO0o * I11i - OoooooooOO if 30 - 30: o0oOOo0O0Ooo - O0 % o0oOOo0O0Ooo - OoooooooOO * O0 * OoooooooOO Oo0o = "https" OOO0o0o = 8080 if 40 - 40: I1IiiI / O0 % ooOoO0o + O0 * i1IIi I1Ii11I1Ii1i = os . getenv ( "LISP_LTR_PORT" ) if ( I1Ii11I1Ii1i != None ) : if ( I1Ii11I1Ii1i [ 0 ] == "-" ) : Oo0o = "http" I1Ii11I1Ii1i = I1Ii11I1Ii1i [ 1 : : ] if 67 - 67: iIii1I11I1II1 . I1ii11iIi11i . oO0o / i1IIi % II111iiii - OoOoOO00 if ( I1Ii11I1Ii1i . isdigit ( ) == False ) : print "Invalid value for env variable LISP_LTR_PORT" exit ( 1 ) if 91 - 91: OoO0O00 . i11iIiiIii / oO0o % I11i / OoO0O00 - i11iIiiIii OOO0o0o = int ( I1Ii11I1Ii1i ) if 8 - 8: o0oOOo0O0Ooo * I1ii11iIi11i * iIii1I11I1II1 . IiII / IiII % IiII if 22 - 22: Ii1I . IiII I11 = 2434 if 98 - 98: i11iIiiIii * I1IiiI % iII111i * iII111i * II111iiii if 79 - 79: IiII if 86 - 86: OoOoOO00 % I1IiiI if 80 - 80: OoooooooOO . I1IiiI if 87 - 87: oO0o / ooOoO0o + I1Ii111 - ooOoO0o . ooOoO0o / II111iiii if 11 - 11: I1IiiI % o0oOOo0O0Ooo - Oo0Ooo if 58 - 58: i11iIiiIii % I1Ii111 if 54 - 54: OOooOOo % O0 + I1IiiI - iII111i / I11i if 31 - 31: OoO0O00 + II111iiii if 13 - 13: OOooOOo * oO0o * I1IiiI if 55 - 55: II111iiii if 43 - 43: OoOoOO00 - i1IIi + I1Ii111 + Ii1I if 17 - 17: o0oOOo0O0Ooo if 64 - 64: Ii1I % i1IIi % OoooooooOO if 3 - 3: iII111i + O0 if 42 - 42: OOooOOo / i1IIi + i11iIiiIii - Ii1I if 78 - 78: OoO0O00 if 18 - 18: O0 - iII111i / iII111i + ooOoO0o % ooOoO0o - IiII if 62 - 62: iII111i - IiII - OoOoOO00 % i1IIi / oO0o if 77 - 77: II111iiii - II111iiii . I1IiiI / o0oOOo0O0Ooo if 14 - 14: I11i % O0 def IiI1I1 ( rloc , port ) : OoO000 = socket . htonl ( 0x90000000 + port ) IIiiIiI1 = struct . 
pack ( "I" , OoO000 ) if 41 - 41: OoOoOO00 II = rloc . split ( "." ) ooOoOoo0O = int ( II [ 0 ] ) << 24 ooOoOoo0O += int ( II [ 1 ] ) << 16 ooOoOoo0O += int ( II [ 2 ] ) << 8 ooOoOoo0O += int ( II [ 3 ] ) IIiiIiI1 += struct . pack ( "I" , socket . htonl ( ooOoOoo0O ) ) if 76 - 76: O0 / o0oOOo0O0Ooo . I1IiiI * Ii1I - OOooOOo Oooo = random . randint ( 0 , ( 2 ** 64 ) - 1 ) IIiiIiI1 += struct . pack ( "Q" , Oooo ) return ( Oooo , IIiiIiI1 ) if 67 - 67: OOooOOo / OoooooooOO % I11i - iIii1I11I1II1 if 82 - 82:
value: pulumi.Input[Union[str, 'ActionType']]): pulumi.set(self, "action", value) @property @pulumi.getter(name="matchConditions") def match_conditions(self) -> pulumi.Input[Sequence[pulumi.Input['MatchConditionArgs']]]: """ List of match conditions. """ return pulumi.get(self, "match_conditions") @match_conditions.setter def match_conditions(self, value: pulumi.Input[Sequence[pulumi.Input['MatchConditionArgs']]]): pulumi.set(self, "match_conditions", value) @property @pulumi.getter def name(self) -> pulumi.Input[str]: """ Defines the name of the custom rule """ return pulumi.get(self, "name") @name.setter def name(self, value: pulumi.Input[str]): pulumi.set(self, "name", value) @property @pulumi.getter def priority(self) -> pulumi.Input[int]: """ Defines in what order this rule be evaluated in the overall list of custom rules """ return pulumi.get(self, "priority") @priority.setter def priority(self, value: pulumi.Input[int]): pulumi.set(self, "priority", value) @property @pulumi.getter(name="enabledState") def enabled_state(self) -> Optional[pulumi.Input[Union[str, 'CustomRuleEnabledState']]]: """ Describes if the custom rule is in enabled or disabled state. Defaults to Enabled if not specified. """ return pulumi.get(self, "enabled_state") @enabled_state.setter def enabled_state(self, value: Optional[pulumi.Input[Union[str, 'CustomRuleEnabledState']]]): pulumi.set(self, "enabled_state", value) @pulumi.input_type class CustomerCertificateParametersArgs: def __init__(__self__, *, secret_source: pulumi.Input['ResourceReferenceArgs'], type: pulumi.Input[str], certificate_authority: Optional[pulumi.Input[str]] = None, secret_version: Optional[pulumi.Input[str]] = None, subject_alternative_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, use_latest_version: Optional[pulumi.Input[bool]] = None): """ Customer Certificate used for https :param pulumi.Input['ResourceReferenceArgs'] secret_source: Resource reference to the KV secret :param pulumi.Input[str] type: The type of the Secret to create. Expected value is 'CustomerCertificate'. :param pulumi.Input[str] certificate_authority: Certificate issuing authority. :param pulumi.Input[str] secret_version: Version of the secret to be used :param pulumi.Input[Sequence[pulumi.Input[str]]] subject_alternative_names: The list of SANs. :param pulumi.Input[bool] use_latest_version: Whether to use the latest version for the certificate """ pulumi.set(__self__, "secret_source", secret_source) pulumi.set(__self__, "type", 'CustomerCertificate') if certificate_authority is not None: pulumi.set(__self__, "certificate_authority", certificate_authority) if secret_version is not None: pulumi.set(__self__, "secret_version", secret_version) if subject_alternative_names is not None: pulumi.set(__self__, "subject_alternative_names", subject_alternative_names) if use_latest_version is not None: pulumi.set(__self__, "use_latest_version", use_latest_version) @property @pulumi.getter(name="secretSource") def secret_source(self) -> pulumi.Input['ResourceReferenceArgs']: """ Resource reference to the KV secret """ return pulumi.get(self, "secret_source") @secret_source.setter def secret_source(self, value: pulumi.Input['ResourceReferenceArgs']): pulumi.set(self, "secret_source", value) @property @pulumi.getter def type(self) -> pulumi.Input[str]: """ The type of the Secret to create. Expected value is 'CustomerCertificate'. 
""" return pulumi.get(self, "type") @type.setter def type(self, value: pulumi.Input[str]): pulumi.set(self, "type", value) @property @pulumi.getter(name="certificateAuthority") def certificate_authority(self) -> Optional[pulumi.Input[str]]: """ Certificate issuing authority. """ return pulumi.get(self, "certificate_authority") @certificate_authority.setter def certificate_authority(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "certificate_authority", value) @property @pulumi.getter(name="secretVersion") def secret_version(self) -> Optional[pulumi.Input[str]]: """ Version of the secret to be used """ return pulumi.get(self, "secret_version") @secret_version.setter def secret_version(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "secret_version", value) @property @pulumi.getter(name="subjectAlternativeNames") def subject_alternative_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ The list of SANs. """ return pulumi.get(self, "subject_alternative_names") @subject_alternative_names.setter def subject_alternative_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "subject_alternative_names", value) @property @pulumi.getter(name="useLatestVersion") def use_latest_version(self) -> Optional[pulumi.Input[bool]]: """ Whether to use the latest version for the certificate """ return pulumi.get(self, "use_latest_version") @use_latest_version.setter def use_latest_version(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "use_latest_version", value) @pulumi.input_type class DeepCreatedOriginGroupArgs: def __init__(__self__, *, name: pulumi.Input[str], origins: pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]], health_probe_settings: Optional[pulumi.Input['HealthProbeParametersArgs']] = None, response_based_origin_error_detection_settings: Optional[pulumi.Input['ResponseBasedOriginErrorDetectionParametersArgs']] = None, traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: Optional[pulumi.Input[int]] = None): """ The origin group for CDN content which is added when creating a CDN endpoint. Traffic is sent to the origins within the origin group based on origin health. :param pulumi.Input[str] name: Origin group name which must be unique within the endpoint. :param pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]] origins: The source of the content being delivered via CDN within given origin group. :param pulumi.Input['HealthProbeParametersArgs'] health_probe_settings: Health probe settings to the origin that is used to determine the health of the origin. :param pulumi.Input['ResponseBasedOriginErrorDetectionParametersArgs'] response_based_origin_error_detection_settings: The JSON object that contains the properties to determine origin health using real requests/responses.This property is currently not supported. :param pulumi.Input[int] traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: Time in minutes to shift the traffic to the endpoint gradually when an unhealthy endpoint comes healthy or a new endpoint is added. Default is 10 mins. This property is currently not supported. 
""" pulumi.set(__self__, "name", name) pulumi.set(__self__, "origins", origins) if health_probe_settings is not None: pulumi.set(__self__, "health_probe_settings", health_probe_settings) if response_based_origin_error_detection_settings is not None: pulumi.set(__self__, "response_based_origin_error_detection_settings", response_based_origin_error_detection_settings) if traffic_restoration_time_to_healed_or_new_endpoints_in_minutes is not None: pulumi.set(__self__, "traffic_restoration_time_to_healed_or_new_endpoints_in_minutes", traffic_restoration_time_to_healed_or_new_endpoints_in_minutes) @property @pulumi.getter def name(self) -> pulumi.Input[str]: """ Origin group name which must be unique within the endpoint. """ return pulumi.get(self, "name") @name.setter def name(self, value: pulumi.Input[str]): pulumi.set(self, "name", value) @property @pulumi.getter def origins(self) -> pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]]: """ The source of the content being delivered via CDN within given origin group. """ return pulumi.get(self, "origins") @origins.setter def origins(self, value: pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]]): pulumi.set(self, "origins", value) @property @pulumi.getter(name="healthProbeSettings") def health_probe_settings(self) -> Optional[pulumi.Input['HealthProbeParametersArgs']]: """ Health probe settings to the origin that is used to determine the health of the origin. """ return pulumi.get(self, "health_probe_settings") @health_probe_settings.setter def health_probe_settings(self, value: Optional[pulumi.Input['HealthProbeParametersArgs']]): pulumi.set(self, "health_probe_settings", value) @property @pulumi.getter(name="responseBasedOriginErrorDetectionSettings") def response_based_origin_error_detection_settings(self) -> Optional[pulumi.Input['ResponseBasedOriginErrorDetectionParametersArgs']]: """ The JSON object that contains the properties to determine origin health using real requests/responses.This property is currently not supported. """ return pulumi.get(self, "response_based_origin_error_detection_settings") @response_based_origin_error_detection_settings.setter def response_based_origin_error_detection_settings(self, value: Optional[pulumi.Input['ResponseBasedOriginErrorDetectionParametersArgs']]): pulumi.set(self, "response_based_origin_error_detection_settings", value) @property @pulumi.getter(name="trafficRestorationTimeToHealedOrNewEndpointsInMinutes") def traffic_restoration_time_to_healed_or_new_endpoints_in_minutes(self) -> Optional[pulumi.Input[int]]: """ Time in minutes to shift the traffic to the endpoint gradually when an unhealthy endpoint comes healthy or a new endpoint is added. Default is 10 mins. This property is currently not supported. 
""" return pulumi.get(self, "traffic_restoration_time_to_healed_or_new_endpoints_in_minutes") @traffic_restoration_time_to_healed_or_new_endpoints_in_minutes.setter def traffic_restoration_time_to_healed_or_new_endpoints_in_minutes(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "traffic_restoration_time_to_healed_or_new_endpoints_in_minutes", value) @pulumi.input_type class DeepCreatedOriginArgs: def __init__(__self__, *, host_name: pulumi.Input[str], name: pulumi.Input[str], enabled: Optional[pulumi.Input[bool]] = None, http_port: Optional[pulumi.Input[int]] = None, https_port: Optional[pulumi.Input[int]] = None, origin_host_header: Optional[pulumi.Input[str]] = None, priority: Optional[pulumi.Input[int]] = None, private_link_alias: Optional[pulumi.Input[str]] = None, private_link_approval_message: Optional[pulumi.Input[str]] = None, private_link_location: Optional[pulumi.Input[str]] = None, private_link_resource_id: Optional[pulumi.Input[str]] = None, weight: Optional[pulumi.Input[int]] = None): """ The main origin of CDN content which is added when creating a CDN endpoint. :param pulumi.Input[str] host_name: The address of the origin. It can be a domain name, IPv4 address, or IPv6 address. This should be unique across all origins in an endpoint. :param pulumi.Input[str] name: Origin name which must be unique within the endpoint. :param pulumi.Input[bool] enabled: Origin is enabled for load balancing or not. By default, origin is always enabled. :param pulumi.Input[int] http_port: The value of the HTTP port. Must be between 1 and 65535. :param pulumi.Input[int] https_port: The value of the HTTPS port. Must be between 1 and 65535. :param pulumi.Input[str] origin_host_header: The host header value sent to the origin with each request. If you leave this blank, the request hostname determines this value. Azure CDN origins, such as Web Apps, Blob Storage, and Cloud Services require this host header value to match the origin hostname by default. :param pulumi.Input[int] priority: Priority of origin in given origin group for load balancing. Higher priorities will not be used for load balancing if any lower priority origin is healthy.Must be between 1 and 5. :param pulumi.Input[str] private_link_alias: The Alias of the Private Link resource. Populating this optional field indicates that this origin is 'Private' :param pulumi.Input[str] private_link_approval_message: A custom message to be included in the approval request to connect to the Private Link. :param pulumi.Input[str] private_link_location: The location of the Private Link resource. Required only if 'privateLinkResourceId' is populated :param pulumi.Input[str] private_link_resource_id: The Resource Id of the Private Link resource. Populating this optional field indicates that this backend is 'Private' :param pulumi.Input[int] weight: Weight of the origin in given origin group for load balancing. 
Must be between 1 and 1000 """ pulumi.set(__self__, "host_name", host_name) pulumi.set(__self__, "name", name) if enabled is not None: pulumi.set(__self__, "enabled", enabled) if http_port is not None: pulumi.set(__self__, "http_port", http_port) if https_port is not None: pulumi.set(__self__, "https_port", https_port) if origin_host_header is not None: pulumi.set(__self__, "origin_host_header", origin_host_header) if priority is not None: pulumi.set(__self__, "priority", priority) if private_link_alias is not None: pulumi.set(__self__, "private_link_alias", private_link_alias) if private_link_approval_message is not None: pulumi.set(__self__, "private_link_approval_message", private_link_approval_message) if private_link_location is not None: pulumi.set(__self__, "private_link_location", private_link_location) if private_link_resource_id is not None: pulumi.set(__self__, "private_link_resource_id", private_link_resource_id) if weight is not None: pulumi.set(__self__, "weight", weight) @property @pulumi.getter(name="hostName") def host_name(self) -> pulumi.Input[str]: """ The address of the origin. It can be a domain name, IPv4 address, or IPv6 address. This should be unique across all origins in an endpoint. """ return pulumi.get(self, "host_name") @host_name.setter def host_name(self, value: pulumi.Input[str]): pulumi.set(self, "host_name", value) @property @pulumi.getter def name(self) -> pulumi.Input[str]: """ Origin name which must be unique within the endpoint. """ return pulumi.get(self, "name") @name.setter def name(self, value: pulumi.Input[str]): pulumi.set(self, "name", value) @property @pulumi.getter def enabled(self) -> Optional[pulumi.Input[bool]]: """ Origin
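# --- Illustrative usage sketch (not part of the generated SDK classes above) --
# A hedged example of constructing the input classes defined above. The host
# name and resource ID are placeholders, and ResourceReferenceArgs is assumed
# to accept an `id` keyword argument as elsewhere in pulumi-azure-native.
example_origin = DeepCreatedOriginArgs(
    name="primary-origin",
    host_name="contoso.blob.core.windows.net",   # placeholder origin host
    https_port=443,
    priority=1,       # must be between 1 and 5 per the docstring above
    weight=1000,      # must be between 1 and 1000 per the docstring above
)

example_origin_group = DeepCreatedOriginGroupArgs(
    name="default-origin-group",
    origins=[ResourceReferenceArgs(id="<origin-resource-id>")],   # placeholder ID
    traffic_restoration_time_to_healed_or_new_endpoints_in_minutes=10,
)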
'wavy_dash', 'wavy_dash'), (b'\xe2\x98\xb8\xef\xb8\x8f', 'wheel_of_dharma', 'wheel_of_dharma'), (b'\xf0\x9f\xa4\x8d', 'white_heart', 'white_heart'), (b'\xe2\x97\xbb\xef\xb8\x8f', 'white_medium_square', 'white_medium_square'), (b'\xe2\x96\xab\xef\xb8\x8f', 'white_small_square', 'white_small_square'), (b'\xf0\x9f\x8c\xa5\xef\xb8\x8f', 'white_sun_behind_cloud', 'white_sun_behind_cloud', 'white_sun_cloud'), (b'\xf0\x9f\x8c\xa6\xef\xb8\x8f', 'white_sun_behind_cloud_with_rain', 'white_sun_behind_cloud_with_rain', 'white_sun_rain_cloud'), (b'\xf0\x9f\x8c\xa4\xef\xb8\x8f', 'white_sun_small_cloud', 'white_sun_small_cloud', 'white_sun_with_small_cloud'), (b'\xf0\x9f\x8c\xac\xef\xb8\x8f', 'wind_blowing_face', 'wind_blowing_face'), (b'\xf0\x9f\x91\xa9\xe2\x80\x8d\xf0\x9f\x8e\xa8', 'woman_artist', 'woman_artist'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xf0\x9f\x8e\xa8', 'woman_artist_tone1', 'woman_artist_tone1'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xf0\x9f\x8e\xa8', 'woman_artist_tone2', 'woman_artist_tone2'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xf0\x9f\x8e\xa8', 'woman_artist_tone3', 'woman_artist_tone3'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbe\xe2\x80\x8d\xf0\x9f\x8e\xa8', 'woman_artist_tone4', 'woman_artist_tone4'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbf\xe2\x80\x8d\xf0\x9f\x8e\xa8', 'woman_artist_tone5', 'woman_artist_tone5'), (b'\xf0\x9f\x91\xa9\xe2\x80\x8d\xf0\x9f\x9a\x80', 'woman_astronaut', 'woman_astronaut'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xf0\x9f\x9a\x80', 'woman_astronaut_tone1', 'woman_astronaut_tone1'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xf0\x9f\x9a\x80', 'woman_astronaut_tone2', 'woman_astronaut_tone2'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xf0\x9f\x9a\x80', 'woman_astronaut_tone3', 'woman_astronaut_tone3'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbe\xe2\x80\x8d\xf0\x9f\x9a\x80', 'woman_astronaut_tone4', 'woman_astronaut_tone4'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbf\xe2\x80\x8d\xf0\x9f\x9a\x80', 'woman_astronaut_tone5', 'woman_astronaut_tone5'), (b'\xf0\x9f\x91\xa9\xe2\x80\x8d\xf0\x9f\xa6\xb2', 'woman_bald', 'woman_bald'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xf0\x9f\xa6\xb2', 'woman_bald_tone1', 'woman_bald_tone1'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xf0\x9f\xa6\xb2', 'woman_bald_tone2', 'woman_bald_tone2'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xf0\x9f\xa6\xb2', 'woman_bald_tone3', 'woman_bald_tone3'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbe\xe2\x80\x8d\xf0\x9f\xa6\xb2', 'woman_bald_tone4', 'woman_bald_tone4'), (b'\<KEY>', 'woman_bald_tone5', 'woman_bald_tone5'), (b'\<KEY>', 'woman_biking', 'woman_biking'), (b'\<KEY>', 'woman_biking_tone1', 'woman_biking_tone1'), (b'\xf0\x9f\x9a\xb4\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_biking_tone2', 'woman_biking_tone2'), (b'\xf0\x9f\x9a\xb4\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_biking_tone3', 'woman_biking_tone3'), (b'\<KEY>', 'woman_biking_tone4', 'woman_biking_tone4'), (b'\xf0\x9f\x9a\xb4\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_biking_tone5', 'woman_biking_tone5'), (b'\xe2\x9b\xb9\xef\xb8\x8f\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_bouncing_ball', 'woman_bouncing_ball'), (b'\xe2\x9b\xb9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_bouncing_ball_tone1', 'woman_bouncing_ball_tone1'), (b'\xe2\x9b\xb9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_bouncing_ball_tone2', 'woman_bouncing_ball_tone2'), 
(b'\xe2\x9b\xb9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_bouncing_ball_tone3', 'woman_bouncing_ball_tone3'), (b'\xe2\x9b\xb9\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_bouncing_ball_tone4', 'woman_bouncing_ball_tone4'), (b'\xe2\x9b\xb9\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_bouncing_ball_tone5', 'woman_bouncing_ball_tone5'), (b'\xf0\x9f\x99\x87\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_bowing', 'woman_bowing'), (b'\xf0\x9f\x99\x87\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_bowing_tone1', 'woman_bowing_tone1'), (b'\xf0\x9f\x99\x87\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_bowing_tone2', 'woman_bowing_tone2'), (b'\xf0\x9f\x99\x87\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_bowing_tone3', 'woman_bowing_tone3'), (b'\xf0\x9f\x99\x87\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_bowing_tone4', 'woman_bowing_tone4'), (b'\xf0\x9f\x99\x87\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_bowing_tone5', 'woman_bowing_tone5'), (b'\xf0\<KEY>8\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_cartwheeling', 'woman_cartwheeling'), (b'\xf0\<KEY>8\xf0\<KEY>0\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_cartwheeling_tone1', 'woman_cartwheeling_tone1'), (b'\xf0\x9f\xa4\xb8\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_cartwheeling_tone2', 'woman_cartwheeling_tone2'), (b'\xf0\x9f\xa4\xb8\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_cartwheeling_tone3', 'woman_cartwheeling_tone3'), (b'\xf0\x9f\xa4\xb8\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_cartwheeling_tone4', 'woman_cartwheeling_tone4'), (b'\xf0\x9f\xa4\xb8\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_cartwheeling_tone5', 'woman_cartwheeling_tone5'), (b'\xf0\x9f\xa7\x97\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_climbing', 'woman_climbing'), (b'\xf0\x9f\xa7\x97\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_climbing_tone1', 'woman_climbing_tone1'), (b'\xf0\x9f\xa7\x97\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_climbing_tone2', 'woman_climbing_tone2'), (b'\xf0\x9f\xa7\x97\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_climbing_tone3', 'woman_climbing_tone3'), (b'\xf0\x9f\xa7\x97\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_climbing_tone4', 'woman_climbing_tone4'), (b'\xf0\x9f\xa7\x97\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_climbing_tone5', 'woman_climbing_tone5'), (b'\xf0\x9f\x91\xb7\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_construction_worker', 'woman_construction_worker'), (b'\xf0\x9f\x91\xb7\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_construction_worker_tone1', 'woman_construction_worker_tone1'), (b'\xf0\x9f\x91\xb7\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_construction_worker_tone2', 'woman_construction_worker_tone2'), (b'\xf0\x9f\x91\xb7\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_construction_worker_tone3', 'woman_construction_worker_tone3'), (b'\xf0\x9f\x91\xb7\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_construction_worker_tone4', 'woman_construction_worker_tone4'), (b'\xf0\x9f\x91\xb7\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_construction_worker_tone5', 'woman_construction_worker_tone5'), (b'\xf0\x9f\x91\xa9\xe2\x80\x8d\xf0\x9f\x8d\xb3', 'woman_cook', 'woman_cook'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xf0\x9f\x8d\xb3', 'woman_cook_tone1', 'woman_cook_tone1'), 
(b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xf0\x9f\x8d\xb3', 'woman_cook_tone2', 'woman_cook_tone2'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xf0\x9f\x8d\xb3', 'woman_cook_tone3', 'woman_cook_tone3'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbe\xe2\x80\x8d\xf0\x9f\x8d\xb3', 'woman_cook_tone4', 'woman_cook_tone4'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbf\xe2\x80\x8d\xf0\x9f\x8d\xb3', 'woman_cook_tone5', 'woman_cook_tone5'), (b'\xf0\x9f\x91\xa9\xe2\x80\x8d\xf0\x9f\xa6\xb1', 'woman_curly_haired', 'woman_curly_haired'), (b'\xf0\<KEY>1', 'woman_curly_haired_tone1', 'woman_curly_haired_tone1'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xf0\x9f\xa6\xb1', 'woman_curly_haired_tone2', 'woman_curly_haired_tone2'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xf0\x9f\xa6\xb1', 'woman_curly_haired_tone3', 'woman_curly_haired_tone3'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbe\xe2\x80\x8d\xf0\x9f\xa6\xb1', 'woman_curly_haired_tone4', 'woman_curly_haired_tone4'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbf\xe2\x80\x8d\xf0\x9f\xa6\xb1', 'woman_curly_haired_tone5', 'woman_curly_haired_tone5'), (b'\xf0\x9f\x95\xb5\xef\xb8\x8f\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_detective', 'woman_detective'), (b'\xf0\x9f\x95\xb5\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_detective_tone1', 'woman_detective_tone1'), (b'\xf0\x9f\x95\xb5\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_detective_tone2', 'woman_detective_tone2'), (b'\xf0\x9f\x95\xb5\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_detective_tone3', 'woman_detective_tone3'), (b'\xf0\x9f\x95\xb5\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_detective_tone4', 'woman_detective_tone4'), (b'\xf0\x9f\x95\xb5\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_detective_tone5', 'woman_detective_tone5'), (b'\xf0\x9f\xa7\x9d\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_elf', 'woman_elf'), (b'\xf0\x9f\xa7\x9d\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_elf_tone1', 'woman_elf_tone1'), (b'\xf0\x9f\xa7\x9d\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_elf_tone2', 'woman_elf_tone2'), (b'\xf0\x9f\xa7\x9d\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_elf_tone3', 'woman_elf_tone3'), (b'\xf0\x9f\xa7\x9d\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_elf_tone4', 'woman_elf_tone4'), (b'\xf0\x9f\xa7\x9d\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_elf_tone5', 'woman_elf_tone5'), (b'\xf0\x9f\xa4\xa6\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_facepalming', 'woman_facepalming'), (b'\xf0\x9f\xa4\xa6\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_facepalming_tone1', 'woman_facepalming_tone1'), (b'\xf0\x9f\xa4\xa6\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_facepalming_tone2', 'woman_facepalming_tone2'), (b'\xf0\x9f\xa4\xa6\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_facepalming_tone3', 'woman_facepalming_tone3'), (b'\xf0\x9f\xa4\xa6\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_facepalming_tone4', 'woman_facepalming_tone4'), (b'\xf0\x9f\xa4\xa6\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_facepalming_tone5', 'woman_facepalming_tone5'), (b'\xf0\x9f\x91\xa9\xe2\x80\x8d\xf0\x9f\x8f\xad', 'woman_factory_worker', 'woman_factory_worker'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xf0\x9f\x8f\xad', 'woman_factory_worker_tone1', 'woman_factory_worker_tone1'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xf0\x9f\x8f\xad', 'woman_factory_worker_tone2', 
'woman_factory_worker_tone2'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xf0\x9f\x8f\xad', 'woman_factory_worker_tone3', 'woman_factory_worker_tone3'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbe\xe2\x80\x8d\xf0\x9f\x8f\xad', 'woman_factory_worker_tone4', 'woman_factory_worker_tone4'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbf\xe2\x80\x8d\xf0\x9f\x8f\xad', 'woman_factory_worker_tone5', 'woman_factory_worker_tone5'), (b'\xf0\x9f\xa7\x9a\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_fairy', 'woman_fairy'), (b'\xf0\x9f\xa7\x9a\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_fairy_tone1', 'woman_fairy_tone1'), (b'\xf0\x9f\xa7\x9a\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_fairy_tone2', 'woman_fairy_tone2'), (b'\xf0\x9f\xa7\x9a\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_fairy_tone3', 'woman_fairy_tone3'), (b'\xf0\x9f\xa7\x9a\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_fairy_tone4', 'woman_fairy_tone4'), (b'\<KEY>', 'woman_fairy_tone5', 'woman_fairy_tone5'), (b'\xf0\x9f\x91\xa9\xe2\x80\x8d\xf0\x9f\x8c\xbe', 'woman_farmer', 'woman_farmer'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xf0\x9f\x8c\xbe', 'woman_farmer_tone1', 'woman_farmer_tone1'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xf0\x9f\x8c\xbe', 'woman_farmer_tone2', 'woman_farmer_tone2'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xf0\x9f\x8c\xbe', 'woman_farmer_tone3', 'woman_farmer_tone3'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbe\xe2\x80\x8d\xf0\x9f\x8c\xbe', 'woman_farmer_tone4', 'woman_farmer_tone4'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbf\xe2\x80\x8d\xf0\x9f\x8c\xbe', 'woman_farmer_tone5', 'woman_farmer_tone5'), (b'\xf0\x9f\x91\xa9\xe2\x80\x8d\xf0\x9f\x9a\x92', 'woman_firefighter', 'woman_firefighter'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xf0\x9f\x9a\x92', 'woman_firefighter_tone1', 'woman_firefighter_tone1'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xf0\x9f\x9a\x92', 'woman_firefighter_tone2', 'woman_firefighter_tone2'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xf0\x9f\x9a\x92', 'woman_firefighter_tone3', 'woman_firefighter_tone3'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbe\xe2\x80\x8d\xf0\x9f\x9a\x92', 'woman_firefighter_tone4', 'woman_firefighter_tone4'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbf\xe2\x80\x8d\xf0\x9f\x9a\x92', 'woman_firefighter_tone5', 'woman_firefighter_tone5'), (b'\xf0\x9f\x99\x8d\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_frowning', 'woman_frowning'), (b'\xf0\x9f\x99\x8d\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_frowning_tone1', 'woman_frowning_tone1'), (b'\xf0\x9f\x99\x8d\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_frowning_tone2', 'woman_frowning_tone2'), (b'\xf0\x9f\x99\x8d\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_frowning_tone3', 'woman_frowning_tone3'), (b'\xf0\x9f\x99\x8d\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_frowning_tone4', 'woman_frowning_tone4'), (b'\xf0\x9f\x99\x8d\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_frowning_tone5', 'woman_frowning_tone5'), (b'\xf0\x9f\xa7\x9e\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_genie', 'woman_genie'), (b'\xf0\x9f\x99\x85\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_gesturing_no', 'woman_gesturing_no'), (b'\xf0\x9f\x99\x85\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_gesturing_no_tone1', 'woman_gesturing_no_tone1'), (b'\xf0\x9f\x99\x85\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_gesturing_no_tone2', 'woman_gesturing_no_tone2'), 
(b'\xf0\x9f\x99\x85\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_gesturing_no_tone3', 'woman_gesturing_no_tone3'), (b'\xf0\x9f\x99\x85\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_gesturing_no_tone4', 'woman_gesturing_no_tone4'), (b'\xf0\x9f\x99\x85\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_gesturing_no_tone5', 'woman_gesturing_no_tone5'), (b'\xf0\x9f\x99\x86\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_gesturing_ok', 'woman_gesturing_ok'), (b'\xf0\x9f\x99\x86\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_gesturing_ok_tone1', 'woman_gesturing_ok_tone1'), (b'\xf0\x9f\x99\x86\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_gesturing_ok_tone2', 'woman_gesturing_ok_tone2'), (b'\xf0\x9f\x99\x86\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_gesturing_ok_tone3', 'woman_gesturing_ok_tone3'), (b'\xf0\x9f\x99\x86\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_gesturing_ok_tone4', 'woman_gesturing_ok_tone4'), (b'\xf0\x9f\x99\x86\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_gesturing_ok_tone5', 'woman_gesturing_ok_tone5'), (b'\xf0\x9f\x92\x86\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_getting_face_massage', 'woman_getting_face_massage'), (b'\xf0\x9f\x92\x86\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_getting_face_massage_tone1', 'woman_getting_face_massage_tone1'), (b'\xf0\x9f\x92\x86\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_getting_face_massage_tone2', 'woman_getting_face_massage_tone2'), (b'\xf0\x9f\x92\x86\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_getting_face_massage_tone3', 'woman_getting_face_massage_tone3'), (b'\xf0\x9f\x92\x86\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_getting_face_massage_tone4', 'woman_getting_face_massage_tone4'), (b'\xf0\x9f\x92\x86\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_getting_face_massage_tone5', 'woman_getting_face_massage_tone5'), (b'\xf0\x9f\x92\x87\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_getting_haircut', 'woman_getting_haircut'), (b'\xf0\x9f\x92\x87\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_getting_haircut_tone1', 'woman_getting_haircut_tone1'), (b'\xf0\x9f\x92\x87\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_getting_haircut_tone2', 'woman_getting_haircut_tone2'), (b'\xf0\x9f\x92\x87\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_getting_haircut_tone3', 'woman_getting_haircut_tone3'), (b'\xf0\x9f\x92\x87\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_getting_haircut_tone4', 'woman_getting_haircut_tone4'), (b'\xf0\x9f\x92\x87\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_getting_haircut_tone5', 'woman_getting_haircut_tone5'), (b'\xf0\x9f\x8f\x8c\xef\xb8\x8f\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_golfing', 'woman_golfing'), (b'\xf0\x9f\x8f\x8c\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_golfing_tone1', 'woman_golfing_tone1'), (b'\xf0\x9f\x8f\x8c\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_golfing_tone2', 'woman_golfing_tone2'), (b'\xf0\x9f\x8f\x8c\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_golfing_tone3', 'woman_golfing_tone3'), (b'\xf0\x9f\x8f\x8c\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_golfing_tone4', 'woman_golfing_tone4'), (b'\xf0\x9f\x8f\x8c\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_golfing_tone5', 'woman_golfing_tone5'), 
(b'\xf0\x9f\x92\x82\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_guard', 'woman_guard'), (b'\xf0\x9f\x92\x82\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_guard_tone1', 'woman_guard_tone1'), (b'\xf0\x9f\x92\x82\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_guard_tone2', 'woman_guard_tone2'), (b'\xf0\x9f\x92\x82\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_guard_tone3', 'woman_guard_tone3'), (b'\xf0\x9f\x92\x82\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_guard_tone4', 'woman_guard_tone4'), (b'\xf0\x9f\x92\x82\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_guard_tone5', 'woman_guard_tone5'), (b'\xf0\x9f\x91\xa9\xe2\x80\x8d\xe2\x9a\x95\xef\xb8\x8f', 'woman_health_worker', 'woman_health_worker'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x9a\x95\xef\xb8\x8f', 'woman_health_worker_tone1', 'woman_health_worker_tone1'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x9a\x95\xef\xb8\x8f', 'woman_health_worker_tone2', 'woman_health_worker_tone2'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x9a\x95\xef\xb8\x8f', 'woman_health_worker_tone3', 'woman_health_worker_tone3'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x9a\x95\xef\xb8\x8f', 'woman_health_worker_tone4', 'woman_health_worker_tone4'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x9a\x95\xef\xb8\x8f', 'woman_health_worker_tone5', 'woman_health_worker_tone5'), (b'\xf0\x9f\xa7\x98\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_in_lotus_position', 'woman_in_lotus_position'), (b'\xf0\x9f\xa7\x98\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_in_lotus_position_tone1', 'woman_in_lotus_position_tone1'), (b'\xf0\x9f\xa7\x98\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_in_lotus_position_tone2', 'woman_in_lotus_position_tone2'), (b'\xf0\x9f\xa7\x98\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_in_lotus_position_tone3', 'woman_in_lotus_position_tone3'), (b'\xf0\x9f\xa7\x98\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_in_lotus_position_tone4', 'woman_in_lotus_position_tone4'), (b'\xf0\x9f\xa7\x98\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_in_lotus_position_tone5', 'woman_in_lotus_position_tone5'), (b'\xf0\x9f\x91\xa9\xe2\x80\x8d\xf0\x9f\xa6\xbd', 'woman_in_manual_wheelchair', 'woman_in_manual_wheelchair'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xf0\x9f\xa6\xbd', 'woman_in_manual_wheelchair_tone1', 'woman_in_manual_wheelchair_tone1'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xf0\x9f\xa6\xbd', 'woman_in_manual_wheelchair_tone2', 'woman_in_manual_wheelchair_tone2'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xf0\x9f\xa6\xbd', 'woman_in_manual_wheelchair_tone3', 'woman_in_manual_wheelchair_tone3'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbe\xe2\x80\x8d\xf0\x9f\xa6\xbd', 'woman_in_manual_wheelchair_tone4', 'woman_in_manual_wheelchair_tone4'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbf\xe2\x80\x8d\xf0\x9f\xa6\xbd', 'woman_in_manual_wheelchair_tone5', 'woman_in_manual_wheelchair_tone5'), (b'\<KEY>', 'woman_in_motorized_wheelchair', 'woman_in_motorized_wheelchair'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xf0\x9f\xa6\xbc', 'woman_in_motorized_wheelchair_tone1', 'woman_in_motorized_wheelchair_tone1'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xf0\x9f\xa6\xbc', 'woman_in_motorized_wheelchair_tone2', 'woman_in_motorized_wheelchair_tone2'), (b'\<KEY>', 'woman_in_motorized_wheelchair_tone3', 'woman_in_motorized_wheelchair_tone3'), (b'\<KEY>', 
'woman_in_motorized_wheelchair_tone4', 'woman_in_motorized_wheelchair_tone4'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbf\xe2\x80\x8d\xf0\x9f\xa6\xbc', 'woman_in_motorized_wheelchair_tone5', 'woman_in_motorized_wheelchair_tone5'), (b'\xf0\x9f\xa7\x96\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_in_steamy_room', 'woman_in_steamy_room'), (b'\xf0\x9f\xa7\x96\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_in_steamy_room_tone1', 'woman_in_steamy_room_tone1'), (b'\xf0\x9f\xa7\x96\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_in_steamy_room_tone2', 'woman_in_steamy_room_tone2'), (b'\xf0\x9f\xa7\x96\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_in_steamy_room_tone3', 'woman_in_steamy_room_tone3'), (b'\xf0\x9f\xa7\x96\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_in_steamy_room_tone4', 'woman_in_steamy_room_tone4'), (b'\xf0\x9f\xa7\x96\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_in_steamy_room_tone5', 'woman_in_steamy_room_tone5'), (b'\xf0\x9f\x91\xa9\xe2\x80\x8d\xe2\x9a\x96\xef\xb8\x8f', 'woman_judge', 'woman_judge'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x9a\x96\xef\xb8\x8f', 'woman_judge_tone1', 'woman_judge_tone1'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x9a\x96\xef\xb8\x8f', 'woman_judge_tone2', 'woman_judge_tone2'), (b'\xf0\<KEY>', 'woman_judge_tone3', 'woman_judge_tone3'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x9a\x96\xef\xb8\x8f', 'woman_judge_tone4', 'woman_judge_tone4'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x9a\x96\xef\xb8\x8f', 'woman_judge_tone5', 'woman_judge_tone5'), (b'\xf0\x9f\xa4\xb9\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_juggling', 'woman_juggling'), (b'\xf0\x9f\xa4\xb9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_juggling_tone1', 'woman_juggling_tone1'), (b'\xf0\x9f\xa4\xb9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_juggling_tone2', 'woman_juggling_tone2'), (b'\xf0\x9f\xa4\xb9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_juggling_tone3', 'woman_juggling_tone3'), (b'\xf0\x9f\xa4\xb9\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_juggling_tone4', 'woman_juggling_tone4'), (b'\xf0\x9f\xa4\xb9\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_juggling_tone5', 'woman_juggling_tone5'), (b'\xf0\x9f\xa7\x8e\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_kneeling', 'woman_kneeling'), (b'\xf0\x9f\xa7\x8e\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_kneeling_tone1', 'woman_kneeling_tone1'), (b'\xf0\x9f\xa7\x8e\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_kneeling_tone2', 'woman_kneeling_tone2'), (b'\<KEY>', 'woman_kneeling_tone3', 'woman_kneeling_tone3'), (b'\xf0\<KEY>0\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_kneeling_tone4', 'woman_kneeling_tone4'), (b'\xf0\x9f\xa7\x8e\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_kneeling_tone5', 'woman_kneeling_tone5'), (b'\xf0\x9f\x8f\x8b\xef\xb8\x8f\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_lifting_weights', 'woman_lifting_weights'), (b'\xf0\x9f\x8f\x8b\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_lifting_weights_tone1', 'woman_lifting_weights_tone1'), (b'\xf0\x9f\x8f\x8b\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_lifting_weights_tone2', 'woman_lifting_weights_tone2'), (b'\<KEY>', 'woman_lifting_weights_tone3', 'woman_lifting_weights_tone3'), (b'\<KEY>', 'woman_lifting_weights_tone4', 'woman_lifting_weights_tone4'), (b'\<KEY>', 'woman_lifting_weights_tone5', 
'woman_lifting_weights_tone5'), (b'\xf0\x9f\xa7\x99\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_mage', 'woman_mage'), (b'\xf0\x9f\xa7\x99\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_mage_tone1', 'woman_mage_tone1'), (b'\<KEY>', 'woman_mage_tone2', 'woman_mage_tone2'), (b'\xf0\x9f\xa7\x99\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_mage_tone3', 'woman_mage_tone3'), (b'\xf0\x9f\xa7\x99\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_mage_tone4', 'woman_mage_tone4'), (b'\xf0\x9f\xa7\x99\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_mage_tone5', 'woman_mage_tone5'), (b'\xf0\x9f\x91\xa9\xe2\x80\x8d\xf0\x9f\x94\xa7', 'woman_mechanic', 'woman_mechanic'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xf0\x9f\x94\xa7', 'woman_mechanic_tone1', 'woman_mechanic_tone1'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xf0\x9f\x94\xa7', 'woman_mechanic_tone2', 'woman_mechanic_tone2'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xf0\x9f\x94\xa7', 'woman_mechanic_tone3', 'woman_mechanic_tone3'), (b'\<KEY>', 'woman_mechanic_tone4', 'woman_mechanic_tone4'), (b'\<KEY>', 'woman_mechanic_tone5', 'woman_mechanic_tone5'), (b'\xf0\x9f\x9a\xb5\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_mountain_biking', 'woman_mountain_biking'), (b'\xf0\<KEY>5\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_mountain_biking_tone1', 'woman_mountain_biking_tone1'), (b'\xf0\x9f\x9a\xb5\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_mountain_biking_tone2', 'woman_mountain_biking_tone2'), (b'\<KEY>', 'woman_mountain_biking_tone3', 'woman_mountain_biking_tone3'), (b'\xf0\x9f\x9a\xb5\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_mountain_biking_tone4', 'woman_mountain_biking_tone4'), (b'\xf0\x9f\x9a\xb5\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_mountain_biking_tone5', 'woman_mountain_biking_tone5'), (b'\xf0\x9f\x91\xa9\xe2\x80\x8d\xf0\x9f\x92\xbc', 'woman_office_worker', 'woman_office_worker'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xf0\x9f\x92\xbc', 'woman_office_worker_tone1', 'woman_office_worker_tone1'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xf0\x9f\x92\xbc', 'woman_office_worker_tone2', 'woman_office_worker_tone2'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xf0\x9f\x92\xbc', 'woman_office_worker_tone3', 'woman_office_worker_tone3'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbe\xe2\x80\x8d\xf0\x9f\x92\xbc', 'woman_office_worker_tone4', 'woman_office_worker_tone4'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbf\xe2\x80\x8d\xf0\x9f\x92\xbc', 'woman_office_worker_tone5', 'woman_office_worker_tone5'), (b'\xf0\<KEY>9\<KEY>', 'woman_pilot', 'woman_pilot'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x9c\x88\xef\xb8\x8f', 'woman_pilot_tone1', 'woman_pilot_tone1'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x9c\x88\xef\xb8\x8f', 'woman_pilot_tone2', 'woman_pilot_tone2'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x9c\x88\xef\xb8\x8f', 'woman_pilot_tone3', 'woman_pilot_tone3'), (b'\xf0\x9f\x91\xa9\xf0\<KEY>0\<KEY>8\x8f', 'woman_pilot_tone4', 'woman_pilot_tone4'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x9c\x88\xef\xb8\x8f', 'woman_pilot_tone5', 'woman_pilot_tone5'), (b'\xf0\x9f\xa4\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_playing_handball', 'woman_playing_handball'), (b'\xf0\x9f\xa4\xbe\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_playing_handball_tone1', 'woman_playing_handball_tone1'), 
(b'\xf0\x9f\xa4\xbe\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_playing_handball_tone2', 'woman_playing_handball_tone2'), (b'\xf0\x9f\xa4\xbe\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_playing_handball_tone3', 'woman_playing_handball_tone3'), (b'\xf0\x9f\xa4\xbe\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_playing_handball_tone4', 'woman_playing_handball_tone4'), (b'\xf0\x9f\xa4\xbe\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_playing_handball_tone5', 'woman_playing_handball_tone5'), (b'\xf0\x9f\xa4\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_playing_water_polo', 'woman_playing_water_polo'), (b'\xf0\x9f\xa4\xbd\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_playing_water_polo_tone1', 'woman_playing_water_polo_tone1'), (b'\xf0\x9f\xa4\xbd\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_playing_water_polo_tone2', 'woman_playing_water_polo_tone2'), (b'\xf0\x9f\xa4\xbd\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_playing_water_polo_tone3', 'woman_playing_water_polo_tone3'), (b'\xf0\x9f\xa4\xbd\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_playing_water_polo_tone4', 'woman_playing_water_polo_tone4'), (b'\xf0\x9f\xa4\xbd\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_playing_water_polo_tone5', 'woman_playing_water_polo_tone5'), (b'\xf0\x9f\x91\xae\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_police_officer', 'woman_police_officer'), (b'\xf0\x9f\x91\xae\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_police_officer_tone1', 'woman_police_officer_tone1'), (b'\xf0\x9f\x91\xae\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_police_officer_tone2', 'woman_police_officer_tone2'), (b'\xf0\x9f\x91\xae\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_police_officer_tone3', 'woman_police_officer_tone3'), (b'\xf0\x9f\x91\xae\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_police_officer_tone4', 'woman_police_officer_tone4'), (b'\xf0\x9f\x91\xae\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_police_officer_tone5', 'woman_police_officer_tone5'), (b'\xf0\x9f\x99\x8e\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_pouting', 'woman_pouting'), (b'\xf0\x9f\x99\x8e\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_pouting_tone1', 'woman_pouting_tone1'), (b'\xf0\x9f\x99\x8e\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_pouting_tone2', 'woman_pouting_tone2'), (b'\xf0\x9f\x99\x8e\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_pouting_tone3', 'woman_pouting_tone3'), (b'\xf0\x9f\x99\x8e\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_pouting_tone4', 'woman_pouting_tone4'), (b'\xf0\x9f\x99\x8e\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_pouting_tone5', 'woman_pouting_tone5'), (b'\xf0\x9f\x99\x8b\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_raising_hand', 'woman_raising_hand'), (b'\xf0\x9f\x99\x8b\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_raising_hand_tone1', 'woman_raising_hand_tone1'), (b'\xf0\x9f\x99\x8b\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_raising_hand_tone2', 'woman_raising_hand_tone2'), (b'\xf0\x9f\x99\x8b\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_raising_hand_tone3', 'woman_raising_hand_tone3'), (b'\xf0\x9f\x99\x8b\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_raising_hand_tone4', 'woman_raising_hand_tone4'), 
(b'\xf0\x9f\x99\x8b\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_raising_hand_tone5', 'woman_raising_hand_tone5'), (b'\xf0\x9f\x91\xa9\xe2\x80\x8d\xf0\x9f\xa6\xb0', 'woman_red_haired', 'woman_red_haired'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xf0\x9f\xa6\xb0', 'woman_red_haired_tone1', 'woman_red_haired_tone1'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xf0\x9f\xa6\xb0', 'woman_red_haired_tone2', 'woman_red_haired_tone2'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xf0\x9f\xa6\xb0', 'woman_red_haired_tone3', 'woman_red_haired_tone3'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbe\xe2\x80\x8d\xf0\x9f\xa6\xb0', 'woman_red_haired_tone4', 'woman_red_haired_tone4'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbf\xe2\x80\x8d\xf0\x9f\xa6\xb0', 'woman_red_haired_tone5', 'woman_red_haired_tone5'), (b'\xf0\x9f\x9a\xa3\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_rowing_boat', 'woman_rowing_boat'), (b'\xf0\x9f\x9a\xa3\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_rowing_boat_tone1', 'woman_rowing_boat_tone1'), (b'\xf0\x9f\x9a\xa3\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_rowing_boat_tone2', 'woman_rowing_boat_tone2'), (b'\xf0\x9f\x9a\xa3\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_rowing_boat_tone3', 'woman_rowing_boat_tone3'), (b'\xf0\x9f\x9a\xa3\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_rowing_boat_tone4', 'woman_rowing_boat_tone4'), (b'\xf0\x9f\x9a\xa3\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_rowing_boat_tone5', 'woman_rowing_boat_tone5'), (b'\xf0\x9f\x8f\x83\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_running', 'woman_running'), (b'\xf0\x9f\x8f\x83\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_running_tone1', 'woman_running_tone1'), (b'\xf0\x9f\x8f\x83\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_running_tone2', 'woman_running_tone2'), (b'\xf0\x9f\x8f\x83\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_running_tone3', 'woman_running_tone3'), (b'\xf0\x9f\x8f\x83\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_running_tone4', 'woman_running_tone4'), (b'\xf0\x9f\x8f\x83\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_running_tone5', 'woman_running_tone5'), (b'\xf0\x9f\x91\xa9\xe2\x80\x8d\xf0\x9f\x94\xac', 'woman_scientist', 'woman_scientist'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xf0\x9f\x94\xac', 'woman_scientist_tone1', 'woman_scientist_tone1'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xf0\x9f\x94\xac', 'woman_scientist_tone2', 'woman_scientist_tone2'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xf0\x9f\x94\xac', 'woman_scientist_tone3', 'woman_scientist_tone3'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbe\xe2\x80\x8d\xf0\x9f\x94\xac', 'woman_scientist_tone4', 'woman_scientist_tone4'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbf\xe2\x80\x8d\xf0\x9f\x94\xac', 'woman_scientist_tone5', 'woman_scientist_tone5'), (b'\xf0\x9f\xa4\xb7\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_shrugging', 'woman_shrugging'), (b'\xf0\x9f\xa4\xb7\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_shrugging_tone1', 'woman_shrugging_tone1'), (b'\xf0\x9f\xa4\xb7\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_shrugging_tone2', 'woman_shrugging_tone2'), (b'\xf0\x9f\xa4\xb7\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_shrugging_tone3', 'woman_shrugging_tone3'), (b'\xf0\x9f\xa4\xb7\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_shrugging_tone4', 'woman_shrugging_tone4'), 
(b'\xf0\x9f\xa4\xb7\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_shrugging_tone5', 'woman_shrugging_tone5'), (b'\xf0\x9f\x91\xa9\xe2\x80\x8d\xf0\x9f\x8e\xa4', 'woman_singer', 'woman_singer'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xf0\x9f\x8e\xa4', 'woman_singer_tone1', 'woman_singer_tone1'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xf0\x9f\x8e\xa4', 'woman_singer_tone2', 'woman_singer_tone2'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xf0\x9f\x8e\xa4', 'woman_singer_tone3', 'woman_singer_tone3'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbe\xe2\x80\x8d\xf0\x9f\x8e\xa4', 'woman_singer_tone4', 'woman_singer_tone4'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbf\xe2\x80\x8d\xf0\x9f\x8e\xa4', 'woman_singer_tone5', 'woman_singer_tone5'), (b'\xf0\x9f\xa7\x8d\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_standing', 'woman_standing'), (b'\xf0\x9f\xa7\x8d\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_standing_tone1', 'woman_standing_tone1'), (b'\xf0\x9f\xa7\x8d\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_standing_tone2', 'woman_standing_tone2'), (b'\xf0\x9f\xa7\x8d\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_standing_tone3', 'woman_standing_tone3'), (b'\xf0\x9f\xa7\x8d\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_standing_tone4', 'woman_standing_tone4'), (b'\xf0\x9f\xa7\x8d\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_standing_tone5', 'woman_standing_tone5'), (b'\xf0\x9f\x91\xa9\xe2\x80\x8d\xf0\x9f\x8e\x93', 'woman_student', 'woman_student'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xf0\x9f\x8e\x93', 'woman_student_tone1', 'woman_student_tone1'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xf0\x9f\x8e\x93', 'woman_student_tone2', 'woman_student_tone2'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xf0\x9f\x8e\x93', 'woman_student_tone3', 'woman_student_tone3'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbe\xe2\x80\x8d\xf0\x9f\x8e\x93', 'woman_student_tone4', 'woman_student_tone4'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbf\xe2\x80\x8d\xf0\x9f\x8e\x93', 'woman_student_tone5', 'woman_student_tone5'), (b'\xf0\x9f\xa6\xb8\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_superhero', 'woman_superhero'), (b'\xf0\x9f\xa6\xb8\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_superhero_tone1', 'woman_superhero_tone1'), (b'\xf0\x9f\xa6\xb8\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_superhero_tone2', 'woman_superhero_tone2'), (b'\xf0\x9f\xa6\xb8\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_superhero_tone3', 'woman_superhero_tone3'), (b'\xf0\x9f\xa6\xb8\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_superhero_tone4', 'woman_superhero_tone4'), (b'\xf0\x9f\xa6\xb8\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_superhero_tone5', 'woman_superhero_tone5'), (b'\xf0\x9f\xa6\xb9\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_supervillain', 'woman_supervillain'), (b'\xf0\<KEY>', 'woman_supervillain_tone1', 'woman_supervillain_tone1'), (b'\xf0\x9f\xa6\xb9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_supervillain_tone2', 'woman_supervillain_tone2'), (b'\xf0\x9f\xa6\xb9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_supervillain_tone3', 'woman_supervillain_tone3'), (b'\xf0\x9f\xa6\xb9\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_supervillain_tone4', 'woman_supervillain_tone4'), (b'\<KEY>', 'woman_supervillain_tone5', 'woman_supervillain_tone5'), (b'\xf0\x9f\x8f\x84\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 
'woman_surfing', 'woman_surfing'), (b'\xf0\x9f\x8f\x84\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_surfing_tone1', 'woman_surfing_tone1'), (b'\xf0\x9f\x8f\x84\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_surfing_tone2', 'woman_surfing_tone2'), (b'\xf0\x9f\x8f\x84\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_surfing_tone3', 'woman_surfing_tone3'), (b'\xf0\x9f\x8f\x84\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_surfing_tone4', 'woman_surfing_tone4'), (b'\xf0\x9f\x8f\x84\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_surfing_tone5', 'woman_surfing_tone5'), (b'\xf0\x9f\x8f\x8a\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_swimming', 'woman_swimming'), (b'\xf0\x9f\x8f\x8a\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_swimming_tone1', 'woman_swimming_tone1'), (b'\xf0\x9f\x8f\x8a\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_swimming_tone2', 'woman_swimming_tone2'), (b'\xf0\x9f\x8f\x8a\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_swimming_tone3', 'woman_swimming_tone3'), (b'\xf0\x9f\x8f\x8a\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_swimming_tone4', 'woman_swimming_tone4'), (b'\xf0\x9f\x8f\x8a\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_swimming_tone5', 'woman_swimming_tone5'), (b'\xf0\x9f\x91\xa9\xe2\x80\x8d\xf0\x9f\x8f\xab', 'woman_teacher', 'woman_teacher'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xf0\x9f\x8f\xab', 'woman_teacher_tone1', 'woman_teacher_tone1'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xf0\x9f\x8f\xab', 'woman_teacher_tone2', 'woman_teacher_tone2'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xf0\x9f\x8f\xab', 'woman_teacher_tone3', 'woman_teacher_tone3'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbe\xe2\x80\x8d\xf0\x9f\x8f\xab', 'woman_teacher_tone4', 'woman_teacher_tone4'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbf\xe2\x80\x8d\xf0\x9f\x8f\xab', 'woman_teacher_tone5', 'woman_teacher_tone5'), (b'\xf0\x9f\x91\xa9\xe2\x80\x8d\xf0\x9f\x92\xbb', 'woman_technologist', 'woman_technologist'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xf0\x9f\x92\xbb', 'woman_technologist_tone1', 'woman_technologist_tone1'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xf0\x9f\x92\xbb', 'woman_technologist_tone2', 'woman_technologist_tone2'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xf0\x9f\x92\xbb', 'woman_technologist_tone3', 'woman_technologist_tone3'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbe\xe2\x80\x8d\xf0\x9f\x92\xbb', 'woman_technologist_tone4', 'woman_technologist_tone4'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbf\xe2\x80\x8d\xf0\x9f\x92\xbb', 'woman_technologist_tone5', 'woman_technologist_tone5'), (b'\xf0\x9f\x92\x81\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_tipping_hand', 'woman_tipping_hand'), (b'\xf0\x9f\x92\x81\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_tipping_hand_tone1', 'woman_tipping_hand_tone1'), (b'\xf0\x9f\x92\x81\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_tipping_hand_tone2', 'woman_tipping_hand_tone2'), (b'\<KEY>', 'woman_tipping_hand_tone3', 'woman_tipping_hand_tone3'), (b'\xf0\x9f\x92\x81\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_tipping_hand_tone4', 'woman_tipping_hand_tone4'), (b'\xf0\x9f\x92\x81\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_tipping_hand_tone5', 'woman_tipping_hand_tone5'), (b'\xf0\x9f\xa7\x9b\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_vampire', 'woman_vampire'), 
(b'\xf0\x9f\xa7\x9b\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_vampire_tone1', 'woman_vampire_tone1'), (b'\<KEY>', 'woman_vampire_tone2', 'woman_vampire_tone2'), (b'\<KEY>', 'woman_vampire_tone3', 'woman_vampire_tone3'), (b'\<KEY>', 'woman_vampire_tone4', 'woman_vampire_tone4'), (b'\xf0\x9f\xa7\x9b\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_vampire_tone5', 'woman_vampire_tone5'), (b'\xf0\x9f\x9a\xb6\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_walking', 'woman_walking'), (b'\xf0\x9f\x9a\xb6\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_walking_tone1', 'woman_walking_tone1'), (b'\<KEY>', 'woman_walking_tone2', 'woman_walking_tone2'), (b'\xf0\x9f\x9a\xb6\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_walking_tone3', 'woman_walking_tone3'), (b'\xf0\x9f\x9a\xb6\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_walking_tone4', 'woman_walking_tone4'), (b'\xf0\x9f\x9a\xb6\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_walking_tone5', 'woman_walking_tone5'), (b'\xf0\x9f\x91\xb3\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_wearing_turban', 'woman_wearing_turban'), (b'\xf0\x9f\x91\xb3\xf0\x9f\x8f\xbb\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_wearing_turban_tone1', 'woman_wearing_turban_tone1'), (b'\xf0\x9f\x91\xb3\xf0\x9f\x8f\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_wearing_turban_tone2', 'woman_wearing_turban_tone2'), (b'\xf0\x9f\x91\xb3\xf0\x9f\x8f\xbd\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_wearing_turban_tone3', 'woman_wearing_turban_tone3'), (b'\xf0\x9f\x91\xb3\xf0\x9f\x8f\xbe\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_wearing_turban_tone4', 'woman_wearing_turban_tone4'), (b'\xf0\x9f\x91\xb3\xf0\x9f\x8f\xbf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_wearing_turban_tone5', 'woman_wearing_turban_tone5'), (b'\xf0\x9f\x91\xa9\xe2\x80\x8d\xf0\x9f\xa6\xb3', 'woman_white_haired', 'woman_white_haired'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xf0\x9f\xa6\xb3', 'woman_white_haired_tone1', 'woman_white_haired_tone1'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xf0\x9f\xa6\xb3', 'woman_white_haired_tone2', 'woman_white_haired_tone2'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xf0\x9f\xa6\xb3', 'woman_white_haired_tone3', 'woman_white_haired_tone3'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbe\xe2\x80\x8d\xf0\x9f\xa6\xb3', 'woman_white_haired_tone4', 'woman_white_haired_tone4'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbf\xe2\x80\x8d\xf0\x9f\xa6\xb3', 'woman_white_haired_tone5', 'woman_white_haired_tone5'), (b'\xf0\x9f\xa7\x95', 'woman_with_headscarf', 'woman_with_headscarf'), (b'\xf0\x9f\xa7\x95\xf0\x9f\x8f\xbb', 'woman_with_headscarf_tone1', 'woman_with_headscarf_tone1'), (b'\xf0\x9f\xa7\x95\xf0\x9f\x8f\xbc', 'woman_with_headscarf_tone2', 'woman_with_headscarf_tone2'), (b'\xf0\x9f\xa7\x95\xf0\x9f\x8f\xbd', 'woman_with_headscarf_tone3', 'woman_with_headscarf_tone3'), (b'\xf0\x9f\xa7\x95\xf0\x9f\x8f\xbe', 'woman_with_headscarf_tone4', 'woman_with_headscarf_tone4'), (b'\xf0\x9f\xa7\x95\xf0\x9f\x8f\xbf', 'woman_with_headscarf_tone5', 'woman_with_headscarf_tone5'), (b'\xf0\x9f\x91\xa9\xe2\x80\x8d\xf0\x9f\xa6\xaf', 'woman_with_probing_cane', 'woman_with_probing_cane'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xf0\x9f\xa6\xaf', 'woman_with_probing_cane_tone1', 'woman_with_probing_cane_tone1'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc\xe2\x80\x8d\xf0\x9f\xa6\xaf', 'woman_with_probing_cane_tone2', 'woman_with_probing_cane_tone2'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbd\xe2\x80\x8d\xf0\x9f\xa6\xaf', 
'woman_with_probing_cane_tone3', 'woman_with_probing_cane_tone3'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbe\xe2\x80\x8d\xf0\x9f\xa6\xaf', 'woman_with_probing_cane_tone4', 'woman_with_probing_cane_tone4'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbf\xe2\x80\x8d\xf0\x9f\xa6\xaf', 'woman_with_probing_cane_tone5', 'woman_with_probing_cane_tone5'), (b'\xf0\x9f\xa7\x9f\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'woman_zombie', 'woman_zombie'), (b'\xf0\x9f\xa5\xbf', 'womans_flat_shoe', 'womans_flat_shoe'), (b'\xf0\x9f\x91\xaf\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'women_with_bunny_ears_partying', 'women_with_bunny_ears_partying'), (b'\xf0\x9f\xa4\xbc\xe2\x80\x8d\xe2\x99\x80\xef\xb8\x8f', 'women_wrestling', 'women_wrestling'), (b'\xf0\x9f\xa5\xb4', 'woozy_face', 'woozy_face'), (b'\xe2\x9c\x8d\xef\xb8\x8f', 'writing_hand', 'writing_hand'), (b'\xf0\x9f\xa7\xb6', 'yarn', 'yarn'), (b'\xf0\x9f\xa5\xb1', 'yawning_face', 'yawning_face'), (b'\xf0\x9f\x9f\xa1', 'yellow_circle', 'yellow_circle'), (b'\xf0\x9f\x9f\xa8', 'yellow_square', 'yellow_square'), (b'\xe2\x98\xaf\xef\xb8\x8f', 'yin_yang', 'yin_yang'), (b'\xf0\x9f\xaa\x80', 'yo_yo', 'yo_yo'), (b'\xf0\x9f\xa4\xaa', 'zany_face', 'zany_face'), (b'\xf0\x9f\xa6\x93', 'zebra', 'zebra'), (b'0\xef\xb8\x8f\xe2\x83\xa3', 'zero', 'zero'), (b'\xf0\x9f\xa7\x9f', 'zombie', 'zombie'), (b'\xf0\x9f\xa7\x91\xf0\x9f\x8f\xbe\xe2\x80\x8d\xf0\x9f\x92\xbb', 'technologist_tone4', 'technologist_tone4'), (b'\xf0\x9f\xa7\x91\xf0\x9f\x8f\xbe\xe2\x80\x8d\xf0\x9f\x9a\x92', 'firefighter_tone4', 'firefighter_tone4'), (b'\xf0\x9f\x91\xa8\xf0\x9f\x8f\xbe\xe2\x80\x8d\xf0\x9f\x8d\xbc', 'man_feeding_baby_tone4', 'man_feeding_baby_tone4'), (b'\xf0\x9f\x91\xa8\xf0\x9f\x8f\xbf\xe2\x80\x8d\xf0\x9f\x8d\xbc', 'man_feeding_baby_tone5', 'man_feeding_baby_tone5'), (b'\xf0\x9f\xa7\x91\xf0\x9f\x8f\xbf\xe2\x80\x8d\xf0\x9f\x92\xbb', 'technologist_tone5', 'technologist_tone5'), (b'\xf0\x9f\xa7\x91\xf0\x9f\x8f\xbf\xe2\x80\x8d\xf0\x9f\x9a\x92', 'firefighter_tone5', 'firefighter_tone5'), (b'\xf0\x9f\x91\xad\xf0\x9f\x8f\xbb', 'women_holding_hands_tone1', 'women_holding_hands_tone1'), (b'\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbb\xe2\x80\x8d\xf0\x9f\xa4\x9d\xe2\x80\x8d\xf0\x9f\x91\xa9\xf0\x9f\x8f\xbc', 'women_holding_hands_tone1_tone2', 'women_holding_hands_tone1_tone2'), (b'\xf0\x9f\xaa\xa1', 'sewing_needle', 'sewing_needle'), (b'\xf0\x9f\xa7\x91\xe2\x80\x8d\xf0\x9f\x92\xbb', 'technologist', 'technologist'),
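The tuples above pair a raw UTF-8 byte sequence with its shortcode and an alias. A minimal sketch of how such a table could be consumed, assuming the entries are gathered in a list; the name EMOJI_ENTRIES and the helper below are hypothetical and not part of the original data:

# Two entries copied verbatim from the table above.
EMOJI_ENTRIES = [
    (b'\xf0\x9f\xa7\x95', 'woman_with_headscarf', 'woman_with_headscarf'),
    (b'\xf0\x9f\xa6\x93', 'zebra', 'zebra'),
]

def build_shortcode_lookup(entries):
    # Decode each UTF-8 byte sequence into the rendered emoji string and map it to its shortcode.
    return {raw.decode('utf-8'): shortcode for raw, shortcode, _alias in entries}

lookup = build_shortcode_lookup(EMOJI_ENTRIES)
print(lookup['\U0001F993'])  # -> 'zebra'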
<filename>tests/orca_unit_testing/test_func_application_rolling.py import unittest import orca import os.path as path from setup.settings import * from pandas.util.testing import * class Csv: pdf_csv = None odf_csv = None class RollingTest(unittest.TestCase): @classmethod def setUpClass(cls): # configure data directory DATA_DIR = path.abspath(path.join(__file__, "../setup/data")) fileName = 'onlyNumericalColumns.csv' data = os.path.join(DATA_DIR, fileName) data = data.replace('\\', '/') # Orca connect to a DolphinDB server orca.connect(HOST, PORT, "admin", "123456") Csv.pdf_csv = pd.read_csv(data) Csv.odf_csv = orca.read_csv(data) @property def pdf_csv(self): return Csv.pdf_csv @property def odf_csv(self): return Csv.odf_csv @property def pdf(self): n = 100 # note that n should be a multiple of 10 re = n / 10 pdf_da = pd.DataFrame({'id': np.arange(1, n + 1, 1, dtype='int32'), 'date': np.repeat(pd.date_range('2019.08.01', periods=10, freq='D'), re), 'tchar': np.repeat(np.arange(1, 11, 1, dtype='int8'), re), 'tshort': np.repeat(np.arange(1, 11, 1, dtype='int16'), re), 'tint': np.repeat(np.arange(1, 11, 1, dtype='int32'), re), 'tlong': np.repeat(np.arange(1, 11, 1, dtype='int64'), re), 'tfloat': np.repeat(np.arange(1, 11, 1, dtype='float32'), re), 'tdouble': np.repeat(np.arange(1, 11, 1, dtype='float64'), re) }) return pdf_da.set_index("id") @property def pdf_da(self): n = 9 # note that n should be a multiple of 10 ps = pd.to_datetime( ["20170101", "20170103", "20170105", "20170106", "20171231", "20180615", "20181031", "20190501", "20190517"]).to_series() pdf_da = pd.DataFrame({'id': np.arange(1, n + 1, 1, dtype='int32'), 'date': ps, 'tchar': np.arange(1, 10, 1, dtype='int8'), 'tshort': np.arange(1, 10, 1, dtype='int16'), 'tint': np.arange(1, 10, 1, dtype='int32'), 'tlong': np.arange(1, 10, 1, dtype='int64'), 'tfloat': np.arange(1, 10, 1, dtype='float32'), 'tdouble': np.arange(1, 10, 1, dtype='float64') }) return pdf_da.set_index("id") @property def odf(self): return orca.DataFrame(self.pdf) @property def odf_da(self): return orca.DataFrame(self.pdf_da) def test_rolling_allocation_verification(self): self.assertIsInstance(self.odf.rolling(window=5, on="date")['date'].count().to_pandas(), Series) with self.assertRaises(KeyError): self.odf.rolling(window=5, on="date")['hello'].count() with self.assertRaises(KeyError): self.odf.rolling(window=5, on="date")[['dare', 5, 0]].count() with self.assertRaises(KeyError): self.odf.rolling(window=5, on="date")[['hello', 'world']].count() with self.assertRaises(KeyError): self.odf.rolling(window=5, on="date")[np.array([1, 2, 3])].count() with self.assertRaises(KeyError): self.odf.rolling(window=5, on="date")[5].count() with self.assertRaises(KeyError): self.odf.rolling(window=5, on="date")[[16.5, 5]].count() def test_rolling_from_pandas_param_window_sum(self): a = self.odf.rolling(window=5, on="date").sum() b = self.pdf.rolling(window=5, on="date").sum() assert_frame_equal(a.to_pandas(), b) a = self.odf.rolling(window=5, on="date")[ 'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum() b = self.pdf.rolling(window=5, on="date")[ 'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum() assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False) a = self.odf.rolling(window=5, on="date")[ 'date', 'tdouble','tchar', 'tint', 'tlong', 'tfloat','tshort'].sum() b = self.pdf.rolling(window=5, on="date")[ 'date', 'tdouble','tchar', 'tint', 'tlong', 'tfloat','tshort'].sum() assert_frame_equal(a.to_pandas().reset_index(drop=True), 
b.reset_index(drop=True), check_dtype=False) odf_dai = self.odf.reset_index() pdf_dai = self.pdf.reset_index() a = odf_dai.rolling(window=5, on="date")[ 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum() b = pdf_dai.rolling(window=5, on="date")[ 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum() assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False) odf_dai = self.odf.set_index('date') pdf_dai = self.pdf.set_index('date') a = odf_dai.rolling(window=5).sum() b = pdf_dai.rolling(window=5).sum() assert_frame_equal(a.to_pandas(), b) a = odf_dai.rolling(window=5)[ 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum() b = pdf_dai.rolling(window=5)[ 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum() assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False) def test_rolling_from_pandas_param_window_rule_day_sum(self): a = self.odf_da.rolling(window='d', on="date").sum() b = self.pdf_da.rolling(window='d', on="date").sum() assert_frame_equal(a.to_pandas(), b, check_dtype=False) a = self.odf_da.rolling(window='3d', on="date")[ 'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum() b = self.pdf_da.rolling(window='3d', on="date")[ 'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum() assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False) a = self.odf_da.rolling(window='d', on="date")[ 'date', 'tdouble','tchar', 'tint', 'tlong', 'tfloat','tshort'].sum() b = self.pdf_da.rolling(window='d', on="date")[ 'date', 'tdouble','tchar', 'tint', 'tlong', 'tfloat','tshort'].sum() assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False) odf_dai = self.odf_da.reset_index() pdf_dai = self.pdf_da.reset_index() a = odf_dai.rolling(window='d', on="date")[ 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum() b = pdf_dai.rolling(window='d', on="date")[ 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum() assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False) odf_dai = self.odf_da.set_index('date') pdf_dai = self.pdf_da.set_index('date') a = odf_dai.rolling(window='d').sum() b = pdf_dai.rolling(window='d').sum() assert_frame_equal(a.to_pandas(), b, check_dtype=False) a = odf_dai.rolling(window='d')[ 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum() b = pdf_dai.rolling(window='d')[ 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum() assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False) def test_rolling_from_pandas_param_window_rule_hour_sum(self): ps = pd.to_datetime( ["20170101 9:10:15", "20170101 10:10:15", "20170101 11:10:15", "20170101 11:20:15", "20170101 11:21:00", "20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15", "20190517 9:10:15"]).to_series() pdf_h = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'), 'date': ps, 'tchar': np.arange(1, 10, 1, dtype='int8'), 'tshort': np.arange(1, 10, 1, dtype='int16'), 'tint': np.arange(1, 10, 1, dtype='int32'), 'tlong': np.arange(1, 10, 1, dtype='int64'), 'tfloat': np.arange(1, 10, 1, dtype='float32'), 'tdouble': np.arange(1, 10, 1, dtype='float64') }) pdf_h.set_index("id", inplace=True) odf_h = orca.DataFrame(pdf_h) # TODO: ALL ASSERT FAIL a = odf_h.rolling(window='h', on="date").sum() b = pdf_h.rolling(window='h', on="date").sum() # assert_frame_equal(a.to_pandas(), b, 
check_dtype=False) a = odf_h.rolling(window='2h', on="date").sum() b = pdf_h.rolling(window='2h', on="date").sum() # assert_frame_equal(a.to_pandas(), b, check_dtype=False) # a = odf_h.rolling(window='h', on="date")[ # 'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum() # b = pdf_h.rolling(window='h', on="date")[ # 'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum() # assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False) # a = odf_hrolling(window='h', on="date")[ # 'date', 'tdouble','tchar', 'tint', 'tlong', 'tfloat','tshort'].sum() # b = pdf_h.rolling(window='h', on="date")[ # 'date', 'tdouble','tchar', 'tint', 'tlong', 'tfloat','tshort'].sum() # assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False) # # odf_dai = odf_h.reset_index() # pdf_dai = pdf_h.reset_index() # a = odf_dai.rolling(window='h', on="date")[ # 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum() # b = pdf_dai.rolling(window='h', on="date")[ # 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum() # assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False) # # odf_dai = odf_h.set_index('date') # pdf_dai = pdf_h.set_index('date') # a = odf_dai.rolling(window='h').sum() # b = pdf_dai.rolling(window='h').sum() # assert_frame_equal(a.to_pandas(), b) # # a = odf_dai.rolling(window='h')[ # 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum() # b = pdf_dai.rolling(window='h')[ # 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum() # assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False) def test_rolling_from_pandas_param_window_rule_minute_sum(self): ps = pd.to_datetime( ["20170101 9:10:15", "20170101 9:10:16", "20170101 9:11:10", "20170101 9:11:17", "20170101 11:21:00", "20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15", "20190517 9:10:15"]).to_series() pdf_t = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'), 'date': ps, 'tchar': np.arange(1, 10, 1, dtype='int8'), 'tshort': np.arange(1, 10, 1, dtype='int16'), 'tint': np.arange(1, 10, 1, dtype='int32'), 'tlong': np.arange(1, 10, 1, dtype='int64'), 'tfloat': np.arange(1, 10, 1, dtype='float32'), 'tdouble': np.arange(1, 10, 1, dtype='float64') }) pdf_t.set_index("id", inplace=True) odf_t = orca.DataFrame(pdf_t) # TODO: ALL ASSERT FAIL a = odf_t.rolling(window='t', on="date").sum() b = pdf_t.rolling(window='t', on="date").sum() # assert_frame_equal(a.to_pandas(), b, check_dtype=False) # a = odf_t.rolling(window='t', on="date")[ # 'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum() # b = pdf_t.rolling(window='t', on="date")[ # 'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum() # assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False) # # a = odf_t.rolling(window='t', on="date")[ # 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].sum() # b = pdf_t.rolling(window='t', on="date")[ # 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].sum() # assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False) # # odf_dai = odf_t.reset_index() # pdf_dai = pdf_t.reset_index() # a = odf_dai.rolling(window='t', on="date")[ # 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum() # b = pdf_dai.rolling(window='t', on="date")[ # 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum() # 
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False) # # odf_dai = odf_t.set_index('date') # pdf_dai = pdf_t.set_index('date') # a = odf_dai.rolling(window='t').sum() # b = pdf_dai.rolling(window='t').sum() # assert_frame_equal(a.to_pandas(), b) # # a = odf_dai.rolling(window='t')[ # 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum() # b = pdf_dai.rolling(window='t')[ # 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum() # assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False) def test_rolling_from_pandas_param_window_rule_second_sum(self): ps = pd.to_datetime( ["20170101 9:10:15", "20170101 9:10:16", "20170101 9:10:16", "20170101 9:11:17", "20170101 9:11:17", "20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15", "20190517 9:10:15"]).to_series() pdf_s = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'), 'date': ps, 'tchar': np.arange(1, 10, 1, dtype='int8'), 'tshort': np.arange(1, 10, 1, dtype='int16'), 'tint': np.arange(1, 10, 1, dtype='int32'), 'tlong': np.arange(1, 10, 1, dtype='int64'), 'tfloat': np.arange(1, 10, 1, dtype='float32'), 'tdouble': np.arange(1, 10, 1, dtype='float64') }) pdf_s.set_index("id", inplace=True) odf_s = orca.DataFrame(pdf_s) # TODO: ALL ASSERT FAIL a = odf_s.rolling(window='s', on="date").sum() b = pdf_s.rolling(window='s', on="date").sum() # assert_frame_equal(a.to_pandas(), b, check_dtype=False) a = odf_s.rolling(window='2s', on="date").sum() b = pdf_s.rolling(window='2s', on="date").sum() # assert_frame_equal(a.to_pandas(), b, check_dtype=False) # a = odf_s.rolling(window='s', on="date")[ # 'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum() # b = pdf_s.rolling(window='s', on="date")[ # 'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum() # assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False) # a = odf_s.rolling(window='s', on="date")[ # 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].sum() # b = pdf_s.rolling(window='s', on="date")[ # 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].sum() # assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False) # # odf_dai = odf_s.reset_index() # pdf_dai = pdf_s.reset_index() # a = odf_dai.rolling(window='s', on="date")[ # 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum() # b = pdf_dai.rolling(window='s', on="date")[ # 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum() # assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False) # # odf_dai = odf_s.set_index('date') # pdf_dai = pdf_s.set_index('date') # a = odf_dai.rolling(window='s').sum() # b = pdf_dai.rolling(window='s').sum() # assert_frame_equal(a.to_pandas(), b) # # a = odf_dai.rolling(window='s')[ # 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum() # b = pdf_dai.rolling(window='s')[ # 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum() # assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False) def test_rolling_from_pandas_param_window_rule_milli_sum(self): ps = pd.to_datetime( ["20170101 9:10:15.000", "20170101 9:10:15.000", "20170101 9:10:15.001", "20170101 9:11:17.015", "20170101 9:11:17.015", "20180615 9:10:15.015", "20181031 9:10:15.015", "20190501 9:10:15.015", "20190517 9:10:15.015"]).to_series() pdf_l = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'), 
'date': ps, 'tchar': np.arange(1, 10, 1, dtype='int8'), 'tshort': np.arange(1, 10, 1, dtype='int16'), 'tint': np.arange(1, 10, 1, dtype='int32'), 'tlong': np.arange(1, 10, 1, dtype='int64'), 'tfloat': np.arange(1, 10, 1, dtype='float32'), 'tdouble': np.arange(1, 10, 1, dtype='float64') }) pdf_l.set_index("id", inplace=True) odf_l = orca.DataFrame(pdf_l) # TODO: ALL ASSERT FAIL # a =
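The disabled assertions in the time-rule tests above compare orca against pandas offset windows ('d', 'h', 't', 's'). A minimal standalone sketch of the pandas side of that comparison, using made-up sample data rather than the fixtures above (frame and dates are illustrative names):

import numpy as np
import pandas as pd

dates = pd.to_datetime(["20170101 9:10:15", "20170101 9:10:16", "20170101 9:10:19"])
frame = pd.DataFrame({'date': dates, 'tint': np.arange(1, 4, dtype='int32')})

# With an offset window such as '2s', each row aggregates every earlier row whose
# 'date' falls within the last 2 seconds; the 'date' column must be monotonically increasing.
print(frame.rolling(window='2s', on='date').sum())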
19), woosh.Token(woosh.OP, '.', 294, 19, 294, 20), woosh.Token(woosh.NAME, '__class__', 294, 20, 294, 29), woosh.Token(woosh.OP, ',', 294, 29, 294, 30), woosh.Token(woosh.NAME, 'self', 294, 31, 294, 35), woosh.Token(woosh.OP, '.', 294, 35, 294, 36), woosh.Token(woosh.NAME, '_items', 294, 36, 294, 42), woosh.Token(woosh.NEWLINE, '\r\n', 294, 42, 295, 0), woosh.Token(woosh.DEDENT, '', 296, 0, 296, 0), woosh.Token(woosh.DEDENT, '', 296, 0, 296, 0), woosh.Token(woosh.NAME, 'class', 296, 0, 296, 5), woosh.Token(woosh.NAME, 'methodcaller', 296, 6, 296, 18), woosh.Token(woosh.OP, ':', 296, 18, 296, 19), woosh.Token(woosh.NEWLINE, '\r\n', 296, 19, 297, 0), woosh.Token(woosh.INDENT, ' ', 297, 0, 297, 4), woosh.Token(woosh.STRING, '"""\r\n Return a callable object that calls the given method on its operand.\r\n After f = methodcaller(\'name\'), the call f(r) returns r.name().\r\n After g = methodcaller(\'name\', \'date\', foo=1), the call g(r) returns\r\n r.name(\'date\', foo=1).\r\n """', 297, 4, 302, 7), woosh.Token(woosh.NEWLINE, '\r\n', 302, 7, 303, 0), woosh.Token(woosh.NAME, '__slots__', 303, 4, 303, 13), woosh.Token(woosh.OP, '=', 303, 14, 303, 15), woosh.Token(woosh.OP, '(', 303, 16, 303, 17), woosh.Token(woosh.STRING, "'_name'", 303, 17, 303, 24), woosh.Token(woosh.OP, ',', 303, 24, 303, 25), woosh.Token(woosh.STRING, "'_args'", 303, 26, 303, 33), woosh.Token(woosh.OP, ',', 303, 33, 303, 34), woosh.Token(woosh.STRING, "'_kwargs'", 303, 35, 303, 44), woosh.Token(woosh.OP, ')', 303, 44, 303, 45), woosh.Token(woosh.NEWLINE, '\r\n', 303, 45, 304, 0), woosh.Token(woosh.NAME, 'def', 305, 4, 305, 7), woosh.Token(woosh.NAME, '__init__', 305, 8, 305, 16), woosh.Token(woosh.OP, '(', 305, 16, 305, 17), woosh.Token(woosh.NAME, 'self', 305, 17, 305, 21), woosh.Token(woosh.OP, ',', 305, 21, 305, 22), woosh.Token(woosh.NAME, 'name', 305, 23, 305, 27), woosh.Token(woosh.OP, ',', 305, 27, 305, 28), woosh.Token(woosh.OP, '/', 305, 29, 305, 30), woosh.Token(woosh.OP, ',', 305, 30, 305, 31), woosh.Token(woosh.OP, '*', 305, 32, 305, 33), woosh.Token(woosh.NAME, 'args', 305, 33, 305, 37), woosh.Token(woosh.OP, ',', 305, 37, 305, 38), woosh.Token(woosh.OP, '**', 305, 39, 305, 41), woosh.Token(woosh.NAME, 'kwargs', 305, 41, 305, 47), woosh.Token(woosh.OP, ')', 305, 47, 305, 48), woosh.Token(woosh.OP, ':', 305, 48, 305, 49), woosh.Token(woosh.NEWLINE, '\r\n', 305, 49, 306, 0), woosh.Token(woosh.INDENT, ' ', 306, 0, 306, 8), woosh.Token(woosh.NAME, 'self', 306, 8, 306, 12), woosh.Token(woosh.OP, '.', 306, 12, 306, 13), woosh.Token(woosh.NAME, '_name', 306, 13, 306, 18), woosh.Token(woosh.OP, '=', 306, 19, 306, 20), woosh.Token(woosh.NAME, 'name', 306, 21, 306, 25), woosh.Token(woosh.NEWLINE, '\r\n', 306, 25, 307, 0), woosh.Token(woosh.NAME, 'if', 307, 8, 307, 10), woosh.Token(woosh.NAME, 'not', 307, 11, 307, 14), woosh.Token(woosh.NAME, 'isinstance', 307, 15, 307, 25), woosh.Token(woosh.OP, '(', 307, 25, 307, 26), woosh.Token(woosh.NAME, 'self', 307, 26, 307, 30), woosh.Token(woosh.OP, '.', 307, 30, 307, 31), woosh.Token(woosh.NAME, '_name', 307, 31, 307, 36), woosh.Token(woosh.OP, ',', 307, 36, 307, 37), woosh.Token(woosh.NAME, 'str', 307, 38, 307, 41), woosh.Token(woosh.OP, ')', 307, 41, 307, 42), woosh.Token(woosh.OP, ':', 307, 42, 307, 43), woosh.Token(woosh.NEWLINE, '\r\n', 307, 43, 308, 0), woosh.Token(woosh.INDENT, ' ', 308, 0, 308, 12), woosh.Token(woosh.NAME, 'raise', 308, 12, 308, 17), woosh.Token(woosh.NAME, 'TypeError', 308, 18, 308, 27), woosh.Token(woosh.OP, '(', 308, 27, 308, 28), 
woosh.Token(woosh.STRING, "'method name must be a string'", 308, 28, 308, 58), woosh.Token(woosh.OP, ')', 308, 58, 308, 59), woosh.Token(woosh.NEWLINE, '\r\n', 308, 59, 309, 0), woosh.Token(woosh.DEDENT, ' ', 309, 0, 309, 8), woosh.Token(woosh.NAME, 'self', 309, 8, 309, 12), woosh.Token(woosh.OP, '.', 309, 12, 309, 13), woosh.Token(woosh.NAME, '_args', 309, 13, 309, 18), woosh.Token(woosh.OP, '=', 309, 19, 309, 20), woosh.Token(woosh.NAME, 'args', 309, 21, 309, 25), woosh.Token(woosh.NEWLINE, '\r\n', 309, 25, 310, 0), woosh.Token(woosh.NAME, 'self', 310, 8, 310, 12), woosh.Token(woosh.OP, '.', 310, 12, 310, 13), woosh.Token(woosh.NAME, '_kwargs', 310, 13, 310, 20), woosh.Token(woosh.OP, '=', 310, 21, 310, 22), woosh.Token(woosh.NAME, 'kwargs', 310, 23, 310, 29), woosh.Token(woosh.NEWLINE, '\r\n', 310, 29, 311, 0), woosh.Token(woosh.DEDENT, ' ', 312, 0, 312, 4), woosh.Token(woosh.NAME, 'def', 312, 4, 312, 7), woosh.Token(woosh.NAME, '__call__', 312, 8, 312, 16), woosh.Token(woosh.OP, '(', 312, 16, 312, 17), woosh.Token(woosh.NAME, 'self', 312, 17, 312, 21), woosh.Token(woosh.OP, ',', 312, 21, 312, 22), woosh.Token(woosh.NAME, 'obj', 312, 23, 312, 26), woosh.Token(woosh.OP, ')', 312, 26, 312, 27), woosh.Token(woosh.OP, ':', 312, 27, 312, 28), woosh.Token(woosh.NEWLINE, '\r\n', 312, 28, 313, 0), woosh.Token(woosh.INDENT, ' ', 313, 0, 313, 8), woosh.Token(woosh.NAME, 'return', 313, 8, 313, 14), woosh.Token(woosh.NAME, 'getattr', 313, 15, 313, 22), woosh.Token(woosh.OP, '(', 313, 22, 313, 23), woosh.Token(woosh.NAME, 'obj', 313, 23, 313, 26), woosh.Token(woosh.OP, ',', 313, 26, 313, 27), woosh.Token(woosh.NAME, 'self', 313, 28, 313, 32), woosh.Token(woosh.OP, '.', 313, 32, 313, 33), woosh.Token(woosh.NAME, '_name', 313, 33, 313, 38), woosh.Token(woosh.OP, ')', 313, 38, 313, 39), woosh.Token(woosh.OP, '(', 313, 39, 313, 40), woosh.Token(woosh.OP, '*', 313, 40, 313, 41), woosh.Token(woosh.NAME, 'self', 313, 41, 313, 45), woosh.Token(woosh.OP, '.', 313, 45, 313, 46), woosh.Token(woosh.NAME, '_args', 313, 46, 313, 51), woosh.Token(woosh.OP, ',', 313, 51, 313, 52), woosh.Token(woosh.OP, '**', 313, 53, 313, 55), woosh.Token(woosh.NAME, 'self', 313, 55, 313, 59), woosh.Token(woosh.OP, '.', 313, 59, 313, 60), woosh.Token(woosh.NAME, '_kwargs', 313, 60, 313, 67), woosh.Token(woosh.OP, ')', 313, 67, 313, 68), woosh.Token(woosh.NEWLINE, '\r\n', 313, 68, 314, 0), woosh.Token(woosh.DEDENT, ' ', 315, 0, 315, 4), woosh.Token(woosh.NAME, 'def', 315, 4, 315, 7), woosh.Token(woosh.NAME, '__repr__', 315, 8, 315, 16), woosh.Token(woosh.OP, '(', 315, 16, 315, 17), woosh.Token(woosh.NAME, 'self', 315, 17, 315, 21), woosh.Token(woosh.OP, ')', 315, 21, 315, 22), woosh.Token(woosh.OP, ':', 315, 22, 315, 23), woosh.Token(woosh.NEWLINE, '\r\n', 315, 23, 316, 0), woosh.Token(woosh.INDENT, ' ', 316, 0, 316, 8), woosh.Token(woosh.NAME, 'args', 316, 8, 316, 12), woosh.Token(woosh.OP, '=', 316, 13, 316, 14), woosh.Token(woosh.OP, '[', 316, 15, 316, 16), woosh.Token(woosh.NAME, 'repr', 316, 16, 316, 20), woosh.Token(woosh.OP, '(', 316, 20, 316, 21), woosh.Token(woosh.NAME, 'self', 316, 21, 316, 25), woosh.Token(woosh.OP, '.', 316, 25, 316, 26), woosh.Token(woosh.NAME, '_name', 316, 26, 316, 31), woosh.Token(woosh.OP, ')', 316, 31, 316, 32), woosh.Token(woosh.OP, ']', 316, 32, 316, 33), woosh.Token(woosh.NEWLINE, '\r\n', 316, 33, 317, 0), woosh.Token(woosh.NAME, 'args', 317, 8, 317, 12), woosh.Token(woosh.OP, '.', 317, 12, 317, 13), woosh.Token(woosh.NAME, 'extend', 317, 13, 317, 19), woosh.Token(woosh.OP, '(', 317, 19, 317, 
20), woosh.Token(woosh.NAME, 'map', 317, 20, 317, 23), woosh.Token(woosh.OP, '(', 317, 23, 317, 24), woosh.Token(woosh.NAME, 'repr', 317, 24, 317, 28), woosh.Token(woosh.OP, ',', 317, 28, 317, 29), woosh.Token(woosh.NAME, 'self', 317, 30, 317, 34), woosh.Token(woosh.OP, '.', 317, 34, 317, 35), woosh.Token(woosh.NAME, '_args', 317, 35, 317, 40), woosh.Token(woosh.OP, ')', 317, 40, 317, 41), woosh.Token(woosh.OP, ')', 317, 41, 317, 42), woosh.Token(woosh.NEWLINE, '\r\n', 317, 42, 318, 0), woosh.Token(woosh.NAME, 'args', 318, 8, 318, 12), woosh.Token(woosh.OP, '.', 318, 12, 318, 13), woosh.Token(woosh.NAME, 'extend', 318, 13, 318, 19), woosh.Token(woosh.OP, '(', 318, 19, 318, 20), woosh.Token(woosh.STRING, "'%s=%r'", 318, 20, 318, 27), woosh.Token(woosh.OP, '%', 318, 28, 318, 29), woosh.Token(woosh.OP, '(', 318, 30, 318, 31), woosh.Token(woosh.NAME, 'k', 318, 31, 318, 32), woosh.Token(woosh.OP, ',', 318, 32, 318, 33), woosh.Token(woosh.NAME, 'v', 318, 34, 318, 35), woosh.Token(woosh.OP, ')', 318, 35, 318, 36), woosh.Token(woosh.NAME, 'for', 318, 37, 318, 40), woosh.Token(woosh.NAME, 'k', 318, 41, 318, 42), woosh.Token(woosh.OP, ',', 318, 42, 318, 43), woosh.Token(woosh.NAME, 'v', 318, 44, 318, 45), woosh.Token(woosh.NAME, 'in', 318, 46, 318, 48), woosh.Token(woosh.NAME, 'self', 318, 49, 318, 53), woosh.Token(woosh.OP, '.', 318, 53, 318, 54), woosh.Token(woosh.NAME, '_kwargs', 318, 54, 318, 61), woosh.Token(woosh.OP, '.', 318, 61, 318, 62), woosh.Token(woosh.NAME, 'items', 318, 62, 318, 67), woosh.Token(woosh.OP, '(', 318, 67, 318, 68), woosh.Token(woosh.OP, ')', 318, 68, 318, 69), woosh.Token(woosh.OP, ')', 318, 69, 318, 70), woosh.Token(woosh.NEWLINE, '\r\n', 318, 70, 319, 0), woosh.Token(woosh.NAME, 'return', 319, 8, 319, 14), woosh.Token(woosh.STRING, "'%s.%s(%s)'", 319, 15, 319, 26), woosh.Token(woosh.OP, '%', 319, 27, 319, 28), woosh.Token(woosh.OP, '(', 319, 29, 319, 30), woosh.Token(woosh.NAME, 'self', 319, 30, 319, 34), woosh.Token(woosh.OP, '.', 319, 34, 319, 35), woosh.Token(woosh.NAME, '__class__', 319, 35, 319, 44), woosh.Token(woosh.OP, '.', 319, 44, 319, 45), woosh.Token(woosh.NAME, '__module__', 319, 45, 319, 55), woosh.Token(woosh.OP, ',', 319, 55, 319, 56), woosh.Token(woosh.NAME, 'self', 320, 30, 320, 34), woosh.Token(woosh.OP, '.', 320, 34, 320, 35), woosh.Token(woosh.NAME, '__class__', 320, 35, 320, 44), woosh.Token(woosh.OP, '.', 320, 44, 320, 45), woosh.Token(woosh.NAME, '__name__', 320, 45, 320, 53), woosh.Token(woosh.OP, ',', 320, 53, 320, 54), woosh.Token(woosh.STRING, "', '", 321, 30, 321, 34), woosh.Token(woosh.OP, '.', 321, 34, 321, 35), woosh.Token(woosh.NAME, 'join', 321, 35, 321, 39), woosh.Token(woosh.OP, '(', 321, 39, 321, 40), woosh.Token(woosh.NAME, 'args', 321, 40, 321, 44), woosh.Token(woosh.OP, ')', 321, 44, 321, 45), woosh.Token(woosh.OP, ')', 321, 45, 321, 46), woosh.Token(woosh.NEWLINE, '\r\n', 321, 46, 322, 0), woosh.Token(woosh.DEDENT, ' ', 323, 0, 323, 4), woosh.Token(woosh.NAME, 'def', 323, 4, 323, 7), woosh.Token(woosh.NAME, '__reduce__', 323, 8, 323, 18), woosh.Token(woosh.OP, '(', 323, 18, 323, 19), woosh.Token(woosh.NAME, 'self', 323, 19, 323, 23), woosh.Token(woosh.OP, ')', 323, 23, 323, 24), woosh.Token(woosh.OP, ':', 323, 24, 323, 25), woosh.Token(woosh.NEWLINE, '\r\n', 323, 25, 324, 0), woosh.Token(woosh.INDENT, ' ', 324, 0, 324, 8), woosh.Token(woosh.NAME, 'if', 324, 8, 324, 10), woosh.Token(woosh.NAME, 'not', 324, 11, 324, 14), woosh.Token(woosh.NAME, 'self', 324, 15, 324, 19), woosh.Token(woosh.OP, '.', 324, 19, 324, 20), 
woosh.Token(woosh.NAME, '_kwargs', 324, 20, 324, 27), woosh.Token(woosh.OP, ':', 324, 27, 324, 28), woosh.Token(woosh.NEWLINE, '\r\n', 324, 28, 325, 0), woosh.Token(woosh.INDENT, ' ', 325, 0, 325, 12), woosh.Token(woosh.NAME, 'return', 325, 12, 325, 18), woosh.Token(woosh.NAME, 'self', 325, 19, 325, 23), woosh.Token(woosh.OP, '.', 325, 23, 325, 24), woosh.Token(woosh.NAME, '__class__', 325, 24, 325, 33), woosh.Token(woosh.OP, ',', 325, 33, 325, 34), woosh.Token(woosh.OP, '(', 325, 35, 325, 36), woosh.Token(woosh.NAME, 'self', 325, 36, 325, 40), woosh.Token(woosh.OP, '.', 325, 40, 325, 41), woosh.Token(woosh.NAME, '_name', 325, 41, 325, 46), woosh.Token(woosh.OP, ',', 325, 46, 325, 47), woosh.Token(woosh.OP, ')', 325, 47, 325, 48), woosh.Token(woosh.OP, '+', 325, 49, 325, 50), woosh.Token(woosh.NAME, 'self', 325, 51, 325, 55), woosh.Token(woosh.OP, '.', 325, 55, 325, 56), woosh.Token(woosh.NAME, '_args', 325, 56, 325, 61), woosh.Token(woosh.NEWLINE, '\r\n', 325, 61, 326, 0), woosh.Token(woosh.DEDENT, ' ', 326, 0, 326, 8), woosh.Token(woosh.NAME, 'else', 326, 8, 326, 12), woosh.Token(woosh.OP, ':', 326, 12, 326, 13), woosh.Token(woosh.NEWLINE, '\r\n', 326, 13, 327, 0), woosh.Token(woosh.INDENT, ' ', 327, 0, 327, 12), woosh.Token(woosh.NAME, 'from', 327, 12, 327, 16), woosh.Token(woosh.NAME, 'functools', 327, 17, 327, 26), woosh.Token(woosh.NAME, 'import', 327, 27, 327, 33), woosh.Token(woosh.NAME, 'partial', 327, 34, 327, 41), woosh.Token(woosh.NEWLINE, '\r\n', 327, 41, 328, 0), woosh.Token(woosh.NAME, 'return', 328, 12, 328, 18), woosh.Token(woosh.NAME, 'partial', 328, 19, 328, 26), woosh.Token(woosh.OP, '(', 328, 26, 328, 27), woosh.Token(woosh.NAME, 'self', 328, 27, 328, 31), woosh.Token(woosh.OP, '.', 328, 31, 328, 32), woosh.Token(woosh.NAME, '__class__',
for each line in path up to a limit. :param path: Path to files containing sentences. :param limit: How many lines to read from path. :return: Iterator over lists of words. """ with smart_open(path) as indata: for i, line in enumerate(indata): if limit is not None and i == limit: break yield list(get_tokens(line)) def get_tokens(line: str) -> Iterator[str]: """ Yields tokens from input string. :param line: Input string. :return: Iterator over tokens. """ for token in line.rstrip().split(): if len(token) > 0: yield token def tokens2ids(tokens: Iterable[str], vocab: Dict[str, int]) -> List[int]: """ Returns sequence of ids given a sequence of tokens and vocab. :param tokens: List of tokens. :param vocab: Vocabulary (containing UNK symbol). :return: List of word ids. """ return [vocab.get(w, vocab[C.UNK_SYMBOL]) for w in tokens] def read_sentences(path: str, vocab: Dict[str, int], add_bos=False, limit=None) -> List[List[int]]: """ Reads sentences from path and creates word id sentences. :param path: Path to read data from. :param vocab: Vocabulary mapping. :param add_bos: Whether to add Beginning-Of-Sentence (BOS) symbol. :param limit: Read limit. :return: List of integer sequences. """ assert C.UNK_SYMBOL in vocab assert C.UNK_SYMBOL in vocab assert vocab[C.PAD_SYMBOL] == C.PAD_ID assert C.BOS_SYMBOL in vocab assert C.EOS_SYMBOL in vocab sentences = [] for sentence_tokens in read_content(path, limit): sentence = tokens2ids(sentence_tokens, vocab) check_condition(bool(sentence), "Empty sentence in file %s" % path) if add_bos: sentence.insert(0, vocab[C.BOS_SYMBOL]) sentences.append(sentence) logger.info("%d sentences loaded from '%s'", len(sentences), path) return sentences def get_default_bucket_key(buckets: List[Tuple[int, int]]) -> Tuple[int, int]: """ Returns the default bucket from a list of buckets, i.e. the largest bucket. :param buckets: List of buckets. :return: The largest bucket in the list. """ return max(buckets) def get_parallel_bucket(buckets: List[Tuple[int, int]], length_source: int, length_target: int) -> Optional[Tuple[int, Tuple[int, int]]]: """ Returns bucket index and bucket from a list of buckets, given source and target length. Returns (None, None) if no bucket fits. :param buckets: List of buckets. :param length_source: Length of source sequence. :param length_target: Length of target sequence. :return: Tuple of (bucket index, bucket), or (None, None) if not fitting. """ bucket = None, None # type: Tuple[int, Tuple[int, int]] for j, (source_bkt, target_bkt) in enumerate(buckets): if source_bkt >= length_source and target_bkt >= length_target: bucket = j, (source_bkt, target_bkt) break return bucket BucketBatchSize = NamedTuple("BucketBatchSize", [ ("batch_size", int), ("average_words_per_batch", float) ]) """ :param batch_size: Number of sentences in each batch. :param average_words_per_batch: Approximate number of non-padding tokens in each batch. """ def read_graphs(path: str, vocab: Dict[str, int], limit=None): #TODO: add return type """ Reads graphs from path, creating a list of tuples for each sentence. We assume the format for graphs uses whitespace as separator. This allows us to reuse the reading methods for the sentences. :param path: Path to read data from. :return: List of sequences of integer tuples with the edge label. 
""" graphs = [] for graph_tokens in read_content(path, limit): graph = process_edges(graph_tokens, vocab) assert len(graph) > 0, "Empty graph in file %s" % path graphs.append(graph) logger.info("%d graphs loaded from '%s'", len(graphs), path) return graphs def process_edges(graph_tokens: Iterable[str], vocab: Dict[str, int]): #TODO: add typing """ Returns sequence of int tuples given a sequence of graph edges. TODO: this can be more efficient... :param graph_tokens: List of tokens containing graph edges. :return: List of (int, int) tuples """ adj_list = [(int(tok[1:-1].split(',')[0]), int(tok[1:-1].split(',')[1]), vocab[tok[1:-1].split(',')[2]]) for tok in graph_tokens] return adj_list # TODO: consider more memory-efficient data reading (load from disk on demand) # TODO: consider using HDF5 format for language data class ParallelBucketSentenceIter(mx.io.DataIter): """ A Bucket sentence iterator for parallel data. Randomly shuffles the data after every call to reset(). Data is stored in NDArrays for each epoch for fast indexing during iteration. :param source_sentences: List of source sentences (integer-coded). :param target_sentences: List of target sentences (integer-coded). :param source_graphs: List of source graphs (tuples of index pairs). :param buckets: List of buckets. :param batch_size: Batch_size of generated data batches. Incomplete batches are discarded if fill_up == None, or filled up according to the fill_up strategy. :param batch_by_words: Size batches by words rather than sentences. :param batch_num_devices: Number of devices batches will be parallelized across. :param md_vocab_size: Size of metadata vocabulary, needed for the adjacency tensors. :param fill_up: If not None, fill up bucket data to a multiple of batch_size to avoid discarding incomplete batches. for each bucket. If set to 'replicate', sample examples from the bucket and use them to fill up. :param eos_id: Word id for end-of-sentence. :param pad_id: Word id for padding symbols. :param unk_id: Word id for unknown symbols. :param forward_id: Word id for forward edge symbol (used to get graph positions). :param bucket_batch_sizes: Pre-computed bucket batch sizes (used to keep iterators consistent for train/validation). :param dtype: Data type of generated NDArrays. 
""" def __init__(self, source_sentences: List[List[int]], target_sentences: List[List[int]], source_graphs: List[Tuple[int, int, str]], buckets: List[Tuple[int, int]], batch_size: int, batch_by_words: bool, batch_num_devices: int, #edge_vocab_size: int, eos_id: int, pad_id: int, unk_id: int, forward_id: int, bucket_batch_sizes: Optional[List[BucketBatchSize]] = None, fill_up: Optional[str] = None, source_data_name=C.SOURCE_NAME, target_data_name=C.TARGET_NAME, src_graphs_name=C.SOURCE_GRAPHS_NAME, src_positions_name=C.SOURCE_POSITIONS_NAME, label_name=C.TARGET_LABEL_NAME, temperature=1.0, dtype='float32') -> None: super(ParallelBucketSentenceIter, self).__init__() self.buckets = list(buckets) self.buckets.sort() self.default_bucket_key = get_default_bucket_key(self.buckets) self.batch_size = batch_size self.batch_by_words = batch_by_words self.batch_num_devices = batch_num_devices #self.edge_vocab_size = edge_vocab_size self.eos_id = eos_id self.pad_id = pad_id self.unk_id = unk_id self.forward_id = forward_id self.dtype = dtype self.source_data_name = source_data_name self.target_data_name = target_data_name self.src_graphs_name = src_graphs_name self.src_positions_name = src_positions_name self.label_name = label_name self.fill_up = fill_up self.number_languages = len(source_sentences) #number of datasets in the training self.temperature = temperature self.data_source = [] # type: ignore self.data_target = [] # type: ignore self.data_label = [] # type: ignore self.data_label_average_len = [] self.data_src_graphs = [] self.data_src_positions = [] self.batch_samples = [] self.majority_index = 0 logger.info("Buckets: %s", self.buckets) # TODO: consider avoiding explicitly creating label arrays to save host memory for i in range(self.number_languages): #To handle several datasets (multilingual NLG) self.data_source.append([[] for _ in self.buckets]) # type: ignore self.data_target.append([[] for _ in self.buckets]) # type: ignore self.data_label.append([[] for _ in self.buckets]) # type: ignore self.data_label_average_len.append([0 for _ in self.buckets]) self.data_src_graphs.append([[] for _ in self.buckets]) self.data_src_positions.append([[] for _ in self.buckets]) logger.info("Data source: %s", self.data_source) # Per-bucket batch sizes (num seq, num word) # If not None, populated as part of assigning to buckets self.bucket_batch_sizes = bucket_batch_sizes logger.info("Bucket batch sizes: %s", self.bucket_batch_sizes) # assign sentence pairs to buckets self._assign_to_buckets(source_sentences, target_sentences, source_graphs) # convert to single numpy array for each bucket self._convert_to_array() # "Staging area" that needs to fit any size batch we're using by total number of elements. # When computing per-bucket batch sizes, we guarantee that the default bucket will have the # largest total batch size. # Note: this guarantees memory sharing for input data and is generally a good heuristic for # other parts of the model, but it is possible that some architectures will have intermediate # operations that produce shapes larger than the default bucket size. In these cases, MXNet # will silently allocate additional memory. 
self.provide_data = [ mx.io.DataDesc(name=source_data_name, shape=(self.bucket_batch_sizes[-1].batch_size, self.default_bucket_key[0]), layout=C.BATCH_MAJOR), mx.io.DataDesc(name=target_data_name, shape=(self.bucket_batch_sizes[-1].batch_size, self.default_bucket_key[1]), layout=C.BATCH_MAJOR), mx.io.DataDesc(name=src_graphs_name, shape=(self.bucket_batch_sizes[-1].batch_size, self.default_bucket_key[0], self.default_bucket_key[0]), layout=C.BATCH_MAJOR), mx.io.DataDesc(name=src_positions_name, shape=(self.bucket_batch_sizes[-1].batch_size, self.default_bucket_key[0]), layout=C.BATCH_MAJOR)] self.provide_label = [ mx.io.DataDesc(name=label_name, shape=(self.bucket_batch_sizes[-1].batch_size, self.default_bucket_key[1]), layout=C.BATCH_MAJOR)] self.data_names = [self.source_data_name, self.target_data_name, self.src_graphs_name, self.src_positions_name] self.label_names = [self.label_name] logger.info(self.data_names) logger.info(self.label_names) # create index tuples (i,j) into buckets: i := bucket index ; j := row index of bucket array #Now we are working with one or more idx and curre_idx (multilingual training) self.idx = [] self.curr_idx = [] for index in range(self.number_languages): idxi = [] # type: List[Tuple[int, int]] for i, buck in enumerate(self.data_source[index]): batch_size_seq = self.bucket_batch_sizes[i].batch_size if self.number_languages > 1: batch_size_seq = self.batch_samples[index] logger.info("Buck: %d and batch_size_seq: %d/%d", len(buck), batch_size_seq, self.bucket_batch_sizes[i].batch_size) rest = len(buck) % batch_size_seq if rest > 0: logger.info("Discarding %d samples from bucket %s due to incomplete batch", rest, self.buckets[i]) idxs = [(i, j) for j in range(0, len(buck) - batch_size_seq + 1, batch_size_seq)] idxi.extend(idxs) self.idx.append(idxi) self.curr_idx.append(0) self.indices = [[] for _ in range(self.number_languages)] self.nd_source = [[] for _ in range(self.number_languages)] self.nd_target = [[] for _ in range(self.number_languages)] self.nd_label = [[] for _ in range(self.number_languages)] ##### #
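# --- Illustrative usage sketch (not part of the original module) ---
# The graph files consumed by read_graphs() above are assumed to hold
# whitespace-separated edge tokens of the form "(src,dst,LABEL)".  The helper
# below mirrors the parsing done by process_edges() and also shows the bucket
# lookup done by get_parallel_bucket(); the edge-label vocabulary, the edge
# tokens and the bucket list are made-up values for demonstration only.
def _demo_graph_parsing_and_bucketing():
    edge_vocab = {'d': 0, 'r': 1, 's': 2}                  # hypothetical edge-label vocab
    graph_tokens = ['(0,1,d)', '(1,2,r)', '(2,0,s)']
    adj_list = []
    for tok in graph_tokens:
        src, dst, label = tok[1:-1].split(',')             # strip "(" and ")", split the three fields once
        adj_list.append((int(src), int(dst), edge_vocab[label]))
    # adj_list == [(0, 1, 0), (1, 2, 1), (2, 0, 2)]

    # Bucket lookup: the first bucket large enough for both sides is chosen.
    buckets = [(10, 10), (20, 20), (30, 30)]
    bucket_index, bucket = get_parallel_bucket(buckets, length_source=12, length_target=8)
    # bucket_index == 1, bucket == (20, 20)
    return adj_list, bucket_index, bucket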
testuser self.client.login(username='testuser_system', password='<PASSWORD>') # get response response = self.client.get('/system/' + str(system_1.system_id) + '/') # compare self.assertTrue(response.context['dfirtrack_api']) def test_system_detail_context_without_api(self): """test detail view""" # remove app from dfirtrack.settings if 'dfirtrack_api' in installed_apps: installed_apps.remove('dfirtrack_api') # get object system_1 = System.objects.get(system_name='system_1') # login testuser self.client.login(username='testuser_system', password='<PASSWORD>') # get response response = self.client.get('/system/' + str(system_1.system_id) + '/') # compare self.assertFalse(response.context['dfirtrack_api']) def test_system_add_not_logged_in(self): """test add view""" # create url destination = '/login/?next=' + urllib.parse.quote('/system/add/', safe='') # get response response = self.client.get('/system/add/', follow=True) # compare self.assertRedirects( response, destination, status_code=302, target_status_code=200 ) def test_system_add_logged_in(self): """test add view""" # login testuser self.client.login(username='testuser_system', password='<PASSWORD>') # get response response = self.client.get('/system/add/') # compare self.assertEqual(response.status_code, 200) def test_system_add_template(self): """test add view""" # login testuser self.client.login(username='testuser_system', password='<PASSWORD>') # get response response = self.client.get('/system/add/') # compare self.assertTemplateUsed(response, 'dfirtrack_main/system/system_add.html') def test_system_add_get_user_context(self): """test add view""" # login testuser self.client.login(username='testuser_system', password='<PASSWORD>') # get response response = self.client.get('/system/add/') # compare self.assertEqual(str(response.context['user']), 'testuser_system') def test_system_add_redirect(self): """test add view""" # login testuser self.client.login(username='testuser_system', password='<PASSWORD>') # create url destination = urllib.parse.quote('/system/add/', safe='/') # get response response = self.client.get('/system/add', follow=True) # compare self.assertRedirects( response, destination, status_code=301, target_status_code=200 ) def test_system_add_post_redirect(self): """test add view""" # login testuser self.client.login(username='testuser_system', password='<PASSWORD>') # get object systemstatus_id = Systemstatus.objects.get( systemstatus_name='systemstatus_1' ).systemstatus_id # create post data data_dict = { 'system_name': 'system_add_post_test', 'systemstatus': systemstatus_id, 'iplist': '', } # get response response = self.client.post('/system/add/', data_dict) # get object system_id = System.objects.get(system_name='system_add_post_test').system_id # create url destination = urllib.parse.quote('/system/' + str(system_id) + '/', safe='/') # compare self.assertRedirects( response, destination, status_code=302, target_status_code=200 ) def test_system_add_post_invalid_reload(self): """test add view""" # login testuser self.client.login(username='testuser_system', password='<PASSWORD>') # create post data data_dict = {} # get response response = self.client.post('/system/add/', data_dict) # compare self.assertEqual(response.status_code, 200) def test_system_add_post_invalid_template(self): """test add view""" # login testuser self.client.login(username='testuser_system', password='<PASSWORD>') # create post data data_dict = {} # get response response = self.client.post('/system/add/', data_dict) # compare 
self.assertTemplateUsed(response, 'dfirtrack_main/system/system_add.html') def test_system_add_post_ips_save_valid(self): """test add view""" # login testuser self.client.login(username='testuser_system', password='<PASSWORD>') # get object systemstatus_id = Systemstatus.objects.get( systemstatus_name='systemstatus_1' ).systemstatus_id # create post data data_dict = { 'system_name': 'system_add_post_ips_save_valid_test', 'systemstatus': systemstatus_id, 'iplist': '127.0.0.3\n127.0.0.4', } # get response self.client.post('/system/add/', data_dict) # get object system_1 = System.objects.get(system_name='system_add_post_ips_save_valid_test') # get objects from system system_ip_3 = system_1.ip.filter(ip_ip='127.0.0.3')[0] system_ip_4 = system_1.ip.filter(ip_ip='127.0.0.4')[0] # get objects ip_3 = Ip.objects.get(ip_ip='127.0.0.3') ip_4 = Ip.objects.get(ip_ip='127.0.0.4') # compare self.assertEqual(system_ip_3, ip_3) self.assertEqual(system_ip_4, ip_4) def test_system_add_post_ips_save_empty_line_message(self): """test add view""" # login testuser self.client.login(username='testuser_system', password='<PASSWORD>') # get object systemstatus_id = Systemstatus.objects.get( systemstatus_name='systemstatus_1' ).systemstatus_id # create post data data_dict = { 'system_name': 'system_add_post_ips_save_empty_line_test', 'systemstatus': systemstatus_id, 'iplist': '\n127.0.0.5', } # get response response = self.client.post('/system/add/', data_dict) # get messages messages = list(get_messages(response.wsgi_request)) # compare self.assertEqual(str(messages[0]), 'Empty line instead of IP was provided') def test_system_add_post_ips_save_no_ip_message(self): """test add view""" # login testuser self.client.login(username='testuser_system', password='<PASSWORD>') # get object systemstatus_id = Systemstatus.objects.get( systemstatus_name='systemstatus_1' ).systemstatus_id # create post data data_dict = { 'system_name': 'system_add_post_ips_save_no_ip_test', 'systemstatus': systemstatus_id, 'iplist': 'foobar\n127.0.0.6', } # get response response = self.client.post('/system/add/', data_dict) # get messages messages = list(get_messages(response.wsgi_request)) # compare self.assertEqual(str(messages[0]), 'Provided string was no IP') def test_system_add_post_ips_save_ip_existing_message(self): """test add view""" # login testuser self.client.login(username='testuser_system', password='<PASSWORD>') # create object Ip.objects.create(ip_ip='127.0.0.7') # get object systemstatus_id = Systemstatus.objects.get( systemstatus_name='systemstatus_1' ).systemstatus_id # create post data data_dict = { 'system_name': 'system_add_post_ips_save_ip_existing_test', 'systemstatus': systemstatus_id, 'iplist': '127.0.0.7', } # get response response = self.client.post('/system/add/', data_dict) # get messages messages = list(get_messages(response.wsgi_request)) # compare self.assertEqual(str(messages[0]), 'IP already exists in database') def test_system_add_context_workflows(self): """test add view""" # add app to dfirtrack.settings if 'dfirtrack_config' not in installed_apps: installed_apps.append('dfirtrack_config') # login testuser self.client.login(username='testuser_system', password='<PASSWORD>') # get response response = self.client.get('/system/add/') # compare self.assertEqual(str(response.context['workflows'][0]), 'workflow_1') def test_system_add_post_workflows(self): """test add view""" # add app to dfirtrack.settings if 'dfirtrack_config' not in installed_apps: installed_apps.append('dfirtrack_config') # login testuser 
self.client.login(username='testuser_system', password='<PASSWORD>') # get object systemstatus_id = Systemstatus.objects.get( systemstatus_name='systemstatus_1' ).systemstatus_id workflow_id = Workflow.objects.get(workflow_name='workflow_1').workflow_id # create post data data_dict = { 'system_name': 'system_add_with_valid_workflow', 'systemstatus': systemstatus_id, 'workflow': workflow_id, 'iplist': '127.0.0.1', } # get response response = self.client.post('/system/add/', data_dict, follow=True) # compare self.assertContains(response, 'Workflow applied') def test_system_add_post_nonexistent_workflows(self): """test add view""" # add app to dfirtrack.settings if 'dfirtrack_config' not in installed_apps: installed_apps.append('dfirtrack_config') # login testuser self.client.login(username='testuser_system', password='<PASSWORD>') # get object systemstatus_id = Systemstatus.objects.get( systemstatus_name='systemstatus_1' ).systemstatus_id # create post data data_dict = { 'system_name': 'system_add_with_invalid_workflow', 'systemstatus': systemstatus_id, 'workflow': 99, 'iplist': '127.0.0.1', } # get response response = self.client.post('/system/add/', data_dict, follow=True) # compare self.assertContains(response, 'Could not apply workflow') def test_system_edit_not_logged_in(self): """test edit view""" # get object system_1 = System.objects.get(system_name='system_1') # create url destination = '/login/?next=' + urllib.parse.quote( '/system/' + str(system_1.system_id) + '/edit/', safe='' ) # get response response = self.client.get( '/system/' + str(system_1.system_id) + '/edit/', follow=True ) # compare self.assertRedirects( response, destination, status_code=302, target_status_code=200 ) def test_system_edit_logged_in(self): """test edit view""" # get object system_1 = System.objects.get(system_name='system_1') # login testuser self.client.login(username='testuser_system', password='<PASSWORD>') # get response response = self.client.get('/system/' + str(system_1.system_id) + '/edit/') # compare self.assertEqual(response.status_code, 200) def test_system_edit_template(self): """test edit view""" # get object system_1 = System.objects.get(system_name='system_1') # login testuser self.client.login(username='testuser_system', password='<PASSWORD>') # get response response = self.client.get('/system/' + str(system_1.system_id) + '/edit/') # compare self.assertTemplateUsed(response, 'dfirtrack_main/system/system_edit.html') def test_system_edit_get_user_context(self): """test edit view""" # get object system_1 = System.objects.get(system_name='system_1') # login testuser self.client.login(username='testuser_system', password='<PASSWORD>') # get response response = self.client.get('/system/' + str(system_1.system_id) + '/edit/') # compare self.assertEqual(str(response.context['user']), 'testuser_system') def test_system_edit_redirect(self): """test edit view""" # get object system_1 = System.objects.get(system_name='system_1') # login testuser self.client.login(username='testuser_system', password='<PASSWORD>') # create url destination = urllib.parse.quote( '/system/' + str(system_1.system_id) + '/edit/', safe='/' ) # get response response = self.client.get( '/system/' + str(system_1.system_id) + '/edit', follow=True ) # compare self.assertRedirects( response, destination, status_code=301, target_status_code=200 ) def test_system_edit_initial_ipstring(self): """test edit view""" # get object system_1 = System.objects.get(system_name='system_1') # create objects ip_1 = 
Ip.objects.create(ip_ip='127.0.0.1') ip_2 = Ip.objects.create(ip_ip='127.0.0.2') # append objects system_1.ip.add(ip_1) system_1.ip.add(ip_2) # login testuser self.client.login(username='testuser_system', password='<PASSWORD>') # get response response = self.client.get('/system/' + str(system_1.system_id) + '/edit/') # compare self.assertEqual( response.context['form'].initial['iplist'], '127.0.0.1\n127.0.0.2' ) def test_system_edit_post_redirect(self): """test edit view""" # login testuser self.client.login(username='testuser_system', password='<PASSWORD>') # get user test_user = User.objects.get(username='testuser_system') # get object systemstatus_1 = Systemstatus.objects.get(systemstatus_name='systemstatus_1') # create object systemstatus_2 = Systemstatus.objects.create(systemstatus_name='systemstatus_2') # create object system_1 = System.objects.create( system_name='system_edit_post_test_1', systemstatus=systemstatus_1, system_created_by_user_id=test_user, system_modified_by_user_id=test_user, ) # create post data data_dict = { 'system_name': 'system_edit_post_test_1', 'systemstatus': systemstatus_2.systemstatus_id, 'iplist': '', } # get response response = self.client.post( '/system/' + str(system_1.system_id) + '/edit/', data_dict ) # get object system_2 = System.objects.get(system_name='system_edit_post_test_1') # create url destination = urllib.parse.quote( '/system/' + str(system_2.system_id) + '/', safe='/' ) # compare self.assertRedirects( response, destination, status_code=302, target_status_code=200 ) def test_system_edit_post_invalid_reload(self): """test edit view""" # login testuser self.client.login(username='testuser_system', password='<PASSWORD>') # get object system_id = System.objects.get(system_name='system_1').system_id # create post data data_dict = {} # get response response = self.client.post('/system/' + str(system_id) + '/edit/', data_dict) # compare self.assertEqual(response.status_code, 200) def test_system_edit_post_invalid_template(self): """test edit view""" # login testuser self.client.login(username='testuser_system', password='<PASSWORD>') # get object system_id = System.objects.get(system_name='system_1').system_id # create post data data_dict = {} # get response response = self.client.post('/system/' + str(system_id) + '/edit/', data_dict) # compare self.assertTemplateUsed(response, 'dfirtrack_main/system/system_edit.html') # TODO: does not work so far, model change in config does not affect the underlying view (it is not model related) # def test_system_edit_post_system_name_editable_redirect(self): # """ test edit view """ # # # login testuser # self.client.login(username='testuser_system', password='<PASSWORD>') # # get config model # main_config_model = MainConfigModel.objects.get(main_config_name = 'MainConfig') # # set config model # main_config_model.system_name_editable = True # main_config_model.save() # # get user # test_user = User.objects.get(username = 'testuser_system') # # get object # systemstatus_1 = Systemstatus.objects.get(systemstatus_name='systemstatus_1') # # create object # system_1 = System.objects.create( # system_name = 'system_edit_post_test_3', # systemstatus = systemstatus_1, # system_created_by_user_id = test_user, # system_modified_by_user_id = test_user, # ) # # create post data # data_dict = { # 'system_name': 'system_edit_post_test_4', # 'systemstatus': systemstatus_1.systemstatus_id, # 'iplist': '', # } # # get response # response = self.client.post('/system/' + str(system_1.system_id) + '/edit/', data_dict) # # get 
object # system_2 = System.objects.get(system_name='system_edit_post_test_4') # # create url # destination = urllib.parse.quote('/system/' + str(system_2.system_id) + '/', safe='/') # # compare # self.assertRedirects(response, destination, status_code=302, target_status_code=200) def test_system_edit_post_system_name_not_editable_redirect(self): """test edit view""" # login testuser self.client.login(username='testuser_system', password='<PASSWORD>') # get user test_user = User.objects.get(username='testuser_system') # get object systemstatus_1 = Systemstatus.objects.get(systemstatus_name='systemstatus_1') # create object system_1 = System.objects.create( system_name='system_edit_post_test_5',
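# --- Illustrative sketch (not part of the original test module) ---
# The login-redirect tests above all build their expected destination the same
# way: the target path is percent-encoded with no safe characters and appended
# to '/login/?next='.  The standalone snippet below shows that convention in
# isolation; '/system/add/' is simply the example path used in the tests.
import urllib.parse

def _expected_login_redirect(path):
    # quote() with safe='' also encodes '/', so the whole path ends up escaped
    return '/login/?next=' + urllib.parse.quote(path, safe='')

# _expected_login_redirect('/system/add/') == '/login/?next=%2Fsystem%2Fadd%2F'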
import hashlib
import hmac
from base64 import b64decode, b64encode
from glob import glob
from io import BufferedIOBase
from os import unlink
from os.path import exists
from pbkdf2 import PBKDF2
from typing import Any, Dict, List, Optional, Tuple

BASE58_ALPHABET = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
BECH32_ALPHABET = 'qpzry9x8gf2tvdw0s3jn54khce6mua7l'
GEN = [0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3]
PBKDF2_ROUNDS = 2048
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3

def base64_encode(b: bytes) -> str:
    return b64encode(b).decode('ascii')

def base64_decode(s: str) -> bytes:
    return b64decode(s)

# next four functions are straight from BIP0173:
# https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki
def bech32_polymod(values: List[int]) -> int:
    chk = 1
    for v in values:
        b = (chk >> 25)
        chk = (chk & 0x1ffffff) << 5 ^ v
        for i in range(5):
            chk ^= GEN[i] if ((b >> i) & 1) else 0
    return chk

def bech32_hrp_expand(s: str) -> List[int]:
    b = s.encode('ascii')
    return [x >> 5 for x in b] + [0] + [x & 31 for x in b]

def bech32_verify_checksum(hrp: str, data: List[int]) -> bool:
    return bech32_polymod(bech32_hrp_expand(hrp) + data) == 1

def bech32_create_checksum(hrp: str, data: List[int]) -> List[int]:
    values = bech32_hrp_expand(hrp) + data
    polymod = bech32_polymod(values + [0, 0, 0, 0, 0, 0]) ^ 1
    return [(polymod >> 5 * (5 - i)) & 31 for i in range(6)]

def big_endian_to_int(b: bytes) -> int:
    '''big_endian_to_int takes a byte sequence as a big-endian number.
    Returns an integer'''
    # use the int.from_bytes(b, <endianness>) method
    return int.from_bytes(b, 'big')

def bit_field_to_bytes(bit_field: List[int]) -> bytes:
    if len(bit_field) % 8 != 0:
        raise RuntimeError(
            'bit_field does not have a length that is divisible by 8')
    result = bytearray(len(bit_field) // 8)
    for i, bit in enumerate(bit_field):
        byte_index, bit_index = divmod(i, 8)
        if bit:
            result[byte_index] |= 1 << bit_index
    return bytes(result)

def byte_to_int(b: bytes) -> int:
    '''Returns an integer that corresponds to the byte'''
    return b[0]

def bytes_to_bit_field(some_bytes: bytes) -> List[int]:
    flag_bits = []
    # iterate over each byte of flags
    for byte in some_bytes:
        # iterate over each bit, right-to-left
        for _ in range(8):
            # add the current bit (byte & 1)
            flag_bits.append(byte & 1)
            # rightshift the byte 1
            byte >>= 1
    return flag_bits

def check_not_exists(*filenames) -> None:
    for filename in filenames:
        if exists(filename):
            raise IOError(f'file {filename} already exists')

def choice_menu(items: List[Any], exit_option: bool = False) -> Any:
    if exit_option:
        print('0. Exit')
    if len(items) == 1:
        return items[0]
    for i, item in enumerate(items):
        print(f'{i+1}. 
{item}') while True: choice = int(input('Please make your choice: ')) if exit_option and choice == 0: return None if 0 <= choice - 1 < len(items): return items[choice - 1] def choose_file(extension: str) -> Optional[str]: choices = glob(f'*.{extension}') if len(choices) == 0: print(f'No {extension} file in this directory') return None else: return choice_menu(choices) def decode_base58(s: str) -> bytes: return raw_decode_base58(s)[1:] def decode_bech32(s: str) -> Tuple[bool, int, bytes]: '''Returns whether it's testnet, segwit version and the hash from the bech32 address''' hrp, raw_data = s.split('1') if hrp == 'tb': testnet = True elif hrp == 'bc': testnet = False else: raise ValueError(f'unknown human readable part: {hrp}') data = [BECH32_ALPHABET.index(c) for c in raw_data] if not bech32_verify_checksum(hrp, data): raise ValueError(f'bad address: {s}') version = data[0] number = 0 for digit in data[1:-6]: number = (number << 5) | digit num_bytes = (len(data) - 7) * 5 // 8 bits_to_ignore = (len(data) - 7) * 5 % 8 number >>= bits_to_ignore h = int_to_big_endian(number, num_bytes) if num_bytes < 2 or num_bytes > 40: raise ValueError(f'bytes out of range: {num_bytes}') return testnet, version, h def delete_files(*filenames) -> None: for filename in filenames: if exists(filename): unlink(filename) def encode_base58(s: bytes) -> str: # determine how many 0 bytes (b'\x00') s starts with count = 0 for c in s: if c == 0: count += 1 else: break # convert from binary to hex, then hex to integer num = int(s.hex(), 16) result = '' prefix = '1' * count while num > 0: num, mod = divmod(num, 58) result = BASE58_ALPHABET[mod] + result return prefix + result def encode_base58_checksum(raw: bytes) -> str: '''Takes bytes and turns it into base58 encoding with checksum''' # checksum is the first 4 bytes of the hash256 checksum = hash256(raw)[:4] # encode_base58 on the raw and the checksum return encode_base58(raw + checksum) def encode_bech32(nums: List[int]) -> str: '''Convert from 5-bit array of integers to bech32 format''' result = '' for n in nums: result += BECH32_ALPHABET[n] return result def encode_bech32_checksum(s: bytes, testnet: bool = False) -> str: '''Convert a segwit ScriptPubKey to a bech32 address''' if testnet: prefix = 'tb' else: prefix = 'bc' version = s[0] if version > 0: version -= 0x50 length = s[1] data = [version] + group_32(s[2:2 + length]) checksum = bech32_create_checksum(prefix, data) bech32 = encode_bech32(data + checksum) return prefix + '1' + bech32 def encode_dict(d: Dict[bytes, Any]) -> bytes: return encode_list(d.values()) def encode_list(l: Any) -> bytes: result = encode_varint(len(l)) for item in l: result += item.serialize() return result def encode_varint(i: int) -> bytes: '''encodes an integer as a varint''' if i < 0xfd: return bytes([i]) elif i < 0x10000: return b'\xfd' + int_to_little_endian(i, 2) elif i < 0x100000000: return b'\xfe' + int_to_little_endian(i, 4) elif i < 0x10000000000000000: return b'\xff' + int_to_little_endian(i, 8) else: raise RuntimeError(f'integer too large: {i}') def encode_varstr(b: bytes) -> bytes: '''encodes bytes as a varstr''' # encode the length of the string using encode_varint result = encode_varint(len(b)) # add the bytes result += b # return the whole thing return result def group_32(s: bytes) -> List[int]: '''Convert from 8-bit bytes to 5-bit array of integers''' result = [] unused_bits = 0 current = 0 for c in s: unused_bits += 8 current = (current << 8) + c while unused_bits > 5: unused_bits -= 5 result.append(current >> 
unused_bits) mask = (1 << unused_bits) - 1 current &= mask result.append(current << (5 - unused_bits)) return result def hash160(s: bytes) -> bytes: return hashlib.new('ripemd160', hashlib.sha256(s).digest()).digest() def hash256(s: bytes) -> bytes: return hashlib.sha256(hashlib.sha256(s).digest()).digest() def hmac_sha512(key: bytes, msg: bytes) -> bytes: return hmac.HMAC(key=key, msg=msg, digestmod=hashlib.sha512).digest() def hmac_sha512_kdf(msg: str, salt: bytes) -> bytes: return PBKDF2( msg, salt, iterations=PBKDF2_ROUNDS, macmodule=hmac, digestmodule=hashlib.sha512, ).read(64) def int_to_big_endian(n: int, length: int) -> bytes: '''int_to_little_endian takes an integer and returns the little-endian byte sequence of length''' # use the int.to_bytes(length, <endianness>) method return n.to_bytes(length, 'big') def int_to_byte(n: int) -> bytes: '''Returns a single byte that corresponds to the integer''' if n > 255 or n < 0: raise ValueError( 'integer greater than 255 or lower than 0 cannot be converted into a byte' ) return bytes([n]) def int_to_little_endian(n: int, length: int) -> bytes: '''int_to_little_endian takes an integer and returns the little-endian byte sequence of length''' # use the int.to_bytes(length, <endianness>) method return n.to_bytes(length, 'little') def little_endian_to_int(b: bytes) -> int: '''little_endian_to_int takes byte sequence as a little-endian number. Returns an integer''' # use the int.from_bytes(b, <endianness>) method return int.from_bytes(b, 'little') def merkle_parent(hash1: bytes, hash2: bytes) -> bytes: '''Takes the binary hashes and calculates the hash256''' # return the hash256 of hash1 + hash2 return hash256(hash1 + hash2) def merkle_parent_level(hashes: List[bytes]) -> List[bytes]: '''Takes a list of binary hashes and returns a list that's half the length''' # if the list has exactly 1 element raise an error if len(hashes) == 1: raise RuntimeError('Cannot take a parent level with only 1 item') # if the list has an odd number of elements, duplicate the last one # and put it at the end so it has an even number of elements if len(hashes) % 2 == 1: hashes.append(hashes[-1]) # initialize parent level parent_level = [] # loop over every pair (use: for i in range(0, len(hashes), 2)) for i in range(0, len(hashes), 2): # get the merkle parent of i and i+1 hashes parent = merkle_parent(hashes[i], hashes[i + 1]) # append parent to parent level parent_level.append(parent) # return parent level return parent_level def merkle_root(hashes: List[bytes]) -> bytes: '''Takes a list of binary hashes and returns the merkle root ''' # current level starts as hashes current_level = hashes # loop until there's exactly 1 element while len(current_level) > 1: # current level becomes the merkle parent level current_level = merkle_parent_level(current_level) # return the 1st item of current_level return current_level[0] def murmur3(data: bytes, seed: int
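# --- Illustrative usage sketch (not part of the original module) ---
# encode_varint()/encode_varstr() defined above implement Bitcoin's variable
# length integer encoding: a single byte below 0xfd, otherwise a marker byte
# (0xfd/0xfe/0xff) followed by a 2/4/8 byte little-endian integer.  The checks
# below exercise the helpers defined above with values that can be verified by
# hand.
def _demo_varint_encoding():
    assert encode_varint(0xfc) == b'\xfc'                         # fits in a single byte
    assert encode_varint(0xfd) == b'\xfd\xfd\x00'                 # 0xfd marker + 2 bytes LE
    assert encode_varint(0x12345678) == b'\xfe\x78\x56\x34\x12'   # 0xfe marker + 4 bytes LE
    # a varstr is just the length as a varint followed by the raw bytes
    assert encode_varstr(b'abc') == b'\x03abc'
    return True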
& protocol parameters self.protocol_object.loadParameterPresets() self.updateParameterPresetSelector(self.serial_grid) self.updateProtocolParametersInput(self.serial_grid) self.updateRunParamtersInput(self.serial_grid) self.show() def onPressedButton(self): sender = self.sender() if sender.text() == 'Record': if (self.data.experimentFileExists() and self.data.currentFlyExists()): self.sendRun(save_metadata_flag=True) else: msg = QMessageBox() msg.setIcon(QMessageBox.Warning) msg.setText("You have not initialized a data file and/or fly yet") msg.setInformativeText("You can show stimuli by clicking the View button, but no metadata will be saved") msg.setWindowTitle("No experiment file and/or fly") msg.setDetailedText("Initialize or load both an experiment file and a fly if you'd like to save your metadata") msg.setStandardButtons(QMessageBox.Ok) msg.exec_() elif sender.text() == 'View': self.sendRun(save_metadata_flag=False) self.pauseButton.setText('Pause') elif sender.text() == 'Pause': self.epoch_run.pauseRun() self.pauseButton.setText('Resume') self.status_label.setText('Paused...') self.show() elif sender.text() == 'Resume': self.epoch_run.resumeRun() self.pauseButton.setText('Pause') self.status_label.setText('Viewing...') self.show() elif sender.text() == 'Stop': self.epoch_run.stopRun() self.pauseButton.setText('Pause') elif sender.text() == 'Enter note': self.noteText = self.notesEdit.toPlainText() if self.data.experimentFileExists(): self.data.createNote(self.noteText) # save note to expt file self.notesEdit.clear() # clear notes box else: self.notesEdit.setTextColor(QtGui.QColor("Red")) elif sender.text() == 'Save preset': self.updateParametersFromFillableFields() # get the state of the param input from GUI start_name = self.parameter_preset_comboBox.currentText() if start_name == 'Default': start_name = '' text, _ = QInputDialog.getText(self, "Save preset", "Preset Name:", QLineEdit.Normal, start_name) self.protocol_object.updateParameterPresets(text) elif sender.text() == 'Initialize experiment': dialog = QDialog() dialog.ui = InitializeExperimentGUI(parent=dialog) dialog.ui.setupUI(self, dialog) dialog.setFixedSize(300, 200) dialog.exec_() self.data.experiment_file_name = dialog.ui.le_FileName.text() self.data.data_directory = dialog.ui.le_DataDirectory.text() self.data.experimenter = dialog.ui.le_Experimenter.text() self.updateExistingFlyInput() self.populateGroups() elif sender.text() == 'Load experiment': filePath, _ = QFileDialog.getOpenFileName(self, "Open file") self.data.experiment_file_name = os.path.split(filePath)[1].split('.')[0] self.data.data_directory = os.path.split(filePath)[0] if self.data.experiment_file_name != '': self.currentExperimentLabel.setText(self.data.experiment_file_name) # update series count to reflect already-collected series self.data.reloadSeriesCount() self.series_counter_input.setValue(self.data.getHighestSeriesCount() + 1) self.updateExistingFlyInput() self.populateGroups() def onToggleBox(self, box): if box.text() == 'Multistim': if box.isChecked() == True: self.pre_protocol_project = self.protocol_object if self.aprotocol and self.vprotocol: self.protocol_object = DuoProtocol(self.cfg, self.vprotocol, self.aprotocol) else: self.protocol_object = self.pre_protocol_project def onCreatedFly(self): # Populate fly metadata from fly data fields fly_metadata = {'fly_id': self.fly_id_input.text(), 'sex': self.fly_sex_input.currentText(), 'age': self.fly_age_input.value(), 'prep': self.fly_prep_input.currentText(), 'driver_1': 
self.fly_driver_1.currentText(), 'indicator_1': self.fly_indicator_1.currentText(), 'driver_2': self.fly_driver_2.currentText(), 'indicator_2': self.fly_indicator_2.currentText(), 'genotype': self.fly_genotype_input.text()} self.data.createFly(fly_metadata) # creates new fly and selects it as the current fly self.updateExistingFlyInput() def resetLayout(self, grid): # TODO: the 6 here is hard coded, and corresponds to the 5 in updateProtocolParametersInput # TODO: need to change this ultimately space_ct = 6 for ii in range(len(self.protocol_object.protocol_parameters.items())): item = grid.itemAtPosition(self.run_params_ct+space_ct+ii, 0) if item is not None: item.widget().deleteLater() item = grid.itemAtPosition(self.run_params_ct+space_ct+ii, 1) if item is not None: item.widget().deleteLater() self.show() def updateProtocolParametersInput(self, grid): # update display window to show parameters for this protocol #self.protocol_parameter_input = {} # clear old input params dict self.protocol_params_dict[self.tabs.currentIndex()] = {} space_ct = 5 ct = 0 for key, value in self.protocol_object.protocol_parameters.items(): ct += 1 newLabel = QLabel(key + ':') grid.addWidget(newLabel, self.run_params_ct + space_ct + ct, 0) # was +5 if isinstance(value, bool): self.protocol_params_dict[self.tabs.currentIndex()][key] = QCheckBox() self.protocol_params_dict[self.tabs.currentIndex()][key].setChecked(value) else: self.protocol_params_dict[self.tabs.currentIndex()][key] = QLineEdit() if isinstance(value, int): self.protocol_params_dict[self.tabs.currentIndex()][key].setValidator(QtGui.QIntValidator()) elif isinstance(value, float): self.protocol_params_dict[self.tabs.currentIndex()][key].setValidator(QtGui.QDoubleValidator()) self.protocol_params_dict[self.tabs.currentIndex()][key].setText(str(value)) # set to default value grid.addWidget(self.protocol_params_dict[self.tabs.currentIndex()][key], self.run_params_ct + space_ct + ct, 1, 1, 2) # was +5 self.protocol_parameter_input = self.protocol_params_dict[self.tabs.currentIndex()] def updateParameterPresetSelector(self, grid): self.parameter_preset_comboBox = QComboBox(self) self.parameter_preset_comboBox.addItem("Default") for name in self.protocol_object.parameter_presets.keys(): self.parameter_preset_comboBox.addItem(name) self.parameter_preset_comboBox.activated[str].connect(partial(self.onSelectedParameterPreset, grid)) grid.addWidget(self.parameter_preset_comboBox, 2, 1, 1, 1) def onSelectedParameterPreset(self, grid, text): self.protocol_object.selectProtocolPreset(text) self.resetLayout(grid) self.updateProtocolParametersInput(grid) self.updateRunParamtersInput(grid) self.show() def onSelectedExistingFly(self, index): fly_data = self.data.getExistingFlyData() self.populateFlyMetadataFields(fly_data[index]) self.data.current_fly = fly_data[index].get('fly_id') def updateExistingFlyInput(self): self.existing_fly_input.clear() for fly_data in self.data.getExistingFlyData(): self.existing_fly_input.addItem(fly_data['fly_id']) index = self.existing_fly_input.findText(self.data.current_fly) if index >= 0: self.existing_fly_input.setCurrentIndex(index) def populateFlyMetadataFields(self, fly_data_dict): self.fly_id_input.setText(fly_data_dict['fly_id']) self.fly_sex_input.setCurrentText(fly_data_dict['sex']) self.fly_age_input.setValue(fly_data_dict['age']) self.fly_driver_1.setCurrentText(fly_data_dict['driver_1']) self.fly_indicator_1.setCurrentText(fly_data_dict['indicator_1']) self.fly_driver_2.setCurrentText(fly_data_dict['driver_2']) 
self.fly_indicator_2.setCurrentText(fly_data_dict['indicator_2']) self.fly_genotype_input.setText(fly_data_dict['genotype']) def updateRunParamtersInput(self, grid): self.run_params_ct = 0 # Run parameters list self.run_params_dict[self.tabs.currentIndex()] = {} for key, value in self.protocol_object.run_parameters.items(): if key not in ['protocol_ID', 'run_start_time']: self.run_params_ct += 1 # delete existing labels: item = grid.itemAtPosition(2 + self.run_params_ct, 0) if item is not None: item.widget().deleteLater() # write new labels: newLabel = QLabel(key + ':') grid.addWidget(newLabel, 2 + self.run_params_ct, 0) self.run_params_dict[self.tabs.currentIndex()][key] = QLineEdit() if isinstance(value, int): validator = QtGui.QIntValidator() validator.setBottom(0) elif isinstance(value, float): validator = QtGui.QDoubleValidator() validator.setBottom(0) self.run_params_dict[self.tabs.currentIndex()][key].setValidator(validator) self.run_params_dict[self.tabs.currentIndex()][key].setText(str(value)) grid.addWidget(self.run_params_dict[self.tabs.currentIndex()][key], 2 + self.run_params_ct, 1, 1, 1) self.run_parameter_input = self.run_params_dict[self.tabs.currentIndex()] def onEnteredSeriesCount(self): self.data.updateSeriesCount(self.series_counter_input.value()) if self.data.experimentFileExists: if self.data.getSeriesCount() <= self.data.getHighestSeriesCount(): self.series_counter_input.setStyleSheet("background-color: rgb(0, 255, 255);") else: self.series_counter_input.setStyleSheet("background-color: rgb(255, 255, 255);") def sendRun(self, save_metadata_flag=True): # check to make sure a protocol has been selected if self.protocol_object.run_parameters['protocol_ID'] == '': self.status_label.setText('Select a protocol') return # no protocol exists, don't send anything # check to make sure the series count does not already exist if save_metadata_flag: self.data.updateSeriesCount(self.series_counter_input.value()) if (self.data.getSeriesCount() in self.data.getExistingSeries()): self.series_counter_input.setStyleSheet("background-color: rgb(0, 255, 255);") return # group already exists, don't send anything else: self.series_counter_input.setStyleSheet("background-color: rgb(255, 255, 255);") # Populate parameters from filled fields self.updateParametersFromFillableFields() # start the epoch run thread: self.runSeriesThread = runSeriesThread(self.epoch_run, self.protocol_object, self.data, self.client, save_metadata_flag) self.runSeriesThread.finished.connect(lambda: self.runFinished(save_metadata_flag)) self.runSeriesThread.started.connect(lambda: self.runStarted(save_metadata_flag)) self.runSeriesThread.start() def runStarted(self, save_metadata_flag): # Lock the view and run buttons to prevent spinning up multiple threads self.viewButton.setEnabled(False) self.recordButton.setEnabled(False) if save_metadata_flag: self.status_label.setText('Recording series ' + str(self.data.getSeriesCount())) else: self.status_label.setText('Viewing...') def runFinished(self, save_metadata_flag): # re-enable view/record buttons self.viewButton.setEnabled(True) self.recordButton.setEnabled(True) self.status_label.setText('Ready') self.pauseButton.setText('Pause') if save_metadata_flag: self.updateExistingFlyInput() # Advance the series_count: self.data.advanceSeriesCount() self.series_counter_input.setValue(self.data.getSeriesCount()) self.populateGroups() def updateParametersFromFillableFields(self): if isinstance(self.protocol_object, DuoProtocol): 
self.protocol_object.run_parameters.update(self.runParamsSerializer(self.run_parameter_input)) self.protocol_object.vprotocol.run_parameters.update(self.runParamsSerializer(self.run_params_dict[1])) self.protocol_object.aprotocol.run_parameters.update(self.runParamsSerializer(self.run_params_dict[2])) self.protocol_object.vprotocol.protocol_parameters.update(self.protocolParamsSerializer(self.protocol_params_dict[1])) self.protocol_object.aprotocol.protocol_parameters.update(self.protocolParamsSerializer(self.protocol_params_dict[2])) self.protocol_object.combineProtocolParams() else: self.protocol_object.run_parameters.update(self.runParamsSerializer(self.run_parameter_input)) self.protocol_object.protocol_parameters.update(self.protocolParamsSerializer(self.protocol_parameter_input)) def runParamsSerializer(self, run_params_input): run_params = {} for key, value in run_params_input.items(): run_params[key] = float(value.text()) return run_params def protocolParamsSerializer(self, protocol_params_input): protocol_params = {} for key, value in protocol_params_input.items(): if isinstance(value, QCheckBox): #QCheckBox protocol_params[key] = value.isChecked() elif isinstance(value, str): protocol_params[key] = value.text() # Pass the string else: # QLineEdit new_param_entry = value.text() if new_param_entry[0] == '[': # User trying to enter a list of values to_a_list = [] for x in new_param_entry[1:-1].split(','): to_a_list.append(float(x)) protocol_params[key] = to_a_list else: protocol_params[key] = float(new_param_entry) return protocol_params def populateGroups(self): file_path = os.path.join(self.data.data_directory, self.data.experiment_file_name + '.hdf5') group_dset_dict = plugin.base.getHierarchy(file_path, additional_exclusions='rois') self._populateTree(self.groupTree, group_dset_dict) def _populateTree(self, widget, dict): widget.clear() self.fill_item(widget.invisibleRootItem(), dict) def fill_item(self, item, value): item.setExpanded(True) if type(value) is dict: for key, val in sorted(value.items()): child = QTreeWidgetItem() child.setText(0, key) item.addChild(child) self.fill_item(child, val) elif type(value) is list: for val in value: child = QTreeWidgetItem() item.addChild(child) if type(val) is dict: child.setText(0, '[dict]') self.fill_item(child, val) elif type(val) is list: child.setText(0, '[list]') self.fill_item(child, val) else: child.setText(0, val) child.setExpanded(True) else: child = QTreeWidgetItem() child.setText(0, value) item.addChild(child) def onTreeItemClicked(self, item, column): file_path = os.path.join(self.data.data_directory, self.data.experiment_file_name + '.hdf5') group_path = plugin.base.getPathFromTreeItem(self.groupTree.selectedItems()[0]) if group_path != '': attr_dict = plugin.base.getAttributesFromGroup(file_path, group_path) if 'series' in group_path.split('/')[-1]: editable_values = False # don't let user edit epoch parameters else: editable_values = True self.populate_attrs(attr_dict = attr_dict, editable_values = editable_values) def populate_attrs(self, attr_dict=None, editable_values=False): """ Populate attribute for currently selected group """ self.tableAttributes.blockSignals(True) # block udpate signals for auto-filled forms self.tableAttributes.setRowCount(0) self.tableAttributes.setColumnCount(2) self.tableAttributes.setSortingEnabled(False) if attr_dict: for num, key in enumerate(attr_dict): self.tableAttributes.insertRow(self.tableAttributes.rowCount()) key_item = QTableWidgetItem(key) 
key_item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled) self.tableAttributes.setItem(num, 0, key_item) val_item = QTableWidgetItem(str(attr_dict[key])) if editable_values: val_item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled) else: val_item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled) self.tableAttributes.setItem(num, 1, val_item) self.tableAttributes.blockSignals(False) def update_attrs_to_file(self, item): file_path = os.path.join(self.data.data_directory, self.data.experiment_file_name + '.hdf5') group_path = plugin.base.getPathFromTreeItem(self.groupTree.selectedItems()[0]) attr_key = self.tableAttributes.item(item.row(), 0).text() attr_val = item.text() # update attr in file plugin.base.changeAttribute(file_path, group_path, attr_key, attr_val) print('Changed attr {} to = {}'.format(attr_key, attr_val)) # # # Other accessory classes. For data file initialization and threading # # # # class InitializeExperimentGUI(QWidget): def setupUI(self, experimentGuiObject, parent=None): super(InitializeExperimentGUI, self).__init__(parent) self.parent = parent self.experimentGuiObject = experimentGuiObject layout = QFormLayout() label_FileName = QLabel('File Name:') init_now = datetime.now() defaultName = init_now.isoformat()[:-16] self.le_FileName = QLineEdit(defaultName) layout.addRow(label_FileName, self.le_FileName) button_SelectDirectory = QPushButton("Select Directory...", self) button_SelectDirectory.clicked.connect(self.onPressedDirectoryButton) self.le_DataDirectory = QLineEdit(self.experimentGuiObject.data.data_directory) layout.addRow(button_SelectDirectory, self.le_DataDirectory) label_Experimenter = QLabel('Experimenter:') self.le_Experimenter = QLineEdit(self.experimentGuiObject.data.experimenter) layout.addRow(label_Experimenter, self.le_Experimenter) self.label_status = QLabel('Enter experiment info') layout.addRow(self.label_status) enterButton = QPushButton("Enter", self) enterButton.clicked.connect(self.onPressedEnterButton) layout.addRow(enterButton) self.setLayout(layout) def onPressedEnterButton(self): self.experimentGuiObject.data.experiment_file_name = self.le_FileName.text() self.experimentGuiObject.data.data_directory = self.le_DataDirectory.text() self.experimentGuiObject.data.experimenter = self.le_Experimenter.text() if os.path.isfile(os.path.join(self.experimentGuiObject.data.data_directory, self.experimentGuiObject.data.experiment_file_name) + '.hdf5'): self.label_status.setText('Experiment file already exists!') elif not os.path.isdir(self.experimentGuiObject.data.data_directory): self.label_status.setText('Data directory does not exist!') else: self.label_status.setText('Data entered') self.experimentGuiObject.currentExperimentLabel.setText(self.experimentGuiObject.data.experiment_file_name) self.experimentGuiObject.data.initializeExperimentFile() self.experimentGuiObject.series_counter_input.setValue(1) self.close() self.parent.close() def onPressedDirectoryButton(self): filePath = str(QFileDialog.getExistingDirectory(self, "Select Directory")) self.le_DataDirectory.setText(filePath) class InitializeRigGUI(QWidget): def setupUI(self, experimentGuiObject, parent=None): super(InitializeRigGUI, self).__init__(parent) self.parent = parent self.experimentGuiObject = experimentGuiObject self.user_name = None self.available_rig_configs = [] self.layout = QFormLayout() self.resize(200, 400) label_UserName = QLabel('User Name:') self.UserComboBox = QComboBox() 
self.UserComboBox.activated[str].connect(self.onSelectedUserName) for choiceID in util.getAvailableUserNames(): self.UserComboBox.addItem(choiceID) self.layout.addRow(label_UserName, self.UserComboBox) label_RigName = QLabel('Rig Config:') self.RigComboBox = QComboBox() self.RigComboBox.activated[str].connect(self.onSelectedRig) self.layout.addRow(label_RigName, self.RigComboBox) self.updateAvailableRigs() self.setLayout(self.layout) self.show() def updateAvailableRigs(self): self.RigComboBox.clear() for choiceID in self.available_rig_configs: self.RigComboBox.addItem(choiceID) def onSelectedUserName(self, text): self.user_name = text self.available_rig_configs = util.getAvailableRigConfigs(self.user_name) self.updateAvailableRigs() self.show() def onSelectedRig(self, text): self.rig_name = text self.experimentGuiObject.user_name = self.user_name self.experimentGuiObject.rig_name = self.rig_name self.close() self.parent.close() class runSeriesThread(QThread): # https://nikolak.com/pyqt-threading-tutorial/ # https://stackoverflow.com/questions/41848769/pyqt5-object-has-no-attribute-connect def __init__(self, epoch_run,
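# --- Illustrative sketch (not project code) ----------------------------------
# The runSeriesThread definition above is truncated. As a rough illustration of
# the QThread pattern that the linked tutorials describe, here is a minimal,
# hypothetical worker: the class name, constructor arguments, and the
# epoch_run.startRun(...) call are assumptions for illustration only.
from PyQt5.QtCore import QThread, pyqtSignal

class EpochRunWorker(QThread):
    # custom signal so the GUI can re-enable its controls when the series ends
    series_done = pyqtSignal()

    def __init__(self, epoch_run, protocol_object, data, client, parent=None):
        super().__init__(parent)
        self.epoch_run = epoch_run
        self.protocol_object = protocol_object
        self.data = data
        self.client = client

    def run(self):
        # run() executes in the worker thread, so the GUI event loop stays responsive
        self.epoch_run.startRun(self.protocol_object, self.data, self.client)
        self.series_done.emit()
# ------------------------------------------------------------------------------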
s = phrases[i] if s.level > level: raise BadIndent(i) if s.level < level: return Goto('end', None, i - 1) if not s.name == 'else' and not s.name == 'elif': return Goto('end', None, i - 1) if is_true: i = skip_block(phrases, i + 1, level + 1) + 1 continue if s.name == 'else': goto = yield from gblock(phrases, i + 1, level + 1, scope) return goto is_true = val(s.tree, scope) if is_true: goto = yield from gblock(phrases, i + 1, level + 1, scope) if not goto._end: return goto i = goto.i + 1 else: i = skip_block(phrases, i + 1, level + 1) + 1 return Goto('end', None, i - 1) def iterate_over(value): if type(value) == Generator: while True: #print('ITERATING') yield value.next() yield from value def index(container, i): if type(container) == list or type(container) == tuple or type(container) == dict: if type(i) != Range: if type(i) == float and float.is_integer(i): return container[int(i)] else: return container[i] left, right, jump = i.expand(container) return container[left:right:jump] return container.at(i) def set_index(container, i, v): if type(cotainer) == list or type(container) == tuple: if type(i) != Range: if type(i) == float and float.is_integer(i): container[int(i)] = v else: container[i] = v left, right, jump = i.expand(container) container[left:right:jump] = v container.set(i, v) def adhoc_generator(tree, glob_loc, loc): pass def list_comprehension(tree, parent_scope): assert len(tree.inner) >= 5 and tree.inner[1].content == 'for' and tree.inner[3].content == 'in' expr = tree.inner[0] variables = tree.inner[2] container = tree.inner[4] if len(tree.inner) > 5: assert tree.inner[5].content == 'if' assert len(tree.inner) == 7 cond = tree.inner[6] do_filter = True else: do_filter = False lst = [] scope_id = tree.phrase_id + ':' + str(tree.inner[1].content.i) scope = Scope(parent_scope, False, scope_id) for v in iterate_over(container): _assign(variables, v, scope) if do_filter and not val(cond, scope): continue lst.append(val(expr, scope)) return lst def _assign(ltree, rvalue, scope, search): if ltree.name in ('list', '()', '[]'): assert len(ltree.inner) == len(rvalue) for lvalue, v in zip(ltree.inner, rvalue): _assign(lvalue, v, scope, search) elif ltree.name == 'name': scope.update(ltree.content, rvalue) elif ltree.name == 'index': owner = val(ltree.inner[0], search) i = val(ltree.inner[1].inner[0], scope) set_index(owner, i, rvalue) elif ltree.name == 'attr': #parse.print_tree([ltree.inner[0]]) owner = val(ltree.inner[0], scope) _assign(ltree.inner[2], rvalue, owner.scope, search) else: assert False, ltree.name def expand_range(tree): ijk = [None, None, None] token = 0 for r in range(3): if token >= len(tree.inner): return ijk if tree.inner[token].name != 'sign': ijk[r] = tree.inner[token] token += 2 else: token += 1 return ijk def val(tree, scope): return objval(tree, scope, scope) def escape(s): return s.replace('\\n', '\n').replace('\\0', '\0') def objval(tree, scope, obj): if tree.name == 'binary' or tree.name == 'compare': left = val(tree.inner[0], scope) right = val(tree.inner[2], scope) if tree.inner[1].content == '+': return left + right if tree.inner[1].content == '-': return left - right elif tree.inner[1].content == 'and': return left and right elif tree.inner[1].content == 'or': return left or right elif tree.inner[1].content == '/': return left / right elif tree.inner[1].content == '*': return left * right elif tree.inner[1].content == '%': return left % right elif tree.inner[1].content == '&': return left & right elif tree.inner[1].content == '^': return left ^ right 
elif tree.inner[1].content == '|': return left | right elif tree.inner[1].content == '<=': return left <= right elif tree.inner[1].content == '>=': return left >= right elif tree.inner[1].content == '==': return left == right elif tree.inner[1].content == '!=': return left != right elif tree.inner[1].content == '<': return left < right elif tree.inner[1].content == '>': return left > right elif tree.inner[1].content == 'is in': return right.contains(left) elif tree.inner[1].content == 'is not in': return not right.contains(left) else: assert False, tree.inner[1].content elif tree.name == 'assignment': ltree = tree.inner[0] right = val(tree.inner[2], scope) assert tree.inner[1].name == 'sign' if tree.inner[1].content != '=': left = val(ltree, scope) if tree.inner[1].content == '*=': right = left * right elif tree.inner[1].content == '+=': right = left + right elif tree.inner[1].content == '-=': right = left - right elif tree.inner[1].content == '/=': right = left / right elif tree.inner[1].content == '*=': right = left * right elif tree.inner[1].content == '%=': right = left % right elif tree.inner[1].content == '&=': right = left & right elif tree.inner[1].content == '^=': right = left ^ right elif tree.inner[1].content == '|=': right = left | right else: assert False, tree.inner[1].content _assign(ltree, right, obj, scope) if ltree.name == 'name': return objval(ltree, scope, obj) return None elif tree.name == 'name': v = obj.find(tree.content) return v elif tree.name == 'digit': return float(tree.content) elif tree.name == 'string': return builtin.String(escape(tree.content[1:-1])) elif tree.name == 'attr': owner = objval(tree.inner[0], scope, obj) return objval(tree.inner[2], scope, owner.scope) elif tree.name == 'call': assert tree.inner[1].name == '()' args_val = val(tree.inner[1], scope) if len(tree.inner[1].inner) > 0 and tree.inner[1].inner[0].name != 'list': args = tuple([args_val]) else: args = args_val if tree.inner[0].name == 'attr' and tree.inner[0].inner[-1].name == 'name': owner = objval(tree.inner[0].inner[0], scope, obj) f = objval(tree.inner[0].inner[-1], owner.scope, owner.scope) if owner.scope.is_instance: return f.call((owner.scope,) + args) else: return f.call(args) callee = objval(tree.inner[0], scope, obj) return callee.call(args) elif tree.name == 'index': mem = objval(tree.inner[0], scope, obj) i = val(tree.inner[1].inner[0], scope) return index(mem, i) elif tree.name == 'list': return tuple(val(v, scope) for v in tree.inner) elif tree.name == '()': if len(tree.inner) == 0: return tuple() elif len(tree.inner) == 1: return val(tree.inner[0], scope) else: return adhoc_generator(tree, scope) elif tree.name == '[]': if len(tree.inner) == 0: return builtin.List([], tree.phrase_id) elif len(tree.inner) == 1: v = val(tree.inner[0], scope) return builtin.List(v, tree.phrase_id) if tree.name == 'list' else v else: return list_comprehension(tree, scope) elif tree.name == '{}': if len(tree.inner) == 0: return builtin.Dict({}, tree.phrase_id) elif len(tree.inner) > 1: return dict_comprehension(tree, scope) else: if tree.inner[0].name == 'list': items = val(tree.inner[0], scope) elif len(tree.inner[0].inner) > 0: items = (val(tree.inner[0], scope),) else: return builtin.Dict({}, tree.phrase_id) if type(items[0]) == Range: assert all (item.i.flag and item.j.flag for item in items) return builtin.Dict({item.i.v: item.j.v for item in items}, tree.phrase_id) else: return builtin.Set({item for item in items}, tree.phrase_id) elif tree.name == 'range': i, j, k = [RangeRef(v, scope) 
for v in expand_range(tree)] return Range(i, j, k) elif tree.name == 'keyword': if tree.content == 'True': return True if tree.content == 'False': return False if tree.content == 'None': return None assert False, tree.content elif tree.name == 'unary': if tree.inner[0].content == '-': return -val(tree.inner[1], scope) if tree.inner[0].content == '~': return ~val(tree.inner[1], scope) if tree.inner[0].content == 'not': return not val(tree.inner[1], scope) assert False, tree.inner[0].content else: assert False, tree.name def anonymous(phrase_id): module, line = phrase_id.rsplit(':', maxsplit=1) return f'@{line}' def synchronized_block(phrases, i, min_level, scope): g = gblock(phrases, i, min_level, scope) while (1): try: next(g) raise NotImplementedError('internal import is not supported') except StopIteration as ex: return ex.value def gblock(phrases, i, min_level, scope, reraise=None): level = -1 j = i - 1 while j + 1 < len(phrases): j += 1 s = phrases[j] if level < 0: level = s.level assert level >= min_level elif level > s.level: j -= 1 break if DEBUG: print(s.debug) if s.name == 'unit': j = func(s, phrases, j + 1, level + 1, scope) elif s.name == 'class': j = yield from obj(s, phrases, j + 1, level + 1, scope) elif s.name == 'import': assert s.tree.name == 'name', s.tree.name yield ImportRequest(s.tree.content) scope.update(s.tree.content, Module(Scope.MODULES.get(s.tree.content))) elif s.name == 'import ? from ?': fullname = s.questions[1] + '.' + s.questions[0] yield ImportRequest(fullname, name=s.questions[0]) scope.update(s.questions[0], Module(Scope.MODULES.get(fullname))) elif s.name == 'return': value = val(s.tree, scope) if s.tree else None return Goto('return', value, j) elif s.name == 'yield': value = val(s.tree, scope) if s.tree else None LIB.update_type(scope.scope_id, '', Types.typeof(value)) yield value elif s.name == 'raise': if s.tree is not None: raise val(s.tree, scope) else: assert reraise is not None raise reraise elif s.name == 'break': return Goto('break', None, j) elif s.name == 'continue': return Goto('continue', None, j) elif s.name == 'pass': pass elif s.name == 'assert': if s.tree.name == 'list': cond = val(s.tree.inner[0], scope) mess = val(s.tree.inner[1], scope) else: cond = val(s.tree, scope) mess = "Failure" assert cond, mess elif s.name == 'for': assert s.tree.name == 'assignment' and s.tree.inner[1].content == 'in' value = val(s.tree.inner[2], scope) LIB.update_type(scope.scope_id, anonymous(s.tree.inner[2].phrase_id), Types.typeof(value)) for vals in iterate_over(value): _assign(s.tree.inner[0], vals, scope, scope) goto = yield from gblock(phrases,
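# --- Illustrative sketch (not project code) ----------------------------------
# A minimal, self-contained sketch of the control-flow idea used above: block
# evaluation returns a Goto record describing why it stopped ('return', 'break',
# 'end', ...), and the caller inspects it to decide what to do next. All names
# here are invented for illustration; the real Goto/gblock are richer.
from collections import namedtuple

Goto = namedtuple("Goto", ["kind", "value", "i"])

def run_block(statements, i=0):
    while i < len(statements):
        kind, payload = statements[i]
        if kind == "return":
            return Goto("return", payload, i)
        if kind == "break":
            return Goto("break", None, i)
        print("exec:", payload)  # ordinary statement: "execute" and move on
        i += 1
    return Goto("end", None, i - 1)

if __name__ == "__main__":
    goto = run_block([("stmt", "x = 1"), ("stmt", "y = 2"), ("return", 42)])
    print(goto.kind, goto.value)  # -> return 42
# ------------------------------------------------------------------------------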
'WARNING', 'process': 'PRODUCER', 'message': 'Could not migrate to new temporary file folder, returning to old one', 'method': 'producer'} theLogSocket.send(json.dumps(toSend).encode()) theLogSocket.close() globals.tempfolder = OLDAPATH session.commit() return (OLDBPATH, OLDIPATH, OLDAPATH) def emptyDirectory(path): if(os.path.isdir(path)): dirList = os.listdir(path) for inside in dirList: if os.path.isdir(os.path.join(path, inside)): emptyDirectory(os.path.join(path, inside)) os.rmdir(os.path.join(path, inside)) else: os.remove(os.path.join(path, inside)) else: os.remove(path) def getCOMData(device, parseInterval, command, baud, par, bytes, stop, id, workerList, toStore): """Auxiliary function used by the :py:func:`getPeripheralData` function to collect data from a local COM port :param device: (String) identifier of the device's serial port :param parseInterval: (Integer) period in seconds between data gathering :param command: (String) command to send the peripheral port in case data is collected by request and not streamed :param baud: (String) serial baud rate :param par: (String) serial parity :param bytes: (String) serial byte length :param stop: (String) serial stop bit :param id: (Integer) Unused. Defaults to -1 :param workerList: (List) Unused. Defaults to [] :param toStore: (Boolean) Unused. Defaults to True :returns: Dict object with the following keys: | ``status`` : status code of the request (1: done; 0: underway (error); -1: exception) | ``error`` : empty string if no exceptions, a traceback if an exception occurred | ``data`` : data from the requested resource """ theNode = {"active": True, "name": "localhost"} if theNode["active"]: try: if theNode["name"] == "localhost": if stop == "0": stopBits = serial.STOPBITS_ONE if stop == "1": stopBits = serial.STOPBITS_ONE_POINT_FIVE if stop == "2": stopBits = serial.STOPBITS_ONE_TWO if(not command == ""): ser = serial.Serial(device, baudrate=baud, bytesize=bytes, parity=par, stopbits=stopBits, timeout=5) else: ser = serial.Serial( device, baudrate=baud, bytesize=bytes, parity=par, stopbits=stopBits, timeout=parseInterval / 2) ser.flush() if(not command == ""): ser.write(command.encode()) while not ser.readable(): # Should stay here before being readable, should leave when becomes readable. 
Should IMMEDIATELY become readable continue data = ser.read(1000000).decode() if not toStore: for i, p in enumerate(workerList): if p["id"] == id: sideways = workerList[i] sideways['status'] = 1 sideways['data'] = {'status': 0, 'error': "", "reply": data} workerList[i] = sideways return 0 else: return {'status': 0, 'error': "", "reply": data} else: # Gotta play here first context = zmq.Context() for i, p in enumerate(workerList): if p["id"] == id: sideways = workerList[i] sideways['status'] = 1 sideways['data'] = {'status': 0, 'error': "", "reply": "remote request"} workerList[i] = sideways return 0 except Exception as e: traceback.print_exc() if toStore: for i, p in enumerate(workerList): if p["id"] == id: sideways = workerList[i] sideways['status'] = -1 sideways['error'] = str(e) workerList[i] = sideways else: return {'status': -1, 'error': str(e), "reply": ""} def getPortData(port, parseInterval, command, id, workerList, toStore): """Auxiliary function used by the :py:func:`getPeripheralData` function to collect data from a local COM port :param port: (String) network port of the resource :param parseInterval: (Integer) period in seconds between data gathering :param command: (String) command to send the peripheral port in case data is collected by request and not streamed :param id: (Integer) Unused. Defaults to -1 :param workerList: (List) Unused. Defaults to [] :param toStore: (Boolean) Unused. Defaults to True :returns: Dict object with the following keys: | ``status`` : status code of the request (1: done; 0: underway (error); -1: exception) | ``error`` : empty string if no exceptions, a traceback if an exception occurred | ``reply`` : data from the requested resource """ theNode = {"active": True, "name": "localhost"} if theNode["active"]: if theNode["name"] == "localhost": try: client = SOCKETS.socket(SOCKETS.AF_INET, SOCKETS.SOCK_STREAM) if(not command == ""): client.settimeout(5) else: client.settimeout(parseInterval / 2) server_address = ('localhost', port) client.connect(server_address) if(not command == ""): client.send(command.encode()) time.sleep(1) reply = "" recieved = client.recv(4096) reply = reply + recieved.decode() if not toStore: for i, p in enumerate(workerList): if p["id"] == id: sideways = workerList[i] sideways['status'] = 1 sideways['data'] = {'status': 0, 'error': "", "reply": reply} workerList[i] = sideways return 0 else: return {'status': 0, 'error': "", "reply": reply} except Exception as e: if not toStore: traceback.print_exc() for i, p in enumerate(workerList): if p["id"] == id: sideways = workerList[i] sideways['status'] = -1 sideways['data'] = str(e) workerList[i] = sideways else: return {'status': -1, 'error': str(e), "reply": ""} else: # No toStore testing here as there are no remote requests to this function with a True toStore parameter try: context = zmq.Context() portServer = random.randrange(8000, 8999) checkSocket = SOCKETS.socket() checkSocket.settimeout(1) checkSocket.connect((theNode["address"], theNode["port"])) ipLocal = checkSocket.getsockname()[0] checkSocket.close() machine = "tcp://*:" + str(portServer) machine2 = "tcp://" + theNode["address"] + ":" + str(theNode["port"]) message = { 'order': 'GETPORTDATA', 'server': ipLocal, 'sendBack': portServer, 'port': port, 'command': command, 'parseInterval': parseInterval} client = context.socket(zmq.PUSH) server = context.socket(zmq.REP) if(not command == ""): server.setsockopt(zmq.RCVTIMEO, 5000) else: server.setsockopt(zmq.RCVTIMEO, parseInterval * 1000) server.bind(machine) 
client.connect(machine2) client.send_json(message) getBack = server.recv_json() for i, p in enumerate(workerList): if p["id"] == id: sideways = workerList[i] sideways['status'] = 1 sideways['data'] = getBack["portData"] workerList[i] = sideways client.close() server.close() except Exception as e: traceback.print_exc() for i, p in enumerate(workerList): if p["id"] == id: sideways = workerList[i] sideways['status'] = -1 sideways['error'] = str(e) workerList[i] = sideways else: return { 'status': -1, 'error': "This node is currently not active, please make sure the agent application is running on it and consult your network operator", "reply": ""} def sendNodeQuery(version, ntp): """Auxiliary function used by the :py:func:`checkAddresses` function to send a broadcast request to all local area network DAQBroker client machines. This function is used to find unconnected local machines :param version: (String) version string :param ntp: (Dict) contains information on the current NTP server :returns: (List) list of available local area network nodes, including address, unique identifier and display name """ multicast_group = ('192.168.3.11', 10090) # Create the datagram socket sock = SOCKETS.socket(SOCKETS.AF_INET, SOCKETS.SOCK_DGRAM) # Set a timeout so the socket does not block indefinitely when trying # to receive data. sock.settimeout(5) # Set the time-to-live for messages to 1 so they do not go past the # local network segment. ttl = struct.pack('b', 1) sock.setsockopt(SOCKETS.IPPROTO_IP, SOCKETS.IP_MULTICAST_TTL, ttl) try: base_dir = '.' if getattr(sys, 'frozen', False): base_dir = os.path.join(sys._MEIPASS) # Send data to the multicast group info = { 'service': 'DAQBRoker server', 'version': version, 'id': snowflake.make_snowflake( snowflake_file=os.path.join(base_dir, 'snowflake')), 'message': 'show', 'ntp': ntp} toSend = json.dumps(info).encode() for i in range(0, 5): sent = sock.sendto(toSend, multicast_group) time.sleep(1) nodes = [] while True: try: data, server = sock.recvfrom(10240) try: processed = json.loads(data.decode()) if('id' in processed) and ('node' in processed): alreadyFoundNode = False for node in nodes: if processed["id"] == node["id"]: alreadyFoundNode = True break if not alreadyFoundNode: nodes.append({'id': processed["id"], 'node': processed["node"], 'details': processed["details"], 'serverAddr': processed["serverAddr"], 'address': server[0], 'port': processed["port"]}) except BaseException: # traceback.print_exc() poop = 'poop' except BaseException: # timeout # traceback.print_exc() break finally: sock.close() return nodes def refreshNodes(goodNodes): for node in goodNodes: machine = "tcp://" + node['remote'] + ":" + str(node['remotePort']) zmq_socket = context.socket(zmq.PUSH) zmq_socket.setsockopt(zmq.LINGER, 1000) try: zmq_socket.connect(machine) work_message = { 'order': "update", 'server': node['local'], 'sendBack': sendBack, 'theNode': node['remote'], 'extra': { 'serverNTP': daqbroObject.globals[0][5].decode(), 'tSync': node['NTP']}} zmq_socket.send_json(work_message) zmq_socket.close() except Exception as e: _, _, tb = sys.exc_info() tbResult = traceback.format_list(traceback.extract_tb(tb)[-1:])[-1] filename = tbResult.split(',')[0].replace('File', '').replace('"', '') lineno = tbResult.split(',')[1].replace('line', '') funname = tbResult.split(',')[2].replace('\n', '').replace(' in ', '') line = str(e) theLogSocket = context.socket(zmq.REQ) theLogSocket.connect("tcp://127.0.0.1:" + str(logPort)) toSend = { 'req': 'LOG', 'type': 'ERROR', 'process': 
'PRODUCER', 'message': str(e), 'filename': filename, 'lineno': lineno, 'funname': funname, 'line': line} theLogSocket.send(json.dumps(toSend).encode()) theLogSocket.close() def sendSubscriberEmails(): try: for q, valDB in enumerate(daqbroObject.databases): if(int(valDB[1]) == 1): realvalDB = 'daqbro_' + valDB[0].decode() dbQuery = "USE " + realvalDB daqbroObject.db.query(dbQuery) daqbroObject.db.store_result() dbQuery = "SELECT start,run,summary,active FROM runlist WHERE start>" + \ str(time.time() * 1000 - (24 * 60 * 60 * 1000)) + " ORDER BY start" daqbroObject.db.query(dbQuery) r = daqbroObject.db.store_result() runs = r.fetch_row(0) textActive = 'none' textRuns = 'none' lastRunSearch = time.time() if(len(runs) > 0): for u, run in enumerate(runs): startStr = datetime.fromtimestamp(int(run[0]) / 1000).strftime('%Y-%m-%d %H:%M:%S') if run[2] is None: summary = 'None' else: summary = run[2].decode() if int(run[3]) == 1: textActive = 'Run ' + run[1].decode() + ' : Started - ' + startStr + \ ' | Summary : ' + summary + '\n' else: textRuns = textRuns + 'Run ' + \ run[1].decode() + '
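# --- Illustrative sketch (not project code) ----------------------------------
# A minimal sketch of the local-serial branch of getCOMData: open the port,
# optionally send a query command, then read whatever the device returns within
# the timeout. The device path, command string and settings below are
# placeholders (and real hardware is required); this is not the daemon's
# actual configuration.
import serial  # pyserial

def query_serial(device="/dev/ttyUSB0", command="READ?\n", baud=9600, timeout=5.0):
    with serial.Serial(device, baudrate=baud,
                       bytesize=serial.EIGHTBITS,
                       parity=serial.PARITY_NONE,
                       stopbits=serial.STOPBITS_ONE,
                       timeout=timeout) as ser:
        ser.reset_input_buffer()            # discard any stale bytes
        if command:
            ser.write(command.encode())
        return ser.read(1_000_000).decode(errors="replace")
# ------------------------------------------------------------------------------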
order.", date_open.strftime("%Y-%m-%d"), traded_price, date_limit.strftime("%Y-%m-%d")) order = Order(order_type, symbol_to_buy, num_shares, date_open, traded_price, date_limit, target_up, target_down) order_target_percent(order, percentage) #Iterate through positions and see if need to sell any of them on this date #Imagine the time now is right before market close. Trying to sell stock at close price. for i, row in context.positions.iterrows(): if test_date == row['date_limit']: order_type = 'CLOSE' order_id = row['order_id'] num_lots = 1 num_shares = float('Nan') date_open = float('Nan') traded_price = price_list[:,row['symbol']].values[0] date_limit = float('Nan') target_up = float('Nan') target_down = float('Nan') percentage = 0 print("Making CLOSE order.", test_date.strftime("%Y-%m-%d"), traded_price) order = Order(order_type, row['symbol'], num_shares, date_open, traded_price, date_limit, target_up, target_down) order_target_percent(order, percentage, order_id=order_id) #After all the buy and sell (imagine the time now is after market close) #Update porfolio value value = positions_value(context.positions, price_list) if 'symbol' in context.positions.columns: positions_list = list(context.positions['symbol']) else: positions_list = [] result = {'positions': positions_list, #context.positions.to_dict('records'), 'date': test_date, 'positions_value': value, 'cash_value': context.cash, 'porfolio_value': value+context.cash} results = results.append(result, ignore_index=True) #HERE's the PREDICTION PART #Loop throught list of symbols #For each symbol, train LGB with available data to to this date (minus test valid gap) #Then make predictions on test date buy_list=[] for symbol in symbols: print('Predicting for {} on {}'.format(symbol, test_date)) outcomes_new_selected_symbol = outcomes_new_dropna.loc[outcomes_new_dropna.index.get_level_values('symbol')==symbol] #get all trading dates from dataset of this symbol data_dates = sorted(list(set(outcomes_new_selected_symbol.index.get_level_values(0)))) data_converted_dates = [] for ts in data_dates: data_converted_dates.append(ts.to_pydatetime()) #Check if test data is a trading date for this stock if test_date not in data_converted_dates: print('Stock data for {} not available on {}. 
Skipping this symbol for this date'.format(symbol, test_date)) continue #Calculate training start date and valid start date by back counting start_date = data_converted_dates[data_converted_dates.index(test_date)-(valid_size + training_size + valid_test_gap + test_size)] start_date_valid = data_converted_dates[data_converted_dates.index(test_date)-(valid_size + valid_test_gap + test_size)] end_date_valid = data_converted_dates[data_converted_dates.index(test_date)-valid_test_gap-test_size] start_date_test = data_converted_dates[data_converted_dates.index(test_date)-test_size] end_date_test = test_date #print('start_date = ', start_date) # print('start_date_valid = ', start_date_valid) # print('end_date_valid = ', end_date_valid) #print('test_date = ', test_date) X_y_train, X_y_valid, X_y_test = train_valid_test_split(outcomes_new_selected_symbol, start_date, start_date_valid, end_date_valid, start_date_test, end_date_test) #calculate upper threshold in training set, then create targets for both valid and test X_y_train, X_y_valid, X_y_test = add_target_upper(X_y_train, X_y_valid, X_y_test, q_upper, 'target', return_col) X_y_test['symbol'] = X_y_test.index.get_level_values('symbol') #downsample the training set's negative data points X_y_train_resampled = downsample(X_y_train, 'target', test_ratio=0.11, random_seed=11) #create 2 extra sets for calculating gain X_valid_close = X_y_valid[['close',return_col_actual]] X_test_close = X_y_test[['close',return_col_actual, 'symbol','next_day_open']] num_shares = [] for i, row in X_test_close.iterrows(): num_shares.append(info.board_lots[row[2]] * info.multiplier[row[2]]) X_test_close['num_shares'] = num_shares #split into features and target sets X_train, y_train = feature_target_split(X_y_train_resampled, features_selected, 'target') X_valid, y_valid = feature_target_split(X_y_valid, features_selected, 'target') X_test, y_test = feature_target_split(X_y_test, features_selected, 'target') (best_model, best_pres_model, max_total_gain, optimal_depth, optimal_num_leaves, max_precision, optimal_precision_depth, optimal_precision_num_leaves, max_precision_total_gain) = lgb_train(X_train, y_train, X_valid, y_valid, X_valid_close, max_depth_range, num_leaves_range, return_col_actual, min_data = 11, metric = 'auc', prob_threshold = prob_threshold) y_test_pred = best_model.predict(X_test, num_iteration=best_model.best_iteration) y_class_pred = class_switch_binary(y_test, y_test_pred, prob_threshold) if y_class_pred[0] == 1: print('Predicted POSITIVE for {}'.format(symbol)) buy_list.append(symbol) return results def backtest_random(outcomes_new, context): outcomes_new_dropna = outcomes_new.dropna() test_period = 200 trade_date_list = sorted(list(set(outcomes_new.index.get_level_values('date')))) test_dates = sorted(list(set(outcomes_new_dropna.index.get_level_values('date'))))[-test_period:] test_dates_converted = [] for ts in test_dates: test_dates_converted.append(ts.to_pydatetime()) results = pd.DataFrame() for test_date in test_dates_converted: print() price_list = get_price_list(outcomes_new, test_date) price_list_open = get_price_list_open(outcomes_new, test_date) if 'buy_list' not in globals() and 'buy_list' not in locals(): buy_list = [] if 'short_list' not in globals() and 'short_list' not in locals(): short_list = [] #Iterate throught the buy_list (which is created by the previous day's ML model run) #Make BUY order for each symbol in buy_list, at open price #Imagine the time now is before market open. 
Trying to buy stock at open price if len(buy_list)>0: weights = inverse_volatility_weights(buy_list, test_date, outcomes_new) for symbol_to_buy in buy_list: order_type = 'BUY' order_id = '' num_lots = 1 num_shares = num_lots * info.board_lots[symbol_to_buy] date_open = test_date traded_price = price_list_open[:,symbol_to_buy].values[0] date_limit = trade_date_list[trade_date_list.index(date_open)+4] target_up = outcomes_new.loc[(date_open,symbol_to_buy)]['target_upper_v2'] target_down = outcomes_new.loc[(date_open,symbol_to_buy)]['target_lower_v2'] percentage = weights[symbol_to_buy] print("Making buy order. Date open: {}, Date limit: {}, Traded price: {}.".format(date_open.strftime("%Y-%m-%d"), date_limit.strftime("%Y-%m-%d"), traded_price)) order = Order(order_type, symbol_to_buy, num_shares, date_open, traded_price, date_limit, target_up, target_down) order_target_percent(order, percentage, order_id, context) #Iterate throught the short_list (which is created by the previous day's ML model run) #Make SHORT order for each symbol in buy_list, at open price #Allocate weights to each symbol by inverse volatility of past_return_5 if len(short_list)>0: weights = inverse_volatility_weights(short_list, test_date, outcomes_new) for symbol_to_short in short_list: order_type = 'SHORT' order_id = '' num_lots = 1 num_shares = 1 date_open = test_date traded_price = price_list_open[:,symbol_to_short].values[0] date_limit = trade_date_list[trade_date_list.index(date_open)+4] target_up = outcomes_new.loc[(date_open,symbol_to_short)]['target_upper_v2'] target_down = outcomes_new.loc[(date_open,symbol_to_short)]['target_lower_v2'] percentage = weights[symbol_to_short] print("Making short order. Date open: {}, Date limit: {}, Traded price: {}.".format(date_open.strftime("%Y-%m-%d"), date_limit.strftime("%Y-%m-%d"), traded_price)) order = Order(order_type, symbol_to_short, num_shares, date_open, traded_price, date_limit, target_up, target_down) order_target_percent(order, percentage, order_id, context) #listen to market price of all stocks in positions #If hitting target price, then sell for i, row in context.positions.iterrows(): if outcomes_new.loc[(test_date,row['symbol'])]['high']>row['target_up']: order_type = 'CLOSE' order_id = row['order_id'] num_lots = 1 num_shares = float('Nan') date_open = float('Nan') traded_price = row['target_up'] date_limit = float('Nan') target_up = float('Nan') target_down = float('Nan') percentage = 0 print("Making CLOSE order.", test_date.strftime("%Y-%m-%d"), traded_price) order = Order(order_type, row['symbol'], num_shares, date_open, traded_price, date_limit, target_up, target_down) order_target_percent(order, percentage, order_id, context) continue if outcomes_new.loc[(test_date,row['symbol'])]['low']<row['target_down']: order_type = 'CLOSE' order_id = row['order_id'] num_lots = 1 num_shares = float('Nan') date_open = float('Nan') traded_price = row['target_down'] date_limit = float('Nan') target_up = float('Nan') target_down = float('Nan') percentage = 0 print("Making CLOSE order.", test_date.strftime("%Y-%m-%d"), traded_price) order = Order(order_type, row['symbol'], num_shares, date_open, traded_price, date_limit, target_up, target_down) order_target_percent(order, percentage, order_id, context) continue #Near market close - check what stock in position, with date limit = today. 
Sell it at close price for i, row in context.positions.iterrows(): #print('type of row[date_limit]', type(row['date_limit'])) if test_date >= row['date_limit']: order_type = 'CLOSE' order_id = row['order_id'] num_lots = 1 num_shares = float('Nan') date_open = float('Nan') traded_price = price_list[:,row['symbol']].values[0] date_limit = float('Nan') target_up = float('Nan') target_down = float('Nan') percentage = 0 print("Making CLOSE order.", test_date.strftime("%Y-%m-%d"), traded_price) order = Order(order_type, row['symbol'], num_shares, date_open, traded_price, date_limit, target_up, target_down) order_target_percent(order, percentage, order_id, context) #After all the buy and sell (imagine the time now is after market close) #Update porfolio value value = positions_value(context.positions, price_list) if 'symbol' in context.positions.columns: positions_list = list(context.positions['symbol']) else: positions_list = [] result = {'positions': positions_list, #context.positions.to_dict('records'), 'date': test_date, 'positions_value': value, 'cash_value': context.cash, 'porfolio_value': value+context.cash} results = results.append(result, ignore_index=True) #HERE's the PREDICTION PART buy_list=[] short_list=[] print('Test date: ', test_date) X_test = outcomes_new.loc[test_date] buy_df = pd.DataFrame() short_df = pd.DataFrame() symbol_universe = list(X_test.index.get_level_values('symbol')) X_test_samples = X_test.loc[X_test.index.get_level_values('symbol').isin(random.sample(symbol_universe,10))].sample(frac=1) buy_df=X_test_samples[:5] short_df=X_test_samples[5:] buy_list = list(buy_df.index.get_level_values('symbol')) short_list = list(short_df.index.get_level_values('symbol')) print("--------------------------------") print('Buy list: ', buy_list) print('Short list: ', short_list) print("--------------------------------") return results def backtest_lgb_v2(outcomes_new, context): outcomes_new_dropna = outcomes_new.dropna() trade_date_list = sorted(list(set(outcomes_new.index.get_level_values('date')))) features_selected = info.features_lgb_v2 max_impact_pct = 0.001 max_impact_volatility = 'price_chg_1_ema_std50' total_budget = context.capital_base * 1.2 test_period = 250 test_size = 1 valid_size = 1 training_size = 2000 valid_test_gap = 4 label = 'label_v2' #make a list of test dates according to test period, by back counting from the latest date with target test_dates = sorted(list(set(outcomes_new_dropna.index.get_level_values('date'))))[-test_period:] test_dates_converted = [] for ts in test_dates: test_dates_converted.append(ts.to_pydatetime()) results = pd.DataFrame() context.predictions = pd.DataFrame() for test_date in test_dates_converted: print() price_list = get_price_list(outcomes_new, test_date) price_list_open = get_price_list_open(outcomes_new, test_date) if 'buy_list' not in globals() and 'buy_list' not in locals(): buy_list = [] if 'short_list' not in globals() and 'short_list' not in locals(): short_list = [] #Iterate throught the buy_list (which is created by the previous day's ML model run) #Make BUY order for each symbol in buy_list, at open price #Imagine the time now is before market open. 
Trying to buy stock at open price max_impact = (context.positions_value+context.cash) * max_impact_pct ##################################################################################################### #Need to change algorithm so that it loops through alternate buy and short on the list, so each side #gets equal chance to be ordered, due to the margin limit ######################################################################### len_buy = len(buy_list) len_short = len(short_list) #calculate weights for both lists according to inverse volatility (std of past_return_5) if len_buy>0: weights_buy = inverse_volatility_weights(buy_list, test_date, outcomes_new) if len_short>0: weights_short = inverse_volatility_weights(short_list, test_date, outcomes_new) #Alternate buy and short lists to make orders. Since we check the margin for each order, we want to give buy and short #roughly equal chance
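# --- Illustrative sketch (not project code) ----------------------------------
# The comments above allocate weights by inverse volatility (std of past_return_5).
# A minimal, hypothetical version of that weighting, given precomputed per-symbol
# volatilities: each weight is proportional to 1/volatility and the weights sum
# to 1. This is not the project's inverse_volatility_weights implementation,
# and the symbols/values below are placeholders.
import pandas as pd

def inverse_volatility_weights_sketch(vols: pd.Series) -> pd.Series:
    inv = 1.0 / vols
    return inv / inv.sum()

vols = pd.Series({"SYM_A": 0.012, "SYM_B": 0.025, "SYM_C": 0.018})
print(inverse_volatility_weights_sketch(vols).round(3))  # lowest-vol symbol gets the largest weight
# ------------------------------------------------------------------------------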
hex-pose, scale the rot-servo around 90* l.list[ROT_SERVO] = ((l.list[ROT_SERVO] - 90.) * scale) + 90. # subtract 90, scale, add 90 temp_both.append(pose) # store # re-split the merged list left_step = temp_both[:4] # first half right_step = temp_both[4:] # second half # begin rotate sequence by lifting some of the legs: "half-raised neutral position" # this is the pose at the end of a right-step or beginning of a left-step self.set_hexwalker_position(TALL_TRI_RIGHT_NEUTRAL_LEFT_UP_NEUTRAL, durr=durr) self.synchronize() last_step_right = True for i in range(num_steps): if last_step_right: # if last_step_right == True: # this branch always runs first!! # if last step was right, do a left self.run_pose_list(left_step, durr=durr) last_step_right = False else: # elif last_step_right == False: # if last step was left, do a right self.run_pose_list(right_step, durr=durr) last_step_right = True self.synchronize() #cleanup self.set_hexwalker_position(TALL_NEUTRAL, durr=durr) self.synchronize() return SUCCESS # fine_rotate is just here for legacy support def fine_rotate(self, num_steps, direction, scale=0.2, durr=None): self.rotate(num_steps, direction, scale=scale, durr=durr) def leg_wave(self, direction, speed, repetitions): for i in range(0, repetitions): if(direction == RIGHT): for n in GROUP_ALL_LEGS: # pull_up = (60, 75, 90), tip above horizontal # normal neutral = (120, 90, 90) # crouch neutral = (45, 135, 90) self.do_set_hexwalker_position(LEG_MISC_TABLE["PULL_UP"], n, durr=speed) self.synchronize() # tall neutral = (120, 45, 90) self.do_set_hexwalker_position(LEG_TALL_MOVEMENT_TABLE["NEUTRAL"], n, durr=speed) if(direction == LEFT): reverselist = list(GROUP_ALL_LEGS) reverselist.reverse() for n in reverselist: self.do_set_hexwalker_position(LEG_MISC_TABLE["PULL_UP"], n, durr=speed) self.synchronize() self.do_set_hexwalker_position(LEG_TALL_MOVEMENT_TABLE["NEUTRAL"], n, durr=speed) # one last synchronize() for the final movement to complete self.synchronize() # tea-bag def bounce(self, wait, repetitions): for i in range(0, repetitions): self.set_hexwalker_position(TALL_TRI_BOUNCE_DOWN, durr=wait) self.synchronize() self.set_hexwalker_position(TALL_NEUTRAL, durr=wait) self.synchronize() # twist-dance # NOTE: one does block, so this should be called AFTER the torso function is called # NOTE: to change the twist angle edit TWIST_DANCE_LEFT/RIGHT in posedata_leg def twist_dance(self, speed, repetitions): for i in range(repetitions): self.set_hexwalker_position(TWIST_DANCE_LEFT, durr=speed) self.set_hexwalker_position(TALL_NEUTRAL, durr=speed) self.set_hexwalker_position(TWIST_DANCE_RIGHT, durr=speed) self.set_hexwalker_position(TALL_NEUTRAL, durr=speed) self.synchronize() def do_nothing(self): self.set_hexwalker_position(TALL_NEUTRAL) self.synchronize() ######################################################################################## ######################################################################################## # terms: torso = (Larm + Rarm) + waist # The Robot_Torso groups the 2 arms with the waist motor for macro-control. This is totally isolated from the 6 legs # in the Hex_Walker object. This contains the "motion functions" for things like dancing and pointing, as well as the # driver functions to execute these motions. Note that both arms and the waist are still technically "Leg" objects. # Most func accept optional arg "durr" to specify the duration of the transition; if missing, default is self.speed. 
# Most func accept optional arg "masklist" to specify the legs being set or waited on. # It has synchronize() and abort() just like the Hex_Walker object, and they work just the same. # do_moveset() takes a list of indices within ARMS_POSITIONS array, along with the waist-rotations to use for each. # set_torso_position() calls both set_arms_position() and set_waist_position(), thats it. # set_arms_position() will set LARM/RARM/both poses from an Arms_Position object or index within ARMS_POSITIONS. # set_waist_position() will set the waist pose from an angle(degrees) or a Leg_Position object. # do_set_torso_position() will set any combination of legs' poses from a Leg_Position object. class Robot_Torso(object): # list of functions: # __init__ # print_self # set_speed # synchronize # abort # do_moveset # set_torso_position # set_arms_position # set_waist_position # do_set_torso_position # + assorted "motion" functions def __init__(self, right_arm: Leg, left_arm: Leg, rotator: Rotator): # individual member variables self.left_arm = left_arm self.right_arm = right_arm self.rotator = rotator # list form (must subtract ARM_L from ID before indexing into this list, may change to dict in the future) self.leglist = [left_arm, right_arm, rotator] # set default speed self.speed = NORMAL_SPEED # go to default pose, arms and rotation self.torso_neutral() def print_self(self): print("torso object: speed=" + str(self.speed)) for L in self.leglist: L.print_self() def set_speed(self, n): self.speed = n ## synchronize the legs with the main thread by not returning until all of the specified legs are done moving # masklist accepts list, set, int (treated as single-element set) # if not given any arg, default is GROUP_ALL_TORSO # depending on USE_THREADING, either simply sleep or do the actual wait def synchronize(self, masklist=GROUP_ALL_TORSO): if USE_THREADING: # if given a single index rather than an iteratable, make it into a set mask = {masklist} if isinstance(masklist, int) else set(masklist) for leg in [self.leglist[n - ARM_L] for n in mask]: # wait until the leg is done, if it is already done this returns immediately leg.idle_flag.wait() else: time.sleep(self.speed) # abort all queued leg thread movements, and wait a bit to ensure they all actually stopped. # their "current angle/pwm" variables should still be correct, unless it was trying to move beyond its range somehow. def abort(self): # first clear all the queues for leg in self.leglist: leg.abort() # then wait until all legs returned to "sleeping" state self.synchronize() # then wait for 3x the interpolate time, just to be safe time.sleep(INTERPOLATE_TIME * 3) ## takes a list of indices within ARMS_POSITIONS array, along with the waist-rotations to use for each. # sets arms and waist at same time, waits until each change is done with synchronize() # previously do_moveset(self, positions, rotations, sleeps, repetitions): # TODO: convert this to accept indices or objects to allow for dynamic modificaiton of poses def do_moveset(self, position_indices, rotations, repeat=1, masklist=GROUP_ALL_ARMS, durr=None): if len(position_indices) != len(rotations): print("ERR: len(position_indices) != len(rotations)") return INV_PARAM for j in range(repeat): for pose_idx, rot in zip(position_indices, rotations): self.set_arms_position(pose_idx, masklist=masklist, durr=durr) self.set_waist_position(rot, durr=durr) self.synchronize() return SUCCESS ## do both set_arms_position and set_waist_position, thats it. 
no mask ability def set_torso_position(self, arms_pose_idx, rotation, durr=None): self.set_arms_position(arms_pose_idx, masklist=GROUP_ALL_ARMS, durr=durr) self.set_waist_position(rotation, durr=durr) ## set LARM/RARM/both poses from an Arms_Position object or index within ARMS_POSITIONS. # take mask of arm or arms, default is both arms (cannot set the waist from this function) # previously set_torso_position(self, torso_position_number, rotation) def set_arms_position(self, arms_pose_idx, masklist=GROUP_ALL_ARMS, durr=None): # if given a single index rather than an iteratable, make it into a set mask = {masklist} if isinstance(masklist, int) else set(masklist) # if given arms_pose_idx as an index, convert to Arms_Position object via lookup arms_pose_obj = arms_pose_idx if isinstance(arms_pose_idx, Arms_Position) else ARMS_POSITIONS[arms_pose_idx] # check which arms are in mask and extract appropriate leg-pose from the arms-obj for n in mask: self.do_set_torso_position(arms_pose_obj.list[n], masklist=n, durr=durr) ## set the waist pose from an angle(degrees) or a Leg_Position object. # one-to-one: no mask needed # previously set_torso_rotation(self, rotation): def set_waist_position(self, waist_rot, durr=None): # if given waist_rot as raw angle, convert to a leg-object waist_rot_obj = waist_rot if isinstance(waist_rot, Leg_Position) else Leg_Position(waist_rot, waist_rot, waist_rot) self.do_set_torso_position(waist_rot_obj, masklist=WAIST, durr=durr) ## set any combination of legs' poses from a Leg_Position object. # take a Leg_Position (extracted from Arms_Position or built from waist rotation angle) # take a masklist: specify any combination of L/R/W, probably not useful to set multiple at once tho # previously do_set_torso_position(self, torso_position, rotation) def do_set_torso_position(self, legobj, masklist=GROUP_ALL_TORSO, durr=None): # default time if not given is self.speed, can't put "self" in default args tho durr = self.speed if durr is None else durr # if given a single index rather than an iteratable, make it into a set mask = {masklist} if isinstance(masklist, int) else set(masklist) for leg in [self.leglist[n - ARM_L] for n in mask]: if USE_THREADING: leg.set_leg_position_thread(legobj, durr) # threading version else: leg.set_leg_position(legobj) # non-threading version ######################################################################################## ######################################################################################## # torso movement functions # TODO: change these to not use do_moveset function, put it in the queue and synchronize() # ????, then reset def monkey(self, repetitions): moves = [ARMS_MONKEY_RIGHT_UP, ARMS_MONKEY_LEFT_UP] # duplicate this a total of 8 times moves = moves * 8 rotations = [45] * 8 + [135] * 8 self.do_moveset(moves, rotations, repeat=repetitions, durr=0.1) # then go to the neutral position self.torso_neutral() # beat the chest, then reset def king_kong(self, rotation, repetitions): moves = [ARMS_DANCE_FRONT_LEFT_OUT, ARMS_DANCE_FRONT_RIGHT_OUT] rotations = [rotation] * 2 self.do_moveset(moves, rotations, repeat=repetitions, durr=0.4) # then go to the neutral position self.torso_neutral() # do handshake sequence (which hand?), then reset def hand_shake(self, rotation, repetitions): moves = [ARMS_SHAKE_DOWN, ARMS_SHAKE_MID, ARMS_SHAKE_UP, ARMS_SHAKE_MID] rotations = [rotation] * 4 self.do_moveset(moves, rotations, repeat=repetitions, durr=0.1) # then go to the neutral position self.torso_neutral() # do waving sequence 
(which hand?), then reset def wave(self, rotation, repetitions): moves = [ARMS_WAVE_DOWN, ARMS_WAVE_UP] rotations = [rotation] * 2 self.do_moveset(moves, rotations, repeat=repetitions, durr=0.4) # then go to the neutral position self.torso_neutral() # ????, then hold def look(self): self.set_torso_position(ARMS_LOOKING, 90) self.synchronize() # twist in the opposite direction of the base, so the torso remains mostly stationary # NOTE: does not call synchronize(), just puts frames in the queue and returns!!! let synchronize() be called in the walker function!! def twist_dance(self, speed, repetitions): for i in range(repetitions): self.set_waist_position(110, durr=speed) self.set_waist_position(90, durr=speed) self.set_waist_position(70, durr=speed) self.set_waist_position(90, durr=speed) # point with left arm or right arm in the specified direction, then hold def point(self, hand, direction): if(hand == RIGHT): self.set_torso_position(ARMS_POINTING_RIGHT, direction) elif(hand == LEFT): self.set_torso_position(ARMS_POINTING_LEFT, direction) self.synchronize() # direction is from 0-359, will pick leftarm/rightarm to point in the chosen direction # 0/360 = front # this does not control the waist at all, just the arms # !!! also demonstrates more dynamic control over the poses !!! def point_better(self, direction): direction = clamp(direction, 0, 359) if direction >= 180: # use left arm to point: dynamically create the leg-pose to have the angle i want armspos = ARMS_POSITIONS[ARMS_POINTING_FWD_LEFT].copy() # translate direction=[180=back, 270=left, 360=fwd] to [180=back, 90=out, 0=fwd] armspos.arm_l.list[MID_SERVO] = (-(direction-180))+180 self.set_arms_position(armspos) else: # use right arm to point armspos = ARMS_POSITIONS[ARMS_POINTING_FWD_RIGHT].copy() # translate direction=[0=fwd, 90=right, 180=back] to [0=fwd, 90=out, 180=back], no translation armspos.arm_r.list[MID_SERVO]
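# --- Illustrative check (not project code) ------------------------------------
# A small worked check of the left-arm mapping used in point_better above:
# (-(direction - 180)) + 180 sends 180 (back) -> 180, 270 (left) -> 90 (out),
# and 359 (~fwd) -> 1, matching the comment's [180=back, 90=out, 0=fwd] target range.
def left_arm_servo_angle(direction):
    return (-(direction - 180)) + 180

for d in (180, 225, 270, 315, 359):
    print(d, "->", left_arm_servo_angle(d))
# ------------------------------------------------------------------------------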
""" Contains backtesting logic and objects. """ from __future__ import division from copy import deepcopy import bt import ffn import pandas as pd import numpy as np from matplotlib import pyplot as plt import pyprind def run(*backtests): """ Runs a series of backtests and returns a Result object containing the results of the backtests. Args: * backtest (*list): List of backtests. Returns: Result """ # run each backtest for bkt in backtests: bkt.run() return Result(*backtests) def benchmark_random(backtest, random_strategy, nsim=100): """ Given a backtest and a random strategy, compare backtest to a number of random portfolios. The idea here is to benchmark your strategy vs a bunch of random strategies that have a similar structure but execute some part of the logic randomly - basically you are trying to determine if your strategy has any merit - does it beat randomly picking weight? Or randomly picking the selected securities? Args: * backtest (Backtest): A backtest you want to benchmark * random_strategy (Strategy): A strategy you want to benchmark against. The strategy should have a random component to emulate skilless behavior. * nsim (int): number of random strategies to create. Returns: RandomBenchmarkResult """ # save name for future use if backtest.name is None: backtest.name = "original" # run if necessary if not backtest.has_run: backtest.run() bts = [] bts.append(backtest) data = backtest.data # create and run random backtests for i in range(nsim): random_strategy.name = "random_%s" % i rbt = bt.Backtest(random_strategy, data) rbt.run() bts.append(rbt) # now create new RandomBenchmarkResult res = RandomBenchmarkResult(*bts) return res class Backtest(object): """ A Backtest combines a Strategy with data to produce a Result. A backtest is basically testing a strategy over a data set. Note: The Strategy will be deepcopied so it is re-usable in other backtests. To access the backtested strategy, simply access the strategy attribute. Args: * strategy (Strategy, Node, StrategyBase): The Strategy to be tested. * data (DataFrame): DataFrame containing data used in backtest. This will be the Strategy's "universe". * name (str): Backtest name - defaults to strategy name * initial_capital (float): Initial amount of capital passed to Strategy. * commissions (fn(quantity, price)): The commission function to be used. Ex: commissions=lambda q, p: max(1, abs(q) * 0.01) * progress_bar (Bool): Display progress bar while running backtest * additional_data (dict): Additional kwargs passed to StrategyBase.setup, after preprocessing Attributes: * strategy (Strategy): The Backtest's Strategy. This will be a deepcopy of the Strategy that was passed in. * data (DataFrame): Data passed in * dates (DateTimeIndex): Data's index * initial_capital (float): Initial capital * name (str): Backtest name * stats (ffn.PerformanceStats): Performance statistics * has_run (bool): Run flag * weights (DataFrame): Weights of each component over time * security_weights (DataFrame): Weights of each security as a percentage of the whole portfolio over time * additional_data (dict): Additional data passed to strategy setup """ def __init__( self, strategy, data, name=None, initial_capital=1000000.0, commissions=None, integer_positions=True, progress_bar=False, additional_data=None, ): if data.columns.duplicated().any(): cols = data.columns[data.columns.duplicated().tolist()].tolist() raise Exception( "data provided has some duplicate column names: \n%s \n" "Please remove duplicates!" 
% cols ) # we want to reuse strategy logic - copy it! # basically strategy is a template self.strategy = deepcopy(strategy) self.strategy.use_integer_positions(integer_positions) self._process_data(data, additional_data) self.initial_capital = initial_capital self.name = name if name is not None else strategy.name self.progress_bar = progress_bar if commissions is not None: self.strategy.set_commissions(commissions) self.stats = {} self._original_prices = None self._weights = None self._sweights = None self.has_run = False def _process_data(self, data, additional_data): # add virtual row at t0-1day with NaNs # this is so that any trading action at t0 can be evaluated relative to # a clean starting point. This is related to #83. Basically, if you # have a big trade / commision on day 0, then the Strategy.prices will # be adjusted at 0, and hide the 'total' return. The series should # start at 100, but may start at 90, for example. Here, we add a # starting point at t0-1day, and this is the reference starting point data_new = pd.concat( [ pd.DataFrame( np.nan, columns=data.columns, index=[data.index[0] - pd.DateOffset(days=1)], ), data, ] ) self.data = data_new self.dates = data_new.index self.additional_data = (additional_data or {}).copy() # Look for data frames with the same index as (original) data, # and add in the first row as well (i.e. "bidoffer") for k in self.additional_data: old = self.additional_data[k] if isinstance(old, pd.DataFrame) and old.index.equals(data.index): empty_row = pd.DataFrame( np.nan, columns=old.columns, index=[old.index[0] - pd.DateOffset(days=1)], ) new = pd.concat([empty_row, old]) self.additional_data[k] = new elif isinstance(old, pd.Series) and old.index.equals(data.index): empty_row = pd.Series( np.nan, index=[old.index[0] - pd.DateOffset(days=1)] ) new = pd.concat([empty_row, old]) self.additional_data[k] = new def run(self): """ Runs the Backtest. """ if self.has_run: return # set run flag to avoid running same test more than once self.has_run = True # setup strategy self.strategy.setup(self.data, **self.additional_data) # adjust strategy with initial capital self.strategy.adjust(self.initial_capital) # loop through dates # init progress bar if self.progress_bar: bar = pyprind.ProgBar(len(self.dates), title=self.name, stream=1) # since there is a dummy row at time 0, start backtest at date 1. 
# we must still update for t0 self.strategy.update(self.dates[0]) # and for the backtest loop, start at date 1 for dt in self.dates[1:]: # update progress bar if self.progress_bar: bar.update() # update strategy self.strategy.update(dt) if not self.strategy.bankrupt: self.strategy.run() # need update after to save weights, values and such self.strategy.update(dt) else: if self.progress_bar: bar.stop() self.stats = self.strategy.prices.calc_perf_stats() self._original_prices = self.strategy.prices @property def weights(self): """ DataFrame of each component's weight over time """ if self._weights is not None: return self._weights else: if self.strategy.fixed_income: vals = pd.DataFrame( {x.full_name: x.notional_values for x in self.strategy.members} ) vals = vals.div(self.strategy.notional_values, axis=0) else: vals = pd.DataFrame( {x.full_name: x.values for x in self.strategy.members} ) vals = vals.div(self.strategy.values, axis=0) self._weights = vals return vals @property def positions(self): """ DataFrame of each component's position over time """ return self.strategy.positions @property def security_weights(self): """ DataFrame containing weights of each security as a percentage of the whole portfolio over time """ if self._sweights is not None: return self._sweights else: # get values for all securities in tree and divide by root values # for security weights vals = {} for m in self.strategy.members: if isinstance(m, bt.core.SecurityBase): if self.strategy.fixed_income: m_values = m.notional_values else: m_values = m.values if m.name in vals: vals[m.name] += m_values else: vals[m.name] = m_values vals = pd.DataFrame(vals) # divide by root strategy values if self.strategy.fixed_income: vals = vals.div(self.strategy.notional_values, axis=0) else: vals = vals.div(self.strategy.values, axis=0) # save for future use self._sweights = vals return vals @property def herfindahl_index(self): """ Calculate Herfindahl-Hirschman Index (HHI) for the portfolio. For each given day, HHI is defined as a sum of squared weights of securities in a portfolio; and varies from 1/N to 1. Value of 1/N would correspond to an equally weighted portfolio and value of 1 corresponds to an extreme case when all amount is invested in a single asset. 1 / HHI is often considered as "an effective number of assets" in a given portfolio """ w = self.security_weights return (w ** 2).sum(axis=1) @property def turnover(self): """ Calculate the turnover for the backtest. This function will calculate the turnover for the strategy. Turnover is defined as the lesser of positive or negative outlays divided by NAV """ s = self.strategy outlays = s.outlays # seperate positive and negative outlays, sum them up, and keep min outlaysp = outlays[outlays >= 0].fillna(value=0).sum(axis=1) outlaysn = np.abs(outlays[outlays < 0].fillna(value=0).sum(axis=1)) # merge and keep minimum min_outlay = pd.DataFrame({"pos": outlaysp, "neg": outlaysn}).min(axis=1) # turnover is defined as min outlay / nav mrg = pd.DataFrame({"outlay": min_outlay, "nav": s.values}) return mrg["outlay"] / mrg["nav"] class Result(ffn.GroupStats): """ Based on ffn's GroupStats with a few extra helper methods. 
Args: * backtests (list): List of backtests Attributes: * backtest_list (list): List of backtests in the same order as provided * backtests (dict): Dict of backtests by name """ def __init__(self, *backtests): tmp = [pd.DataFrame({x.name: x.strategy.prices}) for x in backtests] super(Result, self).__init__(*tmp) self.backtest_list = backtests self.backtests = {x.name: x for x in backtests}
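# --- Illustrative example (not library code) ----------------------------------
# A small numeric illustration of the herfindahl_index property documented above:
# HHI is the per-day sum of squared security weights, and 1/HHI is the "effective
# number of assets". Equal weights across N assets give HHI = 1/N. The weights
# below are made-up placeholder data.
import pandas as pd

weights = pd.DataFrame(
    {"A": [0.25, 0.70], "B": [0.25, 0.10], "C": [0.25, 0.10], "D": [0.25, 0.10]},
    index=pd.to_datetime(["2020-01-02", "2020-01-03"]),
)
hhi = (weights ** 2).sum(axis=1)
print(hhi)        # 0.25 (equal-weighted day), 0.52 (concentrated day)
print(1.0 / hhi)  # effective number of assets: 4.0 and ~1.92
# ------------------------------------------------------------------------------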
False, 'RequestUID'), "0031xx45" : ('LT', '1', 'RequestingPhysician', False, 'RequestingPhysician'), "0031xx50" : ('LT', '1', 'RequestedPhysician', False, 'RequestedPhysician'), "0033xx10" : ('LT', '1', 'PatientStudyUID', False, 'PatientStudyUID'), }, 'SIEMENS SIENET' : { "0019xx01" : (u'DS', u'1', u'?', False, u''), }, 'SIEMENS SMS-AX ACQ 1.0' : { "0021xx00" : ('US', '1', 'AcquisitionType', False, 'AcquisitionType'), "0021xx01" : ('US', '1', 'AcquisitionMode', False, 'AcquisitionMode'), "0021xx02" : ('US', '1', 'FootswitchIndex', False, 'FootswitchIndex'), "0021xx03" : ('US', '1', 'AcquisitionRoom', False, 'AcquisitionRoom'), "0021xx04" : ('SL', '1', 'CurrentTimeProduct', False, 'CurrentTimeProduct'), "0021xx05" : ('SL', '1', 'Dose', False, 'Dose'), "0021xx06" : ('SL', '1', 'SkinDosePercent', False, 'SkinDosePercent'), "0021xx07" : ('SL', '1', 'SkinDoseAccumulation', False, 'SkinDoseAccumulation'), "0021xx08" : ('SL', '1', 'SkinDoseRate', False, 'SkinDoseRate'), "0021xx0A" : ('UL', '1', 'CopperFilter', False, 'CopperFilter'), "0021xx0B" : ('US', '1', 'MeasuringField', False, 'MeasuringField'), "0021xx0C" : ('SS', '3', 'PostBlankingCircle', False, 'PostBlankingCircle'), "0021xx0D" : ('SS', '2-2n', 'DynaAngles', False, 'DynaAngles'), "0021xx0E" : ('SS', '1', 'TotalSteps', False, 'TotalSteps'), "0021xx0F" : ('SL', '3-3n', 'DynaXRayInfo', False, 'DynaXRayInfo'), "0021xx10" : ('US', '1', 'ModalityLUTInputGamma', False, 'ModalityLUTInputGamma'), "0021xx11" : ('US', '1', 'ModalityLUTOutputGamma', False, 'ModalityLUTOutputGamma'), "0021xx12" : ('OB', '1-n', 'SH_STPAR', False, 'SH_STPAR'), "0021xx13" : ('US', '1', 'AcquisitionZoom', False, 'AcquisitionZoom'), "0021xx14" : ('SS', '1', 'DynaAngulationStepWidth', False, 'DynaAngulationStepWidth'), "0021xx15" : ('US', '1', 'Harmonization', False, 'Harmonization'), "0021xx16" : ('US', '1', 'DRSingleFlag', False, 'DRSingleFlag'), "0021xx17" : ('SL', '1', 'SourceToIsocenter', False, 'SourceToIsocenter'), "0021xx18" : ('US', '1', 'PressureData', False, 'PressureData'), "0021xx19" : ('SL', '1', 'ECGIndexArray', False, 'ECGIndexArray'), "0021xx1A" : ('US', '1', 'FDFlag', False, 'FDFlag'), "0021xx1B" : ('OB', '1', 'SH_ZOOM', False, 'SH_ZOOM'), "0021xx1C" : ('OB', '1', 'SH_COLPAR', False, 'SH_COLPAR'), "0021xx1D" : ('US', '1', 'K_Factor', False, 'K_Factor'), "0021xx1E" : ('US', '8', 'EVE', False, 'EVE'), "0021xx1F" : ('SL', '1', 'TotalSceneTime', False, 'TotalSceneTime'), "0021xx20" : ('US', '1', 'RestoreFlag', False, 'RestoreFlag'), "0021xx21" : ('US', '1', 'StandMovementFlag', False, 'StandMovementFlag'), "0021xx22" : ('US', '1', 'FDRows', False, 'FDRows'), "0021xx23" : ('US', '1', 'FDColumns', False, 'FDColumns'), "0021xx24" : ('US', '1', 'TableMovementFlag', False, 'TableMovementFlag'), "0021xx25" : ('LO', '1', 'OriginalOrganProgramName', False, 'OriginalOrganProgramName'), "0021xx26" : ('DS', '1', 'CrispyXPIFilter', False, 'CrispyXPIFilter'), }, 'SIEMENS SMS-AX ORIGINAL IMAGE INFO 1.0' : { "0025xx00" : ('US', '1', 'ViewNative', False, 'ViewNative'), "0025xx01" : ('US', '1', 'OriginalSeriesNumber', False, 'OriginalSeriesNumber'), "0025xx02" : ('US', '1', 'OriginalImageNumber', False, 'OriginalImageNumber'), "0025xx03" : ('US', '1', 'WinCenter', False, 'WinCenter'), "0025xx04" : ('US', '1', 'WinWidth', False, 'WinWidth'), "0025xx05" : ('US', '1', 'WinBrightness', False, 'WinBrightness'), "0025xx06" : ('US', '1', 'WinContrast', False, 'WinContrast'), "0025xx07" : ('US', '1', 'OriginalFrameNumber', False, 'OriginalFrameNumber'), "0025xx08" : ('US', '1', 
'OriginalMaskFrameNumber', False, 'OriginalMaskFrameNumber'), "0025xx09" : ('US', '1', 'Opac', False, 'Opac'), "0025xx0A" : ('US', '1', 'OriginalNumberOfFrames', False, 'OriginalNumberOfFrames'), "0025xx0B" : ('DS', '1', 'OriginalSceneDuration', False, 'OriginalSceneDuration'), "0025xx0C" : ('LO', '1', 'IdentifierLOID', False, 'IdentifierLOID'), "0025xx0D" : ('SS', '1-n', 'OriginalSceneVFRInfo', False, 'OriginalSceneVFRInfo'), "0025xx0E" : ('SS', '1', 'OriginalFrameECGPosition', False, 'OriginalFrameECGPosition'), "0025xx0F" : ('SS', '1', 'OriginalECG1stFrameOffset_retired', False, 'OriginalECG1stFrameOffset_retired'), "0025xx10" : ('SS', '1', 'ZoomFlag', False, 'ZoomFlag'), "0025xx11" : ('US', '1', 'Flex', False, 'Flex'), "0025xx12" : ('US', '1', 'NumberOfMaskFrames', False, 'NumberOfMaskFrames'), "0025xx13" : ('US', '1', 'NumberOfFillFrames', False, 'NumberOfFillFrames'), "0025xx14" : ('US', '1', 'SeriesNumber', False, 'SeriesNumber'), "0025xx15" : ('IS', '1', 'ImageNumber', False, 'ImageNumber'), }, 'SIEMENS SMS-AX QUANT 1.0' : { "0023xx00" : ('DS', '2', 'HorizontalCalibrationPixelSize', False, 'HorizontalCalibrationPixelSize'), "0023xx01" : ('DS', '2', 'VerticalCalibrationPixelSize', False, 'VerticalCalibrationPixelSize'), "0023xx02" : ('LO', '1', 'CalibrationObject', False, 'CalibrationObject'), "0023xx03" : ('DS', '1', 'CalibrationObjectSize', False, 'CalibrationObjectSize'), "0023xx04" : ('LO', '1', 'CalibrationMethod', False, 'CalibrationMethod'), "0023xx05" : ('ST', '1', 'Filename', False, 'Filename'), "0023xx06" : ('IS', '1', 'FrameNumber', False, 'FrameNumber'), "0023xx07" : ('IS', '2', 'CalibrationFactorMultiplicity', False, 'CalibrationFactorMultiplicity'), "0023xx08" : ('IS', '1', 'CalibrationTODValue', False, 'CalibrationTODValue'), }, 'SIEMENS SMS-AX VIEW 1.0' : { "0019xx00" : ('US', '1', 'ReviewMode', False, 'ReviewMode'), "0019xx01" : ('US', '1', 'AnatomicalBackgroundPercent', False, 'AnatomicalBackgroundPercent'), "0019xx02" : ('US', '1', 'NumberOfPhases', False, 'NumberOfPhases'), "0019xx03" : ('US', '1', 'ApplyAnatomicalBackground', False, 'ApplyAnatomicalBackground'), "0019xx04" : ('SS', '4-4n', 'PixelShiftArray', False, 'PixelShiftArray'), "0019xx05" : ('US', '1', 'Brightness', False, 'Brightness'), "0019xx06" : ('US', '1', 'Contrast', False, 'Contrast'), "0019xx07" : ('US', '1', 'Enabled', False, 'Enabled'), "0019xx08" : ('US', '1', 'NativeEdgeEnhancementPercentGain', False, 'NativeEdgeEnhancementPercentGain'), "0019xx09" : ('SS', '1', 'NativeEdgeEnhancementLUTIndex', False, 'NativeEdgeEnhancementLUTIndex'), "0019xx0A" : ('SS', '1', 'NativeEdgeEnhancementKernelSize', False, 'NativeEdgeEnhancementKernelSize'), "0019xx0B" : ('US', '1', 'SubtrEdgeEnhancementPercentGain', False, 'SubtrEdgeEnhancementPercentGain'), "0019xx0C" : ('SS', '1', 'SubtrEdgeEnhancementLUTIndex', False, 'SubtrEdgeEnhancementLUTIndex'), "0019xx0D" : ('SS', '1', 'SubtrEdgeEnhancementKernelSize', False, 'SubtrEdgeEnhancementKernelSize'), "0019xx0E" : ('US', '1', 'FadePercent', False, 'FadePercent'), "0019xx0F" : ('US', '1', 'FlippedBeforeLateralityApplied', False, 'FlippedBeforeLateralityApplied'), "0019xx10" : ('US', '1', 'ApplyFade', False, 'ApplyFade'), "0019xx12" : ('US', '1', 'Zoom', False, 'Zoom'), "0019xx13" : ('SS', '1', 'PanX', False, 'PanX'), "0019xx14" : ('SS', '1', 'PanY', False, 'PanY'), "0019xx15" : ('SS', '1', 'NativeEdgeEnhancementAdvPercGain', False, 'NativeEdgeEnhancementAdvPercGain'), "0019xx16" : ('SS', '1', 'SubtrEdgeEnhancementAdvPercGain', False, 
'SubtrEdgeEnhancementAdvPercGain'), "0019xx17" : ('US', '1', 'InvertFlag', False, 'InvertFlag'), "0019xx1A" : ('OB', '1', 'Quant1KOverlay', False, 'Quant1KOverlay'), "0019xx1B" : ('US', '1', 'OriginalResolution', False, 'OriginalResolution'), "0019xx1C" : ('DS', '1', 'AutoWindowCenter', False, 'AutoWindowCenter'), "0019xx1D" : ('DS', '1', 'AutoWindowWidth', False, 'AutoWindowWidth'), }, 'SIEMENS Selma' : { "0019xx06" : (u'IS', u'1', u'?', False, u''), "0019xx07" : (u'IS', u'1', u'?', False, u''), "0019xx08" : (u'IS', u'1', u'?', False, u''), "0019xx26" : (u'LO', u'1', u'?', False, u''), "0019xx29" : (u'LO', u'1', u'?', False, u''), "0019xx30" : (u'US', u'1', u'?', False, u''), "0019xx31" : (u'US', u'1', u'?', False, u''), "0019xx32" : (u'US', u'1', u'?', False, u''), "0019xx33" : (u'US', u'1', u'?', False, u''), "0019xx34" : (u'US', u'1', u'?', False, u''), "0019xx35" : (u'US', u'1', u'?', False, u''), }, 'SIEMENS WH SR 1.0' : { "0071xx01" : (u'LO', u'1', u'?', False, u''), "0071xx02" : (u'LO', u'1', u'?', False, u''), }, 'SIEMENS_FLCOMPACT_VA01A_PROC' : { "0017xx0a" : (u'SS', u'1', u'Internal Data', False, u'internal_data'), "0017xx0b" : (u'SS', u'1', u'Internal Data', False, u'internal_data'), "0017xx0c" : (u'SS', u'1', u'Internal Data', False, u'internal_data'), "0017xx0d" : (u'FL', u'1', u'Internal Data', False, u'internal_data'), "0017xx0e" : (u'LO', u'1', u'Internal Data', False, u'internal_data'), "0017xx0f" : (u'LO', u'1', u'Internal Data', False, u'internal_data'), "0017xx14" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx16" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx17" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx18" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx19" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx1a" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx1b" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx1c" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx1e" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx1f" : (u'SS', u'1', u'Internal Data', False, u'internal_data'), "0017xx20" : (u'SS', u'1', u'Internal Data', False, u'internal_data'), "0017xx21" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx22" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx23" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx24" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx25" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx26" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx27" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx28" : (u'FL', u'1', u'Internal Data', False, u'internal_data'), "0017xx29" : (u'FL', u'1', u'Internal Data', False, u'internal_data'), "0017xx48" : (u'SS', u'1', u'Internal Data', False, u'internal_data'), "0017xx49" : (u'SS', u'1', u'Internal Data', False, u'internal_data'), "0017xx4a" : (u'SS', u'1', u'Internal Data', False, u'internal_data'), "0017xx4b" : (u'SS', u'1', u'Internal Data', False, u'internal_data'), "0017xx4c" : (u'LO', u'1', u'Internal Data', False, u'internal_data'), "0017xx4d" : (u'SS', u'1', u'Internal Data', False, u'internal_data'), "0017xx4e" : (u'LO', u'1', u'Internal Data', False, u'internal_data'), "0017xx4f" : (u'LO', u'1', u'Internal Data', False, u'internal_data'), "0017xx50" : (u'SS', u'1', u'Internal Data', False, 
u'internal_data'), "0017xx51" : (u'FL', u'1', u'Internal Data', False, u'internal_data'), "0017xx52" : (u'SS', u'1', u'Internal Data', False, u'internal_data'), "0017xx53" : (u'SS', u'1', u'Internal Data', False, u'internal_data'), "0017xx54" : (u'SS', u'1', u'Internal Data', False, u'internal_data'), "0017xx55" : (u'SS', u'1', u'Internal Data', False, u'internal_data'), "0017xx5a" : (u'OW', u'1', u'Internal Data', False, u'internal_data'), "0017xx5b" : (u'OW', u'1', u'Internal Data', False, u'internal_data'), "0017xx5c" : (u'OW', u'1', u'Internal Data', False, u'internal_data'), "0017xx64" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx66" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx67" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx68" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx85" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx86" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx87" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx88" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx89" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx8a" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx8b" : (u'US', u'1', u'Internal Data', False, u'internal_data'), "0017xx8c" : (u'FL', u'1',
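# --------------------------------------------------------------------------
# Hedged lookup sketch (not part of the original dictionaries). The private
# tags above are keyed as "ggggxxee", where "xx" stands for the private-block
# byte that is only assigned at runtime, so a lookup has to mask that byte.
# `private_dictionaries`, `creator`, `group` and `element` are illustrative
# names, not identifiers from the original module.
def lookup_private_tag(private_dictionaries, creator, group, element):
    """Return the (VR, VM, name, retired, keyword) entry for a private tag."""
    # e.g. group=0x0019, element=0x1001 -> key "0019xx01"
    key = "{:04x}xx{:02x}".format(group, element & 0xFF)
    block = private_dictionaries.get(creator, {})
    # The keys above mix upper- and lower-case hex, so try both spellings.
    return block.get(key) or block.get(key.upper().replace("XX", "xx"))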
<reponame>eladrich/pyrallis import argparse import dataclasses import inspect from logging import getLogger from typing import Any, Optional, List, Type, Dict, Set, Union, Tuple from . import docstring from .wrapper import Wrapper from .. import utils logger = getLogger(__name__) class FieldWrapper(Wrapper[dataclasses.Field]): """ The FieldWrapper class acts a bit like an 'argparse.Action' class, which essentially just creates the `option_strings` and `arg_options` that get passed to the `add_argument(*option_strings, **arg_options)` function of the `argparse._ArgumentGroup` (in this case represented by the `parent` attribute, an instance of the class `DataclassWrapper`). The `option_strings`, `required`, `help`, `default`, etc. attributes just autogenerate the argument of the same name of the above-mentioned `add_argument` function. The `arg_options` attribute fills in the rest and may overwrite these values, depending on the type of field. The `field` argument is the actually wrapped `dataclasses.Field` instance. """ def __init__(self, field: dataclasses.Field, parent: Any = None, prefix: str = ""): super().__init__(wrapped=field, name=field.name) self.field: dataclasses.Field = field self.prefix: str = prefix self._parent: Any = parent # Holders used to 'cache' the properties. # (could've used cached_property with Python 3.8). self._option_strings: Optional[Set[str]] = None self._required: Optional[bool] = None self._docstring: docstring.AttributeDocString = docstring.AttributeDocString() self._help: Optional[str] = None self._default: Optional[Union[Any, List[Any]]] = None self._dest: Optional[str] = None # the argparse-related options: self._arg_options: Dict[str, Any] = {} self._type: Optional[Type[Any]] = None # stores the resulting values for each of the destination attributes. self._results: Dict[str, Any] = {} @property def arg_options(self) -> Dict[str, Any]: """Dictionary of values to be passed to the `add_argument` method. The main feature of this package is to infer these arguments automatically using features of the built-in `dataclasses` package, as well as Python's type annotations. By passing additional keyword arguments to the `field()` function, the autogenerated arguments can be overwritten, giving access to all of the usual argparse features know and love. NOTE: When passing an `action` keyword argument, we remove all the autogenerated options that aren't required by the Action class constructor. For example, when specifying a custom `action` like "store_true" or "store_false", the `type` argument autogenerated here shouldn't be passed to the constructor of the `argparse._StoreFalseAction`, so we discard it. """ if self._arg_options: return self._arg_options # get the auto-generated options. options = self.get_arg_options() # overwrite the auto-generated options with given ones, if any. options.update(self.custom_arg_options) # only keep the arguments used by the Action constructor. action = options.get("action", "store") self._arg_options = only_keep_action_args(options, action) return self._arg_options def get_arg_options(self) -> Dict[str, Any]: """Create the `parser.add_arguments` kwargs for this field.""" if not self.field.init: return {} # TODO: Refactor this: # 1. Create a `get_argparse_options_for_field` function # 2. Use `get_argparse_options_for_annotation` below as part of that function # 3. Update the dict returned from 1. with values set in the field() function # 4. Update the dict from 3. 
with the values set by the DataclassWrapper, or # when this field is reused. (are they ever modified externally?) # 5. Return that dictionary. _arg_options: Dict[str, Any] = {} _arg_options["required"] = False # Required arguments can also be set from yaml, # so do not enforce with argparse _arg_options["dest"] = self.dest _arg_options["default"] = self.default if self.help: _arg_options["help"] = self.help elif self.default is not None: # issue 64: Need to add an empty 'help' string, so that the formatter # automatically adds the (default: '123') _arg_options["help"] = " " _arg_options['type'] = self.type try: _arg_options['type'].__name__ = self.type.__repr__().replace('typing.', '') except Exception as e: # Only to prettify printing, if fails just continue pass return _arg_options @property def action(self) -> Union[str, Type[argparse.Action]]: """The `action` argument to be passed to `add_argument(...)`.""" return self.custom_arg_options.get("action", "store") @property def action_str(self) -> str: if isinstance(self.action, str): return self.action return self.action.__name__ @property def custom_arg_options(self) -> Dict[str, Any]: """Custom argparse options that overwrite those in `arg_options`. Can be set by using the `field` function, passing in a keyword argument that would usually be passed to the parser.add_argument( *option_strings, **kwargs) method. """ return self.field.metadata.get("custom_args", {}) @property def option_strings(self) -> List[str]: """Generates the `option_strings` argument to the `add_argument` call. `parser.add_argument(*name_or_flags, **arg_options)` ## Notes: - Additional names for the same argument can be added via the `field` function. - Whenever the name of an attribute includes underscores ("_"), the same argument can be passed by using dashes ("-") instead. This also includes aliases. - If an alias contained leading dashes, either single or double, the same number of dashes will be used, even in the case where a prefix is added. For an illustration of this, see the aliases example. """ dashes: List[str] = [] # contains the leading dashes. options: List[str] = [] # contains the name following the dashes. # Currently create only a single option name, no support for aliases dashes.append('--') options.append(self.dest) # remove duplicates by creating a set. option_strings = set(f"{dash}{option}" for dash, option in zip(dashes, options)) return list(sorted(option_strings, key=len)) @property def dest(self) -> str: """Where the attribute will be stored in the Namespace.""" self._dest = super().dest return self._dest @property def nargs(self): return self.custom_arg_options.get("nargs", None) @property def default(self) -> Any: """Either a single default value, when parsing a single argument, or the list of default values, when this argument is reused multiple times (which only happens with the `ConflictResolution.ALWAYS_MERGE` option). In order of increasing priority, this could either be: 1. The default attribute of the field 2. the value of the corresponding attribute on the parent, if it has a default value """ if self._default is not None: return self._default default: Any = utils.default_value(self.field) if default is dataclasses.MISSING: default = None self._default = default return self._default @default.setter def default(self, value: Any): self._default = value @property def required(self) -> bool: if self._required is not None: return self._required if self.action_str.startswith("store_"): # all the store_* actions do not require a value. 
self._required = False elif self.is_optional: self._required = False elif self.parent.required: # if the parent dataclass is required, then this attribute is too. # TODO: does that make sense though? self._required = True elif self.nargs in {"?", "*"}: self._required = False elif self.nargs == "+": self._required = True elif self.default is None: self._required = True else: self._required = False return self._required @required.setter def required(self, value: bool): self._required = value @property def type(self) -> Type[Any]: """Returns the wrapped field's type annotation.""" if self._type is None: self._type = self.field.type return self._type def __str__(self): return f"""<FieldWrapper for field '{self.dest}'>""" @property def help(self) -> Optional[str]: if self._help: return self._help try: self._docstring = docstring.get_attribute_docstring( self.parent.dataclass, self.field.name ) except (SystemExit, Exception) as e: logger.debug( f"Couldn't find attribute docstring for field {self.name}, {e}" ) self._docstring = docstring.AttributeDocString() if self._docstring.docstring_below: self._help = self._docstring.docstring_below elif self._docstring.comment_above: self._help = self._docstring.comment_above elif self._docstring.comment_inline: self._help = self._docstring.comment_inline return self._help @help.setter def help(self, value: str): self._help = value @property def name(self) -> str: return self.field.name @property def is_list(self): return utils.is_list(self.type) @property def is_enum(self) -> bool: return utils.is_enum(self.type) @property def is_tuple(self) -> bool: return utils.is_tuple(self.type) @property def is_bool(self) -> bool: return utils.is_bool(self.type) @property def is_optional(self) -> bool: return utils.is_optional(self.field.type) @property def is_union(self) -> bool: return utils.is_union(self.field.type) @property def type_arguments(self) -> Optional[Tuple[Type, ...]]: return utils.get_type_arguments(self.type) @property def parent(self) -> "DataclassWrapper": return self._parent def only_keep_action_args( options: Dict[str, Any], action: Union[str, Any] ) -> Dict[str, Any]: """Remove all the arguments in `options` that aren't required by the Action. Parameters ---------- options : Dict[str, Any] A dictionary of options that would usually be passed to `add_arguments(*option_strings, **options)`. action : Union[str, Any] The action class or name. Returns ------- Dict[str, Any] [description] """ # TODO: explicitly tests these custom actions? argparse_action_classes: Dict[str, Type[argparse.Action]] = { "store": argparse._StoreAction, "store_const": argparse._StoreConstAction, "store_true": argparse._StoreTrueAction, "store_false": argparse._StoreFalseAction, "append": argparse._AppendAction, "append_const": argparse._AppendConstAction, "count": argparse._CountAction, "help": argparse._HelpAction, "version": argparse._VersionAction, "parsers": argparse._SubParsersAction, } if action not in argparse_action_classes: # the provided `action` is not a standard argparse-action. # We don't remove any of the provided options. return options # Remove all the keys that aren't needed by the action constructor: action_class = argparse_action_classes[action] argspec = inspect.getfullargspec(action_class) if argspec.varargs is not None or argspec.varkw is not None: # if the constructor takes variable arguments, pass all
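# --------------------------------------------------------------------------
# Hedged illustration (not pyrallis code): a stripped-down version of the idea
# described in `get_arg_options` above - derive `add_argument` keyword
# arguments from a dataclass field's annotation and default, then register the
# option on an argparse parser. `ExampleConfig` and `add_field_argument` are
# made-up names for this sketch only.
import argparse
import dataclasses


@dataclasses.dataclass
class ExampleConfig:
    lr: float = 3e-4   # learning rate
    epochs: int = 10   # number of training epochs


def add_field_argument(parser, field):
    kwargs = {"dest": field.name, "type": field.type, "required": False}
    if field.default is not dataclasses.MISSING:
        kwargs["default"] = field.default
        # Mirrors the trick noted above: an empty help string lets a
        # defaults-aware formatter append "(default: ...)" to the help text.
        kwargs["help"] = " "
    parser.add_argument(f"--{field.name}", **kwargs)


parser = argparse.ArgumentParser()
for f in dataclasses.fields(ExampleConfig):
    add_field_argument(parser, f)
print(parser.parse_args(["--lr", "0.001"]))  # -> Namespace(epochs=10, lr=0.001)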
<filename>experiment/models/punctuation_domain_model.py import copy import math import logging import os from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union import tarfile import pytorch_lightning as pl import torch import torch.nn.functional as F from core import ClassificationReport from core.layers import * from core.losses import (AggregatorLoss, CrossEntropyLoss, FocalDiceLoss, FocalLoss, LinearChainCRF) from pytorch_lightning.utilities import rank_zero_only from core.optim import get_optimizer, parse_optimizer_args, prepare_lr_scheduler from omegaconf import DictConfig, OmegaConf, open_dict from transformers import AutoModel, AutoTokenizer import torch.utils.data.dataloader as dataloader from data import PunctuationDataModule, PunctuationInferenceDataset from os import path import tempfile from core.common import Serialization, FileIO from time import time from core.utils import view_aligned __all__ = ['PunctuationDomainModel'] _MODEL_CONFIG_YAML = "model_config.yaml" _MODEL_WEIGHTS = "model_weights.ckpt" class PunctuationDomainModel(pl.LightningModule, Serialization, FileIO): '''Pytorch Lightning Module for Punctuation Domain Transfer.''' def __init__(self, cfg: DictConfig, trainer: pl.Trainer = None, data_id: str = '', log_dir: str = '', ): if trainer is not None and not isinstance(trainer, pl.Trainer): raise ValueError( f"trainer constructor argument must be either None or pytorch_lightning.Trainer. But got {type(trainer)} instead." ) super().__init__() self._cfg = cfg self._cfg.log_dir=log_dir self._optimizer = None self._scheduler = None self._trainer = trainer self.transformer = AutoModel.from_pretrained(self._cfg.model.transformer_path) self.tokenizer=AutoTokenizer.from_pretrained(self._cfg.model.transformer_path) if self._cfg.model.no_space_label is not None and self._cfg.model.dataset.attach_label_to_end is None: s=set(self._cfg.model.punct_label_ids) s.add(self._cfg.model.no_space_label) self._cfg.model.punct_label_ids=sorted(list(s)) else: self._cfg.model.punct_label_ids=sorted(list(self._cfg.model.punct_label_ids)) self.ids_to_labels = {_[0]: _[1] for _ in enumerate(self._cfg.model.punct_label_ids)} self.labels_to_ids = {v:k for k,v in self.ids_to_labels.items()} self.label_map=None if self._cfg.model.label_map is None else {k:v for k,v in self._cfg.model.label_map.items()} self.data_id=data_id if self._cfg.inference==False: self._cfg.model.dataset.labelled = OmegaConf.create([] if self._cfg.model.dataset.labelled==None else self._cfg.model.dataset.labelled) self._cfg.model.dataset.unlabelled = OmegaConf.create([] if self._cfg.model.dataset.unlabelled==None else self._cfg.model.dataset.unlabelled) assert(len(self._cfg.model.dataset.labelled)>0,'Please include at least 1 labelled dataset') if self._cfg.model.dataset.low_resource_labelled_count>0: for d in self._cfg.model.dataset.unlabelled: dl=d+'.labelled.train.csv' du=d+'.unlabelled.train.csv' d=d+'.train.csv' os.system(f"awk 'NR==1 || NR<={self._cfg.model.dataset.low_resource_labelled_count+1}' {d} > {dl}") os.system(f"awk 'NR==1 || NR>{self._cfg.model.dataset.low_resource_labelled_count+1}' {d} > {du}") self._cfg.inference=True self.setup_datamodule() pp('setup complete') self.save_hyperparameters(self._cfg) pp(self._cfg) if self._cfg.model.punct_class_weights==None: if (self.hparams.model.punct_class_weight_factor>0 and self.hparams.model.punct_head.loss!='crf'): self._cfg.model.punct_class_weights=OmegaConf.create([ x**self.hparams.model.punct_class_weight_factor for x in\ 
self.dm.train_dataset.determine_class_weights().tolist()]) else: self._cfg.model.punct_class_weights=None extra_hidden_size=2 if self.hparams.model.domain_head.predict_labelled else self.hparams.model.dataset.num_domains\ if (self.hparams.model.cat_domain_logits) else self.transformer.config.hidden_size*2\ if self.hparams.model.domain_head.pooling=='mean_max'\ else self.transformer.config.hidden_size self.punct_classifier = TokenClassifier( hidden_size= self.transformer.config.hidden_size+extra_hidden_size \ if (self.hparams.model.cat_domain_and_states==True)\ else self.transformer.config.hidden_size*2 if self.hparams.model.domain_head.pooling=='mean_max' else\ self.transformer.config.hidden_size , num_classes=len(self.labels_to_ids), activation=self.hparams.model.punct_head.activation, log_softmax=self.hparams.model.punct_head.log_softmax, dropout=self.hparams.model.punct_head.fc_dropout, num_layers=self.hparams.model.punct_head.punct_num_fc_layers, use_transformer_init=self.hparams.model.punct_head.use_transformer_init, ) self.domain_classifier = TokenClassifier( hidden_size=self.transformer.config.hidden_size, num_classes=2 if self.hparams.model.domain_head.predict_labelled else\ self.hparams.model.dataset.num_domains, activation=self.hparams.model.domain_head.activation, log_softmax=self.hparams.model.domain_head.log_softmax, dropout=self.hparams.model.domain_head.fc_dropout, num_layers=self.hparams.model.domain_head.domain_num_fc_layers, use_transformer_init=self.hparams.model.domain_head.use_transformer_init, ) \ if self.hparams.model.domain_head.pooling is None else SequenceClassifier( hidden_size=self.transformer.config.hidden_size, num_classes=2 if self.hparams.model.domain_head.predict_labelled else\ self.hparams.model.dataset.num_domains, num_layers=self.hparams.model.domain_head.domain_num_fc_layers, activation=self.hparams.model.domain_head.activation, log_softmax=self.hparams.model.domain_head.log_softmax, dropout=self.hparams.model.domain_head.fc_dropout, use_transformer_init=self.hparams.model.domain_head.use_transformer_init, pooling=self.hparams.model.domain_head.pooling, idx_conditioned_on = self.hparams.model.domain_head.idx_conditioned_on, ) if not self.hparams.model.punct_head.loss in ['cel', 'dice', 'crf', 'focal']: self.log('punct_head loss not found, fallback to cross entropy loss') self._cfg.model.punct_head.loss = 'cel' if self.hparams.model.punct_head.loss == 'dice': self.punctuation_loss = FocalDiceLoss(**self._cfg.model.punct_head.dice_loss, weight=self._cfg.model.punct_class_weights, num_labels=self.hparams.model.dataset.num_labels) elif self.hparams.model.punct_head.loss == 'crf': self.punctuation_loss = LinearChainCRF(self._cfg.model.dataset.num_labels) elif self.hparams.model.punct_head.loss == 'focal': self.punctuation_loss = FocalLoss(**self._cfg.model.punct_head.focal_loss, weight=self._cfg.model.punct_class_weights) else: self.punctuation_loss = CrossEntropyLoss(logits_ndim=3, weight=self._cfg.model.punct_class_weights) if self.hparams.model.punct_head.bilstm: self.bilstm = torch.nn.LSTM(bidirectional=True, num_layers=2, input_size=self.transformer.config.hidden_size, hidden_size=self.transformer.config.hidden_size//2, batch_first=True) if not self.hparams.model.domain_head.loss in ['cel','focal','dice']: if not (self.hparams.model.domain_head.pooling is None and self.hparams.model.domain_head.loss == 'dice'): self.log('domain_head loss not found, fallback to cross entropy loss') self.hparams.model.domain_head.loss = 'cel' # 
self.hparams.model.domain_head.loss domain_weight=None if self.hparams.model.domain_head.weight is None else list(self.hparams.model.domain_head.weight) if (len(self.hparams.model.dataset.labelled)==0) or (len(self.hparams.model.dataset.unlabelled)==0): domain_weight=None if self.hparams.model.domain_head.loss == 'focal': self.domain_loss = FocalLoss(**self._cfg.model.domain_head.focal_loss, weight=domain_weight) elif self.hparams.model.domain_head.loss == 'dice': self.domain_loss = FocalDiceLoss(**self._cfg.model.domain_head.dice_loss, weight=domain_weight, num_labels=2 if self.hparams.model.domain_head.predict_labelled else\ self.hparams.model.dataset.num_domains) else: self.domain_loss = CrossEntropyLoss(logits_ndim=2, weight=domain_weight) self.agg_loss = AggregatorLoss(num_inputs=2) self.punct_class_report = ClassificationReport( num_classes=self.hparams.model.dataset.num_labels, label_ids=self.labels_to_ids, mode='macro', dist_sync_on_step=True, ignore=[0], ) self.chunked_punct_class_report = ClassificationReport( num_classes=self.hparams.model.dataset.num_labels, label_ids=self.labels_to_ids, mode='macro', dist_sync_on_step=True, ) self.domain_class_report = ClassificationReport( num_classes=2 if self.hparams.model.domain_head.predict_labelled else\ self.hparams.model.dataset.num_domains, label_ids={v:v for v in list(range(2 if \ self.hparams.model.domain_head.predict_labelled else\ self.hparams.model.dataset.num_domains))}, mode='macro', dist_sync_on_step=True, ) self.grad_reverse = GradientReverse self.grad_reverse.scale = self.hparams.model.domain_head.gamma_factor self.freeze() pp('init complete') def forward(self, input_ids, attention_mask, subtoken_mask=None, domain_ids=None): hidden_states = self.transformer( input_ids=input_ids, attention_mask=attention_mask )[0] if self.hparams.model.punct_head.bilstm: hidden_states,_=self.bilstm(hidden_states) reverse_grad_hidden_states = self.grad_reverse.apply(hidden_states) assert not torch.isnan(input_ids).any(), (input_ids,'inputid') assert not torch.isnan(attention_mask).any(), ('amask',attention_mask) if torch.isnan(hidden_states).any(): logging.error(hidden_states,attention_mask.sum(1),'hiddenstate') if self.hparams.model.domain_head.pooling is None: domain_logits = self.domain_classifier( hidden_states=reverse_grad_hidden_states, ) punct_hidden_states= torch.cat((hidden_states,domain_logits),dim=-1) \ if (self.hparams.model.cat_domain_logits and self.hparams.model.cat_domain_and_states) \ else hidden_states punct_logits = self.punct_classifier(hidden_states=punct_hidden_states) else: domain_logits,pooled = self.domain_classifier( hidden_states=reverse_grad_hidden_states, attention_mask=attention_mask) punct_hidden_states=( torch.cat((hidden_states,domain_logits.unsqueeze(1).repeat_interleave(hidden_states.shape[1],dim=1)),dim=-1) if self.hparams.model.cat_domain_logits\ else torch.cat((hidden_states,pooled.unsqueeze(1).repeat_interleave(hidden_states.shape[1],dim=1)),dim=-1) )\ if self.hparams.model.cat_domain_and_states else hidden_states punct_logits = self.punct_classifier(hidden_states=punct_hidden_states) # if self.hparams.model.domain_head.predict_labelled: # domain_logits=domain_logits.flatten() # print(attention_mask.sum(axis=1),domain_logits) return punct_logits, domain_logits def _make_step(self, batch): input_ids = batch['input_ids'] attention_mask = batch['attention_mask'] subtoken_mask = batch['subtoken_mask'] punct_labels = batch['labels'] # domain_labels = batch['domain'] domain_labels = 
torch.eq(subtoken_mask[:,0],1).long() if self.hparams.model.domain_head.predict_labelled else batch['domain'] punct_logits, domain_logits = self( input_ids=input_ids, attention_mask=attention_mask, subtoken_mask=subtoken_mask, ) punctuation_loss = self.punctuation_loss( logits=punct_logits[subtoken_mask[:,0]>0], labels=punct_labels[subtoken_mask[:,0]>0], loss_mask=subtoken_mask[subtoken_mask[:,0]>0]) if not torch.isnan(punctuation_loss).any(): self.hparams.model.domain_head.gamma=self.hparams.model.domain_head.gamma_factor*punctuation_loss.item() else: logging.error('punctuation_loss nan') self.hparams.model.domain_head.gamma=0 punctuation_loss=torch.zeros_like(punctuation_loss) #Domain consider each punct label separate, then punct class weight. punct_logits_pred=punct_logits.argmax(axis=-1) token_weight=F.one_hot(punct_logits_pred,num_classes=punct_logits.shape[-1])#*punct_logits if self._cfg.model.punct_class_weights is not None: token_weight=token_weight*torch.tensor(self._cfg.model.punct_class_weights).type_as(punct_logits) idx = punct_logits_pred.unsqueeze(2) token_weight = token_weight.gather(2, idx) token_weight = token_weight.squeeze(2) if (self.hparams.model.domain_head.loss in ['focal','cel']) and (self.hparams.model.domain_head.weight_tokens==True)\ and (self.hparams.model.domain_head.pooling is None): domain_loss = self.domain_loss( logits=domain_logits, labels=domain_labels.repeat(1,punct_labels.shape[-1]), token_weight=token_weight) else: domain_loss = self.domain_loss( logits=domain_logits, labels=domain_labels.repeat(1,punct_labels.shape[-1]) if self.hparams.model.domain_head.pooling is None else domain_labels) if torch.isnan(domain_loss).any(): logging.error('domain_loss nan') loss=punctuation_loss else: loss = self.agg_loss(loss_1=punctuation_loss, loss_2=domain_loss) return loss, punct_logits, domain_logits def training_step(self, batch, batch_idx): """ Lightning calls this inside the training loop with the data from the training dataloader passed in as `batch`. """ p=(self.current_epoch*self.train_size+batch_idx)/(self.train_size*self.hparams.trainer.max_epochs) self.grad_reverse.scale=(2/(1+math.exp(-10*p))-1)*self.hparams.model.domain_head.gamma if (batch_idx%1000==0): print('gamma:',self.grad_reverse.scale) loss, _, _ = self._make_step(batch) lr = self._optimizer.param_groups[0]['lr'] self.log('lr', lr, prog_bar=True) self.log('train_loss', loss) self.log('gamma', self.grad_reverse.scale,logger=True) return {'loss': loss, 'lr': lr} def validation_step(self, batch, batch_idx, dataloader_idx=0): """ Lightning calls this inside the validation loop with the data from the validation dataloader passed in as `batch`. 
""" input_ids = batch['input_ids'] attention_mask = batch['attention_mask'] subtoken_mask = batch['subtoken_mask'] punct_labels = batch['labels'] # domain_labels = batch['domain'] domain_labels = torch.eq(subtoken_mask[:,0],1).long() if self.hparams.model.domain_head.predict_labelled else batch['domain'] labelled_mask=subtoken_mask[:,0]>0 val_loss, punct_logits, domain_logits = self._make_step(batch) # attention_mask = attention_mask > 0.5 punct_preds = self.punctuation_loss.decode(punct_logits[labelled_mask], subtoken_mask[labelled_mask]) \ if self.hparams.model.punct_head.loss == 'crf' else torch.argmax(punct_logits[labelled_mask], axis=-1)[subtoken_mask[labelled_mask]] punct_labels = punct_labels[labelled_mask][subtoken_mask[labelled_mask]] self.punct_class_report.update(punct_preds, punct_labels) domain_preds = torch.argmax(domain_logits[labelled_mask], axis=-1)[subtoken_mask[labelled_mask]] if self.hparams.model.domain_head.pooling is None else torch.argmax(domain_logits, axis=1) domain_labels = domain_labels.repeat(1,self.hparams.model.dataset.max_seq_length)[labelled_mask][subtoken_mask[labelled_mask]] if self.hparams.model.domain_head.pooling is None else domain_labels.view(-1) self.domain_class_report.update(domain_preds, domain_labels) return { 'val_loss': val_loss, 'punct_tp': self.punct_class_report.tp, 'punct_fn': self.punct_class_report.fn, 'punct_fp': self.punct_class_report.fp, 'domain_tp': self.domain_class_report.tp, 'domain_fn': self.domain_class_report.fn, 'domain_fp': self.domain_class_report.fp, } def test_step(self, batch, batch_idx, dataloader_idx=0): """ Lightning calls this inside the validation loop with the data from the validation dataloader passed in as `batch`. """ attention_mask = batch['attention_mask'] subtoken_mask = batch['subtoken_mask'] punct_labels = batch['labels'] # domain_labels = batch['domain'] domain_labels = torch.eq(subtoken_mask[:,0],1).long() if self.hparams.model.domain_head.predict_labelled else batch['domain'] labelled_mask=subtoken_mask[:,0]>0 chunk=self.hparams.model.test_chunk_percent if chunk is not None: chunk_mask=torch.zeros_like(subtoken_mask) chunk_mask[:,torch.arange(int((0.5-chunk/2)*subtoken_mask.shape[-1]),int((0.5+chunk/2)*subtoken_mask.shape[-1]))]=1 chunk_mask=chunk_mask[labelled_mask][subtoken_mask[labelled_mask]] test_loss, punct_logits, domain_logits = self._make_step(batch) # attention_mask = attention_mask > 0.5 punct_preds = self.punctuation_loss.decode(punct_logits[labelled_mask], subtoken_mask[labelled_mask]) \ if self.hparams.model.punct_head.loss == 'crf' else torch.argmax(punct_logits[labelled_mask], axis=-1)[subtoken_mask[labelled_mask]] if chunk is not None: chunked_punct_preds = punct_preds[chunk_mask] punct_labels = punct_labels[labelled_mask][subtoken_mask[labelled_mask]] if chunk is not None: chunked_punct_labels = punct_labels[chunk_mask] self.punct_class_report.update(punct_preds, punct_labels) if chunk is not None: self.chunked_punct_class_report.update(chunked_punct_preds, chunked_punct_labels) domain_preds = torch.argmax(domain_logits[labelled_mask], axis=-1)[subtoken_mask[labelled_mask]] if self.hparams.model.domain_head.pooling is None else torch.argmax(domain_logits, axis=1) domain_labels = domain_labels.repeat(1,self.hparams.model.dataset.max_seq_length)[labelled_mask][subtoken_mask[labelled_mask]] if self.hparams.model.domain_head.pooling is None else domain_labels.view(-1) self.domain_class_report.update(domain_preds, domain_labels) out={ 'test_loss': test_loss, 'punct_tp': 
self.punct_class_report.tp, 'punct_fn': self.punct_class_report.fn, 'punct_fp': self.punct_class_report.fp, } if chunk is not None: out['chunked_punct_tp']=self.chunked_punct_class_report.tp out['chunked_punct_fn']=self.chunked_punct_class_report.fn out['chunked_punct_fp']=self.chunked_punct_class_report.fp out['domain_tp']=self.domain_class_report.tp, out['domain_fn']=self.domain_class_report.fn, out['domain_fp']=self.domain_class_report.fp, return out def validation_epoch_end(self, outputs): self.dm.train_dataset.shuffle() if outputs is not None and len(outputs) == 0: return {} if type(outputs[0]) == dict: output_dict = self.multi_validation_epoch_end(outputs) if output_dict is not None and 'log' in output_dict: self.log_dict(output_dict.pop('log'), on_epoch=True) return output_dict def multi_validation_epoch_end(self, outputs): avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean() # calculate metrics and log classification report for Punctuation task punct_precision, punct_recall, punct_f1, punct_report, punctuation_cm = self.punct_class_report.compute() logging.info(f'Punctuation report: {punct_report}') # calculate metrics and log classification report for domainalization task domain_precision, domain_recall, domain_f1, domain_report, domain_cm = self.domain_class_report.compute() logging.info(f'Domain report: {domain_report}') self.log('val_loss', avg_loss, prog_bar=True) self.log('punct_precision', punct_precision) self.log('punct_f1', punct_f1) self.log('punct_recall', punct_recall) self.log('domain_precision', domain_precision) self.log('domain_f1', domain_f1) self.log('domain_recall', domain_recall) # self.log('punctuation_cm', punctuation_cm) # self.log('domain_cm', domain_cm) def test_epoch_end(self, outputs): if outputs is not None and len(outputs) == 0: return {} # Case where we provide exactly 1 data loader if type(outputs[0]) == dict: output_dict = self.multi_test_epoch_end(outputs) if output_dict is not None and 'log' in output_dict: self.log_dict(output_dict.pop('log'), on_epoch=True) return output_dict def multi_test_epoch_end(self, outputs): """ Called at the end of test to aggregate outputs. outputs: list of individual outputs of each validation step. """ chunk=self.hparams.model.test_chunk_percent is not None pp(chunk,self.hparams.model.test_chunk_percent) avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean() # calculate metrics and log classification report for Punctuation task punct_precision, punct_recall, punct_f1, punct_report, punct_cm = self.punct_class_report.compute() logging.info(f'Punctuation report: {punct_report}') if chunk: chunked_punct_precision, chunked_punct_recall, chunked_punct_f1, chunked_punct_report, chunked_punct_cm = self.chunked_punct_class_report.compute() logging.info(f'Chunked Punctuation report: {chunked_punct_report}') # calculate metrics and log classification report for domainalization task domain_precision, domain_recall, domain_f1, domain_report, domain_cm = self.domain_class_report.compute() logging.info(f'Domain report: {domain_report}') path=f"{self.hparams.log_dir}/test-{''.join([_.split('/')[-1][:3] for _ in self.dm.test_dataset.labelled])}-{self.frozen}.txt" if self.hparams.log_dir!=''
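# --------------------------------------------------------------------------
# Hedged sketch (the real layer lives in `core.layers`, which is not shown
# here): a typical gradient-reversal function matching how the model above
# uses it - `GradientReverse.scale` is set from the training schedule and
# `GradientReverse.apply(hidden_states)` is called in `forward`. It is the
# identity on the forward pass and negates/scales gradients on the backward
# pass, which is what drives the adversarial domain head.
import torch


class GradientReverse(torch.autograd.Function):
    scale = 1.0  # e.g. (2 / (1 + exp(-10 * p)) - 1) * gamma, as in training_step

    @staticmethod
    def forward(ctx, x):
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        return -GradientReverse.scale * grad_output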
def get_off_datas(cls): """ Instanciate Category class objects, Then inserts theirs products in database """ for i in cf.CATEGORIES: print("Téléchargement des produits de la catégorie :", i, "...") i = Category(i) print("Terminé.") cls.insert_products_from_category(i) @classmethod def check_user(cls, user, login): """ Verifies if specified user and login are in database User and login arguments must be spaceless strings Returns user ID if found """ user = "".join(str(user).split()) login = "".join(str(login).split()) # Extra protection against SQL injections cls.connect() query = "SELECT login, ID FROM Users WHERE name = '{}'".format(user) try: cls.cursor.execute(query) except mc.Error as err: print(err) else: try: result = cls.cursor.fetchone() registered_login = result[0] user_ID = result[1] except TypeError: # cursor is empty and result is a NoneType object # print("User not found") return False else: if registered_login == login: # print("User name and login are correct") return user_ID elif registered_login != login: # print("Incorrect login") return False finally: cls.disconnect() @classmethod def create_user(cls, user, login): """ Insert user and login in table Users User and login arguments must be spaceless strings """ cls.connect() try: print("Création de l'utilisateur {}: ".format(user), end='') cls.cursor.execute(cf.USER_INSERT, {"user": user, "login": login}) except mc.Error as err: if err.errno == 1062 and err.sqlstate == "23000": print("Utilisateur déjà enregistré") return False else: print(err) else: cls.connection.commit() # print("Creation ok") return True finally: cls.disconnect() @classmethod def product_informations(cls, product_code, informations = "code, name, healthyness, brands, description, stores, url"): """ Get specified informations (2nd argument) provided a product code (1st argument) Any column content of table Products can be requested provided column name as a string If several columns are requested, specify names with this syntax: "columnname1, columnname2, ..." Information argument is optionnal. If not specified, a default set of information will be retrieved. """ cls.connect() query = "SELECT {} FROM Products WHERE code = '{}'".format(informations, product_code) try: print("Récupération des informations sur le produit '{}'".format(product_code)) cls.cursor.execute(query) except mc.Error as err: print(err) else: try: iter(cls.cursor) except TypeError: print("Cursor definition encountered an unknown issue causing it not to be iterable") else: product_informations = cls.cursor.fetchall() # print("\n", product_informations) if len(product_informations) > 1: return product_informations elif len(product_informations) == 1: return product_informations[0] else: print("Informations non trouvées") finally: cls.disconnect() @classmethod def substitute(cls, product_code): """ Main method taking as argument code of a single product. Tries to retrieve the substitute in table Substitutes. Find substitute and return it's code. """ cls.connect() query = "SELECT substitute_code FROM Substitutes WHERE product_code = {}".format(product_code) cls.cursor.execute(query) result = cls.cursor.fetchone() try: cls.cursor.fetchall() # empties cursor if many entries have been found except mc.errors.InterfaceError: pass # cursor is already empty. Move on. 
cls.disconnect() # check if a subtitute have already been found for given product # if so, directly return it try: result[0] except TypeError: # cursor is empty and result is a NoneType object print("Aucun substitut n'a été cherché pour ce produit jusqu'à présent. \nRecherche en cours...") else: return result[0] # else process algorythm for substitute finding. # finds specified product's category, nutrition grade and subcategories category, healthyness, subcategories = DatabaseProcedures.product_informations(product_code, "category, healthyness, subcategories") subcategories = subcategories[1:-1].split(', ') # remove "[" and "]" in the string, then remove ', ' # to keep only 0 and 1 as a list of strings # print("category is: ", category) # print("healthyness is: ", healthyness) # print("subcategories are: ", subcategories) cls.connect() # gets every product that have: # - same category # - same or better healthyness # - different code query = ("SELECT code, subcategories, healthyness, popularity FROM Products WHERE category = '{}' AND healthyness <= '{}' AND code != '{}'".format(category, healthyness, product_code)) cls.cursor.execute(query) result = cls.cursor.fetchall() # each product found should be represented as a 4 elements tuple cls.disconnect() similar_products = [] max_proximity = 0 min_proximity = 999 for product in result: proximity = 0 # constructed indicator for each potential substitute based on subcategories comparison # if proximity is high, potential substitute is similar to initial product subcategory_list = product[1][1:-1].split(', ') # remove "[" and "]" in the string, then remove ', ' # to keep only 0 and 1 as a list of strings for index in range(len(subcategory_list)): if subcategory_list[index] == subcategories[index]: proximity += 1 # proximity is incremented every time a potential substitute have a subcategory # in common with the initial product if proximity > max_proximity: max_proximity = proximity if proximity < min_proximity: min_proximity = proximity similar_products.append([product[0], proximity, product[2], product[3]]) # similar_products is the list of every products of same category as initial product # with calculated proximity indicator replacing subcategories list indexes = [] delta_proximity = max_proximity - min_proximity closeness = 0.95 # arbitrary number between 0 and 1 # higher leads to closer to max_proximity as minimum target for proximity proximity_target = floor(min_proximity + delta_proximity * closeness) # print("proximity max is : ", max_proximity) # print("proximity min is : ", min_proximity) # print("delta is : ", delta_proximity) # print("proximity target is : ", proximity_target) for index in range(len(similar_products)): if similar_products[index][1] < proximity_target: # if closeness is set to 1, proximity_target equals max_proximity # "<" keeps proximity_target "<=" would have led to discard all potential substitutes indexes.append(index) # indexes is a list of indexes in similar_poducts list # where proximity is below proximity_target for index_element in reversed(indexes): # indexes is reversed to have indexes in descending order and avoid IndexError similar_products.pop(index_element) # removes every product with proximity lower than defined proximity similar_products.sort(key = lambda a : a[1], reverse = True) # print("sorted and cleaned list : \n", similar_products) # this command sorts similar_products list based on proximity in descending order if healthyness == "A" or healthyness == "a": # product chosen by user is already 
healthy # sorting using healthyness is irrelevant here so we use popularity similar_products.sort(key = lambda a : a[3], reverse = True) # on same popularity, proximity order is unchanged return similar_products[0][0] else: # for any other healthyness than "A" sorting healthyness is needed similar_products.sort(key = lambda a : a[2]) # on same healthyness, proximity order remains unchanged equally_healthy_products = [] for index in range(len(similar_products)): if similar_products[0][2] == similar_products[index][2]: # check for equally healthy products in similar_products equally_healthy_products.append(similar_products[index]) # this keeps only highest healthyness products with proximity order unchanged between them # equally_healthy_products at this point # is a list of products with same healthyness if len(equally_healthy_products) == 1: # if the higher healthyness concerns only one similar_product return similar_products[0][0] # then return this product's code else: # higher healthyness concerns many products so further sorting is needed equally_healthy_products.sort(key = lambda a : a[3], reverse = True) # products here are equally healthy # list is finaly sorted using popularity # on same popularity, proximity order remains unchanged equally_healthyandpopular_products = [] for index in range(len(equally_healthy_products)): if equally_healthy_products[0][3] == equally_healthy_products[index][3]: # check for equally healthy and equally popular products equally_healthyandpopular_products.append(equally_healthy_products[index]) # equally_healthyandpopular_products at this point # is a list of products with same healthyness and popularity if len(equally_healthyandpopular_products) == 1: # if the higher popularity on higher healthyness concerns only one product return equally_healthy_products[0][0] # then return this product's code else: # higher healthyness and higher popularity may concern many products random_index = randrange(0, len(equally_healthyandpopular_products)) # in such case, randomly pick one of them return equally_healthyandpopular_products[random_index] # if healthyness is equal between products, choose highest popularity # if there is still equalities, choose highest popularity # if there is still equalities, then randomly choose @classmethod def record_substitute(cls, user_id, product_code, substitute_code): """ Method to insert data in table "Subtitutes" if not inserted yet for this user Takes 3 arguments: user ID, product code and substitute code Each argument is a string representing an integer """ cls.connect() query = "SELECT user_ID, substitute_code FROM Substitutes WHERE product_code = {}".format(product_code) cls.cursor.execute(query) result = cls.cursor.fetchall() # check if a subtitute have
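# --------------------------------------------------------------------------
# Hedged illustration (not part of the original class): the proximity measure
# described in the comments of `substitute` above - count how many subcategory
# flags two products share, after stripping the stored "[...]" string the same
# way the method does.
def subcategory_proximity(stored_a, stored_b):
    """Number of positions where two stored subcategory strings agree."""
    flags_a = stored_a[1:-1].split(', ')
    flags_b = stored_b[1:-1].split(', ')
    return sum(1 for a, b in zip(flags_a, flags_b) if a == b)


print(subcategory_proximity("[1, 0, 1]", "[1, 1, 1]"))  # -> 2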
self, State s) -> Vec3 Parameters ---------- s: SimTK::State const & """ return _simbody.SimbodyMatterSubsystem_calcSystemMassCenterLocationInGround(self, s) def calcSystemMassPropertiesInGround(self, s): """ calcSystemMassPropertiesInGround(SimbodyMatterSubsystem self, State s) -> MassProperties Parameters ---------- s: SimTK::State const & """ return _simbody.SimbodyMatterSubsystem_calcSystemMassPropertiesInGround(self, s) def calcSystemCentralInertiaInGround(self, s): """ calcSystemCentralInertiaInGround(SimbodyMatterSubsystem self, State s) -> Inertia Parameters ---------- s: SimTK::State const & """ return _simbody.SimbodyMatterSubsystem_calcSystemCentralInertiaInGround(self, s) def calcSystemMassCenterVelocityInGround(self, s): """ calcSystemMassCenterVelocityInGround(SimbodyMatterSubsystem self, State s) -> Vec3 Parameters ---------- s: SimTK::State const & """ return _simbody.SimbodyMatterSubsystem_calcSystemMassCenterVelocityInGround(self, s) def calcSystemMassCenterAccelerationInGround(self, s): """ calcSystemMassCenterAccelerationInGround(SimbodyMatterSubsystem self, State s) -> Vec3 Parameters ---------- s: SimTK::State const & """ return _simbody.SimbodyMatterSubsystem_calcSystemMassCenterAccelerationInGround(self, s) def calcSystemMomentumAboutGroundOrigin(self, s): """ calcSystemMomentumAboutGroundOrigin(SimbodyMatterSubsystem self, State s) -> SpatialVec Parameters ---------- s: SimTK::State const & """ return _simbody.SimbodyMatterSubsystem_calcSystemMomentumAboutGroundOrigin(self, s) def calcSystemCentralMomentum(self, s): """ calcSystemCentralMomentum(SimbodyMatterSubsystem self, State s) -> SpatialVec Parameters ---------- s: SimTK::State const & """ return _simbody.SimbodyMatterSubsystem_calcSystemCentralMomentum(self, s) def calcKineticEnergy(self, state): """ calcKineticEnergy(SimbodyMatterSubsystem self, State state) -> SimTK::Real Parameters ---------- state: SimTK::State const & """ return _simbody.SimbodyMatterSubsystem_calcKineticEnergy(self, state) def multiplyBySystemJacobian(self, state, u, Ju): """ multiplyBySystemJacobian(SimbodyMatterSubsystem self, State state, Vector u, VectorOfSpatialVec Ju) Parameters ---------- state: SimTK::State const & u: SimTK::Vector const & Ju: SimTK::Vector_< SimTK::SpatialVec > & """ return _simbody.SimbodyMatterSubsystem_multiplyBySystemJacobian(self, state, u, Ju) def calcBiasForSystemJacobian(self, *args): """ calcBiasForSystemJacobian(SimbodyMatterSubsystem self, State state, VectorOfSpatialVec JDotu) Parameters ---------- state: SimTK::State const & JDotu: SimTK::Vector_< SimTK::SpatialVec > & calcBiasForSystemJacobian(SimbodyMatterSubsystem self, State state, Vector JDotu) Parameters ---------- state: SimTK::State const & JDotu: SimTK::Vector & """ return _simbody.SimbodyMatterSubsystem_calcBiasForSystemJacobian(self, *args) def multiplyBySystemJacobianTranspose(self, state, F_G, f): """ multiplyBySystemJacobianTranspose(SimbodyMatterSubsystem self, State state, VectorOfSpatialVec F_G, Vector f) Parameters ---------- state: SimTK::State const & F_G: SimTK::Vector_< SimTK::SpatialVec > const & f: SimTK::Vector & """ return _simbody.SimbodyMatterSubsystem_multiplyBySystemJacobianTranspose(self, state, F_G, f) def calcSystemJacobian(self, *args): """ calcSystemJacobian(SimbodyMatterSubsystem self, State state, MatrixOfSpatialVec J_G) Parameters ---------- state: SimTK::State const & J_G: SimTK::Matrix_< SimTK::SpatialVec > & calcSystemJacobian(SimbodyMatterSubsystem self, State state, Matrix J_G) Parameters ---------- 
state: SimTK::State const & J_G: SimTK::Matrix & """ return _simbody.SimbodyMatterSubsystem_calcSystemJacobian(self, *args) def multiplyByStationJacobian(self, *args): """ multiplyByStationJacobian(SimbodyMatterSubsystem self, State state, SimTKArrayMobilizedBodyIndex onBodyB, SimTKArrayVec3 stationPInB, Vector u, VectorVec3 JSu) Parameters ---------- state: SimTK::State const & onBodyB: SimTK::Array_< MobilizedBodyIndex > const & stationPInB: SimTK::Array_< SimTK::Vec3 > const & u: SimTK::Vector const & JSu: SimTK::Vector_< SimTK::Vec3 > & multiplyByStationJacobian(SimbodyMatterSubsystem self, State state, MobilizedBodyIndex onBodyB, Vec3 stationPInB, Vector u) -> Vec3 Parameters ---------- state: SimTK::State const & onBodyB: MobilizedBodyIndex stationPInB: SimTK::Vec3 const & u: SimTK::Vector const & """ return _simbody.SimbodyMatterSubsystem_multiplyByStationJacobian(self, *args) def multiplyByStationJacobianTranspose(self, *args): """ multiplyByStationJacobianTranspose(SimbodyMatterSubsystem self, State state, SimTKArrayMobilizedBodyIndex onBodyB, SimTKArrayVec3 stationPInB, VectorVec3 f_GP, Vector f) Parameters ---------- state: SimTK::State const & onBodyB: SimTK::Array_< MobilizedBodyIndex > const & stationPInB: SimTK::Array_< SimTK::Vec3 > const & f_GP: SimTK::Vector_< SimTK::Vec3 > const & f: SimTK::Vector & multiplyByStationJacobianTranspose(SimbodyMatterSubsystem self, State state, MobilizedBodyIndex onBodyB, Vec3 stationPInB, Vec3 f_GP, Vector f) Parameters ---------- state: SimTK::State const & onBodyB: MobilizedBodyIndex stationPInB: SimTK::Vec3 const & f_GP: SimTK::Vec3 const & f: SimTK::Vector & """ return _simbody.SimbodyMatterSubsystem_multiplyByStationJacobianTranspose(self, *args) def calcStationJacobian(self, *args): """ calcStationJacobian(SimbodyMatterSubsystem self, State state, SimTKArrayMobilizedBodyIndex onBodyB, SimTKArrayVec3 stationPInB, MatrixVec3 JS) Parameters ---------- state: SimTK::State const & onBodyB: SimTK::Array_< MobilizedBodyIndex > const & stationPInB: SimTK::Array_< SimTK::Vec3 > const & JS: SimTK::Matrix_< SimTK::Vec3 > & calcStationJacobian(SimbodyMatterSubsystem self, State state, MobilizedBodyIndex onBodyB, Vec3 stationPInB, RowVectorVec3 JS) Parameters ---------- state: SimTK::State const & onBodyB: MobilizedBodyIndex stationPInB: SimTK::Vec3 const & JS: SimTK::RowVector_< SimTK::Vec3 > & calcStationJacobian(SimbodyMatterSubsystem self, State state, SimTKArrayMobilizedBodyIndex onBodyB, SimTKArrayVec3 stationPInB, Matrix JS) Parameters ---------- state: SimTK::State const & onBodyB: SimTK::Array_< MobilizedBodyIndex > const & stationPInB: SimTK::Array_< SimTK::Vec3 > const & JS: SimTK::Matrix & calcStationJacobian(SimbodyMatterSubsystem self, State state, MobilizedBodyIndex onBodyB, Vec3 stationPInB, Matrix JS) Parameters ---------- state: SimTK::State const & onBodyB: MobilizedBodyIndex stationPInB: SimTK::Vec3 const & JS: SimTK::Matrix & """ return _simbody.SimbodyMatterSubsystem_calcStationJacobian(self, *args) def calcBiasForStationJacobian(self, *args): """ calcBiasForStationJacobian(SimbodyMatterSubsystem self, State state, SimTKArrayMobilizedBodyIndex onBodyB, SimTKArrayVec3 stationPInB, VectorVec3 JSDotu) Parameters ---------- state: SimTK::State const & onBodyB: SimTK::Array_< MobilizedBodyIndex > const & stationPInB: SimTK::Array_< SimTK::Vec3 > const & JSDotu: SimTK::Vector_< SimTK::Vec3 > & calcBiasForStationJacobian(SimbodyMatterSubsystem self, State state, SimTKArrayMobilizedBodyIndex onBodyB, SimTKArrayVec3 stationPInB, 
Vector JSDotu) Parameters ---------- state: SimTK::State const & onBodyB: SimTK::Array_< MobilizedBodyIndex > const & stationPInB: SimTK::Array_< SimTK::Vec3 > const & JSDotu: SimTK::Vector & calcBiasForStationJacobian(SimbodyMatterSubsystem self, State state, MobilizedBodyIndex onBodyB, Vec3 stationPInB) -> Vec3 Parameters ---------- state: SimTK::State const & onBodyB: MobilizedBodyIndex stationPInB: SimTK::Vec3 const & """ return _simbody.SimbodyMatterSubsystem_calcBiasForStationJacobian(self, *args) def multiplyByFrameJacobian(self, *args): """ multiplyByFrameJacobian(SimbodyMatterSubsystem self, State state, SimTKArrayMobilizedBodyIndex onBodyB, SimTKArrayVec3 originAoInB, Vector u, VectorOfSpatialVec JFu) Parameters ---------- state: SimTK::State const & onBodyB: SimTK::Array_< MobilizedBodyIndex > const & originAoInB: SimTK::Array_< SimTK::Vec3 > const & u: SimTK::Vector const & JFu: SimTK::Vector_< SimTK::SpatialVec > & multiplyByFrameJacobian(SimbodyMatterSubsystem self, State state, MobilizedBodyIndex onBodyB, Vec3 originAoInB, Vector u) -> SpatialVec Parameters ---------- state: SimTK::State const & onBodyB: MobilizedBodyIndex originAoInB: SimTK::Vec3 const & u: SimTK::Vector const & """ return _simbody.SimbodyMatterSubsystem_multiplyByFrameJacobian(self, *args) def multiplyByFrameJacobianTranspose(self, *args): """ multiplyByFrameJacobianTranspose(SimbodyMatterSubsystem self, State state, SimTKArrayMobilizedBodyIndex onBodyB, SimTKArrayVec3 originAoInB, VectorOfSpatialVec F_GAo, Vector f) Parameters ---------- state: SimTK::State const & onBodyB: SimTK::Array_< MobilizedBodyIndex > const & originAoInB: SimTK::Array_< SimTK::Vec3 > const & F_GAo: SimTK::Vector_< SimTK::SpatialVec > const & f: SimTK::Vector & multiplyByFrameJacobianTranspose(SimbodyMatterSubsystem self, State state, MobilizedBodyIndex onBodyB, Vec3 originAoInB, SpatialVec F_GAo, Vector f) Parameters ---------- state: SimTK::State const & onBodyB: MobilizedBodyIndex originAoInB: SimTK::Vec3 const & F_GAo: SimTK::SpatialVec const & f: SimTK::Vector & """ return _simbody.SimbodyMatterSubsystem_multiplyByFrameJacobianTranspose(self, *args) def calcFrameJacobian(self, *args): """ calcFrameJacobian(SimbodyMatterSubsystem self, State state, SimTKArrayMobilizedBodyIndex onBodyB, SimTKArrayVec3 originAoInB, MatrixOfSpatialVec JF) Parameters ---------- state: SimTK::State const & onBodyB: SimTK::Array_< MobilizedBodyIndex > const & originAoInB: SimTK::Array_< SimTK::Vec3 > const & JF: SimTK::Matrix_< SimTK::SpatialVec > & calcFrameJacobian(SimbodyMatterSubsystem self, State state, MobilizedBodyIndex onBodyB, Vec3 originAoInB, SimTK::RowVector_< SimTK::SpatialVec > & JF) Parameters ---------- state: SimTK::State const & onBodyB: MobilizedBodyIndex originAoInB: SimTK::Vec3 const & JF: SimTK::RowVector_< SimTK::SpatialVec > & calcFrameJacobian(SimbodyMatterSubsystem self, State state, SimTKArrayMobilizedBodyIndex onBodyB, SimTKArrayVec3 originAoInB, Matrix JF) Parameters ---------- state: SimTK::State const & onBodyB: SimTK::Array_< MobilizedBodyIndex > const & originAoInB: SimTK::Array_< SimTK::Vec3 > const & JF: SimTK::Matrix & calcFrameJacobian(SimbodyMatterSubsystem self, State state, MobilizedBodyIndex onBodyB, Vec3 originAoInB, Matrix JF) Parameters ---------- state: SimTK::State const & onBodyB: MobilizedBodyIndex originAoInB: SimTK::Vec3 const & JF: SimTK::Matrix & """ return _simbody.SimbodyMatterSubsystem_calcFrameJacobian(self, *args) def calcBiasForFrameJacobian(self, *args): """ 
calcBiasForFrameJacobian(SimbodyMatterSubsystem self, State state, SimTKArrayMobilizedBodyIndex onBodyB, SimTKArrayVec3 originAoInB, VectorOfSpatialVec JFDotu) Parameters ---------- state: SimTK::State const & onBodyB: SimTK::Array_< MobilizedBodyIndex > const & originAoInB: SimTK::Array_< SimTK::Vec3 > const & JFDotu: SimTK::Vector_< SimTK::SpatialVec > & calcBiasForFrameJacobian(SimbodyMatterSubsystem self, State state, SimTKArrayMobilizedBodyIndex onBodyB, SimTKArrayVec3 originAoInB, Vector JFDotu) Parameters ---------- state: SimTK::State const & onBodyB: SimTK::Array_< MobilizedBodyIndex > const & originAoInB: SimTK::Array_< SimTK::Vec3 > const & JFDotu: SimTK::Vector & calcBiasForFrameJacobian(SimbodyMatterSubsystem self, State state, MobilizedBodyIndex onBodyB, Vec3 originAoInB) -> SpatialVec Parameters ---------- state: SimTK::State const & onBodyB: MobilizedBodyIndex originAoInB: SimTK::Vec3 const & """ return _simbody.SimbodyMatterSubsystem_calcBiasForFrameJacobian(self, *args) def multiplyByM(self, state, a, Ma): """ multiplyByM(SimbodyMatterSubsystem self, State state, Vector a, Vector Ma) Parameters ---------- state: SimTK::State const & a: SimTK::Vector const & Ma: SimTK::Vector & """ return _simbody.SimbodyMatterSubsystem_multiplyByM(self, state, a, Ma) def multiplyByMInv(self, state, v, MinvV): """ multiplyByMInv(SimbodyMatterSubsystem self, State state, Vector v, Vector MinvV) Parameters ---------- state: SimTK::State const & v: SimTK::Vector const & MinvV: SimTK::Vector & """ return _simbody.SimbodyMatterSubsystem_multiplyByMInv(self, state, v, MinvV) def calcM(self, arg2, M): """ calcM(SimbodyMatterSubsystem self, State arg2, Matrix M) Parameters ---------- arg2: SimTK::State const & M: SimTK::Matrix & """ return _simbody.SimbodyMatterSubsystem_calcM(self, arg2, M) def calcMInv(self, arg2, MInv): """ calcMInv(SimbodyMatterSubsystem self, State arg2, Matrix MInv) Parameters ---------- arg2: SimTK::State const & MInv: SimTK::Matrix & """ return _simbody.SimbodyMatterSubsystem_calcMInv(self, arg2, MInv) def calcProjectedMInv(self, s, GMInvGt): """ calcProjectedMInv(SimbodyMatterSubsystem self, State s, Matrix GMInvGt) Parameters ---------- s: SimTK::State const & GMInvGt: SimTK::Matrix & """ return _simbody.SimbodyMatterSubsystem_calcProjectedMInv(self, s, GMInvGt) def solveForConstraintImpulses(self, state, deltaV, impulse): """ solveForConstraintImpulses(SimbodyMatterSubsystem self, State state, Vector deltaV, Vector impulse) Parameters ---------- state: SimTK::State const & deltaV: SimTK::Vector const & impulse: SimTK::Vector & """ return _simbody.SimbodyMatterSubsystem_solveForConstraintImpulses(self, state, deltaV, impulse) def multiplyByG(self, *args): """ multiplyByG(SimbodyMatterSubsystem self, State state, Vector ulike, Vector Gulike) Parameters ---------- state: SimTK::State const & ulike: SimTK::Vector const & Gulike: SimTK::Vector & multiplyByG(SimbodyMatterSubsystem self, State state, Vector ulike, Vector bias, Vector Gulike) Parameters ---------- state: SimTK::State const & ulike: SimTK::Vector const & bias: SimTK::Vector const & Gulike: SimTK::Vector & """ return _simbody.SimbodyMatterSubsystem_multiplyByG(self, *args)
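# --- Usage sketch: the mass-matrix helpers documented above ------------------
# A minimal, hedged example of calcM / multiplyByM / multiplyByMInv as wrapped
# by this SWIG module. It assumes the OpenSim Python distribution of these
# Simbody bindings is installed; "model.osim" is a placeholder model file, not
# a file referenced anywhere in this module.
import opensim as osim

model = osim.Model("model.osim")        # placeholder path to any valid model
state = model.initSystem()              # build the system and obtain a State
matter = model.getMatterSubsystem()     # the SimbodyMatterSubsystem wrapper

nu = state.getNU()                      # number of mobilities (length of u)

M = osim.Matrix()                       # calcM fills this nu x nu matrix
matter.calcM(state, M)

a = osim.Vector(nu, 1.0)                # an arbitrary acceleration-like vector
Ma = osim.Vector()
matter.multiplyByM(state, a, Ma)        # matrix-free product M * a

MinvV = osim.Vector()
matter.multiplyByMInv(state, a, MinvV)  # and M^-1 * a without forming M^-1

print(M.nrow(), M.ncol(), Ma.size(), MinvV.size())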
<reponame>ajamtli/DCSLiveryManager import argparse import glob import os import platform import sys from pprint import pprint from prompt_toolkit import PromptSession, HTML from prompt_toolkit.completion import NestedCompleter from rich import box from rich.align import Align from rich.console import Console, RenderGroup from rich.columns import Columns from rich.panel import Panel from rich.progress import ( BarColumn, DownloadColumn, TextColumn, TransferSpeedColumn, TimeRemainingColumn, Progress, SpinnerColumn ) from rich.prompt import Prompt, Confirm from rich.rule import Rule from rich.status import Status from rich.table import Table from DCSLM import __version__ from DCSLM.DCSUFParser import DCSUFParser from DCSLM.Livery import DCSUserFile, Livery from DCSLM.LiveryManager import LiveryManager from DCSLM.UnitConfig import Units import DCSLM.Utilities as Utilities def set_console_title(title): if platform.system() == 'Windows': os.system(f'title {title}') else: os.system(f'echo "\033]0;{title}\007"') def clear_console(): if platform.system() == 'Windows': os.system('cls') else: os.system('clear') def set_console_size(w, h): if platform.system() == 'Windows': os.system(f'mode con: cols={w} lines={h}') else: os.system(f'printf \'\033[8;{h};{w}t\'') class DCSLMApp: def __init__(self): self.console = None self.session = PromptSession(reserve_space_for_menu=6, complete_in_thread=True, ) self.completer = None self.commands = None self.lm = None def start(self): self.setup_commands() self.setup_command_completer() self.setup_console_window() self.clear_and_print_header() self.setup_livery_manager() self.quick_check_upgrade_available() self.run() def setup_commands(self): self.commands = { 'install': { 'completer': None, 'usage': "\[id/url1] \[id/url2] \[id/url3] ...", 'desc': "Install DCS liveries from DCS User Files URLs or IDs.", 'flags': { 'keep': { 'tags': ['-k', '--keep'], 'desc': "Keep downloaded livery archive files", 'action': "store_true", 'confirm': False }, 'reinstall': { 'tags': ['-r', '--reinstall'], 'desc': "Do not prompt if the livery is already registered.", 'action': "store_true", 'confirm': False }, 'allunits': { 'tags': ['-a', '--allunits'], 'desc': "Do not prompt when given a choice to install to multiple units and install to all.", 'action': "store_true", 'confirm': False }, }, 'args': { 'url': { 'type': "number/string", 'optional': False, 'desc': "DCS User Files ID or URL" }, }, 'exec': self.install_liveries }, 'uninstall': { 'completer': None, 'usage': "\[flags] livery", 'desc': "Uninstall the given managed livery from the \'title\' or \'ID\'.", 'flags': { 'keep': { 'tags': ['-k', '--keep'], 'desc': "Keep livery files on disk (untrack them)", 'action': "store_true", 'confirm': False }, }, 'args': { 'livery': { 'type': "string", 'optional': False, 'desc': "DCS User Files livery title" }, }, 'exec': self.uninstall_liveries }, 'info': { 'completer': None, 'usage': "livery", 'desc': "Get additional info about an installed livery.", 'flags': {}, 'args': { 'livery': { 'type': "string", 'optional': False, 'desc': "DCS User Files livery title" }, }, 'exec': self.get_livery_info }, 'list': { 'completer': None, 'usage': "", 'desc': "List currently installed DCS liveries.", 'flags': { 'ids': { 'tags': ['ids'], 'desc': "List the IDs of all registered liveries for copying.", 'action': "store_true", 'confirm': False }, }, 'args': {}, 'exec': self.list_liveries }, 'check': { 'completer': None, 'usage': "", 'desc': "Check for updates to any installed liveries.", 'flags': {}, 'args': 
{}, 'exec': self.check_liveries }, 'update': { 'completer': None, 'usage': "", 'desc': "Update any installed liveries that have a more recent version upload to \'DCS User Files\'.", 'flags': {}, 'args': {}, 'exec': self.update_liveries }, 'optimize': { 'completer': None, 'usage': "\[flags] livery", 'desc': "Attempt to optimize an installed livery by looking for unused or shared files between liveries within packs.", 'flags': { 'reoptimize': { 'tags': ['-r','--reoptimize'], 'desc': "Optimize liveries even if they have already been optimized.", 'action': "store_true", 'confirm': False }, 'keepdesc': { 'tags': ['-d','--keepdesc'], 'desc': "Keep a copy of the original unmodified description.lua files.", 'action': "store_true", 'confirm': False }, 'keepunused': { 'tags': ['-u', '--keepunused'], 'desc': "Keep unused files on disk at the end of optimization.", 'action': "store_true", 'confirm': False }, 'verbose': { 'tags': ['-v', '--verbose'], 'desc': "Verbose printing of livery file reference data for debugging purposes.", 'action': "store_true", 'confirm': False }, }, 'args': { 'livery': { 'type': "string", 'optional': False, 'desc': "DCS User Files livery title" }, }, 'exec': self.optimize_livery }, 'scan': { 'completer': None, 'usage': "", 'desc': "Scan folders for existing liveries with .dcslm registry files.", 'flags': {}, 'args': {}, 'exec': self.scan_for_liveries }, 'upgrade': { 'completer': None, 'usage': "", 'desc': "Upgrade DCSLM to the latest version", 'flags': {}, 'args': {}, 'exec': self.upgrade_dcslm }, 'help': { 'completer': None, 'usage': "", 'desc': "List the commands and their usage.", 'flags': {}, 'args': {}, 'exec': self.print_help }, 'exit': { 'completer': None, 'usage': "", 'desc': "Exit the DCS Livery Manager program.", 'flags': {}, 'args': {}, 'exec': None } } def _install_liveries(self, liveryStrings, keepFiles=False, forceDownload=False, forceInstall=False, forceAllUnits=False): installData = {'success': [], 'failed': []} for liveryStr in liveryStrings: correctedLiveryURL, urlID = Utilities.correct_dcs_user_files_url(liveryStr) if not correctedLiveryURL: errorMsg = "Failed to get DCS User Files url or ID from \'" + liveryStr + "\'." installData['failed'].append({'url': liveryStr, 'error': errorMsg}) self.console.print(errorMsg, style="bold red") else: livery = None try: getUFStr = "Getting DCS User File information from " + correctedLiveryURL with self.console.status(getUFStr): livery = self.lm.get_livery_data_from_dcsuf_url(correctedLiveryURL) self.console.print(getUFStr + "\n") self.print_dcsuf_panel(livery) existingLivery = self.lm.get_registered_livery(id=int(urlID)) if existingLivery and not forceInstall: if existingLivery.dcsuf.datetime == livery.dcsuf.datetime: if not self.prompt_existing_livery(existingLivery): raise RuntimeError("Skipping reinstalling livery.") unitLiveries = Units.Units['aircraft'][livery.dcsuf.unit]['liveries'] if len(unitLiveries) > 1 and not forceAllUnits: unitLiveries = self.prompt_aircraft_livery_choice(livery, unitLiveries) if len(unitLiveries) == 0: raise RuntimeError("No units selected for install.") livery.installs['units'] = unitLiveries archivePath = self.lm.does_archive_exist(livery.dcsuf.download.split('/')[-1]) if archivePath: if not forceDownload and self.lm.compare_archive_sizes(archivePath, livery.dcsuf.download): self.console.print("\nArchive file \'" + livery.dcsuf.download.split('/')[-1] + "\' for \'" + livery.dcsuf.title + "\' already exists. 
Using that instead.") keepFiles = True else: archivePath = None if not archivePath: self.console.print("\nDownloading livery archive file " + livery.dcsuf.download) archivePath = self._download_archive_progress(livery) if archivePath: livery.archive = archivePath self.console.print("\n[bold]Running extraction program on downloaded archive:") extractPath = self.lm.extract_livery_archive(livery) if extractPath: self.console.print("\nExtracted \'" + livery.archive + "\' to temporary directory.") destinationPath = self.lm.generate_livery_destination_path(livery) livery.destination = destinationPath self.console.print("Detecting extracted liveries...") installRoots = self.lm.generate_aircraft_livery_install_path(livery, unitLiveries) extractedLiveryFiles = self.lm.get_extracted_livery_files(livery, extractPath) detectedLiveries = self.lm.detect_extracted_liveries(livery, extractPath, extractedLiveryFiles) if len(detectedLiveries) and len(installRoots): liveryNames = [l['name'] for l in detectedLiveries] self.console.print(liveryNames) self.console.print("Generating livery install paths...") installPaths = self.lm.generate_livery_install_paths(livery, installRoots, detectedLiveries) if len(installPaths): self.console.print("Installing " + str(len(detectedLiveries)) + (" liveries" if len(detectedLiveries) > 1 else " livery") + " to " + str(len(installRoots)) + " aircraft.") with self.console.status("Installing extracted liveries..."): copiedLiveries = self.lm.copy_detected_liveries(livery, extractPath, extractedLiveryFiles, installPaths) if len(copiedLiveries): with self.console.status("Writing registry files..."): self.lm.write_livery_registry_files(livery) self.console.print("Wrote " + str(len(installRoots) * len(detectedLiveries)) + " registry files to installed livery directories.") self.lm.register_livery(livery) self.console.print("[bold green]Livery[/bold green] \'" + str(livery.dcsuf.title) + "\' [bold green]Registered!") livery.calculate_size_installed_liveries() installData['success'].append(livery) else: raise RuntimeError("Failed to copy livery files to install directories!") else: raise RuntimeError("Failed to generate install paths!") else: raise RuntimeError("Failed to detect valid livery directories from extracted livery archive!") else: raise RuntimeError("Failed to extract livery archive \'" + livery.archive + "\'.") except Exception as e: installData['failed'].append({'url': correctedLiveryURL, 'error': e}) self.console.print(e, style="bold red") finally: if livery: if livery.destination: self.console.print("Removing temporarily extracted folder.") if not self.lm.remove_extracted_livery_archive(livery): failedExtractPath = os.path.join(os.getcwd(), self.lm.FolderRoot, "extract", str(livery.dcsuf.id)) failedMsg = "Failed to remove all extracted files to directory " + failedExtractPath self.console.print(failedMsg, style="red") installData['failed'].append({'url': livery.dcsuf.id, 'error': failedMsg}) if livery.archive and not keepFiles: self.console.print("Removing downloaded archive file \'" + os.path.split(livery.archive)[1] + "\'.") self.lm.remove_downloaded_archive(livery, livery.archive) self.console.print("") return installData def _print_livery_install_report(self, installData, tableTitle): if len(installData['success']): installTable = Table(title=tableTitle,expand=False, box=box.ROUNDED) installTable.add_column("Unit", justify="left", no_wrap=True, style="green") installTable.add_column("ID", justify="center", no_wrap=True, style="sky_blue1") 
installTable.add_column("Livery Title", justify="center", style="") installTable.add_column("# Liveries", justify="center", no_wrap=True, style="magenta") installTable.add_column("Size (MB)", justify="right", no_wrap=True, style="bold gold1") for l in installData['success']: installTable.add_row(Units.Units['aircraft'][l.dcsuf.unit]['friendly'], str(l.dcsuf.id), l.dcsuf.title, str(l.get_num_liveries()), Utilities.bytes_to_mb_string(l.get_size_installed_liveries())) self.console.print(installTable) if len(installData['failed']): self.console.print("[bold red]Failed Livery Installs:") for l in installData['failed']: self.console.print("[bold red]" + l['url'] + "[/bold red][red]: " + str(l['error'])) def _parse_install_args(self, sArgs): try: installArgsParser = argparse.ArgumentParser(usage=self.commands['install']['usage'], description=self.commands['install']['desc'], exit_on_error=False) for iA in self.commands['install']['flags'].keys(): installArgsParser.add_argument(*self.commands['install']['flags'][iA]['tags'], help=self.commands['install']['flags'][iA]['desc'], action=self.commands['install']['flags'][iA]['action'], dest=iA) installArgsParser.add_argument('url', type=str, help=self.commands['install']['args']['url']['desc'], nargs="+") parsedArgs = installArgsParser.parse_known_args(sArgs) if len(parsedArgs[1]): self.console.print("Failed to parse the following args for \'install\':", style="bold red") self.console.print("\t" + str(parsedArgs[1]), style="bold red") return parsedArgs[0] except SystemExit: raise RuntimeError("Unable to parse \'uninstall\' command.") # TODO: Allow selection of multiple numbers when installed to units with choices def install_liveries(self, sArgs): installArgs = self._parse_install_args(sArgs) self.console.print("Attempting to install " + str(len(installArgs.url)) + (" liveries" if len(installArgs.url) > 1 else " livery") + " from DCS User Files.") installData = self._install_liveries(installArgs.url, keepFiles=installArgs.keep, forceInstall=installArgs.reinstall, forceAllUnits=installArgs.allunits) self.lm.write_data() self._print_livery_install_report(installData, "Livery Install Report") self.console.print("") def _parse_uninstall_args(self, sArgs): try: uninstallArgsParser = argparse.ArgumentParser(usage=self.commands['uninstall']['usage'], description=self.commands['uninstall']['desc'], exit_on_error=False) uninstallArgsParser.add_argument(*self.commands['uninstall']['flags']['keep']['tags'], action="store_false", help=self.commands['uninstall']['flags']['keep']['desc'], dest='keep') uninstallArgsParser.add_argument('livery', type=str, nargs="+", help=self.commands['uninstall']['args']['livery']['desc']) parsedArgs = uninstallArgsParser.parse_known_args(sArgs) if len(parsedArgs[1]): self.console.print("Failed to parse the following args for \'uninstall\':", style="bold red") self.console.print("\t" + str(parsedArgs[1]), style="bold red") return parsedArgs[0] except SystemExit: raise RuntimeError("Unable to parse \'uninstall\' command.") def uninstall_liveries(self, sArgs): uninstallArgs = self._parse_uninstall_args(sArgs) self.console.print("Attempting to uninstall " + str(len(uninstallArgs.livery)) + (" registered
# -*- coding: utf-8 -*- # Copyright (C) 2010-2016 <NAME> All rights reserved # Langstrasse 4, A--2244 Spannberg, Austria. <EMAIL> # **************************************************************************** # This module is part of the package GTW.__test__. # # This module is licensed under the terms of the BSD 3-Clause License # <http://www.c-tanzer.at/license/bsd_3c.html>. # **************************************************************************** # #++ # Name # GTW.__test__.Boat_in_Regatta # # Purpose # Test creation and querying of Boat_in_Regatta # # Revision Dates # 3-May-2010 (MG) Creation # 3-May-2010 (CT) Creation continued # 14-Dec-2011 (CT) Add tests for `attrs` # 19-Jan-2012 (CT) Add tests for `object_referring_attributes` # 19-Jan-2012 (CT) Add `_delayed` tests # 19-Mar-2012 (CT) Adapt to `Boat_Class.name.ignore_case` now being `True` # 19-Mar-2012 (CT) Adapt to reification of `SRM.Handicap` # 27-Apr-2012 (CT) Add test for `skipper_not_multiplexed` # 7-May-2012 (CT) Add test for `crew_number_valid` # 12-Jun-2012 (CT) Add tests for `tn_pid`, `.attrs ("type_name")` # 27-Jun-2012 (CT) Add tests for `query_changes` for `type_name` # 1-Aug-2012 (CT) Add `_test_referential_integrity` # 3-Aug-2012 (MG) Improve `_test_referential_integrity` # 4-Aug-2012 (CT) Add `_test_undo`, add `raw = True` to entity creation # 12-Oct-2012 (CT) Adapt to repr change of `An_Entity` # 13-Nov-2012 (CT) Adapt to change of `SRM.Club.name.cooked` # 10-Dec-2012 (CT) Add tests for `.FO` (nested attributes) # 19-Mar-2013 (CT) Add tests for `AQ.Attrs`, `AQ.E_Type`, `AQx` # 19-Mar-2013 (CT) Add test for `AQ.Atoms` # 15-Apr-2013 (CT) Adapt to change of `MOM.Attr.Kind.reset` # 26-Jul-2013 (CT) Add `_test_polymorph` # 20-Aug-2013 (CT) Remove `show_ora`, `show_dep` from `test_code` # Lazy loading of objects breaks this in SAS, SAW backends # 9-Oct-2013 (CT) Add `_test_qr_grouped_by` # ««revision-date»»··· #-- _test_code = r""" >>> scope = Scaffold.scope (%(p1)s, %(n1)s) # doctest:+ELLIPSIS Creating new scope MOMT__... 
>>> PAP = scope.PAP >>> SRM = scope.SRM >>> BiR = SRM.Boat_in_Regatta >>> today = BiR.registration_date.default >>> bc = SRM.Boat_Class ("Optimist", max_crew = 1, raw = True) >>> ys = SRM.Handicap ("Yardstick", raw = True) >>> b = SRM.Boat.instance_or_new ('Optimist', "1107", "AUT", raw = True) >>> p = PAP.Person.instance_or_new ("Tanzer", "Christian", raw = True) >>> s = SRM.Sailor.instance_or_new (p.epk_raw, nation = "AUT", mna_number = "29676", raw = True) ### 1 >>> rev = SRM.Regatta_Event ("Himmelfahrt", ("20080501", ), raw = True) >>> reg = SRM.Regatta_C (rev.epk_raw, bc.epk_raw, raw = True) >>> reh = SRM.Regatta_H (rev.epk_raw, ys, raw = True) >>> prepr (list (r.name for r in sorted (rev.regattas, key = TFL.Sorted_By ("+")))) ['Optimist', 'Yardstick'] >>> reg.set_raw (result = dict (date = "26.5.2009 10:20", software = "calculated with REGATTA.yellow8.com", status = "final", raw = True)) 1 >>> prepr ((str (reg.FO.result))) '2009-05-26 10:20, calculated with REGATTA.yellow8.com, final' >>> scope.commit () >>> scope.MOM.Id_Entity.query ().order_by (TFL.Sorted_By ("pid")).attrs ("pid", "type_name").all () [(1, 'SRM.Boat_Class'), (2, 'SRM.Handicap'), (3, 'SRM.Boat'), (4, 'PAP.Person'), (5, 'SRM.Sailor'), (6, 'SRM.Regatta_Event'), (7, 'SRM.Regatta_C'), (8, 'SRM.Regatta_H')] >>> scope.MOM.Id_Entity.query ().order_by (TFL.Sorted_By ("pid")).attrs ("type_name", "pid").all () [('SRM.Boat_Class', 1), ('SRM.Handicap', 2), ('SRM.Boat', 3), ('PAP.Person', 4), ('SRM.Sailor', 5), ('SRM.Regatta_Event', 6), ('SRM.Regatta_C', 7), ('SRM.Regatta_H', 8)] >>> scope.MOM.Id_Entity.query ().order_by (TFL.Sorted_By ("type_name", "pid")).attrs ("pid", "type_name").all () [(4, 'PAP.Person'), (3, 'SRM.Boat'), (1, 'SRM.Boat_Class'), (2, 'SRM.Handicap'), (7, 'SRM.Regatta_C'), (6, 'SRM.Regatta_Event'), (8, 'SRM.Regatta_H'), (5, 'SRM.Sailor')] >>> prepr (rev.epk_raw) ('Himmelfahrt', (('finish', '2008-05-01'), ('start', '2008-05-01')), 'SRM.Regatta_Event') >>> prepr (reg.epk_raw) (('Himmelfahrt', (('finish', '2008-05-01'), ('start', '2008-05-01')), 'SRM.Regatta_Event'), ('Optimist', 'SRM.Boat_Class'), 'SRM.Regatta_C') >>> SRM.Regatta_C.instance (* reg.epk) SRM.Regatta_C (('himmelfahrt', ('2008-05-01', '2008-05-01')), ('optimist', )) >>> SRM.Regatta.instance (* reg.epk) SRM.Regatta_C (('himmelfahrt', ('2008-05-01', '2008-05-01')), ('optimist', )) >>> SRM.Regatta_C.instance (* reg.epk_raw, raw = True) SRM.Regatta_C (('himmelfahrt', ('2008-05-01', '2008-05-01')), ('optimist', )) >>> bir = BiR (b.epk_raw, reg.epk_raw, skipper = s.epk_raw, raw = True) >>> bir.registration_date == today True >>> prepr (bir.epk_raw) ((('Optimist', 'SRM.Boat_Class'), '1107', 'AUT', '', 'SRM.Boat'), (('Himmelfahrt', (('finish', '2008-05-01'), ('start', '2008-05-01')), 'SRM.Regatta_Event'), ('Optimist', 'SRM.Boat_Class'), 'SRM.Regatta_C'), 'SRM.Boat_in_Regatta') >>> BiR.instance (* bir.epk_raw, raw = True) SRM.Boat_in_Regatta ((('optimist', ), 1107, 'AUT', ''), (('himmelfahrt', ('2008-05-01', '2008-05-01')), ('optimist', ))) >>> print (bir.FO.pid) 9 >>> print (bir.FO.right.left.pid) 6 >>> print (bir.FO.left) Optimist, AUT 1107 >>> print (bir.FO.left.left) Optimist >>> print (bir.FO.left.left.name) Optimist >>> print (bir.FO.right) Himmelfahrt 2008-05-01, Optimist >>> print (bir.FO.right.left) Himmelfahrt 2008-05-01 >>> print (bir.FO.right.left.date) 2008-05-01 >>> print (bir.FO.right.left.date.start) 2008-05-01 >>> print (bir.FO.right.left.date.finish) 2008-05-01 >>> print (getattr (bir.FO, "pid")) 9 >>> print (getattr (bir.FO, 
"right.left.pid")) 6 >>> print (getattr (bir.FO, "left")) Optimist, AUT 1107 >>> print (getattr (bir.FO, "left.left")) Optimist >>> print (getattr (bir.FO, "left.left.name")) Optimist >>> print (getattr (bir.FO, "right")) Himmelfahrt 2008-05-01, Optimist >>> print (getattr (bir.FO, "right.left")) Himmelfahrt 2008-05-01 >>> print (getattr (bir.FO, "right.left.date")) 2008-05-01 >>> print (getattr (bir.FO, "right.left.date.finish")) 2008-05-01 >>> sorted (reg.boats) [SRM.Boat_in_Regatta ((('optimist', ), 1107, 'AUT', ''), (('himmelfahrt', ('2008-05-01', '2008-05-01')), ('optimist', )))] >>> sort_key = TFL.Sorted_By ("-regatta.event.date.start", "skipper.person.last_name", "skipper.person.first_name") >>> print (sort_key) <Sorted_By: Descending-Getter function for `.regatta.event.date.start`, Getter function for `.skipper.person.last_name`, Getter function for `.skipper.person.first_name`> >>> print (BiR.E_Type.sort_key_pm (sort_key)) <Sorted_By: Getter function for `.relevant_root.type_name`, <Sorted_By: Descending-Getter function for `.regatta.event.date.start`, Getter function for `.skipper.person.last_name`, Getter function for `.skipper.person.first_name`>> >>> list (BiR.query (sort_key = sort_key)) [SRM.Boat_in_Regatta ((('optimist', ), 1107, 'AUT', ''), (('himmelfahrt', ('2008-05-01', '2008-05-01')), ('optimist', )))] >>> list (BiR.query_s (sort_key = sort_key)) [SRM.Boat_in_Regatta ((('optimist', ), 1107, 'AUT', ''), (('himmelfahrt', ('2008-05-01', '2008-05-01')), ('optimist', )))] >>> df = SRM.Regatta.AQ.event.date.start.EQ ("2008") >>> df Q.left.date.start.between (datetime.date(2008, 1, 1), datetime.date(2008, 12, 31)) >>> AQ = BiR.AQ.Select (MOM.Attr.Selector.sig) >>> q = SRM.Regatta.query_s ().filter (df) >>> fs = tuple (x.QR for x in AQ.regatta.Unwrapped_Atoms) >>> fsn = tuple (x._name for x in fs) >>> fss = ('left.__raw_name', 'left.date') >>> fst = ('left', ) >>> AQ <Attr.Type.Querier.E_Type for SRM.Boat_in_Regatta> >>> AQ._attr_selector <MOM.Attr.Selector.Kind sig_attr> >>> AQ.left._attr_selector <MOM.Attr.Selector.Kind sig_attr> >>> AQ.right.left.date._attr_selector <MOM.Attr.Selector.Kind sig_attr> >>> AQ.Attrs (<left.AQ [Attr.Type.Querier Id_Entity]>, <right.AQ [Attr.Type.Querier Id_Entity]>) >>> AQ.left.Attrs (<left.left.AQ [Attr.Type.Querier Id_Entity]>, <left.sail_number.AQ [Attr.Type.Querier Raw]>, <left.nation.AQ [Attr.Type.Querier Ckd]>, <left.sail_number_x.AQ [Attr.Type.Querier String]>) >>> AQ.right.left.date.Attrs (<right.left.date.start.AQ [Attr.Type.Querier Date]>, <right.left.date.finish.AQ [Attr.Type.Querier Date]>) >>> print (AQ.E_Type.type_name) SRM.Boat_in_Regatta >>> print (AQ.left.E_Type.type_name) SRM.Boat >>> print (AQ.right.left.date.E_Type.type_name) MOM.Date_Interval_C >>> for aq in BiR.AQ.Attrs_Transitive : ... 
prepr ((aq, aq.E_Type.type_name if aq.E_Type and aq.E_Type.PNS else "-"*5)) (<left.AQ [Attr.Type.Querier Id_Entity]>, 'SRM.Boat') (<left.left.AQ [Attr.Type.Querier Id_Entity]>, 'SRM.Boat_Class') (<left.left.name.AQ [Attr.Type.Querier String]>, '-----') (<left.left.beam.AQ [Attr.Type.Querier Ckd]>, '-----') (<left.left.loa.AQ [Attr.Type.Querier Ckd]>, '-----') (<left.left.max_crew.AQ [Attr.Type.Querier Ckd]>, '-----') (<left.left.sail_area.AQ [Attr.Type.Querier Ckd]>, '-----') (<left.sail_number.AQ [Attr.Type.Querier Raw]>, '-----') (<left.nation.AQ [Attr.Type.Querier Ckd]>, '-----') (<left.sail_number_x.AQ [Attr.Type.Querier String]>, '-----') (<left.name.AQ [Attr.Type.Querier String]>, '-----') (<right.AQ [Attr.Type.Querier Id_Entity]>, 'SRM.Regatta') (<right.left.AQ [Attr.Type.Querier Id_Entity]>, 'SRM.Regatta_Event') (<right.left.name.AQ [Attr.Type.Querier String]>, '-----') (<right.left.date.AQ [Attr.Type.Querier Composite]>, 'MOM.Date_Interval_C') (<right.left.date.start.AQ [Attr.Type.Querier Date]>, '-----') (<right.left.date.start.day.AQ [Attr.Type.Querier Ckd]>, '-----') (<right.left.date.start.month.AQ [Attr.Type.Querier Ckd]>, '-----') (<right.left.date.start.year.AQ [Attr.Type.Querier Ckd]>, '-----') (<right.left.date.finish.AQ [Attr.Type.Querier Date]>, '-----') (<right.left.date.finish.day.AQ [Attr.Type.Querier Ckd]>, '-----') (<right.left.date.finish.month.AQ [Attr.Type.Querier Ckd]>, '-----') (<right.left.date.finish.year.AQ [Attr.Type.Querier Ckd]>, '-----') (<right.left.date.alive.AQ [Attr.Type.Querier Boolean]>, '-----') (<right.left.club.AQ [Attr.Type.Querier Id_Entity]>, 'SRM.Club') (<right.left.club.name.AQ [Attr.Type.Querier String]>, '-----') (<right.left.club.long_name.AQ [Attr.Type.Querier String]>, '-----') (<right.left.desc.AQ [Attr.Type.Querier String]>, '-----') (<right.left.is_cancelled.AQ [Attr.Type.Querier Boolean]>, '-----') (<right.left.perma_name.AQ [Attr.Type.Querier String]>, '-----') (<right.left.year.AQ [Attr.Type.Querier Ckd]>, '-----') (<right.boat_class.AQ [Attr.Type.Querier Id_Entity]>, 'SRM._Boat_Class_') (<right.boat_class.name.AQ [Attr.Type.Querier String]>, '-----') (<right.discards.AQ [Attr.Type.Querier Ckd]>, '-----') (<right.is_cancelled.AQ [Attr.Type.Querier Boolean]>, '-----') (<right.kind.AQ [Attr.Type.Querier String]>, '-----') (<right.races.AQ [Attr.Type.Querier Ckd]>, '-----') (<right.result.AQ [Attr.Type.Querier Composite]>, 'SRM.Regatta_Result') (<right.result.date.AQ [Attr.Type.Querier Ckd]>, '-----') (<right.result.software.AQ [Attr.Type.Querier String]>, '-----') (<right.result.status.AQ [Attr.Type.Querier String]>, '-----') (<right.starters_rl.AQ [Attr.Type.Querier Ckd]>, '-----') (<right.perma_name.AQ [Attr.Type.Querier String]>, '-----') (<right.races_counted.AQ [Attr.Type.Querier Ckd]>, '-----') (<right.year.AQ [Attr.Type.Querier Ckd]>, '-----') (<skipper.AQ [Attr.Type.Querier Id_Entity]>, 'SRM.Sailor') (<skipper.left.AQ [Attr.Type.Querier Id_Entity]>, 'PAP.Person') (<skipper.left.last_name.AQ [Attr.Type.Querier String_FL]>, '-----') (<skipper.left.first_name.AQ [Attr.Type.Querier String_FL]>, '-----') (<skipper.left.middle_name.AQ [Attr.Type.Querier String]>, '-----') (<skipper.left.title.AQ [Attr.Type.Querier String]>, '-----') (<skipper.left.lifetime.AQ [Attr.Type.Querier Composite]>, 'MOM.Date_Interval_lifetime') (<skipper.left.lifetime.start.AQ [Attr.Type.Querier Date]>, '-----') (<skipper.left.lifetime.start.day.AQ [Attr.Type.Querier Ckd]>, '-----') (<skipper.left.lifetime.start.month.AQ [Attr.Type.Querier Ckd]>, 
'-----') (<skipper.left.lifetime.start.year.AQ [Attr.Type.Querier Ckd]>, '-----') (<skipper.left.lifetime.finish.AQ [Attr.Type.Querier Date]>, '-----') (<skipper.left.lifetime.finish.day.AQ [Attr.Type.Querier Ckd]>, '-----') (<skipper.left.lifetime.finish.month.AQ [Attr.Type.Querier Ckd]>, '-----') (<skipper.left.lifetime.finish.year.AQ [Attr.Type.Querier Ckd]>, '-----') (<skipper.left.lifetime.alive.AQ [Attr.Type.Querier Boolean]>, '-----') (<skipper.left.sex.AQ [Attr.Type.Querier Ckd]>, '-----') (<skipper.mna_number.AQ [Attr.Type.Querier Raw]>, '-----') (<skipper.nation.AQ [Attr.Type.Querier Ckd]>, '-----') (<skipper.club.AQ [Attr.Type.Querier Id_Entity]>, 'SRM.Club') (<skipper.club.name.AQ [Attr.Type.Querier String]>, '-----') (<skipper.club.long_name.AQ [Attr.Type.Querier String]>, '-----') (<place.AQ [Attr.Type.Querier Ckd]>, '-----') (<points.AQ [Attr.Type.Querier Ckd]>, '-----') (<yardstick.AQ [Attr.Type.Querier Ckd]>, '-----') (<creation.AQ [Attr.Type.Querier Rev_Ref]>, 'MOM.MD_Change') (<creation.c_time.AQ [Attr.Type.Querier Ckd]>, '-----') (<creation.c_user.AQ [Attr.Type.Querier Id_Entity]>, 'MOM.Id_Entity') (<creation.kind.AQ [Attr.Type.Querier String]>, '-----') (<creation.time.AQ [Attr.Type.Querier Ckd]>, '-----') (<creation.user.AQ [Attr.Type.Querier Id_Entity]>, 'MOM.Id_Entity') (<last_change.AQ [Attr.Type.Querier Rev_Ref]>, 'MOM.MD_Change') (<last_change.c_time.AQ [Attr.Type.Querier Ckd]>, '-----') (<last_change.c_user.AQ [Attr.Type.Querier Id_Entity]>, 'MOM.Id_Entity') (<last_change.kind.AQ [Attr.Type.Querier String]>, '-----') (<last_change.time.AQ [Attr.Type.Querier Ckd]>, '-----') (<last_change.user.AQ [Attr.Type.Querier Id_Entity]>, 'MOM.Id_Entity') (<last_cid.AQ [Attr.Type.Querier Ckd]>, '-----') (<pid.AQ [Attr.Type.Querier Ckd]>, '-----') (<type_name.AQ [Attr.Type.Querier String]>, '-----') (<rank.AQ [Attr.Type.Querier Ckd]>, '-----') (<registration_date.AQ [Attr.Type.Querier Date]>, '-----') (<registration_date.day.AQ [Attr.Type.Querier Ckd]>, '-----') (<registration_date.month.AQ [Attr.Type.Querier Ckd]>, '-----') (<registration_date.year.AQ [Attr.Type.Querier Ckd]>, '-----') (<events.AQ [Attr.Type.Querier Rev_Ref]>, 'EVT.Event') (<events.date.AQ [Attr.Type.Querier Composite]>, 'MOM.Date_Interval') (<events.date.start.AQ [Attr.Type.Querier Date]>, '-----') (<events.date.start.day.AQ [Attr.Type.Querier Ckd]>, '-----') (<events.date.start.month.AQ [Attr.Type.Querier Ckd]>, '-----') (<events.date.start.year.AQ [Attr.Type.Querier Ckd]>, '-----') (<events.date.finish.AQ [Attr.Type.Querier Date]>, '-----') (<events.date.finish.day.AQ [Attr.Type.Querier Ckd]>, '-----') (<events.date.finish.month.AQ [Attr.Type.Querier Ckd]>, '-----') (<events.date.finish.year.AQ [Attr.Type.Querier Ckd]>, '-----') (<events.date.alive.AQ [Attr.Type.Querier Boolean]>, '-----') (<events.time.AQ
#!/usr/bin/env python3 # -*- encoding: utf-8 # SPDX-License-Identifier: MIT # Copyright (c) 2021 - 2021, <EMAIL> __banner__ = r""" ( ______ ____ _____ _______ _ _ /\ ____ _ | ____| / __ \ | __ \ |__ __| | | | | |/\| |___ \ _ (_) | |__ | | | | | |__) | | | | |__| | __) | (_) | __| | | | | | _ / | | | __ | |__ < _ | | | |__| | | | \ \ | | | | | | ___) | _ (_) |_| \____/ |_| \_\ |_| |_| |_| |____/ ( ) |/ ) """ # __banner__ class Engine: # { The Reference Implementation of FORTH^3 : p-unity } def __init__(self, run=None, run_tests=1, **kwargs): stack = kwargs.get('stack', []) memory = kwargs.get('memory', {}) self.root = TASK(self, root=True, stack=stack, memory=memory) self.call = CALL(self) self.tape = None self.sandbox = kwargs.get('sandbox', 0) self.guards = kwargs.get('guards', "") if not run == None: self.guards = "```" self.digits = {} for digit in "#$%-01234567890": self.digits[digit] = True vis = kwargs.get('vis', None) def load(self, vis, names): for name_level in names.split(" "): name, level = tuple(name_level.split(":")) if self.sandbox > 0 and self.sandbox < int(level): continue exec(f"from .WORDS import F_{name}") exec(f"self.{name} = F_{name}.LIB(self, self.root)") exec(f"if vis: vis.before_import('{name}', self.{name})") exec(f"self.import_lib(vis, self.{name})") exec(f"if vis: vis.after_import('{name}', self.{name})") if vis: vis.before_imports(self) # The :num indicates what level of sandbox applies to all words load(self, vis, "CORE:1 STACK:1 MATH:1 CONTROL:1") load(self, vis, "INPUT:3 OUTPUT:3 REPL:1") load(self, vis, "OBJECT:1 JSON:1") load(self, vis, "UNICODE:3 CURSES:6") load(self, vis, "HTTPS:6") load(self, vis, "ECDSA:1 HASHES:1 CHAINS:1") if vis: vis.after_imports(self) for level in [1, 2, 3]: if run_tests < level: break self.execute_tests(__tests__[level]) self.execute_tests(self.root.tests[level]) __banner__ = None if __banner__: self.execute(__banner__) if run: self.execute(run) def raise_SyntaxError(self, details): raise ForthSyntaxException(details) def raise_RuntimeError(self, details): raise ForthRuntimeException(details) symbol_map = { "bang": "!", "at": "@", "hash": "#", "dollar": "$", "tick": "'", "quote": '"', "btick": "`", "equal": "=", "under": "_", "tilde": "~", "minus": "-", "m": "-", "plus": "+", "pipe": "|", "slash": "\\", "divide": "/", "qmark": "?", "colon": ":", "semicolon": ";", "dot": ".", "comma": ",", "percent": "%", "carat": "^", "amper": "&", "times": "*", "lparen": "(", "rparen": ")", "langle": "<", "rangle": ">", "lbrack": "[", "rbrack": "]", "lbrace": "{", "rbrace": "}", "unicorn": "\u1F984", "rainbow": "\u1F308", "astonished": "\u1F632", } def __getattr__(self, attr): def impl(*args): if attr in self.root.words: depth = len(self.root.stack) self.root.stack.extend(args) self.execute(attr, include=True) result = tuple(self.root.stack[depth:]) self.root.stack = self.root.stack[:depth] return result return self.root.memory.get(attr, None) return impl def peek(self, addr, default=None): return self.root.memory.get(addr, default) def poke(self, addr, value): self.root.memory[addr] = value def save(self, save_memory=True, save_stack=True, save_words=False): self.tape = {} if save_stack: self.tape["stack"] = copy.copy(self.root.stack) if save_memory: self.tape["memory"] = copy.copy(self.root.memory) if save_words: self.tape["words"] = copy.copy(self.root.words) return self.tape def load(self): if self.tape: if 'stack' in self.tape: self.root.stack = copy.copy(self.tape["stack"]) else: self.root.stack = [] if 'memory' in self.tape: 
self.root.memory = copy.copy(self.tape["memory"]) if 'words' in self.tape: self.root.words = copy.copy(self.tape["words"]) def add_word(self, name, code, where=None): parts = name.lower().split("_") name = [] meta = None for part in parts: if meta is None: if part == "": meta = [] continue else: meta.append(part) continue name.append(self.symbol_map.get(part, part)) name = "".join(name) where = where if where else self.root # if name in where: # raise ForthException(f"{name}: error(-4): Word Already Defined") if name in where.word_immediate: del where.word_immediate[name] if not meta is None: if "i" in meta[0]: where.word_immediate[name] = True where.words[name] = code argc = code.__code__.co_argcount if argc > 3: where.word_argc[name] = argc - 3 where.tests[1].append(code.__doc__) return name def add_sigil(self, name, code, where=None): parts = name.lower().split("_") name = [] meta = None for part in parts: if meta is None: if part == "": meta = [] continue else: meta.append(part) continue name.append(self.symbol_map.get(part, part)) name = "".join(name) where = where if where else self.root if name in where.word_immediate: del where.sigil_immediate[name] if meta is not None: if "i" in meta[0]: where.sigil_immediate[name] = True # if name in where: # raise ForthException(f"{name}: error(-4): Sigil Already Defined") where.sigils[name] = code where.tests[1].append(code.__doc__) return name def import_lib(self, vis, source, where=None): word_names = [] sigil_names = [] for fname in dir(source): parts = fname.split("_") if len(parts) > 1 and parts[0][:4] == "word": word = getattr(source, fname) word_names.append((word.__code__.co_firstlineno, fname)) if len(parts) > 1 and parts[0][:5] == "sigil": sigil = getattr(source, fname) sigil_names.append((sigil.__code__.co_firstlineno, fname)) def full2short(fname): parts = fname.split("_") name = [] meta = None for part in parts: if meta is None: if part == "": meta = [] continue else: meta.append(part) continue name.append(part) return "_".join(name) sigil_names.sort() for order, fname in sigil_names: code = getattr(source, fname) tname = self.add_sigil(fname[6:], code) if not vis: continue vis.visit_sigil(code, fname, full2short(fname)[6:], tname) word_names.sort() for order, fname in word_names: code = getattr(source, fname) tname = self.add_word(fname[5:], code) if not vis: continue vis.visit_word(code, fname, full2short(fname)[5:], tname) if not where: where = self.root where.tests[2].append(source.__doc__) @staticmethod def to_number(e, t, c, token): if not isinstance(token, str): return (True, token) if not token[0] in e.digits: return (False, None) if token in e.root.words or token in t.words: return (False, None) token = token.replace("_", "") if token in ["", "#", "$", "%"]: return (False, None) base = t.base if token[0] == "#": base = 10 token = token[1:] elif token[0] == "$": base = 16 token = token[1:] elif token[0] == "%": base = 2 token = token[1:] if token[0] == "-": if len(token) == 1: return (False, None) if not token[1].isdigit(): return (False, None) if "j" in token: return (True, complex(token)) else: if "." 
in token: if base == 10: return (True, Decimal(token)) else: return (True, Decimal(int(token, base))) else: return (True, int(token, base)) @staticmethod def state_INTERPRET(e, t, c, token): # ic(token, t.stack, c.stack) if not isinstance(token, str): if not isinstance(token, tuple): t.stack.append(token) return if len(token) == 1: t.stack.extend(token) elif len(token) == 2: Engine.run(e, t, c, token, token_l=None) else: print(token) e.raise_RuntimeError("!: error(-1): Unknown XT") return if len(token) == 0: return token_l = token.lower() if isinstance(token, str) else token is_number, value = e.to_number(e, t, c, token_l) if is_number: t.stack.append(value) return Engine.run(e, t, c, token, token_l) @staticmethod def run(e, t, c, token, token_l=None): if isinstance(token, tuple): if len(token) == 1: t.stack.extend(token) return code, argc = token else: if not (token_l in t.words or token_l in e.root.words): for sigil_len in [5, 4, 3, 2, 1]: sigil = token_l[:sigil_len] if sigil in t.sigils: t.sigils[sigil](e, t, c, token, start=True) return if sigil in e.root.sigils: e.root.sigils[sigil](e, t, c, token, start=True) return details = f"{token_l}: error(-13): word not found" raise ForthException(details) if token_l in t.words: code = t.words[token_l] argc = t.word_argc.get(token_l, 0) else: code = e.root.words[token_l] argc = e.root.word_argc.get(token_l, 0) if isinstance(code, list): t.last_call = token_l if c.depth == 0: c.depth += 1 e.execute_tokens(e, t, c, code) c.depth -= 1 else: e.execute_tokens(e, t, CALL(e, c), code) return if isinstance(code, tuple): if len(token) == 1: t.stack.extend(code) return elif len(code) == 2: code, argc = code else: e.raise_RuntimeError("!: error(-1): Unknown XT") if argc > len(t.stack): details = f"{token}: error(-4): Needs {argc} arg(s)" raise ForthException(details) args = [] if argc > 0: t.stack, args = t.stack[:-argc], t.stack[-argc:] result = code(e, t, c, *args) if result is not None: if isinstance(result, tuple): t.stack.extend(result) else: t.stack.append(result) @staticmethod def execute_tokens(e, t, c, tokens): for token in tokens: if token in ["#"]: break t.state(e, t, c, token) if c.EXIT: break @staticmethod def execute_token(e, t, c, token): if not token in ["#"]: t.state(e, t, c, token) def execute_tests(self, tests): if not tests: return for test in tests: if not test: continue task = TASK(self) call = CALL(self, self.call) for line in test.split("\n"): line = line.strip() call.line = line if line == "" or line[0] in ["#"]: continue if line == "--END--": break f_count = task.test["f"] if 1: # try: call.tokens = line.split() while len(call.tokens): token = call.tokens.pop(0) if token in ["#"]: break state = task.state state(self, task, call, token) if call.EXIT: break # except Exception as ex: # print(ex) # task.test["f"] += 1 if not f_count == task.test["f"]: print("!!! ", line) self.root.test["p"] += task.test["p"] self.root.test["f"]
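# --- Usage sketch: driving the Engine above ----------------------------------
# Hedged example only: the import path is hypothetical (this module lives in a
# package that also provides TASK, CALL and the WORDS libraries), and the
# arithmetic words are assumed to come from the CORE/MATH word sets loaded in
# __init__.
from p_unity.FORTH import Engine   # hypothetical import path

e = Engine(run_tests=0)            # skip the built-in self tests
e.execute("1 2 + 3 *")             # interpret a short program; expected to
print(e.root.stack)                #   leave 9 on the data stack, assuming the
                                   #   loaded word sets define + and *

e.poke("answer", 42)               # write a cell of task memory...
print(e.peek("answer"))            # ...and read it back -> 42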
class Console(object): """ Represents the standard input,output,and error streams for console applications. This class cannot be inherited. """ @staticmethod def Beep(frequency=None,duration=None): """ Beep(frequency: int,duration: int) Plays the sound of a beep of a specified frequency and duration through the console speaker. frequency: The frequency of the beep,ranging from 37 to 32767 hertz. duration: The duration of the beep measured in milliseconds. Beep() Plays the sound of a beep through the console speaker. """ pass @staticmethod def Clear(): """ Clear() Clears the console buffer and corresponding console window of display information. """ pass @staticmethod def MoveBufferArea(sourceLeft,sourceTop,sourceWidth,sourceHeight,targetLeft,targetTop,sourceChar=None,sourceForeColor=None,sourceBackColor=None): """ MoveBufferArea(sourceLeft: int,sourceTop: int,sourceWidth: int,sourceHeight: int,targetLeft: int,targetTop: int,sourceChar: Char,sourceForeColor: ConsoleColor,sourceBackColor: ConsoleColor) Copies a specified source area of the screen buffer to a specified destination area. sourceLeft: The leftmost column of the source area. sourceTop: The topmost row of the source area. sourceWidth: The number of columns in the source area. sourceHeight: The number of rows in the source area. targetLeft: The leftmost column of the destination area. targetTop: The topmost row of the destination area. sourceChar: The character used to fill the source area. sourceForeColor: The foreground color used to fill the source area. sourceBackColor: The background color used to fill the source area. MoveBufferArea(sourceLeft: int,sourceTop: int,sourceWidth: int,sourceHeight: int,targetLeft: int,targetTop: int) Copies a specified source area of the screen buffer to a specified destination area. sourceLeft: The leftmost column of the source area. sourceTop: The topmost row of the source area. sourceWidth: The number of columns in the source area. sourceHeight: The number of rows in the source area. targetLeft: The leftmost column of the destination area. targetTop: The topmost row of the destination area. """ pass @staticmethod def OpenStandardError(bufferSize=None): """ OpenStandardError(bufferSize: int) -> Stream Acquires the standard error stream,which is set to a specified buffer size. bufferSize: The internal stream buffer size. Returns: The standard error stream. OpenStandardError() -> Stream Acquires the standard error stream. Returns: The standard error stream. """ pass @staticmethod def OpenStandardInput(bufferSize=None): """ OpenStandardInput(bufferSize: int) -> Stream Acquires the standard input stream,which is set to a specified buffer size. bufferSize: The internal stream buffer size. Returns: The standard input stream. OpenStandardInput() -> Stream Acquires the standard input stream. Returns: The standard input stream. """ pass @staticmethod def OpenStandardOutput(bufferSize=None): """ OpenStandardOutput(bufferSize: int) -> Stream Acquires the standard output stream,which is set to a specified buffer size. bufferSize: The internal stream buffer size. Returns: The standard output stream. OpenStandardOutput() -> Stream Acquires the standard output stream. Returns: The standard output stream. """ pass @staticmethod def Read(): """ Read() -> int Reads the next character from the standard input stream. Returns: The next character from the input stream,or negative one (-1) if there are currently no more characters to be read. 
""" pass @staticmethod def ReadKey(intercept=None): """ ReadKey(intercept: bool) -> ConsoleKeyInfo Obtains the next character or function key pressed by the user. The pressed key is optionally displayed in the console window. intercept: Determines whether to display the pressed key in the console window. true to not display the pressed key; otherwise,false. Returns: A System.ConsoleKeyInfo object that describes the System.ConsoleKey constant and Unicode character,if any,that correspond to the pressed console key. The System.ConsoleKeyInfo object also describes,in a bitwise combination of System.ConsoleModifiers values,whether one or more SHIFT,ALT,or CTRL modifier keys was pressed simultaneously with the console key. ReadKey() -> ConsoleKeyInfo Obtains the next character or function key pressed by the user. The pressed key is displayed in the console window. Returns: A System.ConsoleKeyInfo object that describes the System.ConsoleKey constant and Unicode character,if any,that correspond to the pressed console key. The System.ConsoleKeyInfo object also describes,in a bitwise combination of System.ConsoleModifiers values,whether one or more SHIFT,ALT,or CTRL modifier keys was pressed simultaneously with the console key. """ pass @staticmethod def ReadLine(): """ ReadLine() -> str Reads the next line of characters from the standard input stream. Returns: The next line of characters from the input stream,or null if no more lines are available. """ pass @staticmethod def ResetColor(): """ ResetColor() Sets the foreground and background console colors to their defaults. """ pass @staticmethod def SetBufferSize(width,height): """ SetBufferSize(width: int,height: int) Sets the height and width of the screen buffer area to the specified values. width: The width of the buffer area measured in columns. height: The height of the buffer area measured in rows. """ pass @staticmethod def SetCursorPosition(left,top): """ SetCursorPosition(left: int,top: int) Sets the position of the cursor. left: The column position of the cursor. top: The row position of the cursor. """ pass @staticmethod def SetError(newError): """ SetError(newError: TextWriter) Sets the System.Console.Error property to the specified System.IO.TextWriter object. newError: A stream that is the new standard error output. """ pass @staticmethod def SetIn(newIn): """ SetIn(newIn: TextReader) Sets the System.Console.In property to the specified System.IO.TextReader object. newIn: A stream that is the new standard input. """ pass @staticmethod def SetOut(newOut): """ SetOut(newOut: TextWriter) Sets the System.Console.Out property to the specified System.IO.TextWriter object. newOut: A stream that is the new standard output. """ pass @staticmethod def SetWindowPosition(left,top): """ SetWindowPosition(left: int,top: int) Sets the position of the console window relative to the screen buffer. left: The column position of the upper left corner of the console window. top: The row position of the upper left corner of the console window. """ pass @staticmethod def SetWindowSize(width,height): """ SetWindowSize(width: int,height: int) Sets the height and width of the console window to the specified values. width: The width of the console window measured in columns. height: The height of the console window measured in rows. """ pass @staticmethod def Write(*__args): """ Write(value: Single) Writes the text representation of the specified single-precision floating-point value to the standard output stream. value: The value to write. 
Write(value: int) Writes the text representation of the specified 32-bit signed integer value to the standard output stream. value: The value to write. Write(value: float) Writes the text representation of the specified double-precision floating-point value to the standard output stream. value: The value to write. Write(value: Decimal) Writes the text representation of the specified System.Decimal value to the standard output stream. value: The value to write. Write(value: UInt32) Writes the text representation of the specified 32-bit unsigned integer value to the standard output stream. value: The value to write. Write(value: object) Writes the text representation of the specified object to the standard output stream. value: The value to write,or null. Write(value: str) Writes the specified string value to the standard output stream. value: The value to write. Write(value: Int64) Writes the text representation of the specified 64-bit signed integer value to the standard output stream. value: The value to write. Write(value: UInt64) Writes the text representation of the specified 64-bit unsigned integer value to the standard output stream. value: The value to write. Write(format: str,arg0: object,arg1: object,arg2: object) Writes the text representation of the specified objects to the standard output stream using the specified format information. format: A composite format string (see Remarks). arg0: The first object to write using format. arg1: The second object to write using format. arg2: The third object to write using format. Write(format: str,arg0: object,arg1: object,arg2: object,arg3: object) Writes the text representation of the specified objects and variable-length parameter list to the standard output stream using the specified format information. format: A composite format string (see Remarks). arg0: The first object to write using format. arg1: The second object to write using format. arg2: The third object
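# --- Usage sketch: the System.Console surface documented above ---------------
# This class is only a documentation stub (every method body is "pass"); the
# calls below do real work only under IronPython / .NET, where System.Console
# is the live implementation. The text written is illustrative.
from System import Console        # .NET namespace import (IronPython only)

Console.Clear()
Console.SetCursorPosition(0, 0)
Console.Write("{0} + {1} = {2}", 2, 3, 5)   # composite-format overload above
key = Console.ReadKey(True)                 # read a key without echoing it
Console.ResetColor()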
<gh_stars>0 """ A Tkinter UI that allows me to scroll through the images and select regions on the images that should be tagged by the solution. """ # pyright: reportUnknownVariableType=false, reportUnknownMemberType=false import os import tkinter as tk from tkinter import filedialog from typing import Union import threading import src.model as model import ui_model as uiModel import data_access_layer as dal # To understand weak references, please see: # https://docs.python.org/3.8/c-api/weakref.html # and: https://www.geeksforgeeks.org/weak-references-in-python/ ROOT_TITLE = "Image data annotator" # The number of seconds until it automatically moves to the next image NEXT_IMAGE_SECONDS = 0.50 class DataAnnotatorUI: """The Tkinter UI class Here I have tried to focus as much as possible on just the UI layer, without any business or image logic. """ def __init__(self): # Setup the root window (title, initial size, etc.) self._root = tk.Tk() self.root.title(ROOT_TITLE) self.root.geometry("%sx%s" % (500, 500)) # _rootWindow.resizable(False,False) self.root.minsize(500, 500) self.root.configure(background="grey") self.root.bind("<KeyRelease>", self._onKeyUp) # type: ignore # Setup the main menu for the root window self.menubar = tk.Menu(self.root) self.filemenu = tk.Menu(self.menubar, tearoff=0) self.filemenu.add_command( label="Open folder...", command=self.promptUserForFolderToProcess ) self.filemenu.add_command( label="Save annotations", command=self._saveAnnotations ) self.menubar.add_cascade(label="File", menu=self.filemenu) self.root.config(menu=self.menubar) # Create the image _canvas self._canvas = tk.Canvas(self.root, borderwidth=0, highlightthickness=0) self._canvas.pack(fill="both", expand=True) self._canvas.bind("<Button-1>", self._onMouseDown) # type: ignore self._canvas.bind("<B1-Motion>", self._onMouseDrag) # type: ignore self._canvas.bind("<Configure>", self._onCanvasResize) # type: ignore @property def root(self) -> tk.Tk: """The root Tk window""" return self._root _root: tk.Tk @property def current(self) -> uiModel.AnnotatedImage | None: """The current annotated image view model""" if self._manager is None: return None return self._manager.current def mainloop(self): """Tinker speak for start the main message loop/pump to run the application.""" self.root.mainloop() # ############################################################################################## # region Image and image folder related members # ############################################################################################## def promptUserForFolderToProcess(self): """The function that processes the main "File|Open folder" menu item. This function then starts processing the images in that folder """ # https://docs.python.org/3.9/library/dialog.html#native-load-save-dialogs and # https://stackoverflow.com/questions/51192795/how-can-i-show-all-files-via-filedialog-askdirectory fileName: str = filedialog.askopenfilename( # type: ignore parent=self.root, title="Select any file in the folder to process..." 
) if not fileName: return folder: str = os.path.dirname(fileName) if not folder: return self.openFolder(folder) def _saveAnnotations(self): """Save the image annotations processed so far""" if self._manager is not None: dal.saveAnnotatedImagesToJsonFile(self._manager.saveFileName, self._manager) def openJsonSaveFile(self, file: str): """Process all the images in the given file name""" # If we don't do this, then any old rectangles hang around on the screen self._removeImageRegionRectangles() # Create annotated image objects for all the images in the selected file (self._manager, lastIndex) = dal.loadAnnotatedImagesFromJsonFile(file) if self._manager: numImages = len(self._manager) print("Found: ", numImages, " images") self._manager.moveToImage(lastIndex) self._manager.onWindowResized(newWindowSize=self._canvasSize) self.moveToImage(self._manager.currentIndex) def openFolder(self, folder: str): """Starts processing the images in the given folder by using the business model to scan the entire folder and find all the images that we need to process. """ # If we don't do this, then any old rectangles hang around on the screen self._removeImageRegionRectangles() # Create annotated image objects for all the images in the selected folder self._manager = dal.loadDirectory(folder) if self._manager is not None: self._manager.onWindowResized(self._canvasSize) numImages = len(self._manager) print("Found: ", numImages, " images") self.moveToImage(self._manager.currentIndex) def moveToNextImage(self): """Move to the next image""" assert self._manager self.moveToImage(self._manager.currentIndex + 1) def moveToImage(self, index: int): """Open the image with the given index (into our ordered collection of annotated images that we received from the model layer) """ assert self._manager if not self._manager.isValidIndex(index): return # First, we need to remove the saved image regions for our current image self._removeImageRegionRectangles() # Scale the image so it fits while retaining the correct aspect ratio # Only scale if we haven't already previously scaled the image (which is slow) # Store it back in our domain logic layer for faster access self._manager.moveToImage(index) # Update our on-screen image to the new image self._updateImage() # With a new image, we may have new active regions # (and we need to remove the old ones) self._redrawAllRectangles() # Update the window title current = self._manager.currentIndex total = len(self._manager) tagged = "*" if self._manager.current.isTagged else " " newTitle = f"{ROOT_TITLE} - {tagged}{current+1} of {total}" self.root.title(newTitle) # endregion # ############################################################################################## # region Event handlers # ############################################################################################## def _onKeyUp(self, event: tk.Event) -> None: # type: ignore # Check pre-conditions if self._manager is None: return # Move "left" 1 image in the collection if event.keysym == "Left": self._stopAutoMoveTimer() self.moveToImage(self._manager.currentIndex - 1) return # Move "right" 1 image in the collection if event.keysym == "Right": self._stopAutoMoveTimer() self.moveToNextImage() self._saveAnnotations() return # Move "left" to previously tagged image if event.keysym == "d": self._stopAutoMoveTimer() previousTaggedIndex = self._manager.scanForTaggedIndex(-1) if previousTaggedIndex is not None: self.moveToImage(previousTaggedIndex) self._saveAnnotations() return # Move "right" to the next tagged image 
if event.keysym == "f": self._stopAutoMoveTimer() nextTaggedIndex = self._manager.scanForTaggedIndex(+1) if nextTaggedIndex is not None: self.moveToImage(nextTaggedIndex) self._saveAnnotations() return # Jump to the very first image if event.keysym == "Home": self._stopAutoMoveTimer() self.moveToImage(0) return # Jump to the very last processed image if event.keysym == "End": self._stopAutoMoveTimer() self.moveToImage(self._manager.maxViewed) return # Auto-move through the images, without having to continuously press "right" if event.keysym == "space": if self._autoMoveTimer is None: self._startAutoMoveTimer() else: self._stopAutoMoveTimer() # Delete - remove all the rectangle selections on the current image if event.keysym == "Delete": self._stopAutoMoveTimer() # Clear the current active rectangle (if there is one) self._removeActiveImageRegion() # Next, remove the existing rectangles from the screen self._removeImageRegionRectangles() # Clear out existing canvas IDs # NOW we can clear all the current image's regions self._manager.current.regions.clear() # Save the new region if self._manager: self._saveAnnotations() return # Escape - remove current active rectangle selection if event.keysym == "Escape": self._stopAutoMoveTimer() self._removeActiveImageRegion() return # Save the current active region if event.char in ["s"]: self._stopAutoMoveTimer() self._removeImageRegionRectangles() # Clear out existing canvas IDs self._manager.addActiveRegion() # Add our current active region self._redrawAllRectangles() # Now we can redraw them if self._manager: # Save the new region self._saveAnnotations() return def _onMouseDown(self, event: tk.Event) -> None: # type: ignore """Called when the user clicks on the image canvas. Used to capture the starting screen coordinates of the selection rectangle """ if self._manager is None: return newRegion = model.Region2d(event.x, event.y, 1, 1) self._manager.updateActiveScreenRegion(newRegion) def _onMouseDrag(self, event: tk.Event) -> None: # type: ignore """Called when the user drags the mouse, selecting a rectangular region of the image (kind of the entire point of the app) """ if self._manager is None: return activeRegion = self._manager.activeRegion newRegion: model.Region2d | None = None if activeRegion is None: newRegion = model.Region2d(event.x, event.y, 1, 1) else: # We already have a rectangle, and we're dragging it currentRect = activeRegion.screenRegion if currentRect is not None: x = currentRect.x y = currentRect.y w = event.x - currentRect.x h = event.y - currentRect.y newRegion = model.Region2d(x, y, w, h) # Update the active region in the view model layer if newRegion is not None: activeRegion = self._manager.updateActiveScreenRegion(newRegion) self._drawRegion(activeRegion, "blue") def _onCanvasResize(self, event: tk.Event) -> None: # type: ignore """Called when our image canvas Tk widget is resized. 
When that happens, we need to also resize the image (keeping the aspect ratio) and resize the current region selection (if there is one) on the screen """ # Store the new _canvas size self._canvasSize = model.Size2d(event.width, event.height) # Pre-condition checks if self._manager is None: return # Update our on-screen image to the new image self._manager.onWindowResized(self._canvasSize) # Redraw the image with the newly scaled image self._updateImage() # Scale the on-screen rectangle(s) to the new displayed image scale self._redrawAllRectangles() # endregion # ############################################################################################## # region private helper methods # ############################################################################################## def _redrawAllRectangles(self): """Draws the active region and the image regions""" # Draw all the active regions if self._manager is None: return for region in self._manager.regions: self._drawRegion(region, "red") # Now (re)draw the active rectangle (if there is one) activeRegion = self._manager.activeRegion if activeRegion: self._drawRegion(activeRegion, "blue") def _removeImageRegionRectangles(self): """Remove the annotated image regions we currently have""" # Draw all the active regions if self._manager is None: return if self.current is None: return for region in self._manager.regions: self._canvas.delete(region.canvasRectId) region.canvasRectId = 0 # Also remove the active region rectangle self._removeActiveImageRegionRectangle() def _removeActiveImageRegion(self): """Remove the current activate regions the user has currently drawn but not saved""" self._removeActiveImageRegionRectangle() # Also tell the manager there's no active region if self._manager is not None: self._manager.activeRegion = None def
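# A minimal, self-contained sketch (hypothetical names, not the app's model-layer code) of
# the rescaling that _onCanvasResize relies on: a screen-space selection rectangle is scaled
# by the ratio between the old and the new canvas size before it is redrawn.
from collections import namedtuple

Size2d = namedtuple("Size2d", ["width", "height"])
Region2d = namedtuple("Region2d", ["x", "y", "w", "h"])

def rescale_region(region, old_size, new_size):
    """Return a copy of `region` scaled from old_size to new_size."""
    sx = new_size.width / old_size.width
    sy = new_size.height / old_size.height
    return Region2d(int(region.x * sx), int(region.y * sy),
                    max(1, int(region.w * sx)), max(1, int(region.h * sy)))

# Example: a 100x50 selection made on an 800x600 canvas, redrawn after a resize to 1024x768.
print(rescale_region(Region2d(40, 30, 100, 50), Size2d(800, 600), Size2d(1024, 768)))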
<filename>restware.py """ Restware is a plugin for bottle and a piece of middleware (wsgi) to help developers create more guideline-conforming REST APIs using bottle. .. codeauthor:: <NAME> <<EMAIL>> Released under the BSD 2-Clause license, http://opensource.org/licenses/BSD-2-Clause """ import StringIO import gzip import json import logging import sys import bottle from bottle import response, request class RestwarePlugin: """ This plugin is designed to fix a few peculiar behaviors that keep Bottle from being a really good engine for quickly developing a REST-esque API. This class conforms to Bottle's Plugin interface, http://bottlepy.org/docs/dev/plugindev.html#plugin-api The two issues rectified by this module are: 1. Errors (400+ status code responses) are now always JSON responses 2. All request body data is checked for JSON in case the memory limit imposed by bottle comes into play; all response body data are automatically treated as JSON if they can be treated that way You can configure just one thing: what route prefix should be handled as JSON for response body data. This lets your app serve up documentation (static HTML, likely) while a set of routes that make up your API's operations are all treated as if they're going to return JSON .. codeauthor:: <NAME> <<EMAIL>> """ name = "restwareplugin" api = 2 def __init__(self, apiBasePath='/api/', logger=None): """ Args: apiBasePath (str, optional): Set the base path under which your API operations/routes will lie. Defaults to /api/, but you can set it to '/' to force all responses be JSON logger (logging.Logger, optional): if you want custom logging, specify your own logger instance """ self.baseRulePath = apiBasePath self.logger = logger if not self.logger: self.logger = logging.getLogger("RestPlugin") handler = logging.StreamHandler(sys.stdout) handler.setFormatter(logging.Formatter("%(levelname)s %(module)s:%(funcName)s | %(message)s")) self.logger.addHandler(handler) self.logger.setLevel(logging.DEBUG) def setup(self, app): ''' Set all error status codes so that our own error handler function is used. We'll also go ahead and wrap that function with our Gzip and JSON'ing method (apply()). This way we get uniform behavior from our app, even when errors are returned: the client will never see HTML, only text or JSON. ''' def standardErrorHandlerFunc(errorInst): self.logger.warn("error, %s" % errorInst.status) if type(errorInst.body) is not dict: return {"message": errorInst.body} return errorInst.body for errorCode in [statusCode for statusCode, description in bottle.HTTP_CODES.iteritems() if statusCode >= 400]: self.logger.debug("applying for status-code %d" % errorCode) app.error_handler[int(errorCode)] = self.apply(standardErrorHandlerFunc, "error") def apply(self, callback, route): ''' This method wraps the given callback function so that we can try to serialize the response data as JSON, and then Gzip the returned string. This only applies in two scenarios: - for routes whose rule starts with some user-defined base path under which all API operations lie - for errors (400+) This only applies when the data returned from the callback if of type list or dict. If it is something else, then we let bottle do whatever it would normally do. Other data will be returned, but a warning message is printed to stdout if a /api/* route returns something we aren't able to serialize as JSON. We can also return pretty-printed JSON. 
All the client must do is specify the "pretty" query parameter and give it the "true" value: &pretty=true ''' def wrapper(*args, **kwargs): # perform pre operations to setup the request if necessary self.preprocessRequest(route) retval = callback(*args, **kwargs) return self.postprocessRequest(retval, route) return wrapper def preprocessRequest(self, route): """ This preprocessor specifically looks for POSTed JSON that exceeds bottle's MEMLIMIT (102400 bytes I think) """ request.jsonData = None if not request.headers.get("Content-Type", "").startswith("application/json"): # there is no JSON posted, so we can return self.logger.debug("No JSON to decode; finished") return # JSON is expected, so ensure it is either already parsed by bottle, or parse it ourselves if hasattr(request, "json") and request.json is not None: # It is already parsed, so there's nothing to do self.logger.debug("JSON data already parsed by bottle") request.jsonData = request.json return self.logger.debug("Attempting to parse JSON from request.body since request.json is missing/None") # ensure some data was actually POSTed if hasattr(request, "body") and request.body: try: # TODO: set encoding based on request header request.jsonData = json.load(request.body) self.logger.debug("Decoded JSON successfully") except Exception, e: self.logger.warn("Request header Content-Type indicates JSON, and we failed to parse request.body: %s" % e) request.body.seek(0) self.logger.warn("Request body (first 32bytes)=%s" % repr(request.body.read(32))) else: self.logger.warn("Request header Content-Type indicates JSON, but no data was POSTed?") def postprocessRequest(self, retval, route): """ Ensures the output is JSON. That's all this method does. """ JSONed = False GZIPPED = False if retval is None: self.logger.warn("retval is None!") return retval # Is this request under the a path we're enforcing JSON output for? if (route is not None and hasattr(route, 'rule') and route.rule.startswith(self.baseRulePath)) or response.status_code >= 400: # It is. Try to serialize the returned data as JSON self.logger.debug("response should be JSON") # First, is the data even something we can serialize as JSON? # if the retval is not a dict, we don't know what to do with it, so just be transparent if type(retval) not in (dict, list): self.logger.error("\033[41;1m You are trying to send the client data that doesn't look like it should be JSON (%s). Fix this! \033[0m" % type(retval)) # TODO: consider raising an exception so as to generate a server error (500), forcing the app developer # to confront why/how they are sending back something that doesn't make much sense serializing as JSON else: # Was the "pretty" query parameter set? if request.query.get("pretty") == 'true': # It was. Indent & sort keys self.logger.debug("found pretty query param, value is true, prettying JSON") retval = json.dumps(retval, indent=4, sort_keys=True) else: # It was not. By default, we'll use the most compact representation retval = json.dumps(retval, separators=(',', ':')) response.content_type = "application/json" self.logger.debug("%d bytes of JSON created" % len(retval)) JSONed = True else: self.logger.debug("response should NOT be JSON") # Gzipping the response # Can the client even handle gzipped response bodies? httpRespObj = None if isinstance(retval, bottle.HTTPResponse): # we'll keep the HTTPResponse so we can update it after gzipping. 
self.logger.debug("Found HTTPResponse instance") httpRespObj = retval if type(retval.body) in (str, unicode): retval = retval.body elif hasattr(retval.body, "read"): retval = retval.body.read() else: self.logger.error("HTTPResponse.body attr is not a str and does not have a read() method!") raise ValueError("HTTPResponse.body is not sane: attr is not a str, and is not a file-like object") elif isinstance(retval, bottle.HTTPError): self.logger.debug("Found HTTPError instance") httpRespObj = retval if type(retval.body) in (str, unicode): retval = retval.body elif hasattr(retval.body, "read"): retval = retval.body.read() else: self.logger.error("HTTPError.body attr is not a str and does not have a read() method!") raise ValueError("HTTPError.body is not sane: attr is not a str, and is not a file-like object") if 'gzip' in request.headers.get("Accept-Encoding", "") and len(retval) > 0: self.logger.debug("client accepts gzip, gzipping data") # the client handle gzipped data, so lets gzip out data self.logger.debug("original response data was %d bytes" % len(retval)) sio = StringIO.StringIO() gzFile = gzip.GzipFile(fileobj=sio, mode='wb', compresslevel=6) gzFile.write(retval) gzFile.close() sio.seek(0) retval = sio.read() sio.close() self.logger.debug("new gzipped response data is %d bytes" % len(retval)) GZIPPED = True # Were we given an HTTPResponse isntance? If so, we need to update it a bit if httpRespObj: self.logger.debug("Updating HTTPResponse instance with gzipped content, headers") httpRespObj.body = retval httpRespObj['Content-Length'] = str(len(retval)) httpRespObj['Content-Encoding'] = 'gzip' else: # update the content-length (it is already set) and add the content-encoding header response.set_header('Content-Length', str(len(retval))) response.set_header('Content-Encoding', 'gzip') else: self.logger.debug("client either doesn't accept gzip or there's no data to return; len(retval)=%d" % len(retval)) self.logger.info("RESPONSE %s gzipped:%s json:%s size:%dB" % (response.status_code, GZIPPED, JSONed, len(retval))) if httpRespObj: return httpRespObj return retval class Restware(object): """ Middleware that handles de-gzipping incoming request body data. This should work with any other WSGI app obviously, but it is tested with Bottle, and it is designed to work in conjunction with the RestwarePlugin (which handles gzipping response body data). .. codeauthor:: <NAME> <sm<EMAIL>> """ def __init__(self, app, logger=None): """ Args: app (wsgi app instance, required): the app to wrap with this middleware logger (logging.Logger, optional): provide a logger instance to use """ self.app = app self.logger = logger if not
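# The plugin above gzips response bodies with Python 2's StringIO + gzip.GzipFile, and the
# Restware middleware is meant to reverse that for incoming request bodies. A minimal,
# self-contained Python 3 sketch of the same round trip (io.BytesIO replaces StringIO;
# the helper names are illustrative, not part of this module's API):
import gzip
import io

def gzip_bytes(data: bytes, level: int = 6) -> bytes:
    """Compress a response body in memory, as RestwarePlugin does (compresslevel=6)."""
    buf = io.BytesIO()
    with gzip.GzipFile(fileobj=buf, mode="wb", compresslevel=level) as gz:
        gz.write(data)
    return buf.getvalue()

def gunzip_bytes(data: bytes) -> bytes:
    """Decompress a gzipped request body, as the Restware middleware would."""
    with gzip.GzipFile(fileobj=io.BytesIO(data), mode="rb") as gz:
        return gz.read()

body = b'{"message": "hello"}'
assert gunzip_bytes(gzip_bytes(body)) == body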
# this is the camera class which can do the rayCasting and is moved throughout the maze # inspiration for raycasting from 3d sage's explanation in C : https://www.youtube.com/watch?v=gYRrGTC7GtA&t=423s # and also inspired from https://lodev.org/cgtutor/raycasting.html # cmu_112_graphics is from https://www.cs.cmu.edu/~112/notes/notes-animations-part2.html from cmu_112_graphics import * # from main_v1 import * # new changes # ive pretty much given up on texture mapping walls as its too slow for python # unless i port the entire game over to pygame which would be painful # i have however done stuff with sprite creation sans texture mapping # I credit the inspiration for my sprite creation technique to current TA Terry Feng's # implementation in his raycaster: https://www.youtube.com/watch?v=PM4WyIsWJ_8&ab_channel=TerryFeng # github: https://github.com/feng-terry/pacman3d/blob/master/rayCast.py # I was inspired by his technique of storing sprites that were seen in sets # so that they wouldn't be created multiple times # and using dictionaries and a distance List for drawing images # unlike his version though because I was casting sprites from images # its harder in a way to centre them I guess # So I basically also draw them as a wall as well to better visualize their position # and I down scale the image from its original size using cmu_112_graphics.py from https://www.cs.cmu.edu/~112/notes/notes-animations-part2.html # depending on how far away the sprite is from the player. # for the grudge you can visualize it as evil aura XD # for cockroach maybe a swarm # for the ring I guess its shiny gleam # pls visualize! import math class Camera(): def __init__(self, x, y, dx, dy, angle): self.x = x self.y = y self.dx = dx self.dy = dy self.angle = angle # code for a possible birds eye view def drawPlayer(self, canvas): cx , cy = self.x, self.y canvas.create_oval(cx - 10, cy - 10, cx + 10, cy + 10, fill = "yellow") # this is basically just a line a draw for me to to be able to visualize # the turning of the player x1 = self.dx*5 + cx y1 = self.dy*5 + cy canvas.create_line(self.x, self.y, x1, y1, fill= "yellow", width = 5) # This is what makes the rayCasting magic happen def rayCast(self, map, canvas): # point of these is to ensure that image is only drawn once # and not as many times as there are rays hitting the block cockroachSet = set() grudgeSet = set() ringSet = set() objectsDict = {} distanceList = [] rayExtension = map.rows # The basis of all the calculation I use come from the usage of trigonometry # and in particular using tangent, and the way I calculate differs depending # on the quadrant where the ray is cast # simple pythagorean theorem for distance def distance(x1,y1,x2,y2): return ((x1-x2)**2 +(y1-y2)**2)**0.5 # 60 degree field of view # start ray casting angle shifted 30 degress left of player centrer FOV angle = self.angle - math.radians(30) fov = 60 # Basically 60 degree FOV so 60 rays each shifted by 1 degree for ray in range(60): # variables predefined to prevent crashing isGrudge = False vDistObject = None hDistObject = None rayDist = 100000000 # limiting angle between 0 and 2pi if angle < 0: angle += 2*math.pi elif angle > 2*math.pi: angle -= 2*math.pi # calculations for rays hitting horizontal walls # again initializing some variables hDist = 1000000000 hX, hY = self.x, self.y rayDepth = 0 if 0< angle < math.pi/2: rayAngle = math.pi/2 - angle rayY = (self.y // map.gridLength) * map.gridLength + map.gridLength rayX = math.tan(rayAngle)*(rayY-self.y) + self.x yOffset = 
map.gridLength xOffset = math.tan(rayAngle)*yOffset elif math.pi/2< angle < math.pi: rayAngle = angle - math.pi/2 rayY = (self.y // map.gridLength) * map.gridLength + map.gridLength rayX = self.x - math.tan(rayAngle)* (rayY-self.y) yOffset = map.gridLength xOffset = -1* math.tan(rayAngle)*yOffset elif math.pi < angle < 3*math.pi/2: rayAngle = 3*math.pi/2 - angle rayY = (self.y // map.gridLength) * map.gridLength rayX = self.x - math.tan(rayAngle)*(self.y -rayY) yOffset = -1*map.gridLength xOffset = math.tan(rayAngle)*yOffset elif 3*math.pi/2 < angle < 2*math.pi: rayAngle = angle - 3*math.pi/2 rayY = (self.y // map.gridLength) * map.gridLength rayX = math.tan(rayAngle)*(self.y - rayY) + self.x yOffset = -1*map.gridLength xOffset = -1*math.tan(rayAngle)*yOffset elif angle == 0 or angle == math.pi: rayX = self.x rayY = self.y rayDepth = rayExtension # rays extended in length each time until collision with wall while rayDepth < rayExtension: if angle > math.pi : rayHCol = int(rayX / map.gridLength) rayHRow = int(rayY / map.gridLength) -1 else: rayHCol = int(rayX / map.gridLength) rayHRow = int(rayY / map.gridLength) if (0<=rayHRow<map.rows and 0<=rayHCol< map.cols and 6 >map.map[rayHRow][rayHCol] > 0): hX = rayX hY = rayY hDist = distance(self.x, self.y, rayX, rayY) rayDepth = rayExtension else: rayX += xOffset rayY += yOffset rayDepth += 1 # calculations for rays hitting horizontal walls # again initializing some variables vDist = 1000000000 vX, vY = self.x, self.y rayDepth = 0 if 0< angle < math.pi/2: rayAngle = angle rayX = (self.x // map.gridLength) * map.gridLength + map.gridLength rayY = math.tan(rayAngle)*(rayX-self.x) + self.y xOffset = map.gridLength yOffset = math.tan(rayAngle)*xOffset elif math.pi/2< angle < math.pi: rayAngle = math.pi - angle rayX = (self.x // map.gridLength) * map.gridLength rayY = math.tan(rayAngle)*(self.x-rayX) + self.y xOffset = -1*map.gridLength yOffset = -1*math.tan(rayAngle)*xOffset elif math.pi < angle < 3*math.pi/2: rayAngle = angle - math.pi rayX = (self.x // map.gridLength) * map.gridLength rayY = self.y - math.tan(rayAngle)*(self.x-rayX) xOffset = -1*map.gridLength yOffset = math.tan(rayAngle)*xOffset elif 3*math.pi/2 < angle < 2*math.pi: rayAngle = 2*math.pi - angle rayX = (self.x // map.gridLength) * map.gridLength + map.gridLength rayY = self.y - math.tan(rayAngle)*(rayX-self.x) xOffset = map.gridLength yOffset = -1*math.tan(rayAngle)*xOffset elif angle == math.pi/2 or angle == 3*math.pi/2: rayX = self.x rayY = self.y rayDepth = rayExtension # rays extended in length each time until collision with wall while rayDepth < rayExtension: if math.pi/2 <angle < 3*math.pi/2: rayVCol = int(rayX / map.gridLength)-1 rayVRow = int(rayY / map.gridLength) else: rayVCol = int(rayX / map.gridLength) rayVRow = int(rayY / map.gridLength) if (0<=rayVRow<map.rows and 0<=rayVCol< map.cols and 6>map.map[rayVRow][rayVCol] > 0): vX = rayX vY = rayY vDist = distance(self.x, self.y, rayX, rayY) rayDepth = rayExtension else: rayX += xOffset rayY += yOffset rayDepth += 1 # This part creates the illusion of shadows and texturing by darkening the # vertical walls a darker shade than horizontal walls # It also accounts for different cell types walls vs curtains # also grudge, cockroaches and the rings if vDist < hDist: rayDist = vDist rayRow = rayVRow rayCol = rayVCol if map.map[rayVRow][rayVCol] == 1: wallColor = 'grey24' lineColor = 'grey40' elif map.map[rayVRow][rayVCol] == 2: wallColor = 'dark olive green' lineColor = 'sky blue' elif map.map[rayVRow][rayVCol] == 3: wallColor 
= 'goldenrod' lineColor = 'light goldenrod' elif map.map[rayVRow][rayVCol] == 4: wallColor = 'DarkOrange4' lineColor = 'brown' elif map.map[rayVRow][rayVCol] == 5: isGrudge = True wallColor = 'black' lineColor = 'black' elif hDist < vDist: rayX, rayY = hX, hY rayDist = hDist rayRow = rayHRow rayCol = rayHCol if map.map[rayHRow][rayHCol] == 1: wallColor = 'grey33' lineColor = 'grey40' elif map.map[rayHRow][rayHCol] == 2: wallColor = 'DarkOliveGreen4' lineColor = 'sky blue' elif map.map[rayHRow][rayHCol] == 3: wallColor = 'gold' lineColor = "light goldenrod" elif map.map[rayHRow][rayHCol] == 4: wallColor = 'DarkOrange3' lineColor = 'brown' elif map.map[rayHRow][rayHCol] == 5: isGrudge = True wallColor = 'black' lineColor = "black" # After all the raycasting this is the final code for creating the 3d world # correcting the angle for the primary purpose of fixing the fish-eye effect fixedAngle = self.angle - angle if fixedAngle < 0: fixedAngle += 2*math.pi elif fixedAngle > 2*math.pi: fixedAngle -= 2*math.pi #fish-eye effect would be caused if the rayDistance was left as is # hence perpendicular distance is calculated and hence corrected for # minimum value to prevent crashing
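# The last step above corrects the fish-eye effect: using the raw euclidean ray distance
# makes walls at the edge of the FOV look curved, so the distance is projected onto the
# player's view direction before the wall slice height is computed. A minimal sketch of
# that correction (screen_height and the clamp value are illustrative, not from the game):
import math

def wall_slice_height(ray_dist, ray_angle, player_angle, grid_length, screen_height=500):
    """Perpendicular distance -> on-screen wall slice height for one ray."""
    fixed_angle = player_angle - ray_angle
    perp_dist = ray_dist * math.cos(fixed_angle)   # remove the fish-eye distortion
    perp_dist = max(perp_dist, 0.0001)             # minimum value to prevent a crash
    return min(screen_height, grid_length * screen_height / perp_dist)

# Example: a ray cast 20 degrees off-center that travelled 300 units in a 64-unit grid.
print(wall_slice_height(300, math.radians(20), 0.0, 64))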
#!/usr/bin/env python # -*- coding: utf-8 -*- """Module with functions to build plantuml text""" # Modules import datamodel from jarvis.shared_orchestrator import check_type_recursively from jarvis.question_answer import get_objects_names, check_get_object from .util import ObjDiagram, StateDiagram, SequenceDiagram def write_function_child(string_obj, function, input_flow_list, output_flow_list, xml_attribute_list): """Construct plantuml_text recursively""" function_input_port = [] function_output_port = [] external_input_port = [] external_output_port = [] parent_function_port = [] count = 1 nb_component = count_composed_component(function, count) for p in input_flow_list: if p[0][0] == function.name.lower(): function_input_port.append(p) if p[0][0] is None: external_input_port.append(p) if p[0][1] == function.name.lower(): parent_function_port.append(p) for q in output_flow_list: if q[0][0] == function.name.lower(): function_output_port.append(q) if q[0][0] is None: external_output_port.append(q) if q[0][1] == function.name.lower(): parent_function_port.append(q) string_obj.create_port(function_input_port, "in") string_obj.create_port(function_output_port, "out") string_obj.create_port(parent_function_port, 'None') child_with_no_child_list = [] child_with_child_list = [] # Create a child list for each parent function for child_function in function.child_list: if child_function.child_list: child_with_child_list.append(child_function) if not child_function.child_list: child_with_no_child_list.append(child_function) # For child that has no child: create object for fun in child_with_no_child_list: string_obj.create_object(fun, xml_attribute_list) for child in child_with_child_list: string_obj.create_component(child) write_function_child(string_obj, child, input_flow_list, output_flow_list, xml_attribute_list) nb_component -= 1 # Close all the brackets depending on the number of component within highest parent for i in range(nb_component): string_obj.append_string('}\n') for component in child_with_child_list: string_obj.create_component_attribute(component, xml_attribute_list) whole_child_list = child_with_child_list + child_with_no_child_list for fun in whole_child_list: for i in input_flow_list: if i[0][0] == fun.name.lower(): string_obj.create_port(input_flow_list, "in") for j in output_flow_list: if j[0][0] == fun.name.lower(): string_obj.create_port(output_flow_list, "out") string_obj.create_port(external_input_port, "in") string_obj.create_port(external_output_port, "out") def count_composed_component(function, count): """ Count the number of composed function within the higher function Parameters: function (Function) : Function to check count (int) : Number of component Returns: count (int) : Number of component """ for elem in function.child_list: if elem.child_list: count += 1 count_composed_component(elem, count) continue return count def write_function_object(string_obj, function, input_flow_list, output_flow_list, check, xml_attribute_list, component_obj=None, compo_diagram=False): """Write 'simple' function object with associated ports for flow_lists, close a pevious component if needed, returns plantuml_text""" string_obj.create_object(function, xml_attribute_list) if check: string_obj.append_string('}\n') if component_obj: string_obj.create_component_attribute(component_obj, xml_attribute_list) for p in input_flow_list: if compo_diagram: if p[0][0] == function.name.lower(): string_obj.create_port(input_flow_list, "in") else: if p[0][0] == function.name.lower() or p[0][1] == 
function.name.lower(): string_obj.create_port(input_flow_list, "in") for q in output_flow_list: if compo_diagram: if q[0][0] == function.name.lower(): string_obj.create_port(output_flow_list, "out") else: if q[0][0] == function.name.lower() or q[0][1] == function.name.lower(): string_obj.create_port(output_flow_list, "out") def get_function_diagrams(function_list, consumer_function_list, producer_function_list, parent_child_dict, data_list, xml_type_list, xml_attribute_list=None): """For fun_elem_function, function_context, function_decomposition and functions_chain, returns plantuml_text and url_diagram""" string_obj = ObjDiagram() # Filter output flows output_flow_list = get_output_flows(consumer_function_list, producer_function_list, concatenate=True) # Filter input flows input_flow_list = get_input_flows(consumer_function_list, producer_function_list, concatenate=True) # Filter consumers and producers list in order to create data flow data_flow_list = get_exchanged_flows(consumer_function_list, producer_function_list, parent_child_dict, concatenate=True) if data_list: per_message_data_flow_list = get_exchanged_flows(consumer_function_list, producer_function_list, parent_child_dict) if len(data_list) == len(per_message_data_flow_list): ordered_function_list, ordered_message_list = order_list(per_message_data_flow_list, data_list) if per_message_data_flow_list != ordered_message_list: for idx, i in enumerate(ordered_message_list): for j in data_flow_list: for k in j[1]: if i[2] == k and i[3]: new = str(idx+1) + ":" + k j[1].remove(k) j[1].append(new) # Loop in order to filter functions and write in output's file, see write_function_child() if not parent_child_dict: for function in function_list: write_function_object(string_obj, function, input_flow_list, output_flow_list, False, xml_attribute_list) if parent_child_dict: for function in function_list: if function.id in parent_child_dict.values() and \ function.id not in parent_child_dict.keys(): if check_function_type(function, xml_type_list): string_obj.create_component(function) write_function_child(string_obj, function, input_flow_list, output_flow_list, xml_attribute_list) if function.id not in parent_child_dict.keys() \ and function.id not in parent_child_dict.values(): write_function_object(string_obj, function, input_flow_list, output_flow_list, False, xml_attribute_list, compo_diagram=True) string_obj.create_input_flow(input_flow_list) string_obj.create_output_flow(output_flow_list) string_obj.create_data_flow(data_flow_list) diagram_url = string_obj.generator.get_diagram_path_or_url(string_obj.string) return string_obj.string, diagram_url def check_function_type(function, xml_type_list): """Checks if function's type(or recursive base type) from [Function, High level function, Safety function, High level safety function, unknown]""" specific_obj_type_list = datamodel.FunctionType.get_parent_function_type_list() check = False if any(a == str(function.type) for a in specific_obj_type_list): check = True return check if any(a == function.type for a in get_objects_names(xml_type_list)): obj_type = check_get_object(function.type, **{'xml_type_list': xml_type_list}) check = check_type_recursively(obj_type, [str(i).upper() for i in specific_obj_type_list]) return check return check def get_fun_elem_context_diagram(function_list, consumer_function_list, producer_function_list, data_list, xml_attribute_list, fun_elem_list, fun_inter_list, fun_elem_inter_list): """Returns plantuml_text, diagram_url for fun_elem_context""" string_obj = 
ObjDiagram() if fun_inter_list: unmerged_data_list = get_exchanged_flows(consumer_function_list, producer_function_list, {}) interface_list, data_flow_list = get_interface_list(fun_inter_list, data_list, unmerged_data_list, function_list, fun_elem_list) data_flow_list = concatenate_flows(data_flow_list) else: # Filter consumers and producers list in order to create data flow data_flow_list = get_exchanged_flows(consumer_function_list, producer_function_list, {}, concatenate=True) # Filter output flows output_flow_list = get_output_flows(consumer_function_list, producer_function_list, concatenate=True) # Filter input flows input_flow_list = get_input_flows(consumer_function_list, producer_function_list, concatenate=True) for fun_elem in fun_elem_list: string_obj.create_component(fun_elem) check_function = False for f in function_list: if any(a == f.id for a in fun_elem.allocated_function_list): if len(fun_elem.allocated_function_list) > 1: check_function = False else: check_function = True write_function_object(string_obj, f, input_flow_list, output_flow_list, check_function, xml_attribute_list, component_obj=fun_elem) if not check_function: string_obj.append_string('}\n') string_obj.create_component_attribute(fun_elem, xml_attribute_list) string_obj.create_input_flow(input_flow_list) string_obj.create_output_flow(output_flow_list) string_obj.create_data_flow(data_flow_list) if fun_elem_inter_list: string_obj.create_interface(fun_elem_inter_list) diagram_url = string_obj.generator.get_diagram_path_or_url(string_obj.string) return string_obj.string, diagram_url def get_interface_list(fun_inter_list, data_list, data_flow_list, function_list, fun_elem_list): """Get [fun_elem_1, fun_elem_2, fun_inter] when data allocated to fun_inter and pop according data from data_flow_list""" interface_list = [] removed_data_flow_list = [] initial_data = list(data_flow_list) idx = 0 # Get all fun_inter with allocated data within data_flow_list and create interface list # [[producer, consumer, fun_inter]...] for fun_inter in fun_inter_list: for data_id in fun_inter.allocated_data_list: for data in data_list: if data_id == data.id: for elem in data_flow_list.copy(): if data.name == elem[2]: first = None second = None for fun in function_list: if elem[0] == fun.name.lower(): first = fun if elem[1] == fun.name.lower(): second = fun if not (not first and not second): # if not any(fun_inter in s for s in interface_list): interface_list.insert(idx, [first, second, fun_inter]) removed_data_flow_list.insert(idx, elem) data_flow_list.remove(elem) idx += 1 output_list, interface_list = get_fun_elem_from_fun_inter(interface_list, fun_elem_list) if not output_list: return None, initial_data # (re)Add [producer, consumer, data_name] to data_flow_list if no interface exposed if any(isinstance(s, list) for s in interface_list): for idx, rest_inter in enumerate(interface_list): if isinstance(rest_inter, list): data_flow_list.append(removed_data_flow_list[idx]) return output_list, data_flow_list def get_fun_elem_from_fun_inter(interface_list, fun_elem_list): """Get output_list = [[fun_elem_1, fun_elem_2, fun_inter]...] list from interface_list = [[producer, consumer, fun_inter]...] and put value to False if (first, second, interface) have been added to output_list (i.e. 
fun_elem_1/fun_elem_2 have been found for a fun_inter)""" output_list = [] for ix, (first, second, interface) in enumerate(interface_list): fun_elem_1 = None fun_elem_2 = None if first: for elem_1 in fun_elem_list: if any(s == interface.id for s in elem_1.exposed_interface_list): if not elem_1.child_list: fun_elem_1 = elem_1 else: check = True for child in elem_1.child_list: if any(s == interface.id for s in child.exposed_interface_list): check = False if check: fun_elem_1 = elem_1 if second: for elem_2 in fun_elem_list: if not first: if any(s == interface.id for s in elem_2.exposed_interface_list): if not elem_2.child_list: fun_elem_2 = elem_2 else: check = True for child in elem_2.child_list: if any(s == interface.id for s in child.exposed_interface_list): check = False if check: fun_elem_2 = elem_2 else: if any(s == interface.id for s in elem_2.exposed_interface_list) and \ elem_2 != fun_elem_1: if not elem_2.child_list: fun_elem_2 = elem_2 else: check = True for child in elem_2.child_list: if any(s == interface.id for s in child.exposed_interface_list): check = False if check: fun_elem_2 = elem_2 if not (not fun_elem_1 and not fun_elem_2): if [fun_elem_1, fun_elem_2, interface] not in output_list: output_list.append([fun_elem_1, fun_elem_2, interface]) interface_list[ix] = False return output_list, interface_list def check_child_allocation(string_obj, fun_elem, function_list, xml_attribute_list): """ Check for each function allocated to fun_elem if not allocated to any fun_elem child: in that case => write function object string. Parameters: string_obj (Object) : Current object string fun_elem (Functional Element) : Functional element to check function_list ([Function]) : Functions list xml_attribute_list ([Attributes]) : Xml list of attributes Returns: out_str (string) : Function object(s) string """ for t in function_list: if t.id in fun_elem.allocated_function_list: child_allocated_function_list = [] for c in fun_elem.child_list: for j in c.allocated_function_list: child_allocated_function_list.append(j) if not any(s == t.id for s in child_allocated_function_list): write_function_object(string_obj, t, [], [], False, xml_attribute_list) def recursive_decomposition(string_obj, main_fun_elem, function_list, xml_attribute_list, first_iter=False): """ Creates Functional Elements as plantuml 'component' recursively """ if first_iter is True: string_obj.create_component(main_fun_elem) check_child_allocation(string_obj, main_fun_elem, function_list, xml_attribute_list) if main_fun_elem.child_list: recursive_decomposition(string_obj, main_fun_elem, function_list, xml_attribute_list) else: for c in main_fun_elem.child_list: string_obj.create_component(c) check_child_allocation(string_obj, c, function_list, xml_attribute_list) if c.child_list: recursive_decomposition(string_obj, c, function_list, xml_attribute_list) string_obj.append_string('}\n') string_obj.create_component_attribute(c, xml_attribute_list) string_obj.create_component_attribute(main_fun_elem, xml_attribute_list) def get_fun_elem_decomposition(main_fun_elem, fun_elem_list, allocated_function_list, consumer_list, producer_list, external_function_list, xml_attribute_list, data_list, fun_inter_list): """ Parses input lists in order to create dedicated functional element decomposition diagram by: Creating the whole string plantuml_text, retrieve url and return it. Parameters: main_fun_elem (Functional Element) : Main functional element fun_elem_list
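# count_composed_component() above walks child_list to work out how many plantuml component
# blocks ('{' ... '}') get opened, so write_function_child() knows how many closing brackets
# to append. A self-contained sketch of that counting idea with a stand-in Function class
# (hypothetical, only name and child_list matter here):
class Function:
    def __init__(self, name, children=None):
        self.name = name
        self.child_list = children or []

def count_composed(function):
    """Count functions in the tree (including `function`) that have at least one child."""
    count = 1 if function.child_list else 0
    for child in function.child_list:
        count += count_composed(child)
    return count

# F1 is composed of F2 (itself composed of F3) and the leaf F4 -> two composed functions.
tree = Function("F1", [Function("F2", [Function("F3")]), Function("F4")])
print(count_composed(tree))  # 2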
def handle_lookup(self, present_map, msg_len, comp_id, comp_id_s): mm = 'Lookup Response=' self.logger.info('Lookup Request') if self.config_map is None: self.logger.info('Unsupported Lookup Request in current Service, Please check your config file') self.request = b'' self._set_selector_events_mask('r') return primary_ip, primary_port = self.config_map['primary']['ip'], int(self.config_map['primary']['port']) secondary_ip, secondary_port = self.config_map['secondary']['ip'], int(self.config_map['secondary']['port']) resp_msg_type = MsgType.LOOKUP_RESPONSE present_map[0], present_map[3], present_map[4], present_map[5], present_map[6] = 1, 1, 1, 1, 1 msg_body = struct.pack('<B16sH16sH', 0, primary_ip.encode('utf-8'), primary_port , secondary_ip.encode('utf-8'), secondary_port) msg_len += 1 + 16 + 4 + 16 + 4 message = self.encode_msg_trailer(self.encode_msg_header(resp_msg_type, comp_id, comp_id_s, msg_len, present_map) + msg_body) mm += Utils.print_binary(message) self.logger.info(mm) # message = b'Response ... ' self.response_created = True self._send_buffer += message self.request = b'' self._set_selector_events_mask('r') def handle_test_request(self, present_map, msg_len, comp_id, comp_id_s): mm = 'HeartBeat for Test Request=' self.logger.info('Test Request') resp_msg_type = MsgType.HEART_BEAT pos = 54 test_req_id = struct.unpack('<H', self.request[pos: pos + 2])[0] present_map[0] = 1 msg_len += 2 msg_body = struct.pack('<H', test_req_id) message = self.encode_msg_trailer(self.encode_msg_header(resp_msg_type, comp_id, comp_id_s, msg_len, present_map) + msg_body) mm += Utils.print_binary(message) self.logger.info(mm) # message = b'Response ... ' self.response_created = True self._send_buffer += message self.request = b'' self._set_selector_events_mask('r') def handle_resend_request(self, present_map, msg_len, comp_id, comp_id_s): mm = 'Resend Request=' self.logger.info('Test Request') resp_msg_type = MsgType.SEQUENCE_RESET pos = 54 seqs = struct.unpack('<II', self.request[pos: pos + 8]) start_seq = seqs[0] end_seq = seqs[1] self.logger.info('****** resend request: start_seq=%d, end_seq=%d', start_seq, end_seq) if end_seq == 0: end_seq = start_seq # while start_seq <= end_seq: msg_len = 58 present_map[0], present_map[1] = 1, 1 msg_len += 5 msg_body = struct.pack('<BI', ord('N'), end_seq) message = self.encode_msg_trailer(self.encode_msg_header(resp_msg_type, comp_id, comp_id_s, msg_len, present_map) + msg_body) mm += Utils.print_binary(message) self.logger.info(mm) # message = b'Response ... ' self.response_created = True self._send_buffer += message self.request = b'' self._set_selector_events_mask('r') def handle_heartbeat(self, present_map, msg_len, comp_id, comp_id_s): mm = 'Heart Beat=' resp_msg_type = MsgType.HEART_BEAT self.logger.info('Heart Beat') msg_body = b'' message = self.encode_msg_trailer(self.encode_msg_header(resp_msg_type, comp_id, comp_id_s, msg_len, present_map) + msg_body) mm += Utils.print_binary(message) self.logger.info(mm) # message = b'Response ... 
' self.response_created = True self._send_buffer += message self.request = b'' self._set_selector_events_mask('r') def handle_logout(self, present_map, msg_len, comp_id, comp_id_s): mm = 'Logout=' resp_msg_type = MsgType.LOGOUT self.logger.info('Logout') # Session Status present_map[1] = 1 msg_len += 1 msg_body = struct.pack('<B', 0) message = self.encode_msg_trailer(self.encode_msg_header(resp_msg_type, comp_id, comp_id_s, msg_len, present_map) + msg_body) mm += Utils.print_binary(message) self.logger.info(mm) # message = b'Response ... ' self.response_created = True self._send_buffer += message self.request = b'' self._set_selector_events_mask('r') def handle_logon(self, present_map, msg_len, comp_id, comp_id_s): mm = 'Logon=' resp_msg_type = MsgType.LOGON self.logger.info('Logon Message') seq_num=struct.unpack('<I',self.request[4:8])[0] pm = bitarray(format(self.request[22], 'b').zfill(8), endian='big') pos = 54 if pm[0]: password = self.request[pos: pos + 450] pos += 450 self.logger.info('password=%s', password.decode('utf-8')) if pm[1]: new_password = self.request[pos: pos + 450] pos += 450 self.logger.info('new_password=%s', new_password.decode('utf-8')) req_next_expected_seq = struct.unpack('<I', self.request[pos: pos + 4])[0] self.send_seq_dict[comp_id_s]=req_next_expected_seq-1 self.logger.info('send sequence = %d, next expected seq=%d', req_next_expected_seq, seq_num+1) # Next Expected message sequence present_map[2] = 1 msg_len += 4 # if comp_id_s in self.receive_exp_next_seq_dict: # next_expected_seq = self.receive_exp_next_seq_dict[comp_id_s] # else: # next_expected_seq = 1 next_expected_seq=seq_num+1 # Session Status present_map[3] = 1 msg_len += 1 msg_body = struct.pack('<IB', next_expected_seq, 0) message = self.encode_msg_trailer(self.encode_msg_header(resp_msg_type, comp_id, comp_id_s, msg_len, present_map) + msg_body) mm += Utils.print_binary(message) self.logger.info(mm) # message = b'Response ... 
' self.response_created = True self._send_buffer += message self.request = b'' self._set_selector_events_mask('r') def handle_new_order(self, present_map, msg_len, comp_id, comp_id_s): mm = 'Execution Report=' self.logger.info('New Order Request') resp_msg_type = MsgType.EXECUTION_REPORT utc_now = datetime.utcnow() transact_time = datetime.strftime(utc_now, '%Y%m%d %H:%M:%S.%f')[:-3] pm = bitarray(format(self.request[22], 'b').zfill(8), endian='big') pm.extend(format(self.request[23], 'b').zfill(8)) pm.extend(format(self.request[24], 'b').zfill(8)) pos = 54 cl_ord_id = self.request[pos: pos + 21] pos += 21 submitting_broker_id = self.request[pos: pos + 12] pos += 12 security_id = self.request[pos: pos + 21] pos += 21 security_id_source = self.request[pos] pos += 1 exch, broker_location_id, price = None, None, None if pm[4]: exch = self.request[pos: pos + 5] pos += 5 if pm[5]: broker_location_id = self.request[pos: pos + 11] pos += 11 ord_transact_time = self.request[pos: pos + 25] pos += 25 side = self.request[pos] pos += 1 ord_type = self.request[pos] pos += 1 if pm[9]: price = struct.unpack('<Q', self.request[pos: pos + 8])[0] pos += 8 order_qty = struct.unpack('<Q', self.request[pos: pos + 8])[0] pos += 8 time_in_force, position_effect, order_restriction, max_price_levels = None, None, None, None order_capacity, lot_type, text = None, None, None if pm[11]: time_in_force = self.request[pos] pos += 1 if pm[12]: position_effect = self.request[pos] pos += 1 if pm[13]: order_restriction = self.request[pos: pos + 21] pos += 21 if pm[14]: max_price_levels = self.request[pos] pos += 1 if pm[15]: order_capacity = self.request[pos] pos += 1 self.logger.info('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~text~~~~~~~~~~~~~~~~~~~~~~') if pm[16]: self.logger.info('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ has text~~~~~~~~~~~~~~~~~~~~~~') text_len = struct.unpack('<H', self.request[pos: pos + 2])[0] pos += 2 text = self.request[pos: pos + text_len] pos += text_len self.logger.info('text = %s', text) if pm[17]: exec_inst = self.request[pos: pos + 21] pos += 21 else: exec_inst = b'' if pm[18]: disclosure_inst = struct.unpack('<H', self.request[pos: pos + 2]) pos += 2 if pm[19]: lot_type = self.request[pos] self.logger.info('NewOrderSingle[cl_ord_id=%s, security_id=%s, transact_time=%s, side=%d, ord_type=%d' ', order_qty=%d, exec_inst=%s]', cl_ord_id.decode('utf-8'), security_id.decode('utf-8'), ord_transact_time.decode('utf-8') , side, ord_type, order_qty, exec_inst) symbol = security_id.decode('utf-8').strip('\x00') self.logger.info(' --------------- securityId = %s ------------------, %s', symbol, security_id) if symbol == '11': self.generate_business_reject(comp_id, comp_id_s, cl_ord_id, 11, struct.unpack('<I', self.request[4: 8])[0]) return else: self.logger.info('security_is is not 11') # generate the execution report present_map[0], present_map[1], present_map[2], present_map[3], present_map[4] = 1, 1, 1, 1, 1 msg_body = struct.pack('<21s12s21sB5s', cl_ord_id, submitting_broker_id, security_id, security_id_source, exch) msg_len += 21 + 12 + 21 + 1 + 5 if broker_location_id: present_map[5] = 1 msg_body += struct.pack('<11s', broker_location_id) msg_len += 11 present_map[6], present_map[7], present_map[9], present_map[11] = 1, 1, 1, 1 order_id = datetime.now().strftime('%H%M%S') + format(str(self.order_seq_no), 's').zfill(5) self.clordid_orderid[cl_ord_id] = order_id, order_qty, broker_location_id self.order_seq_no += 1 msg_body += struct.pack('<25sB21sB', transact_time.encode('utf-8'), side, order_id.encode('utf-8') , 
ord_type) msg_len += 25 + 1 + 21 + 1 if price: present_map[12] = 1 msg_body += struct.pack('<Q', price) msg_len += 8 present_map[13] = 1 msg_body += struct.pack('<Q', order_qty) msg_len += 8 if time_in_force: present_map[14] = 1 msg_body += struct.pack('<B', time_in_force) msg_len += 1 if position_effect: present_map[15] = 1 msg_body += struct.pack('<B', position_effect) msg_len += 1 if order_restriction: present_map[16] = 1 msg_body += struct.pack('<21s', order_restriction) msg_len += 21 if max_price_levels: present_map[17] = 1 msg_body += struct.pack('<B', max_price_levels) msg_len += 1 if order_capacity: present_map[18] = 1 msg_body += struct.pack('<B', order_capacity) msg_len += 1 if text: present_map[19] = 1 msg_body += struct.pack('<H'+str(len(text)+1)+'s', len(text)+1, text) msg_len += len(text)+3 present_map[21], present_map[22], present_map[23], present_map[24], present_map[25] = 1, 1, 1, 1, 1 exec_id = datetime.now().strftime('%H%M%S') + format(str(self.exec_id_seq_no), 's').zfill(5) self.exec_id_seq_no += 1 msg_body += struct.pack('<21sBcQQ', exec_id.encode('utf-8'), OrdStatus.NEW, ExecType.NEW.value.encode('utf-8'), 0, order_qty) msg_len += 21 + 1 + 1 + 8 + 8 if lot_type: present_map[27] = 1 msg_body += struct.pack('<B', lot_type) msg_len += 1 message = self.encode_msg_trailer(self.encode_msg_header(resp_msg_type, comp_id, comp_id_s, msg_len, present_map) + msg_body) mm += Utils.print_binary(message) self.logger.info(mm) # message = b'Response ... ' sleep(1) self.response_created = True self._send_buffer += message try: last_px = price if not last_px: last_px = 100000000 if 10000000000 < order_qty <= 100000000000: self._send_buffer += self.generate_trade(comp_id, comp_id_s, cl_ord_id, submitting_broker_id , security_id, side, order_id, ord_type, price, order_qty, time_in_force, position_effect, order_qty, order_qty, last_px , text, broker_location_id) elif 100000000000 < order_qty <= 200000000000: last_qty = 100000000000 cum_qty = last_qty self._send_buffer += self.generate_trade(comp_id, comp_id_s, cl_ord_id, submitting_broker_id , security_id, side, order_id, ord_type, price, order_qty, time_in_force, position_effect, cum_qty, last_qty , last_px, text, broker_location_id) last_qty = order_qty-last_qty self._send_buffer += self.generate_trade(comp_id, comp_id_s, cl_ord_id, submitting_broker_id , security_id, side, order_id, ord_type, price, order_qty, time_in_force, position_effect, order_qty, last_qty, last_px , text, broker_location_id) elif 200000000000 < order_qty <= 500000000000: last_qty = 50000000000 cum_qty = last_qty self._send_buffer += self.generate_trade(comp_id, comp_id_s, cl_ord_id, submitting_broker_id , security_id, side, order_id, ord_type, price, order_qty, time_in_force, position_effect, cum_qty, last_qty, last_px , text, broker_location_id) last_qty = 100000000000 cum_qty = cum_qty + last_qty self._send_buffer += self.generate_trade(comp_id, comp_id_s, cl_ord_id, submitting_broker_id , security_id, side, order_id, ord_type, price, order_qty, time_in_force, position_effect, cum_qty, last_qty , last_px, text, broker_location_id) last_qty = order_qty - cum_qty self._send_buffer += self.generate_trade(comp_id, comp_id_s, cl_ord_id, submitting_broker_id , security_id, side, order_id, ord_type, price, order_qty, time_in_force, position_effect, order_qty, last_qty, last_px , text, broker_location_id) else: last_qty = order_qty//2 cum_qty = last_qty self._send_buffer += self.generate_trade(comp_id, comp_id_s, cl_ord_id, submitting_broker_id , security_id, side, 
order_id, ord_type, price, order_qty, time_in_force, position_effect, cum_qty, last_qty, last_px, text, broker_location_id) except
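# The handlers above build every response the same way: a header carrying a presence bitmap,
# a body packed field-by-field with little-endian struct formats, and a trailer. A minimal,
# self-contained sketch of the presence-map idea (field layout and names are illustrative
# only, not the actual protocol layout):
import struct
from bitarray import bitarray

def pack_body(fields):
    """Pack optional (fmt, value) fields; bit i of the returned map marks field i as present."""
    present_map = bitarray(len(fields), endian='big')
    present_map.setall(0)
    body = b''
    for i, entry in enumerate(fields):
        if entry is None:
            continue
        fmt, value = entry
        present_map[i] = 1
        body += struct.pack('<' + fmt, value)
    return present_map, body

# Example: field 0 (uint32 sequence) and field 2 (uint8 status) present, field 1 omitted.
pm, body = pack_body([('I', 42), None, ('B', 0)])
print(pm.to01(), body)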
attribute:: interval AIS transmission interval **type**\: :py:class:`CfmBagAisInterval <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmBagAisInterval>` .. attribute:: sending_ais Details of how AIS is being transmitted **type**\: :py:class:`CfmPmAisTransmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmPmAisTransmit>` .. attribute:: receiving_ais Details of how the signal is being received **type**\: :py:class:`CfmPmAisReceive <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmPmAisReceive>` .. attribute:: last_interval The interval of the last received AIS packet **type**\: :py:class:`CfmBagAisInterval <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmBagAisInterval>` .. attribute:: last_mac_address Source MAC address of the last received AIS packet **type**\: str **pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5} """ _prefix = 'ethernet-cfm-oper' _revision = '2017-10-06' def __init__(self): super(Cfm.Global.LocalMeps.LocalMep.AisStatistics, self).__init__() self.yang_name = "ais-statistics" self.yang_parent_name = "local-mep" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([("sending-start", ("sending_start", Cfm.Global.LocalMeps.LocalMep.AisStatistics.SendingStart)), ("receiving-start", ("receiving_start", Cfm.Global.LocalMeps.LocalMep.AisStatistics.ReceivingStart))]) self._leafs = OrderedDict([ ('level', (YLeaf(YType.enumeration, 'level'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmBagMdLevel', '')])), ('interval', (YLeaf(YType.enumeration, 'interval'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmBagAisInterval', '')])), ('sending_ais', (YLeaf(YType.enumeration, 'sending-ais'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmPmAisTransmit', '')])), ('receiving_ais', (YLeaf(YType.enumeration, 'receiving-ais'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmPmAisReceive', '')])), ('last_interval', (YLeaf(YType.enumeration, 'last-interval'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmBagAisInterval', '')])), ('last_mac_address', (YLeaf(YType.str, 'last-mac-address'), ['str'])), ]) self.level = None self.interval = None self.sending_ais = None self.receiving_ais = None self.last_interval = None self.last_mac_address = None self.sending_start = Cfm.Global.LocalMeps.LocalMep.AisStatistics.SendingStart() self.sending_start.parent = self self._children_name_map["sending_start"] = "sending-start" self.receiving_start = Cfm.Global.LocalMeps.LocalMep.AisStatistics.ReceivingStart() self.receiving_start.parent = self self._children_name_map["receiving_start"] = "receiving-start" self._segment_path = lambda: "ais-statistics" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Cfm.Global.LocalMeps.LocalMep.AisStatistics, ['level', 'interval', 'sending_ais', 'receiving_ais', 'last_interval', 'last_mac_address'], name, value) class SendingStart(Entity): """ Time elapsed since AIS sending started .. attribute:: seconds Seconds **type**\: int **range:** 0..4294967295 **units**\: second .. 
attribute:: nanoseconds Nanoseconds **type**\: int **range:** 0..4294967295 **units**\: nanosecond """ _prefix = 'ethernet-cfm-oper' _revision = '2017-10-06' def __init__(self): super(Cfm.Global.LocalMeps.LocalMep.AisStatistics.SendingStart, self).__init__() self.yang_name = "sending-start" self.yang_parent_name = "ais-statistics" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('seconds', (YLeaf(YType.uint32, 'seconds'), ['int'])), ('nanoseconds', (YLeaf(YType.uint32, 'nanoseconds'), ['int'])), ]) self.seconds = None self.nanoseconds = None self._segment_path = lambda: "sending-start" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Cfm.Global.LocalMeps.LocalMep.AisStatistics.SendingStart, ['seconds', 'nanoseconds'], name, value) class ReceivingStart(Entity): """ Time elapsed since AIS receiving started .. attribute:: seconds Seconds **type**\: int **range:** 0..4294967295 **units**\: second .. attribute:: nanoseconds Nanoseconds **type**\: int **range:** 0..4294967295 **units**\: nanosecond """ _prefix = 'ethernet-cfm-oper' _revision = '2017-10-06' def __init__(self): super(Cfm.Global.LocalMeps.LocalMep.AisStatistics.ReceivingStart, self).__init__() self.yang_name = "receiving-start" self.yang_parent_name = "ais-statistics" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('seconds', (YLeaf(YType.uint32, 'seconds'), ['int'])), ('nanoseconds', (YLeaf(YType.uint32, 'nanoseconds'), ['int'])), ]) self.seconds = None self.nanoseconds = None self._segment_path = lambda: "receiving-start" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Cfm.Global.LocalMeps.LocalMep.AisStatistics.ReceivingStart, ['seconds', 'nanoseconds'], name, value) class Defects(Entity): """ Defects detected from peer MEPs .. attribute:: remote_meps_defects Defects detected from remote MEPs **type**\: :py:class:`RemoteMepsDefects <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.LocalMeps.LocalMep.Defects.RemoteMepsDefects>` .. attribute:: ais_received AIS or LCK received **type**\: bool .. attribute:: peer_meps_that_timed_out Number of peer MEPs that have timed out **type**\: int **range:** 0..4294967295 .. attribute:: missing Number of missing peer MEPs **type**\: int **range:** 0..4294967295 .. attribute:: auto_missing Number of missing auto cross\-check MEPs **type**\: int **range:** 0..4294967295 .. attribute:: unexpected Number of unexpected peer MEPs **type**\: int **range:** 0..4294967295 .. attribute:: local_port_status The local port or interface is down **type**\: bool .. 
attribute:: peer_port_status A peer port or interface is down **type**\: bool """ _prefix = 'ethernet-cfm-oper' _revision = '2017-10-06' def __init__(self): super(Cfm.Global.LocalMeps.LocalMep.Defects, self).__init__() self.yang_name = "defects" self.yang_parent_name = "local-mep" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([("remote-meps-defects", ("remote_meps_defects", Cfm.Global.LocalMeps.LocalMep.Defects.RemoteMepsDefects))]) self._leafs = OrderedDict([ ('ais_received', (YLeaf(YType.boolean, 'ais-received'), ['bool'])), ('peer_meps_that_timed_out', (YLeaf(YType.uint32, 'peer-meps-that-timed-out'), ['int'])), ('missing', (YLeaf(YType.uint32, 'missing'), ['int'])), ('auto_missing', (YLeaf(YType.uint32, 'auto-missing'), ['int'])), ('unexpected', (YLeaf(YType.uint32, 'unexpected'), ['int'])), ('local_port_status', (YLeaf(YType.boolean, 'local-port-status'), ['bool'])), ('peer_port_status', (YLeaf(YType.boolean, 'peer-port-status'), ['bool'])), ]) self.ais_received = None self.peer_meps_that_timed_out = None self.missing = None self.auto_missing = None self.unexpected = None self.local_port_status = None self.peer_port_status = None self.remote_meps_defects = Cfm.Global.LocalMeps.LocalMep.Defects.RemoteMepsDefects() self.remote_meps_defects.parent = self self._children_name_map["remote_meps_defects"] = "remote-meps-defects" self._segment_path = lambda: "defects" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Cfm.Global.LocalMeps.LocalMep.Defects, ['ais_received', 'peer_meps_that_timed_out', 'missing', 'auto_missing', 'unexpected', 'local_port_status', 'peer_port_status'], name, value) class RemoteMepsDefects(Entity): """ Defects detected from remote MEPs .. attribute:: loss_threshold_exceeded Timed out (loss threshold exceeded) **type**\: bool .. attribute:: invalid_level Invalid level **type**\: bool .. attribute:: invalid_maid Invalid MAID **type**\: bool .. attribute:: invalid_ccm_interval Invalid CCM interval **type**\: bool .. attribute:: received_our_mac Loop detected (our MAC address received) **type**\: bool .. attribute:: received_our_mep_id Configuration Error (our MEP ID received) **type**\: bool .. 
attribute:: received_rdi Remote defection indication received **type**\: bool """ _prefix = 'ethernet-cfm-oper' _revision = '2017-10-06' def __init__(self): super(Cfm.Global.LocalMeps.LocalMep.Defects.RemoteMepsDefects, self).__init__() self.yang_name = "remote-meps-defects" self.yang_parent_name = "defects" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('loss_threshold_exceeded', (YLeaf(YType.boolean, 'loss-threshold-exceeded'), ['bool'])), ('invalid_level', (YLeaf(YType.boolean, 'invalid-level'), ['bool'])), ('invalid_maid', (YLeaf(YType.boolean, 'invalid-maid'), ['bool'])), ('invalid_ccm_interval', (YLeaf(YType.boolean, 'invalid-ccm-interval'), ['bool'])), ('received_our_mac', (YLeaf(YType.boolean, 'received-our-mac'), ['bool'])), ('received_our_mep_id', (YLeaf(YType.boolean, 'received-our-mep-id'), ['bool'])), ('received_rdi', (YLeaf(YType.boolean, 'received-rdi'), ['bool'])), ]) self.loss_threshold_exceeded = None self.invalid_level = None self.invalid_maid = None self.invalid_ccm_interval = None self.received_our_mac = None self.received_our_mep_id = None self.received_rdi = None self._segment_path = lambda: "remote-meps-defects" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Cfm.Global.LocalMeps.LocalMep.Defects.RemoteMepsDefects, ['loss_threshold_exceeded', 'invalid_level', 'invalid_maid', 'invalid_ccm_interval', 'received_our_mac', 'received_our_mep_id', 'received_rdi'], name, value) class PeerMePv2s(Entity): """ Peer MEPs table Version 2 .. attribute:: peer_me_pv2 Information about a peer MEP for a particular local MEP **type**\: list of :py:class:`PeerMePv2 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.PeerMePv2s.PeerMePv2>` """ _prefix = 'ethernet-cfm-oper' _revision = '2017-10-06' def __init__(self): super(Cfm.Global.PeerMePv2s, self).__init__() self.yang_name = "peer-me-pv2s" self.yang_parent_name = "global" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([("peer-me-pv2", ("peer_me_pv2", Cfm.Global.PeerMePv2s.PeerMePv2))]) self._leafs = OrderedDict() self.peer_me_pv2 = YList(self) self._segment_path = lambda: "peer-me-pv2s" self._absolute_path = lambda: "Cisco-IOS-XR-ethernet-cfm-oper:cfm/global/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Cfm.Global.PeerMePv2s, [], name, value) class PeerMePv2(Entity): """ Information about a peer MEP for a particular local MEP .. attribute:: domain (key) Maintenance Domain **type**\: str **length:** 1..79 .. attribute:: service (key) Service (Maintenance Association) **type**\: str **length:** 1..79 .. attribute:: local_mep_id (key) MEP ID of Local MEP **type**\: int **range:** 1..8191 .. attribute:: interface (key) Interface **type**\: str **pattern:** [a\-zA\-Z0\-9.\_/\-]+ .. attribute:: peer_mep_id (key) MEP ID of Peer MEP **type**\: int **range:** 1..8191 .. attribute:: peer_mac_address (key) Peer MAC address **type**\: str **pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5} .. attribute:: peer_mep Peer MEP **type**\: :py:class:`PeerMep <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.PeerMePv2s.PeerMePv2.PeerMep>` .. attribute:: domain_xr Maintenance domain name **type**\: str .. attribute:: service_xr Service name **type**\: str .. 
attribute:: level Maintenance level **type**\: :py:class:`CfmBagMdLevel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmBagMdLevel>` .. attribute:: mep_id MEP ID **type**\: int **range:** 0..65535 .. attribute:: interface_xr Interface **type**\: str **pattern:** [a\-zA\-Z0\-9.\_/\-]+ .. attribute:: mep_direction MEP facing direction **type**\: :py:class:`CfmBagDirection <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmBagDirection>` .. attribute:: standby The local MEP is on an interface in standby mode **type**\: bool """ _prefix = 'ethernet-cfm-oper' _revision = '2017-10-06' def __init__(self): super(Cfm.Global.PeerMePv2s.PeerMePv2, self).__init__() self.yang_name = "peer-me-pv2" self.yang_parent_name = "peer-me-pv2s" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = ['domain','service','local_mep_id','interface','peer_mep_id','peer_mac_address'] self._child_classes = OrderedDict([("peer-mep", ("peer_mep", Cfm.Global.PeerMePv2s.PeerMePv2.PeerMep))]) self._leafs = OrderedDict([ ('domain', (YLeaf(YType.str, 'domain'), ['str'])), ('service', (YLeaf(YType.str, 'service'), ['str'])), ('local_mep_id', (YLeaf(YType.uint32, 'local-mep-id'), ['int'])), ('interface', (YLeaf(YType.str, 'interface'), ['str'])), ('peer_mep_id', (YLeaf(YType.uint32, 'peer-mep-id'), ['int'])), ('peer_mac_address', (YLeaf(YType.str, 'peer-mac-address'), ['str'])), ('domain_xr', (YLeaf(YType.str, 'domain-xr'), ['str'])), ('service_xr', (YLeaf(YType.str, 'service-xr'), ['str'])), ('level', (YLeaf(YType.enumeration, 'level'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmBagMdLevel', '')])), ('mep_id', (YLeaf(YType.uint16, 'mep-id'), ['int'])), ('interface_xr', (YLeaf(YType.str, 'interface-xr'), ['str'])), ('mep_direction', (YLeaf(YType.enumeration, 'mep-direction'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmBagDirection', '')])), ('standby', (YLeaf(YType.boolean, 'standby'), ['bool'])), ]) self.domain = None self.service = None self.local_mep_id = None self.interface = None self.peer_mep_id = None self.peer_mac_address = None self.domain_xr = None self.service_xr = None self.level = None self.mep_id = None self.interface_xr = None self.mep_direction = None self.standby = None self.peer_mep = Cfm.Global.PeerMePv2s.PeerMePv2.PeerMep() self.peer_mep.parent = self self._children_name_map["peer_mep"] = "peer-mep" self._segment_path = lambda: "peer-me-pv2" + "[domain='" + str(self.domain) + "']" + "[service='" + str(self.service) + "']" + "[local-mep-id='" + str(self.local_mep_id) + "']" +
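# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated bindings above).
# A minimal example, under stated assumptions, of reading this CFM
# operational model from a device with ydk-py's CRUDService.  The device
# address and credentials are placeholders, and the attribute name
# "global_" (ydk-py's escaping of the YANG container "global") plus the
# YList access pattern are assumed from how these bindings are normally
# generated.
# ----------------------------------------------------------------------
from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_ethernet_cfm_oper as cfm_oper

def read_cfm_peer_meps(address, username, password):
    """Read the CFM oper tree and print one line per peer MEP."""
    provider = NetconfServiceProvider(address=address, port=830,
                                      username=username, password=password)
    crud = CRUDService()
    cfm = crud.read(provider, cfm_oper.Cfm())            # empty entity used as the read filter
    for peer in cfm.global_.peer_me_pv2s.peer_me_pv2:    # YList of PeerMePv2 entries
        print(peer.domain, peer.service, peer.peer_mep_id, peer.peer_mac_address)
    return cfm

# Hypothetical call:
# read_cfm_peer_meps("192.0.2.1", "admin", "admin")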
= "33664717.log" job_id = 33664717 file_path = "terminated/" + log ts, r = self.get_trigger_sha_and_repo(job_id) result = self.dispatcher.analyze(file_path, job_id, trigger_sha=ts, repo=r) yield self.compare_status, result, "terminated" def test_python_0(self): log = '250808150-orig.log' job_id = 250808150 file_path = 'python/' + log python0 = self.dispatcher.analyze(file_path, job_id) yield self.compare_status, python0, 'ok' yield self.compare_analyzer, python0, 'python' yield self.compare_num_t_run, python0, 6610 yield self.compare_num_t_ok, python0, 6610 yield self.compare_num_t_failed, python0, 0 yield self.compare_num_t_skipped, python0, 22 yield self.compare_bool_t_ran, python0, True yield self.compare_bool_t_failed, python0, False yield self.compare_t_duration, python0, 43.33 yield self.compare_tr_t_failed, python0, '' yield self.compare_frameworks, python0, 'unittest' def test_python_1(self): log = '78170279-orig.log' job_id = 78170279 file_path = 'python/' + log python1 = self.dispatcher.analyze(file_path, job_id) yield self.compare_status, python1, 'broken' yield self.compare_analyzer, python1, 'python' yield self.compare_num_t_run, python1, 2 yield self.compare_num_t_ok, python1, 0 yield self.compare_num_t_failed, python1, 2 yield self.compare_num_t_skipped, python1, 0 yield self.compare_bool_t_ran, python1, True yield self.compare_bool_t_failed, python1, True yield self.compare_t_duration, python1, 0.015 yield self.compare_tr_t_failed, python1, 'ImportError#ImportError' yield self.compare_frameworks, python1, 'unittest' def test_python_2(self): log = '73309390-orig.log' job_id = 73309390 file_path = 'python/' + log python2 = self.dispatcher.analyze(file_path, job_id) yield self.compare_status, python2, 'broken' yield self.compare_analyzer, python2, 'python' yield self.compare_num_t_run, python2, 10128 yield self.compare_num_t_ok, python2, 10127 yield self.compare_num_t_failed, python2, 1 yield self.compare_num_t_skipped, python2, 568 yield self.compare_bool_t_ran, python2, True yield self.compare_bool_t_failed, python2, True yield self.compare_t_duration, python2, 275.869 yield self.compare_tr_t_failed, python2, 'test_non_current (tornado.test.ioloop_test.TestIOLoopCurrent)' yield self.compare_frameworks, python2, 'unittest' def test_python_3(self): log = '78833091-orig.log' job_id = 78833091 file_path = 'python/' + log python3 = self.dispatcher.analyze(file_path, job_id) yield self.compare_status, python3, 'broken' yield self.compare_analyzer, python3, 'python' yield self.compare_num_t_run, python3, 247 yield self.compare_num_t_ok, python3, 247 yield self.compare_num_t_failed, python3, 0 yield self.compare_num_t_skipped, python3, 0 yield self.compare_bool_t_ran, python3, True yield self.compare_bool_t_failed, python3, False yield self.compare_t_duration, python3, 0.207 yield self.compare_tr_t_failed, python3, '' yield self.compare_frameworks, python3, 'unittest' def test_python_4(self): log = '159557987-orig.log' job_id = 159557987 file_path = 'python/' + log python4 = self.dispatcher.analyze(file_path, job_id) yield self.compare_status, python4, 'broken' yield self.compare_analyzer, python4, 'python' yield self.compare_num_t_run, python4, 43 yield self.compare_num_t_ok, python4, 42 yield self.compare_num_t_failed, python4, 1 yield self.compare_num_t_skipped, python4, 0 yield self.compare_bool_t_ran, python4, True yield self.compare_bool_t_failed, python4, True yield self.compare_t_duration, python4, 0.146 yield self.compare_tr_t_failed, python4, 'test_multiline_index 
(__main__.Test)' yield self.compare_frameworks, python4, 'unittest' def test_python_5(self): log = '109227526-orig.log' job_id = 109227526 file_path = 'python/' + log python5 = self.dispatcher.analyze(file_path, job_id) yield self.compare_status, python5, 'broken' yield self.compare_analyzer, python5, 'python' yield self.compare_num_t_run, python5, 247 yield self.compare_num_t_ok, python5, 242 yield self.compare_num_t_failed, python5, 5 yield self.compare_num_t_skipped, python5, 0 yield self.compare_bool_t_ran, python5, True yield self.compare_bool_t_failed, python5, True yield self.compare_t_duration, python5, 0.259 yield self.compare_tr_t_failed, python5, 'test_modified_url_encoding (verktyg.testsuite.test_requests.' \ 'RequestsTestCase)#test_shallow_mode (verktyg.testsuite.test' \ '_requests.RequestsTestCase)#test_storage_classes (verktyg.' \ 'testsuite.test_requests.RequestsTestCase)#test_base_request' \ ' (verktyg.testsuite.test_requests.RequestsTestCase)#test_form' \ '_data_ordering (verktyg.testsuite.test_requests.RequestsTestCase)' yield self.compare_frameworks, python5, 'unittest' def test_python_6(self): log = '109231519-orig.log' job_id = 109231519 file_path = 'python/' + log python6 = self.dispatcher.analyze(file_path, job_id) yield self.compare_status, python6, 'ok' yield self.compare_analyzer, python6, 'python' yield self.compare_num_t_run, python6, 247 yield self.compare_num_t_ok, python6, 247 yield self.compare_num_t_failed, python6, 0 yield self.compare_num_t_skipped, python6, 0 yield self.compare_bool_t_ran, python6, True yield self.compare_bool_t_failed, python6, False yield self.compare_t_duration, python6, 0.252 yield self.compare_tr_t_failed, python6, '' yield self.compare_frameworks, python6, 'unittest' def test_python_7(self): log = '212206923-orig.log' job_id = 212206923 file_path = 'python/' + log python7 = self.dispatcher.analyze(file_path, job_id) yield self.compare_status, python7, 'broken' yield self.compare_analyzer, python7, 'python' yield self.compare_num_t_run, python7, 591 yield self.compare_num_t_ok, python7, 590 yield self.compare_num_t_failed, python7, 1 yield self.compare_num_t_skipped, python7, 0 yield self.compare_bool_t_ran, python7, True yield self.compare_bool_t_failed, python7, True yield self.compare_t_duration, python7, 30.006 yield self.compare_tr_t_failed, python7, 'test_download_and_expand_tgz (COT.helpers.tests.test' \ '_helper.HelperGenericTest)' yield self.compare_frameworks, python7, 'unittest' def test_python_8(self): log = '212215615-orig.log' job_id = 212215615 file_path = 'python/' + log python8 = self.dispatcher.analyze(file_path, job_id) yield self.compare_status, python8, 'ok' yield self.compare_analyzer, python8, 'python' yield self.compare_num_t_run, python8, 591 yield self.compare_num_t_ok, python8, 591 yield self.compare_num_t_failed, python8, 0 yield self.compare_num_t_skipped, python8, 0 yield self.compare_bool_t_ran, python8, True yield self.compare_bool_t_failed, python8, False yield self.compare_t_duration, python8, 30.436 yield self.compare_tr_t_failed, python8, '' yield self.compare_frameworks, python8, 'unittest' def test_python_9(self): log = '210833092-orig.log' job_id = 210833092 file_path = 'python/' + log python9 = self.dispatcher.analyze(file_path, job_id) yield self.compare_status, python9, 'broken' yield self.compare_analyzer, python9, 'python' yield self.compare_num_t_run, python9, 11 yield self.compare_num_t_ok, python9, 9 yield self.compare_num_t_failed, python9, 2 yield self.compare_num_t_skipped, python9, 
0 yield self.compare_bool_t_ran, python9, True yield self.compare_bool_t_failed, python9, True yield self.compare_t_duration, python9, 0.28 yield self.compare_tr_t_failed, python9, 'tests.unit.test_readkey::ReadKeyTest::test_page_down' \ '#tests.unit.test_readkey::ReadKeyTest::test_page_up' yield self.compare_frameworks, python9, 'pytest' def test_python_10(self): log = '149257173-orig.log' job_id = 149257173 file_path = 'python/' + log python10 = self.dispatcher.analyze(file_path, job_id) yield self.compare_status, python10, 'broken' yield self.compare_analyzer, python10, 'python' yield self.compare_num_t_run, python10, 445 yield self.compare_num_t_ok, python10, 435 yield self.compare_num_t_failed, python10, 10 yield self.compare_num_t_skipped, python10, 2 yield self.compare_bool_t_ran, python10, True yield self.compare_bool_t_failed, python10, True yield self.compare_t_duration, python10, 70.302 yield self.compare_tr_t_failed, python10, 'test_list (vistrails.core.scripting.export.TestExport)' \ '#test_loop_append_mixed (vistrails.core.scripting.export.' \ 'TestExport)#test_loop_cartesian (vistrails.core.scripting.' \ 'export.TestExport)#test_loop_cartesian_reversed (vistrails.' \ 'core.scripting.export.TestExport)#test_loop_combined' \ ' (vistrails.core.scripting.export.TestExport)#test' \ '_loop_pairwise (vistrails.core.scripting.export.TestExport)' \ '#test_loop_wrap (vistrails.core.scripting.export.TestExport)' \ '#test_sources (vistrails.core.scripting.export.TestExport)' \ '#testIncorrectURL (vistrails.packages.URL.init.TestDownload' \ 'File)#testIncorrectURL_2 (vistrails.packages.URL.init.Test' \ 'DownloadFile)' yield self.compare_frameworks, python10, 'unittest' def test_python_11(self): log = '398075675-modified.log' job_id = 398075675 file_path = 'python/' + log python11 = self.dispatcher.analyze(file_path, job_id) yield self.compare_analyzer, python11, 'python' yield self.compare_num_t_run, python11, 9465 yield self.compare_num_t_ok, python11, 9460 yield self.compare_num_t_failed, python11, 5 yield self.compare_num_t_skipped, python11, 27 yield self.compare_bool_t_ran, python11, True yield self.compare_bool_t_failed, python11, True yield self.compare_t_duration, python11, 821.23 yield self.compare_frameworks, python11, 'pytest' yield self.compare_tr_t_failed, python11, 'sklearn.ensemble.tests.test_bagging::test_parallel_classification' \ '#sklearn.ensemble.tests.test_bagging::test_parallel_regression' \ '#sklearn.ensemble.tests.test_bagging::test_base_estimator' \ '#sklearn.ensemble.tests.test_iforest::test_iforest_parallel' \ '_regression#sklearn.tests.test_common::test_parallel_fit' def test_python_12(self): log = '109787645-orig.log' job_id = 109787645 file_path = 'python/' + log python12 = self.dispatcher.analyze(file_path, job_id) yield self.compare_status, python12, 'broken' yield self.compare_analyzer, python12, 'python' yield self.compare_num_t_run, python12, 41 yield self.compare_num_t_ok, python12, 2 yield self.compare_num_t_failed, python12, 39 yield self.compare_num_t_skipped, python12, 0 yield self.compare_bool_t_ran, python12, True yield self.compare_bool_t_failed, python12, True yield self.compare_t_duration, python12, 3.74 yield self.compare_frameworks, python12, 'pytest' yield self.compare_tr_t_failed, python12, 'tests.test_api::TestAuth::test_api#tests.test_api::TestAuth::' \ 'test_backup#tests.test_api::TestAuth::test_non_existant_api' \ '#tests.test_api::TestAuth::test_submit#tests.test_api::' \ 'TestAuth::test_version_api#tests.test_auth::TestAuth::test' \ 
'_login#tests.test_auth::TestAuth::test_restricted#tests.test' \ '_auth::TestAuth::test_testing_login#tests.test_auth::TestAuth::' \ 'test_testing_login_fail#tests.test_group::TestGroup::test_accept' \ '#tests.test_group::TestGroup::test_accept_not_pending#tests.' \ 'test_group::TestGroup::test_decline#tests.test_group::TestGroup' \ '::test_decline_degenerate#tests.test_group::TestGroup::test' \ '_decline_not_pending#tests.test_group::TestGroup::test_invite' \ '#tests.test_group::TestGroup::test_invite_full#tests.test_group' \ '::TestGroup::test_invite_in_group#tests.test_group::TestGroup' \ '::test_invite_individual#tests.test_group::TestGroup::test_invite' \ '_not_enrolled#tests.test_group::TestGroup::test_locked#tests.' \ 'test_group::TestGroup::test_log#tests.test_group::TestGroup::' \ 'test_remove#tests.test_group::TestGroup::test_remove_degenerate' \ '#tests.test_group::TestGroup::test_remove_not_in_group#tests.' \ 'test_group::TestGroup::test_remove_self#tests.test_highlight::' \ 'TestHighlight::test_highlight_diff#tests.test_highlight::' \ 'TestHighlight::test_highlight_file#tests.test_main::TestMain::' \ 'test_home#tests.test_submission::TestSubmission::test_accept' \ '_unflag#tests.test_submission::TestSubmission::test_active_user' \ '_ids#tests.test_submission::TestSubmission::test_files#tests.' \ 'test_submission::TestSubmission::test_flag#tests.test' \ '_submission::TestSubmission::test_no_flags#tests.test_submission' \ '::TestSubmission::test_sabotage#tests.test_submission::' \ 'TestSubmission::test_two_flags#tests.test_submission::' \ 'TestSubmission::test_unflag#tests.test_submission::' \ 'TestSubmission::test_unflag_not_flagged#tests.test_user' \ '::TestUser::test_lookup#tests.test_utils::TestUtils::test_hashids' def test_python_13(self): log = '256802843-orig.log' job_id = 256802843 file_path = 'python/' + log python13 = self.dispatcher.analyze(file_path, job_id) yield self.compare_status, python13, 'broken' yield self.compare_analyzer, python13, 'python' yield self.compare_num_t_run, python13, 2555 yield self.compare_num_t_ok, python13, 2546 yield self.compare_num_t_failed, python13, 9 yield self.compare_num_t_skipped, python13, 0 yield self.compare_bool_t_ran, python13, True yield self.compare_bool_t_failed, python13, True yield self.compare_frameworks, python13, 'pytest' yield self.compare_tr_t_failed, python13, 'tests.h.oauth.jwt_grant_token_test::TestJWTGrantToken::test' \ '_issuer_raises_for_missing_iss_claim#tests.h.oauth.jwt_grant' \ '_token_test::TestVerifiedJWTGrantToken::test_init_raises_for' \ '_missing_claims[aud-audience]#tests.h.oauth.jwt_grant_token' \ '_test::TestVerifiedJWTGrantToken::test_init_raises_for_missing' \ '_claims[exp-expiry]#tests.h.oauth.jwt_grant_token_test::' \ 'TestVerifiedJWTGrantToken::test_init_raises_for_missing' \ '_claims[nbf-start time]#tests.h.oauth.jwt_grant_token_test' \ '::TestVerifiedJWTGrantToken::test_init_raises_for_invalid' \ '_aud#tests.h.oauth.jwt_grant_token_test::' \ 'TestVerifiedJWTGrantToken::test_init_raises_for_invalid' \ '_timestamp_types[exp-expiry]#tests.h.oauth.jwt_grant' \ '_token_test::TestVerifiedJWTGrantToken::test_init_raises' \ '_for_invalid_timestamp_types[nbf-start time]#tests.h.oauth.jwt' \ '_grant_token_test::TestVerifiedJWTGrantToken::test_subject' \ '_raises_for_missing_sub_claim#tests.h.oauth.jwt_grant_token' \ '_test::TestVerifiedJWTGrantToken::test_subject_raises_for_empty' \ '_sub_claim' def test_python_14(self): log = '299944105-orig.log' job_id = 299944105 file_path = 'python/' + log python14 
= self.dispatcher.analyze(file_path, job_id) yield self.compare_status, python14, "broken" yield self.compare_analyzer, python14, "python" yield self.compare_num_t_run, python14, 203 yield self.compare_num_t_ok, python14, 195 yield self.compare_num_t_failed, python14, 8 yield self.compare_num_t_skipped, python14, 14 yield self.compare_bool_t_ran, python14, True yield self.compare_bool_t_failed, python14, True yield self.compare_frameworks, python14, 'pytest' yield self.compare_tr_t_failed, python14, 'tests.test_client::SSHClientTest::test_4_auto_add_policy' \ '#tests.test_client::SSHClientTest::test_6_cleanup#tests.test' \ '_client::SSHClientTest::test_client_can_be_used_as_context' \ '_manager#tests.test_client::SSHClientTest::test_host_key' \ '_negotiation_1#tests.test_client::SSHClientTest::test_host' \ '_key_negotiation_2#tests.test_client::SSHClientTest::test' \ '_host_key_negotiation_3#tests.test_client::SSHClientTest::' \ 'test_host_key_negotiation_4#tests.test_client::SSHClientTest' \ '::test_update_environment' def test_python_15(self): log = '334185447-orig.log' job_id = 334185447 file_path = 'python/' + log python15 = self.dispatcher.analyze(file_path, job_id) yield self.compare_analyzer, python15, 'python' yield self.compare_num_t_run, python15, 984 yield self.compare_num_t_ok, python15, 983 yield self.compare_num_t_failed, python15, 1 yield self.compare_num_t_skipped, python15, 64 yield self.compare_bool_t_ran, python15, True yield self.compare_bool_t_failed, python15, True yield self.compare_frameworks, python15, 'pytest' yield self.compare_tr_t_failed, python15, 'joblib.shelf::shelve_mmap' def test_python_16(self): log = '403765814-orig.log' job_id = 403765814 file_path = 'python/' + log python16 = self.dispatcher.analyze(file_path, job_id) yield self.compare_analyzer, python16, 'python' yield self.compare_num_t_run, python16, 9627 yield self.compare_num_t_ok, python16, 9626 yield self.compare_num_t_failed, python16, 1 yield self.compare_num_t_skipped, python16, 29 yield self.compare_bool_t_ran, python16, True yield self.compare_bool_t_failed, python16, True yield self.compare_frameworks, python16, 'pytest' yield self.compare_tr_t_failed, python16, 'sklearn.cluster.bicluster::SpectralCoclustering' def test_python_17(self): log = '113007194-orig.log' job_id = 113007194 file_path = 'python/' + log python17 = self.dispatcher.analyze(file_path, job_id) yield self.compare_analyzer, python17, 'python' yield self.compare_num_t_run, python17, 6508 yield self.compare_num_t_ok, python17, 6507 yield self.compare_num_t_failed, python17, 1 yield self.compare_num_t_skipped, python17, 0 yield self.compare_bool_t_ran, python17, True yield self.compare_bool_t_failed, python17, True yield self.compare_frameworks, python17, 'unittest' yield self.compare_tr_t_failed, python17, "test_repository_package_names('./repository/m.json', ...)" \ " (tests.test.DefaultRepositoryTests)" def test_python_18(self): log = '65721530-orig.log' job_id = 65721530 file_path = 'python/' + log python18 = self.dispatcher.analyze(file_path,
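# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original test module).  The tests
# above are nose-style generator tests: every "yield comparator, result,
# expected" line is collected by the runner as its own test case, so one
# analyzed log produces many independently reported assertions.  The
# helper below shows the assumed shape of those comparators; the
# attribute name "num_tests_run" on the analyzer result is an assumption
# made for illustration only.
# ----------------------------------------------------------------------
def compare_num_t_run_sketch(result, expected):
    actual = getattr(result, 'num_tests_run', None)
    assert actual == expected, 'expected %r tests run, got %r' % (expected, actual)

# Used the same way as the bound comparators above:
# def test_python_example(self):
#     result = self.dispatcher.analyze('python/some.log', 12345)
#     yield compare_num_t_run_sketch, result, 6610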
# Copyright (c) 2018-2019 <NAME> and contributors. # bibmanager is open-source software under the MIT license (see LICENSE). __all__ = [ 'Bib', 'display_bibs', 'remove_duplicates', 'filter_field', 'loadfile', 'save', 'load', 'export', 'merge', 'init', 'add_entries', 'edit', 'search', ] import os import sys import shutil import datetime import re import pickle import urllib import subprocess import numpy as np import prompt_toolkit from prompt_toolkit.formatted_text import PygmentsTokens from prompt_toolkit import print_formatted_text import pygments from pygments.token import Token from pygments.lexers.bibtex import BibTeXLexer from .. import config_manager as cm from .. import utils as u # Some constant definitions: lexer = prompt_toolkit.lexers.PygmentsLexer(BibTeXLexer) months = {"jan":1, "feb":2, "mar":3, "apr": 4, "may": 5, "jun":6, "jul":7, "aug":8, "sep":9, "oct":10, "nov":11, "dec":12} class Bib(object): """ Bibliographic-entry object. """ def __init__(self, entry): """ Create a Bib() object from given entry. Minimally, entries must contain the author, title, and year keys. Parameters ---------- entry: String A bibliographic entry text. Examples -------- >>> import bibmanager.bib_manager as bm >>> from bibmanager.utils import Author >>> entry = '''@Misc{JonesEtal2001scipy, author = {<NAME> and <NAME> and <NAME>}, title = {{SciPy}: Open source scientific tools for {Python}}, year = {2001}, }''' >>> bib = bm.Bib(entry) >>> print(bib.title) SciPy: Open source scientific tools for Python >>> for author in bib.authors: >>> print(author) Author(last='Jones', first='Eric', von='', jr='') Author(last='Oliphant', first='Travis', von='', jr='') Author(last='Peterson', first='Pearu', von='', jr='') >>> print(bib.sort_author) Sort_author(last='jones', first='e', von='', jr='', year=2001, month=13) """ if u.count(entry) != 0: raise ValueError("Mismatched braces in entry.") self.content = entry # Defaults: self.month = 13 self.adsurl = None self.bibcode = None self.doi = None self.eprint = None self.isbn = None fields = u.get_fields(self.content) self.key = next(fields) for key, value, nested in fields: if key == "title": # Title with no braces, tabs, nor linebreak and corrected blanks: self.title = " ".join(re.sub("({|})", "", value).split()) elif key == "author": # Parse authors finding all non-brace-nested 'and' instances: authors, nests = u.cond_split(value.replace("\n"," "), " and ", nested=nested, ret_nests=True) self.authors = [u.parse_name(author, nested) for author,nested in zip(authors,nests)] elif key == "year": r = re.search('[0-9]{4}', value) self.year = int(r.group(0)) elif key == "month": value = value.lower().strip() self.month = months[value[0:3]] elif key == "doi": self.doi = value elif key == "adsurl": self.adsurl = value # Get bibcode from adsurl, un-code UTF-8, and remove backslashes: bibcode = os.path.split(value)[1].replace('\\', '') self.bibcode = urllib.parse.unquote(bibcode) elif key == "eprint": self.eprint = value.replace('arXiv:','').replace('astro-ph/','') elif key == "isbn": self.isbn = value.lower().strip() for attr in ['authors', 'title', 'year']: if not hasattr(self, attr): raise ValueError(f"Bibtex entry '{self.key}' is missing author, " "title, or year.") # First-author fields used for sorting: # Note this differs from Author[0], since fields are 'purified', # and 'first' goes only by initials(). 
self.sort_author = u.Sort_author(u.purify(self.authors[0].last), u.initials(self.authors[0].first), u.purify(self.authors[0].von), u.purify(self.authors[0].jr), self.year, self.month) def update_key(self, new_key): """Update key with new_key, making sure to also update content.""" self.content = self.content.replace(self.key, new_key, 1) self.key = new_key def __repr__(self): return self.content def __contains__(self, author): r""" Check if given author is in the author list of this bib entry. If the 'author' string begins with the '^' character, match only against the first author. Parameters ---------- author: String An author name in a valid BibTeX format. Examples -------- >>> import bibmanager.bib_manager as bm >>> bib = bm.Bib('''@ARTICLE{DoeEtal2020, author = {{<NAME>. and {<NAME>. and {Dupont}, J.}, title = "What Have the Astromomers ever Done for Us?", journal = {\apj}, year = 2020,}''') >>> # Check for first author: >>> '<NAME>' in bib True >>> # Format doesn't matter, as long as it is a valid format: >>> '<NAME>' in bib True >>> # Neglecting first's initials still match: >>> 'Doe' in bib True >>> # But, non-matching initials wont match: >>> '<NAME>.' in bib False >>> # Match against first author only if string begins with '^': >>> '^Doe' in bib True >>> '^Perez' in bib False """ # Check first-author mark: if author[0:1] == '^': author = author[1:] authors = [self.authors[0]] else: authors = self.authors # Parse and purify input author name: author = u.parse_name(author) first = u.initials(author.first) von = u.purify(author.von) last = u.purify(author.last) jr = u.purify(author.jr) # Remove non-matching authors by each non-empty field: if len(jr) > 0: authors = [author for author in authors if jr == u.purify(author.jr)] if len(von) > 0: authors = [author for author in authors if von == u.purify(author.von)] if len(first) > 0: authors = [author for author in authors if first == u.initials(author.first)[0:len(first)]] authors = [author for author in authors if last == u.purify(author.last)] return len(authors) >= 1 # https://docs.python.org/3.6/library/stdtypes.html def __lt__(self, other): """ Evaluate sequentially according to sort_author's fields: last, first, von, and jr, year, and month. If any of these fields are equal, go on to next field to compare. """ s, o = self.sort_author, other.sort_author if s.last != o.last: return s.last < o.last if len(s.first)==1 or len(o.first) == 1: if s.first[0:1] != o.first[0:1]: return s.first < o.first else: if s.first != o.first: return s.first < o.first if s.von != o.von: return s.von < o.von if s.jr != o.jr: return s.jr < o.jr if s.year != o.year: return s.year < o.year return s.month < o.month def __eq__(self, other): """ Check whether self and other have same sort_author (first author) and year/month. Evaluate to equal by first initial if one entry has less initials than the other. """ if len(self.sort_author.first)==1 or len(other.sort_author.first)==1: first = self.sort_author.first[0:1] == other.sort_author.first[0:1] else: first = self.sort_author.first == other.sort_author.first return (self.sort_author.last == other.sort_author.last and first and self.sort_author.von == other.sort_author.von and self.sort_author.jr == other.sort_author.jr and self.sort_author.year == other.sort_author.year and self.sort_author.month == other.sort_author.month) def __le__(self, other): return self.__lt__(other) or self.__eq__(other) def published(self): """ Published status according to the ADS bibcode field: Return -1 if bibcode is None. 
Return 0 if bibcode is arXiv. Return 1 if bibcode is peer-reviewed journal. """ if self.bibcode is None: return -1 return int(self.bibcode.find('arXiv') < 0) def get_authors(self, short=True): """ wrapper for string representation for the author list. See bib_manager.get_authors() for docstring. """ return u.get_authors(self.authors, short) def display_bibs(labels, bibs): r""" Display a list of bib entries on screen with flying colors. Parameters ---------- labels: List of Strings Header labels to show above each Bib() entry. bibs: List of Bib() objects BibTeX entries to display. Examples -------- >>> import bibmanager.bib_manager as bm >>> e1 = '''@Misc{JonesEtal2001scipy, author = {<NAME> and <NAME> and <NAME>}, title = {{SciPy}: Open source scientific tools for {Python}}, year = {2001}, }''' >>> e2 = '''@Misc{Jones2001, author = {<NAME> and <NAME> and <NAME>}, title = {SciPy: Open source scientific tools for Python}, year = {2001}, }''' >>> bibs = [bm.Bib(e1), bm.Bib(e2)] >>> bm.display_bibs(["DATABASE:\n", "NEW:\n"], bibs) :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: DATABASE: @Misc{JonesEtal2001scipy, author = {<NAME> and <NAME> and <NAME>}, title = {{SciPy}: Open source scientific tools for {Python}}, year = {2001}, } NEW: @Misc{Jones2001, author = {<NAME> and <NAME> and <NAME>}, title = {SciPy: Open source scientific tools for Python}, year = {2001}, } """ style = prompt_toolkit.styles.style_from_pygments_cls( pygments.styles.get_style_by_name(cm.get('style'))) if labels is None: labels = ["" for _ in bibs] tokens = [(Token.Comment, u.BANNER)] for label,bib in zip(labels, bibs): tokens += [(Token.Text, label)] tokens += list(pygments.lex(bib.content, lexer=BibTeXLexer())) tokens += [(Token.Text, "\n")] print_formatted_text(PygmentsTokens(tokens), end="", style=style) def remove_duplicates(bibs, field): """ Look for duplicates (within a same list of entries) by field and remove them (in place). Parameters ---------- bibs: List of Bib() objects Entries to filter. field: String Field to use for filtering ('doi', 'isbn', 'bibcode', or 'eprint'). """ fieldlist = [getattr(bib,field) if getattr(bib,field) is not None else "" for bib in bibs] # No entries: if len(fieldlist) == 0: return ubib, uinv, counts = np.unique(fieldlist, return_inverse=True, return_counts=True) multis = np.where((counts > 1) & (ubib != ""))[0] # No duplicates: if len(multis) == 0: return removes = [] for m in multis: all_indices = np.where(uinv == m)[0] entries = [bibs[i].content for i in all_indices] # Remove identical entries: uentries, uidx = np.unique(entries, return_index=True) indices = list(all_indices[uidx]) removes += [idx for idx in all_indices if idx not in indices] nbibs = len(uentries) if nbibs == 1: continue # Pick peer-reviewed over ArXiv over non-ADS: pubs = [bibs[i].published() for i in indices] pubmax = np.amax(pubs) removes += [idx
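# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of bib_manager itself), assembled
# from the docstrings above: build Bib() objects, test author
# membership, sort by first author and year, and filter duplicates by
# field.  The entry text is a made-up example; the author names are
# taken from the docstring output shown earlier.
# ----------------------------------------------------------------------
import bibmanager.bib_manager as bm

entry = '''@Misc{JonesEtal2001scipy,
   author = {Eric Jones and Travis Oliphant and Pearu Peterson},
   title  = {{SciPy}: Open source scientific tools for {Python}},
   year   = {2001},
}'''

bib = bm.Bib(entry)
print('Jones' in bib)       # True: author-list membership (__contains__)
print('^Oliphant' in bib)   # False: a leading '^' restricts the match to the first author

older = bm.Bib(entry.replace('2001', '1999'))
bibs = sorted([bib, older])            # __lt__ orders by first author, then year/month
bm.remove_duplicates(bibs, 'doi')      # in-place; a no-op here since neither entry has a DOI
print([b.key for b in bibs])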
<reponame>handsome3163/H2Dgame-Firefly<gh_stars>100-1000 #coding:utf8 ''' Created on 2013-8-21 @author: lan (www.9miao.com) ''' import itertools import datetime def safeunicode(obj, encoding='utf-8'): r""" Converts any given object to unicode string. >>> safeunicode('hello') u'hello' >>> safeunicode(2) u'2' >>> safeunicode('\xe1\x88\xb4') u'\u1234' """ t = type(obj) if t is unicode: return obj elif t is str: return obj.decode(encoding) elif t in [int, float, bool]: return unicode(obj) elif hasattr(obj, '__unicode__') or isinstance(obj, unicode): return unicode(obj) else: return str(obj).decode(encoding) def safestr(obj, encoding='utf-8'): r""" Converts any given object to utf-8 encoded string. >>> safestr('hello') 'hello' >>> safestr(u'\u1234') '\xe1\x88\xb4' >>> safestr(2) '2' """ if isinstance(obj, unicode): return obj.encode(encoding) elif isinstance(obj, str): return obj elif hasattr(obj, 'next'): # iterator return itertools.imap(safestr, obj) else: return str(obj) def sqlify(obj): """ converts `obj` to its proper SQL version >>> sqlify(None) 'NULL' >>> sqlify(True) "'t'" >>> sqlify(3) '3' """ # because `1 == True and hash(1) == hash(True)` # we have to do this the hard way... if obj is None: return 'NULL' elif obj is True: return "'t'" elif obj is False: return "'f'" elif datetime and isinstance(obj, datetime.datetime): return repr(obj.isoformat()) else: if isinstance(obj, unicode): obj = obj.encode('utf8') return repr(obj) def sqllist(lst): """ Converts the arguments for use in something like a WHERE clause. >>> sqllist(['a', 'b']) 'a, b' >>> sqllist('a') 'a' >>> sqllist(u'abc') u'abc' """ if isinstance(lst, basestring): return lst else: return ', '.join(lst) def _sqllist(values): """ >>> _sqllist([1, 2, 3]) <sql: '(1, 2, 3)'> """ items = [] items.append('(') for i, v in enumerate(values): if i != 0: items.append(', ') items.append(sqlparam(v)) items.append(')') return SQLQuery(items) def sqlquote(a): """ Ensures `a` is quoted properly for use in a SQL query. >>> 'WHERE x = ' + sqlquote(True) + ' AND y = ' + sqlquote(3) <sql: "WHERE x = 't' AND y = 3"> >>> 'WHERE x = ' + sqlquote(True) + ' AND y IN ' + sqlquote([2, 3]) <sql: "WHERE x = 't' AND y IN (2, 3)"> """ if isinstance(a, list): return _sqllist(a) else: return sqlparam(a).sqlquery() def _interpolate(sformat): """ Takes a format string and returns a list of 2-tuples of the form (boolean, string) where boolean says whether string should be evaled or not. from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee) """ from tokenize import tokenprog tokenprog = tokenprog def matchorfail(text, pos): match = tokenprog.match(text, pos) if match is None: raise _ItplError(text, pos) return match, match.end() namechars = "abcdefghijklmnopqrstuvwxyz" \ "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"; chunks = [] pos = 0 while 1: dollar = sformat.find("$", pos) if dollar < 0: break nextchar = sformat[dollar + 1] if nextchar == "{": chunks.append((0, sformat[pos:dollar])) pos, level = dollar + 2, 1 while level: match, pos = matchorfail(sformat, pos) tstart, tend = match.regs[3] token = sformat[tstart:tend] if token == "{": level = level + 1 elif token == "}": level = level - 1 chunks.append((1, sformat[dollar + 2:pos - 1])) elif nextchar in namechars: chunks.append((0, sformat[pos:dollar])) match, pos = matchorfail(sformat, dollar + 1) while pos < len(sformat): if sformat[pos] == "." 
and \ pos + 1 < len(sformat) and sformat[pos + 1] in namechars: match, pos = matchorfail(sformat, pos + 1) elif sformat[pos] in "([": pos, level = pos + 1, 1 while level: match, pos = matchorfail(sformat, pos) tstart, tend = match.regs[3] token = sformat[tstart:tend] if token[0] in "([": level = level + 1 elif token[0] in ")]": level = level - 1 else: break chunks.append((1, sformat[dollar + 1:pos])) else: chunks.append((0, sformat[pos:dollar + 1])) pos = dollar + 1 + (nextchar == "$") if pos < len(sformat): chunks.append((0, sformat[pos:])) return chunks def sqlwhere(dictionary, grouping=' AND '): """ Converts a `dictionary` to an SQL WHERE clause `SQLQuery`. >>> sqlwhere({'cust_id': 2, 'order_id':3}) <sql: 'order_id = 3 AND cust_id = 2'> >>> sqlwhere({'cust_id': 2, 'order_id':3}, grouping=', ') <sql: 'order_id = 3, cust_id = 2'> >>> sqlwhere({'a': 'a', 'b': 'b'}).query() 'a = %s AND b = %s' """ return SQLQuery.join([k + ' = ' + sqlparam(v) for k, v in dictionary.items()], grouping) def reparam(string_, dictionary): """ Takes a string and a dictionary and interpolates the string using values from the dictionary. Returns an `SQLQuery` for the result. >>> reparam("s = $s", dict(s=True)) <sql: "s = 't'"> >>> reparam("s IN $s", dict(s=[1, 2])) <sql: 's IN (1, 2)'> """ dictionary = dictionary.copy() # eval mucks with it result = [] for live, chunk in _interpolate(string_): if live: v = eval(chunk, dictionary) result.append(sqlquote(v)) else: result.append(chunk) return SQLQuery.join(result, '') class UnknownParamstyle(Exception): """ raised for unsupported db paramstyles (currently supported: qmark, numeric, format, pyformat) """ pass class _ItplError(ValueError): def __init__(self, text, pos): ValueError.__init__(self) self.text = text self.pos = pos def __str__(self): return "unfinished expression in %s at char %d" % ( repr(self.text), self.pos) class SQLParam(object): """ Parameter in SQLQuery. >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")]) >>> q <sql: "SELECT * FROM test WHERE name='joe'"> >>> q.query() 'SELECT * FROM test WHERE name=%s' >>> q.values() ['joe'] """ __slots__ = ["value"] def __init__(self, value): self.value = value def get_marker(self, paramstyle='pyformat'): if paramstyle == 'qmark': return '?' elif paramstyle == 'numeric': return ':1' elif paramstyle is None or paramstyle in ['format', 'pyformat']: return '%s' raise UnknownParamstyle, paramstyle def sqlquery(self): return SQLQuery([self]) def __add__(self, other): return self.sqlquery() + other def __radd__(self, other): return other + self.sqlquery() def __str__(self): return str(self.value) def __repr__(self): return '<param: %s>' % repr(self.value) sqlparam = SQLParam class SQLQuery(object): """ You can pass this sort of thing as a clause in any db function. Otherwise, you can pass a dictionary to the keyword argument `vars` and the function will call reparam for you. Internally, consists of `items`, which is a list of strings and SQLParams, which get concatenated to produce the actual query. """ __slots__ = ["items"] # tested in sqlquote's docstring def __init__(self, items=None): r"""Creates a new SQLQuery. 
>>> SQLQuery("x") <sql: 'x'> >>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)]) >>> q <sql: 'SELECT * FROM test WHERE x=1'> >>> q.query(), q.values() ('SELECT * FROM test WHERE x=%s', [1]) >>> SQLQuery(SQLParam(1)) <sql: '1'> """ if items is None: self.items = [] elif isinstance(items, list): self.items = items elif isinstance(items, SQLParam): self.items = [items] elif isinstance(items, SQLQuery): self.items = list(items.items) else: self.items = [items] # Take care of SQLLiterals for i, item in enumerate(self.items): if isinstance(item, SQLParam) and isinstance(item.value, SQLLiteral): self.items[i] = item.value.v def append(self, value): self.items.append(value) def __add__(self, other): if isinstance(other, basestring): items = [other] elif isinstance(other, SQLQuery): items = other.items else: return NotImplemented return SQLQuery(self.items + items) def __radd__(self, other): if isinstance(other, basestring): items = [other] else: return NotImplemented return SQLQuery(items + self.items) def __iadd__(self, other): if isinstance(other, (basestring, SQLParam)): self.items.append(other) elif isinstance(other, SQLQuery): self.items.extend(other.items) else: return NotImplemented return self def __len__(self): return len(self.query()) def query(self, paramstyle=None): """ Returns the query part of the sql query. >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')]) >>> q.query() 'SELECT * FROM test WHERE name=%s' >>> q.query(paramstyle='qmark') 'SELECT * FROM test WHERE name=?' """ s = [] for x in self.items: if isinstance(x, SQLParam): x = x.get_marker(paramstyle) s.append(safestr(x)) else: x = safestr(x) # automatically escape % characters in the query # For backward compatability, ignore escaping when the query looks already escaped if paramstyle in ['format', 'pyformat']: if '%' in x and '%%' not in x: x = x.replace('%', '%%') s.append(x) return "".join(s) def values(self): """ Returns the values of the parameters used in the sql query. >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')]) >>> q.values() ['joe'] """ return [i.value for i in self.items if isinstance(i, SQLParam)] def join(items, sep=' ', prefix=None, suffix=None, target=None): """ Joins multiple queries. >>> SQLQuery.join(['a', 'b'], ', ') <sql: 'a, b'> Optinally, prefix and suffix arguments can be provided. >>> SQLQuery.join(['a', 'b'], ', ', prefix='(',
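# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above).  Note that
# this module is Python 2 code (it relies on `unicode`/`basestring`), so
# the sketch assumes a Python 2 interpreter.  It only exercises
# behaviour already documented in the docstrings: building parameterised
# queries, rendering them for a DB driver, and recovering the bound
# values.  Table and column names are placeholders.
# ----------------------------------------------------------------------
q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
print q.query()                      # 'SELECT * FROM test WHERE name=%s'
print q.query(paramstyle='qmark')    # 'SELECT * FROM test WHERE name=?'
print q.values()                     # ['joe']

where = sqlwhere({'cust_id': 2, 'order_id': 3})     # e.g. 'order_id = %s AND cust_id = %s'
full = SQLQuery('SELECT * FROM orders WHERE ') + where
print full.query(), full.values()

r = reparam("s IN $s", dict(s=[1, 2]))              # interpolation plus quoting
print r.query(), r.values()                         # 's IN (%s, %s)' [1, 2]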
== parts[index + 1].lower(): if branch == '' or name != parts[index + 1]: branch = parts[index + 1] return True, name, branch else: print('ERROR: The label, ' + labelStr + ', does not belong to branch ' + branch + '.') return False, name, branch print('ERROR: The label, ' + labelStr + ', does not belong to branch ' + branch + '.') return False, name, branch def GetLabels( project ): command = P4EXE + ' -p ' + P4PORT + ' label -o ' + project.mOldLabel lines = os.popen4( command, 't' )[1].readlines() oldLabel = ParseLabel( lines, project, True ) if project.CompareToNow(): newLabel = Label() newLabel.mDate = datetime.datetime.now() newLabel.mName = 'Now' newLabel.mOwner = 'None' newLabel.mDescription = 'Comparing against the \'#head\' or latest revision' else: command = P4EXE + ' -p ' + P4PORT + ' label -o ' + project.mNewLabel lines = os.popen4( command, 't' )[1].readlines() newLabel = ParseLabel( lines, project, False ) return oldLabel, newLabel def ListLabelChanges( project, file ): beginLabel, endLabel = GetLabels( project ) Check( beginLabel.mDate != MAX_DATE, 'The old label, ' + project.mOldLabel + ', did not return a valid date.' ) Check( endLabel.mDate != MAX_DATE, 'The new label, ' + project.mNewLabel + ', did not return a valid date.' ) Check( project.mOldBranch.lower() == project.mNewBranch.lower(), 'Both old branch and new branch should be identical' ) file.write( '\n' + '-' * 79 ) file.write( '\nListing changes between ' + project.mOldLabel + ' and ' + project.mNewLabel + ' for:\n' ) file.write( 'Category: ' + project.mCategory + '\n' ) file.write( 'Name : ' + project.mName + '\n' ) file.write( 'Branch : ' + project.mOldBranch + '\n\n' ) # Doesn't matter which branch because they are garunteed to be the same file.write( 'Begin Label: ' + beginLabel.mName + '\n' ) file.write( 'Date : ' + beginLabel.mDate.strftime( '%Y/%m/%d %H:%M:%S' ) + '\n' ) file.write( 'Owner : ' + beginLabel.mOwner + '\n' ) file.write( 'Description: ' + beginLabel.mDescription.replace( '\n', '\n ' ) + '\n\n' ) file.write( 'End Label : ' + endLabel.mName + '\n' ) file.write( 'Date : ' + endLabel.mDate.strftime( '%Y/%m/%d %H:%M:%S' ) + '\n' ) file.write( 'Owner : ' + endLabel.mOwner + '\n' ) file.write( 'Description: ' + endLabel.mDescription.replace( '\n', '\n ' ) + '\n\n' ) changes = FindChanges( project, beginLabel, endLabel ) for change in changes: file.write( 'Change ' + str( change.mNumber ) + ' on ' + change.mDate.strftime( '%Y/%m/%d %H:%M:%S' ) + ' by ' + change.mUser + '.\n' ) if len( change.mDescription ) > 0: file.write( '\n\t' + change.mDescription.replace('\n', '\n\t') + '\n\n' ) else: file.write( '\n\tNo description of change.\n\n' ) if len( change.mFiles ) > 0: file.write( 'Affected files ...\n\n' ) for fileDesc in change.mFiles: file.write( fileDesc + '\n' ) file.write( '\n\n' ) def LocalWrite( file, str ): if( file != sys.stdout ): sys.stdout.write( str ) file.write( str ) def PrintProjects( projects, file ): class Column: gap = 2 def __init__( self, name ): self.name = name self.width = len( name ) def GetWidth( self ): return self.width + self.gap def SetWidth( self, newWidth ): self.width = max( self.width, newWidth ) def GetName( self ): return self.name cols = {} cols[1] = Column( "Item" ) cols[2] = Column( "Old Branch" ) cols[3] = Column( "Old Label" ) cols[4] = Column( "New Branch" ) cols[5] = Column( "New Label" ) for project in projects: cols[1].SetWidth( len( project.mName ) + 1 ) cols[2].SetWidth( len( project.mOldBranch ) ) cols[3].SetWidth( len( project.mOldLabel ) ) 
cols[4].SetWidth( len( project.mNewBranch ) ) cols[5].SetWidth( len( project.mNewLabel ) ) totalWidth = cols[1].GetWidth() + cols[2].GetWidth() + cols[3].GetWidth() totalWidth += cols[4].GetWidth() + cols[5].GetWidth() - Column.gap - 4 LocalWrite( file, '\n' + totalWidth * '=' + '\n' ) LocalWrite( file, 'Differing Components List\n' ) LocalWrite( file, totalWidth * '=' + '\n' ) for i in range(1,6): LocalWrite( file, ('%-' + str(cols[i].GetWidth() - 1) + 's') % cols[i].GetName() ) LocalWrite( file, '\n' ) hasDifferentBranches = False LocalWrite( file, totalWidth * '-' + '\n' ) for project in projects: if project.mOldBranch != project.mNewBranch: LocalWrite( file, ('%-' + str(cols[1].GetWidth()-1) + 's') % ('*' + project.mName) ) hasDifferentBranches = True else: LocalWrite( file, ('%-' + str(cols[1].GetWidth()-1) + 's') % project.mName ) LocalWrite( file, ('%-' + str(cols[2].GetWidth()-1) + 's') % project.mOldBranch ) LocalWrite( file, ('%-' + str(cols[3].GetWidth()-1) + 's') % project.mOldLabel ) LocalWrite( file, ('%-' + str(cols[4].GetWidth()-1) + 's') % project.mNewBranch ) LocalWrite( file, ('%-' + str(cols[5].GetWidth()-1) + 's') % project.mNewLabel + '\n') LocalWrite( file, '=' * totalWidth + "\n" ) if hasDifferentBranches: LocalWrite( file, '* = WARNING: The old branch doesn\'t equal the new branch. Cannot\n' ) LocalWrite( file, ' compare components where the old and new branches are different.\n\n' ) def MakeUnique( projects ): projects = RemoveAddedOrRemovedComponents( projects ) projects.sort() count = len( projects ) i = 0 while i < count: if i + 1 >= count: i = i + 1 continue if projects[i].mName.lower() == projects[i + 1].mName.lower() and \ projects[i].mCategory.lower() == projects[i + 1].mCategory.lower(): # Always grab the newest old label and newest new label -- matches how Raptor resolves identical projects if projects[i].mOldLabelNum < projects[i + 1].mOldLabelNum: projects[i].mOldBranch = projects[i + 1].mOldBranch projects[i].mOldLabel = projects[i + 1].mOldLabel projects[i].SetLabelNumbers() projects[i].mQueried = False if not projects[i].CompareToNow() and not projects[i + 1].CompareToNow() and \ projects[i].mNewLabelNum < projects[i + 1].mNewLabelNum: projects[i].mNewBranch = projects[i + 1].mNewBranch projects[i].mNewLabel = projects[i + 1].mNewLabel projects[i].SetLabelNumbers() projects[i].mQueried = False projects.remove( projects[i + 1] ) if len( projects ) != count: count = len( projects ) else: i = i + 1 return projects def RemoveAddedOrRemovedComponents( projects ): count = len( projects ) i = 0 while i < count: ## Remove projects that don't have two labels if projects[i].mOldLabel == '' or projects[i].mNewLabel == '': projects.remove( projects[i] ) if len( projects ) != count: count = len( projects ) else: i = i + 1 return projects def FindComponentsTxtDependencies( project ): command = P4EXE + ' -p ' + P4PORT + ' print -q "//SEABU/ProductSource/' + project.mCategory + '/' + project.mName + '/' + project.mOldBranch command += '/Components.txt@' + project.mOldLabel + '"' lines = os.popen4( command, 't' )[1].readlines() projects = [] project.mQueried = True for line in lines: line = line.strip() line = line.strip( '\n' ) line = line.replace( '=', '', 1 ) parts = line.split() if len( parts ) != 5 or parts[0].lower() != 'dep': continue name = parts[2].strip(';'); proj = Project() proj.mCategory = FixCategory( parts[1].strip(';') ) proj.mName = name.capitalize() proj.mOldBranch = parts[3].strip(';').capitalize() proj.mOldLabel = parts[4].split( '-' )[0] 
projects.append( proj ) # Now list the new components.txt and line them up with their previous projects command = P4EXE + ' -p ' + P4PORT + ' print -q "//SEABU/ProductSource/' + project.mCategory + '/' + project.mName + '/' + project.mNewBranch if project.CompareToNow(): command += '/Components.txt#head"' else: command += '/Components.txt@' + project.mNewLabel + '"' lines = os.popen4( command, 't' )[1].readlines() for line in lines: line = line.strip() line = line.strip( '\n' ) line = line.replace( '=', '', 1 ) parts = line.split() if len( parts ) != 5 or parts[0].lower() != 'dep': continue name = parts[2].strip(';'); count = len( projects ) j = 0 found = False # This will allow for duplicates but we remove them below while j < count: if projects[j].mName.lower() == name.lower() and projects[j].mNewLabel == '': projects[j].mNewLabel = parts[4].split( '-' )[0] projects[j].mNewBranch = parts[3].strip(';').capitalize() projects[j].SetLabelNumbers() found = True j = j + 1 # Update the project's to reflect the proper case of the project name and branch for proj in projects: GetLabels( proj ) return projects def RemoveIdenticalLabels( projects ): i = 0 while i < len( projects ): ## Remove projects where
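# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original script).  The functions
# above shell out to Perforce and parse its text output via
# os.popen4(command, 't')[1].readlines(); this is the same Python 2
# pattern reduced to a single helper.  P4EXE and P4PORT are module-level
# constants defined earlier in the script; the values used here are
# placeholders.
# ----------------------------------------------------------------------
import os

P4EXE_SKETCH = 'p4'
P4PORT_SKETCH = 'perforce:1666'

def run_p4_sketch(args):
    """Run one p4 command line and return its output lines."""
    command = P4EXE_SKETCH + ' -p ' + P4PORT_SKETCH + ' ' + args
    return os.popen4(command, 't')[1].readlines()

# e.g. fetch a label spec, as GetLabels() does for the old and new labels:
# lines = run_p4_sketch('label -o MyProject_1.2.3')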
initialization of key/val pairs will occur on that instance, instead of self. Used only when called from __new__() :type instance: ExperimentManager ''' if instance is not None: self = instance for file in os.listdir(self.csv_files_path): path = os.path.join(self.csv_files_path, file) # Sanity check: if Path(path).suffix == '.csv': # Get the field names (i.e. header row): with open(path, 'r') as fd: # Macos sometimes adds weird quarantine files # with a leading underscore; skip those: try: col_names = csv.DictReader(fd).fieldnames except UnicodeDecodeError as _e: continue # Make the writer: fd = open(path, 'a') writer = csv.DictWriter(fd, col_names) writer.fd = fd key = Path(path).stem self[key] = writer.fd.name self.csv_writers[key] = writer #------------------------------------ # _schedule_save #------------------- def _schedule_save(self): ''' If no self-save task is scheduled yet, schedule one: ''' try: # Only schedule a save if none # is scheduled yet: if self.auto_save_thread is not None and \ not self.auto_save_thread.cancelled(): return self.auto_save_thread = AutoSaveThread(self.save) self.auto_save_thread.start() except Exception as e: raise ValueError(f"Could not schedule an experiment save: {repr(e)}") #------------------------------------ # _cancel_save #------------------- def _cancel_save(self): ''' Cancel all self-save tasks: ''' try: if self.auto_save_thread is not None: self.auto_save_thread.cancel() except Exception as e: raise ValueError(f"Could not cancel an experiment save: {repr(e)}") #------------------------------------ #_save_records #------------------- def _save_records(self, item, fname, index_col=None, trust_list_dim=True, header=None): ''' Saves items of types dict, list, Pandas Series, numpy arrays, and DataFrames to a csv file. Creates the csv file and associated csv.DictWriter if needed. If DictWriter has to be created, adds it to the self.csv_writers dict under the fname key. When creating DictWriters, the header line (i.e. column names) is obtain from: o keys() if item is a dict, o index if item is a pd.Series o columns if item is a pd.DataFrame o range(top-level-num-columns) if item is a Python list or numpy array It is a ValueError for item to be an array-like with 3 or more dimensions. If DictWriter already exists, adds the record(s) The fname is used as a key into self.csv_writers, and is expected to not be a full path, or to have an extension such as '.csv'. Caller is responsible for the cleaning. The index_col is relevant only for dataframes: if None, the df's index (i.e. the row labels) are ignored. Else, the index values are stored as a column with column name index_col. The trust_list_dim is relevant only for 2D lists. If True, trust that all rows of the list are the same length. Else each row's length is checked, and a ValueError thrown if lengths are unequal. The header argument may be provided the first time any data are saved to key. :param item: data to be written to csv file :type item: {dict | list | pd.Series | pd.DataFrame} :param fname: name for the csv file stem, and retrieval key :type fname: str :param index_col: for dataframes only: name of index column. 
If None, index will be ignored :type index_col: {str | None} :param trust_list_dim: for 2D lists only: trust that all rows are of equal lengths :type trust_list_dim: True :param header: column names to use as header in CSV file :type header: [str] :return full path to the csv file :rtype str :raise TypeError if item type is unrecognized, or header is provided, but item is not None ''' # Do we already have a csv writer for the given fname? dst = os.path.join(self.csv_files_path, f"{fname}.csv") #if os.path.exists(dst): # dst = self._unique_fname(self.csv_files_path, fname) # Do we already have csv writer for this file: try: csv_writer = self.csv_writers[fname] except KeyError: # No CSV writer yet: if header is None: header = self._get_field_names(item, index_col=index_col, trust_list_dim=trust_list_dim) else: # Is the index_col header names provided? if index_col is not None: header = [index_col] + header fd = open(dst, 'w') csv_writer = csv.DictWriter(fd, header) # Save the fd with the writer obj so # we can flush() when writing to it: csv_writer.fd = fd csv_writer.writeheader() fd.flush() self.csv_writers[fname] = csv_writer else: header = csv_writer.fieldnames # If item is df, we need to determine # whether the index should be included # in a column: if type(item) == pd.DataFrame: col_names = item.columns # If we have one more fld name than # the number of cols, then assume that # the first fld name is for the index column: if len(header) == len(col_names) + 1: index_col = header[0] else: # Used to be "index_col = None" index_col = index_col # Now the DictWriter exists; write the data. # Method for writing may vary with data type. # For pd.Series, use its values as a row; # for lists, if type(item) == pd.Series: item = list(item) elif type(item) == list: item = np.array(item) # If given a dataframe, write each row: if type(item) == pd.DataFrame: num_dims = len(item.shape) if num_dims > 2: raise ValueError(f"For dataframes, can only handle 1D or 2D, not {item}") if index_col is None: # Get to ignore the index (i.e. the row labels): for row_dict in item.to_dict(orient='records'): # Keys must be strings: row_dict_str_keys = {str(key) : val for key, val in row_dict.items()} csv_writer.writerow(row_dict_str_keys) else: for row_dict in self._collapse_df_index_dict(item, index_col): try: csv_writer.writerow(row_dict) except ValueError as e: # Typical cause of a value error is # that csv_writer was closed. Add debug # info and re-raise: msg = "While writing csv record: " if type(csv_writer) == csv.DictWriter: msg += f"fname is {csv_writer.fd.name}; " raise ValueError(f"{msg} {repr(e)}") from e # Numpy array or Python list: elif type(item) in(np.ndarray, list): num_dims = len(self._list_shape(item)) if type(item) == list else len(item.shape) if num_dims == 1: csv_writer.writerow(self._arr_to_dict(item, header)) else: for row in item: csv_writer.writerow(self._arr_to_dict(row, header)) # A dict: elif type(item) == dict: # This is a DictWriter's native food: csv_writer.writerow(item) # If none of the above types, item must be None: elif item is not None: raise TypeError(f"Unknown item type {item}") csv_writer.fd.flush() return dst #------------------------------------ # _get_field_names #------------------- def _get_field_names(self, item, index_col=None, trust_list_dim=True): ''' Given a data structure, return the column header fields appropriate to the data Raises ValueError if the dimension of data is not 1D or 2D. The trust_list_dim is relevant only if item is a Python list. 
The arg controls whether the number of columns in the list is constant across all rows. If trust_list_dim is False, the length of each row is checked, which forces a loop through the list. Even with trust_list_dim is False, the dimensions of the list are checked to be 1D or 2D. Strategy for determining a column header, given type of item: o dict: list of keys o np.ndarray or Python list: range(num-columns) o pd.Series: index o pd.DataFrame: columns :param item: data structure from which to deduce a header :type item: {list | np.ndarray | pd.Dataframe | pd.Series | dict} :param index_col: only relevant if item is a dataframe. In that case: column name to use for the index column. If None, index will not be included in the columns. :type index_col: {None | str} :returns the header :rtype [str] :raises ValueError if dimensions are other than 1, or 2 ''' bad_shape = False # Get dimensions of list or numpy array if type(item) == list: dims = self._list_shape(item) elif type(item) == np.ndarray: dims = item.shape if type(item) == np.ndarray or type(item) == list: if len(dims) == 1: header = list(range(dims[0])) elif len(dims) == 2: header = list(range(dims[1])) else: bad_shape = True # When no index given to Series, col names will # be integers (0..<len of series values>). # Turn them into strs as expected by callers: if type(header[0]) == int: header = [str(col_name) for col_name in header] elif type(item) == dict: header = list(item.keys()) elif type(item) == pd.Series: header = item.index.to_list() #
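# ----------------------------------------------------------------------
# Illustrative sketch (not part of the class above) of the
# csv.DictWriter bookkeeping that _save_records() relies on: open the
# file once, keep the file descriptor on the writer so it can be flushed
# after every record, write the header exactly once, then append dict
# rows.  The file name and column names are placeholders.
# ----------------------------------------------------------------------
import csv

def make_writer_sketch(path, header):
    fd = open(path, 'w')
    writer = csv.DictWriter(fd, header)
    writer.fd = fd            # stash the fd, as the experiment manager does
    writer.writeheader()
    fd.flush()
    return writer

writer = make_writer_sketch('metrics.csv', ['epoch', 'loss'])
writer.writerow({'epoch': 0, 'loss': 1.25})
writer.writerow({'epoch': 1, 'loss': 0.98})
writer.fd.flush()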
<gh_stars>0 import math import random from typing import List import numpy as np from choreography.choreography import Choreography from choreography.drone import Drone, slow_to_pos from choreography.group_step import (BlindBehaviorStep, DroneListStep, PerDroneStep, StepResult) from rlbot.agents.base_agent import SimpleControllerState from rlbot.utils.game_state_util import (BallState, CarState, GameInfoState, GameState, Physics, Rotator, Vector3) from rlbot.utils.structures.game_data_struct import GameTickPacket from rlbot.utils.structures.game_interface import GameInterface from util.agent import Vector, ball_object from util.vec import Vec3 radius = 3000 radius2 = 2048 radius3 = 700 radian_offset = 0 demo_cooldown = (41-13)/60 gravity = Vector(z=-650) max_speed = 2300 throttle_accel = 66 + (2/3) brake_accel = Vector(x=-3500) boost_per_second = 33 + (1/3) jump_max_duration = 0.2 jump_speed = 291 + (2/3) jump_acc = 1458 + (1/3) boost_accel = 991 + (2/3) delta_time = 1/60 class TheSky2(Choreography): """ For seconds 0:12-1:56 of The Sky """ def __init__(self, game_interface: GameInterface): super().__init__() self.game_interface = game_interface self.ball = ball_object() self.attacked_center = False def generate_sequence(self, drones: List[Drone]): self.sequence.clear() pause_time = 1.5 self.drone_aerials = [] self.last_demo_time = -1 self.alive_drones = list(range(60)) self.sequence.append(DroneListStep(self.setup)) self.sequence.append(DroneListStep(self.circular_procession)) self.sequence.append(DroneListStep(self.setup_circle_align)) self.sequence.append(DroneListStep(self.circle_align)) self.sequence.append(DroneListStep(self.act_2)) self.sequence.append(DroneListStep(self.act_2_end)) self.sequence.append(DroneListStep(self.end_choreo)) @staticmethod def get_num_bots() -> int: return 64 def spin_around_rising_ball(self, packet, drones, start_time) -> StepResult: return StepResult(finished=True) def end_choreo(self, packet, drones, start_time) -> StepResult: self.game_interface.set_game_state(GameState(ball=BallState(Physics(location=Vector3(0, 5300, 400))))) return StepResult(finished=True) def setup(self, packet, drones, start_time) -> StepResult: self.game_interface.set_game_state(GameState(game_info=GameInfoState(game_speed=0.25))) car_states = {} radian_spacing = 2 * math.pi / 60 for index, drone in enumerate(drones): if 61 <= index <= 64: car_states[drone.index] = CarState( Physics(location=Vector3(3520, 5100, 0), velocity=Vector3(0, 0, 0))) continue if index == 60: car_states[drone.index] = CarState( Physics(location=Vector3(0, 0, 20), velocity=Vector3(0, 0, 0), rotation=Rotator(0, 0, 0))) continue progress = index * radian_spacing target = Vec3(radius * math.sin(progress), radius * math.cos(progress), 0) car_states[drone.index] = CarState( Physics(location=Vector3(target.x, target.y, 20), velocity=Vector3(0, 0, 0), rotation=Rotator(0, -progress, 0))) self.game_interface.set_game_state(GameState( cars=car_states, ball=BallState(physics=Physics( location=Vector3(0, 0, 155), velocity=Vector3(0, 0, 0), angular_velocity=Vector3(0, 0, 0) )) )) return StepResult(finished=True) def circular_procession(self, packet: GameTickPacket, drones, start_time) -> StepResult: self.ball.update(packet) elapsed = packet.game_info.seconds_elapsed - start_time inactive_drones = max((elapsed - 4) / 0.48, 0) radian_spacing = 2 * math.pi / max(60 - inactive_drones, 16) adjusted_radius = radius - elapsed * 75 for i, drone in enumerate(drones): if i >= inactive_drones: if i < 60: progress = i * 
radian_spacing + elapsed * .25 target = [adjusted_radius * math.sin(progress), adjusted_radius * math.cos(progress), 0] slow_to_pos(drone, target) continue if len(self.drone_aerials) == i: progress = i * radian_spacing + (elapsed + 2) * .25 target = Vector(adjusted_radius * math.sin(progress), adjusted_radius * math.cos(progress), 200 + i * 10) self.drone_aerials.append(Hover(target, i != 60)) self.drone_aerials[i].target.z += 0.1 self.drone_aerials[i].run(drone, packet.game_info.seconds_elapsed) if i == 60: break return StepResult(finished=inactive_drones > 61) def setup_circle_align(self, packet: GameTickPacket, drones, start_time) -> StepResult: self.ball.update(packet) self.game_interface.set_game_state(GameState(ball=BallState(physics=Physics(location=Vector3(drones[60].location.x, drones[60].location.y), velocity=Vector3(0, 0), angular_velocity=Vector3(*drones[60].raw_angular_velocity))))) radian_spacing = 2 * math.pi / 20 radian_spacing_v = 2 * math.pi / 10 for i, drone in enumerate(drones): if i == 60: self.drone_aerials[i].target = Vector(0, 0, 1000) else: # 0 & 1: center circle # 2, 3, 4, & 5: side circles group = i % 6 if group < 2: progress = (i // 6 * 2 + group) * radian_spacing self.drone_aerials[i].target = Vector(radius2 * math.sin(progress), radius2 * math.cos(progress), 1000) elif group < 4: progress = (i // 6 * 2 + (group - 2)) * radian_spacing Q = radius3 * math.sin(progress) adjusted_radius = radius2 + Q self.drone_aerials[i].target = Vector(adjusted_radius * math.sin(progress), adjusted_radius * math.cos(progress), 1000 + Q) else: progress = (i // 6 * 2 + (group - 4)) * radian_spacing Q = radius3 * math.sin(progress) adjusted_radius = radius2 - Q self.drone_aerials[i].target = Vector(adjusted_radius * math.sin(progress), adjusted_radius * math.cos(progress), 1000 - Q) self.drone_aerials[i].run(drone, packet.game_info.seconds_elapsed) if i == 60: break return StepResult(finished=True) def circle_align(self, packet: GameTickPacket, drones, start_time) -> StepResult: self.ball.update(packet) self.game_interface.set_game_state(GameState(ball=BallState(physics=Physics(location=Vector3(drones[60].location.x, drones[60].location.y), velocity=Vector3(0, 0), angular_velocity=Vector3(*drones[60].raw_angular_velocity))))) for i, drone in enumerate(drones): self.drone_aerials[i].run(drone, packet.game_info.seconds_elapsed) if i == 60: break return StepResult(finished=packet.game_info.seconds_elapsed - start_time > 14) def get_random_demo_target(self): target = random.choice(self.alive_drones) self.alive_drones.remove(target) return target def act_2(self, packet: GameTickPacket, drones, start_time) -> StepResult: self.ball.update(packet) if self.odd_tick % 2 == 0: self.game_interface.set_game_state(GameState(ball=BallState(physics=Physics(location=Vector3(drones[60].location.x, drones[60].location.y), velocity=Vector3(0, 0), angular_velocity=Vector3(*drones[60].raw_angular_velocity))))) radian_spacing = 2 * math.pi / 20 elapsed = packet.game_info.seconds_elapsed - start_time hover_height = 1022 - max(0, (elapsed - 60) * 100) # elapsed @ 16 seconds (1:06): foreshadow attack # elapsed @ 31 seconds (1:21): start attack # elapsed @ 60 seconds (1:50): attack center air dribbler then stop for i, drone in enumerate(drones): if i < 60: if drone.demolished: continue elif i not in self.alive_drones: self.alive_drones.append(i) # 0 & 1: center circle # 2, 3, 4, & 5: side circles group = i % 6 if group < 2: progress = (i // 6 * 2 + group) * radian_spacing + elapsed * .3 
self.drone_aerials[i].target = Vector(radius2 * math.sin(progress), radius2 * math.cos(progress), hover_height) elif group < 4: progress = (i // 6 * 2 + (group - 2)) * radian_spacing + elapsed * .3 Q = radius3 * math.sin(progress) adjusted_radius = radius2 + Q self.drone_aerials[i].target = Vector(adjusted_radius * math.sin(progress), adjusted_radius * math.cos(progress), hover_height + Q) else: progress = (i // 6 * 2 + (group - 4)) * radian_spacing + elapsed * .3 Q = radius3 * math.sin(progress) adjusted_radius = radius2 - Q self.drone_aerials[i].target = Vector(adjusted_radius * math.sin(progress), adjusted_radius * math.cos(progress), hover_height - Q) self.drone_aerials[i].run(drone, packet.game_info.seconds_elapsed) if i == 60: break if elapsed >= 31: if elapsed - self.last_demo_time >= demo_cooldown: car_states = {} for i in (61, 62): # (61, 62, 63) target = drones[self.get_random_demo_target()] if elapsed < 60 else drones[60] car_states[i] = CarState(physics=Physics( location=Vector3( target.location.x - 100, target.location.y, target.location.z), velocity=Vector3(2300, 0, 0), rotation=Vector3(0, 0, 0) )) if elapsed >= 60: self.attacked_center = True self.game_interface.set_game_state(GameState( cars=car_states )) self.last_demo_time = elapsed return StepResult(finished=elapsed > 63) def act_2_end(self, packet: GameTickPacket, drones, start_time) -> StepResult: return StepResult(finished=packet.game_info.seconds_elapsed - start_time > 10) def cap(x, low, high): # caps/clamps a number between a low and high value return low if x < low else (high if x > high else x) def sign(x): # returns the sign of a number, -1, 0, +1 if x < 0: return -1 if x > 0: return 1 return 0 def defaultPD(me, local_target, upside_down=False, up=None): # points the car towards a given local target. 
# Direction can be changed to allow the car to steer towards a target while driving backwards if up is None: up = me.local(Vector(z=-1 if upside_down else 1)) # where "up" is in local coordinates target_angles = ( math.atan2(local_target.z, local_target.x), # angle required to pitch towards target math.atan2(local_target.y, local_target.x), # angle required to yaw towards target math.atan2(up.y, up.z) # angle required to roll upright ) # Once we have the angles we need to rotate, we feed them into PD loops to determing the controller inputs me.ctrl.steer = steerPD(target_angles[1], 0) me.ctrl.pitch = steerPD(target_angles[0], me.angular_velocity.y/4) me.ctrl.yaw = steerPD(target_angles[1], -me.angular_velocity.z/4) me.ctrl.roll = steerPD(target_angles[2], me.angular_velocity.x/4) # Returns the angles, which can be useful for other purposes return target_angles def steerPD(angle, rate): # A Proportional-Derivative control loop used for defaultPD return cap(((35*(angle+rate))**3)/10, -1, 1) # A combination of Blind and Deaf's hover code and VirxERLU's car control + jump code class Hover: def __init__(self, target, fast_aerial=True): self.fast_aerial = fast_aerial self.target = target self.jump_type_fast = None self.jumping = False self.dodging = False self.jump_time = -1 self.counter = 0 def run(self, me, time): me.reset_ctrl() if self.jumping or (self.jump_time == -1 and me.on_ground): if self.jump_time == -1: self.jump_type_fast = self.fast_aerial self.jumping = True self.jump_time = time self.counter = 0 jump_elapsed = time - self.jump_time if self.jump_type_fast: if jump_elapsed <= jump_max_duration: me.ctrl.jump = True else: self.counter += 1 if self.counter == 3: me.ctrl.jump = True self.dodging = True elif self.counter == 4: self.dodging = self.jumping = False self.jump_time = -1 elif jump_elapsed <= jump_max_duration: me.ctrl.jump = True else: self.jumping = False self.jump_time = -1 delta_x = self.target - me.location if delta_x.magnitude() > boost_accel: delta_x *= boost_accel / delta_x.magnitude() delta_xy = Vector(delta_x.x - me.velocity.x, delta_x.y - me.velocity.y, 1000 if (not self.jumping or not self.jump_type_fast) else 0) direction = delta_xy.normalize() if self.counter in {0, 4}: defaultPD(me, me.local(delta_xy), up=sign(math.sin(time)) * (-1 if me.index == 60 else 1) * (Vector() - me.location).flatten().normalize()) me.ctrl.throttle = 1 # only boost/throttle if we're facing the right direction if abs(me.forward.angle(delta_xy))
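# ---------------------------------------------------------------------------
# Minimal standalone sketch (not part of the original choreography file): the
# circle-placement math that setup() and circular_procession() above rely on.
# Drones are spread at equal angular spacing around a circle of the given
# radius, and the same angle (negated) is fed into the Rotator yaw so each
# car faces along the circle.
import math

def circle_positions(num_drones: int, radius: float, angle_offset: float = 0.0):
    """Return (x, y, yaw) for each drone, evenly spaced on a circle."""
    radian_spacing = 2 * math.pi / num_drones
    positions = []
    for index in range(num_drones):
        progress = index * radian_spacing + angle_offset
        x = radius * math.sin(progress)
        y = radius * math.cos(progress)
        positions.append((x, y, -progress))      # yaw of -progress, as in setup()
    return positions

# Example: 60 drones on the 3000-unit circle used by the routine above.
# positions = circle_positions(60, 3000)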
tThisFlipGlobal = win.getFutureFlipTime(clock=None) frameN = frameN + 1 # number of completed frames (so 0 is the first frame) # update/draw components on each frame # *text* updates if text.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance: # keep track of start time/frame for later text.frameNStart = frameN # exact frame index text.tStart = t # local t and not account for scr refresh text.tStartRefresh = tThisFlipGlobal # on global time win.timeOnFlip(text, 'tStartRefresh') # time at next scr refresh text.setAutoDraw(True) if text.status == STARTED: # is it time to stop? (based on global clock, using actual start) if tThisFlipGlobal > text.tStartRefresh + 1.0-frameTolerance: # keep track of stop time/frame for later text.tStop = t # not accounting for scr refresh text.frameNStop = frameN # exact frame index win.timeOnFlip(text, 'tStopRefresh') # time at next scr refresh text.setAutoDraw(False) # *text_2* updates if text_2.status == NOT_STARTED and tThisFlip >= 1-frameTolerance: # keep track of start time/frame for later text_2.frameNStart = frameN # exact frame index text_2.tStart = t # local t and not account for scr refresh text_2.tStartRefresh = tThisFlipGlobal # on global time win.timeOnFlip(text_2, 'tStartRefresh') # time at next scr refresh text_2.setAutoDraw(True) # *key_resp_formal1* updates waitOnFlip = False if key_resp_formal1.status == NOT_STARTED and tThisFlip >= 1-frameTolerance: # keep track of start time/frame for later key_resp_formal1.frameNStart = frameN # exact frame index key_resp_formal1.tStart = t # local t and not account for scr refresh key_resp_formal1.tStartRefresh = tThisFlipGlobal # on global time win.timeOnFlip(key_resp_formal1, 'tStartRefresh') # time at next scr refresh key_resp_formal1.status = STARTED # keyboard checking is just starting waitOnFlip = True win.callOnFlip(key_resp_formal1.clock.reset) # t=0 on next screen flip win.callOnFlip(key_resp_formal1.clearEvents, eventType='keyboard') # clear events on next screen flip if key_resp_formal1.status == STARTED and not waitOnFlip: theseKeys = key_resp_formal1.getKeys(keyList=['left', 'right'], waitRelease=False) _key_resp_formal1_allKeys.extend(theseKeys) if len(_key_resp_formal1_allKeys): key_resp_formal1.keys = _key_resp_formal1_allKeys[-1].name # just the last key pressed key_resp_formal1.rt = _key_resp_formal1_allKeys[-1].rt # a response ends the routine continueRoutine = False # check for quit (typically the Esc key) if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]): core.quit() # check if all components have finished if not continueRoutine: # a component has requested a forced-end of Routine break continueRoutine = False # will revert to True if at least one component still running for thisComponent in formal1Components: if hasattr(thisComponent, "status") and thisComponent.status != FINISHED: continueRoutine = True break # at least one component has not yet finished # refresh the screen if continueRoutine: # don't flip if this routine is over or we'll get a blank screen win.flip() # -------Ending Routine "formal1"------- for thisComponent in formal1Components: if hasattr(thisComponent, "setAutoDraw"): thisComponent.setAutoDraw(False) loop_formal1.addData('text.started', text.tStartRefresh) loop_formal1.addData('text.stopped', text.tStopRefresh) loop_formal1.addData('text_2.started', text_2.tStartRefresh) loop_formal1.addData('text_2.stopped', text_2.tStopRefresh) # check responses if key_resp_formal1.keys in ['', [], None]: # No response was made key_resp_formal1.keys = None 
loop_formal1.addData('key_resp_formal1.keys',key_resp_formal1.keys) if key_resp_formal1.keys != None: # we had a response loop_formal1.addData('key_resp_formal1.rt', key_resp_formal1.rt) loop_formal1.addData('key_resp_formal1.started', key_resp_formal1.tStartRefresh) loop_formal1.addData('key_resp_formal1.stopped', key_resp_formal1.tStopRefresh) if "left"in key_resp_formal1.keys: mov1=path1 text1=fdb1 x+=mon if "right" in key_resp_formal1.keys: mov1=path2 text1=fdb2 x+=mon # the Routine "formal1" was not non-slip safe, so reset the non-slip timer routineTimer.reset() # ------Prepare to start Routine "movie"------- continueRoutine = True routineTimer.add(5.000000) # update component parameters for each repeat movie_2 = visual.MovieStim3( win=win, name='movie_2', noAudio = False, filename=mov1, ori=0, pos=(0, 0), opacity=1, loop=False, depth=0.0, ) # keep track of which components have finished movieComponents = [movie_2] for thisComponent in movieComponents: thisComponent.tStart = None thisComponent.tStop = None thisComponent.tStartRefresh = None thisComponent.tStopRefresh = None if hasattr(thisComponent, 'status'): thisComponent.status = NOT_STARTED # reset timers t = 0 _timeToFirstFrame = win.getFutureFlipTime(clock="now") movieClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip frameN = -1 # -------Run Routine "movie"------- while continueRoutine and routineTimer.getTime() > 0: # get current time t = movieClock.getTime() tThisFlip = win.getFutureFlipTime(clock=movieClock) tThisFlipGlobal = win.getFutureFlipTime(clock=None) frameN = frameN + 1 # number of completed frames (so 0 is the first frame) # update/draw components on each frame # *movie_2* updates if movie_2.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance: # keep track of start time/frame for later movie_2.frameNStart = frameN # exact frame index movie_2.tStart = t # local t and not account for scr refresh movie_2.tStartRefresh = tThisFlipGlobal # on global time win.timeOnFlip(movie_2, 'tStartRefresh') # time at next scr refresh movie_2.setAutoDraw(True) if movie_2.status == STARTED: # is it time to stop? 
(based on global clock, using actual start) if tThisFlipGlobal > movie_2.tStartRefresh + 5-frameTolerance: # keep track of stop time/frame for later movie_2.tStop = t # not accounting for scr refresh movie_2.frameNStop = frameN # exact frame index win.timeOnFlip(movie_2, 'tStopRefresh') # time at next scr refresh movie_2.setAutoDraw(False) # check for quit (typically the Esc key) if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]): core.quit() # check if all components have finished if not continueRoutine: # a component has requested a forced-end of Routine break continueRoutine = False # will revert to True if at least one component still running for thisComponent in movieComponents: if hasattr(thisComponent, "status") and thisComponent.status != FINISHED: continueRoutine = True break # at least one component has not yet finished # refresh the screen if continueRoutine: # don't flip if this routine is over or we'll get a blank screen win.flip() # -------Ending Routine "movie"------- for thisComponent in movieComponents: if hasattr(thisComponent, "setAutoDraw"): thisComponent.setAutoDraw(False) loop_formal1.addData('movie_2.started', movie_2.tStartRefresh) loop_formal1.addData('movie_2.stopped', movie_2.tStopRefresh) # ------Prepare to start Routine "feedback1"------- continueRoutine = True routineTimer.add(1.000000) # update component parameters for each repeat text_3.setText(text1) text_10.setText(x) # keep track of which components have finished feedback1Components = [text_3, text_9, text_10] for thisComponent in feedback1Components: thisComponent.tStart = None thisComponent.tStop = None thisComponent.tStartRefresh = None thisComponent.tStopRefresh = None if hasattr(thisComponent, 'status'): thisComponent.status = NOT_STARTED # reset timers t = 0 _timeToFirstFrame = win.getFutureFlipTime(clock="now") feedback1Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip frameN = -1 # -------Run Routine "feedback1"------- while continueRoutine and routineTimer.getTime() > 0: # get current time t = feedback1Clock.getTime() tThisFlip = win.getFutureFlipTime(clock=feedback1Clock) tThisFlipGlobal = win.getFutureFlipTime(clock=None) frameN = frameN + 1 # number of completed frames (so 0 is the first frame) # update/draw components on each frame # *text_3* updates if text_3.status == NOT_STARTED and tThisFlip >= 0-frameTolerance: # keep track of start time/frame for later text_3.frameNStart = frameN # exact frame index text_3.tStart = t # local t and not account for scr refresh text_3.tStartRefresh = tThisFlipGlobal # on global time win.timeOnFlip(text_3, 'tStartRefresh') # time at next scr refresh text_3.setAutoDraw(True) if text_3.status == STARTED: # is it time to stop? (based on global clock, using actual start) if tThisFlipGlobal > text_3.tStartRefresh + 1-frameTolerance: # keep track of stop time/frame for later text_3.tStop = t # not accounting for scr refresh text_3.frameNStop = frameN # exact frame index win.timeOnFlip(text_3, 'tStopRefresh') # time at next scr refresh text_3.setAutoDraw(False) # *text_9* updates if text_9.status == NOT_STARTED and tThisFlip >= 0-frameTolerance: # keep track of start time/frame for later text_9.frameNStart = frameN # exact frame index text_9.tStart = t # local t and not account for scr refresh text_9.tStartRefresh = tThisFlipGlobal # on global time win.timeOnFlip(text_9, 'tStartRefresh') # time at next scr refresh text_9.setAutoDraw(True) if text_9.status == STARTED: # is it time to stop? 
(based on global clock, using actual start) if tThisFlipGlobal > text_9.tStartRefresh + 1-frameTolerance: # keep track of stop time/frame for later text_9.tStop = t # not accounting for scr refresh text_9.frameNStop = frameN # exact frame index win.timeOnFlip(text_9, 'tStopRefresh') # time at next scr refresh text_9.setAutoDraw(False) # *text_10* updates if text_10.status == NOT_STARTED and tThisFlip >= 0-frameTolerance: # keep track of start time/frame for later text_10.frameNStart = frameN # exact frame index text_10.tStart = t # local t and not account for scr refresh text_10.tStartRefresh = tThisFlipGlobal # on global time win.timeOnFlip(text_10, 'tStartRefresh') # time at next scr refresh text_10.setAutoDraw(True) if text_10.status == STARTED: # is it time to stop? (based on global clock, using actual start) if tThisFlipGlobal > text_10.tStartRefresh + 1.0-frameTolerance: # keep track of stop time/frame for later text_10.tStop = t
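# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated Builder script): the
# start/stop pattern that every visual component in the routines above
# repeats verbatim.  `comp` is any PsychoPy stimulus; onset and duration are
# in seconds on the routine clock, and frameTolerance mirrors the script's
# own "within one frame" tolerance.
from psychopy.constants import NOT_STARTED, STARTED

def update_component(comp, t, tThisFlip, tThisFlipGlobal, frameN, win,
                     onset, duration, frameTolerance=0.001):
    # start the component once its scheduled onset is within one frame
    if comp.status == NOT_STARTED and tThisFlip >= onset - frameTolerance:
        comp.frameNStart = frameN             # exact frame index
        comp.tStart = t                       # routine-clock time, pre-refresh
        comp.tStartRefresh = tThisFlipGlobal  # global time of the next flip
        win.timeOnFlip(comp, 'tStartRefresh')
        comp.setAutoDraw(True)                # for visual stims this also sets status to STARTED
    # stop it once the scheduled duration has elapsed, measured from the
    # actual (refresh-accurate) start time, as the routines above do
    if comp.status == STARTED and duration is not None:
        if tThisFlipGlobal > comp.tStartRefresh + duration - frameTolerance:
            comp.tStop = t
            comp.frameNStop = frameN
            win.timeOnFlip(comp, 'tStopRefresh')
            comp.setAutoDraw(False)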
<filename>HIVE MIND.py import datetime class hivemind: class mind: class neurone: def __init__(self,name,resistance=0,accelerate=0.999,brake=0.999,bayeslearningrate=10): import random self.learningrate={} self.bayeslearningrate=bayeslearningrate self.inputs={} self.bias={} self.bayesbias={} if isinstance(resistance,str): self.resistance=ramdom.random() else: self.resistance=resistance self.pain=2 self.fired=[] self.name=name self.temp={} self.me=0 self.accelerate=accelerate self.brake=brake def forward(self,imp={},bayes={},error=0): import random a=0 c=0 for i in bayes: if i in self.bayesbias: try: c+=(self.bayesbias[i]*bayes[i]) except Exception as ex: template = "An exception of type {0} occurred. Arguments:\n{1!r}" message = template.format(type(ex).__name__, ex.args) print(message) print(c) print(self.bayesbias[i]) print(bayes[i]) print(i) print(bayes) input("pause in forward") else: if error==2: print(i) print(self.bayesinputs) input("pause") self.bayesbias[i]=random.random() self.learningrate[i]=random.random() c+=self.bayesbias[i] c=self.outputactivation(c) if error==1: print(self.name) print(c) input() if c > self.resistance or self.name=="output": a=0 for i in imp: if i in self.bias: a+=(self.bias[i]*imp[i]) else: self.bias[i]=random.random() a=self.outputactivation(a) self.fired=imp self.pain=a return [self.name,a,c] else: return [] def backwards(self,actual,estimate,lisp,error=0): import random if self.name in lisp or self.name=='output': if len(self.fired)>0: a=0 c=actual-abs(estimate) d=estimate/actual e=0 if c > 0: if self.pain < 0: if actual >0: sel=0 else: sel=1 else: sel=1 else: if self.pain < 0: if actual >0: sel=1 else: sel=0 else: sel=0 for i in self.fired: if i in self.temp: if sel==1 and self.temp == 1: self.learningrate[i]=self.learningrate[i]*self.accelerate else: self.learningrate[i]=self.learningrate[i]*self.brake #self.temp[i]=c try: if c>0: for i in self.fired: self.bias[i]+=self.learningrate[i] self.bayesbias[i]+=(self.learningrate[i]/self.bayeslearningrate) self.temp[i]=sel else: for i in self.fired: self.bias[i]-=self.learningrate[i] self.bayesbias[i]-=(self.learningrate[i]/self.bayeslearningrate) self.temp[i]=sel except Exception as ex: template = "An exception of type {0} occurred. Arguments:\n{1!r}" message = template.format(type(ex).__name__, ex.args) print(message) print(self.fired) print(i) input("Error in backwards") temp=self.fired.copy() self.fired=[] return temp #mind needs to take the reply and group all the returns and then feed into next row. 
#if mind gets a empty dict back for whole line then it needs to cycle through neurones and top up the bayes dict def nonresponse(self,estimate): import random for i in estimate: if i !=self.name: if i in self.bayesbias: self.bayesbias[i]+=1 else: self.bayesbias[i]=random.random()+1 self.learningrate[i]=random.random() def experience(self): self.accelerate-=0.00000001 self.brake-=0.00000001 if self.brake<0.00000001: self.brake=0.00000001 if self.accelerate < 1.00000001: self.accelerate=1.00000001 def reset(self): self.fired=[] class Relu: def outputactivation(self,x): if x > 0: return x else: return (x*0.1) return 1 / (1 + math.exp(-x)) class Sigmoid: def outputactivation(self,x): import math return 1 / (1 + math.exp(-x)) class Tanh: def outputactivation(self,x): import math x=math.tanh(x) return x class sigmoidneurone(Sigmoid,neurone): pass class reluneurone(Relu,neurone): pass class tanhneurone(Tanh,neurone): pass def __init__(self,width,depth,repeat=0,resistance=0,bayeslearningrate=10,linearegression=0): self.outputbias={} self.mind=[] self.source=[] self.fired={} self.repeat=repeat self.me=0 self.critime={} self.resistance=resistance c=0 for i in range(depth): cortex=[] for w in range(width): c+=1 name=str("No:"+str(c)+" row:"+str(i)+" width:"+str(w)) cortex.append(self.reluneurone(name,resistance=resistance,bayeslearningrate=bayeslearningrate)) if linearegression==1: name='output' self.output=self.reluneurone(name,resistance=0,bayeslearningrate=bayeslearningrate) self.mind.append(cortex.copy()) name='output' self.output=self.reluneurone(name,resistance=0,bayeslearningrate=bayeslearningrate) def labotomy(self,width=[4,4,4,4,4],typo=['r','r','r','r','r','r'],resistance=[0,0,0,0,0,0],bayeslearningrate=[10,10,10,10,10],linearegression=[0,0,0,0,0]): count=0 work=4 self.mind=[] rest=0 bayes=10 c=0 for i in range(len(typo)): try: work=width[count] rest=resistance[count] bayes=bayeslearningrate[count] except: pass cortex=[] for w in range(work): c+=1 name=str("No:"+str(c)+" row:"+str(i)+" width:"+str(w)) if typo[i].lower()=='r': cortex.append(self.reluneurone(name,resistance=resistance,bayeslearningrate=bayeslearningrate)) if typo[i].lower()=='s': cortex.append(self.sigmoidneurone(name,resistance=resistance,bayeslearningrate=bayeslearningrate)) if typo[i].lower()=='t': cortex.append(self.tanhneurone(name,resistance=resistance,bayeslearningrate=bayeslearningrate)) if linearegression[i].lower()==1: name='output' self.output=self.reluneurone(name,resistance=resistance,bayeslearningrate=bayeslearningrate) self.mind.append(cortex.copy()) count+=1 name='output' self.output=self.reluneurone(name,resistance=resistance,bayeslearningrate=bayeslearningrate) def forwardpage(self,inputs,error=0): output=0 nay={} bay={} responsenay={} responsebay={} for i in inputs: if isinstance(i,(int,float)): nay[i]=i bay[i]=i else: nay[i]=1 bay[i]=1 if error==2: print(inputs) for cortex in range(len(self.mind)): responsenay={} responsebay={} for nerve in self.mind[cortex]: response=nerve.forward(nay,bay) if len(response) >0: responsenay[response[0]]=response[1] responsebay[response[0]]=response[2] if len(responsenay)==0: for nerve in self.mind[cortex]: nerve.nonresponse(bay) if error==2: print(responsenay) print(responsebay) input("pause error 2 at forward page") nay=responsenay bay=responsebay response=self.output.forward(nay,bay) if len(response)==0: self.output.nonresponse(bay) self.output.nonresponse(bay) else: output=response[1] return output def slow(self): for cortex in range(len(self.mind)): for nerve in 
self.mind[cortex]: nerve.experience() def backapage(self,actual,estimate,error=0): nex=[] r=[] if estimate==None: estimate=0 nex=self.output.backwards(float(actual),float(estimate),[]) #print(nex) #input() for cortex in reversed(self.mind): for nerve in cortex: try: response=nerve.backwards(float(actual),float(estimate),nex) for re in response: if not re in r: r.append(re) except Exception as ex: pass nex=r #print(nex) #input("Previous Rows") self.fired=0 def learnbook(self,reader,element,accuracy=30,epochs=10,error=0,key=0,SECONDREAD=0): estimate=0 lastcount=1 count=1 rightcount=0 mike=0 check=0 for row in reader: if row.get(element): project_list=list(row.values()) project_list.remove(row.get(element)) estimate=self.forwardpage(project_list) self.backapage(row.get(element),estimate) step=0 temp=0 while step < epochs: lastcount=rightcount consider=[0,0,0,0,0,0,0,0,0,0,0,0,0] count=1 for row in reader: if row.get(element): count+=1 project_list=list(row.values()) if key !=0: project_list.remove(row.get(key)) project_list.remove(row.get(element)) estimate=self.forwardpage(project_list) if row.get(element) !=0: self.backapage(row.get(element),estimate) if error==1: print(estimate) print(row.get(element)) input("pause for error in learnbook") try: temp=int(round(abs(estimate-row.get(element))/accuracy,0)) except: pass try: consider[temp]+=1 except Exception as ex: pass if error==1: print(project_list) print(row.get(element)) print(estimate) print(lastcount) input("pause error 1 in learnbook") cumu=0 rightcount=consider[0]/count if rightcount <check: self.slow() check=rightcount for i in range(len(consider)): cumu+=((consider[i]/count)*100) #print("Within a accuracy " + str(i) + " we had a accuracy of " + str((consider[i]/count)*100) + " with cumulatve of " + str(cumu)) step+=1 #print("New Epoch " + str(step)) if isinstance(SECONDREAD,list): for row in SECONDREAD: project_list=list(row.values()) project_list.remove(row.get(element)) if key !=0: project_list.remove(row.get(key)) estimate=self.forwardpage(project_list) #if estimate < accuracy: # estimate=accuracy if error==2: print(row) print(project_list) input("Error 2 in learnbook") try: row["ESTIMATE"]=round(estimate,0) except: row["ESTIMATE"]="None response from AI, unrecognised engram - pleaser forecast manually" return SECONDREAD def prognosticate(self,reader,key,element): newreader=[] for row in reader: newrow={} project_list=list(row.values()) project_list.remove(row.get(element)) estimate=self.forwardpage(project_list) if estimate < 30: estimate=30 for cortex in reversed(self.mind): for nerve in cortex: nerve.reset() estimate=round(estimate,0) newrow[key]=row[key][-(len(row[key])-(len(key)+1)):] newrow[str(element)+" Estimate"]=estimate newreader.append(newrow.copy()) return newreader def testday(self,reader,accuracy,element,key=0): newreader=[] step=0 count=0 eva=0 eve=0 errors=0 checkframe=[] fileframe=[] column=0 row=0 for row in reader: try: eve+=row.get(element) count+=1 except: print(row) print(row.get(element)) input("error in testday") try: average=eve/count except: average=0 eve=0 count=0 var=0 hypo=0 for row in reader: count+=1 newrow={} project_list=list(row.values()) project_list.remove(row.get(element)) if key !=0: project_list.remove(row.get(key)) estimate=self.forwardpage(project_list) try: eva=estimate-row.get(element) except: errors+=1 if abs(eva) < accuracy: step+=1 var=abs(row.get(element)-average) hypo+=(var*var) eve+=(eva*eva) for cortex in reversed(self.mind): for nerve in cortex: nerve.reset() try: return 
[(step/count),(eve/count),errors,hypo/count,] except: return [0,0,errors,0,] def __init__(self,reader,key,startdate,endate,renamekey,start=1,accuracy=15,csvloci=r'C:\CSVs\\',setcritdelay=14,setalert=0,taskmove=1,setpercntile=0.95,setdependency=1): self.source=[] self.innaccurate=[] self.accuraccy=accuracy self.key=key self.uPDATE=0 self.renamekey=renamekey self.startdate=startdate import os directory=csvloci+'Analysis\\' if not os.path.exists(directory): try: os.makedirs(directory) except OSError: print ('Error: Creating directory. ' + directory) self.csvloci=directory directory=csvloci+'BrainsInAJar\\' if not os.path.exists(directory): try: os.makedirs(directory) except OSError: print ('Error: Creating directory. ' + directory) self.geniusloci=directory directory=csvloci+'Analysis\\' if not os.path.exists(directory): try: os.makedirs(directory) except OSError: print ('Error: Creating directory. ' + directory) self.analysisloci=directory directory=csvloci+'HIVE\\' if not os.path.exists(directory): try: os.makedirs(directory) except OSError: print ('Error: Creating directory. ' + directory) self.hiveloci=directory self.enddate=endate self.hive(reader,startdate) if start!=0: if start=="test": self.randomdata() else: self.swarm() #self.workplanner() def run(self,reader,queenme=0): if len(self.deps)==0: try: self.deps=self.Open(file_Name=self.geniusloci + '\DEPENDENCIES_FILE') if self.deps==False: self.deps={} except: self.deps={} try: self.tickboxes=self.Open(file_Name=self.geniusloci + '\TICKBOX_FILE') if self.tickboxes==False: self.tickboxes={} except: self.tickboxes={} try: self.alerts=self.Open(file_Name=self.geniusloci +'\ALERT_FILE') if self.alerts==False: self.alerts={} except: self.alerts={} try: self.critime=self.Open(file_Name=self.geniusloci +'\CRITIME_FILE') if self.critime==False: self.critime={} except: self.critime={} try: self.hardforward=self.Open(file_Name=self.geniusloci+'\HARD_FILE') if self.hardforward==False: self.hardforward={} except: self.hardforward={} self.hive(reader,self.startdate) x = threading.Thread(target=self.swarm, args=(self.startdate)) x.start() q = threading.Thread(target=self.reforecast, args=()) q.start() if queenme==1: queeme=threading.Thread(target=self.queen, args=()) queeme.start() def reference(self): print("Building the Hive") print("this is the dates i have found") print(self.dates) print(len(self.dates)) print("this is the labels i have found") print(self.kill) print(len(self.kill)) print("this is the numbers i have found") print(self.numbers) print(len(self.numbers)) def hive(self,reader,startdate,error=0): def inreader(row,reader,key): count=0 for newrow in reader: if row[key]==newrow[key]: return count count+=1 return False def addrow(row,startdate): newrow={} newrow["end"]=row[self.enddate] newrow[self.key]=row[self.key] newrow[startdate]=row[startdate] datarea={} for d in self.dates: temp=self.tryfindcmrdates(newrow[startdate],row[d]) try: if temp > 0: dateme[d]=1 except: pass datarea[d]=self.tryfindcmrdates(newrow[startdate],row[d]) #print(datarea.copy()) #input() newrow["Dates"]=datarea.copy() datarea={} for n in self.numbers: try: if isinstance(float(row[n]),(float,int)): datarea[n]=float(row[n]) else: datarea[n]=None except: datarea[n]=None pass newrow["Numbers"]=datarea.copy() for k in self.kill: if k in row: if isinstance(row[k],str): if not self.isdate(row[k]): if not len(row[k])==0: if error==1: print(row[self.key]) print(k) input(row[k]) datarea[k]=str(k)+':' +str(row[k]) newrow["Labels"]=datarea.copy() if row[self.key] in 
tempforecastdates: newrow["Forecast Dates"]=tempforecastdates[row[self.key]] del tempforecastdates[row[self.key]] else: newrow["Forecast Dates"]={} if row[self.key] in tempforecastnumbers: newrow["Forecast Numbers"]=tempforecastnumbers[row[self.key]] del tempforecastnumbers[row[self.key]] else: newrow["Forecast Numbers"]={} newrow["Reforecast Dates"]={} newrow["Overide Dates"]={} newrow["Overide Numbers"]={} return newrow if len(self.source)==0: tech=[] self.dates=[] self.numbers=[] self.kill=[] tempforecastdates={} tempforecastnumbers={} for s in self.source: tempforecastdates[s[self.key]]=s["Forecast Dates"] tempforecastnumbers[s[self.key]]=s["Forecast Numbers"] for row in reader: for cell in row: if self.isdate(row[cell]) and cell !=self.key and cell !=startdate: if not cell in self.dates: self.dates.append(cell) try: if isinstance(float(row[cell]),(float,int)): if cell !=self.key and cell !=startdate: if not cell in self.numbers: self.numbers.append(cell) except: pass if isinstance(row[cell],str) and cell !=self.key and cell !=startdate: if not isinstance(row[cell],(float,int)): if not cell in self.kill: self.kill.append(cell) now='' now=self.today for row in reader: tech.append(addrow(row,self.startdate)) self.source=tech else: temp=[] for row in reader: temp=inreader(source,self.source,self.key) if temp==False: self.source.append(addrow(row,now)) else: for d in self.dates: self.source[temp]["Dates"][d]=row[d] for n in self.numbers: self.source[temp]["Numbers"][n]=row[n] for k in self.kill: self.source[temp]["Labels"][k]=row[k] def swarm(self,error=0): print("Forecasting Dates") for d in self.dates: tempreader=[] otherereader=[] for row in self.source: if not d in row["Labels"]: newrow={} newrow["TARGET"]=row["Dates"][d] for k in row["Labels"]: if k !=d: newrow[k]=row["Labels"][k] newrow[self.key]=row[self.key] if newrow["TARGET"]==None: otherereader.append(newrow.copy()) else: if newrow["TARGET"] < 0: newrow["TARGET"]=0 tempreader.append(newrow.copy()) elif error==1: print(row[self.key]) print(d)
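# ---------------------------------------------------------------------------
# Cleaned-up sketch (not a drop-in replacement) of the activation mix-ins the
# hivemind neurones above are built from.  The original Relu.outputactivation
# carries unreachable sigmoid code after its return; this version keeps only
# the leaky-ReLU behaviour it actually exhibits.
import math

class Relu:
    def outputactivation(self, x):
        # leaky ReLU: pass positives through, scale negatives by 0.1
        return x if x > 0 else x * 0.1

class Sigmoid:
    def outputactivation(self, x):
        return 1.0 / (1.0 + math.exp(-x))

class Tanh:
    def outputactivation(self, x):
        return math.tanh(x)

# The neurone variants are then produced by mixing an activation into the
# base neurone, e.g. `class reluneurone(Relu, neurone): pass`, so forward()
# can call self.outputactivation() without knowing which activation was used.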
# -*- coding: utf-8 -*- # # Copyright (c) 2020 <NAME> <<EMAIL>> # Copyright (c) 2014 <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # import logging from typing import Union from reprlib import repr as _r from . import Packet, DataFormatType from . import (ResponsePacket, SendUnitDataResponsePacket, ReadTagServiceResponsePacket, RegisterSessionResponsePacket, UnRegisterSessionResponsePacket, ListIdentityResponsePacket, SendRRDataResponsePacket, MultiServiceResponsePacket, ReadTagFragmentedServiceResponsePacket, WriteTagServiceResponsePacket, WriteTagFragmentedServiceResponsePacket, GenericUnconnectedResponsePacket, GenericConnectedResponsePacket) from ..exceptions import CommError, RequestError from ..bytes_ import Pack, print_bytes_msg from ..const import (EncapsulationCommand, INSUFFICIENT_PACKETS, DataItem, AddressItem, EXTENDED_SYMBOL, ELEMENT_TYPE, TagService, CLASS_TYPE, INSTANCE_TYPE, DataType, DataTypeSize, ConnectionManagerService, ClassCode, CommonService, STRUCTURE_READ_REPLY, PRIORITY, TIMEOUT_TICKS, ATTRIBUTE_TYPE) class RequestPacket(Packet): __log = logging.getLogger(f'{__module__}.{__qualname__}') _message_type = None _address_type = None _timeout = b'\x0a\x00' # 10 _encap_command = None _response_class = ResponsePacket _response_args = () _response_kwargs = {} type_ = None VERBOSE_DEBUG = False def __init__(self, plc): super().__init__() self._msg = [] # message data self._plc = plc self.error = None def add(self, *value: bytes): self._msg.extend(value) return self @property def message(self) -> bytes: return b''.join(self._msg) def _build_request(self): msg = self._build_common_packet_format(addr_data=self._plc._target_cid) header = self._build_header(self._encap_command, len(msg)) return header + msg def _build_header(self, command, length) -> bytes: """ Build the encapsulate message header The header is 24 bytes fixed length, and includes the command and the length of the optional data portion. 
:return: the header """ try: return b''.join([ command, Pack.uint(length), # Length UINT Pack.udint(self._plc._session), # Session Handle UDINT b'\x00\x00\x00\x00', # Status UDINT self._plc._cfg['context'], # Sender Context 8 bytes Pack.udint(self._plc._cfg['option']), # Option UDINT ]) except Exception as err: raise CommError('Failed to build request header') from err def _build_common_packet_format(self, addr_data=None) -> bytes: addr_data = b'\x00\x00' if addr_data is None else Pack.uint(len(addr_data)) + addr_data msg = self.message return b''.join([ b'\x00\x00\x00\x00', # Interface Handle: shall be 0 for CIP self._timeout, b'\x02\x00', # Item count: should be at list 2 (Address and Data) self._address_type, addr_data, self._message_type, Pack.uint(len(msg)), msg ]) def _send(self, message): """ socket send :return: true if no error otherwise false """ try: if self.VERBOSE_DEBUG: self.__log.debug(print_bytes_msg(message, '>>> SEND >>>')) self._plc._sock.send(message) except Exception as err: raise CommError('failed to send message') from err def _receive(self): """ socket receive :return: reply data """ try: reply = self._plc._sock.receive() except Exception as err: raise CommError('failed to receive reply') from err else: if self.VERBOSE_DEBUG: self.__log.debug(print_bytes_msg(reply, '<<< RECEIVE <<<')) return reply def send(self) -> ResponsePacket: if not self.error: self._send(self._build_request()) self.__log.debug(f'Sent: {self!r}') reply = self._receive() response = self._response_class(reply, *self._response_args, **self._response_kwargs) else: response = self._response_class(*self._response_args, **self._response_kwargs) response._error = self.error self.__log.debug(f'Received: {response!r}') return response def __repr__(self): return f'{self.__class__.__name__}(message={_r(self._msg)})' __str__ = __repr__ class SendUnitDataRequestPacket(RequestPacket): __log = logging.getLogger(f'{__module__}.{__qualname__}') _message_type = DataItem.connected _address_type = AddressItem.connection _response_class = SendUnitDataResponsePacket _encap_command = EncapsulationCommand.send_unit_data def __init__(self, plc): super().__init__(plc) self._msg = [Pack.uint(plc._sequence), ] class ReadTagServiceRequestPacket(SendUnitDataRequestPacket): __log = logging.getLogger(f'{__module__}.{__qualname__}') type_ = 'read' _response_class = ReadTagServiceResponsePacket def __init__(self, plc): super().__init__(plc) self.tag = None self.elements = None self.tag_info = None def add(self, tag, elements=1, tag_info=None): self.tag = tag self.elements = elements self.tag_info = tag_info request_path = _create_tag_rp(self.tag, self._plc.tags, self._plc.use_instance_ids) if request_path is None: self.error = 'Invalid Tag Request Path' super().add( TagService.read_tag, request_path, Pack.uint(self.elements), ) def send(self): if not self.error: self._send(self._build_request()) self.__log.debug(f'Sent: {self!r}') reply = self._receive() response = ReadTagServiceResponsePacket(reply, elements=self.elements, tag_info=self.tag_info, tag=self.tag) else: response = ReadTagServiceResponsePacket(tag=self.tag) response._error = self.error self.__log.debug(f'Received: {response!r}') return response def __repr__(self): return f'{self.__class__.__name__}(tag={self.tag!r}, elements={self.elements!r})' class ReadTagFragmentedServiceRequestPacket(SendUnitDataRequestPacket): __log = logging.getLogger(f'{__module__}.{__qualname__}') type_ = 'read' _response_class = ReadTagFragmentedServiceResponsePacket def __init__(self, 
plc): super().__init__(plc) self.tag = None self.elements = None self.tag_info = None self.request_path = None def add(self, tag, elements=1, tag_info=None): self.tag = tag self.elements = elements self.tag_info = tag_info self.request_path = _create_tag_rp(self.tag, self._plc.tags, self._plc.use_instance_ids) if self.request_path is None: self.error = 'Invalid Tag Request Path' def send(self): if not self.error: offset = 0 responses = [] while offset is not None: self._msg.extend([TagService.read_tag_fragmented, self.request_path, Pack.uint(self.elements), Pack.dint(offset)]) self._send(self._build_request()) self.__log.debug(f'Sent: {self!r} (offset={offset})') reply = self._receive() response = ReadTagFragmentedServiceResponsePacket(reply, self.tag_info, self.elements) self.__log.debug(f'Received: {response!r}') responses.append(response) if response.service_status == INSUFFICIENT_PACKETS: offset += len(response.bytes_) self._msg = [Pack.uint(self._plc._sequence)] else: offset = None if all(responses): final_response = responses[-1] final_response.bytes_ = b''.join(resp.bytes_ for resp in responses) final_response.parse_bytes() self.__log.debug(f'Reassembled Response: {final_response!r}') return final_response failed_response = ReadTagServiceResponsePacket() failed_response._error = self.error or 'One or more fragment responses failed' self.__log.debug(f'Reassembled Response: {failed_response!r}') return failed_response def __repr__(self): return f'{self.__class__.__name__}(tag={self.tag!r}, elements={self.elements!r})' class WriteTagServiceRequestPacket(SendUnitDataRequestPacket): __log = logging.getLogger(f'{__module__}.{__qualname__}') type_ = 'write' _response_class = WriteTagServiceResponsePacket def __init__(self, plc): super().__init__(plc) self.tag = None self.elements = None self.tag_info = None self.value = None self.data_type = None def add(self, tag, value, elements=1, tag_info=None, bits_write=None): self.tag = tag self.elements = elements self.tag_info = tag_info self.value = value request_path = _create_tag_rp(self.tag, self._plc.tags, self._plc.use_instance_ids) if request_path is None: self.error = 'Invalid Tag Request Path' else: if bits_write: request_path = _make_write_data_bit(tag_info, value, request_path) data_type = 'BOOL' else: request_path, data_type = _make_write_data_tag(tag_info, value, elements, request_path) super().add( request_path, ) self.data_type = data_type def __repr__(self): return f'{self.__class__.__name__}(tag={self.tag!r}, value={_r(self.value)}, elements={self.elements!r})' class WriteTagFragmentedServiceRequestPacket(SendUnitDataRequestPacket): __log = logging.getLogger(f'{__module__}.{__qualname__}') type_ = 'write' _response_class = WriteTagFragmentedServiceResponsePacket def __init__(self, plc): super().__init__(plc) self.tag = None self.value = None self.elements = None self.tag_info = None self.request_path = None self.data_type = None self.segment_size = None def add(self, tag, value, elements=1, tag_info=None): try: if tag_info['tag_type'] == 'struct': self._packed_type = STRUCTURE_READ_REPLY + Pack.uint(tag_info['data_type']['template']['structure_handle']) self.data_type = tag_info['data_type']['name'] else: self._packed_type = Pack.uint(DataType[self.data_type]) self.data_type = tag_info['data_type'] self.tag = tag self.value = value self.elements = elements self.tag_info = tag_info self.request_path = _create_tag_rp(self.tag, self._plc.tags, self._plc.use_instance_ids) if self.request_path is None: self.error = 'Invalid Tag Request 
Path' except Exception as err: self.__log.exception('Failed adding request') self.error = err def send(self): if not self.error: responses = [] segment_size = self._plc.connection_size - (len(self.request_path) + len(self._packed_type) + 9) # 9 = len of other stuff in the path pack_func = Pack[self.data_type] if self.tag_info['tag_type'] == 'atomic' else lambda x: x segments = (self.value[i:i+segment_size] for i in range(0, len(self.value), segment_size)) offset = 0 elements_packed = Pack.uint(self.elements) for i, segment in enumerate(segments, start=1): segment_bytes = b''.join(pack_func(s) for s in segment) if not isinstance(segment, bytes) else segment self._msg.extend(( TagService.write_tag_fragmented, self.request_path, self._packed_type, elements_packed, Pack.dint(offset), segment_bytes )) self._send(self._build_request()) self.__log.debug(f'Sent: {self!r} (part={i} offset={offset})') reply = self._receive() response = WriteTagFragmentedServiceResponsePacket(reply) self.__log.debug(f'Received: {response!r}') responses.append(response) offset += len(segment_bytes) self._msg = [Pack.uint(self._plc._sequence), ] if all(responses): final_response = responses[-1] self.__log.debug(f'Reassembled Response: {final_response!r}') return final_response failed_response = WriteTagFragmentedServiceResponsePacket() failed_response._error = self.error or 'One or more fragment responses failed' self.__log.debug(f'Reassembled Response: {failed_response!r}') return failed_response class MultiServiceRequestPacket(SendUnitDataRequestPacket): __log = logging.getLogger(f'{__module__}.{__qualname__}') type_ = 'multi' _response_class = MultiServiceResponsePacket def __init__(self, plc): super().__init__(plc) self.tags = [] self._msg.extend(( CommonService.multiple_service_request, # the Request Service Pack.usint(2), # the Request Path Size length in word CLASS_TYPE["8-bit"], ClassCode.message_router, INSTANCE_TYPE["8-bit"], b'\x01', # Instance 1 )) self._message = None self._msg_errors = None @property def message(self) -> bytes: return self._message def build_message(self, tags): rp_list, errors = [], [] for tag in tags: if tag['rp'] is None: errors.append(f'Unable to create request path {tag["tag"]}') else: rp_list.append(tag['rp']) offset = len(rp_list) * 2 + 2 offsets = [] for rp in rp_list: offsets.append(Pack.uint(offset)) offset += len(rp) msg = self._msg + [Pack.uint(len(rp_list))] + offsets + rp_list return b''.join(msg) def add_read(self, tag, elements=1, tag_info=None): request_path = _create_tag_rp(tag, self._plc.tags, self._plc.use_instance_ids) if request_path is not None: request_path = TagService.read_tag + request_path + Pack.uint(elements) _tag = {'tag': tag, 'elements': elements, 'tag_info': tag_info, 'rp': request_path, 'service': 'read'} message = self.build_message(self.tags + [_tag]) if len(message) < self._plc.connection_size: self._message = message self.tags.append(_tag) return True else: return False else: self.__log.error(f'Failed to create request
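# ---------------------------------------------------------------------------
# Standalone sketch, using struct in place of the library's Pack helpers, of
# the fixed 24-byte EtherNet/IP encapsulation header that _build_header()
# above assembles: command, length, session handle, status, sender context,
# and options, all little-endian.
import struct

def build_encap_header(command: bytes, length: int, session: int,
                       context: bytes = b'\x00' * 8, option: int = 0) -> bytes:
    """command is the 2-byte encapsulation command; length counts the data that follows."""
    return b''.join([
        command,                     # encapsulation command (2 bytes)
        struct.pack('<H', length),   # length of the data portion (UINT)
        struct.pack('<I', session),  # session handle (UDINT)
        b'\x00\x00\x00\x00',         # status, always 0 on requests (UDINT)
        context,                     # sender context (8 bytes, echoed back by the target)
        struct.pack('<I', option),   # options (UDINT)
    ])

# The header is always 24 bytes, e.g.:
# assert len(build_encap_header(b'\x6f\x00', 0, 0)) == 24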
method get_corporations_corporation_id_outposts_outpost_id" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'corporation_id' is set if ('corporation_id' not in params or params['corporation_id'] is None): raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_outposts_outpost_id`") # noqa: E501 # verify the required parameter 'outpost_id' is set if ('outpost_id' not in params or params['outpost_id'] is None): raise ValueError("Missing the required parameter `outpost_id` when calling `get_corporations_corporation_id_outposts_outpost_id`") # noqa: E501 if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501 raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_outposts_outpost_id`, must be a value greater than or equal to `1`") # noqa: E501 collection_formats = {} path_params = {} if 'corporation_id' in params: path_params['corporation_id'] = params['corporation_id'] # noqa: E501 if 'outpost_id' in params: path_params['outpost_id'] = params['outpost_id'] # noqa: E501 query_params = [] if 'datasource' in params: query_params.append(('datasource', params['datasource'])) # noqa: E501 if 'token' in params: query_params.append(('token', params['token'])) # noqa: E501 if 'user_agent' in params: query_params.append(('user_agent', params['user_agent'])) # noqa: E501 header_params = {} if 'x_user_agent' in params: header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501 form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['evesso'] # noqa: E501 return self.api_client.call_api( '/v1/corporations/{corporation_id}/outposts/{outpost_id}/', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='GetCorporationsCorporationIdOutpostsOutpostIdOk', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_corporations_corporation_id_roles(self, corporation_id, **kwargs): # noqa: E501 """Get corporation member roles # noqa: E501 Return the roles of all members if the character has the personnel manager role or any grantable role. --- This route is cached for up to 3600 seconds # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_corporations_corporation_id_roles(corporation_id, async=True) >>> result = thread.get() :param async bool :param int corporation_id: An EVE corporation ID (required) :param str datasource: The server name you would like data from :param str token: Access token to use if unable to set a header :param str user_agent: Client identifier, takes precedence over headers :param str x_user_agent: Client identifier, takes precedence over User-Agent :return: list[GetCorporationsCorporationIdRoles200Ok] If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_corporations_corporation_id_roles_with_http_info(corporation_id, **kwargs) # noqa: E501 else: (data) = self.get_corporations_corporation_id_roles_with_http_info(corporation_id, **kwargs) # noqa: E501 return data def get_corporations_corporation_id_roles_with_http_info(self, corporation_id, **kwargs): # noqa: E501 """Get corporation member roles # noqa: E501 Return the roles of all members if the character has the personnel manager role or any grantable role. --- This route is cached for up to 3600 seconds # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_corporations_corporation_id_roles_with_http_info(corporation_id, async=True) >>> result = thread.get() :param async bool :param int corporation_id: An EVE corporation ID (required) :param str datasource: The server name you would like data from :param str token: Access token to use if unable to set a header :param str user_agent: Client identifier, takes precedence over headers :param str x_user_agent: Client identifier, takes precedence over User-Agent :return: list[GetCorporationsCorporationIdRoles200Ok] If the method is called asynchronously, returns the request thread. """ all_params = ['corporation_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_corporations_corporation_id_roles" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'corporation_id' is set if ('corporation_id' not in params or params['corporation_id'] is None): raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_roles`") # noqa: E501 if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501 raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_roles`, must be a value greater than or equal to `1`") # noqa: E501 collection_formats = {} path_params = {} if 'corporation_id' in params: path_params['corporation_id'] = params['corporation_id'] # noqa: E501 query_params = [] if 'datasource' in params: query_params.append(('datasource', params['datasource'])) # noqa: E501 if 'token' in params: query_params.append(('token', params['token'])) # noqa: E501 if 'user_agent' in params: query_params.append(('user_agent', params['user_agent'])) # noqa: E501 header_params = {} if 'x_user_agent' in params: header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501 form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['evesso'] # noqa: E501 return self.api_client.call_api( '/v1/corporations/{corporation_id}/roles/', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='list[GetCorporationsCorporationIdRoles200Ok]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), 
_preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_corporations_corporation_id_roles_history(self, corporation_id, **kwargs): # noqa: E501 """Get corporation member roles history # noqa: E501 Return how roles have changed for a coporation's members, up to a month --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_corporations_corporation_id_roles_history(corporation_id, async=True) >>> result = thread.get() :param async bool :param int corporation_id: An EVE corporation ID (required) :param str datasource: The server name you would like data from :param int page: Which page of results to return :param str token: Access token to use if unable to set a header :param str user_agent: Client identifier, takes precedence over headers :param str x_user_agent: Client identifier, takes precedence over User-Agent :return: list[GetCorporationsCorporationIdRolesHistory200Ok] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_corporations_corporation_id_roles_history_with_http_info(corporation_id, **kwargs) # noqa: E501 else: (data) = self.get_corporations_corporation_id_roles_history_with_http_info(corporation_id, **kwargs) # noqa: E501 return data def get_corporations_corporation_id_roles_history_with_http_info(self, corporation_id, **kwargs): # noqa: E501 """Get corporation member roles history # noqa: E501 Return how roles have changed for a coporation's members, up to a month --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_corporations_corporation_id_roles_history_with_http_info(corporation_id, async=True) >>> result = thread.get() :param async bool :param int corporation_id: An EVE corporation ID (required) :param str datasource: The server name you would like data from :param int page: Which page of results to return :param str token: Access token to use if unable to set a header :param str user_agent: Client identifier, takes precedence over headers :param str x_user_agent: Client identifier, takes precedence over User-Agent :return: list[GetCorporationsCorporationIdRolesHistory200Ok] If the method is called asynchronously, returns the request thread. 
""" all_params = ['corporation_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_corporations_corporation_id_roles_history" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'corporation_id' is set if ('corporation_id' not in params or params['corporation_id'] is None): raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_roles_history`") # noqa: E501 if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501 raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_roles_history`, must be a value greater than or equal to `1`") # noqa: E501 collection_formats = {} path_params = {} if 'corporation_id' in params: path_params['corporation_id'] = params['corporation_id'] # noqa: E501 query_params = [] if 'datasource' in params: query_params.append(('datasource', params['datasource'])) # noqa: E501 if 'page' in params: query_params.append(('page', params['page'])) # noqa: E501 if 'token' in params: query_params.append(('token', params['token'])) # noqa: E501 if 'user_agent' in params: query_params.append(('user_agent', params['user_agent'])) # noqa: E501 header_params = {} if 'x_user_agent' in params: header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501 form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['evesso'] # noqa: E501 return self.api_client.call_api( '/v1/corporations/{corporation_id}/roles/history/', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='list[GetCorporationsCorporationIdRolesHistory200Ok]',
<reponame>tmct/statsmodels # -*- coding: utf-8 -*- """ Distance dependence measure and the dCov test. Implementation of Székely et al. (2007) calculation of distance dependence statistics, including the Distance covariance (dCov) test for independence of random vectors of arbitrary length. Author: <NAME> References ---------- .. <NAME>., <NAME>., and <NAME>. (2007) "Measuring and testing dependence by correlation of distances". Annals of Statistics, Vol. 35 No. 6, pp. 2769-2794. """ import numpy as np import warnings from collections import namedtuple from scipy.spatial.distance import pdist, squareform from scipy.stats import norm DistDependStat = namedtuple( "DistDependStat", ["test_statistic", "distance_correlation", "distance_covariance", "dvar_x", "dvar_y", "S"], ) def distance_covariance_test(x, y, B=None, method="auto"): r"""The Distance Covariance (dCov) test Apply the Distance Covariance (dCov) test of independence to `x` and `y`. This test was introduced in [1]_, and is based on the distance covariance statistic. The test is applicable to random vectors of arbitrary length (see the notes section for more details). Parameters ---------- x : array_like, 1-D or 2-D If `x` is 1-D than it is assumed to be a vector of observations of a single random variable. If `x` is 2-D than the rows should be observations and the columns are treated as the components of a random vector, i.e., each column represents a different component of the random vector `x`. y : array_like, 1-D or 2-D Same as `x`, but only the number of observation has to match that of `x`. If `y` is 2-D note that the number of columns of `y` (i.e., the number of components in the random vector) does not need to match the number of columns in `x`. B : int, optional, default=`None` The number of iterations to perform when evaluating the null distribution of the test statistic when the `emp` method is applied (see below). if `B` is `None` than as in [1]_ we set `B` to be ``B = 200 + 5000/n``, where `n` is the number of observations. method : {'auto', 'emp', 'asym'}, optional, default=auto The method by which to obtain the p-value for the test. - `auto` : Default method. The number of observations will be used to determine the method. - `emp` : Empirical evaluation of the p-value using permutations of the rows of `y` to obtain the null distribution. - `asym` : An asymptotic approximation of the distribution of the test statistic is used to find the p-value. Returns ------- test_statistic : float The value of the test statistic used in the test. pval : float The p-value. chosen_method : str The method that was used to obtain the p-value. Mostly relevant when the function is called with `method='auto'`. Notes ----- The test applies to random vectors of arbitrary dimensions, i.e., `x` can be a 1-D vector of observations for a single random variable while `y` can be a `k` by `n` 2-D array (where `k > 1`). In other words, it is also possible for `x` and `y` to both be 2-D arrays and have the same number of rows (observations) while differing in the number of columns. As noted in [1]_ the statistics are sensitive to all types of departures from independence, including nonlinear or nonmonotone dependence structure. References ---------- .. [1] <NAME>., <NAME>., and <NAME>. (2007) "Measuring and testing by correlation of distances". Annals of Statistics, Vol. 35 No. 6, pp. 2769-2794. Examples -------- >>> from statsmodels.stats.dist_dependence_measures import ... 
distance_covariance_test >>> data = np.random.rand(1000, 10) >>> x, y = data[:, :3], data[:, 3:] >>> x.shape (1000, 3) >>> y.shape (1000, 7) >>> distance_covariance_test(x, y) (1.0426404792714983, 0.2971148340813543, 'asym') # (test_statistic, pval, chosen_method) """ x, y = _validate_and_tranform_x_and_y(x, y) n = x.shape[0] stats = distance_statistics(x, y) if method == "auto" and n <= 500 or method == "emp": chosen_method = "emp" test_statistic, pval = _empirical_pvalue(x, y, B, n, stats) elif method == "auto" and n > 500 or method == "asym": chosen_method = "asym" test_statistic, pval = _asymptotic_pvalue(stats) else: raise ValueError("Unknown 'method' parameter: {}".format(method)) # In case we got an extreme p-value (0 or 1) when using the empirical # distribution of the test statistic under the null, we fall back # to the asymptotic approximation. if chosen_method == "emp" and pval in [0, 1]: msg = ( "p-value was {} when using the empirical method. ".format(pval) + "The asymptotic approximation will be used instead" ) warnings.warn(msg) _, pval = _asymptotic_pvalue(stats) return test_statistic, pval, chosen_method def _validate_and_tranform_x_and_y(x, y): r"""Ensure `x` and `y` have proper shape and transform/reshape them if required. Parameters ---------- x : array_like, 1-D or 2-D If `x` is 1-D than it is assumed to be a vector of observations of a single random variable. If `x` is 2-D than the rows should be observations and the columns are treated as the components of a random vector, i.e., each column represents a different component of the random vector `x`. y : array_like, 1-D or 2-D Same as `x`, but only the number of observation has to match that of `x`. If `y` is 2-D note that the number of columns of `y` (i.e., the number of components in the random vector) does not need to match the number of columns in `x`. Returns ------- x : array_like, 1-D or 2-D y : array_like, 1-D or 2-D Raises ------ ValueError If `x` and `y` have a different number of observations. """ x = np.asanyarray(x) y = np.asanyarray(y) if x.shape[0] != y.shape[0]: raise ValueError( "x and y must have the same number of observations (rows)." ) if len(x.shape) == 1: x = x.reshape((x.shape[0], 1)) if len(y.shape) == 1: y = y.reshape((y.shape[0], 1)) return x, y def _empirical_pvalue(x, y, B, n, stats): r"""Calculate the empirical p-value based on permutations of `y`'s rows Parameters ---------- x : array_like, 1-D or 2-D If `x` is 1-D than it is assumed to be a vector of observations of a single random variable. If `x` is 2-D than the rows should be observations and the columns are treated as the components of a random vector, i.e., each column represents a different component of the random vector `x`. y : array_like, 1-D or 2-D Same as `x`, but only the number of observation has to match that of `x`. If `y` is 2-D note that the number of columns of `y` (i.e., the number of components in the random vector) does not need to match the number of columns in `x`. B : int The number of iterations when evaluating the null distribution. n : Number of observations found in each of `x` and `y`. stats: namedtuple The result obtained from calling ``distance_statistics(x, y)``. Returns ------- test_statistic : float The empirical test statistic. pval : float The empirical p-value. 
""" B = int(B) if B else int(np.floor(200 + 5000 / n)) empirical_dist = _get_test_statistic_distribution(x, y, B) pval = 1 - np.searchsorted( sorted(empirical_dist), stats.test_statistic ) / len(empirical_dist) test_statistic = stats.test_statistic return test_statistic, pval def _asymptotic_pvalue(stats): r"""Calculate the p-value based on an approximation of the distribution of the test statistic under the null. Parameters ---------- stats: namedtuple The result obtained from calling ``distance_statistics(x, y)``. Returns ------- test_statistic : float The test statistic. pval : float The asymptotic p-value. """ test_statistic = np.sqrt(stats.test_statistic / stats.S) pval = (1 - norm.cdf(test_statistic)) * 2 return test_statistic, pval def _get_test_statistic_distribution(x, y, B): r""" Parameters ---------- x : array_like, 1-D or 2-D If `x` is 1-D than it is assumed to be a vector of observations of a single random variable. If `x` is 2-D than the rows should be observations and the columns are treated as the components of a random vector, i.e., each column represents a different component of the random vector `x`. y : array_like, 1-D or 2-D Same as `x`, but only the number of observation has to match that of `x`. If `y` is 2-D note that the number of columns of `y` (i.e., the number of components in the random vector) does not need to match
<filename>src/controller/GameManager.py import random #import when running testMain.py in src from model.Player import Player #from ..model.Player import Player from model.card.Sanity import Sanity #from ..model.card.Sanity import Sanity from model.card.saneCard import * #from ..model.card.saneCard import * from model.card.insaneCard import * #from ..model.card.insaneCard import * from model.ai.Agent import Agent from model.ai.AIActionsEnum import AIActionsEnum class GameManager: def __init__(self, view, nbPlayer, nbAI = 0) : if (nbPlayer + nbAI) < 2 : raise Exception("At least 2 player are needed !!") self.allAI = (nbPlayer == 0) self.view = view # players indice to the player currently playing. self.currentPlayer = 0 # Count round from the beginning, negative number means that the game isn't started. self.roundNumber = -1 # List of card defining deck. self.deck = [] self.players = [] # List of cards that are removed at the beginnig of a round self.removedCards = [] # Instantiate as many players as nbPlayer defines. for _i in range(nbPlayer) : self.players.append(Player(0, 0, [], [], False, True, False)) for _i in range(nbAI) : self.players.append(Agent(0, 0, [], [], False, True, False)) ## Builds deck by creating card list & shuffles it. @staticmethod def buildDeck() : deck = [] # Fill the liste # Sane Cards # Once in the deck deck.append(TheNecronomicon()) deck.append(TheSilverKey()) deck.append(RandolphCarter()) # Twice in the deck for _i in range(2) : deck.append(ProfessorHenryArmitage()) deck.append(ElderSign()) deck.append(GreatRaceOfYith()) deck.append(CatsOfUlthar()) # Five times in the deck for _i in range(5) : deck.append(Investigators()) # Insane cards # once in the deck deck.append(Cthulhu()) deck.append(TheShiningTrapezohedron()) deck.append(Nyarlathotep()) deck.append(MiGo()) deck.append(LiberIvonis()) deck.append(HoundOfTindalos()) deck.append(GoldenMead()) deck.append(DeepOnes()) # Shuffle for _i in range(1) : random.shuffle(deck) return deck ## Builds a hand by drawing def buildHand(self) : hand = [] hand.append(self.deck.pop()) return hand ## Determines if the game is finished. def isGameEnd(self) : for i in range(len(self.players)) : player = self.players[i] if player.getSaneToken() >= 2 : return i if player.getInsaneToken() >= 3 : return i if player.getDiscard() : lastCardPlayed = player.getDiscard()[-1] if (isinstance(lastCardPlayed, Cthulhu) and lastCardPlayed.sanity == Sanity.INSANE) : return i return -1 # -1 if no player end the game. ## Start a new round. def startNewRound(self) : self.currentPlayer = 0 self.roundNumber += 1 self.deck = GameManager.buildDeck() #reset removedCards self.removedCards = [] # Remove the top card of the deck. self.removedCards.append(self.deck.pop()) # If 2 players game remove 5 other cards. if len(self.players) <= 2 : for _i in range(5) : self.removedCards.append(self.deck.pop()) # Give some cards to players for player in self.players : player.setHand(self.buildHand()) player.setDiscard([]) # Reset their states player.setKnockedOut(False) player.setImmune(False) player.setKnockableOut(True) ## Apply effect of the card choosen by the player. ## @params cardNumber index of card in the hand of currentPlayer. def play(self, cardNumber) : currentPlayer = self.players[self.currentPlayer] #delete the immunity of the player if he was immune in the last round if currentPlayer.getImmune() : currentPlayer.setImmune(False) # The card that the player want to play. 
card = currentPlayer.getCardFromHand(cardNumber) #the card that is played while (not self.checkPlayableCard(card)) : currentPlayer.pickUp(card) self.view.cardCantBePlayed() card = currentPlayer.getCardFromHand(self.view.cardToPlay(currentPlayer.getHand())) card.sanity = self.askInsanity(card) # Apply card effect card.effect(self) # Push on the discard stack. currentPlayer.addDiscardedCard(card) #Apply effect of the card choosen by the Agent def playAI(self,cardEffectValue): currentPlayer = self.players[self.currentPlayer] choosenCard = None #delete the immunity of the player if he was immune in the last round if(currentPlayer.getImmune()): currentPlayer.setImmune(False) #TODO improve if cardEffectValue == AIActionsEnum.CatsOfUltharSane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],CatsOfUlthar): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.SANE break if cardEffectValue == AIActionsEnum.ElderSignSane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],ElderSign): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.SANE break if cardEffectValue == AIActionsEnum.GreatRaceOfYithSane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],GreatRaceOfYith): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.SANE break if cardEffectValue == AIActionsEnum.InvestigatorSane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],Investigators): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.SANE break if cardEffectValue == AIActionsEnum.ProfessorHenryArmitageSane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],ProfessorHenryArmitage): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.SANE break if cardEffectValue == AIActionsEnum.RandolphCarterSane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],RandolphCarter): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.SANE break if cardEffectValue == AIActionsEnum.TheNecronomiconSane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],TheNecronomicon): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.SANE break if cardEffectValue == AIActionsEnum.TheSilverKeySane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],TheSilverKey): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.SANE break if cardEffectValue == AIActionsEnum.CthulhuInsane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],Cthulhu): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.INSANE break if cardEffectValue == AIActionsEnum.CthulhuSane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],Cthulhu): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.SANE break if cardEffectValue == AIActionsEnum.DeepOnesInsane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],DeepOnes): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.INSANE break if cardEffectValue == AIActionsEnum.DeepOnesSane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],DeepOnes): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.SANE break if 
cardEffectValue == AIActionsEnum.GoldenMeadInsane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],GoldenMead): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.INSANE break if cardEffectValue == AIActionsEnum.GoldenMeadSane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],GoldenMead): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.SANE break if cardEffectValue == AIActionsEnum.HoundOfTindalosInsane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],HoundOfTindalos): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.INSANE break if cardEffectValue == AIActionsEnum.HoundOfTindalosSane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],HoundOfTindalos): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.SANE break if cardEffectValue == AIActionsEnum.LiberIvonisInsane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],LiberIvonis): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.INSANE break if cardEffectValue == AIActionsEnum.LiberIvonisSane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],LiberIvonis): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.SANE break if cardEffectValue == AIActionsEnum.MiGoInsane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],MiGo): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.INSANE break if cardEffectValue == AIActionsEnum.MiGoSane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],MiGo): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.SANE break if cardEffectValue == AIActionsEnum.MiGoBrainCaseInsane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],MiGoBraincase): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.INSANE break if cardEffectValue == AIActionsEnum.MiGoBrainCaseSane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],MiGoBraincase): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.SANE break if cardEffectValue == AIActionsEnum.NyarlathotepInsane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],Nyarlathotep): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.INSANE break if cardEffectValue == AIActionsEnum.NyarlathotepSane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],Nyarlathotep): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.SANE break if cardEffectValue == AIActionsEnum.TheShiningTrapezohedronInsane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],TheShiningTrapezohedron): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.INSANE break if cardEffectValue == AIActionsEnum.TheShiningTrapezohedronSane.value: for i in range(len(currentPlayer.hand)): if isinstance(currentPlayer.hand[i],TheShiningTrapezohedron): choosenCard = currentPlayer.getCardFromHand(i) choosenCard.sanity = Sanity.SANE break if choosenCard is None: raise Exception("No choosen card") if choosenCard is not None: # Display wich card will be played. 
if not self.allAI : self.view.displayCardWillBePlayed(self.currentPlayer, choosenCard) # Apply card effect choosenCard.effect(self) # Push on the discard stack. currentPlayer.addDiscardedCard(choosenCard) ## Check if the round is ended. def isRoundEnd(self) : winner = self.findWinnerWthSpecialEffect() if winner == -1 : # Count how many players are not knocked out nbPlayerAlive = 0 for i in range(len(self.players)) : player = self.players[i] if not player.getKnockedOut() : nbPlayerAlive += 1 winner = i # If no player is left standing there is a tie. # If more than one player is not knocked out there is no winner yet. if nbPlayerAlive == 0 : winner = -2 elif nbPlayerAlive > 1 : winner = -1 # The deck is empty: each player plays the card in hand and the highest value wins. if (not self.deck) and (winner == -1) : greatestValue = -1 for i in range(len(self.players)) : player = self.players[i] if not player.getKnockedOut() : value = player.getHand()[0].getValue() if value > greatestValue : greatestValue = value winner = i elif value == greatestValue : player.setKnockedOut(True) self.players[winner].setKnockedOut(True) winner = self.isRoundEnd() break if winner == -1 : raise Exception("A winner has to be found !!!") return winner ## @return the player who is playing. def getCurrentPlayer(self) : return self.players[self.currentPlayer] ## @params nbPlayer number of players to ask for. ## @params allowCurrentPlayer whether the current player can target himself or not. ## @return targets chosen by the current player. def chooseTargetPlayer(self, nbPlayer, allowCurrentPlayer) : notImmunePlayers = [] for player in self.players : if (not player.getImmune()) and ((player != self.getCurrentPlayer()) or (allowCurrentPlayer)): notImmunePlayers.append(player) #AI playing if isinstance(self.getCurrentPlayer(),Agent): if notImmunePlayers: return random.sample(notImmunePlayers,nbPlayer) else: return [] #TODO Add feedback if no player can be targeted. return self.view.chooseTargetPlayer(nbPlayer, notImmunePlayers) if notImmunePlayers else [] def getPlayers(self) : return self.players def checkPlayableCard(self, card) : otherCard = self.getCurrentPlayer().getHand()[0]
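# Refactoring sketch for the long if-chain in playAI() above: every branch scans
# the hand for one card class and then sets a sanity, so the dispatch can be
# table-driven. This is only a sketch against the names used in this module
# (AIActionsEnum members, the card classes, Sanity, Player accessors); the helper
# _find_card_in_hand and the function playAI_sketch are hypothetical additions,
# not part of the original code.

_AI_ACTION_TABLE = {
    AIActionsEnum.CatsOfUltharSane.value: (CatsOfUlthar, Sanity.SANE),
    AIActionsEnum.ElderSignSane.value: (ElderSign, Sanity.SANE),
    AIActionsEnum.CthulhuSane.value: (Cthulhu, Sanity.SANE),
    AIActionsEnum.CthulhuInsane.value: (Cthulhu, Sanity.INSANE),
    # ... one entry per AIActionsEnum member, mirroring the if-chain above.
}

def _find_card_in_hand(player, card_cls):
    # Return the index of the first card of type card_cls in the hand, or -1.
    for i, card in enumerate(player.hand):
        if isinstance(card, card_cls):
            return i
    return -1

def playAI_sketch(self, cardEffectValue):
    currentPlayer = self.players[self.currentPlayer]
    # Immunity only lasts one round.
    if currentPlayer.getImmune():
        currentPlayer.setImmune(False)
    card_cls, sanity = _AI_ACTION_TABLE[cardEffectValue]
    index = _find_card_in_hand(currentPlayer, card_cls)
    if index < 0:
        raise Exception("No matching card in hand")
    choosenCard = currentPlayer.getCardFromHand(index)
    choosenCard.sanity = sanity
    if not self.allAI:
        self.view.displayCardWillBePlayed(self.currentPlayer, choosenCard)
    choosenCard.effect(self)
    currentPlayer.addDiscardedCard(choosenCard)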
Metadata.update(imdbid, tmdbid) if r['response'] is True: return {'response': True, 'message': _('Metadata updated.')} else: return r @cherrypy.expose @cherrypy.tools.json_out() def single_movie_details(self, key, value): ''' Gets single movie's details from database key (str): key for sql.get_movie_details value (str): value for sql.get_movie_details Returns dict ''' return core.sql.get_movie_details(key, value) @cherrypy.expose @cherrypy.tools.json_out() def set_movie_details(self, data): ''' Updates movie in database data (dict): movie fields and values to update data *must* include valid tmdbid Returns dict ''' data = json.loads(data) tmdbid = data.pop('tmdbid') if not core.sql.update_multiple_values('MOVIES', data, 'tmdbid', tmdbid): return {'response': False, 'error': Errors.database_write} else: return {'response': True, 'message': 'Database Updated'} @cherrypy.expose @cherrypy.tools.json_out() def get_kodi_movies(self, url): ''' Gets list of movies from kodi server url (str): url of kodi server Calls Kodi import method to gather list. Returns dict ajax-style response ''' return library.ImportKodiLibrary.get_movies(url) @cherrypy.expose def import_kodi_movies(self, movies): ''' Imports list of movies in movies from Kodi library movie_data (str): json-formatted list of dicts of movies Iterates through movies and gathers all required metadata. If imported, generates and stores fake search result. Creates dict {'success': [], 'failed': []} and appends movie data to the appropriate list. Yeilds dict ajax-style response ''' movies = json.loads(movies) fake_results = [] success = [] length = len(movies) progress = 1 logging.info('Adding {} Kodi movies to library.'.format(length)) for movie in movies: if not movie['imdbid']: yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format("NONE")}) progress += 1 continue tmdb_data = TheMovieDatabase._search_imdbid(movie['imdbid']) if not tmdb_data or not tmdb_data[0].get('id'): yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format(movie['imdbid'])}) progress += 1 continue tmdb_data = tmdb_data[0] movie['id'] = tmdb_data['id'] movie['size'] = 0 movie['status'] = 'Disabled' movie['predb'] = 'found' movie['finished_file'] = (movie.get('finished_file') or '').strip() movie['origin'] = 'Kodi Import' response = Manage.add_movie(movie) if response['response'] is True: fake_results.append(searchresults.generate_simulacrum(movie)) yield json.dumps({'response': True, 'progress': [progress, length], 'title': movie['title'], 'imdbid': movie['imdbid']}) progress += 1 success.append(movie) continue else: yield json.dumps({'response': False, 'title': movie['title'], 'imdbid': movie['imdbid'], 'progress': [progress, length], 'error': response['error']}) progress += 1 continue fake_results = searchresults.score(fake_results, imported=True) for i in success: for r in fake_results: if r['imdbid'] == i['imdbid']: core.sql.update('MOVIES', 'finished_score', r['score'], 'imdbid', i['imdbid']) break core.sql.write_search_results(fake_results) import_kodi_movies._cp_config = {'response.stream': True, 'tools.gzip.on': False} @cherrypy.expose @cherrypy.tools.json_out() def upload_plex_csv(self, file_input): ''' Recieves upload of csv from browser file_input (b'str): csv file fo read Reads/parses csv file into a usable dict Returns dict ajax-style response ''' try: csv_text = file_input.file.read().decode('utf-8') file_input.file.close() 
except Exception as e: logging.error('Unable to parse Plex CSV', exc_info=True) return {'response': False, 'error': str(e)} if csv_text: return library.ImportPlexLibrary.read_csv(csv_text) else: return {'response': True, 'complete': [], 'incomplete': []} @cherrypy.expose def import_plex_csv(self, movies, corrected_movies): ''' Imports list of movies genrated by csv import movie_data (list): dicts of movie info ready to import corrected_movies (list): dicts of user-corrected movie info Iterates through corrected_movies and attmpts to get metadata again if required. If imported, generates and stores fake search result. Creates dict {'success': [], 'failed': []} and appends movie data to the appropriate list. Yeilds dict ajax-style response ''' movie_data = json.loads(movies) corrected_movies = json.loads(corrected_movies) fake_results = [] success = [] length = len(movie_data) + len(corrected_movies) progress = 1 if corrected_movies: logging.info('Adding {} Plex movies to library.'.format(len(corrected_movies))) for movie in corrected_movies: tmdbdata = TheMovieDatabase._search_imdbid(movie['imdbid']) if tmdbdata: tmdbdata = tmdbdata[0] movie['year'] = tmdbdata['release_date'][:4] movie.update(tmdbdata) movie_data.append(movie) else: logging.error(Errors.tmdb_not_found.format(movie['imdbid'])) yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format(movie['imdbid'])}) progress += 1 logging.info('Adding {} Plex movies to library.'.format(length)) for movie in movie_data: logging.info('Importing Plex movie {} {}'.format(movie.get('title', ''), movie.get('year', ''))) fm = False if not movie.get('imdbid') and movie.get('tmdbid'): tmdb_data = TheMovieDatabase._search_tmdbid(movie['tmdbid']) if tmdb_data: movie.update(tmdb_data[0]) fm = True else: yield json.dumps({'response': False, 'progress': [progress, length], 'title': movie['title'], 'error': Errors.tmdb_not_found.format(movie['tmdbid'])}) progress += 1 continue if movie.get('imdbid'): movie['status'] = 'Disabled' movie['predb'] = 'found' movie['origin'] = 'Plex Import' if not movie.get('id'): tmdb_data = TheMovieDatabase._search_imdbid(movie['imdbid']) if tmdb_data: movie.update(tmdb_data[0]) else: yield json.dumps({'response': False, 'progress': [progress, length], 'title': movie['title'], 'error': Errors.tmdb_not_found.format(movie['imdbid'])}) progress += 1 continue response = Manage.add_movie(movie, full_metadata=fm) if response['response'] is True: fake_results.append(searchresults.generate_simulacrum(movie)) yield json.dumps({'response': True, 'progress': [progress, length], 'title': movie['title'], 'imdbid': movie['imdbid']}) progress += 1 success.append(movie) continue else: yield json.dumps({'response': False, 'progress': [progress, length], 'error': response['error'], 'title': movie['title']}) progress += 1 continue else: logging.error(Errors.tmdb_not_found.format(movie['title'])) yield json.dumps({'response': False, 'progress': [progress, length], 'error': _('Unable to find IMDB ID for {} on TheMovieDB.').format(movie['title']), 'title': movie['title']}) progress += 1 continue if fake_results: fake_results = searchresults.score(fake_results, imported=True) for i in success: for r in fake_results: if r['imdbid'] == i['imdbid']: core.sql.update('MOVIES', 'finished_score', r['score'], 'imdbid', i['imdbid']) break if fake_results: core.sql.write_search_results(fake_results) import_plex_csv._cp_config = {'response.stream': True, 'tools.gzip.on': False} @cherrypy.expose 
@cherrypy.tools.json_out() def get_cp_movies(self, url, apikey): ''' Gets movies from CP server url (str): url to cp server apikey (str): cp api key Reads/parses cp api response Returns dict ajax-style response ''' url = '{}/api/{}/movie.list/'.format(url, apikey) if not url.startswith('http'): url = 'http://{}'.format(url) return library.ImportCPLibrary.get_movies(url) @cherrypy.expose def import_cp_movies(self, wanted, finished): ''' Imports movies from CP list to library wanted (list): dicts of wanted movies finished (list): dicts of finished movies Yields dict ajax-style response ''' wanted = json.loads(wanted) finished = json.loads(finished) fake_results = [] success = [] length = len(wanted) + len(finished) progress = 1 logging.info('Adding {} Wanted CouchPotato movies to library.'.format(len(wanted))) for movie in wanted: response = Manage.add_movie(movie, full_metadata=True) if response['response'] is True: yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie}) progress += 1 continue else: yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': response['error']}) progress += 1 continue logging.info('Adding {} Wanted CouchPotato movies to library.'.format(len(finished))) for movie in finished: movie['predb'] = 'found' movie['status'] = 'Disabled' movie['origin'] = 'CouchPotato Import' response = Manage.add_movie(movie, full_metadata=True) if response['response'] is True: fake_results.append(searchresults.generate_simulacrum(movie)) yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie}) progress += 1 success.append(movie) continue else: yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': response['error']}) progress += 1 continue fake_results = searchresults.score(fake_results, imported=True) for i in success: for r in fake_results: if r['imdbid'] == i['imdbid']: core.sql.update('MOVIES', 'finished_score', r['score'], 'imdbid', i['imdbid']) break core.sql.write_search_results(fake_results) import_cp_movies._cp_config = {'response.stream': True, 'tools.gzip.on': False} @cherrypy.expose def manager_backlog_search(self, movies): ''' Bulk manager action for backlog search movies (list): dicts of movies, must contain keys imdbid and tmdbid Yields dict ajax-style response ''' movies = json.loads(movies) logging.info('Performing bulk backlog search for {} movies.'.format(len(movies))) ids = [i['imdbid'] for i in movies] movies = [i for i in core.sql.get_user_movies() if i['imdbid'] in ids] for i, movie in enumerate(movies): title = movie['title'] year = movie['year'] imdbid = movie['imdbid'] year = movie['year'] quality = movie['quality'] logging.info('Performing backlog search for {} {}.'.format(title, year)) if not searcher.search(imdbid, title, year, quality): response = {'response': False, 'error': Errors.database_write, 'imdbid': imdbid, 'index': i + 1} else: response = {'response': True, 'index': i + 1} yield json.dumps(response) manager_backlog_search._cp_config = {'response.stream': True, 'tools.gzip.on': False} @cherrypy.expose def manager_update_metadata(self, movies): ''' Bulk manager action for metadata update movies (list): dicts of movies, must contain keys imdbid and tmdbid Yields dict ajax-style response ''' movies = json.loads(movies) logging.info('Performing bulk metadata update for {} movies.'.format(len(movies))) for i, movie in enumerate(movies): r = Metadata.update(movie.get('imdbid'), movie.get('tmdbid')) if r['response'] is 
False: response = {'response': False, 'error': r['error'], 'imdbid': movie['imdbid'], 'index': i + 1} else: response = {'response': True, 'index': i + 1} yield json.dumps(response) manager_update_metadata._cp_config = {'response.stream': True, 'tools.gzip.on': False} @cherrypy.expose def manager_change_quality(self, movies, quality): ''' Bulk manager action to change movie quality profile movies (list): dicts of movies, must contain keys imdbid quality (str): quality to set movies to Yields dict ajax-style response ''' movies = json.loads(movies) logging.info('Setting quality to {} for: {}'.format(quality, ', '.join(i['imdbid'] for i in movies))) for i, movie in enumerate(movies): if not core.sql.update('MOVIES', 'quality', quality, 'imdbid', movie['imdbid']): response = {'response': False, 'error': Errors.database_write, 'imdbid': movie['imdbid'], 'index': i + 1} else: response = {'response': True, 'index': i + 1} yield json.dumps(response) manager_change_quality._cp_config = {'response.stream': True, 'tools.gzip.on': False} @cherrypy.expose def manager_reset_movies(self, movies): ''' Bulk manager action to reset movies movies (list): dicts of movies, must contain key imdbid Removes all search results Updates database row with db_reset dict Yields dict ajax-style response ''' movies = json.loads(movies) logging.info('Resetting status for {} movies.'.format(len(movies))) for i, movie in enumerate(movies): logging.debug('Resetting {}'.format(movie['imdbid'])) imdbid =
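# Minimal, self-contained sketch of the streaming pattern used by the manager_*
# endpoints above: the handler is a generator that yields one JSON document per
# processed item, and response streaming is enabled (with gzip disabled) through
# _cp_config so the browser can show progress as results arrive. The class and
# method names here (BulkDemo, bulk_action, do_work) are illustrative only.
import json
import cherrypy

class BulkDemo(object):

    @cherrypy.expose
    def bulk_action(self, items):
        items = json.loads(items)
        for i, item in enumerate(items):
            ok = self.do_work(item)  # placeholder for the per-item action
            yield json.dumps({'response': ok, 'index': i + 1})
    # Stream the generator instead of buffering it; gzip must be off for streaming.
    bulk_action._cp_config = {'response.stream': True, 'tools.gzip.on': False}

    def do_work(self, item):
        return True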
<filename>tankobon/ui/gui.py # coding: utf8 """ # tankobon {version} Copyright (c) 2020-2021 <NAME> Licensed under the MIT License. star this project at [ongyx/tankobon](https://github.com/ongyx/tankobon) or something, idk sources: {supported} """ import functools import pathlib import signal import sys import threading import traceback from PySide6.QtCore import Qt, Signal, QSize from PySide6.QtGui import QAction, QIcon, QPixmap from PySide6.QtWidgets import ( QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QInputDialog, QMainWindow, QMessageBox, QMenuBar, QLabel, QListWidget, QListWidgetItem, QProgressDialog, QScrollArea, QSizePolicy, QSpinBox, QSplashScreen, QTabWidget, QTableWidget, # QTableWidgetItem, QToolBar, QToolButton, QVBoxLayout, QWidget, QWidgetItem, ) import natsort # type: ignore from .. import core, iso639, models from ..sources.base import Parser from ..utils import CONFIG from ..__version__ import __version__ from . import common, resources, template, utils _app = QApplication([]) _app.setAttribute(Qt.AA_UseHighDpiPixmaps) QStyle = _app.style() LOGO = QPixmap(":/logo.jpg") HOME = pathlib.Path.home() CACHE = core.Cache() T_ADD = "Add Manga" T_DELETE = "Delete Manga" T_DOWNLOAD = "Download Manga" MANGA = {} MANGA_LOCK = threading.Lock() def _is_ascii(s): try: s.encode(encoding="utf8").decode("ascii") except UnicodeDecodeError: return False else: return True def _normalize(s): return s.replace("_", " ").capitalize() def _load_manga(hash): with MANGA_LOCK: if hash not in MANGA: MANGA[hash] = CACHE.load(hash) return MANGA[hash] def delete(widget): if isinstance(widget, QWidgetItem): widget.widget().close() else: widget.hide() widget.deleteLater() class SpinningCursor: def __enter__(self): _app.setOverrideCursor(Qt.WaitCursor) def __exit__(self, t, v, tb): _app.restoreOverrideCursor() class TitleLabel(QLabel): def __init__(self, *args): super().__init__(*args) self.setTextFormat(Qt.RichText) self.setAlignment(Qt.AlignCenter) self.setWordWrap(True) self.setStyleSheet("background-color: #CCCCFF; color: black;") self.setAutoFillBackground(True) class SubtitleLabel(QLabel): def __init__(self, subtitle): super().__init__(f"<b>{subtitle}</b>") self.setAlignment(Qt.AlignLeft | Qt.AlignTop) # A message box without the window icon. class MessageBox(QMessageBox): def __init__(self, *args): super().__init__(*args) self.setWindowIcon(QIcon(LOGO)) @classmethod def info(cls, title, text): msgbox = cls(cls.Information, title, text, cls.Ok) return msgbox.exec() @classmethod def ask(cls, title, text): msgbox = cls(cls.Question, title, text, cls.Yes | cls.No) return msgbox.exec() @classmethod def warn(cls, title, text): msgbox = cls(cls.Warning, title, text, cls.Ok) return msgbox.exec() @classmethod def crit(cls, title, text): msgbox = cls(cls.Critical, title, text, cls.Ok) return msgbox.exec() def _excepthook(ex_type, ex_value, ex_traceback): MessageBox.crit( "An exception occured.", "".join(traceback.format_exception(ex_type, ex_value, ex_traceback)), ) sys.excepthook = _excepthook # A text dialog that requires input before allowing 'ok' to be pressed. 
class RequiredDialog(QInputDialog): def __init__(self, *args): super().__init__(*args) self.setWindowIcon(QIcon(LOGO)) self.textValueChanged.connect(self.onTextValueChanged) self.setInputMode(self.TextInput) self.setOkButtonText("Ok") self.ok_button, _ = self.findChild(QDialogButtonBox).buttons() self.ok_button.setEnabled(False) def onTextValueChanged(self, text): if text: self.ok_button.setEnabled(True) else: self.ok_button.setEnabled(False) class ProgressDialog(QProgressDialog): def __init__(self, *args): super().__init__(*args) self.setMinimumDuration(0) self.setWindowModality(Qt.WindowModal) self.setAttribute(Qt.WA_DeleteOnClose, True) class LanguageComboBox(QComboBox): def __init__(self): super().__init__() index = None for code, lang in iso639.DATASET.items(): self.addItem(f"{lang.native_name} ({code})", code) if code == CONFIG["lang"]: index = self.count() - 1 self.setCurrentIndex(index) self.currentIndexChanged.connect(self.onCurrentIndexChanged) def onCurrentIndexChanged(self, index): CONFIG["lang"] = self.currentData() class RateLimitSpinBox(QSpinBox): def __init__(self): super().__init__() self.setMinimum(1) self.setValue(CONFIG["download.rate_limit"]) self.valueChanged.connect(self.onValueChanged) def onValueChanged(self, value): CONFIG["download.rate_limit"] = value class DataSaverCheckBox(QCheckBox): def __init__(self): super().__init__("Data saver (low-quality pages)") state = Qt.Unchecked if CONFIG["mangadex.data_saver"]: state = Qt.Checked self.setCheckState(state) self.stateChanged.connect(self.onStateChanged) def onStateChanged(self, state): CONFIG["mangadex.data_saver"] = state == Qt.Checked class SettingsTab(QWidget): def __init__(self): super().__init__() self.layout = QVBoxLayout(self) class Settings(QDialog): def __init__(self, parent): super().__init__(parent) self.setWindowTitle("Settings") layout = QVBoxLayout(self) tabs = QTabWidget() tabs.addTab(self.general(), "General") tabs.addTab(self.downloads(), "Downloads") tabs.addTab(self.sources(), "Sources") layout.addWidget(tabs) buttons = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel) buttons.accepted.connect(self.accept) buttons.rejected.connect(self.reject) layout.addWidget(buttons) def general(self): tab = SettingsTab() tab.layout.addWidget(QLabel("Language")) tab.layout.addWidget(LanguageComboBox()) return tab def downloads(self): tab = SettingsTab() tab.layout.addWidget(QLabel("Rate Limit")) spinbox = RateLimitSpinBox() spinbox.setToolTip( ( "The maximum number of requests that can be made concurrently.\n" "Lower values reduce bandwidth usage but downloads will be slower." ) ) tab.layout.addWidget(spinbox) return tab def sources(self): tab = SettingsTab() tab.layout.addWidget(QLabel("Mangadex")) data_saver = DataSaverCheckBox() data_saver.setToolTip( "Download low-quality pages to save bandwidth and disk space." ) tab.layout.addWidget(data_saver) return tab class AboutBox(MessageBox): def __init__(self, *args): super().__init__(*args) self.setWindowTitle("About") # build table of supported sources supported = [] for cls in Parser.registered: supported.append(f"`{cls.__module__}` ({cls.domain.pattern}) ") self.setTextFormat(Qt.MarkdownText) self.setText( __doc__.format( version=__version__, supported="\n".join(supported), ) ) self.setAttribute(Qt.WA_DeleteOnClose) small_logo = LOGO.scaled( QSize(256, 256), Qt.KeepAspectRatio, Qt.SmoothTransformation ) self.setIconPixmap(small_logo) # A manga item. 
class Item(QListWidgetItem): def __init__(self, meta: models.Metadata): self.meta = meta super().__init__(self.meta.title) # self.setToolTip(", ".join(self.meta.alt_titles)) # A preview of the manga infomation (title, author, etc.) class ItemInfoBox(QWidget): def __init__(self, item: Item): super().__init__() layout = QGridLayout(self) meta = item.meta # infobox spans one row and two columns. SPAN = (1, 2) # wikipedia-style info box at the side title = TitleLabel(f"<h2><i>{meta.title}</i></h2>") layout.addWidget(title, 0, 0, *SPAN) self.cover = QPixmap() try: manga_path = CACHE.root / meta.hash cover_path = next(manga_path.glob("cover.*")) except StopIteration: self.cover.load(":/missing.jpg") else: self.cover.load(str(cover_path)) self.cover = self.cover.scaled( int(self.width() / 2), self.height(), Qt.KeepAspectRatio, Qt.SmoothTransformation, ) self.cover_label = QLabel() self.cover_label.setScaledContents(True) self.cover_label.setPixmap(self.cover) layout.addWidget(self.cover_label, 1, 0, *SPAN) if meta.alt_titles is not None: _alt_titles = "<br>".join( f"<i>{t}</i>" if _is_ascii(t) else t for t in meta.alt_titles ) else: _alt_titles = "(empty)" alt_titles = TitleLabel(_alt_titles) alt_titles.setStyleSheet("background-color: #DDDDFF; color: black;") layout.addWidget(alt_titles, 2, 0, *SPAN) genre_header = SubtitleLabel("Genre") layout.addWidget(genre_header, 3, 0) if meta.genres is not None: _genres = "<br>".join(_normalize(g) for g in meta.genres) else: _genres = "(empty)" genres = QLabel(_genres) layout.addWidget(genres, 3, 1) manga_header = TitleLabel("<b>Manga</b>") layout.addWidget(manga_header, 4, 0, *SPAN) author_header = SubtitleLabel("Authored by") layout.addWidget(author_header, 5, 0) if meta.authors is not None: _authors = "<br>".join(a for a in meta.authors) else: _authors = "(empty)" authors = QLabel(_authors) layout.addWidget(authors, 5, 1) source_header = SubtitleLabel("Source") layout.addWidget(source_header, 6, 0) source = QLabel(f'<a href="{meta.url}">{meta.url}</b>') source.setWordWrap(True) source.setOpenExternalLinks(True) layout.addWidget(source, 6, 1) langs_header = SubtitleLabel("Languages") layout.addWidget(langs_header, 7, 0) manga = CACHE[meta.hash] langs_set = set() for chapter in manga["chapters"].values(): langs_set.update(chapter.keys()) langs = QLabel("<br>".join(common.describe_langs(list(langs_set)))) layout.addWidget(langs, 7, 1) # A list of manga items in the sidebar. class ItemList(QListWidget): def __init__(self): super().__init__() self.setSortingEnabled(True) self.hashs = set() for _, manga in CACHE.data.items(): self.addItem(Item(manga["meta"])) self.reload() def addItem(self, item): self.hashs.add(item.meta.hash) super().addItem(item) def reload(self): self.setMaximumWidth(self.sizeHintForColumn(0) + 5) MANGA_ITEMS = ItemList() class PageViewToolBar(QToolBar): BUTTONS = [ ("start", "chevrons-left"), ("previous", "chevron-left"), ("pageno", ""), ("next", "chevron-right"), ("end", "chevrons-right"), ] setPage = Signal(int) def __init__(self, total): super().__init__() self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) self.setStyleSheet("background-color: red") self.setMovable(True) self.setFloatable(True) self.pageno = 1 self.total = total self.label = QLabel() self.setPage.connect(self.onSetPage) for method_name, icon_path in self.BUTTONS: if method_name == "pageno": self.addWidget(self.label) continue method = getattr(self, method_name) tooltip = f"{method_name.title()} page..." 
icon = utils.icon(icon_path) action = self.addAction(icon, tooltip) action.triggered.connect(method) def onSetPage(self, pageno): self.pageno = pageno self.label.setText(f"{pageno} / {self.total}") actions = self.actions() for action in actions: action.setEnabled(True) # disable start/prev or end/next actions on the first and last page respectively. disable = [] if self.pageno == 1: disable = actions[:2] elif self.pageno == self.total: disable = actions[2:] for action in disable: action.setEnabled(False) def start(self): self.setPage.emit(1) def previous(self): self.setPage.emit(self.pageno - 1) def next(self): self.setPage.emit(self.pageno + 1) def end(self): self.setPage.emit(self.total) class PageView(QScrollArea): def __init__(self, parent, pages): super().__init__(parent) self.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored) self.setStyleSheet("background-color: #000000") self.setWidgetResizable(True) self.pages = pages self.label = QLabel() self.label.setScaledContents(True) self.setWidget(self.label) self.toolbar = PageViewToolBar(len(self.pages)) self.toolbar.setPage.connect(self.onSetPage) self._height = self.label.height() + self.toolbar.height() / 2 self.toolbar.start() def onSetPage(self, pageno): pixmap = QPixmap(self.pages[pageno - 1]) self.label.setPixmap(pixmap) def _download(manga, chapters, downloader): dialog = ProgressDialog() parser = Parser.by_url(manga.meta.url) for count, chapter in enumerate(chapters): dialog.setLabelText(f"Downloading chapter {chapter.id}...") if not chapter.pages: parser.add_pages(chapter) total = len(chapter.pages) # HACK: make the progress bar display properly during first iteration dialog.setMaximum(total) dialog.setValue(total - 1) dialog.setValue(0) if dialog.wasCanceled(): break downloader.download(chapter, progress=dialog.setValue) dialog.setValue(total) # Toolbar at the bottom of the window. # This shows a bunch of buttons to manage manga items (add, remove, etc.) class ToolBar(QToolBar): deletedManga = Signal() BUTTONS = [ ("add", "plus"), ( "delete", "minus", ), ( "refresh", "refresh-cw", ), # ("download", "download",), ("locate", "folder"), ] def __init__(self): super().__init__() MANGA_ITEMS.itemClicked.connect(self.onSelectedManga) self.deletedManga.connect(self.onDeletedManga) self.selected = None self.summaries = {} for method_name, icon_path in self.BUTTONS: method = getattr(self, method_name) tooltip = f"{method_name.title()} a manga..." 
action = QAction() action.setToolTip(tooltip) action.setIcon(utils.icon(icon_path)) action.triggered.connect(method) button = QToolButton() button.setDefaultAction(action) self.addWidget(button) # spacer spacer = QWidget() spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred) self.addWidget(spacer) # text below the manga preview self.summary = QLabel() self.addWidget(self.summary) self.show() def onSelectedManga(self, manga_item): self.selected = manga_item hash = manga_item.meta.hash if hash not in self.summaries: manga = _load_manga(hash) self.summaries[hash] = f"{len(manga.chapters)} chapters" self.summary.setText(self.summaries[hash]) def onDeletedManga(self): self.summary.setText("") def ensureSelected(self, method): if self.selected is None: MessageBox.info( f"{method.capitalize()} Manga", f"Please select a manga to {method} first.", ) return False return True def _refresh(self, manga): with SpinningCursor(): parser = Parser.by_url(manga.meta.url) parser.add_chapters(manga) CACHE.dump(manga) # add to item list (only if manga is new) if manga.meta.hash not in MANGA_ITEMS.hashs: MANGA_ITEMS.addItem(Item(manga.meta)) MANGA_ITEMS.reload() def add(self): dialog = RequiredDialog() dialog.setWindowTitle(T_ADD) dialog.setLabelText("Enter the manga url below:") dialog_code = dialog.exec() if dialog_code == QInputDialog.Rejected: # canceled return url = dialog.textValue() if url in CACHE.alias: MessageBox.info( T_ADD, "Manga already exists in cache. To refresh a manga, select a manga and click the refresh button.", ) return try: parser = Parser.by_url(url) except core.UnknownDomainError: MessageBox.warn( T_ADD, "Manga url is invalid or no parser was found for the url.", ) return manga = parser.create(url) self._refresh(manga) if manga.meta.cover: with core.Downloader(CACHE.root / manga.meta.hash) as downloader: downloader.download_cover(manga) def delete(self): if not self.ensureSelected("delete"): return reply = MessageBox.ask( T_DELETE, "Are you sure you want to delete this manga? This cannot be
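# Generic sketch of the memoize-under-a-lock pattern used by _load_manga() near
# the top of this module (the MANGA dict guarded by MANGA_LOCK): the lock makes
# the check-then-insert atomic, so two GUI callbacks asking for the same hash
# cannot both run the expensive cache load. expensive_load is a stand-in for
# CACHE.load(hash); names below are illustrative only.
import threading

_cache = {}
_cache_lock = threading.Lock()

def expensive_load(key):
    return "loaded:%s" % key  # stand-in for the real loading work

def load_cached(key):
    with _cache_lock:
        if key not in _cache:
            _cache[key] = expensive_load(key)
        return _cache[key]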
<gh_stars>1-10 # -*- coding: utf-8 -*- """This module contains the methods used to deal with BELGraphs.""" import logging from operator import methodcaller import networkx as nx from flask import abort, Response, jsonify, send_file from flask import current_app from itertools import combinations from pybel import collapse_to_genes, to_bel_lines, to_bytes, to_csv, union from pybel.canonicalize import edge_to_bel from pybel.constants import * from pybel.dsl import BaseAbundance from pybel.io import from_bytes from pybel.struct import add_annotation_value from pybel.struct.summary import get_annotation_values_by_annotation from pybel_tools.summary.contradictions import relation_set_has_contradictions from six import BytesIO, StringIO from pathme_viewer.constants import BLACK_LIST, PATHWAYS_ARGUMENT, RESOURCES_ARGUMENT log = logging.getLogger(__name__) def _to_graphml_umbrella(graph): """Convert a BEL graph to GraphML XML file by previously canonicalizing the nodes. :param graph: A BEL graph """ rv = nx.MultiDiGraph() for u, v, key, edge_data in graph.edges(data=True, keys=True): bel_string = edge_to_bel(u, v, edge_data).split(' ') rv.add_edge( bel_string[0], bel_string[2], key=key, relation=edge_data[RELATION], bel=graph.edge_to_bel(u, v, edge_data), ) return rv def to_graphml(graph, path): """Write a graph to a GraphML XML file using :func:`networkx.write_graphml`. :param graph: BEL Graph :param path: Path to the new exported file The .graphml file extension is suggested so Cytoscape can recognize it. By default, this function exports using the PyBEL schema of including modifier information into the edges. As an alternative, this function can also distinguish between """ rv = _to_graphml_umbrella(graph) nx.write_graphml(rv, path) def throw_parameter_error(parameter): """Return 500 error. :param str parameter: :return: HTTP error """ abort(500, '"{}" argument is missing in the request'.format(parameter)) def add_annotation_key(graph): """Add annotation key in data (in place operation). :param pybel.BELGraph graph: BEL Graph """ for u, v, k in graph.edges(keys=True): if ANNOTATIONS not in graph[u][v][k]: graph[u][v][k][ANNOTATIONS] = {} def process_request(request): """Process request and return it as a dict[pathway ids,resources]. :param flask.request request: http request :rtype: dict """ pathways_list = request.args.getlist(PATHWAYS_ARGUMENT) resources_list = request.args.getlist(RESOURCES_ARGUMENT) if not resources_list: throw_parameter_error(RESOURCES_ARGUMENT) if not pathways_list: throw_parameter_error(PATHWAYS_ARGUMENT) return { pathway_id: resource for pathway_id, resource in zip(pathways_list, resources_list) } def get_annotations_from_request(request): """Return dictionary with annotations. :param flask.request request: http request :rtype: dict """ annotations = {} for arguments in request.args.keys(): if arguments in BLACK_LIST: continue annotations[arguments] = request.args.getlist(arguments) return annotations def merge_pathways(pathways): """Return merged graphs from pathways in the request. :param dict pathways: pathways to be merged :rtype: Optional[pybel.BELGraph] """ networks = [] for name, resource in pathways.items(): pathway = current_app.pathme_manager.get_pathway_by_id(name, resource) if not pathway: abort( 500, 'Pathway "{}" in resource "{}" was not found in the database. 
' 'Please check that you have used correctly the autocompletion form.'.format( name, resource) ) # Loads the BELGraph and adds annotations to track provenance later graph = from_bytes(pathway.blob) graph.annotation_list['Database'] = {'kegg', 'reactome', 'wikipathways'} graph.annotation_pattern['PathwayID'] = '.*' graph.annotation_pattern['Pathway name'] = '.*' graph.annotation_list['Interesting edge'] = {'Contradicts', 'May contradict'} add_annotation_key(graph) add_annotation_value(graph, 'Pathway name', pathway.name) add_annotation_value(graph, 'Database', pathway.resource_name) add_annotation_value(graph, 'PathwayID', pathway.pathway_id) log.debug('Adding graph {} {}:with {} nodes and {} edges'.format( name, resource, graph.number_of_nodes(), graph.number_of_edges()) ) networks.append(graph) if not networks: abort( 500, 'Any pathway was requested. Please select at least one pathway.' ) graph = union(networks) graph.name = 'Merged graph from {}'.format([graph.name for graph in networks]) graph.version = '0.0.0' contradicting_edges = get_contradiction_summary(graph) for u, v, _ in contradicting_edges: label_graph_edges(graph, u, v, 'Interesting edge', 'Contradicts') return graph def to_json_custom(graph, _id='id', source='source', target='target'): """Prepares JSON for the biological network explorer :type graph: pybel.BELGraph :param str _id: The key to use for the identifier of a node, which is calculated with an enumeration :param str source: The key to use for the source node :param str target: The key to use for the target node :rtype: dict """ result = {} mapping = {} result['nodes'] = [] for i, node in enumerate(sorted(graph, key=methodcaller('as_bel'))): nd = node.copy() nd[_id] = node.sha512 nd['bel'] = node.as_bel() if VARIANTS in nd or FUSION in nd or MEMBERS in nd: nd['cname'] = nd['bel'] result['nodes'].append(nd) mapping[node] = i edge_set = set() rr = {} for u, v, data in graph.edges(data=True): if data[RELATION] in TWO_WAY_RELATIONS and (u, v) != tuple(sorted((u, v), key=methodcaller('as_bel'))): continue # don't keep two way edges twice entry_code = u, v if entry_code not in edge_set: # Avoids duplicate sending multiple edges between nodes with same relation rr[entry_code] = { source: mapping[u], target: mapping[v], 'contexts': [] } edge_set.add(entry_code) payload = { 'bel': graph.edge_to_bel(u, v, data) } payload.update(data) if data[RELATION] in CAUSAL_INCREASE_RELATIONS: rr[entry_code][RELATION] = INCREASES elif data[RELATION] in CAUSAL_DECREASE_RELATIONS: rr[entry_code][RELATION] = DECREASES rr[entry_code]['contexts'].append(payload) result['links'] = list(rr.values()) return result def export_graph(graph, format=None): """Convert PyBEL graph to a different format. 
:param PyBEL graph graph: graph :param format: desire format :return: graph representation in different format """ if format is None or format == 'json': data = to_json_custom(graph) return jsonify(data) elif format == 'bytes': data = BytesIO(to_bytes(graph)) return send_file( data, mimetype='application/octet-stream', as_attachment=True, attachment_filename='graph.gpickle' ) elif format == 'bel': data = '\n'.join(to_bel_lines(graph)) return Response(data, mimetype='text/plain') elif format == 'graphml': bio = BytesIO() to_graphml(graph, bio) bio.seek(0) return send_file( bio, mimetype='text/xml', attachment_filename='graph.graphml', as_attachment=True ) elif format == 'csv': bio = StringIO() to_csv(graph, bio) bio.seek(0) data = BytesIO(bio.read().encode('utf-8')) return send_file( data, mimetype="text/tab-separated-values", attachment_filename="graph.tsv", as_attachment=True ) abort(500, '{} is not a valid format'.format(format)) def get_tree_annotations(graph): """Build tree structure with annotation for a given graph. :param pybel.BELGraph graph: A BEL Graph :return: The JSON structure necessary for building the tree box :rtype: list[dict] """ annotations = get_annotation_values_by_annotation(graph) return [ { 'text': annotation, 'children': [{'text': value} for value in sorted(values)] } for annotation, values in sorted(annotations.items()) ] def label_graph_edges(graph, u, v, annotation, value): """Label edges between two nodes with an annotation and a value. :param pybel.BELGraph graph: :param u: subject node :param v: object node :param str annotation: annotation to be labelled :param str value: value to be labelled """ if annotation not in graph.defined_annotation_keywords: raise ValueError('annotation not defined: {}'.format(annotation)) if not graph.has_edge(u, v): raise ValueError('edge does not exists') # Iterate over all edges between u and v for k, data in graph[u][v].items(): # Add annotation key in data if not exists if ANNOTATIONS not in data: graph[u][v][k][ANNOTATIONS] = {} if annotation not in data[ANNOTATIONS]: graph[u][v][k][ANNOTATIONS] = {annotation: {}} graph[u][v][k][ANNOTATIONS][annotation][value] = True def relation_set_has_differences(relations): """Return if the set of relations contains differences. :param set[str] relations: A set of relations :rtype: bool """ has_causal = any(relation in CAUSAL_RELATIONS for relation in relations) has_unspecific_event = any(relation in {ASSOCIATION} for relation in relations) return 1 < sum([has_causal, has_unspecific_event]) def get_contradiction_summary(graph): """Yield triplets of (source node, target node, set of relations) for (source node, target node) pairs that have multiple, contradictory relations. :param pybel.BELGraph graph: A BEL graph :rtype: iter[tuple] """ for u, v in set(graph.edges()): relations = {data[RELATION] for data in graph[u][v].values()} if relation_set_has_contradictions(relations): yield u, v, relations def get_pathway_nodes(pathway): """Return single nodes in pathway. :param pathme_viewer.models.Pathway pathway: pathway entry :return: BaseAbundance nodes :rtype: list[pybel.dsl.BaseAbundance] """ # Loads the BELGraph graph = from_bytes(pathway.blob) collapse_to_genes(graph) # Return BaseAbundace BEL nodes return { node.as_bel() for node in graph if isinstance(node, BaseAbundance) } def prepare_venn_diagram_data(manager, pathways): """Prepare Venn Diagram data. 
:param pathme_viewer.manager.Manager manager: Manager :param dict[str,str] pathways: pathway id resource dict :rtype: dict """ pathway_data = {} for pathway_id, resource in pathways.items(): # Get pathway from DB pathway = manager.get_pathway_by_id(pathway_id, resource) # Confirm that pathway exists if not pathway: abort( 500, 'Pathway "{}" in resource "{}" was not found in the database. ' 'Please check that you have used correctly the autocompletion form.'.format( pathway_id, resource) ) # Get pathway nodes nodes = get_pathway_nodes(pathway) pathway_data[pathway.name] = nodes return pathway_data def process_overlap_for_venn_diagram(pathways_nodes, skip_gene_set_info=False): """Calculate gene sets overlaps and process the structure to render venn diagram -> https://github.com/benfred/venn.js/. :param dict[str,set] pathways_nodes: pathway to bel nodes dictionary :param bool skip_gene_set_info: include gene set overlap data :return: list[dict] """ # Creates future js array with gene sets' lengths overlaps_venn_diagram = [] pathway_to_index = {} index = 0 for name, bel_nodes in pathways_nodes.items(): # Only minimum info is returned if skip_gene_set_info: overlaps_venn_diagram.append( {'sets': [index], 'size': len(bel_nodes), 'label': name.upper()} ) # Returns gene set overlap/intersection information as well else: overlaps_venn_diagram.append( {'sets': [index], 'size': len(bel_nodes), 'label': name, 'bel_nodes': list(bel_nodes)} ) pathway_to_index[name] = index index += 1 # Perform intersection calculations for (set_1_name, set_1_values), (set_2_name, set_2_values) in combinations(pathways_nodes.items(), r=2): # Only minimum info is returned if skip_gene_set_info: overlaps_venn_diagram.append( { 'sets': [pathway_to_index[set_1_name], pathway_to_index[set_2_name]], 'size': len(set_1_values.intersection(set_2_values)), } ) # Returns gene set overlap/intersection information as well
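# Illustrative sketch (not part of the original module): how the single-set and
# pairwise entries built by process_overlap_for_venn_diagram end up shaped for
# venn.js. The two pathway names and node sets below are made up for the example.
from itertools import combinations

example_pathways_nodes = {
    'Pathway A': {'p(HGNC:TP53)', 'p(HGNC:MDM2)'},
    'Pathway B': {'p(HGNC:TP53)', 'p(HGNC:AKT1)'},
}

pathway_to_index = {name: i for i, name in enumerate(example_pathways_nodes)}

# one entry per pathway with its own size ...
overlaps = [
    {'sets': [pathway_to_index[name]], 'size': len(nodes), 'label': name}
    for name, nodes in example_pathways_nodes.items()
]
# ... plus one entry per pair with the intersection size
overlaps += [
    {'sets': [pathway_to_index[n1], pathway_to_index[n2]], 'size': len(v1 & v2)}
    for (n1, v1), (n2, v2) in combinations(example_pathways_nodes.items(), r=2)
]
# overlaps now holds two single-set entries plus one pair entry of size 1,
# mirroring the structure assembled above.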
"chemical_substance" }, { "id": "umlscui:C1966623", "type": "chemical_substance" }, { "id": "umlscui:C2961550", "type": "chemical_substance" }, { "id": "umlscui:C0710420", "type": "chemical_substance" }, { "id": "umlscui:C2962545", "type": "chemical_substance" }, { "id": "umlscui:C2962729", "type": "chemical_substance" }, { "id": "umlscui:C2962733", "type": "chemical_substance" }, { "id": "umlscui:C0710394", "type": "chemical_substance" }, { "id": "umlscui:C3249809", "type": "chemical_substance" }, { "id": "umlscui:C2979106", "type": "chemical_substance" }, { "id": "umlscui:C2979108", "type": "chemical_substance" }, { "id": "umlscui:C2979177", "type": "chemical_substance" }, { "id": "umlscui:C0710401", "type": "chemical_substance" }, { "id": "umlscui:C2370127", "type": "chemical_substance" }, { "id": "umlscui:C2370174", "type": "chemical_substance" }, { "id": "umlscui:C2586469", "type": "chemical_substance" }, { "id": "umlscui:C0710316", "type": "chemical_substance" }, { "id": "umlscui:C3245048", "type": "chemical_substance" }, { "id": "umlscui:C3709736", "type": "chemical_substance" }, { "id": "umlscui:C4019484", "type": "chemical_substance" }, { "id": "umlscui:C4237811", "type": "chemical_substance" }, { "id": "umlscui:C2978894", "type": "chemical_substance" }, { "id": "umlscui:C2980718", "type": "chemical_substance" }, { "id": "umlscui:C0710377", "type": "chemical_substance" }, { "id": "umlscui:C3152278", "type": "chemical_substance" }, { "id": "umlscui:C3153662", "type": "chemical_substance" }, { "id": "umlscui:C3154175", "type": "chemical_substance" }, { "id": "umlscui:C3159420", "type": "chemical_substance" }, { "id": "umlscui:C3160222", "type": "chemical_substance" }, { "id": "umlscui:C3163535", "type": "chemical_substance" }, { "id": "umlscui:C3192005", "type": "chemical_substance" }, { "id": "umlscui:C0710423", "type": "chemical_substance" }, { "id": "umlscui:C0710408", "type": "chemical_substance" }, { "id": "umlscui:C0710392", "type": "chemical_substance" }, { "id": "umlscui:C0715994", "type": "chemical_substance" }, { "id": "umlscui:C0715389", "type": "chemical_substance" }, { "id": "umlscui:C0715391", "type": "chemical_substance" }, { "id": "umlscui:C1329338", "type": "chemical_substance" }, { "id": "umlscui:C0697421", "type": "chemical_substance" }, { "id": "umlscui:C1642798", "type": "chemical_substance" }, { "id": "umlscui:C2940395", "type": "chemical_substance" }, { "id": "umlscui:C0710387", "type": "chemical_substance" }, { "id": "umlscui:C0710389", "type": "chemical_substance" }, { "id": "umlscui:C0710297", "type": "chemical_substance" }, { "id": "umlscui:C0710349", "type": "chemical_substance" }, { "id": "umlscui:C0710398", "type": "chemical_substance" }, { "id": "umlscui:C0710301", "type": "chemical_substance" }, { "id": "umlscui:C0710364", "type": "chemical_substance" }, { "id": "umlscui:C0710395", "type": "chemical_substance" }, { "id": "umlscui:C0710300", "type": "chemical_substance" }, { "id": "umlscui:C0710359", "type": "chemical_substance" }, { "id": "umlscui:C0713131", "type": "chemical_substance" }, { "id": "umlscui:C0715978", "type": "chemical_substance" }, { "id": "umlscui:C0710367", "type": "chemical_substance" }, { "id": "umlscui:C0714624", "type": "chemical_substance" }, { "id": "umlscui:C1631089", "type": "chemical_substance" }, { "id": "umlscui:C0711604", "type": "chemical_substance" }, { "id": "umlscui:C0711606", "type": "chemical_substance" }, { "id": "umlscui:C0710400", "type": "chemical_substance" }, { "id": "umlscui:C0710314", "type": 
"chemical_substance" }, { "id": "umlscui:C0710379", "type": "chemical_substance" }, { "id": "umlscui:C0710425", "type": "chemical_substance" }, { "id": "umlscui:C1637859", "type": "chemical_substance" }, { "id": "umlscui:C0711613", "type": "chemical_substance" }, { "id": "umlscui:C2961233", "type": "chemical_substance" }, { "id": "umlscui:C0711672", "type": "chemical_substance" }, { "id": "umlscui:C0876592", "type": "chemical_substance" }, { "id": "umlscui:C1169490", "type": "chemical_substance" }, { "id": "umlscui:C1878604", "type": "chemical_substance" }, { "id": "umlscui:C1329687", "type": "chemical_substance" }, { "id": "umlscui:C1658555", "type": "chemical_substance" }, { "id": "umlscui:C1656904", "type": "chemical_substance" }, { "id": "umlscui:C1877255", "type": "chemical_substance" }, { "id": "umlscui:C1877256", "type": "chemical_substance" }, { "id": "umlscui:C1654649", "type": "chemical_substance" }, { "id": "umlscui:C1724013", "type": "chemical_substance" }, { "id": "umlscui:C1738621", "type": "chemical_substance" }, { "id": "umlscui:C2962514", "type": "chemical_substance" }, { "id": "umlscui:C1877259", "type": "chemical_substance" }, { "id": "umlscui:C2343768", "type": "chemical_substance" }, { "id": "umlscui:C2722500", "type": "chemical_substance" }, { "id": "umlscui:C1877342", "type": "chemical_substance" }, { "id": "umlscui:C1964744", "type": "chemical_substance" }, { "id": "umlscui:C2342927", "type": "chemical_substance" }, { "id": "umlscui:C1877877", "type": "chemical_substance" }, { "id": "umlscui:C2586576", "type": "chemical_substance" }, { "id": "umlscui:C2586863", "type": "chemical_substance" }, { "id": "umlscui:C2723199", "type": "chemical_substance" }, { "id": "umlscui:C2731591", "type": "chemical_substance" }, { "id": "umlscui:C2343501", "type": "chemical_substance" }, { "id": "umlscui:C3500667", "type": "chemical_substance" }, { "id": "umlscui:C3528836", "type": "chemical_substance" }, { "id": "umlscui:C3666810", "type": "chemical_substance" }, { "id": "umlscui:C3856087", "type": "chemical_substance" }, { "id": "umlscui:C4032992", "type": "chemical_substance" }, { "id": "umlscui:C3555080", "type": "chemical_substance" }, { "id": "umlscui:C3666558", "type": "chemical_substance" }, { "id": "umlscui:C3666785", "type": "chemical_substance" }, { "id": "umlscui:C3709991", "type": "chemical_substance" }, { "id": "umlscui:C3692152", "type": "chemical_substance" }, { "id": "umlscui:C3714747", "type": "chemical_substance" }, { "id": "umlscui:C3817780", "type": "chemical_substance" }, { "id": "umlscui:C3817771", "type": "chemical_substance" }, { "id": "umlscui:C3817723", "type": "chemical_substance" }, { "id": "umlscui:C3857808", "type": "chemical_substance" }, { "id": "umlscui:C3859488", "type": "chemical_substance" }, { "id": "umlscui:C4535498", "type": "chemical_substance" }, { "id": "umlscui:C4018369", "type": "chemical_substance" }, { "id": "umlscui:C4018598", "type": "chemical_substance" }, { "id": "umlscui:C4018759", "type": "chemical_substance" }, { "id": "umlscui:C4059793", "type": "chemical_substance" }, { "id": "umlscui:C4237819", "type": "chemical_substance" }, { "id": "umlscui:C3249403", "type": "chemical_substance" }, { "id": "umlscui:C3254776", "type": "chemical_substance" }, { "id": "umlscui:C3249766", "type": "chemical_substance" }, { "id": "umlscui:C3268209", "type": "chemical_substance" }, { "id": "umlscui:C0715313", "type": "chemical_substance" }, { "id": "umlscui:C3257095", "type": "chemical_substance" }, { "id": "umlscui:C3265742", "type": 
"chemical_substance" }, { "id": "umlscui:C1329745", "type": "chemical_substance" }, { "id": "umlscui:C3497698", "type": "chemical_substance" }, { "id": "umlscui:C3497702", "type": "chemical_substance" }, { "id": "umlscui:C4489717", "type": "chemical_substance" }, { "id": "umlscui:C4473852", "type": "chemical_substance" }, { "id": "umlscui:C4476017", "type": "chemical_substance" }, { "id": "umlscui:C0710303", "type": "chemical_substance" }, { "id": "umlscui:C4477998", "type": "chemical_substance" }, { "id": "umlscui:C0710311", "type": "chemical_substance" }, { "id": "umlscui:C2962205", "type": "chemical_substance" }, { "id": "umlscui:C0789590", "type": "chemical_substance" }, { "id": "umlscui:C2920191", "type": "chemical_substance" }, { "id": "umlscui:C0783354", "type": "chemical_substance" }, { "id": "umlscui:C1614863", "type": "chemical_substance" }, { "id": "SCTID:372682005", "type": "chemical_substance" }, { "id": "SCTID:19510001", "type": "chemical_substance" }, { "id": "SCTID:320730006", "type": "chemical_substance" }, { "id": "SCTID:320726008", "type": "chemical_substance" }, { "id": "SCTID:374346009", "type": "chemical_substance" }, { "id": "SCTID:374347000", "type": "chemical_substance" }, { "id": "SCTID:420700002", "type": "chemical_substance" }, { "id": "SCTID:441834006", "type": "chemical_substance" }, { "id": "SCTID:322377005", "type": "chemical_substance" }, { "id": "SCTID:26458009", "type": "chemical_substance" }, { "id": "SCTID:420943003", "type": "chemical_substance" }, { "id": "SCTID:423936008", "type": "chemical_substance" }, { "id": "SCTID:412377008", "type": "chemical_substance" }, { "id": "SCTID:430971000", "type": "chemical_substance" }, { "id": "SCTID:429979001", "type": "chemical_substance" }, { "id": "SCTID:430140005", "type": "chemical_substance" }, { "id": "SCTID:420413009", "type": "chemical_substance" }, { "id": "SCTID:421236006", "type": "chemical_substance" }, { "id": "SCTID:422227009", "type": "chemical_substance" }, { "id": "SCTID:420639000", "type": "chemical_substance" }, { "id": "SCTID:419106007", "type": "chemical_substance" }, { "id": "SCTID:331636008", "type": "chemical_substance" }, { "id": "SCTID:320727004", "type": "chemical_substance" }, { "id": "SCTID:407877003", "type": "chemical_substance" }, { "id": "SCTID:392560004", "type": "chemical_substance" }, { "id": "SCTID:392558001", "type": "chemical_substance" }, { "id": "SCTID:392561000", "type": "chemical_substance" }, { "id": "SCTID:392556002", "type": "chemical_substance" }, { "id": "SCTID:392557006", "type": "chemical_substance" }, { "id": "SCTID:423376005", "type": "chemical_substance" }, { "id": "SCTID:392555003", "type": "chemical_substance" }, { "id": "SCTID:412493000", "type": "chemical_substance" }, { "id": "SMILES: CC(C)(C(O)=O)c1ccc(cc1)C(O)CCCN2CCC(CC2)C(O)(c3ccccc3)c4ccccc4", "type": "chemical_substance" }, { "id": "CAS: 159389-12-5", "type": "chemical_substance" }, { "id": "CAS: 76815-58-2", "type": "chemical_substance" }, { "id": "CAS: 83799-24-0", "type": "chemical_substance" }, { "id": "MESH:C093230", "type": "chemical_substance" }, { "id": "CHEMBL:CHEMBL914", "type": "chemical_substance" }, { "id": "PUBCHEM:3348", "type": "chemical_substance" }, { "id": "rxcui:1116528", "type": "chemical_substance" }, { "id": "rxcui:1670344", "type": "chemical_substance" }, { "id": "rxcui:1192737", "type": "chemical_substance" }, { "id": "rxcui:997512", "type": "chemical_substance" }, { "id": "rxcui:997515", "type": "chemical_substance" }, { "id": "rxcui:997422", "type": "chemical_substance" }, { 
"id": "rxcui:997484", "type": "chemical_substance" }, { "id": "rxcui:997493", "type": "chemical_substance" }, { "id": "rxcui:997502", "type": "chemical_substance" }, { "id": "rxcui:1488053", "type": "chemical_substance" }, { "id": "rxcui:1190331", "type": "chemical_substance" }, { "id": "rxcui:1190334", "type": "chemical_substance" }, { "id": "rxcui:997406", "type": "chemical_substance" }, { "id": "rxcui:997415", "type": "chemical_substance" }, { "id": "rxcui:997420", "type": "chemical_substance" }, { "id": "rxcui:997482", "type": "chemical_substance" }, { "id": "rxcui:997488", "type": "chemical_substance" }, { "id": "rxcui:997491", "type": "chemical_substance" }, { "id": "rxcui:997494", "type": "chemical_substance" }, { "id": "rxcui:997501", "type": "chemical_substance" }, { "id": "rxcui:997550", "type": "chemical_substance" }, { "id": "umlscui:C3163531", "type": "chemical_substance" }, { "id": "umlscui:C4048787", "type": "chemical_substance" }, { "id": "umlscui:C3247651", "type": "chemical_substance" }, { "id": "umlscui:C0716180", "type": "chemical_substance" }, { "id": "umlscui:C1616745", "type": "chemical_substance" }, { "id": "umlscui:C0938972", "type": "chemical_substance" }, { "id": "umlscui:C1965892", "type": "chemical_substance" }, { "id": "umlscui:C1813542", "type": "chemical_substance" }, { "id": "umlscui:C0875865", "type": "chemical_substance" }, { "id": "umlscui:C3709686", "type": "chemical_substance" }, { "id": "umlscui:C3245135", "type": "chemical_substance" }, { "id": "umlscui:C3245138", "type": "chemical_substance" }, { "id": "umlscui:C0976924", "type": "chemical_substance" }, { "id": "umlscui:C1576499", "type": "chemical_substance" }, { "id": "umlscui:C0976919", "type": "chemical_substance" }, { "id": "umlscui:C1964496", "type": "chemical_substance" }, { "id": "umlscui:C0976920", "type": "chemical_substance" }, { "id": "umlscui:C1813539", "type": "chemical_substance" }, { "id": "umlscui:C0976921", "type": "chemical_substance" }, { "id": "umlscui:C0976923", "type": "chemical_substance" }, { "id":
from __future__ import print_function import re import sys import itertools from collections import OrderedDict from copy import copy try: # python 3.3+ from inspect import signature, Signature, Parameter except ImportError: from funcsigs import signature, Signature, Parameter try: from inspect import iscoroutinefunction except ImportError: # let's assume there are no coroutine functions in old Python def iscoroutinefunction(f): return False try: from inspect import isgeneratorfunction except ImportError: # assume no generator function in old Python versions def isgeneratorfunction(f): return False try: # python 3.5+ from typing import Callable, Any, Union, Iterable except ImportError: pass # macroscopic signature strings checker (we do not look inside params, `signature` will do it for us) FUNC_DEF = re.compile('(?s)^\\s*(?P<funcname>[_\\w][_\\w\\d]*)?\\s*' '\\(\\s*(?P<params>.*?)\\s*\\)\\s*' '(((?P<typed_return_hint>->\\s*[^:]+)?(?P<colon>:)?\\s*)|:\\s*#\\s*(?P<comment_return_hint>.+))*$') def create_wrapper(wrapped, wrapper, new_sig=None, # type: Union[str, Signature] func_name=None, # type: str inject_as_first_arg=False, # type: bool add_source=True, # type: bool add_impl=True, # type: bool doc=None, # type: str qualname=None, # type: str module_name=None, # type: str **attrs ): """ Creates a signature-preserving wrapper function. See `@makefun.wraps` """ func_name, func_sig, doc, qualname, module_name, all_attrs = _get_args_for_wrapping(wrapped, new_sig, func_name, doc, qualname, module_name, attrs) return create_function(func_sig, wrapper, func_name=func_name, inject_as_first_arg=inject_as_first_arg, add_source=add_source, add_impl=add_impl, doc=doc, qualname=qualname, module_name=module_name, **all_attrs) def create_function(func_signature, # type: Union[str, Signature] func_impl, # type: Callable[[Any], Any] func_name=None, # type: str inject_as_first_arg=False, # type: bool add_source=True, # type: bool add_impl=True, # type: bool doc=None, # type: str qualname=None, # type: str module_name=None, # type: str **attrs): """ Creates a function with signature <func_signature> that will call <func_impl> with its arguments when called. Arguments are passed as keyword-arguments when it is possible (so all the time, except for var-positional or positional-only arguments that get passed as *args. Note that pos-only does not yet exist in python but this case is already covered because it is supported by `Signature` objects). `func_signature` can be provided: - as a string containing the name and signature without 'def' keyword, such as `'foo(a, b: int, *args, **kwargs)'`. In which case the name in the string will be used for the `__name__` and `__qualname__` of the created function by default - as a `Signature` object, for example created using `signature(f)` or handcrafted. In this case the `__name__` and `__qualname__` of the created function will be copied from `func_impl` by default. All the other metadata of the created function are defined as follows: - default `__name__` attribute (see above) can be overriden by providing a non-None `func_name` - default `__qualname__` attribute (see above) can be overridden by providing a non-None `qualname` - `__annotations__` attribute is created to match the annotations in the signature. 
- `__doc__` attribute is copied from `func_impl.__doc__` except if overridden using `doc` - `__module__` attribute is copied from `func_impl.__module__` except if overridden using `module_name` Finally two new attributes are optionally created - `__source__` attribute: set if `add_source` is `True` (default), this attribute contains the source code of the generated function - `__func_impl__` attribute: set if `add_impl` is `True` (default), this attribute contains a pointer to `func_impl` :param func_signature: either a string without 'def' such as "foo(a, b: int, *args, **kwargs)" or "(a, b: int)", or a `Signature` object, for example from the output of `inspect.signature` or from the `funcsigs.signature` backport. Note that these objects can be created manually too. If the signature is provided as a string and contains a non-empty name, this name will be used instead of the one of the decorated function. :param func_impl: the function that will be called when the generated function is executed. Its signature should be compliant with (=more generic than) `func_signature` :param inject_as_first_arg: if `True`, the created function will be injected as the first positional argument of `func_impl`. This can be handy in case the implementation is shared between several facades and needs to know from which context it was called. Default=`False` :param func_name: provide a non-`None` value to override the created function `__name__` and `__qualname__`. If this is `None` (default), the `__name__` will default to the one of `func_impl` if `func_signature` is a `Signature`, or to the name defined in `func_signature` if `func_signature` is a `str` and contains a non-empty name. :param add_source: a boolean indicating if a '__source__' annotation should be added to the generated function (default: True) :param add_impl: a boolean indicating if a '__func_impl__' annotation should be added to the generated function (default: True) :param doc: a string representing the docstring that will be used to set the __doc__ attribute on the generated function. If None (default), the doc of func_impl will be used. :param qualname: a string representing the qualified name to be used. If None (default), the `__qualname__` will default to the one of `func_impl` if `func_signature` is a `Signature`, or to the name defined in `func_signature` if `func_signature` is a `str` and contains a non-empty name. :param module_name: the name of the module to be set on the function (under __module__ ). If None (default), `func_impl.__module__` will be used. 
:param attrs: other keyword attributes that should be set on the function :return: """ # grab context from the caller frame try: attrs.pop('_with_sig_') # called from `@with_signature` frame = _get_callerframe(offset=1) except KeyError: frame = _get_callerframe() evaldict, _ = extract_module_and_evaldict(frame) # name defaults user_provided_name = True if func_name is None: func_name = func_impl.__name__ user_provided_name = False # qname default user_provided_qname = True if qualname is None: qualname = getattr(func_impl, '__qualname__', None) user_provided_qname = False # doc default if doc is None: doc = getattr(func_impl, '__doc__', None) # module name default if module_name is None: module_name = func_impl.__module__ # input signature handling if isinstance(func_signature, str): # transform the string into a Signature and make sure the string contains ":" func_name_from_str, func_signature, func_signature_str = get_signature_from_string(func_signature, evaldict) # if not explicitly overridden using `func_name`, the name in the string takes over if func_name_from_str is not None: if not user_provided_name: func_name = func_name_from_str if not user_provided_qname: qualname = func_name # fix the signature if needed if func_name_from_str is None: func_signature_str = func_name + func_signature_str elif isinstance(func_signature, Signature): # create the signature string func_signature_str = get_signature_string(func_name, func_signature, evaldict) else: raise TypeError("Invalid type for `func_signature`: %s" % type(func_signature)) # extract all information needed from the `Signature` params_to_kw_assignment_mode = get_signature_params(func_signature) params_names = list(params_to_kw_assignment_mode.keys()) # Note: in decorator the annotations were extracted using getattr(func_impl, '__annotations__') instead. # This seems equivalent but more general (provided by the signature, not the function), but to check annotations, defaults, kwonlydefaults = get_signature_details(func_signature) # create the body of the function to compile # The generated function body should dispatch its received arguments to the inner function. # For this we will pass as much as possible the arguments as keywords. 
# However if there are varpositional arguments we cannot assignments = [("%s=%s" % (k, k)) if is_kw else k for k, is_kw in params_to_kw_assignment_mode.items()] params_str = ', '.join(assignments) if inject_as_first_arg: params_str = "%s, %s" % (func_name, params_str) if _is_generator_func(func_impl): if sys.version_info >= (3, 3): body = "def %s\n yield from _func_impl_(%s)\n" % (func_signature_str, params_str) else: from makefun._main_legacy_py import get_legacy_py_generator_body_template body = get_legacy_py_generator_body_template() % (func_signature_str, params_str) else: body = "def %s\n return _func_impl_(%s)\n" % (func_signature_str, params_str) if iscoroutinefunction(func_impl): body = ("async " + body).replace('return', 'return await') # create the function by compiling code, mapping the `_func_impl_` symbol to `func_impl` protect_eval_dict(evaldict, func_name, params_names) evaldict['_func_impl_'] = func_impl f = _make(func_name, params_names, body, evaldict) # add the source annotation if needed if add_source: attrs['__source__'] = body # add the handler if needed if add_impl: attrs['__func_impl__'] = func_impl # update the signature _update_fields(f, name=func_name, qualname=qualname, doc=doc, annotations=annotations, defaults=tuple(defaults), kwonlydefaults=kwonlydefaults, module=module_name, **attrs) return f def _is_generator_func(func_impl): """ Return True if the func_impl is a generator :param func_impl: :return: """ if (3, 5) <= sys.version_info < (3, 6): # with Python 3.5 isgeneratorfunction returns True for all coroutines # however we know that it is NOT possible to have a generator # coroutine in python 3.5: PEP525 was not there yet return isgeneratorfunction(func_impl) and not iscoroutinefunction(func_impl) else: return isgeneratorfunction(func_impl) class _SymbolRef: """ A class used to protect signature default values and
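# Usage sketch for create_function (illustrative, not part of the library source):
# the common string-signature case described in the docstring above. Assumes the
# `makefun` package is installed.
from makefun import create_function

def _impl(**kwargs):
    """Generic implementation: simply echo the arguments it received."""
    return kwargs

# The name 'foo' from the signature string becomes __name__/__qualname__ of the
# generated function, and calls are dispatched to _impl as keyword arguments.
foo = create_function('foo(a, b: int = 2)', _impl)
assert foo.__name__ == 'foo'
assert foo(1) == {'a': 1, 'b': 2}   # default b=2 is applied before dispatch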
type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) output_address_blocks (str): Specifies whether to return a formatted version of the address.. [optional] if omitted the server will use the default value of "Y" # noqa: E501 perform_us_processing (str): Specifies whether or not to process U.S. addresses.. [optional] if omitted the server will use the default value of "Y" # noqa: E501 perform_dpv (str): Delivery Point Validation (DPV®) validates that a specific address exists. [optional] if omitted the server will use the default value of "N" # noqa: E501 output_formatted_on_fail (str): Specifies whether to return a formatted address when an address cannot be validated.. [optional] if omitted the server will use the default value of "N" # noqa: E501 output_postal_code_separator (str): Specifies whether to use separators (spaces or hyphens) in ZIP™ Codes or Canadian postal codes.. [optional] if omitted the server will use the default value of "Y" # noqa: E501 output_country_format (str): Specifies the format to use for the country name returned in the Country output field.. [optional] if omitted the server will use the default value of "E" # noqa: E501 keep_multimatch (str): Indicates whether to return multiple address for input addresses that have more than one possible matches.. [optional] if omitted the server will use the default value of "N" # noqa: E501 output_casing (str): Specifies the casing of the output address. M for mixed case and U for upper case.. [optional] if omitted the server will use the default value of "M" # noqa: E501 maximum_results (str): Specifies a number between 1 and 10 that indicates the maximum number of addresses to be returned.. [optional] if omitted the server will use the default value of "10" # noqa: E501 output_record_type (str): Specifies the type of the output record.. [optional] if omitted the server will use the default value of "A" # noqa: E501 output_field_level_return_codes (str): Identifies which output addresses are candidate addresses as value if Y for OutputFieldLevelReturnCodes.. [optional] if omitted the server will use the default value of "N" # noqa: E501 dpv_determine_no_stat (str): Determines the no stat status of an address which means it exists but cannot receive mails.. 
[optional] if omitted the server will use the default value of "N" # noqa: E501 street_matching_strictness (str): Specifies the algorithm to determe if an input address matches in the postal database.. [optional] if omitted the server will use the default value of "M" # noqa: E501 can_french_apartment_label (str): Specifies the default apartment label for the output if there is no apartment label in the input address. This is specific to French address. . [optional] if omitted the server will use the default value of "Appartement" # noqa: E501 output_abbreviated_alias (str): Specifies whether to use a street's abbreviated alias in the output if the output address line is longer than 31 characters.. [optional] if omitted the server will use the default value of "N" # noqa: E501 dpv_successful_status_condition (str): Selecting the match condition where a DPV result does NOT cause a record to fail.. [optional] if omitted the server will use the default value of "A" # noqa: E501 standard_address_pmb_line (str): Specifies where Private Mailbox (PMB) information is placed.. [optional] if omitted the server will use the default value of "N" # noqa: E501 firm_matching_strictness (str): Specifies the algorithm to determining if an input address matches in the postal database.. [optional] if omitted the server will use the default value of "M" # noqa: E501 can_rural_route_format (str): Specifies where to place rural route delivery information.. [optional] if omitted the server will use the default value of "A" # noqa: E501 can_prefer_house_num (str): Specifies whether to select a house number of postal code in case of conflict.. [optional] if omitted the server will use the default value of "N" # noqa: E501 output_preferred_alias (str): Specifies whether to use a street's preferred alias in the output.. [optional] if omitted the server will use the default value of "N" # noqa: E501 directional_matching_strictness (str): Specifies the algorithm to determine if an input address matches in the postal database.. [optional] if omitted the server will use the default value of "M" # noqa: E501 extract_firm (str): Specifies whether to extract the firm name from AddressLine1 through AddressLine4 and place it in the FirmName output field.. [optional] if omitted the server will use the default value of "N" # noqa: E501 fail_on_cmra_match (str): Specifies whether to consider Treat Commercial Mail Receiving Agency (CMRA) matches as failures?. [optional] if omitted the server will use the default value of "N" # noqa: E501 can_non_civic_format (str): Specifies whether or not non-civic keywords are abbreviated in the output. . [optional] if omitted the server will use the default value of "A" # noqa: E501 can_sslvr_flg (str): Changes the civic and/or suite information to match the LVR or single-single record.. [optional] if omitted the server will use the default value of "N" # noqa: E501 output_street_name_alias (str): Specifies how to handle street name aliases used in the input. This is specific to US.. [optional] if omitted the server will use the default value of "Y" # noqa: E501 perform_ews (str): Specifies the Early Warning System (EWS) that uses the USPS EWS File to validate addresses that are not in the ZIP + 4 database.. [optional] if omitted the server will use the default value of "N" # noqa: E501 can_output_city_format (str): Specifies whether to use the long, medium, or short version of the city if the city has a long name.. 
[optional] if omitted the server will use the default value of "D" # noqa: E501 dual_address_logic (str): Specifies how to return a match if multiple non-blank address lines are present or multiple address types are on the same address line. (U.S. addresses only.). [optional] if omitted the server will use the default value of "N" # noqa: E501 perform_suite_link (str): Specifies whether to perform SuiteLink processing.. [optional] if omitted the server will use the default value of "N" # noqa: E501 can_standard_address_format (str): Specifies where to place secondary address information in the output address.. [optional] if omitted the server will use the default value of "D" # noqa: E501 output_preferred_city (str): Specifies whether the preferred last line city name should be stored.. [optional] if omitted the server will use the default value of "Z" # noqa: E501 output_multinational_characters (str): Specifies whether to return multinational characters, including diacritical marks such as umlauts or accents.. [optional] if omitted the server will use the default value of "N" # noqa: E501 can_delivery_office_format (str): Specifies where to place station information.. [optional] if omitted the server
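# Illustrative only (not from the generated client): a plain dict collecting a
# few of the documented options with non-default values. Every key name and the
# server-side default noted in each comment comes from the docstring above; the
# dict itself is just an example of overriding those defaults.
overrides = {
    "output_casing": "U",      # default "M": request upper-case output addresses
    "maximum_results": "5",    # default "10": cap the number of candidate addresses
    "perform_dpv": "Y",        # default "N": enable Delivery Point Validation
    "keep_multimatch": "Y",    # default "N": return all candidate matches
}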
+ m.b85 <= 0) m.c47 = Constraint(expr= - m.b42 + m.b86 <= 0) m.c48 = Constraint(expr= - m.b42 + m.b87 <= 0) m.c49 = Constraint(expr= - m.b42 + m.b88 <= 0) m.c50 = Constraint(expr= - m.b42 + m.b89 <= 0) m.c51 = Constraint(expr= - m.b42 + m.b90 <= 0) m.c52 = Constraint(expr= - m.b42 + m.b91 <= 0) m.c53 = Constraint(expr= - m.b42 + m.b92 <= 0) m.c54 = Constraint(expr= - m.b42 + m.b93 <= 0) m.c55 = Constraint(expr= - m.b42 + m.b94 <= 0) m.c56 = Constraint(expr= - m.b42 + m.b95 <= 0) m.c57 = Constraint(expr= - m.b42 + m.b96 <= 0) m.c58 = Constraint(expr= - m.b42 + m.b97 <= 0) m.c59 = Constraint(expr= - m.b42 + m.b98 <= 0) m.c60 = Constraint(expr= - m.b42 + m.b99 <= 0) m.c61 = Constraint(expr= - m.b42 + m.b100 <= 0) m.c62 = Constraint(expr= - m.b43 + m.b101 <= 0) m.c63 = Constraint(expr= - m.b43 + m.b102 <= 0) m.c64 = Constraint(expr= - m.b43 + m.b103 <= 0) m.c65 = Constraint(expr= - m.b43 + m.b104 <= 0) m.c66 = Constraint(expr= - m.b43 + m.b105 <= 0) m.c67 = Constraint(expr= - m.b43 + m.b106 <= 0) m.c68 = Constraint(expr= - m.b43 + m.b107 <= 0) m.c69 = Constraint(expr= - m.b43 + m.b108 <= 0) m.c70 = Constraint(expr= - m.b43 + m.b109 <= 0) m.c71 = Constraint(expr= - m.b43 + m.b110 <= 0) m.c72 = Constraint(expr= - m.b43 + m.b111 <= 0) m.c73 = Constraint(expr= - m.b43 + m.b112 <= 0) m.c74 = Constraint(expr= - m.b43 + m.b113 <= 0) m.c75 = Constraint(expr= - m.b43 + m.b114 <= 0) m.c76 = Constraint(expr= - m.b43 + m.b115 <= 0) m.c77 = Constraint(expr= - m.b43 + m.b116 <= 0) m.c78 = Constraint(expr= - m.b43 + m.b117 <= 0) m.c79 = Constraint(expr= - m.b43 + m.b118 <= 0) m.c80 = Constraint(expr= - m.b43 + m.b119 <= 0) m.c81 = Constraint(expr= - m.b43 + m.b120 <= 0) m.c82 = Constraint(expr= - m.b44 + m.b121 <= 0) m.c83 = Constraint(expr= - m.b44 + m.b122 <= 0) m.c84 = Constraint(expr= - m.b44 + m.b123 <= 0) m.c85 = Constraint(expr= - m.b44 + m.b124 <= 0) m.c86 = Constraint(expr= - m.b44 + m.b125 <= 0) m.c87 = Constraint(expr= - m.b44 + m.b126 <= 0) m.c88 = Constraint(expr= - m.b44 + m.b127 <= 0) m.c89 = Constraint(expr= - m.b44 + m.b128 <= 0) m.c90 = Constraint(expr= - m.b44 + m.b129 <= 0) m.c91 = Constraint(expr= - m.b44 + m.b130 <= 0) m.c92 = Constraint(expr= - m.b44 + m.b131 <= 0) m.c93 = Constraint(expr= - m.b44 + m.b132 <= 0) m.c94 = Constraint(expr= - m.b44 + m.b133 <= 0) m.c95 = Constraint(expr= - m.b44 + m.b134 <= 0) m.c96 = Constraint(expr= - m.b44 + m.b135 <= 0) m.c97 = Constraint(expr= - m.b44 + m.b136 <= 0) m.c98 = Constraint(expr= - m.b44 + m.b137 <= 0) m.c99 = Constraint(expr= - m.b44 + m.b138 <= 0) m.c100 = Constraint(expr= - m.b44 + m.b139 <= 0) m.c101 = Constraint(expr= - m.b44 + m.b140 <= 0) m.c102 = Constraint(expr= - m.b45 + m.b141 <= 0) m.c103 = Constraint(expr= - m.b45 + m.b142 <= 0) m.c104 = Constraint(expr= - m.b45 + m.b143 <= 0) m.c105 = Constraint(expr= - m.b45 + m.b144 <= 0) m.c106 = Constraint(expr= - m.b45 + m.b145 <= 0) m.c107 = Constraint(expr= - m.b45 + m.b146 <= 0) m.c108 = Constraint(expr= - m.b45 + m.b147 <= 0) m.c109 = Constraint(expr= - m.b45 + m.b148 <= 0) m.c110 = Constraint(expr= - m.b45 + m.b149 <= 0) m.c111 = Constraint(expr= - m.b45 + m.b150 <= 0) m.c112 = Constraint(expr= - m.b45 + m.b151 <= 0) m.c113 = Constraint(expr= - m.b45 + m.b152 <= 0) m.c114 = Constraint(expr= - m.b45 + m.b153 <= 0) m.c115 = Constraint(expr= - m.b45 + m.b154 <= 0) m.c116 = Constraint(expr= - m.b45 + m.b155 <= 0) m.c117 = Constraint(expr= - m.b45 + m.b156 <= 0) m.c118 = Constraint(expr= - m.b45 + m.b157 <= 0) m.c119 = Constraint(expr= - m.b45 + m.b158 <= 0) m.c120 = Constraint(expr= - 
m.b45 + m.b159 <= 0) m.c121 = Constraint(expr= - m.b45 + m.b160 <= 0) m.c122 = Constraint(expr= - m.b46 + m.b161 <= 0) m.c123 = Constraint(expr= - m.b46 + m.b162 <= 0) m.c124 = Constraint(expr= - m.b46 + m.b163 <= 0) m.c125 = Constraint(expr= - m.b46 + m.b164 <= 0) m.c126 = Constraint(expr= - m.b46 + m.b165 <= 0) m.c127 = Constraint(expr= - m.b46 + m.b166 <= 0) m.c128 = Constraint(expr= - m.b46 + m.b167 <= 0) m.c129 = Constraint(expr= - m.b46 + m.b168 <= 0) m.c130 = Constraint(expr= - m.b46 + m.b169 <= 0) m.c131 = Constraint(expr= - m.b46 + m.b170 <= 0) m.c132 = Constraint(expr= - m.b46 + m.b171 <= 0) m.c133 = Constraint(expr= - m.b46 + m.b172 <= 0) m.c134 = Constraint(expr= - m.b46 + m.b173 <= 0) m.c135 = Constraint(expr= - m.b46 + m.b174 <= 0) m.c136 = Constraint(expr= - m.b46 + m.b175 <= 0) m.c137 = Constraint(expr= - m.b46 + m.b176 <= 0) m.c138 = Constraint(expr= - m.b46 + m.b177 <= 0) m.c139 = Constraint(expr= - m.b46 + m.b178 <= 0) m.c140 = Constraint(expr= - m.b46 + m.b179 <= 0) m.c141 = Constraint(expr= - m.b46 + m.b180 <= 0) m.c142 = Constraint(expr= - m.b47 + m.b181 <= 0) m.c143 = Constraint(expr= - m.b47 + m.b182 <= 0) m.c144 = Constraint(expr= - m.b47 + m.b183 <= 0) m.c145 = Constraint(expr= - m.b47 + m.b184 <= 0) m.c146 = Constraint(expr= - m.b47 + m.b185 <= 0) m.c147 = Constraint(expr= - m.b47 + m.b186 <= 0) m.c148 = Constraint(expr= - m.b47 + m.b187 <= 0) m.c149 = Constraint(expr= - m.b47 + m.b188 <= 0) m.c150 = Constraint(expr= - m.b47 + m.b189 <= 0) m.c151 = Constraint(expr= - m.b47 + m.b190 <= 0) m.c152 = Constraint(expr= - m.b47 + m.b191 <= 0) m.c153 = Constraint(expr= - m.b47 + m.b192 <= 0) m.c154 = Constraint(expr= - m.b47 + m.b193 <= 0) m.c155 = Constraint(expr= - m.b47 + m.b194 <= 0) m.c156 = Constraint(expr= - m.b47 + m.b195 <= 0) m.c157 = Constraint(expr= - m.b47 + m.b196 <= 0) m.c158 = Constraint(expr= - m.b47 + m.b197 <= 0) m.c159 = Constraint(expr= - m.b47 + m.b198 <= 0) m.c160 = Constraint(expr= - m.b47 + m.b199 <= 0) m.c161 = Constraint(expr= - m.b47 + m.b200 <= 0) m.c162 = Constraint(expr= - m.b48 + m.b201 <= 0) m.c163 = Constraint(expr= - m.b48 + m.b202 <= 0) m.c164 = Constraint(expr= - m.b48 + m.b203 <= 0) m.c165 = Constraint(expr= - m.b48 + m.b204 <= 0) m.c166 = Constraint(expr= - m.b48 + m.b205 <= 0) m.c167 = Constraint(expr= - m.b48 + m.b206 <= 0) m.c168 = Constraint(expr= - m.b48 + m.b207 <= 0) m.c169 = Constraint(expr= - m.b48 + m.b208 <= 0) m.c170 = Constraint(expr= - m.b48 + m.b209 <= 0) m.c171 = Constraint(expr= - m.b48 + m.b210 <= 0) m.c172 = Constraint(expr= - m.b48 + m.b211 <= 0) m.c173 = Constraint(expr= - m.b48 + m.b212 <= 0) m.c174 = Constraint(expr= - m.b48 + m.b213 <= 0) m.c175 = Constraint(expr= - m.b48 + m.b214 <= 0) m.c176 = Constraint(expr= - m.b48 + m.b215 <= 0) m.c177 = Constraint(expr= - m.b48 + m.b216 <= 0) m.c178 = Constraint(expr= - m.b48 + m.b217 <= 0) m.c179 = Constraint(expr= - m.b48 + m.b218 <= 0) m.c180 = Constraint(expr= - m.b48 + m.b219 <= 0) m.c181 = Constraint(expr= - m.b48 + m.b220 <= 0) m.c182 = Constraint(expr= - m.b49 + m.b221 <= 0) m.c183 = Constraint(expr= - m.b49 + m.b222 <= 0) m.c184 = Constraint(expr= - m.b49 + m.b223 <= 0) m.c185 = Constraint(expr= - m.b49 + m.b224 <= 0) m.c186 = Constraint(expr= - m.b49 + m.b225 <= 0) m.c187 = Constraint(expr= - m.b49 + m.b226 <= 0) m.c188 = Constraint(expr= - m.b49 + m.b227 <= 0) m.c189 = Constraint(expr= - m.b49 + m.b228 <= 0) m.c190 = Constraint(expr= - m.b49 + m.b229 <= 0) m.c191 = Constraint(expr= - m.b49 + m.b230 <= 0) m.c192 = Constraint(expr= - m.b49 + m.b231 <= 0) 
m.c193 = Constraint(expr= - m.b49 + m.b232 <= 0) m.c194 = Constraint(expr= - m.b49 + m.b233 <= 0) m.c195 = Constraint(expr= - m.b49 + m.b234 <= 0) m.c196 = Constraint(expr= - m.b49 + m.b235 <= 0) m.c197 = Constraint(expr= - m.b49 + m.b236 <= 0) m.c198 = Constraint(expr= - m.b49 + m.b237 <= 0) m.c199 = Constraint(expr= - m.b49 + m.b238 <= 0) m.c200 = Constraint(expr= - m.b49 + m.b239 <= 0) m.c201 = Constraint(expr= - m.b49 + m.b240 <= 0) m.c202 = Constraint(expr= - m.b50 + m.b241 <= 0) m.c203 = Constraint(expr= - m.b50 + m.b242 <= 0) m.c204 = Constraint(expr= - m.b50 + m.b243 <= 0) m.c205 = Constraint(expr= - m.b50 + m.b244 <= 0) m.c206 = Constraint(expr= - m.b50
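# Side note (illustrative, not part of the generated model): each block above
# links one "parent" binary (e.g. m.b42) to twenty "child" binaries through
# child - parent <= 0, i.e. a child may be 1 only if its parent is 1. Written
# by hand, the same pattern fits an indexed Constraint; the toy model below
# reproduces one such block for illustration.
from pyomo.environ import Binary, ConcreteModel, Constraint, RangeSet, Var

toy = ConcreteModel()
toy.parent = Var(within=Binary)
toy.children = Var(RangeSet(1, 20), within=Binary)

def _link_rule(m, j):
    # child j can only be active when the parent is active
    return m.children[j] - m.parent <= 0

toy.link = Constraint(RangeSet(1, 20), rule=_link_rule)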
<reponame>cubed4th/cubed4th-py #!/usr/bin/env python3 # -*- encoding: utf-8 # SPDX-License-Identifier: MIT # Copyright (c) https://github.com/scott91e1 ~ 2021 - 2021 __banner__ = r""" ( _ _ ______ ____ _____ _______ _ _ | | | | | ____/ __ \| __ \__ __| | | | | |_ ___ ___| |_ | |__ | | | | |__) | | | | |__| | | __/ _ \/ __| __| | __|| | | | _ / | | | __ | | || __/\__ \ |_ | | | |__| | | \ \ | | | | | | \__\___||___/\__| |_| \____/|_| \_\ |_| |_| |_| ______ |______| ) """ # __banner__ class TestFORTH: options = {} def test_0000(self): r""" ``` 1 2 3 : hello 'World ; """ e = FORTH.Engine(run=self.test_0000.__doc__, **self.options) print e.hello assert e.root.stack == [1, 2, 3] assert e.root.memory == {} assert e.root.test["f"] == 0 def test_0001(self): r""" ``` 'Hello 'World """ e = FORTH.Engine(self.test_0001.__doc__, **self.options) assert e.root.stack == ["Hello", "World"] assert e.root.memory == {} assert e.root.test["f"] == 0 def test_0002(self): r""" ``` 123 456 ! """ e = FORTH.Engine(self.test_0002.__doc__, **self.options) assert e.root.stack == [] assert e.root.memory == {456: 123} assert e.root.test["f"] == 0 def test_0003(self): r""" ``` 123 'FOO_1 ! 'Baz 'FOO_2 ! """ e = FORTH.Engine(self.test_0003.__doc__, **self.options) assert e.root.stack == [] assert e.root.memory == {"FOO_1": 123, "FOO_2": "Baz"} assert e.root.test["f"] == 0 def test_1000(self): r""" ``` T{ 'Hello 'World DROP -> ("Hello") }T """ e = FORTH.Engine(self.test_1000.__doc__, **self.options) assert e.root.test["f"] == 0 def test_BASIC_ASSUMPTIONS(self): e = FORTH.Engine(self.BASIC_ASSUMPTIONS, **self.options) assert e.root.test["f"] == 0 BASIC_ASSUMPTIONS = r""" ``` T{ -> }T \ START WITH CLEAN SLATE ( TEST IF ANY BITS ARE SET; ANSWER IN BASE 1 ) T{ : BITSSET? IF 0 0 ELSE 0 THEN ; -> }T T{ 0 BITSSET? -> 0 }T ( ZERO IS ALL BITS CLEAR ) T{ 1 BITSSET? -> 0 0 }T ( OTHER NUMBER HAVE AT LEAST ONE BIT ) T{ -1 BITSSET? -> 0 0 }T """ def test_BOOLEANS_INVERT(self): e = FORTH.Engine(self.BOOLEANS_INVERT, **self.options) assert e.root.test["f"] == 0 BOOLEANS_INVERT = r""" ``` T{ 0 0 AND -> 0 }T T{ 0 1 AND -> 0 }T T{ 1 0 AND -> 0 }T T{ 1 1 AND -> 1 }T T{ 0 INVERT 1 AND -> 1 }T T{ 1 INVERT 1 AND -> 0 }T 0 CONSTANT 0S 0 INVERT CONSTANT 1S T{ 0S INVERT -> 1S }T T{ 1S INVERT -> 0S }T T{ 0S 0S AND -> 0S }T T{ 0S 1S AND -> 0S }T T{ 1S 0S AND -> 0S }T T{ 1S 1S AND -> 1S }T T{ 0S 0S OR -> 0S }T T{ 0S 1S OR -> 1S }T T{ 1S 0S OR -> 1S }T T{ 1S 1S OR -> 1S }T T{ 0S 0S XOR -> 0S }T T{ 0S 1S XOR -> 1S }T T{ 1S 0S XOR -> 1S }T T{ 1S 1S XOR -> 0S }T """ def test_LSHIFT_RSHIFT(self): e = FORTH.Engine(self.LSHIFT_RSHIFT, **self.options) assert e.root.test["f"] == 0 LSHIFT_RSHIFT = r""" ``` 0 CONSTANT 0S 0 INVERT CONSTANT 1S 1S 1 RSHIFT INVERT CONSTANT MSB T{ : BITSSET? IF 0 0 ELSE 0 THEN ; -> }T ( ) ( WE TRUST 1S, INVERT, AND BITSSET?; WE WILL CONFIRM RSHIFT LATER ) 1S 1 RSHIFT INVERT CONSTANT MSB # T{ MSB BITSSET? 
-> 0 0 }T T{ 0S 2* -> 0S }T T{ 1 2* -> 2 }T T{ 4000 2* -> 8000 }T T{ 1S 2* 1 XOR -> 1S }T T{ MSB 2* -> 0S }T T{ 0S 2/ -> 0S }T T{ 1 2/ -> 0 }T T{ 4000 2/ -> 2000 }T T{ 1S 2/ -> 1S }T \ MSB PROPOGATED T{ 1S 1 XOR 2/ -> 1S }T T{ MSB 2/ MSB AND -> MSB }T T{ 1 0 LSHIFT -> 1 }T T{ 1 1 LSHIFT -> 2 }T T{ 1 2 LSHIFT -> 4 }T # T{ 1 F LSHIFT -> 8000 }T \ BIGGEST GUARANTEED SHIFT T{ 1S 1 LSHIFT 1 XOR -> 1S }T T{ MSB 1 LSHIFT -> 0 }T T{ 1 0 RSHIFT -> 1 }T T{ 1 1 RSHIFT -> 0 }T T{ 2 1 RSHIFT -> 1 }T T{ 4 2 RSHIFT -> 1 }T # T{ 8000 F RSHIFT -> 1 }T \ BIGGEST T{ MSB 1 RSHIFT MSB AND -> 0 }T \ RSHIFT ZERO FILLS MSBS T{ MSB 1 RSHIFT 2* -> MSB }T """ def test_COMPARISONS(self): e = FORTH.Engine(self.COMPARISONS, **self.options) assert e.root.test["f"] == 0 COMPARISONS = r""" ``` 0 CONSTANT 0S 0 INVERT CONSTANT 1S 1S 1 RSHIFT INVERT CONSTANT MSB T{ : BITSSET? IF 0 0 ELSE 0 THEN ; -> }T ( ) T{ 0 0= -> <TRUE> }T T{ 1 0= -> <FALSE> }T T{ 2 0= -> <FALSE> }T T{ -1 0= -> <FALSE> }T T{ 0 0 = -> <TRUE> }T T{ 1 1 = -> <TRUE> }T T{ -1 -1 = -> <TRUE> }T T{ 1 0 = -> <FALSE> }T T{ -1 0 = -> <FALSE> }T T{ 0 1 = -> <FALSE> }T T{ 0 -1 = -> <FALSE> }T T{ 0 0< -> <FALSE> }T T{ -1 0< -> <TRUE> }T T{ 1 0< -> <FALSE> }T T{ 0 1 < -> <TRUE> }T T{ 1 2 < -> <TRUE> }T T{ -1 0 < -> <TRUE> }T T{ -1 1 < -> <TRUE> }T T{ 0 0 < -> <FALSE> }T T{ 1 1 < -> <FALSE> }T T{ 1 0 < -> <FALSE> }T T{ 2 1 < -> <FALSE> }T T{ 0 -1 < -> <FALSE> }T T{ 1 -1 < -> <FALSE> }T T{ 0 1 > -> <FALSE> }T T{ 1 2 > -> <FALSE> }T T{ -1 0 > -> <FALSE> }T T{ -1 1 > -> <FALSE> }T T{ 0 0 > -> <FALSE> }T T{ 1 1 > -> <FALSE> }T T{ 1 0 > -> <TRUE> }T T{ 2 1 > -> <TRUE> }T T{ 0 -1 > -> <TRUE> }T T{ 1 -1 > -> <TRUE> }T T{ 0 1 U< -> <TRUE> }T T{ 1 2 U< -> <TRUE> }T T{ 0 0 U< -> <FALSE> }T T{ 1 1 U< -> <FALSE> }T T{ 1 0 U< -> <FALSE> }T T{ 2 1 U< -> <FALSE> }T T{ 0 1 MIN -> 0 }T T{ 1 2 MIN -> 1 }T T{ -1 0 MIN -> -1 }T T{ -1 1 MIN -> -1 }T T{ 0 0 MIN -> 0 }T T{ 1 1 MIN -> 1 }T T{ 1 0 MIN -> 0 }T T{ 2 1 MIN -> 1 }T T{ 0 -1 MIN -> -1 }T T{ 1 -1 MIN -> -1 }T T{ 0 1 MAX -> 1 }T T{ 1 2 MAX -> 2 }T T{ -1 0 MAX -> 0 }T T{ -1 1 MAX -> 1 }T T{ 0 0 MAX -> 0 }T T{ 1 1 MAX -> 1 }T T{ 1 0 MAX -> 1 }T T{ 2 1 MAX -> 2 }T T{ 0 -1 MAX -> 0 }T T{ 1 -1 MAX -> 1 }T """ def test_STACK_OPS(self): e = FORTH.Engine(self.STACK_OPS, **self.options) assert e.root.test["f"] == 0 STACK_OPS = r""" ``` T{ 1 2 2DROP -> }T T{ 1 2 2DUP -> 1 2 1 2 }T T{ 1 2 3 4 2OVER -> 1 2 3 4 1 2 }T T{ 1 2 3 4 2SWAP -> 3 4 1 2 }T T{ 0 ?DUP -> 0 }T T{ 1 ?DUP -> 1 1 }T T{ -1 ?DUP -> -1 -1 }T T{ DEPTH -> 0 }T T{ 0 DEPTH -> 0 1 }T T{ 0 1 DEPTH -> 0 1 2 }T T{ 0 DROP -> }T T{ 1 2 DROP -> 1 }T T{ 1 DUP -> 1 1 }T T{ 1 2 OVER -> 1 2 1 }T T{ 1 2 3 ROT -> 2 3 1 }T T{ 1 2 SWAP -> 2 1 }T """ def test_RETURN_STACK(self): e = FORTH.Engine(self.RETURN_STACK, **self.options) assert e.root.test["f"] == 0 RETURN_STACK = r""" ``` 0 CONSTANT 0S 0 INVERT CONSTANT 1S ( ) T{ : GR1 >R R> ; -> }T T{ : GR2 >R R@ R> DROP ; -> }T T{ 123 GR1 -> 123 }T T{ 123 GR2 -> 123 }T T{ 1S GR1 -> 1S }T ( RETURN STACK HOLDS CELLS ) """ def test_ADD_SUBTRACT(self): e = FORTH.Engine(self.ADD_SUBTRACT, **self.options) assert e.root.test["f"] == 0 ADD_SUBTRACT = r""" ``` T{ 0 5 + -> 5 }T T{ 5 0 + -> 5 }T T{ 0 -5 + -> -5 }T T{ -5
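# Illustrative only, not an addition to the suite: every test above follows the
# same shape -- feed a `T{ ... -> ... }T` block to FORTH.Engine and assert that
# the failure counter e.root.test["f"] stayed at zero. The snippet below mimics
# that pattern, including the leading ``` marker used in the test docstrings.
def _example_check(options=None):
    source = r"""
```
T{ 1 2 + -> 3 }T
"""
    e = FORTH.Engine(run=source, **(options or {}))
    assert e.root.test["f"] == 0  # no recorded failures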
hasattr(self, "target_user_id") and self.target_user_id: result["targetUserId"] = str(self.target_user_id) elif include_empty: result["targetUserId"] = str() if hasattr(self, "payment_provider") and self.payment_provider: result["paymentProvider"] = str(self.payment_provider) elif include_empty: result["paymentProvider"] = str() if hasattr(self, "payment_method") and self.payment_method: result["paymentMethod"] = str(self.payment_method) elif include_empty: result["paymentMethod"] = str() if hasattr(self, "region") and self.region: result["region"] = str(self.region) elif include_empty: result["region"] = str() if hasattr(self, "language") and self.language: result["language"] = str(self.language) elif include_empty: result["language"] = str() if hasattr(self, "zip_code") and self.zip_code: result["zipCode"] = str(self.zip_code) elif include_empty: result["zipCode"] = str() if hasattr(self, "state") and self.state: result["state"] = str(self.state) elif include_empty: result["state"] = str() if hasattr(self, "tax") and self.tax: result["tax"] = int(self.tax) elif include_empty: result["tax"] = int() if hasattr(self, "vat") and self.vat: result["vat"] = int(self.vat) elif include_empty: result["vat"] = int() if hasattr(self, "sales_tax") and self.sales_tax: result["salesTax"] = int(self.sales_tax) elif include_empty: result["salesTax"] = int() if hasattr(self, "payment_provider_fee") and self.payment_provider_fee: result["paymentProviderFee"] = int(self.payment_provider_fee) elif include_empty: result["paymentProviderFee"] = int() if hasattr(self, "payment_method_fee") and self.payment_method_fee: result["paymentMethodFee"] = int(self.payment_method_fee) elif include_empty: result["paymentMethodFee"] = int() if hasattr(self, "currency") and self.currency: result["currency"] = self.currency.to_dict(include_empty=include_empty) elif include_empty: result["currency"] = CurrencySummary() if hasattr(self, "payment_station_url") and self.payment_station_url: result["paymentStationUrl"] = str(self.payment_station_url) elif include_empty: result["paymentStationUrl"] = str() if hasattr(self, "transactions") and self.transactions: result["transactions"] = [i0.to_dict(include_empty=include_empty) for i0 in self.transactions] elif include_empty: result["transactions"] = [] if hasattr(self, "status_reason") and self.status_reason: result["statusReason"] = str(self.status_reason) elif include_empty: result["statusReason"] = str() if hasattr(self, "created_time") and self.created_time: result["createdTime"] = str(self.created_time) elif include_empty: result["createdTime"] = str() if hasattr(self, "charged_time") and self.charged_time: result["chargedTime"] = str(self.charged_time) elif include_empty: result["chargedTime"] = str() if hasattr(self, "authorised_time") and self.authorised_time: result["authorisedTime"] = str(self.authorised_time) elif include_empty: result["authorisedTime"] = str() if hasattr(self, "refunded_time") and self.refunded_time: result["refundedTime"] = str(self.refunded_time) elif include_empty: result["refundedTime"] = str() if hasattr(self, "chargeback_time") and self.chargeback_time: result["chargebackTime"] = str(self.chargeback_time) elif include_empty: result["chargebackTime"] = str() if hasattr(self, "chargeback_reversed_time") and self.chargeback_reversed_time: result["chargebackReversedTime"] = str(self.chargeback_reversed_time) elif include_empty: result["chargebackReversedTime"] = str() if hasattr(self, "return_url") and self.return_url: result["returnUrl"] = 
str(self.return_url) elif include_empty: result["returnUrl"] = str() if hasattr(self, "channel") and self.channel: result["channel"] = str(self.channel) elif include_empty: result["channel"] = str() if hasattr(self, "notify_url") and self.notify_url: result["notifyUrl"] = str(self.notify_url) elif include_empty: result["notifyUrl"] = str() if hasattr(self, "custom_parameters") and self.custom_parameters: result["customParameters"] = {str(k0): v0 for k0, v0 in self.custom_parameters.items()} elif include_empty: result["customParameters"] = {} if hasattr(self, "charging") and self.charging: result["charging"] = bool(self.charging) elif include_empty: result["charging"] = bool() if hasattr(self, "subscription_id") and self.subscription_id: result["subscriptionId"] = str(self.subscription_id) elif include_empty: result["subscriptionId"] = str() if hasattr(self, "recurring_payment_order_no") and self.recurring_payment_order_no: result["recurringPaymentOrderNo"] = str(self.recurring_payment_order_no) elif include_empty: result["recurringPaymentOrderNo"] = str() if hasattr(self, "omit_notification") and self.omit_notification: result["omitNotification"] = bool(self.omit_notification) elif include_empty: result["omitNotification"] = bool() if hasattr(self, "metadata") and self.metadata: result["metadata"] = {str(k0): str(v0) for k0, v0 in self.metadata.items()} elif include_empty: result["metadata"] = {} if hasattr(self, "total_tax") and self.total_tax: result["totalTax"] = int(self.total_tax) elif include_empty: result["totalTax"] = int() if hasattr(self, "total_price") and self.total_price: result["totalPrice"] = int(self.total_price) elif include_empty: result["totalPrice"] = int() if hasattr(self, "subtotal_price") and self.subtotal_price: result["subtotalPrice"] = int(self.subtotal_price) elif include_empty: result["subtotalPrice"] = int() return result # endregion to methods # region static methods @classmethod def create( cls, rvn: Optional[int] = None, created_at: Optional[str] = None, updated_at: Optional[str] = None, payment_order_no: Optional[str] = None, namespace: Optional[str] = None, user_id: Optional[str] = None, status: Optional[str] = None, sandbox: Optional[bool] = None, ext_order_no: Optional[str] = None, title: Optional[str] = None, description: Optional[str] = None, item_type: Optional[str] = None, price: Optional[int] = None, sku: Optional[str] = None, ext_user_id: Optional[str] = None, target_namespace: Optional[str] = None, target_user_id: Optional[str] = None, payment_provider: Optional[str] = None, payment_method: Optional[str] = None, region: Optional[str] = None, language: Optional[str] = None, zip_code: Optional[str] = None, state: Optional[str] = None, tax: Optional[int] = None, vat: Optional[int] = None, sales_tax: Optional[int] = None, payment_provider_fee: Optional[int] = None, payment_method_fee: Optional[int] = None, currency: Optional[CurrencySummary] = None, payment_station_url: Optional[str] = None, transactions: Optional[List[Transaction]] = None, status_reason: Optional[str] = None, created_time: Optional[str] = None, charged_time: Optional[str] = None, authorised_time: Optional[str] = None, refunded_time: Optional[str] = None, chargeback_time: Optional[str] = None, chargeback_reversed_time: Optional[str] = None, return_url: Optional[str] = None, channel: Optional[str] = None, notify_url: Optional[str] = None, custom_parameters: Optional[Dict[str, Any]] = None, charging: Optional[bool] = None, subscription_id: Optional[str] = None, 
recurring_payment_order_no: Optional[str] = None, omit_notification: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, total_tax: Optional[int] = None, total_price: Optional[int] = None, subtotal_price: Optional[int] = None, ) -> PaymentOrder: instance = cls() if rvn is not None: instance.rvn = rvn if created_at is not None: instance.created_at = created_at if updated_at is not None: instance.updated_at = updated_at if payment_order_no is not None: instance.payment_order_no = payment_order_no if namespace is not None: instance.namespace = namespace if user_id is not None: instance.user_id = user_id if status is not None: instance.status = status if sandbox is not None: instance.sandbox = sandbox if ext_order_no is not None: instance.ext_order_no = ext_order_no if title is not None: instance.title = title if description is not None: instance.description = description if item_type is not None: instance.item_type = item_type if price is not None: instance.price = price if sku is not None: instance.sku = sku if ext_user_id is not None: instance.ext_user_id = ext_user_id if target_namespace is not None: instance.target_namespace = target_namespace if target_user_id is not None: instance.target_user_id = target_user_id if payment_provider is not None: instance.payment_provider = payment_provider if payment_method is not None: instance.payment_method = payment_method if region is not None: instance.region = region if language is not None: instance.language = language if zip_code is not None: instance.zip_code = zip_code if state is not None: instance.state = state if tax is not None: instance.tax = tax if vat is not None: instance.vat = vat if sales_tax is not None: instance.sales_tax = sales_tax if payment_provider_fee is not None: instance.payment_provider_fee = payment_provider_fee if payment_method_fee is not None: instance.payment_method_fee = payment_method_fee if currency is not None: instance.currency = currency if payment_station_url is not None: instance.payment_station_url = payment_station_url if transactions is not None: instance.transactions = transactions if status_reason is not None: instance.status_reason = status_reason if created_time is not None: instance.created_time = created_time if charged_time is not None: instance.charged_time = charged_time if authorised_time is not None: instance.authorised_time = authorised_time if refunded_time is not None: instance.refunded_time = refunded_time if chargeback_time is not None: instance.chargeback_time = chargeback_time if chargeback_reversed_time is not None: instance.chargeback_reversed_time = chargeback_reversed_time if return_url is not None: instance.return_url = return_url if channel is not None: instance.channel = channel if notify_url is not None: instance.notify_url = notify_url if custom_parameters is not None: instance.custom_parameters = custom_parameters if charging is not None: instance.charging = charging if subscription_id is not None: instance.subscription_id = subscription_id if recurring_payment_order_no is not None: instance.recurring_payment_order_no = recurring_payment_order_no if omit_notification is not None: instance.omit_notification = omit_notification if metadata is not None: instance.metadata = metadata if total_tax is not None: instance.total_tax = total_tax if total_price is not None: instance.total_price = total_price if subtotal_price is not None: instance.subtotal_price = subtotal_price return instance @classmethod def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> 
PaymentOrder: instance = cls() if not dict_: return instance if "rvn" in dict_ and dict_["rvn"] is not None: instance.rvn = int(dict_["rvn"]) elif include_empty: instance.rvn = int() if "createdAt" in dict_ and dict_["createdAt"] is not None: instance.created_at = str(dict_["createdAt"]) elif include_empty: instance.created_at = str() if "updatedAt" in dict_ and dict_["updatedAt"] is not None: instance.updated_at = str(dict_["updatedAt"]) elif include_empty: instance.updated_at = str() if "paymentOrderNo" in dict_ and dict_["paymentOrderNo"] is not None: instance.payment_order_no = str(dict_["paymentOrderNo"]) elif include_empty: instance.payment_order_no = str() if "namespace" in dict_ and dict_["namespace"] is not None: instance.namespace = str(dict_["namespace"]) elif include_empty: instance.namespace = str() if "userId" in dict_ and dict_["userId"] is not None: instance.user_id = str(dict_["userId"]) elif include_empty: instance.user_id = str() if "status" in dict_ and dict_["status"] is not None: instance.status = str(dict_["status"]) elif include_empty: instance.status = str() if "sandbox" in dict_ and dict_["sandbox"] is not None: instance.sandbox = bool(dict_["sandbox"]) elif include_empty: instance.sandbox = bool() if "extOrderNo" in dict_ and dict_["extOrderNo"] is not None: instance.ext_order_no = str(dict_["extOrderNo"]) elif include_empty: instance.ext_order_no = str() if "title" in dict_ and dict_["title"] is not None: instance.title = str(dict_["title"]) elif include_empty: instance.title = str() if "description" in dict_ and dict_["description"] is not None: instance.description
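# --- Illustrative sketch (not part of the generated SDK sample above) ---
# The PaymentOrder class above follows a common generated-model convention:
# create() assigns only the keyword arguments that were actually passed, and
# create_from_dict() maps camelCase wire keys to snake_case attributes, with
# include_empty falling back to the type's empty value. A minimal, hypothetical
# TwoFieldModel (made up here for illustration) showing the same pattern:
from typing import Optional


class TwoFieldModel:
    @classmethod
    def create(cls, title: Optional[str] = None, total_price: Optional[int] = None) -> "TwoFieldModel":
        instance = cls()
        # Only set attributes for arguments that were supplied.
        if title is not None:
            instance.title = title
        if total_price is not None:
            instance.total_price = total_price
        return instance

    @classmethod
    def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> "TwoFieldModel":
        instance = cls()
        if not dict_:
            return instance
        # Map camelCase wire keys to snake_case attributes, coercing types.
        if "title" in dict_ and dict_["title"] is not None:
            instance.title = str(dict_["title"])
        elif include_empty:
            instance.title = str()
        if "totalPrice" in dict_ and dict_["totalPrice"] is not None:
            instance.total_price = int(dict_["totalPrice"])
        elif include_empty:
            instance.total_price = int()
        return instance


# Usage: build a model from a wire-format payload.
order_like = TwoFieldModel.create_from_dict({"title": "Starter Pack", "totalPrice": 499})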
#!/usr/bin/python3 # -*- coding: utf-8 -*- # Util functions # @author <EMAIL> from lxml import etree from lxml.etree import tostring from itertools import chain from nltk.tokenize import wordpunct_tokenize from random import shuffle from sklearn import metrics import numpy import operator import pandas import re import subprocess import torch debug = True #def clean_file(filename): # remove blank lines | remove extra spaces| remove leading and trailing spaces | fix utf-8 chars #command = r"sed '/^\s*$/d' $file | sed -e 's/ */ /g' | sed -e 's/^ //g' | sed -e 's/ $//g' | sed -e 's/&amp;/and/g' | sed -e 's/&#13;/ /g' | sed -e 's/&#8217;/\'/g' | sed -e 's/&#8221;/\"/g' | sed -e 's/&#8220;/\"/g' | sed -e 's/&#65533;//g' | sed -e 's/&#175\7;//g'| sed -e 's/&#1770;/\'/g'" # TODO def add_labels(df, labels, labelname): print('add_labels:', len(labels)) df[labelname] = '' for i, row in df.iterrows(): #print('add_labels i=', i) if i < len(labels): #print('add_labels labelname:', labelname, labels[i]) df.at[i, labelname] = labels[i] else: print('WARNING, add_labels i out of range:', i) return df def add_time_ids(event_elem, tag_elem): time_map = {} for tlink in tag_elem.findall('TLINK'): eventid = None timeid = None if 'eventID' in tlink.attrib and 'relatedToTime' in tlink.attrib: eventid = tlink.get('eventID') timeid = tlink.get('relatedToTime') elif 'timeID' in tlink.attrib and 'relatedToEventID' in tlink.attrib: eventid = tlink.get('relatedToEventID') timeid = tlink.get('timeID') if timeid is not None and eventid is not None: if eventid not in time_map: time_map[eventid] = [] time_map[eventid].append(timeid) for event in event_elem: eid = event.get('eid') time_ids = time_map[eid] if time_ids is not None: tid_string = ','.join(time_ids) event.set('relatedToTime', tid_string) return event_elem ''' (In progress) ''' def add_thyme_labels(filename, outfile): brain = [] colon = [] xmltree = etree.parse(filename) root = xmltree.getroot() for child in root: idname = child.get('record_id').text.split('_')[0] id = idname[2:] if int(id) in brain: label = 'brain_cancer' elif int(id) in colon: label = 'colon_cancer' else: print('WARNING: id not found:', id) label = 'none' labelnode = etree.SubElement(child, 'diagnosis') labelnode.text = label etree.write(outfile) def create_df(df): return pandas.DataFrame(columns=['ID']) def collapse_labels(labels): flat_labels = [] for lab in labels: for item in lab: flat_labels.append(item) return flat_labels def extract_ranks(events, event_list=None, allow_empty=False): elem = load_xml_tags(events, decode=False) ranks = [] event_map = {} if debug: print('extract_ranks: events:', type(events))# 'elem:', etree.tostring(elem)) if debug: print('extract_ranks: event_list:', type(event_list)) if event_list is not None: for event in event_list: if event.tag == 'EVENT': #print(etree.tostring(event)) id = event.get('eid') rank = event.get('rank') if rank is None: print('ERROR: no rank attribute found:', etree.tostring(event)) rank = 0 if not allow_empty: exit(1) event_map[id] = rank event_count = 0 for event in elem: if debug: print('child tag:', event.tag) if event.tag == 'EVENT': event_count += 1 #print('elem event:', etree.tostring(event)) if event_list is None: rank = event.get('rank') else: eventid = event.get('eid') #print('looking up eid', eventid) rank = event_map[eventid] if rank is None: print('ERROR: no rank attribute found:', etree.tostring(event)) rank = 0 if not allow_empty: exit(1) #ranks.append(0) #if int(rank) == 0: # print('WARNING: rank is 0:', 
etree.tostring(event)) ranks.append(float(rank)) #if int(rank) == 0: # print('WARNING: rank is 0:', etree.tostring(event)) if debug: print('events:', event_count, 'ranks:', len(ranks)) assert(len(ranks) == event_count) return ranks ''' Convert arrows in text to non-arrows (for xml processing) filename: the file to fix (file will be overwritten) ''' def fix_arrows(filename): sed_command = r"sed -e 's/-->/to/g' " + filename + r" | sed -e 's/->/to/g' | sed -e 's/ < / lt /g' | sed -e 's/ > / gt /g'" print("sed_command: ", sed_command) #f = open("temp", 'wb') ps = subprocess.Popen(sed_command, shell=True, stdout=subprocess.PIPE) output = ps.communicate()[0] out = open(filename, 'w') out.write(output) out.close() def fix_escaped_chars(filename): subprocess.call(["sed", "-i", "-e", 's/&lt;/ </g', filename]) subprocess.call(["sed", "-i", "-e", 's/&gt;/> /g', filename]) subprocess.call(["sed", "-i", "-e", 's/ / /g', filename]) subprocess.call(["sed", "-i", "-e", "s/‘/'/g", filename]) subprocess.call(["sed", "-i", "-e", "s/’/'/g", filename]) subprocess.call(["sed", "-i", "-e", "s/&#8216;/'/g", filename]) subprocess.call(["sed", "-i", "-e", "s/&#8217;/'/g", filename]) subprocess.call(["sed", "-i", "-e", "s/&#8211;/,/g", filename]) ''' Remove blank lines, convert \n to space, remove double spaces, insert a line break before each record filename: the file to fix (file will be overwritten) rec_type: the type of record: adult, child, or neonate ''' def fix_line_breaks(filename, rec_type): tag = "<Adult_Anonymous>" if rec_type == "child": tag = "<Child_Anonymous>" elif rec_type == "neonate": tag = "<Neonate_Anonymous>" sed_command = "s/" + tag + r"/\n" + tag + "/g" sed_command2 = r"sed -e 's/<\/root>/\n<\/root>/g'" #print "sed_command: " + sed_command tr_command = "tr " + r"'\n' " + "' '" #print "tr_command: " + tr_command #f = open("temp", 'wb') command = "sed -e '/^\s$/d' " + filename + " | " + tr_command + " | sed -e 's/ / /g' | sed -e '" + sed_command + "'" + " | " + sed_command2 ps = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) output = ps.communicate()[0] out = open(filename, 'w') out.write(output) out.close() def fix_xml_tags(text): text = text.replace('&amp;', '&') text = text.replace('&lt;EVENT&gt;', '<EVENT>').replace('&lt;/EVENT&gt;', '</EVENT>') text = text.replace('&lt;EVENT', '<EVENT') text = text.replace('&amp;lt;EVENT&amp;gt;;', '<EVENT>').replace('&amp;lt;/EVENT&amp;gt;', '</EVENT>') text = text.replace('&lt;TIMEX3&gt;', '<TIMEX3>').replace('&lt;/TIMEX3&gt;', '</TIMEX3>') text = text.replace('&lt;TIMEX3', '<TIMEX3') text = text.replace('&amp;lt;TIMEX3&amp;gt;', '<TIMEX3>').replace('&amp;lt;/TIMEX3&amp;gt;', '</TIMEX3>') text = text.replace('&lt;SIGNAL&gt;', '<SIGNAL>').replace('&lt;/SIGNAL&gt;', '</SIGNAL>') text = text.replace('&lt;SIGNAL', '<SIGNAL') text = text.replace('&lt;TLINK', '<TLINK').replace('/&gt;', '/>') text = text.replace('" &gt;', '">') text = text.replace('"&gt;', '">').replace(' >', '>') text = text.replace('&', '&amp;') # escape any leftover and signs return text def shuffle_input(x, y): new_x = [] new_y = [] for n in range(len(x)): #rank_map = {} x_list = x[n] y_list = y[n] new_x_list = [] new_y_list = [] temp_list = list(zip(x_list, y_list)) shuffle(temp_list) new_lists = [list(t) for t in zip(*temp_list)] new_x_list = new_lists[0] new_y_list = new_lists[1] new_x.append(new_x_list) new_y.append(new_y_list) #print('Shuffle entry:', str(new_y_list)) return new_x, new_y ''' Shuffle events within the same rank value, produce one shuffled example for every 
in-order example ''' def generate_permutations(ids, x, y): new_ids = ids new_x = x new_y = y for n in range(len(x)): #rank_map = {} doc_id = ids[n] x_list = x[n] y_list = y[n] new_x_list = [] new_y_list = [] temp_list = list(zip(x_list, y_list)) shuffle(temp_list) new_lists = [list(t) for t in zip(*temp_list)] new_x_list = new_lists[0] new_y_list = new_lists[1] new_ids.append(doc_id) new_x.append(new_x_list) new_y.append(new_y_list) #print('Shuffle entry:', str(new_y_list)) # Shuffle the final training list temp_pairs = list(zip(new_ids, new_x, new_y)) shuffle(temp_pairs) #print('shuffle temp pairs[0]:', str(temp_pairs[0])) new_lists = [list(t) for t in zip(*temp_pairs)] new_ids = new_lists[0] new_x = new_lists[1] new_y = new_lists[2] #print('shuffle new_y[0]', str(new_y[0])) return new_ids, new_x, new_y def load_time_pairs(filename): print('load time pairs:', filename) time_df = pandas.read_csv(filename, header=None, index_col=False) time_df.columns = ['time1', 'time2', 'order'] pairs = [] labels = [] for i, row in time_df.iterrows(): pairs.append((split_words(row['time1']), split_words(row['time2']))) labels.append(row['order']) #print('loaded time pair:', pairs[-1], labels[-1]) return pairs, labels def load_xml_tags(ann, unwrap=True, decode=False): if debug: print('load_xml_tags:', ann) if decode or type(ann) is bytes: ann = ann.decode('utf8') if unwrap: ann_xml = etree.fromstring(ann) ann_text = stringify_children(ann_xml) else: ann_text = ann ann_text = fix_xml_tags(ann_text) # Escape & signs that might have been unescaped #if len(ann_text) > 830: # print(ann_text[820:]) ann_element = etree.fromstring("<root>" + ann_text + "</root>") return ann_element def reorder_encodings(encodings, orderings): print('reorder encodings:', len(encodings), len(orderings)) assert(len(encodings) == len(orderings)) dim = 0 # Get the dim later after we make sure it's not None new_encodings = [] for x in range(len(encodings)): if encodings[x] is not None: dim = encodings[x].size(-1) #print('dim:', dim) enc = encodings[x].view(-1, dim) order = orderings[x]#.squeeze() print('timeline for reordering:', enc.size(), 'ranks:', order) indices = [] for y in range(len(order)): indices.append((y, order[y])) indices.sort(key=lambda k: k[1]) #shuffle(indices) enc_list = [] for pair in indices: rank = pair[1] index = pair[0] print('picking rank:', rank, 'at index:', index) enc_list.append(enc[index]) new_enc = torch.stack(enc_list).view(1, -1, dim) print('encodings size:', new_enc.size()) new_encodings.append(new_enc) return new_encodings def score_majority_class(true_labs): pred_labs = [] majority_lab = None count_map = {} for lab in true_labs: if lab not in count_map.keys(): count_map[lab] = 0 count_map[lab] = count_map[lab]+1 majority_lab = max(count_map.iteritems(), key=operator.itemgetter(1))[0] for lab in true_labs: pred_labs = majority_lab # Score precision = metrics.precision_score(true_labs, pred_labs, average="weighted") recall = metrics.recall_score(true_labs, pred_labs, average="weighted") f1 = metrics.f1_score(true_labs, pred_labs, average="weighted") return precision, recall, f1 ''' Scores vector labels with binary values returns: avg precision, recall, f1 of 1 labels (not 0s) ''' def score_vec_labels(true_labs, pred_labs): p_scores = [] r_scores = [] f1_scores = [] micro_pos = 0 micro_tp = 0 micro_fp = 0 assert(len(true_labs) == len(pred_labs)) for x in range(len(true_labs)): true_lab = true_labs[x] pred_lab = pred_labs[x] pos = 0 tp = 0 fp = 0 for y in range(len(true_lab)): true_val = true_lab[y] 
pred_val = pred_lab[y] if true_val == 1: pos = pos+1 micro_pos = micro_pos+1 if
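# --- Illustrative sketch (not part of the utility module above) ---
# extract_ranks() above wraps annotated text in a synthetic <root> element and
# reads the 'rank' attribute of each <EVENT>, defaulting to 0 (with a warning,
# and exiting unless allow_empty is set) when the attribute is missing. A
# self-contained restatement of that idea; the sentence fragment is made up:
from lxml import etree

fragment = ('<root>She was <EVENT eid="e1" rank="2">admitted</EVENT> and later '
            '<EVENT eid="e2" rank="1">discharged</EVENT>.</root>')
elem = etree.fromstring(fragment)

ranks = []
for event in elem.iter('EVENT'):
    rank = event.get('rank')
    if rank is None:
        rank = 0  # the original warns here and may exit
    ranks.append(float(rank))

print(ranks)  # [2.0, 1.0]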
import %r", item) imports_to_remove = set(find_all_package_nodes(item)) # Remove references between module nodes, as though they would # not be imported from 'name'. # Note: Doing this in a nested loop is less efficient than # collecting all import to remove first, but log messages # are easier to understand since related to the "Excluding ..." # message above. for src in hooked_mods: # modules, this `src` does import references = set( node.identifier for node in self.module_graph.getReferences(src)) # Remove all of these imports which are also in # "imports_to_remove". for dest in imports_to_remove & references: self.module_graph.removeReference(src, dest) logger.info( " Removing import of %s from module %s", dest, src) #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! #FIXME: This class has been obsoleted by "ModuleHookCache" and will be removed. #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! class HooksCache(dict): """ Dictionary mapping from the fully-qualified names of each module hooked by at least one hook script to lists of the absolute paths of these scripts. This `dict` subclass caches the list of all hooks applicable to each module, permitting Pythonic mapping, iteration, addition, and removal of such hooks. Each dictionary key is a fully-qualified module name. Each dictionary value is a list of the absolute paths of all hook scripts specific to that module, including both official PyInstaller hooks and unofficial user-defined hooks. See Also ---------- `_load_file_list()` For details on hook priority. """ def __init__(self, hooks_dir): """ Initialize this dictionary. Parameters ---------- hook_dir : str Absolute or relative path of the directory containing hooks with which to populate this cache. By default, this is the absolute path of the `PyInstaller/hooks` directory containing official hooks. """ super(dict, self).__init__() self._load_file_list(hooks_dir) def _load_file_list(self, hooks_dir): """ Cache all hooks in the passed directory. **Order of caching is significant** with respect to hooks for the same module, as the values of this dictionary are ordered lists. Hooks for the same module will be run in the order in which they are cached. Previously cached hooks are always preserved (rather than overidden). Specifically, any hook in the passed directory having the same module name as that of a previously cached hook will be appended to the list of hooks for that module name. By default, official hooks are cached _before_ user-defined hooks. For modules with both official and user-defined hooks, this implies that the former take priority over and will be run _before_ the latter. Parameters ---------- hooks_dir : str Absolute or relative path of the directory containing additional hooks to be cached. For convenience, tilde and variable expansion will be applied to this path (e.g., a leading `~` will be replaced by the absolute path of the corresponding home directory). """ # Perform tilde and variable expansion and validate the result. hooks_dir = expand_path(hooks_dir) if not os.path.isdir(hooks_dir): logger.error('Hook directory %r not found', os.path.abspath(hooks_dir)) return # For each hook in the passed directory... hook_files = glob.glob(os.path.join(hooks_dir, 'hook-*.py')) for hook_file in hook_files: # Absolute path of this hook's script. hook_file = os.path.abspath(hook_file) # Fully-qualified name of this hook's corresponding module, # constructed by removing the "hook-" prefix and ".py" suffix. 
module_name = os.path.basename(hook_file)[5:-3] # If this module already has cached hooks, append this hook's path # to the existing list of such paths. if module_name in self: self[module_name].append(hook_file) # Else, default to a new list containing only this hook's path. else: self[module_name] = [hook_file] def add_custom_paths(self, hooks_dirs): """ Cache all hooks in the list of passed directories. Parameters ---------- hooks_dirs : list List of the absolute or relative paths of all directories containing additional hooks to be cached. """ for hooks_dir in hooks_dirs: self._load_file_list(hooks_dir) def remove(self, module_names): """ Remove all key-value pairs whose key is a fully-qualified module name in the passed list from this dictionary. Parameters ---------- module_names : list List of all fully-qualified module names to be removed. """ for module_name in set(module_names): # Eliminate duplicate entries. if module_name in self: del self[module_name] class AdditionalFilesCache(object): """ Cache for storing what binaries and datas were pushed by what modules when import hooks were processed. """ def __init__(self): self._binaries = {} self._datas = {} def add(self, modname, binaries, datas): self._binaries[modname] = binaries or [] self._datas[modname] = datas or [] def __contains__(self, name): return name in self._binaries or name in self._datas def binaries(self, modname): """ Return list of binaries for given module name. """ return self._binaries[modname] def datas(self, modname): """ Return list of datas for given module name. """ return self._datas[modname] #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! #FIXME: This class has been obsoleted by "ModuleHook" and will be removed. #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! class ImportHook(object): """ Class encapsulating processing of hook attributes like hiddenimports, etc. """ def __init__(self, modname, hook_filename): """ :param hook_filename: File name where to load hook from. """ logger.info('Processing hook %s' % os.path.basename(hook_filename)) self._name = modname self._filename = hook_filename # _module represents the code of 'hook-modname.py' # Load hook from file and parse and interpret it's content. hook_modname = 'PyInstaller_hooks_' + modname.replace('.', '_') self._module = importlib_load_source(hook_modname, self._filename) # Public import hook attributes for further processing. self.binaries = set() self.datas = set() # Internal methods for processing. def _process_hook_function(self, mod_graph): """ Call the hook function hook(mod). Function hook(mod) has to be called first because this function could update other attributes - datas, hiddenimports, etc. """ # Process a `hook(hook_api)` function. hook_api = PostGraphAPI(self._name, mod_graph) self._module.hook(hook_api) self.datas.update(set(hook_api._added_datas)) self.binaries.update(set(hook_api._added_binaries)) for item in hook_api._added_imports: self._process_one_hiddenimport(item, mod_graph) for item in hook_api._deleted_imports: # Remove the graph link between the hooked module and item. # This removes the 'item' node from the graph if no other # links go to it (no other modules import it) mod_graph.removeReference(hook_api.node, item) def _process_hiddenimports(self, mod_graph): """ 'hiddenimports' is a list of Python module names that PyInstaller is not able detect. 
""" # push hidden imports into the graph, as if imported from self._name for item in self._module.hiddenimports: self._process_one_hiddenimport(item, mod_graph) def _process_one_hiddenimport(self, item, mod_graph): try: # Do not try to first find out if a module by that name already exist. # Rely on modulegraph to handle that properly. # Do not automatically create namespace packages if they do not exist. caller = mod_graph.findNode(self._name, create_nspkg=False) mod_graph.import_hook(item, caller=caller) except ImportError: # Print warning if a module from hiddenimport could not be found. # modulegraph raises ImporError when a module is not found. # Import hook with non-existing hiddenimport is probably a stale hook # that was not updated for a long time. logger.warning("Hidden import '%s' not found (probably old hook)", item) def _process_excludedimports(self, mod_graph): """ 'excludedimports' is a list of Python module names that PyInstaller should not detect as dependency of this module name. So remove all import-edges from the current module (and it's submodules) to the given `excludedimports` (end their submodules). """ def find_all_package_nodes(name): mods = [name] name += '.' for subnode in mod_graph.nodes(): if subnode.identifier.startswith(name): mods.append(subnode.identifier) return mods # Collect all submodules of this module. hooked_mods = find_all_package_nodes(self._name) # Collect all dependencies and their submodules # TODO: Optimize this by using a pattern and walking the graph # only once. for item in set(self._module.excludedimports): excluded_node = mod_graph.findNode(item, create_nspkg=False) if excluded_node is None: logger.info("Import to be excluded not found: %r", item) continue logger.info("Excluding import %r", item) imports_to_remove = set(find_all_package_nodes(item)) # Remove references between module nodes, as though they would # not be imported from 'name'. # Note: Doing this in a nested loop is less efficient than # collecting all import to remove first, but log messages # are easier to understand since related to the "Excluding ..." # message above. for src in hooked_mods: # modules, this `src` does import references = set(n.identifier for n in mod_graph.getReferences(src)) # Remove all of these imports which are also in `imports_to_remove` for dest in imports_to_remove & references: mod_graph.removeReference(src, dest) logger.warning(" From %s removing import %s", src, dest) def _process_datas(self, mod_graph): """ 'datas' is a list of globs of files or directories to bundle as datafiles. For each glob, a destination directory is specified. """ # Find all files and interpret glob statements.
from dataclasses import dataclass from typing import Any, List import inspect import itertools import warnings import types import requests from flask_discord_interactions.response import Response class CommandOptionType: "Represents the different option type integers." SUB_COMMAND = 1 SUB_COMMAND_GROUP = 2 STRING = 3 INTEGER = 4 BOOLEAN = 5 USER = 6 CHANNEL = 7 ROLE = 8 MENTIONABLE = 9 NUMBER = 10 class ChannelType: "Represents the different :class:`Channel` type integers." GUILD_TEXT = 0 DM = 1 GUILD_VOICE = 2 GROUP_DM = 3 GUILD_CATEGORY = 4 GUILD_NEWS = 5 GUILD_STORE = 6 class Permission: """ An object representing a single permission overwrite. ``Permission(role='1234')`` allows users with role ID 1234 to use the command ``Permission(user='5678')`` allows user ID 5678 to use the command ``Permission(role='9012', allow=False)`` denies users with role ID 9012 from using the command """ def __init__(self, role=None, user=None, allow=True): if bool(role) == bool(user): raise ValueError("specify only one of role or user") self.type = 1 if role else 2 self.id = role or user self.permission = allow def dump(self): return { "type": self.type, "id": self.id, "permission": self.permission } class ContextObject: @classmethod def from_dict(cls, data): """ Construct the Context object from a dictionary, skipping any keys in the dictionary that do not correspond to fields of the class. Parameters ---------- data A dictionary of fields to set on the Context object. """ return cls(**{ k: v for k, v in data.items() if k in inspect.signature(cls).parameters }) @dataclass class User(ContextObject): """ Represents a User (the identity of a Discord user, not tied to any specific guild). Attributes ---------- id The ID (snowflake) of the user. username The Discord username of the user. discriminator The code following the # after the username. avatar_hash The unique hash identifying the profile picture of the user. bot Whether the user is a bot account. system Whether the user is a Discord system account. mfa_enabled Whether the user has enabled Two-Factor Authentication. locale The locale of the user. flags Miscellaneous information about the user. premium_type The Nitro status of the user. public_flags Miscellaneous information about the user. """ id: str = None username: str = None discriminator: str = None avatar_hash: str = None bot: bool = None system: bool = None mfa_enabled: bool = None locale: str = None flags: int = None premium_type: int = None public_flags: int = None @classmethod def from_dict(cls, data): data = {**data, **data.get("user", {})} data["avatar_hash"] = data.get("avatar") return super().from_dict(data) @property def display_name(self): "The displayed name of the user (the username)." return self.username @property def avatar_url(self): "The URL of the user's profile picture." return ("https://cdn.discordapp.com/avatars/" f"{self.id}/{self.avatar_hash}.png") @dataclass class Member(User): """ Represents a Member (a specific Discord :class:`User` in one particular guild.) Attributes ---------- nick The guild nickname of the user. roles An array of role IDs that the user has. joined_at The timestamp that the user joined the guild at. premium_since The timestamp that the user started Nitro boosting the guild at. permissions The permissions integer of the user. deaf Whether the user has been server deafened. mute Whether the user has been server muted. pending Whether the user has passed the membership requirements of a guild. 
""" nick: str = None roles: list = None joined_at: str = None premium_since: str = None permissions: int = None deaf: bool = None mute: bool = None pending: bool = None def __post_init__(self): if isinstance(self.permissions, str): self.permissions = int(self.permissions) @property def display_name(self): """ The displayed name of the user (their nickname, or if none exists, their username). """ return self.nick or self.username @dataclass class Channel(ContextObject): """ Represents a Channel in Discord. This includes voice channels, text channels, and channel categories. Attributes ---------- id The unique ID (snowflake) of the channel. name The name of the channel. permissions The permissions integer of the invoking user in that channel. type The type of channel. """ id: str = None name: str = None permissions: int = None type: int = None @dataclass class Role(ContextObject): """ Represents a Role in Discord. Attributes ---------- id The unique ID (snowflake) of the role. name The name of the role. color The color given to the role. hoist Whether the role is displayed separately in the member list. position The position of the role in the roles list. permissions The permissions integer of the role. managed Whether the role is managed by an integration (bot). mentionable Whether the role can be mentioned by all users. tags Miscellaneous information about the role. """ id: str = None name: str = None color: str = None hoist: bool = None position: int = None managed: bool = None mentionable: bool = None tags: dict = None @dataclass class Context(ContextObject): """ Represents the context in which a :class:`SlashCommand` or custom ID handler is invoked. Attributes ---------- author A :class:`Member` object representing the invoking user. id The unique ID (snowflake) of this interaction. token The token to use when sending followup messages. channel_id The unique ID (snowflake) of the channel this command was invoked in. guild_id The unique ID (snowflake) of the guild this command was invoked in. options A list of the options passed to the command. values A list of the values selected, if this is a Select Menu handler. resolved Additional data () command_name The name of the command that was invoked. command_id The unique ID (snowflake) of the command that was invoked. members :class:`Member` objects for each user specified as an option. channels :class:`Channel` objects for each channel specified as an option. roles :class:`Role` object for each role specified as an option. """ author: Member = None id: str = None token: str = None channel_id: str = None guild_id: str = None options: list = None values: list = None resolved: dict = None command_name: str = None command_id: str = None members: List[Member] = None channels: List[Channel] = None roles: List[Role] = None app: Any = None discord: Any = None custom_id: str = None primary_id: str = None handler_state: list = None @classmethod def from_data(cls, discord=None, app=None, data={}): if data is None: data = {} # If this is a proxy (e.g. 
flask.current_app), get the current object # https://flask.palletsprojects.com/en/2.0.x/reqcontext/#notes-on-proxies if hasattr(app, "_get_current_object"): app = app._get_current_object() result = cls( app = app, discord = discord, author = Member.from_dict(data.get("member", {})), id = data.get("id"), token = data.get("token"), channel_id = data.get("channel_id"), guild_id = data.get("guild_id"), options = data.get("data", {}).get("options"), values = data.get("data", {}).get("values", []), resolved = data.get("data", {}).get("resolved", {}), command_name = data.get("data", {}).get("name"), command_id = data.get("data", {}).get("id"), custom_id = data.get("data", {}).get("custom_id") or "" ) result.data = data result.parse_custom_id() result.parse_resolved() return result @property def auth_headers(self): if self.discord: return self.discord.auth_headers(self.app) else: return self.frozen_auth_headers def parse_custom_id(self): """ Parse the custom ID of the incoming interaction data. This includes the primary ID as well as any state stored in the handler. """ self.primary_id = self.custom_id.split("\n", 1)[0] self.handler_state = self.custom_id.split("\n") def parse_resolved(self): """ Parse the ``"resolved"`` section of the incoming interaction data. This section includes objects representing each user, member, channel, and role passed as an argument to the command. """ self.members = {} for id in self.resolved.get("members", {}): member_info = self.resolved["members"][id] member_info["user"] = self.resolved["users"][id] self.members[id] = Member.from_dict(member_info) self.channels = {id: Channel.from_dict(data) for id, data in self.resolved.get("channels", {}).items()} self.roles = {id: Role.from_dict(data) for id, data in self.resolved.get("roles", {}).items()} def create_args(self): """ Create the arguments which will be passed to the function when the :class:`SlashCommand` is invoked. """ def create_args_recursive(data, resolved): if not data.get("options"): return [], {} args = [] kwargs = {} for option in data["options"]: if option["type"] in [ CommandOptionType.SUB_COMMAND, CommandOptionType.SUB_COMMAND_GROUP]: args.append(option["name"]) sub_args, sub_kwargs = create_args_recursive( option, resolved) args += sub_args kwargs.update(sub_kwargs) elif option["type"] == CommandOptionType.USER: member_data = resolved["members"][option["value"]] member_data["user"] = resolved["users"][option["value"]] kwargs[option["name"]] = Member.from_dict(member_data) elif option["type"] == CommandOptionType.CHANNEL: kwargs[option["name"]] = Channel.from_dict( resolved["channels"][option["value"]]) elif option["type"] == CommandOptionType.ROLE: kwargs[option["name"]] = Role.from_dict( resolved["roles"][option["value"]]) else: kwargs[option["name"]] = option["value"] return args, kwargs return create_args_recursive({"options": self.options}, self.resolved) def create_handler_args(self, handler): """ Create the arguments which will be passed to the function when
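# --- Illustrative sketch (not part of the library source above) ---
# parse_custom_id() above splits the interaction's custom ID on newlines: the
# first line is the primary handler ID, while the full newline-split list is
# kept as the handler state. A standalone restatement with a made-up custom ID:
custom_id = "color_picker\nred\n3"

primary_id = custom_id.split("\n", 1)[0]   # 'color_picker'
handler_state = custom_id.split("\n")      # ['color_picker', 'red', '3']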
np.random.gamma(p1,p2) #Where p1 is the shape parameter and p2 the scale parameter. - ('exponential', p1) = np.random.exponential(p1) #Where p1 is the scale parameter (NOT the rate). - ('uniform', lower, upper) = np.random.uniform(0,lower,upper) """ self._putative_reaction_times, self._putative_reaction_times_distr_parameters = ParseDistributions(distributions, self.SSA.rate_names) self.HAS_PUTATIVE_REACTION_TIMES = True def DoSingleMoleculeStochSim(self, end=False, mode=False, method=False, trajectories=False, species_selection = None, IsOnlyLastTimepoint = False,quiet=False): """ Run a single molecule stochastic simulation until `end` is reached. This can be either time steps or end time (which could be a *HUGE* number of steps). Input (similar to .DoStochSim()): - *end* [default=1000] simulation end (steps or time) - *mode* [default='steps'] simulation mode, can be one of: ['steps','time'] - *method* [default='SingleMoleculeMethod'] stochastic algorithm, can be one of: ['SMM',fSMM'] - *trajectories* [default = 1] - *species_selection* [default = None] (list) of names of species to store. This saves memory space and prevents Memory Errors. - *IsOnlyLastTimepoint* [default = False] - *quiet* [default = False] suppress print statements """ if self._IsQuiet: quiet = True # Check whether waiting time distributions were given. if not self.HAS_PUTATIVE_REACTION_TIMES: raise Warning("No distributions have been set for the model '{0:s}'. First use the function .SetPutativeReactionTimes().".format(self.model_file)) if method != False: self.Method(method) self._MethodSetBy = "DoStochSim" elif self._MethodSetBy == "DoStochSim" and self.sim_method_name != "FastSingleMoleculeMethod": self.Method("fSMM") # Default method if not self._IsSingleMoleculeMethod: print("*** WARNING ***: an invalid method ({0}) was selected. Switching to the fast Single Molecule Method.".format(self.sim_method_name)) self.Method('fSMM') if self.sim_method_name == "FastSingleMoleculeMethod" and 2 in [self.SSA.parse.reaction_orders[j] for j in self._putative_reaction_times]: print("*** WARNING ***: Second-order reactions are not supported by the fSMM. Switching to the SMM.") self.Method('SMM') # Pass delay parameters to SingleMolecule SSA implementation. self.SSA.distr_functions = copy.copy(self._putative_reaction_times) self.SSA.distr_parameters = copy.copy(self._putative_reaction_times_distr_parameters) #If Single Molecule Method, set exponential distributions to reactions not specified in self._putative_reaction_times. if self.sim_method_name == 'SingleMoleculeMethod':# and len(self.SSA.distr_functions) < self.SSA.n_reactions: self.SSA.auto_exponential_reactions = [] #print("\nUsing exponential waiting time distributions for:") for j in range(self.SSA.n_reactions): if j not in self.SSA.distr_functions: # Don't replace already assigned distributions. self.SSA.distr_functions[j] = np.random.exponential self.SSA.distr_parameters[j] = np.nan # 31-03-2014 To be specified at start of simulation (self.SSA.EvaluatePropensities) self.SSA.auto_exponential_reactions.append(j) # 31-03-2014 # Specify that delayed method is set by the script here. Prevents DoStochSim to select other method. 
temp_MethodSetBy = self._MethodSetBy # Either "User" or "DoStochSim" self._MethodSetBy = "Script" if self._IsTrackPropensities: print("*** WARNING ***: Propensities cannot be tracked with the single molecule method") self._IsTrackPropensities = False self.DoStochSim(end=end, mode=mode, method=False, trajectories=trajectories, IsTrackPropensities=False, species_selection = species_selection, IsOnlyLastTimepoint = IsOnlyLastTimepoint,quiet=quiet) self._MethodSetBy = "DoStochSim" # RESET # Reset to original value self._MethodSetBy = temp_MethodSetBy if IsOnlyLastTimepoint and not quiet: print('Info: not enough data points (are stored) to determine statistics.') def DoCompleteStochSim(self, error = 0.001, size=100000,IsTrackPropensities=False, rate_selection=None, species_selection = None,quiet=False): """ Do a stochastic simulation until the first four moments converge (in development, beta-status) Input: - *error* maximal allowed error [default = 0.001] - *size* (integer) number of steps before checking the first four moments [default = 100000] - *IsTrackPropensities* [default = False] - *rate_selection* [default = None] (list) of names of rates to store (saves memory space and prevents Memory Errors when propensities propensities are tracked) - *species_selection* [default = None] (list) of names of species to store (saves memory space and prevents Memory Errors (occurring at ~15 species)) - *quiet* [default = False] suppress print statements """ if self._IsQuiet: quiet = True if species_selection and isinstance(species_selection,str): species_selection = [species_selection] if species_selection and isinstance(species_selection,list): for s_id in species_selection: assert s_id in self.SSA.species_names, "Species {0} is not in the model or species selection".format(s_id) self._IsTrackPropensities = IsTrackPropensities if rate_selection and isinstance(rate_selection,str): rate_selection = [rate_selection] self._IsTrackPropensities = True if rate_selection and isinstance(rate_selection,list): for r_id in rate_selection: assert r_id in self.SSA.rate_names, "Reaction {0} is not in the model or reaction selection".format(r_id) self._IsTrackPropensities = True self.Trajectories(1) self._IsFixedIntervalMethod = False self.HAS_AVERAGE = False self.DeleteTempfiles() # Delete '.dat' files self.data_stochsim = IntegrationStochasticDataObj() self.data_stochsim_grid = RegularGridDataObj() self._current_trajectory = 1 t1 = time.time() self.settings = SSASettings(x_matrix=self.SSA.X_matrixinit,timesteps=size,starttime=0,endtime=10**50, species_selection=species_selection, track_propensities=self._IsTrackPropensities,rate_selection = rate_selection,last_timepoint=False,seed = self._IsSeed,quiet=quiet) self.SSA.Execute(self.settings) if self.settings.species_selection: self.sim_species_tracked = [s_id for s_id in self.settings.species_selection] else: self.sim_species_tracked = copy.copy(self.SSA.species_names) (L_probability_mass, D_means, D_stds,D_moments) = Analysis.GetSpeciesDistributions(self.SSA.sim_output,self.sim_species_tracked) m1 = [np.array(list(D_moments[s_id].values())) for s_id in self.sim_species_tracked] IsContinue = True if not quiet: print('Info: {0:d} time steps simulated'.format(size)) n=1 while IsContinue: self.settings = SSASettings(x_matrix=self.SSA.X_matrixinit,timesteps=size*(n+1),starttime=self.SSA.sim_t,endtime=10**50, species_selection=species_selection, track_propensities=self._IsTrackPropensities,rate_selection = rate_selection,last_timepoint=False,seed = 
self._IsSeed,quiet=quiet) self.SSA.Execute(self.settings) (L_probability_mass,D_means,D_stds,D_moments) = Analysis.GetDataDistributions(self.SSA.sim_output,self.sim_species_tracked) m2 = [np.array(list(D_moments[s_id].values())) for s_id in self.sim_species_tracked] max_total = 0 for i in range(self.SSA.n_species): max_s = abs(1-(m2[i]/m1[i])).max() if max_s > max_total: max_total = max_s m1 = copy.deepcopy(m2) n+=1 if not quiet: print('Info: {0:d} time steps simulated'.format(n*size)) if max_total < error: IsContinue = False t2 = time.time() self.simulation_time = t2-t1 if not quiet: print("Info: Simulation time {0:1.5f}".format(self.simulation_time)) self.FillDataStochsim() self._IsSimulationDone = True self.sim_trajectories_done = copy.copy(self.sim_trajectories) try: self.plot = Analysis.DoPlotting(self.data_stochsim.species_labels,self.sim_rates_tracked,self.plot.plotnum,quiet) except: self.plot = Analysis.DoPlotting(self.data_stochsim.species_labels,self.sim_rates_tracked,quiet=quiet) def _getSpecies2Plot(self,species2plot): """ *** For internal use only ***: this function determines the species for which we will plot something """ if species2plot == True: species2plot = self.sim_species_tracked if isinstance(species2plot,str): species2plot = [species2plot] for s_id in species2plot: assert s_id in self.sim_species_tracked, "Species {0} is not in the model or species selection".format(s_id) return species2plot def _getRates2Plot(self,rates2plot): """ *** For internal use only ***: this function determines the reactions for which we will plot something """ if rates2plot == True: rates2plot = self.sim_rates_tracked if isinstance(rates2plot,str): rates2plot = [rates2plot] for r_id in rates2plot: assert r_id in self.sim_rates_tracked, "Reaction {0} is not in the model or reaction selection".format(r_id) return rates2plot def GetWaitingtimes(self): """ Get for each reaction the waiting times """ assert self._IsSimulationDone, "First do a stochastic simulation (and do not use the Tau-Leaping method)" assert not self._IsTauleaping, "Tau-Leaping method does not allow for calculation of waiting times" assert not self._IsFixedIntervalMethod, "Fixed-interval output solvers do not allow for calculation of waiting times" assert not self._IsOnlyLastTimepoint, "Calculating waiting times is disabled when saving only the last time point" for n in range(1,self.sim_trajectories_done+1): if self.sim_trajectories_done > 1: self.GetTrajectoryData(n) D_waitingtimes = Analysis.ObtainWaitingtimes(self.data_stochsim,self.SSA.rate_names) # hard coded for all reactions self.data_stochsim.setWaitingtimes(D_waitingtimes,self.SSA.rate_names) self.data_stochsim.setWaitingtimesMeans(self.data_stochsim.waiting_times,self.SSA.rate_names) self.data_stochsim.setWaitingtimesStandardDeviations(self.data_stochsim.waiting_times,self.SSA.rate_names) if self.sim_trajectories_done > 1: # "store" the data, otherwise the added waiting times get lost again by opening via GetTrajectoryData self.DumpTrajectoryData(n) def GetRegularGrid(self,n_samples=51): """ The Gillespie method generates data at irregular time points. This function puts the data on a fixed regular time grid of which the user can specify the resolution (n_samples). For each trajectory, we use the same grid. This has the following consequences for the two type of simulation modes: - time: the end time of each trajectory is identical - time steps: the end time of each trajectory is different. 
We select the minimal end time of each of these simulations and ignore the period afterwards. Input: - *n_samples* [default = 51] (integer) """ assert self._IsSimulationDone, "First do a stochastic simulation" assert not self._IsOnlyLastTimepoint, "Generating a regular grid is disabled when saving only the last time point" ### Determine the number of samples ### if isinstance(n_samples,int): pass elif type(n_samples) in [float,np.float64,np.float32]: print("*** WARNING ***: 'n_samples' must be an integer rather than a float; float {0} is rounded to {1:d}".format(n_samples,int(n_samples))) n_samples = int(n_samples) elif n_samples == True: n_samples = int(self.data_stochsim.simulation_endtime) else: raise TypeError("Argument of GetRegularGrid() must be an integer") self._n_samples = n_samples n_species = len(self.data_stochsim.species_labels) L_species = [[] for i in range(n_species)] if self._IsCellDivision: L_volumes = [] # hard coded one sort of volume if self._IsTrackPropensities: n_rates = len(self.sim_rates_tracked) L_propensities = [[] for j in range(n_rates)] self.data_stochsim_grid.propensities_autocorrelations = [[] for j in range(n_rates)] if self.sim_mode == 'time': sample_timepoints = np.linspace(0,self.sim_end,n_samples) else: simulation_endtimes = [] for n in range(1,self.sim_trajectories_done+1): if self.sim_trajectories_done > 1: self.GetTrajectoryData(n) simulation_endtimes.append(self.data_stochsim.simulation_endtime)
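# --- Illustrative sketch (not part of the StochPy source above) ---
# GetRegularGrid() above puts irregular Gillespie output onto a fixed time grid
# shared by all trajectories. The sketch below only illustrates the underlying
# idea (sampling a piecewise-constant trajectory on a regular grid with a
# zero-order hold via np.searchsorted); it is not StochPy's implementation,
# and the event times and copy numbers are made up:
import numpy as np

event_times = np.array([0.0, 0.3, 0.9, 1.4, 2.7])   # irregular SSA time points
copy_numbers = np.array([10, 11, 10, 12, 13])        # species count after each event

n_samples = 6
sample_timepoints = np.linspace(0, event_times[-1], n_samples)

# For each grid point, take the state of the most recent event at or before it.
indices = np.searchsorted(event_times, sample_timepoints, side='right') - 1
grid_values = copy_numbers[indices]

print(sample_timepoints)  # [0.   0.54 1.08 1.62 2.16 2.7 ]
print(grid_values)        # [10 11 10 12 12 13]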
# internal data manipulations import numpy as np # operate on data from the hdf5 file and image generation import pandas as pd # Data frames from numpy arrays, especially for output # image generation, manipulation, and analysis import cv2 from matplotlib import path # dealing with voronoi facets polylines as paths import colorsys # color code managment from scipy import ndimage # image manipulation # plotting import seaborn as sns # simple debug printing, enabled with DEBUG_PRINT DEBUG_PRINT = False # probably want to move to a more formal logging at some point def dprint(s): if(DEBUG_PRINT): print(s) def get_timesteps(trajectory): """ Determine valid timesteps for the associated hdf5 dump Parameters ---------- trajectory : The decoded hdf5 file containing the dumped run data. Usually from something like h5py.File(infile, 'r') Returns ------- A tuple consisting of three entries. The first is a sorted list of integers for every valid timestep. The second and third are for convenience and represent the start and end time of the run. (First and last timesteps.) """ trajectory_times = sorted([int(k) for k in trajectory['id'].keys()]) start_time = trajectory_times[0] end_time = trajectory_times[len(trajectory_times)-1] return(trajectory_times, start_time, end_time) # Using numpy arrays for a lot of the work rather than pandas # Setting up constants to keep column indices correct CELL_ID_COL = 0 CELL_X_COL = 1 CELL_Y_COL = 2 CELL_Z_COL = 3 CELL_RADIUS_COL = 4 CELL_ANCESTOR_COL = 5 # Magic value indicating a cell has not yet been assigned an ancestor NO_ANCESTOR_ASSIGNED = -1 # TODO, in a few places, it may be better to move to a more object oriented # approach. Mainly for a more clear interface, while avoiding state. For # example, get_timesteps could easily produce an object with 'steps()', # 'start_time()', and 'end_time()' methods for clarity without affecting # performance or relying on too much internal state. # TODO make compatible with the newer, corrected hdf5 radius dump def radius_key(timestep): """ Generate the appropriate key for a radius at a given timestep. Does not check timestep for validity. This function exists because current phototroph runs use an older version of NUFEB which output individual radius keys for each timestep, (e.g. radius0, radius100, etc) rather than a single radius entry indexed by timestep. Parameters ---------- timestep : The numeric time step at which we want radius info Returns ------- A string representing the key for the radius information at the given timestep """ return(f'radius{timestep}') # TODO error out gracefully if time does not exist def get_cells(trajectory, time=0, scale=1E6): """ Provide the scaled location and radius of all cells at a particular timestep, with each cell associted with a tag id which remains consistent between timesteps. Scaling is intended mainly to translate spatial coordinate to image pixel locations. Parameters ---------- trajectory : The decoded hdf5 file containing the dumped run data. Usually from something like h5py.File(infile, 'r') time : An integer representing the timestep to query for cell locations. Most runs start at time 0, so this has been left as a default value. scale : A value by which to multiply the physical coordinates. The inteded goal to convert from spatial coordinates to pixel locations so scale is generally passed a number representing pixels per meter. The default value returns distance in terms of microns. 
WARNING: Because we are return an integer based numpy array, setting the scale low (as it may be tempting to set the scale to 1) would lead to most values being 0. Returns ------- A five column, multi-row numpy array. Where the columns, in order, are: 1. The consistent atom tag (id) 2. The scaled x, y, and z coordinates of the cell 3. The cell radius 4. The cell's ancestors. This column is intended for later bookkeeping and is not populated here, beyond initializing to NO_ANCESTOR_ASSIGNED Each column can be referenced by the defined constants: CELL_ID_COL = 0 CELL_X_COL = 1 CELL_Y_COL = 2 CELL_Z_COL = 3 CELL_RADIUS_COL = 4 CELL_ANCESTOR_COL = 5 """ time = str(time) ret_array = np.column_stack( (trajectory['id'][time], scale*np.column_stack((trajectory['x'][time], trajectory['y'][time], trajectory['z'][time], trajectory[radius_key(time)])), np.full((len(trajectory['id'][time]), 1), NO_ANCESTOR_ASSIGNED) )).astype(int) # Occasionally a cell with id == 0 is saved, this is not a valid cell return( ret_array[ret_array[:,CELL_ID_COL]!= 0]) def get_seeds(trajectory, start_time=0, scale=1E6): """ As with get_cells: Provide the scaled location and radius of all cells at a particular timestep, with each cell associted with a tag id which remains consistent between timesteps. HOWEVER: Also assigns the ancestor id to the same as the cell id, since these are the initial cells. Parameters ---------- trajectory : The decoded hdf5 file containing the dumped run data. Usually from something like h5py.File(infile, 'r') start_time : An integer representing the initial timestep. Most runs start at time 0, so this has been left as a default value. scale : A value by which to multiply the physical coordinates. The inteded goal to convert from spatial coordinates to pixel locations so scale is generally passed a number representing pixels per meter. The default value returns distance in terms of microns. WARNING: Because we are return an integer based numpy array, setting the scale low (as it may be tempting to set the scale to 1) would lead to most values being 0. Returns ------- A five column, multi-row numpy array. Where the columns, in order, are: 1. The consistent atom tag (id) 2. The scaled x, y, and z coordinates of the cell 3. The cell radius 4. The cell's ancestors. Unlike with get_cells, this column is populated. Specfically, it ought to match the value in the CELL_ID_COL since these are the initial seeds. Each column can be referenced by the defined constants: CELL_ID_COL = 0 CELL_X_COL = 1 CELL_Y_COL = 2 CELL_Z_COL = 3 CELL_RADIUS_COL = 4 CELL_ANCESTOR_COL = 5 """ seeds = get_cells(trajectory, start_time, scale) # Since this is the first set of cells, they are their own ancestor seeds[:, CELL_ANCESTOR_COL] = seeds[:, CELL_ID_COL] return(seeds) # %% def assign_ancestry(trajectory): """ Infer the ancestor of all cells during all timesteps. Since cell ancestors are not necessarily tracked, we have to infer them as we go. This method steps through each timestep, identifies cells with unknown ancestors, and assigns them an ancestor based on the nearest cell with a known/inferred ancestor. There are many other approaches, but this one has proven to be the least brittle in practice. Do note however, that the accuracy of the inference will likely go down if the time between recorded timesteps is too large. Although this does a brute force nearest-neighbor search, it has not proven to take very long for the number of cells used in our current runs (order of 1000). 
There are internal comments noting where optimizations could be made. Parameters ---------- trajectory : The decoded hdf5 file containing the dumped run data. Usually from something like h5py.File(infile, 'r') Returns ------- A dictionary mapping each cell present in the timestep to the id of its ancestor. """ dprint('Infeerring cell ancestries') trajectory_times, start_time, end_time = get_timesteps(trajectory) # Do not need to scale these, since we only care about relative distances seeds = get_seeds(trajectory, start_time=start_time) # Dictionary which will hold associations between cell ids and ancestors ancestry = {} # All seeds have a known ancestry, themselves for seed in seeds: ancestry[seed[CELL_ID_COL]] = seed[CELL_ANCESTOR_COL] for time in trajectory_times: dprint(f'\tProcessing time: {time}') # Do not need to scale, we only care about relative distances cells = get_cells(trajectory, time=time) # for cells with known ancestors, set the appropriate value in the # ancestor column. Used to filter cell list for those with unknown # ancestors for cell_id, anc_id in ancestry.items(): # Every once in a while a cell leaves the simulation, so make sure # it actually exists at this timestep if(len(cells[cells[:, CELL_ID_COL] == cell_id]) > 0): ancestor_found = cells[cells[:, CELL_ID_COL] == cell_id][0] ancestor_found[CELL_ANCESTOR_COL] = anc_id cells[cells[:, CELL_ID_COL] == cell_id] = ancestor_found # for all the cells with no currently known ancestor, find the # nearest cell with an ancestor and assign that ancestor to the # unknown cell
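# --- Illustrative sketch (not part of the analysis module above) ---
# assign_ancestry() above gives each cell with an unknown ancestor the ancestor
# of the nearest cell whose ancestry is already known (brute-force Euclidean
# distance). A self-contained restatement using the same column layout; the
# coordinates below are made up:
import numpy as np

CELL_ID_COL, CELL_X_COL, CELL_Y_COL, CELL_Z_COL = 0, 1, 2, 3
CELL_RADIUS_COL, CELL_ANCESTOR_COL = 4, 5
NO_ANCESTOR_ASSIGNED = -1

cells = np.array([
    # id   x   y   z  radius  ancestor
    [  1, 10, 10,  5,      1,  1],                     # seed, ancestor known
    [  2, 40, 40,  5,      1,  2],                     # seed, ancestor known
    [  3, 12,  9,  5,      1,  NO_ANCESTOR_ASSIGNED],  # newly appeared cell
])

for i in np.where(cells[:, CELL_ANCESTOR_COL] == NO_ANCESTOR_ASSIGNED)[0]:
    known = cells[cells[:, CELL_ANCESTOR_COL] != NO_ANCESTOR_ASSIGNED]
    # Distance from the unknown cell to every cell with a known ancestor.
    deltas = known[:, CELL_X_COL:CELL_RADIUS_COL] - cells[i, CELL_X_COL:CELL_RADIUS_COL]
    nearest = known[np.argmin(np.linalg.norm(deltas, axis=1))]
    cells[i, CELL_ANCESTOR_COL] = nearest[CELL_ANCESTOR_COL]

print(cells[2, CELL_ANCESTOR_COL])  # 1: cell 3 is closest to seed 1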
_RecordingFileDurationMeasurement, _First): name = 'First Recording File Duration' class FirstRecordingFileEndIndexMeasurement( _RecordingFileEndIndexMeasurement, _First): name = 'First Recording File End Index' class FirstRecordingFileEndTimeMeasurement( _RecordingFileEndTimeMeasurement, _First): name = 'First Recording File End Time' class FirstRecordingFileLengthMeasurement( _RecordingFileLengthMeasurement, _First): name = 'First Recording File Length' class FirstRecordingFileNameMeasurement( _RecordingFileNameMeasurement, _First): name = 'First Recording File Name' class FirstRecordingFileNumberMeasurement( _RecordingFileNumberMeasurement, _First): name = 'First Recording File Number' class FirstRecordingFilePathMeasurement( _RecordingFilePathMeasurement, _First): name = 'First Recording File Path' class FirstRecordingFileStartIndexMeasurement( _RecordingFileStartIndexMeasurement, _First): name = 'First Recording File Start Index' class FirstRecordingFileStartTimeMeasurement( _RecordingFileStartTimeMeasurement, _First): name = 'First Recording File Start Time' class IdMeasurement(Measurement): name = 'ID' def measure(self, clip): return clip.id class _Last: """Provides file index to last recording file measurements.""" _file_index = -1 class LastRecordingFileDurationMeasurement( _RecordingFileDurationMeasurement, _Last): name = 'Last Recording File Duration' class LastRecordingFileEndIndexMeasurement( _RecordingFileEndIndexMeasurement, _Last): name = 'Last Recording File End Index' class LastRecordingFileEndTimeMeasurement( _RecordingFileEndTimeMeasurement, _Last): name = 'Last Recording File End Time' class LastRecordingFileLengthMeasurement( _RecordingFileLengthMeasurement, _Last): name = 'Last Recording File Length' class LastRecordingFileNameMeasurement( _RecordingFileNameMeasurement, _Last): name = 'Last Recording File Name' class LastRecordingFileNumberMeasurement( _RecordingFileNumberMeasurement, _Last): name = 'Last Recording File Number' class LastRecordingFilePathMeasurement( _RecordingFilePathMeasurement, _Last): name = 'Last Recording File Path' class LastRecordingFileStartIndexMeasurement( _RecordingFileStartIndexMeasurement, _Last): name = 'Last Recording File Start Index' class LastRecordingFileStartTimeMeasurement( _RecordingFileStartTimeMeasurement, _Last): name = 'Last Recording File Start Time' class LengthMeasurement(Measurement): name = 'Length' def measure(self, clip): return clip.length class LunarAltitudeMeasurement(Measurement): name = 'Lunar Altitude' def measure(self, clip): return _get_lunar_position(clip).altitude def _get_lunar_position(clip): sun_moon = _get_sun_moon(clip) return sun_moon.get_lunar_position(clip.start_time) class LunarAzimuthMeasurement(Measurement): name = 'Lunar Azimuth' def measure(self, clip): return _get_lunar_position(clip).azimuth class LunarIlluminationMeasurement(Measurement): name = 'Lunar Illumination' def measure(self, clip): sun_moon = _get_sun_moon(clip) return sun_moon.get_lunar_illumination(clip.start_time) class MicrophoneOutputNameMeasurement(Measurement): name = 'Microphone Output Name' def measure(self, clip): return clip.mic_output.name class NauticalDawnMeasurement(_SolarEventTimeMeasurement): name = 'Nautical Dawn' class NauticalDuskMeasurement(_SolarEventTimeMeasurement): name = 'Nautical Dusk' class RecentClipCountMeasurement(Measurement): # This measurement assumes that clips of a given station and # detector are visited in order of increasing start time. 
name = 'Recent Clip Count' def __init__(self, settings=None): if settings is None: settings = {} annotation_name = settings.get('annotation_name', 'Classification') self._count_window_size = self._get_count_window_size(settings) self._included_classifications = \ settings.get('included_classifications') self._excluded_classifications = \ settings.get('excluded_classifications') self._lumped_classifications = \ settings.get('lumped_classifications') self._clip_start_times = defaultdict(deque) self._annotation_info = \ AnnotationInfo.objects.get(name=annotation_name) def _get_count_window_size(self, settings): window_size = settings.get('count_window_size', 60) return TimeDelta(seconds=window_size) def measure(self, clip): classification = \ model_utils.get_clip_annotation_value(clip, self._annotation_info) if classification is None: return None classification_key = self._get_classification_key(classification) if classification_key is None: # clips of this classification not counted return None else: # clips of this classification counted # Get saved clip times. detector_name = model_utils.get_clip_detector_name(clip) key = (clip.station.name, detector_name, classification_key) clip_times = self._clip_start_times[key] # Discard saved clip times that precede count window. window_start_time = clip.start_time - self._count_window_size while len(clip_times) != 0 and clip_times[0] < window_start_time: clip_times.popleft() # Save current clip time. clip_times.append(clip.start_time) return len(clip_times) def _get_classification_key(self, classification): if self._included_classifications is not None: if not _matches(classification, self._included_classifications): return None if self._excluded_classifications is not None: if _matches(classification, self._excluded_classifications): return None if self._lumped_classifications is not None: for classifications in self._lumped_classifications: if _matches(classification, classifications): return classifications[0] # If we get here, the classification is included but not lumped. return classification def _matches(classification, classifications): for c in classifications: if c.endswith('*') and classification.startswith(c[:-1]): return True elif classification == c: return True # If we get here, `classification` did not match any of the # classifications in `classifications`. 
return False class RecordingChannelNumberMeasurement(Measurement): name = 'Recording Channel Number' def measure(self, clip): return clip.channel_num class RecordingDurationMeasurement(Measurement): name = 'Recording Duration' def measure(self, clip): return clip.recording.duration class RecordingEndTimeMeasurement(Measurement): name = 'Recording End Time' def measure(self, clip): return clip.recording.end_time class RecordingLengthMeasurement(Measurement): name = 'Recording Length' def measure(self, clip): return clip.recording.length class RecordingStartTimeMeasurement(Measurement): name = 'Recording Start Time' def measure(self, clip): return clip.recording.start_time _SOLAR_EVENT_NAMES = frozenset(SunMoon.SOLAR_EVENT_NAMES) class _RelativeTimeMeasurement(Measurement): def __init__(self, settings=None): if settings is None: settings = {} self._reference_name = settings.get( 'reference_time', 'Recording Start Time') if self._reference_name in _SOLAR_EVENT_NAMES: self._get_required_setting(settings, 'diurnal') def measure(self, clip): reference_time = self._get_reference_time(clip) if reference_time is None: return None else: clip_time = self._get_clip_time(clip) delta = clip_time - reference_time return delta.total_seconds() def _get_reference_time(self, clip): reference_name = self._reference_name if reference_name == 'Recording Start Time': return clip.recording.start_time elif reference_name == 'Recording End Time': return clip.recording.end_time elif reference_name == self._recording_file_start_time_reference_name: return self._get_recording_file_reference_time(clip, 'start') elif reference_name == self._recording_file_end_time_reference_name: return self._get_recording_file_reference_time(clip, 'end') else: return _get_solar_event_time(clip, reference_name, self._diurnal) def _get_recording_file_reference_time(self, clip, name): info = _get_recording_file_info(clip) if info is None: return None else: recording_file = info[0][self._recording_file_index] return getattr(recording_file, name + '_time') class RelativeEndTimeMeasurement(_RelativeTimeMeasurement): name = 'Relative End Time' _recording_file_start_time_reference_name = \ 'Last Recording File Start Time' _recording_file_end_time_reference_name = \ 'Last Recording File End Time' _recording_file_index = -1 def _get_clip_time(self, clip): return clip.end_time class RelativeStartTimeMeasurement(_RelativeTimeMeasurement): name = 'Relative Start Time' _recording_file_start_time_reference_name = \ 'First Recording File Start Time' _recording_file_end_time_reference_name = \ 'First Recording File End Time' _recording_file_index = 0 def _get_clip_time(self, clip): return clip.start_time class SampleRateMeasurement(Measurement): name = 'Sample Rate' def measure(self, clip): return clip.sample_rate class SensorNameMeasurement(Measurement): name = 'Sensor Name' def measure(self, clip): station_name = clip.station.name mic_name = clip.mic_output.device.name return f'{station_name} {mic_name}' class SolarAltitudeMeasurement(Measurement): name = 'Solar Altitude' def measure(self, clip): return _get_solar_position(clip).altitude def _get_solar_position(clip): sun_moon = _get_sun_moon(clip) return sun_moon.get_solar_position(clip.start_time) class SolarAzimuthMeasurement(Measurement): name = 'Solar Azimuth' def measure(self, clip): return _get_solar_position(clip).azimuth class SolarMidnightMeasurement(_SolarEventTimeMeasurement): name = 'Solar Midnight' class SolarNoonMeasurement(_SolarEventTimeMeasurement): name = 'Solar Noon' class 
SolarPeriodMeasurement(Measurement): name = 'Solar Period' def measure(self, clip): sun_moon = _get_sun_moon(clip) return sun_moon.get_solar_period_name(clip.start_time) class StartIndexMeasurement(_IndexMeasurement): name = 'Start Index' _default_reference_index_name = 'Recording Start Index' _recording_file_start_index_reference_name = \ 'First Recording File Start Index' _recording_file_end_index_reference_name = \ 'First Recording File End Index' _recording_file_index = 0 def _get_index(self, clip): return clip.start_index class StartTimeMeasurement(Measurement): name = 'Start Time' def measure(self, clip): return clip.start_time class StationNameMeasurement(Measurement): name = 'Station Name' def measure(self, clip): return clip.station.name class SunriseMeasurement(_SolarEventTimeMeasurement): name = 'Sunrise' class SunsetMeasurement(_SolarEventTimeMeasurement): name = 'Sunset' class TagStatusMeasurement(Measurement): name = 'Tag Status' def __init__(self, settings): tag_name = self._get_required_setting(settings, 'tag_name') self._tag_info = TagInfo.objects.get(name=tag_name) def measure(self, clip): return model_utils.is_clip_tagged(clip, self._tag_info) _MEASUREMENT_CLASSES = dict((c.name, c) for c in [ AnnotationValueMeasurement, AstronomicalDawnMeasurement, AstronomicalDuskMeasurement, CivilDawnMeasurement, CivilDuskMeasurement, DetectorNameMeasurement, DetectorTypeMeasurement, DurationMeasurement, EndIndexMeasurement, EndTimeMeasurement, FirstRecordingFileDurationMeasurement, FirstRecordingFileEndIndexMeasurement, FirstRecordingFileEndTimeMeasurement, FirstRecordingFileLengthMeasurement, FirstRecordingFileNameMeasurement, FirstRecordingFileNumberMeasurement, FirstRecordingFilePathMeasurement, FirstRecordingFileStartIndexMeasurement, FirstRecordingFileStartTimeMeasurement, IdMeasurement, LastRecordingFileDurationMeasurement, LastRecordingFileEndIndexMeasurement, LastRecordingFileEndTimeMeasurement, LastRecordingFileLengthMeasurement, LastRecordingFileNameMeasurement, LastRecordingFileNumberMeasurement, LastRecordingFilePathMeasurement, LastRecordingFileStartIndexMeasurement, LastRecordingFileStartTimeMeasurement, LengthMeasurement, LunarAltitudeMeasurement, LunarAzimuthMeasurement, LunarIlluminationMeasurement, MicrophoneOutputNameMeasurement, NauticalDawnMeasurement, NauticalDuskMeasurement, RecentClipCountMeasurement, RecordingChannelNumberMeasurement, RecordingDurationMeasurement, RecordingEndTimeMeasurement, RecordingLengthMeasurement, RecordingStartTimeMeasurement, RelativeEndTimeMeasurement, RelativeStartTimeMeasurement, SampleRateMeasurement, SensorNameMeasurement, SolarAltitudeMeasurement, SolarAzimuthMeasurement, SolarMidnightMeasurement, SolarNoonMeasurement, SolarPeriodMeasurement, StartIndexMeasurement, StartTimeMeasurement, StationNameMeasurement, SunriseMeasurement, SunsetMeasurement, TagStatusMeasurement, ]) _NO_VALUE_STRING = '' _DEFAULT_DATE_FORMAT = '%Y-%m-%d' _DEFAULT_TIME_FORMAT = '%H:%M:%S' _DEFAULT_DATE_TIME_FORMAT = _DEFAULT_DATE_FORMAT + ' ' + _DEFAULT_TIME_FORMAT _DEFAULT_DURATION_FORMAT = '%h:%M:%S' _DEFAULT_RELATIVE_TIME_FORMAT = '%g%h:%M:%S' class Formatter: def _get_required_setting(self, settings, name): try: return settings[name] except KeyError: raise CommandExecutionError( f'Formatter settings lack required "{name}" item.') def format(self, value, clip): if value is None: return None else: return self._format(value, clip) class Calculator(Formatter): name = 'Calculator' def __init__(self, settings): self._code = 
self._get_required_setting(settings, 'code') self._calculator = Calculator_() def _format(self, value, clip): c = self._calculator try: c.clear() c.dict_stack.put('x', value) c.execute(self._code) return c.operand_stack.pop() except Exception as e: raise CommandExecutionError( f'Execution of calculator code "{self._code}" failed. ' f'Calculator error message was: {str(e)}') class DecimalFormatter(Formatter): name = 'Decimal Formatter' def __init__(self, settings=None): if settings is None: self._format_string = '{:f}' else: self._format_string = '{:' + settings.get('detail', '') + 'f}' def _format(self, x, clip): return self._format_string.format(x) class _DateTimeFormatter(Formatter): def __init__(self, local, settings=None): self._local = local if settings is None: settings = {} self._formatter = self._get_formatter(settings) (self._rounding_enabled, self._rounding_increment, self._rounding_mode) = _get_rounding( settings, False, self._formatter.min_time_increment) def _get_formatter(self, settings): format_ = settings.get('format', _DEFAULT_DATE_TIME_FORMAT)
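# --- Illustrative sketch (not part of the module above) ---------------------
# RecentClipCountMeasurement keeps one deque of start times per
# (station, detector, classification) key and counts how many saved times
# fall inside a trailing window ending at the current clip. The standalone
# helper below shows just that sliding-window step; the name `count_recent`
# and the 60-second default window are introduced here for illustration only.
from collections import deque
from datetime import datetime, timedelta

def count_recent(times, new_time, window=timedelta(seconds=60)):
    """Drop saved times that precede the window, save `new_time`, and
    return the number of times remaining (including the new one)."""
    window_start = new_time - window
    while len(times) != 0 and times[0] < window_start:
        times.popleft()
    times.append(new_time)
    return len(times)

times = deque()
t0 = datetime(2024, 1, 1)
for i in range(3):
    print(count_recent(times, t0 + timedelta(seconds=40 * i)))
# Prints 1, 2, 2: by the third clip the first one has left the 60 s window.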
be changed in the current session state"), content_type='application/xml') else: return HttpResponse(createXmlErrorResponse("Grid was not found"), content_type='application/xml') else: return HttpResponse(createXmlErrorResponse("Session was not found"), content_type='application/xml') else: return HttpResponse(createXmlErrorResponse("You are not a facilitator, can't change grid"), content_type='application/xml') else: return HttpResponse(createXmlErrorResponse("Invalid request, request is missing argument(s)"), content_type='application/xml') elif gridType == 'response': return HttpResponse(createXmlErrorResponse("Invalid request, unsupported operation"), content_type='application/xml') elif gridType == 'user': gridObj = Grid.objects.get(user=user1, usid=request.POST['gridUSID']) else: try: gridObj = Grid.objects.get(user=user1, usid=request.POST['gridUSID']) except: pass if request.POST.has_key('gridName'): gridCheckNameResult = validateName(request.POST['gridName']) if type(gridCheckNameResult) == StringType: gridObj.name = gridCheckNameResult else: # if the grid name isn't a string than it is an error return gridCheckNameResult # because django will save stuff to the database even if .save() is not called, we need to validate everything before starting to create the objects that will be used to populate the db obj = None try: obj = __validateInputForGrid(request, isConcernAlternativeResponseGrid) except KeyError as error: if DEBUG: print "Exception in user code:" print '-' * 60 traceback.print_exc(file=sys.stdout) print '-' * 60 return HttpResponse(createXmlErrorResponse(error.args[0]), content_type='application/xml') except ValueError as error: if DEBUG: print "Exception in user code:" print '-' * 60 traceback.print_exc(file=sys.stdout) print '-' * 60 return HttpResponse(createXmlErrorResponse(error.args[0]), content_type='application/xml') except: if DEBUG: print "Exception in user code:" print '-' * 60 traceback.print_exc(file=sys.stdout) print '-' * 60 logger.exception('Unknown error') return HttpResponse(createXmlErrorResponse('Unknown error'), content_type='application/xml') nConcerns, nAlternatives, concernValues, alternativeValues, ratioValues = obj # update the grid if gridObj is not None: for i in range(int(nAlternatives)): try: str(alternativeValues[i]) except UnicodeEncodeError as error: errorString = 'Invalid character found in the alternatives. The "' + error.object[ error.start:error.end] + '" character can not be convert or used safely' return HttpResponse(createXmlErrorResponse(errorString), content_type='application/xml') except: return HttpResponse(createXmlErrorResponse("Invalid alternative name : " + alternativeValues[i]), content_type='application/xml') for i in range(int(nConcerns)): try: str(concernValues[i][0]) except UnicodeEncodeError as error: errorString = 'Invalid character found in left concern. The "' + error.object[ error.start:error.end] + '" character can not be convert or used safely' return HttpResponse(createXmlErrorResponse(errorString), content_type='application/xml') except: return HttpResponse(createXmlErrorResponse("Invalid left concern name : " + concernValues[i][0]), content_type='application/xml') try: str(concernValues[i][1]) except UnicodeEncodeError as error: errorString = 'Invalid character found in right concern. 
The "' + error.object[ error.start:error.end] + '" character can not be convert or used safely' return HttpResponse(createXmlErrorResponse(errorString), content_type='application/xml') except: return HttpResponse(createXmlErrorResponse("Invalid right concern name : " + concernValues[i][1]), content_type='application/xml') try: isGridCreated = updateGrid(gridObj, nConcerns, nAlternatives, concernValues, alternativeValues, ratioValues, isConcernAlternativeResponseGrid) if isGridCreated: return HttpResponse(createXmlSuccessResponse('Grid was saved', createDateTimeTag( datetime.now().strftime("%Y-%m-%d %H:%M:%S"))), content_type='application/xml') except: if DEBUG: print "Exception in user code:" print '-' * 60 traceback.print_exc(file=sys.stdout) print '-' * 60 logger.exception('Unknown error') return HttpResponse(createXmlErrorResponse('Unknown error'), content_type='application/xml') else: return HttpResponse(createXmlErrorResponse("No grid found"), content_type='application/xml') @login_required def ajaxDeleteGrid(request): """ This function is used to delete a grid created by a user. External arguments: gridUSID: string """ if request.POST.has_key('gridUSID'): gridUSID = request.POST['gridUSID'] grid = None try: from .models import Grid grid = Grid.objects.get(user=request.user, usid=gridUSID) except: HttpResponse(createXmlErrorResponse('couldn\'t find grid'), content_type='application/xml') if grid is not None: grid.delete() return HttpResponse(createXmlSuccessResponse('Grid was deleted'), content_type='application/xml') else: return HttpResponse(createXmlErrorResponse('Invalid request, request is missing arguments'), content_type='application/xml') @login_required def ajaxGenerateDendogram(request): """ This function is used to send a dendrogram back to the client machine that was generated based on a grid saved in the database External arguments: gridUSID: string """ if request.POST.has_key('gridUSID'): from .models import Grid grid1 = Grid.objects.filter(user=request.user, usid=request.POST['gridUSID']) if len(grid1) >= 1: try: grid1 = grid1[0] if grid1.dendogram is not None and grid1.dendogram is not '': imgData = createDendogram(grid1) responseData = createSvgResponse(imgData, None) return HttpResponse(responseData, content_type='application/xml') else: try: imgData = createDendogram(grid1) responseData = createSvgResponse(imgData, None) return HttpResponse(responseData, content_type='application/xml') except UnicodeEncodeError as error: errorString = 'Invalid character found in the grid. The "' + error.object[ error.start:error.end] + '" character can not be convert or used safely.\nDendogram can not be created.' 
return HttpResponse(createXmlErrorResponse(errorString), content_type='application/xml') except: if DEBUG: print "Exception in user code:" print '-' * 60 traceback.print_exc(file=sys.stdout) print '-' * 60 logger.exception('Unknown error') return HttpResponse(createXmlErrorResponse('Unknown dendrogram error'), content_type='application/xml') except: if DEBUG: print "Exception in user code:" print '-' * 60 traceback.print_exc(file=sys.stdout) print '-' * 60 logger.exception('Unknown error') return HttpResponse(createXmlErrorResponse('Unknown error'), content_type='application/xml') else: return HttpResponse(createXmlErrorResponse('Could not find the grid to generate the dendrogram'), content_type='application/xml') else: return HttpResponse(createXmlErrorResponse('Invalid request, request is missing argument(s)'), content_type='application/xml') @login_required def ajaxGenerateSimilarity(request): """ This function will generate the similarity matrix for concers and alternatives """ if request.POST.has_key('gridUSID'): from .models import Grid grid1 = Grid.objects.filter(user=request.user, usid=request.POST['gridUSID']) if len(grid1) >= 1: try: grid1 = grid1[0] matrixConcern = returnMatrix(grid1, "concern") matrixAlternatives = returnMatrix(grid1, "alt") # matrix that will be transposed consRangeXi = len(matrixConcern[0]) consRangeYi = len(matrixConcern) - 1 consRangeX = [i + 1 for i in range(consRangeXi)] consRangeY = [j + 1 for j in range(consRangeYi)] altsRangeX = len(matrixAlternatives[0]) altsRangeY = len(matrixAlternatives) - 1 template = loader.get_template('gridMng/grid/similaritymatrix.html') context = RequestContext(request, {'cons': matrixConcern, 'alts': matrixAlternatives, 'consRangeX': consRangeX, 'consRangeY': consRangeY, 'altsRangeX': altsRangeX, 'altsRangeY': altsRangeY}) htmlData = render(request, 'gridMng/grid/similaritymatrix.html', {'cons': matrixConcern, 'alts': matrixAlternatives, 'consRangeX': consRangeX, 'consRangeY': consRangeY, 'altsRangeX': altsRangeX, 'altsRangeY': altsRangeY}) return HttpResponse(createXmlSuccessResponse(htmlData.content), content_type='application/xml') except: if DEBUG: print "Exception in user code:" print '-' * 60 traceback.print_exc(file=sys.stdout) print '-' * 60 logger.exception('Unknown error') return HttpResponse(createXmlErrorResponse('Unknown error'), content_type='application/xml') else: return HttpResponse(createXmlErrorResponse('Could not find the grid to generate similarity matrices'), content_type='application/xml') else: return HttpResponse("Hello World!") @login_required def ajaxGetSaveSvgPage(request): """ This function will return the html code that is needed to generate the dialog box that is used to ask to user to which format should a svg image be saved """ template = loader.get_template('gridMng/grid/saveSvg.html') context = RequestContext(request, {}) htmlData = template.render(context) return HttpResponse(createXmlSuccessResponse(htmlData), content_type='application/xml') @login_required def ajaxConvertSvgTo(request): """ this function will receive a svg string and will convert it to another file type. 
External arguments: data: svg string convertTo: string values: svg fileName: string """ try: if request.POST.has_key('data') and request.POST.has_key('fileName') and request.POST.has_key('convertTo'): if request.POST['data'] and request.POST['convertTo']: imgData = __convertSvgStringTo(request.POST['data'], request.POST['convertTo']) if not request.POST['fileName']: imgData.fileName = generateRandomString() else: imgData.fileName = request.POST['fileName'] return createFileResponse(imgData) else: if not request.POST.has_key('data'): raise Exception('data key was not received') if not request.POST.has_key('convertTo'): raise Exception('convertTo key was not received') except: if DEBUG: print "Exception in user code:" print '-' * 60 traceback.print_exc(file=sys.stdout) print '-' * 60 # in case of an error or checks failing return an image error errorImageData = getImageError() # send the file response = HttpResponse(errorImageData, content_type='image/jpg') response['Content-Disposition'] = 'attachment; filename=error.jpg' return response @login_required def ajaxConvertGridTo(request): """ This function is used to send the user a file containing the svg image of a grid he created. External arguments: convertTo: string values: svg usid: string fileName: string """ try: if request.POST.has_key('usid') and request.POST.has_key('convertTo'): usidData = request.POST['usid'] convertToData = request.POST['convertTo'] if usidData is not None and convertToData is not None: from .models import Grid gridObj = Grid.objects.filter(usid=usidData) if len(gridObj) >= 1: gridObj = gridObj[0] # check if the requesting user is the owner of the grid if gridObj.user == request.user or gridObj.user == None: imgData = FileData() if convertToData == 'svg': imgData.data = convertGridTableToSvg(gridObj) imgData.fileExtension = 'svg' imgData.contentType = 'image/svg+xml' if request.POST.has_key('fileName'): imgData.fileName = request.POST['fileName'] if not imgData.fileName: imgData.fileName = generateRandomString() return createFileResponse(imgData) else: raise Exception('User is not authorized to access this grid as he is not the creator') else: raise Exception('Grid was not found with the usid: ' + usidData) else: if not usidData: ValueError('usid had invalid value: ' + usidData) if not convertToData: ValueError('convertTo had invalid value: ' + convertToData) else: if not request.POST.has_key('usid'): raise Exception('usid key was not received') if not request.POST.has_key('convertTo'): raise Exception('convertTo key was not received') except: if DEBUG: print "Exception in user code:" print '-' * 60 traceback.print_exc(file=sys.stdout) print '-' * 60 # anything else return the img error errorImageData = getImageError() # send the file response = HttpResponse(errorImageData, content_type='image/jpg') response['Content-Disposition'] = 'attachment; filename=error.jpg' return response @login_required def dendrogramTo(request): """ This functions is used to send back an image file to the user containg a dendrogram. 
External arguments: convertTo: string values: svg gridUSID: string fileName: string """ ######################################### ############## Options ################## ######################################### # # # convertTo: svg # # gridUSID: usid of the grid in question # ######################################### try: if request.POST.has_key('gridUSID') and request.POST.has_key('convertTo'): # check to see if the inputs are not None if request.POST['gridUSID'] and request.POST['convertTo']: from .models import Grid grid = Grid.objects.filter(usid=request.POST['gridUSID']) if len(grid)
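# --- Illustrative sketch (not part of the views above) ----------------------
# The views in this module repeat the same guard: check that the required
# POST keys are present and otherwise return an XML error response. A
# hypothetical decorator such as the one below could factor that guard out;
# `require_post_keys` is a name introduced here, `createXmlErrorResponse` is
# the helper the module already uses, and the membership test uses the
# Python 3 `in` operator instead of the Python 2 `has_key` seen above.
from functools import wraps
from django.http import HttpResponse

def require_post_keys(*keys):
    def decorator(view):
        @wraps(view)
        def wrapper(request, *args, **kwargs):
            if any(key not in request.POST for key in keys):
                return HttpResponse(
                    createXmlErrorResponse(
                        'Invalid request, request is missing argument(s)'),
                    content_type='application/xml')
            return view(request, *args, **kwargs)
        return wrapper
    return decorator

# Usage (hypothetical):
#
# @login_required
# @require_post_keys('gridUSID', 'convertTo')
# def dendrogramTo(request):
#     ...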
<filename>nltk/corpus/reader/util.py # Natural Language Toolkit: Corpus Reader Utilities # # Copyright (C) 2001-2012 NLTK Project # Author: <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # URL: <http://www.nltk.org/> # For license information, see LICENSE.TXT import os import sys import bisect import re import tempfile try: import cPickle as pickle except ImportError: import pickle from itertools import islice # Use the c version of ElementTree, which is faster, if possible: try: from xml.etree import cElementTree as ElementTree except ImportError: from xml.etree import ElementTree from nltk.tokenize import wordpunct_tokenize from nltk.internals import slice_bounds from nltk.data import PathPointer, FileSystemPathPointer, ZipFilePathPointer from nltk.data import SeekableUnicodeStreamReader from nltk.sourcedstring import SourcedStringStream from nltk.util import AbstractLazySequence, LazySubsequence, LazyConcatenation, py25 ###################################################################### #{ Corpus View ###################################################################### class StreamBackedCorpusView(AbstractLazySequence): """ A 'view' of a corpus file, which acts like a sequence of tokens: it can be accessed by index, iterated over, etc. However, the tokens are only constructed as-needed -- the entire corpus is never stored in memory at once. The constructor to ``StreamBackedCorpusView`` takes two arguments: a corpus fileid (specified as a string or as a ``PathPointer``); and a block reader. A "block reader" is a function that reads zero or more tokens from a stream, and returns them as a list. A very simple example of a block reader is: >>> def simple_block_reader(stream): ... return stream.readline().split() This simple block reader reads a single line at a time, and returns a single token (consisting of a string) for each whitespace-separated substring on the line. When deciding how to define the block reader for a given corpus, careful consideration should be given to the size of blocks handled by the block reader. Smaller block sizes will increase the memory requirements of the corpus view's internal data structures (by 2 integers per block). On the other hand, larger block sizes may decrease performance for random access to the corpus. (But note that larger block sizes will *not* decrease performance for iteration.) Internally, ``CorpusView`` maintains a partial mapping from token index to file position, with one entry per block. When a token with a given index *i* is requested, the ``CorpusView`` constructs it as follows: 1. First, it searches the toknum/filepos mapping for the token index closest to (but less than or equal to) *i*. 2. Then, starting at the file position corresponding to that index, it reads one block at a time using the block reader until it reaches the requested token. The toknum/filepos mapping is created lazily: it is initially empty, but every time a new block is read, the block's initial token is added to the mapping. (Thus, the toknum/filepos map has one entry per block.) In order to increase efficiency for random access patterns that have high degrees of locality, the corpus view may cache one or more blocks. :note: Each ``CorpusView`` object internally maintains an open file object for its underlying corpus file. This file should be automatically closed when the ``CorpusView`` is garbage collected, but if you wish to close it manually, use the ``close()`` method. 
If you access a ``CorpusView``'s items after it has been closed, the file object will be automatically re-opened. :warning: If the contents of the file are modified during the lifetime of the ``CorpusView``, then the ``CorpusView``'s behavior is undefined. :warning: If a unicode encoding is specified when constructing a ``CorpusView``, then the block reader may only call ``stream.seek()`` with offsets that have been returned by ``stream.tell()``; in particular, calling ``stream.seek()`` with relative offsets, or with offsets based on string lengths, may lead to incorrect behavior. :ivar _block_reader: The function used to read a single block from the underlying file stream. :ivar _toknum: A list containing the token index of each block that has been processed. In particular, ``_toknum[i]`` is the token index of the first token in block ``i``. Together with ``_filepos``, this forms a partial mapping between token indices and file positions. :ivar _filepos: A list containing the file position of each block that has been processed. In particular, ``_toknum[i]`` is the file position of the first character in block ``i``. Together with ``_toknum``, this forms a partial mapping between token indices and file positions. :ivar _stream: The stream used to access the underlying corpus file. :ivar _len: The total number of tokens in the corpus, if known; or None, if the number of tokens is not yet known. :ivar _eofpos: The character position of the last character in the file. This is calculated when the corpus view is initialized, and is used to decide when the end of file has been reached. :ivar _cache: A cache of the most recently read block. It is encoded as a tuple (start_toknum, end_toknum, tokens), where start_toknum is the token index of the first token in the block; end_toknum is the token index of the first token not in the block; and tokens is a list of the tokens in the block. """ def __init__(self, fileid, block_reader=None, startpos=0, encoding=None, source=None): """ Create a new corpus view, based on the file ``fileid``, and read with ``block_reader``. See the class documentation for more information. :param fileid: The path to the file that is read by this corpus view. ``fileid`` can either be a string or a ``PathPointer``. :param startpos: The file position at which the view will start reading. This can be used to skip over preface sections. :param encoding: The unicode encoding that should be used to read the file's contents. If no encoding is specified, then the file's contents will be read as a non-unicode string (i.e., a str). :param source: If specified, then use an ``SourcedStringStream`` to annotate all strings read from the file with information about their start offset, end ofset, and docid. The value of ``source`` will be used as the docid. """ if block_reader: self.read_block = block_reader # Initialize our toknum/filepos mapping. self._toknum = [0] self._filepos = [startpos] self._encoding = encoding self._source = source # We don't know our length (number of tokens) yet. self._len = None self._fileid = fileid self._stream = None self._current_toknum = None """This variable is set to the index of the next token that will be read, immediately before ``self.read_block()`` is called. This is provided for the benefit of the block reader, which under rare circumstances may need to know the current token number.""" self._current_blocknum = None """This variable is set to the index of the next block that will be read, immediately before ``self.read_block()`` is called. 
This is provided for the benefit of the block reader, which under rare circumstances may need to know the current block number.""" # Find the length of the file. try: if isinstance(self._fileid, PathPointer): self._eofpos = self._fileid.file_size() else: self._eofpos = os.stat(self._fileid).st_size except Exception, exc: raise ValueError('Unable to open or access %r -- %s' % (fileid, exc)) # Maintain a cache of the most recently read block, to # increase efficiency of random access. self._cache = (-1, -1, None) fileid = property(lambda self: self._fileid, doc=""" The fileid of the file that is accessed by this view. :type: str or PathPointer""") def read_block(self, stream): """ Read a block from the input stream. :return: a block of tokens from the input stream :rtype: list(any) :param stream: an input stream :type stream: stream """ raise NotImplementedError('Abstract Method') def _open(self): """ Open the file stream associated with this corpus view. This will be called performed if any value is read from the view while its file stream is closed. """ if isinstance(self._fileid, PathPointer): self._stream = self._fileid.open(self._encoding) elif self._encoding: self._stream = SeekableUnicodeStreamReader( open(self._fileid, 'rb'), self._encoding) else: self._stream = open(self._fileid, 'rb') if self._source is not None: self._stream = SourcedStringStream(self._stream, self._source) def close(self): """ Close the file stream associated with this corpus view. This can be useful if you are worried about running out of file handles (although the stream should automatically be closed upon garbage collection of the corpus view). If the corpus view is accessed after it is closed, it will be automatically re-opened. """ if
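# --- Illustrative sketch (not the NLTK implementation) ----------------------
# The lookup strategy documented above can be shown in isolation: bisect the
# partial toknum/filepos mapping for the closest indexed block at or before
# the requested token index, seek to its file position, and read blocks
# forward with the block reader until the token is reached. `get_token` is a
# name introduced here for illustration; the real class also extends the
# mapping and caches the most recently read block as it goes.
import bisect
from io import StringIO

def simple_block_reader(stream):
    return stream.readline().split()

def get_token(stream, read_block, toknum, filepos, i):
    block = bisect.bisect_right(toknum, i) - 1
    stream.seek(filepos[block])
    first = toknum[block]
    while True:
        tokens = read_block(stream)
        if not tokens:
            raise IndexError(i)
        if first + len(tokens) > i:
            return tokens[i - first]
        first += len(tokens)

stream = StringIO('a b c\nd e\nf g h\n')
toknum, filepos = [0, 3], [0, 6]   # only the first two blocks indexed so far
print(get_token(stream, simple_block_reader, toknum, filepos, 6))  # -> 'g'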
#! /usr/bin/python3 # # Copyright (c) 2020 <NAME> <<EMAIL>> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA import libusb1 import usb1 import time import sys from struct import * from datetime import datetime from mccUSB import * class usb_dio32HS(mccUSB): USB_DIO32HS_PID = 0x0133 PORTA = 0x0 PORTB = 0x1 DIO_PORTS = 0x2 # Ports A & B PORT0 = 0x1 # Port A for channel_map PORT1 = 0x2 # Port B for channel_map DIR_IN = 0x1 DIR_OUT= 0x0 # Status bit values IN_SCAN_RUNNING = 0x2 # Input pacer running IN_SCAN_OVERRUN = 0x4 # Input scan overrun OUT_SCAN_RUNNING = 0x8 # Output scan running OUT_SCAN_UNDERRUN = 0x10 # Output scan underrun IN_SCAN_DONE = 0x20 # Input scan done OUT_SCAN_DONE = 0x40 # Output scan done FPGA_CONFIGURED = 0x100 # FPGA is configured FPGA_CONFIG_MODE = 0x200 # FPGA config mode # Scan Modes CONTINUOUS_READOUT = 0x1 # Continuous mode SINGLEIO = 0x2 # Return data after every read (used for low frequency scans) FORCE_PACKET_SIZE = 0x4 # Force packet_size BASE_CLOCK = 96.E6 # Base clock frequency MAX_PACKET_SIZE_HS = 512 # max packet size for HS device MAX_PACKET_SIZE_FS = 64 # max packet size for HS device # Commands and Codes for USB-DIO32HS # Digital I/O Commands DTRISTATE = 0x00 # Read/write digital port tristate registers DPORT = 0x01 # Read digital port pins DLATCH = 0x02 # Read/write digital port output latch register # Register Commands READ_REG = 0x10 # Read the specified register WRITE_REG = 0x11 # Write the specified register # Acquisition Commands IN_SCAN_START = 0x20 # Start input scan IN_SCAN_STOP = 0x21 # Stop input scan IN_SCAN_CLEAR_FIFO = 0x22 # Clear data in the input FIFO IN_BULK_FLUSH = 0x23 # Flush the input Bulk pipe OUT_SCAN_START = 0x24 # Start output scan OUT_SCAN_STOP = 0x25 # Stop output scan OUT_SCAN_CLEAR_FIFO = 0x26 # Clear data in the ouptut FIFO # Memory Commands MEMORY = 0x30 # Read/Write EEPROM MEM_ADDRESS = 0x31 # EEPROM read/write address value MEM_WRITE_ENABLE = 0x32 # Enable writes to firmware area # Miscellaneous Commands STATUS = 0x40 # Read device status BLINK_LED = 0x41 # Causes the LED to blink RESET = 0x42 # Reset the device TRIGGER_CONFIG = 0x43 # External trigger configuration PATTERN_DETECT_CONFIG = 0x44 # Pattern Detection trigger configuration SERIAL = 0x48 # Read/Write USB Serial Number # FPGA Configuration Commands FPGA_CONFIG = 0x50 # Start FPGA configuration FPGA_DATA = 0x51 # Write FPGA configuration data FPGA_VERSION = 0x52 # Read FPGA version HS_DELAY = 2000 def __init__(self, serial=None): self.status = 0 # status of the device self.productID = self.USB_DIO32HS_PID # USB-DIO32HS self.udev = self.openByVendorIDAndProductID(0x9db, self.productID, serial) if not self.udev: raise IOError("MCC USB-DIO32HS not found") return # Configure the FPGA if not (self.Status() & self.FPGA_CONFIGURED) : # load the FPGA data into memory from usb_dio32HS_rbf import FPGA_data 
print("Configuring FPGA. This may take a while ...") self.FPGAConfig() if self.Status() & self.FPGA_CONFIG_MODE: for i in range(0, len(FPGA_data) - len(FPGA_data)%64, 64) : self.FPGAData(FPGA_data[i:i+64]) i += 64 if len(FPGA_data) % 64 : self.FPGAData(FPGA_data[i:i+len(FPGA_data)%64]) if not (self.Status() & self.FPGA_CONFIGURED): print("Error: FPGA for the USB-DIO32HS is not configured. status = ", hex(self.Status())) return else: print("Error: could not put USB-DIO32HS into FPGA Config Mode. status = ", hex(self.Status())) return else: print("USB-DIO32HS FPGA configured.") if sys.platform.startswith('linux'): if self.udev.kernelDriverActive(0): self.udev.detachKernelDriver(0) self.udev.resetDevice() # claim all the needed interfaces for InScan self.udev.claimInterface(0) # Find the maxPacketSize for bulk transfers self.wMaxPacketSize = self.getMaxPacketSize(libusb1.LIBUSB_ENDPOINT_IN | 0x6) #EP IN 6 ############################################## # Digital I/O Commands # ############################################## # Read/Write digital port tristate register def DTristateR(self, port=0): """ This command reads the digital port tristate registers. The tristate register determines if the latch register value is driven onto the port pin. A '1' in the tristate register makes the corresponding pin an input, a '0' makes it an output. """ if port < 0 or port > 1: raise ValueError('DTristateR: error in port number.') return request_type = (DEVICE_TO_HOST | VENDOR_TYPE | DEVICE_RECIPIENT) wValue = 0 wIndex = port # the port number to select (0-1) value ,= unpack('H',self.udev.controlRead(request_type, self.DTRISTATE, wValue, wIndex, 2, self.HS_DELAY)) return value def DTristateW(self, port, value): """ This command writes the digital port tristate register. The tristate register determines if the latch register value is driven onto the port pin. A '1' in the tristate register makes the corresponding pin an input, a '0' makes it an output. """ if port < 0 or port > 1: raise ValueError('DTristateW: error in port number.') return request_type = (HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT) request = self.DTRISTATE wValue = value & 0xffff wIndex = port self.udev.controlWrite(request_type, request, wValue, wIndex, [0x0], self.HS_DELAY) def DPort(self, port): """ This command reads the current state of the digital pins from the specified port. 
port = 0 Read port 0 port = 1 Read port 1 port = 2 Read both ports """ if port < 0 or port > 2: raise ValueError('DPort: error in port number.') return request_type = (DEVICE_TO_HOST | VENDOR_TYPE | DEVICE_RECIPIENT) wValue = 0 wIndex = 0 value = unpack('HH',self.udev.controlRead(request_type, self.DPORT, wValue, wIndex, 4, self.HS_DELAY)) if port == 0: return value[0] elif port == 1: return value[1] else: return list(value) def DLatchR(self, port): """ This command reads the digital port latch register port = 0 Read port 0 port = 1 Read port 1 port = 2 Read both ports """ if port < 0 or port > 2: raise ValueError('DLatchR: error in port number.') return request_type = (DEVICE_TO_HOST | VENDOR_TYPE | DEVICE_RECIPIENT) wValue = 0 wIndex = port value = unpack('HH',self.udev.controlRead(request_type, self.DLATCH, wValue, wIndex, 4, self.HS_DELAY)) if port == 0: return value[0] elif port == 1: return value[1] else: return list(value) def DLatchW(self, port, value): """ This command writes the digital port latch register port = 0 Write port 0 port = 1 Write port 1 port = 2 Write both ports """ if port < 0 or port > 2: raise ValueError('DLatchW: error in port number.') return request_type = (HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT) request = self.DLATCH wValue = 0x0 wIndex = port value = pack('HH', value[0], value[1]) self.udev.controlWrite(request_type, request, wValue, wIndex, value, self.HS_DELAY) ########################################## # Register Commands # ########################################## def ReadReg(self, address): """ This command reads the FPGA register at the specified address """ request_type = (DEVICE_TO_HOST | VENDOR_TYPE | DEVICE_RECIPIENT) wValue = 0 wIndex = address & 0xff data = self.udev.controlRead(request_type, self.READ_REG, wValue, wIndex, 1, self.HS_DELAY) return data def WriteReg(self, address, value): """ This command writes the FPGA register at the specified address. The user can change the tristate settings with this command, so any time it is sent, the software must re-check the DTristate status to know the current state. """ request_type = (HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT) request = self.WRITE_REG wValue = value & 0xff wIndex = address & 0xff self.udev.controlWrite(request_type, request, wValue, wIndex, [0x0], self.HS_DELAY) ########################################## # Acquisition Commands # ########################################## def InScanStart(self, channel_map, count, retrig_count, frequency, options, mode=0): """ This command starts the input channel scan. This command will result in a bus stall if an input scan is currently running. Notes: The pacer rate is set by an internal 32-bit incrementing timer running at a base rate of 96MHz. The timer is controlled by pacer_period. A pulse will be
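# --- Illustrative sketch (assumption, not taken from this driver) -----------
# The InScanStart docstring is cut off above. Other MCC HS-series drivers
# typically load the 32-bit pacer register with round(base_clock/frequency) - 1;
# assuming the USB-DIO32HS follows the same convention, a scan rate would be
# converted against the 96 MHz BASE_CLOCK as below. Verify against the actual
# firmware documentation before relying on this.
BASE_CLOCK = 96.E6

def pacer_period(frequency):
    if frequency <= 0.0:
        return 0  # a zero period is commonly used to select an external clock
    return int(round(BASE_CLOCK / frequency) - 1) & 0xffffffff

print(pacer_period(1000.))    # 95999  (1 kS/s per channel)
print(pacer_period(100000.))  # 959    (100 kS/s per channel)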
include-stmt as a string. :rtype: str ''' return '#include "{0}"'.format(self.items[0]) class Cpp_Macro_Stmt(Base): # 6.10.3 Macro replacement """ C99 6.10.3 Macro replacement macro_stmt is # define identifier [( [identifier-list] ) or (...) ] [ replacement-list ] new-line Important: No preceding whitespace is allowed for the left parenthesis of the optional identifier-list. If a preceding whitespace is encountered, the bracket is considered part of the replacement-list. """ use_names = ['Cpp_Macro_Identifier', 'Cpp_Macro_Identifier_List', 'Cpp_Pp_Tokens'] _regex = re.compile(r"#\s*define\b") @staticmethod def match(string): '''Implements the matching for a preprocessor macro definition. It matches define directives with macro identifier, optional identifier list, and optional replacement list. The macro identifier is matched using :py:class:`fparser.two.C99Preprocessor.Cpp_Macro_Identifier` and the optional argument identifier list using :py:class:`fparser.two.C99Preprocessor.Cpp_Macro_Identifier_List`. Important: No preceding whitespace is allowed for the left parentheses of the dentifier-list. If a preceding whitespace is encountered, the it is considered part of the replacement-list. :param str string: the string to match with as an if statement. :return: a tuple of size 3 containing the macro identifier, \ identifier list or None, and replacement list or None, \ or `None` if there is no match. :rtype: \ (py:class:`fparser.two.C99Preprocessor.Cpp_Macro_Identifier`, \ py:class:`fparser.two.C99Preprocessor.Cpp_Macro_Identifier_List` \ or NoneType, \ py:class:`fparser.two.C99Preprocessor.Cpp_Pp_Tokens` or NoneType) \ or `NoneType` ''' if not string: return None line = string.strip() found = Cpp_Macro_Stmt._regex.match(line) if not found: # The line does not match a define statement return None rhs = line[found.end():].strip() found = pattern.macro_name.match(rhs) if not found: return None name = Cpp_Macro_Identifier(found.group()) definition = rhs[found.end():] # note no strip here because '#define MACRO(x)' and # '#define MACRO (x)' are functionally different if not definition: return (name, None, None) if definition[0] == '(': found = Cpp_Macro_Identifier_List._pattern.match(definition) if not found: # The definition starts with a bracket (without preceding # white space) but does not match an identifier list return None parameter_list = Cpp_Macro_Identifier_List(found.group()) # Note that the definition can potentially be an empty string # which is nonetheless included explicitly to distinguish a # macro definition with arguments from one without but with # a definition definition = definition[found.end():] else: parameter_list = None # now that definition only holds the replacement list, we can strip definition = definition.strip() if definition: definition = Cpp_Pp_Tokens(definition) else: definition = None return (name, parameter_list, definition) def tostr(self): ''' :return: this macro-stmt as a string. :rtype: str ''' return '#define {0}{1}{2}{3}'.format( self.items[0], self.items[1] or '', ' ' if self.items[2] else '', self.items[2] or '') class Cpp_Macro_Identifier(StringBase): # pylint: disable=invalid-name '''Implements the matching of a macro identifier.''' # There are no other classes. This is a simple string match. subclass_names = [] @staticmethod def match(string): '''Implements the matching of a macro identifier. It matches the string with the regular expression abs_macro_name in the pattern_tools file. The macro identifier may contain only letters and underscore. 
:param str string: the string to match with the pattern rule. :return: a tuple of size 1 containing a string with the \ matched name if there is a match, or None if there is not. :rtype: (str) or NoneType ''' return StringBase.match(pattern.abs_macro_name, string.strip()) class Cpp_Macro_Identifier_List(StringBase): '''Implements the matching of an identifier list in a macro definition. identifier-list is (identifier [, identifier-list or ...]) or (...) ''' subclass_names = [] _pattern = pattern.Pattern('<identifier-list>', r'\((\s*[A-Za-z_]\w*' r'(?:\s*,\s*[A-Za-z_]\w*)*' r'(?:\s*,\s*\.{3})?|\.{3})?\s*\)') @staticmethod def match(string): '''Implements the matching of a macro identifier list as part of a macro definition. It must consist of one or more macro identifier separated by comma, or "..." for a variadic argument list, and must be surrouned by parentheses. For simplicity, the matched list is kept as a single string and not matched as :py:class:`fparser.two.C99Preprocessor.Cpp_Macro_Identifier`. :param str string: the string to match with the pattern rule. :return: a tuple of size 1 containing a string with the \ matched identifier list if there is a match, or None if \ there is not. :rtype: (`str`,) or `NoneType` ''' if not string: return None return StringBase.match(Cpp_Macro_Identifier_List._pattern, string) def tostr(self): ''' :return: this macro-identifier-list as a string. :rtype: str ''' return self.string class Cpp_Undef_Stmt(WORDClsBase): '''Implements the matching of a preprocessor undef statement for a macro. undef-stmt is # undef identifier new-line Strictly, this is part of 6.10.3 but since it is identified by a different directive keyword (undef instead of define) we treat it separately. ''' subclass_names = [] use_names = ['Cpp_Macro_Identifier'] _pattern = pattern.Pattern('<undef>', r'^\s*(#\s*undef)\b', value='#undef') @staticmethod def match(string): '''Implements the matching for a preprocessor undef statement for a macro. The macro identifier is matched using :py:class:`fparser.two.C99Preprocessor.Cpp_Macro_Identifier`. :param str string: the string to match with as an if statement. :return: a tuple of size 1 containing the macro identifier, or\ `None` if there is no match. :rtype: (py:class:`fparser.two.C99Preprocessor.Cpp_Macro_Identifier`) \ or `NoneType` ''' if not string: return None return WORDClsBase.match( Cpp_Undef_Stmt._pattern, Cpp_Macro_Identifier, string, colons=False, require_cls=True) def tostr(self): ''' :return: this undef-stmt as a string. :rtype: str ''' return '{0} {1}'.format(*self.items) class Cpp_Line_Stmt(WORDClsBase): # 6.10.4 Line control ''' C99 6.10.4 Line control line-stmt is # line digit-sequence [ "s-char-sequence" ] new-line or pp-tokens new-line ''' subclass_names = [] use_names = ['Cpp_Pp_Tokens'] _pattern = pattern.Pattern('<line>', r'^\s*#\s*line\b', value='#line') @staticmethod def match(string): '''Implements the matching for a line preprocessor directive. The right hand side of the directive is not matched any further but simply kept as a string. :param str string: the string to match with as a line statement. :return: a tuple of size 1 with the right hand side as a string, \ or `None` if there is no match. :rtype: (`str`) or `NoneType` ''' if not string: return None return WORDClsBase.match( Cpp_Line_Stmt._pattern, Cpp_Pp_Tokens, string, colons=False, require_cls=True) def tostr(self): ''' :return: this line-stmt as a string. 
:rtype: str ''' return '{0} {1}'.format(*self.items) class Cpp_Error_Stmt(WORDClsBase): # 6.10.5 Error directive ''' C99 6.10.5 Error directive error-stmt is # error [pp-tokens] new-line ''' subclass_names = [] use_names = ['Cpp_Pp_Tokens'] _pattern = pattern.Pattern('<error>', r'^\s*#\s*error\b', value='#error') @staticmethod def match(string): '''Implements the matching for an error preprocessor directive. The optional right hand side of the directive is not matched any further but simply kept as a string. :param str string: the string to match with as a line statement. :return: an empty tuple or a tuple of size 1 with the right hand \ side as a string, or `None` if there is no match. :rtype: () or (`str`) or `NoneType` ''' if not string: return None return WORDClsBase.match( Cpp_Error_Stmt._pattern, Cpp_Pp_Tokens, string, colons=False, require_cls=False) def tostr(self): ''' :return: this error-stmt as a string. :rtype: str ''' if self.items[1]: return '{0} {1}'.format(*self.items) return self.items[0] class Cpp_Warning_Stmt(WORDClsBase): ''' Not actually part of C99 but supported by most preprocessors and with syntax identical to Cpp_Error_Stmt warning-stmt is # warning [pp-tokens] new-line ''' subclass_names = [] use_names = ['Cpp_Pp_Tokens'] _pattern = pattern.Pattern('<warning>', r'^\s*#\s*warning\b', value='#warning') @staticmethod def match(string): '''Implements the matching for a warning preprocessor directive. The optional right hand side of the directive is not matched any further but simply kept as a string. :param str string: the string to match with as a line statement. :return: an empty tuple or a tuple of size 1 with the right hand \ side as a string, or `None` if there is no match. :rtype: () or (`str`) or `NoneType` ''' if not string: return None return WORDClsBase.match( Cpp_Warning_Stmt._pattern, Cpp_Pp_Tokens, string, colons=False, require_cls=False) def tostr(self): ''' :return: this warning-stmt as a string. :rtype: str ''' if self.items[1]: return '{0} {1}'.format(*self.items) return self.items[0] # 6.10.6 Pragma directive # Pragma Preprocessor directives not implemented since Fortran has its own # Pragma syntax in the form of comments. For that reason, most preprocessors # do not support C preprocess pragmas in Fortran code either. class Cpp_Null_Stmt(Base): # 6.10.7 Null directive ''' C99 6.10.7 Null directive null-stmt is # new-line ''' subclass_names = [] @staticmethod def match(string): '''Implements the matching for a Null (empty) directive. :param str string: the string to match with as a line statement. :return: an empty tuple or `None` if there is no match. :rtype: () or `NoneType` '''
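# --- Illustrative check (plain `re`, outside fparser's Pattern wrapper) -----
# The identifier-list pattern defined in Cpp_Macro_Identifier_List above
# accepts a parenthesised list of identifiers, an optional trailing or lone
# "..." for variadic macros, and an empty "()". The quick probe below shows
# which strings it matches.
import re

ident_list = re.compile(r'\((\s*[A-Za-z_]\w*'
                        r'(?:\s*,\s*[A-Za-z_]\w*)*'
                        r'(?:\s*,\s*\.{3})?|\.{3})?\s*\)')

for text in ['(x)', '(x, y)', '(x, y, ...)', '(...)', '()', '(1x)']:
    match = ident_list.match(text)
    print(text, '->', match.group() if match else None)
# Everything but '(1x)' matches: identifiers must begin with a letter or
# underscore, exactly as required for C99 macro parameter lists.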
import warnings warnings.filterwarnings('ignore', module='matplotlib') import logging root.setLevel(logging.CRITICAL) n_volumes = 57 n_runs = 3 min_rest = 3 min_medit = 6 matplotlib.rcParams['figure.facecolor'] = 'white' matplotlib.rcParams['axes.facecolor'] = 'white' matplotlib.rcParams['font.size'] = 18 def arange(n): return np.arange(n+1) rest_color = 'darkgray' fa_color = 'lightgreen' om_color = 'darkorange' fig = pl.figure() ax = fig.add_subplot(111) ax.fill_between(arange(3), 0, 1, facecolor=rest_color) s = 3 for i in range(3): ax.fill_between((arange(6)+s), 0, 1, facecolor=fa_color) s+=6 ax.fill_between((arange(3)+s), 0, 1, facecolor=rest_color) s+=3 ax.fill_between((arange(6)+s), 0, 1, facecolor=om_color) s+=6 ax.fill_between((arange(3)+s), 0, 1, facecolor=rest_color) s+=3 print(s) _ = ax.set_xticks(np.arange(60)[::5]) _ = ax.set_xlabel("Time (min.)") fig.savefig("/home/robbis/windows-vbox/paradigm.eps") 1837/8: cd ~/git/mvpa_itab_wu/ 1837/9: %matplotlib inline matplotlib.rcParams['figure.figsize'] = (15,12) 1837/10: roi_list = np.loadtxt('/media/robbis/DATA/fmri/templates_fcmri/findlab_rois.txt', delimiter=',', dtype=np.str) 1837/11: def load_matrices(path, condition): """ path = path of result file conditions = analysis label """ # Why looking for subjects in this way??? subjects = os.listdir(path) subjects = [s for s in subjects if s.find('configuration') == -1 \ and s.find('.') == -1 ] subjects = [s for s in subjects if s.find("expertise") == -1] result = [] # Why filter here? for c in condition: s_list = [] for s in subjects: sub_path = os.path.join(path, s) filel = os.listdir(sub_path) filel = [f for f in filel if f.find(c) != -1] c_list = [] for f in filel: matrix = np.loadtxt(os.path.join(sub_path, f)) c_list.append(matrix) s_list.append(np.array(c_list)) result.append(np.array(s_list)) return np.array(result) 1837/12: from pyitab.utils.math import z_fisher class ConnectivityLoader(object): def __init__(self, path, subjects, res_dir, roi_list): self.path = os.path.join(path, res_dir) self.subjects = subjects self.roi_list = roi_list def get_results(self, conditions): self.conditions = dict(zip(conditions, range(len(conditions)))) # Loads data for each subject # results is in the form (condition x subjects x runs x matrix) results = load_matrices(self.path, conditions) # Check if there are NaNs in the data nan_mask = np.isnan(results) for _ in range(len(results.shape) - 2): # For each condition/subject/run check if we have nan nan_mask = nan_mask.sum(axis=0) #pl.imshow(np.bool_(nan_mask), interpolation='nearest') #print np.nonzero(np.bool_(nan_mask)[0,:]) # Clean NaNs results = results[:,:,:,~np.bool_(nan_mask)] # Reshaping because numpy masking flattens matrices rows = np.sqrt(results.shape[-1]) shape = list(results.shape[:-1]) shape.append(int(rows)) shape.append(-1) results = results.reshape(shape) # We apply z fisher to results zresults = z_fisher(results) zresults[np.isinf(zresults)] = 1 self.results = zresults # Select mask to delete labels roi_mask = ~np.bool_(np.diagonal(nan_mask)) self.store_details(roi_mask) # Mean across runs zmean = zresults.mean(axis=2) new_shape = list(zmean.shape[-2:]) new_shape.insert(0, -1) zreshaped = zmean.reshape(new_shape) upper_mask = np.ones_like(zreshaped[0]) upper_mask[np.tril_indices(zreshaped[0].shape[0])] = 0 upper_mask = np.bool_(upper_mask) # Returns the mask of the not available ROIs. 
self.nan_mask = nan_mask return self.nan_mask def store_details(self, roi_mask): fields = dict() # Depending on data self.network_names = list(self.roi_list[roi_mask].T[0]) #self.roi_names = list(self.roi_list[roi_mask].T[2]) #self.roi_names = list(self.roi_list[roi_mask].T[1]) self.subject_groups = list(self.subjects.T[1]) self.subject_level = list(np.int_(self.subjects.T[-1])) #self.networks = self.roi_list[roi_mask].T[-2] return fields def get_dataset(self): zresults = self.results new_shape = list(zresults.shape[-2:]) new_shape.insert(0, -1) zreshaped = zresults.reshape(new_shape) upper_mask[np.tril_indices(zreshaped[0].shape[0])] = 0 upper_mask = np.bool_(upper_mask) # Reshape data to have samples x features ds_data = zreshaped[:,upper_mask] labels = [] n_runs = zresults.shape[2] n_subj = zresults.shape[1] for l in self.conditions.keys(): labels += [l for _ in range(n_runs * n_subj)] ds_labels = np.array(labels) ds_subjects = [] for s in self.subjects: ds_subjects += [s for _ in range(n_runs)] ds_subjects = np.array(ds_subjects) ds_info = [] for _ in self.conditions.keys(): ds_info.append(ds_subjects) ds_info = np.vstack(ds_info) self.ds = dataset_wizard(ds_data, targets=ds_labels, chunks=np.int_(ds_info.T[5])) self.ds.sa['subjects'] = ds_info.T[0] self.ds.sa['groups'] = ds_info.T[1] self.ds.sa['chunks_1'] = ds_info.T[2] self.ds.sa['expertise'] = ds_info.T[3] self.ds.sa['age'] = ds_info.T[4] self.ds.sa['chunks_2'] = ds_info.T[5] self.ds.sa['meditation'] = ds_labels return self.ds 1837/13: def get_analysis_mask(path, subjects, directory, roi_list): ######## Get matrix infos ############### conn_test = ConnectivityLoader(path, subjects, directory, roi_list) # Get nan mask to correctly fill matrix nan_mask = conn_test.get_results(['Samatha', 'Vipassana']) # Transform matrix into float of ones mask_ = np.float_(~np.bool_(nan_mask)) # Get the upper part of the matrix mask_ = np.triu(mask_, k=1) return mask_ 1837/14: subjects = np.loadtxt('/media/robbis/DATA/fmri/monks/attributes_struct.txt', dtype=np.str) 1837/15: res_path = "/media/robbis/DATA/fmri/monks/0_results/" dir_ = "20151103_132009_connectivity_filtered_first_filtered_after_each_run_no_gsr_findlab_fmri" mask = get_analysis_mask(res_path, subjects, dir_, roi_list) 1837/16: mask_indices = np.nonzero(mask) len(mask_indices[0]) 1837/17: from pyitab.plot.connectivity import plot_connectivity_circle_edited from pyitab.utils.atlas import get_atlas_info from pyitab.plot.connectivity import circular_layout from pyitab.utils.matrix import copy_matrix 1837/18: names_lr, colors_lr, index_, coords, _, _ = get_atlas_info('findlab') 1837/19: subjects 1837/20: def get_results(path, directory, condition, variable, results_dict={"errors":b"error", "sets":b"features", "weights":b"weights", "samples":b"subjects"}, pattern="%s_values_%s_100_50.npz"): print(os.path.join(path, directory, pattern)) results_ = np.load(os.path.join(path, directory, pattern), encoding='bytes', allow_pickle=True) values = results_['arr_0'].tolist() print(values.keys()) errors = values[results_dict["errors"]] #values_['errors_'] sets = values[results_dict["sets"]] #values_['sets_'] weights = values[results_dict["weights"]] #values_['weights_'] samples = values[results_dict["samples"]] return errors, sets, weights, samples 1837/21: def get_matrix(vector, mask_matrix, normalize=True, diagonal_filler=1): matrix = mask_matrix.copy() matrix[np.nonzero(matrix)] = vector matrix = copy_matrix(matrix, diagonal_filler=diagonal_filler) if normalize: matrix = matrix / (matrix.max()) size 
= np.abs(matrix) size = np.sum(size, axis=0) return matrix, size 1837/22: res_path = "/media/robbis/DATA/fmri/monks/0_results/" dir_ = "20151103_132009_connectivity_filtered_first_filtered_after_each_run_no_gsr_findlab_fmri" cond_ = "Samatha" variable = "age" fname = "%s_values_%s_100_50.npz" % (cond_, variable) 1837/23: erros_age_fa, sets_age_fa, weights_age_fa, samples_age_fa = get_results(res_path, dir_, cond_, variable, pattern=fname) 1837/24: variable = "expertise" fname = "_expertise/%s_values_1000_cv_50.npz" % (cond_) 1837/25: errors_exp_fa, sets_exp_fa, weights_exp_fa, samples_exp_fa = get_results(res_path, dir_, cond_, variable, results_dict={"errors":b"errors_", "sets":b"sets_", "weights":b"weights_", "samples":b"samples_"}, pattern=fname) 1837/26: counter_sets_fa = np.zeros((2, len(mask_indices[0]))) 1837/27: from collections import Counter count_age_fa = Counter(sets_age_fa.flatten()) count_exp_fa = Counter(sets_exp_fa.flatten()) 1837/28: for i in range(len(mask_indices[0])): counter_sets_fa[0, i] = count_age_fa[i] counter_sets_fa[1, i] = count_exp_fa[i] 1837/29: matplotlib.rcParams['axes.spines.top'] = False matplotlib.rcParams['axes.spines.bottom'] = False matplotlib.rcParams['axes.spines.right'] = False matplotlib.rcParams['axes.spines.left'] = False 1837/30: matplotlib.rcParams['figure.facecolor'] = 'white' matplotlib.rcParams['axes.facecolor'] = 'white' matplotlib.rcParams['font.size'] = 15 matplotlib.rcParams['figure.figsize'] = (15,12) 1837/31: matplotlib.style.use('seaborn-white') 1837/32: pl.scatter(counter_sets_fa[0], counter_sets_fa[1], #s=10*counter_sets_fa[0]+10*counter_sets_fa[1]+20, c='green', s=150, #c=10*counter_sets_fa[0]+10*counter_sets_fa[1], #vmin=-10, alpha = 0.3, cmap=pl.cm.Greens) pl.title("Focused Attention (FA) selected features frequency", fontsize=22) pl.xlabel("Selection frequency for Age prediction", fontsize=20) pl.ylabel("Selection frequency for Experience prediction", fontsize=20) pl.yticks([0,10,20,30,40,50], fontsize=20) pl.xticks([0,10,20,30,40,50], fontsize=20) 1837/33: import matplotlib.colors as colors 1837/34: values, x, y = np.histogram2d(x=counter_sets_fa[0], y=counter_sets_fa[1], bins=50) 1837/35: values_plot = values values_plot 1837/36: pl.imshow(values_plot, cmap=pl.cm.Greens, norm=colors.LogNorm(vmax=100)) cb = pl.colorbar() cb.outline.set_visible(False) 1837/37: pl.scatter(counter_sets_fa[0], counter_sets_fa[1], #s=10*counter_sets_fa[0]+10*counter_sets_fa[1]+20, c='green', s=150, #c=10*counter_sets_fa[0]+10*counter_sets_fa[1], #vmin=-10, alpha = 0.3, cmap=pl.cm.Greens) pl.title("Focused Attention (FA) selected features frequency", fontsize=22) pl.xlabel("Selection frequency for Age prediction", fontsize=20) pl.ylabel("Selection frequency for Experience prediction", fontsize=20) pl.yticks([0,10,20,30,40,50], fontsize=20) pl.xticks([0,10,20,30,40,50], fontsize=20) 1837/38: scatter_values[-1] 1837/39: values, x, y = np.histogram2d(x=counter_sets_fa[0], y=counter_sets_fa[1], bins=50) scatter_points = np.nonzero(values) scatter_values = values[scatter_points] im = pl.scatter(scatter_points[0]+1, scatter_points[1]+1, #s=10*counter_sets_om[0]+10*counter_sets_om[1]+20, s=150, c=scatter_values, vmin=1, #vmax=500, alpha=1, norm=colors.LogNorm(), #c='darkorange', cmap=pl.cm.Greens, ) pl.title("Focused Attention (FA) selected features frequency", fontsize=22) pl.xlabel("Selection frequency for Age prediction", fontsize=20) pl.ylabel("Selection frequency for Experience prediction", fontsize=20) pl.yticks([0,10,20,30,40,50], fontsize=20) 
pl.xticks([0,10,20,30,40,50], fontsize=20) 1837/40: cb = pl.colorbar(im, orientation='horizontal') cb.outline.set_visible(False) 1837/41: counter_diff_fa = counter_sets_fa[1] - counter_sets_fa[0] # Experience - Age 1837/42: count_diff_fa_zero = counter_diff_fa[counter_diff_fa != 0] arg_fa_sort = np.argsort(count_diff_fa_zero)[::-1] fa_age_count = -1*counter_sets_fa[0][counter_diff_fa != 0][arg_fa_sort] fa_exp_count = counter_sets_fa[1][counter_diff_fa != 0][arg_fa_sort] 1837/43: from pyitab.plot.connectivity import plot_connectivity_circle_edited from pyitab.utils.atlas import get_atlas_info from pyitab.plot.connectivity import circular_layout, plot_connectivity_lines 1837/44: matrix_fa, size_fa = get_matrix(counter_diff_fa, mask) 1837/45: matplotlib.rcParams['figure.facecolor'] = 'black' f = plot_connectivity_lines(matrix_fa[index_][:,index_], list(names_lr[index_]), node_colors=colors_lr[index_], con_thresh = 0.95, title="Feature Choice for "+cond_+" (Experience > Age)", colormap='PRGn', ) 1837/46: matplotlib.rcParams['figure.facecolor'] = 'black' f, _ = plot_connectivity_circle_edited(matrix_fa[index_][:,index_], names_lr[index_], node_colors=colors_lr[index_], node_size=1.5*size_fa[index_]**3, con_thresh = 0.95, title="Feature Choice for "+cond_+" (Experience > Age)", node_angles=circular_layout(names_lr, list(names_lr), ), fontsize_title=19, fontsize_names=13, fontsize_colorbar=13, colorbar_size=0.3, colormap='PRGn', vmin=-1.5, vmax=1.5, fig=pl.figure(figsize=(16,16)) ) 1837/47: res_path = "/media/robbis/DATA/fmri/monks/0_results/" dir_ = "20151103_132009_connectivity_filtered_first_filtered_after_each_run_no_gsr_findlab_fmri" cond_ = "Vipassana" variable = "age" fname = "%s_values_%s_100_50.npz" % (cond_, variable) 1837/48: erros_age_om, sets_age_om, weights_age_om, samples_age_om = get_results(res_path, dir_, cond_, variable, pattern=fname) 1837/49: variable = "expertise" fname = "_expertise/%s_values_1000_cv_50.npz" % (cond_) 1837/50: errors_exp_om, sets_exp_om, weights_exp_om, samples_exp_om = get_results(res_path, dir_, cond_, variable, results_dict={"errors":b"errors_", "sets":b"sets_", "weights":b"weights_", "samples":b"samples_"}, pattern=fname ) 1837/51: from collections import Counter count_age_om = Counter(sets_age_om.flatten()) count_exp_om = Counter(sets_exp_om.flatten()) 1837/52: counter_sets_om = np.zeros((2, len(mask_indices[0]))) 1837/53: for i in range(len(mask_indices[0])): counter_sets_om[0, i] = count_age_om[i] counter_sets_om[1, i] = count_exp_om[i] 1837/54: matplotlib.rcParams['figure.facecolor'] = 'white' matplotlib.rcParams['axes.facecolor'] = 'white' matplotlib.rcParams['font.size'] = 18 1837/55: from scipy.stats import pearsonr 1837/56: pearsonr(counter_sets_om[0], counter_sets_om[1]) 1837/57: pearsonr(counter_sets_fa[0], counter_sets_fa[1]) 1837/58: counter_sets_fa[0].shape 1837/59: pl.scatter(counter_sets_om[0], counter_sets_om[1], #s=10*counter_sets_om[0]+10*counter_sets_om[1]+20, s=150, #c=10*counter_sets_om[0]+10*counter_sets_om[1], #vmin=-100, #vmax=500, alpha=0.4, c='darkorange', #cmap=pl.cm.Greens, ) pl.title("Open Monitoring (OM) selected features frequency", fontsize=22) pl.xlabel(r"Selection frequency for Age prediction", fontsize=20) pl.ylabel(r"Selection frequency for Experience prediction", fontsize=20) pl.yticks([0,10,20,30,40,50], fontsize=20) pl.xticks([0,10,20,30,40,50], fontsize=20) 1837/60: np.log(scatter_values * 10) 1837/61: values, x, y = np.histogram2d(x=counter_sets_om[0], y=counter_sets_om[1], bins=50) scatter_points = 
np.nonzero(values) scatter_values = values[scatter_points] * 2 args = np.argsort(scatter_values) im = pl.scatter(scatter_points[0]+1, scatter_points[1]+1, #s=10*counter_sets_om[0]+10*counter_sets_om[1]+20, s=150, c=scatter_values, vmin=1, #vmax=500, alpha=1, norm=colors.LogNorm(), #c='darkorange', cmap=pl.cm.Oranges, ) pl.title("Open Monitoring (OM) selected features frequency", fontsize=22) pl.xlabel(r"Selection frequency for Age prediction", fontsize=20) pl.ylabel(r"Selection frequency for Experience prediction", fontsize=20) pl.yticks([0,10,20,30,40,50], fontsize=20) pl.xticks([0,10,20,30,40,50], fontsize=20) 1837/62: cb = pl.colorbar(im, orientation='horizontal') cb.outline.set_visible(False) 1837/63: counter_diff_om = counter_sets_om[1] - counter_sets_om[0] # Expertise - Age 1837/64: matplotlib.rcParams['figure.facecolor'] = 'black' 1837/65: names_lr, colors_lr, index_, coords, _, _ = get_atlas_info('findlab') 1837/66: matrix_om, size_om = get_matrix(counter_diff_om, mask) 1837/67: f, _ = plot_connectivity_circle_edited(matrix_om[index_][:,index_], names_lr[index_], node_colors=colors_lr[index_], node_size=2*size_om[index_]**2.5, con_thresh = 0.95, title=cond_, node_angles=circular_layout(names_lr, list(names_lr), ), fontsize_title=19, fontsize_names=13, fontsize_colorbar=13, colorbar_size=0.3, colormap='PRGn', vmin=-1.5, vmax=1.5, fig=pl.figure(figsize=(16,16)) ) 1837/68: matplotlib.rcParams['figure.facecolor'] = 'white' matplotlib.rcParams['axes.facecolor'] = 'white' matplotlib.rcParams['figure.figsize'] = (14,12) 1837/69: import seaborn as sns 1837/70: values, x, y = np.histogram2d(x=counter_sets_fa[0], y=counter_sets_om[0], bins=50) 1837/71: pl.imshow(np.log(values)) 1837/72: a = np.array([[0,1]]) pl.figure(figsize=(9, 1.5)) img = pl.imshow(a, cmap="Blues") pl.gca().set_visible(False) cax = pl.axes([0.1, 0.2, 0.8, 0.6]) pl.colorbar(orientation="h", cax=cax) 1837/73: pearsonr(counter_sets_fa[0], counter_sets_om[0]) 1837/74: pl.scatter(counter_sets_fa[0], counter_sets_om[0], #s=5*counter_sets_fa[0]+5*counter_sets_om[0]+20, s=150, c='slategray', #cmap=pl.cm.viridis_r, alpha=0.4) pl.title("Feature selection frequency for Age prediction", fontsize=22) pl.xlabel("Selection frequency in FA", fontsize=20) pl.yticks([0,10,20,30,40,50], fontsize=20) pl.ylabel("Selection frequency in OM", fontsize=20) pl.xticks([0,10,20,30,40,50], fontsize=20) 1837/75: matrix_values = np.zeros_like(values) matrix_values[scatter_points] = scatter_values pl.imshow(matrix_values) 1837/76: values, x, y = np.histogram2d(x=counter_sets_fa[0], y=counter_sets_om[0], bins=50) scatter_points = np.nonzero(values) scatter_values = values[scatter_points] * 4 im = pl.scatter( scatter_points[0], scatter_points[1], #s=10*counter_sets_om[0]+10*counter_sets_om[1]+20, s=150, c=scatter_values, vmin=1, #vmax=9, alpha=1, norm=colors.LogNorm(), #c='darkorange', cmap=pl.cm.bone_r, ) pl.title("Feature selection frequency for Age prediction",
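# The cells above repeatedly turn a 2-D selection-frequency histogram into a
# log-colored scatter plot (cells 1837/39, 1837/61 and 1837/76). Below is a
# minimal, self-contained sketch of that binned-scatter pattern using synthetic
# counts; the variable names are illustrative and not taken from the analysis.
import numpy as np
import matplotlib.pyplot as pl
import matplotlib.colors as colors

rng = np.random.default_rng(0)
freq_a = rng.integers(0, 51, size=2000)   # stand-in for one selection-frequency vector
freq_b = rng.integers(0, 51, size=2000)   # stand-in for the other

# Bin the joint frequencies, then draw one marker per non-empty bin,
# colored by the (log-scaled) number of features falling in that bin.
values, x_edges, y_edges = np.histogram2d(freq_a, freq_b, bins=50)
points = np.nonzero(values)
counts = values[points]
im = pl.scatter(points[0], points[1], s=150, c=counts,
                norm=colors.LogNorm(), cmap=pl.cm.Greens)
cb = pl.colorbar(im, orientation='horizontal')
cb.outline.set_visible(False)
pl.xlabel("Selection frequency (prediction A)")
pl.ylabel("Selection frequency (prediction B)")
pl.show()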
"QueryID" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=436, version=0) class Microsoft_IEFRAME_436_0(Etw): pattern = Struct( "QueryID" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=456, version=0) class Microsoft_IEFRAME_456_0(Etw): pattern = Struct( "TabId" / Guid ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=457, version=0) class Microsoft_IEFRAME_457_0(Etw): pattern = Struct( "TabId" / Guid ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=460, version=0) class Microsoft_IEFRAME_460_0(Etw): pattern = Struct( "TabId" / Guid ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=462, version=0) class Microsoft_IEFRAME_462_0(Etw): pattern = Struct( "HWND" / Int64ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=463, version=0) class Microsoft_IEFRAME_463_0(Etw): pattern = Struct( "HWND" / Int64ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=464, version=0) class Microsoft_IEFRAME_464_0(Etw): pattern = Struct( "HWND" / Int64ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=465, version=0) class Microsoft_IEFRAME_465_0(Etw): pattern = Struct( "HWND" / Int64ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=466, version=0) class Microsoft_IEFRAME_466_0(Etw): pattern = Struct( "HWND" / Int64ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=467, version=0) class Microsoft_IEFRAME_467_0(Etw): pattern = Struct( "HWND" / Int64ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=468, version=0) class Microsoft_IEFRAME_468_0(Etw): pattern = Struct( "HWND" / Int64ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=469, version=0) class Microsoft_IEFRAME_469_0(Etw): pattern = Struct( "HWND" / Int64ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=470, version=0) class Microsoft_IEFRAME_470_0(Etw): pattern = Struct( "HWND" / Int64ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=471, version=0) class Microsoft_IEFRAME_471_0(Etw): pattern = Struct( "HWND" / Int64ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=472, version=0) class Microsoft_IEFRAME_472_0(Etw): pattern = Struct( "HWND" / Int64ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=473, version=0) class Microsoft_IEFRAME_473_0(Etw): pattern = Struct( "HWND" / Int64ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=500, version=0) class Microsoft_IEFRAME_500_0(Etw): pattern = Struct( "TabID" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=501, version=0) class Microsoft_IEFRAME_501_0(Etw): pattern = Struct( "TabID" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=502, version=0) class Microsoft_IEFRAME_502_0(Etw): pattern = Struct( "TabID" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=503, version=0) class Microsoft_IEFRAME_503_0(Etw): pattern = Struct( "TabID" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=504, version=0) class Microsoft_IEFRAME_504_0(Etw): pattern = Struct( "TabID" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=505, version=0) class Microsoft_IEFRAME_505_0(Etw): pattern = Struct( "TabID" / Int32ul ) 
@declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=506, version=0) class Microsoft_IEFRAME_506_0(Etw): pattern = Struct( "TabID" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=507, version=0) class Microsoft_IEFRAME_507_0(Etw): pattern = Struct( "TabID" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=508, version=0) class Microsoft_IEFRAME_508_0(Etw): pattern = Struct( "TabID" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=509, version=0) class Microsoft_IEFRAME_509_0(Etw): pattern = Struct( "TabID" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=510, version=0) class Microsoft_IEFRAME_510_0(Etw): pattern = Struct( "TabID" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=511, version=0) class Microsoft_IEFRAME_511_0(Etw): pattern = Struct( "TabID" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=514, version=0) class Microsoft_IEFRAME_514_0(Etw): pattern = Struct( "TabID" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=515, version=0) class Microsoft_IEFRAME_515_0(Etw): pattern = Struct( "TabID" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=516, version=0) class Microsoft_IEFRAME_516_0(Etw): pattern = Struct( "TabID" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=517, version=0) class Microsoft_IEFRAME_517_0(Etw): pattern = Struct( "TabID" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=518, version=0) class Microsoft_IEFRAME_518_0(Etw): pattern = Struct( "TabID" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=525, version=0) class Microsoft_IEFRAME_525_0(Etw): pattern = Struct( "String" / WString ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=533, version=0) class Microsoft_IEFRAME_533_0(Etw): pattern = Struct( "Uint32Val" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=534, version=0) class Microsoft_IEFRAME_534_0(Etw): pattern = Struct( "PrerenderURL" / WString ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=535, version=0) class Microsoft_IEFRAME_535_0(Etw): pattern = Struct( "Count" / Int32ul, "DominantImageUrl1" / WString, "DominantImageUrl2" / WString, "DominantImageUrl3" / WString, "DominantImageUrl4" / WString ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=550, version=0) class Microsoft_IEFRAME_550_0(Etw): pattern = Struct( "ComponentType" / Int32ul, "PID" / Int32ul, "TID" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=551, version=0) class Microsoft_IEFRAME_551_0(Etw): pattern = Struct( "ComponentType" / Int32ul, "PID" / Int32ul, "TID" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=552, version=0) class Microsoft_IEFRAME_552_0(Etw): pattern = Struct( "ImageUrl" / WString, "ImageType" / Int32ul, "DIType" / Int32ul, "DIConfidence" / Int32ul, "TileSize" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=555, version=0) class Microsoft_IEFRAME_555_0(Etw): pattern = Struct( "TabId" / Guid ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=556, version=0) class Microsoft_IEFRAME_556_0(Etw): pattern = Struct( "NotifyFrame" / Int8ul, "Result" / Int32ul ) 
@declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=557, version=0) class Microsoft_IEFRAME_557_0(Etw): pattern = Struct( "NotifyFrame" / Int8ul, "Result" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=558, version=0) class Microsoft_IEFRAME_558_0(Etw): pattern = Struct( "TabID" / Int32sl, "SelectTabAsyncTabID" / Int32sl, "SelectTabAsyncFlags" / Int32ul, "Result" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=559, version=0) class Microsoft_IEFRAME_559_0(Etw): pattern = Struct( "TabID" / Int32sl, "SelectTabAsyncTabID" / Int32sl, "SelectTabAsyncFlags" / Int32ul, "Result" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=560, version=0) class Microsoft_IEFRAME_560_0(Etw): pattern = Struct( "SelectTabAsyncTabID" / Int32sl, "SelectTabAsyncFlags" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=562, version=0) class Microsoft_IEFRAME_562_0(Etw): pattern = Struct( "SelectTabAsyncTabID" / Int32sl, "SelectTabAsyncFlags" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=563, version=0) class Microsoft_IEFRAME_563_0(Etw): pattern = Struct( "TabID" / Int32sl, "NewVisibleState" / Int8ul, "CurrentVisibleState" / Int8ul, "IsTabSwitch" / Int8ul, "IsHung" / Int8ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=564, version=0) class Microsoft_IEFRAME_564_0(Etw): pattern = Struct( "TabID" / Int32sl, "NewVisibleState" / Int8ul, "CurrentVisibleState" / Int8ul, "IsTabSwitch" / Int8ul, "IsHung" / Int8ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=565, version=0) class Microsoft_IEFRAME_565_0(Etw): pattern = Struct( "ISO_HANDLE" / Int64ul, "IDLEMANAGER_TASKTYPE" / Int32ul, "TaskID" / Int32ul, "MaxWaitingTime" / Int32ul, "MaxBlockingTime" / Int32ul, "IDLETASK_PRIORITY" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=566, version=0) class Microsoft_IEFRAME_566_0(Etw): pattern = Struct( "ISO_HANDLE" / Int64ul, "IDLEMANAGER_TASKTYPE" / Int32ul, "TaskID" / Int32ul, "MaxWaitingTime" / Int32ul, "MaxBlockingTime" / Int32ul, "IDLETASK_PRIORITY" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=567, version=0) class Microsoft_IEFRAME_567_0(Etw): pattern = Struct( "ISO_HANDLE" / Int64ul, "IDLEMANAGER_TASKTYPE" / Int32ul, "TaskID" / Int32ul, "MaxWaitingTime" / Int32ul, "MaxBlockingTime" / Int32ul, "IDLETASK_PRIORITY" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=568, version=0) class Microsoft_IEFRAME_568_0(Etw): pattern = Struct( "ISO_HANDLE" / Int64ul, "IDLEMANAGER_TASKTYPE" / Int32ul, "TaskID" / Int32ul, "MaxWaitingTime" / Int32ul, "MaxBlockingTime" / Int32ul, "IDLETASK_PRIORITY" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=569, version=0) class Microsoft_IEFRAME_569_0(Etw): pattern = Struct( "ISO_HANDLE" / Int64ul, "IDLEMANAGER_TASKTYPE" / Int32ul, "TaskID" / Int32ul, "MaxWaitingTime" / Int32ul, "MaxBlockingTime" / Int32ul, "IDLETASK_PRIORITY" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=570, version=0) class Microsoft_IEFRAME_570_0(Etw): pattern = Struct( "ISO_HANDLE" / Int64ul, "IDLEMANAGER_TASKTYPE" / Int32ul, "TaskID" / Int32ul, "MaxWaitingTime" / Int32ul, "MaxBlockingTime" / Int32ul, "IDLETASK_PRIORITY" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=571, version=0) class 
Microsoft_IEFRAME_571_0(Etw): pattern = Struct( "WaitingTaskCount" / Int32ul, "RunningTaskCount" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=577, version=0) class Microsoft_IEFRAME_577_0(Etw): pattern = Struct( "TabID" / Int32sl, "AllowRecovery" / Int8ul, "UseWER" / Int8ul, "HangUIShowing" / Int8ul, "Result" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=578, version=0) class Microsoft_IEFRAME_578_0(Etw): pattern = Struct( "TabID" / Int32sl, "AllowRecovery" / Int8ul, "UseWER" / Int8ul, "HangUIShowing" / Int8ul, "Result" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=579, version=0) class Microsoft_IEFRAME_579_0(Etw): pattern = Struct( "TabID" / Int32sl, "ProcessID" / Int32sl, "HWND" / Int64ul, "HungWindowText" / WString, "Result" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=580, version=0) class Microsoft_IEFRAME_580_0(Etw): pattern = Struct( "TabID" / Int32sl, "ProcessID" / Int32sl, "HWND" / Int64ul, "HungWindowText" / WString, "Result" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=581, version=0) class Microsoft_IEFRAME_581_0(Etw): pattern = Struct( "TabID" / Int32sl, "ProcessID" / Int32sl, "HWND" / Int64ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=582, version=0) class Microsoft_IEFRAME_582_0(Etw): pattern = Struct( "TabID" / Int32sl, "TabVisibleIndex" / Int32sl ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=583, version=0) class Microsoft_IEFRAME_583_0(Etw): pattern = Struct( "TabID" / Int32sl, "TabVisibleIndex" / Int32sl ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=584, version=0) class Microsoft_IEFRAME_584_0(Etw): pattern = Struct( "Result" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=585, version=0) class Microsoft_IEFRAME_585_0(Etw): pattern = Struct( "Result" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=589, version=0) class Microsoft_IEFRAME_589_0(Etw): pattern = Struct( "BindContext" / Int64ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=590, version=0) class Microsoft_IEFRAME_590_0(Etw): pattern = Struct( "BindContext" / Int64ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=592, version=0) class Microsoft_IEFRAME_592_0(Etw): pattern = Struct( "TabID" / Int32sl, "IsActive" / Int8ul, "hwndAlternateOwner" / Int64ul, "fDestroyingHangUI" / Int8ul, "hwndNext" / Int64ul, "hwndPrev" / Int64ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=593, version=0) class Microsoft_IEFRAME_593_0(Etw): pattern = Struct( "TabID" / Int32sl, "IsActive" / Int8ul, "hwndAlternateOwner" / Int64ul, "fDestroyingHangUI" / Int8ul, "hwndNext" / Int64ul, "hwndPrev" / Int64ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=594, version=0) class Microsoft_IEFRAME_594_0(Etw): pattern = Struct( "TabID" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=597, version=0) class Microsoft_IEFRAME_597_0(Etw): pattern = Struct( "TabID" / Int32sl, "IsActive" / Int8ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=598, version=0) class Microsoft_IEFRAME_598_0(Etw): pattern = Struct( "TabID" / Int32sl, "IsActive" / Int8ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=599, version=0) class Microsoft_IEFRAME_599_0(Etw): pattern = 
Struct( "TabID" / Int32sl, "IsActive" / Int8ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=602, version=0) class Microsoft_IEFRAME_602_0(Etw): pattern = Struct( "FileName" / WString ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=603, version=0) class Microsoft_IEFRAME_603_0(Etw): pattern = Struct( "FileName" / WString ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=606, version=0) class Microsoft_IEFRAME_606_0(Etw): pattern = Struct( "FileName" / WString ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=607, version=0) class Microsoft_IEFRAME_607_0(Etw): pattern = Struct( "FileName" / WString ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=608, version=0) class Microsoft_IEFRAME_608_0(Etw): pattern = Struct( "FileName" / WString ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=609, version=0) class Microsoft_IEFRAME_609_0(Etw): pattern = Struct( "FileName" / WString ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=610, version=0) class Microsoft_IEFRAME_610_0(Etw): pattern = Struct( "FileName" / WString ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=611, version=0) class Microsoft_IEFRAME_611_0(Etw): pattern = Struct( "FileName" / WString ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=612, version=0) class Microsoft_IEFRAME_612_0(Etw): pattern = Struct( "FileName" / WString ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=613, version=0) class Microsoft_IEFRAME_613_0(Etw): pattern = Struct( "FileName" / WString ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=614, version=0) class Microsoft_IEFRAME_614_0(Etw): pattern = Struct( "FileName" / WString ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=615, version=0) class Microsoft_IEFRAME_615_0(Etw): pattern = Struct( "FileName" / WString ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=642, version=0) class Microsoft_IEFRAME_642_0(Etw): pattern = Struct( "SharedMemoryHandle" / Int32ul, "FailureReason" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=644, version=0) class Microsoft_IEFRAME_644_0(Etw): pattern = Struct( "CommandType" / Int32sl, "EventType" / Int32sl ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=645, version=0) class Microsoft_IEFRAME_645_0(Etw): pattern = Struct( "PID" / Int32ul, "FoundSuspendable" / Int8ul, "FailureReason" / Int32ul ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=648, version=0) class Microsoft_IEFRAME_648_0(Etw): pattern = Struct( "tabID" / Int32sl ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=649, version=0) class Microsoft_IEFRAME_649_0(Etw): pattern = Struct( "tabID" / Int32sl ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=650, version=0) class Microsoft_IEFRAME_650_0(Etw): pattern = Struct( "tabID" / Int32sl ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=651, version=0) class Microsoft_IEFRAME_651_0(Etw): pattern = Struct( "tabID" / Int32sl, "Title" / WString ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=652, version=0) class Microsoft_IEFRAME_652_0(Etw): pattern = Struct( "HiddenTabCookie" / Int32sl ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=653, version=0) class Microsoft_IEFRAME_653_0(Etw): pattern = 
Struct( "tabID" / Int32sl ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=654, version=0) class Microsoft_IEFRAME_654_0(Etw): pattern = Struct( "tabID" / Int32sl ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=655, version=0) class Microsoft_IEFRAME_655_0(Etw): pattern = Struct( "tabID" / Int32sl ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=656, version=0) class Microsoft_IEFRAME_656_0(Etw): pattern = Struct( "String" / WString ) @declare(guid=guid("5c8bb950-959e-4309-8908-67961a1205d5"), event_id=657, version=0) class Microsoft_IEFRAME_657_0(Etw): pattern = Struct( "String" / WString
<filename>haproxy.py # haproxy-collectd-plugin - haproxy.py # # Author: <NAME> # Description: This is a collectd plugin which runs under the Python plugin to # collect metrics from haproxy. # Plugin structure and logging func taken from # https://github.com/phrawzty/rabbitmq-collectd-plugin # # Modified by "<NAME>" <<EMAIL>>, "<NAME>" <<EMAIL>> import cStringIO as StringIO import socket import csv import pprint import collectd PLUGIN_NAME = 'haproxy' RECV_SIZE = 1024 DEFAULT_METRICS = { 'ConnRate': ('connection_rate', 'gauge'), 'CumReq': ('requests', 'derive'), 'Idle_pct': ('idle_pct', 'gauge'), 'scur': ('session_current', 'gauge'), 'SessRate': ('session_rate_all', 'gauge'), 'lbtot': ('server_selected_total', 'counter'), 'bout': ('bytes_out', 'derive'), 'bin': ('bytes_in', 'derive'), 'ttime': ('session_time_avg', 'gauge'), 'req_rate': ('request_rate', 'gauge'), 'rate': ('session_rate', 'gauge'), 'hrsp_2xx': ('response_2xx', 'derive'), 'hrsp_4xx': ('response_4xx', 'derive'), 'hrsp_5xx': ('response_5xx', 'derive'), 'ereq': ('error_request', 'derive'), 'dreq': ('denied_request', 'derive'), 'econ': ('error_connection', 'derive'), 'dresp': ('denied_response', 'derive'), 'qcur': ('queue_current', 'gauge'), 'qtime': ('queue_time_avg', 'gauge'), 'rtime': ('response_time_avg', 'gauge'), 'eresp': ('error_response', 'derive'), 'wretr': ('retries', 'derive'), 'wredis': ('redispatched', 'derive'), } ENHANCED_METRICS = { # Metrics that are collected for the whole haproxy instance. # The format is haproxy_metricname : {'signalfx_corresponding_metric': 'collectd_type'} # Currently signalfx_corresponding_metric match haproxy_metricname # Correspond to 'show info' socket command 'MaxConn': ('max_connections', 'gauge'), 'CumConns': ('connections', 'derive'), 'MaxConnRate': ('max_connection_rate', 'gauge'), 'MaxSessRate': ('max_session_rate', 'gauge'), 'MaxSslConns': ('max_ssl_connections', 'gauge'), 'CumSslConns': ('ssl_connections', 'derive'), 'MaxPipes': ('max_pipes', 'gauge'), 'Tasks': ('tasks', 'gauge'), 'Run_queue': ('run_queue', 'gauge'), 'PipesUsed': ('pipes_used', 'gauge'), 'PipesFree': ('pipes_free', 'gauge'), 'Uptime_sec': ('uptime_seconds', 'derive'), 'CurrConns': ('current_connections', 'gauge'), 'CurrSslConns': ('current_ssl_connections', 'gauge'), 'SslRate': ('ssl_rate', 'gauge'), 'SslFrontendKeyRate': ('ssl_frontend_key_rate', 'gauge'), 'SslBackendKeyRate': ('ssl_backend_key_rate', 'gauge'), 'SslCacheLookups': ('ssl_cache_lookups', 'derive'), 'SslCacheMisses': ('ssl_cache_misses', 'derive'), 'CompressBpsIn': ('compress_bps_in', 'derive'), 'CompressBpsOut': ('compress_bps_out', 'derive'), 'ZlibMemUsage': ('zlib_mem_usage', 'gauge'), # Metrics that are collected per each proxy separately. 
# Proxy name would be the dimension as well as service_name # Correspond to 'show stats' socket command 'chkfail': ('failed_checks', 'derive'), 'downtime': ('downtime', 'derive'), 'hrsp_1xx': ('response_1xx', 'derive'), 'hrsp_3xx': ('response_3xx', 'derive'), 'hrsp_other': ('response_other', 'derive'), 'qmax': ('queue_max', 'gauge'), 'qlimit': ('queue_limit', 'gauge'), 'rate_lim': ('session_rate_limit', 'gauge'), 'rate_max': ('session_rate_max', 'gauge'), 'req_rate_max': ('request_rate_max', 'gauge'), 'stot': ('session_total', 'derive'), 'slim': ('session_limit', 'gauge'), 'smax': ('session_max', 'gauge'), 'throttle': ('throttle', 'gauge'), 'cli_abrt': ('cli_abrt', 'derive'), 'srv_abrt': ('srv_abrt', 'derive'), 'comp_in': ('comp_in', 'derive'), 'comp_out': ('comp_out', 'derive'), 'comp_byp': ('comp_byp', 'derive'), 'comp_rsp': ('comp_rsp', 'derive'), 'ctime': ('connect_time_avg', 'gauge'), 'act': ('active_servers', 'gauge'), 'bck': ('backup_servers', 'gauge'), 'check_duration': ('health_check_duration', 'gauge'), 'lastsess': ('last_session', 'gauge'), 'conn_rate': ('conn_rate', 'gauge'), 'conn_rate_max': ('conn_rate_max', 'gauge'), 'conn_tot': ('conn_total', 'counter'), 'intercepted': ('intercepted', 'gauge'), 'dcon': ('denied_tcp_conn', 'gauge'), 'dses': ('denied_tcp_sess', 'gauge'), } DIMENSIONS_LIST = [ 'pxname', 'svname', 'pid', 'sid', 'iid', 'type', 'addr', 'cookie', 'mode', 'algo', ] DEFAULT_METRICS = dict((k.lower(), v) for k, v in DEFAULT_METRICS.items()) ENHANCED_METRICS = dict((k.lower(), v) for k, v in ENHANCED_METRICS.items()) METRIC_DELIM = '.' # for the frontend/backend stats DEFAULT_SOCKET = '/var/run/haproxy.sock' DEFAULT_PROXY_MONITORS = ['server', 'frontend', 'backend'] class HAProxySocket(object): """ Encapsulates communication with HAProxy via the socket interface """ def __init__(self, socket_file=DEFAULT_SOCKET): self.socket_file = socket_file def connect(self): # unix sockets all start with '/', use tcp otherwise is_unix = self.socket_file.startswith('/') if is_unix: stat_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) stat_sock.connect(self.socket_file) return stat_sock else: socket_host, separator, port = self.socket_file.rpartition(':') if socket_host is not '' and port is not '' and separator is ':': stat_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) stat_sock.connect((socket_host, int(port))) return stat_sock else: collectd.error('Could not connect to socket with host %s. Check HAProxy config.' % self.socket_file) return def communicate(self, command): '''Get response from single command. 
Args: command: string command to send to haproxy stat socket Returns: a string of the response data ''' if not command.endswith('\n'): command += '\n' stat_sock = self.connect() if stat_sock is None: return '' stat_sock.sendall(command) result_buf = StringIO.StringIO() buf = stat_sock.recv(RECV_SIZE) while buf: result_buf.write(buf) buf = stat_sock.recv(RECV_SIZE) stat_sock.close() return result_buf.getvalue() def get_server_info(self): result = {} output = self.communicate('show info') for line in output.splitlines(): try: key, val = line.split(':', 1) except ValueError: continue result[key.strip()] = val.strip() return result def get_server_stats(self): output = self.communicate('show stat') # sanitize and make a list of lines output = output.lstrip('# ').strip() output = [l.strip(',') for l in output.splitlines()] csvreader = csv.DictReader(output) result = [d.copy() for d in csvreader] return result def get_stats(module_config): """ Makes two calls to haproxy to fetch server info and server stats. Returns the dict containing metric name as the key and a tuple of metric value and the dict of dimensions if any """ if module_config['socket'] is None: collectd.error("Socket configuration parameter is undefined. Couldn't get the stats") return stats = [] haproxy = HAProxySocket(module_config['socket']) try: server_info = haproxy.get_server_info() server_stats = haproxy.get_server_stats() except socket.error: collectd.warning('status err Unable to connect to HAProxy socket at %s' % module_config['socket']) return stats # server wide stats for key, val in server_info.iteritems(): try: stats.append((key, int(val), dict())) except (TypeError, ValueError): pass # proxy specific stats for statdict in server_stats: dimensions = _build_dimension_dict(statdict) if not (statdict['svname'].lower() in module_config['proxy_monitors'] or statdict['pxname'].lower() in module_config['proxy_monitors']): continue for metricname, val in statdict.items(): try: stats.append((metricname, int(val), dimensions)) except (TypeError, ValueError): pass return stats def _build_dimension_dict(statdict): """ Builds dimensions dict to send back with metrics with readable metric names Args: statdict dictionary of metrics from HAProxy to be filtered for dimensions """ dimensions = {} for key in DIMENSIONS_LIST: if key in statdict and key == 'pxname': dimensions['proxy_name'] = statdict['pxname'] elif key in statdict and key == 'svname': dimensions['service_name'] = statdict['svname'] elif key in statdict and key == 'pid': dimensions['process_id'] = statdict['pid'] elif key in statdict and key == 'sid': dimensions['server_id'] = statdict['sid'] elif key in statdict and key == 'iid': dimensions['unique_proxy_id'] = statdict['iid'] elif key in statdict and key == 'type': dimensions['type'] = _get_proxy_type(statdict['type']) elif key in statdict and key == 'addr': dimensions['address'] = statdict['addr'] elif key in statdict and key == 'algo': dimensions['algorithm'] = statdict['algo'] elif key in statdict: dimensions[key] = statdict[key] return dimensions def config(config_values): """ A callback method that loads information from the HaProxy collectd plugin config file. 
Args: config_values (collectd.Config): Object containing config values """ module_config = {} socket = DEFAULT_SOCKET proxy_monitors = [] excluded_metrics = set() enhanced_metrics = False interval = None testing = False custom_dimensions = {} for node in config_values.children: if node.key == "ProxyMonitor" and node.values[0]: proxy_monitors.append(node.values[0]) elif node.key == "Socket" and node.values[0]: socket = node.values[0] elif node.key == "Interval" and node.values[0]: interval = node.values[0] elif node.key == "EnhancedMetrics" and node.values[0]: enhanced_metrics = _str_to_bool(node.values[0]) elif node.key == "ExcludeMetric" and node.values[0]: excluded_metrics.add(node.values[0]) elif node.key == "Testing" and node.values[0]: testing = _str_to_bool(node.values[0]) elif node.key == 'Dimension': if len(node.values) == 2: custom_dimensions.update({node.values[0]: node.values[1]}) else: collectd.warning("WARNING: Check configuration \ setting for %s" % node.key) else: collectd.warning('Unknown config key: %s' % node.key) if not proxy_monitors: proxy_monitors += DEFAULT_PROXY_MONITORS module_config = { 'socket': socket, 'proxy_monitors': proxy_monitors, 'interval': interval, 'enhanced_metrics': enhanced_metrics, 'excluded_metrics': excluded_metrics, 'custom_dimensions': custom_dimensions, 'testing': testing, } proxys = "_".join(proxy_monitors) if testing: return module_config interval_kwarg = {} if interval: interval_kwarg['interval'] = interval collectd.register_read(collect_metrics, data=module_config, name='node_' + module_config['socket'] + '_' + proxys, **interval_kwarg) def _format_dimensions(dimensions): """ Formats a dictionary of dimensions to a format that enables them to be specified as key, value pairs in plugin_instance to signalfx. E.g. >>> dimensions = {'a': 'foo', 'b': 'bar'} >>> _format_dimensions(dimensions) "[a=foo,b=bar]" Args: dimensions (dict): Mapping of {dimension_name: value, ...} Returns: str: Comma-separated list of dimensions """ dim_pairs = ["%s=%s" % (k, v) for k, v in dimensions.iteritems()] return "[%s]" % (",".join(dim_pairs)) def _get_proxy_type(type_id): """ Return human readable proxy type Args: type_id: 0=frontend, 1=backend, 2=server, 3=socket/listener """ proxy_types = { 0: 'frontend', 1: 'backend', 2: 'server', 3: 'socket/listener', } return proxy_types.get(int(type_id)) def _str_to_bool(val): ''' Converts a true/false string to a boolean ''' val = str(val).strip().lower() if val == 'true': return True elif val != 'false': collectd.warning('Warning: String (%s) could not be converted to a boolean. Returning false.' % val) return False def collect_metrics(module_config): collectd.debug('beginning collect_metrics') """ A callback method that gets metrics from HAProxy and records them to collectd. 
""" info = get_stats(module_config) if not info: collectd.warning('%s: No data received' % PLUGIN_NAME) return for metric_name, metric_value, dimensions in info: # assert metric is in valid metrics lists if not metric_name.lower() in DEFAULT_METRICS and not metric_name.lower() in ENHANCED_METRICS: collectd.debug("metric %s is not in either metric list" % metric_name.lower()) continue # skip metrics in enhanced metrics mode if not enabled if not module_config['enhanced_metrics'] and metric_name.lower() in ENHANCED_METRICS: continue # pull metric name & type from respective metrics list if metric_name.lower() in DEFAULT_METRICS: translated_metric_name, val_type = DEFAULT_METRICS[metric_name.lower()] else: translated_metric_name, val_type = ENHANCED_METRICS[metric_name.lower()] # skip over any exlcluded metrics if translated_metric_name in module_config['excluded_metrics']: collectd.debug("excluding metric %s" %
<reponame>NREL/dgen_globetrotter # -*- coding: utf-8 -*- """ Edited Monday Nov 5, 218 @author: tkwasnik """ import pandas as pd import numpy as np import decorators import utility_functions as utilfunc import config import os # GLOBAL SETTINGS # load logger logger = utilfunc.get_logger() @decorators.fn_timer(logger=logger, tab_level=2, prefix='') def aggregate_outputs_solar(agent_df, year, is_first_year, scenario_settings, interyear_results_aggregations=None): """ Aggregate agent-level results into ba-level results for the given year. Parameters ---------- agent_df : pandas.DataFrame agent attributes for the given year year : int modelled year is_first_year : bool is first year indicator scenario_settings : :class:`python.settings.ScenarioSettings` scenario settings loaded from input sheet and csv's interyear_results_aggregations : pandas.DataFrame aggregated pandas dataframe from previous year Returns ------- interyear_results_aggregations : pandas.DataFrame aggregated agent attributes """ #========================================================================================================== # Unpack results dict from previous years #========================================================================================================== if interyear_results_aggregations != None: ba_cum_pv_mw = interyear_results_aggregations['ba_cum_pv_mw'] #========================================================================================================== # Set up objects #========================================================================================================== ba_list = np.unique(np.array(agent_df[config.BA_COLUMN])) # print 'ba_list' # print ba_list col_list_8760 = list([config.BA_COLUMN, 'year']) hour_list = list(np.arange(1,8761)) col_list_8760 = col_list_8760 + hour_list if is_first_year == True: # PV and batt capacities ba_cum_pv_mw = pd.DataFrame(index=ba_list) # Set up for groupby agent_df['index'] = list(range(len(agent_df))) agent_df_to_group = agent_df[[config.BA_COLUMN, 'index']] agents_grouped = agent_df_to_group.groupby([config.BA_COLUMN]).aggregate(lambda x: tuple(x)) #========================================================================================================== # Aggregate PV and Batt capacity by reeds region #========================================================================================================== agent_cum_capacities = agent_df[[ config.BA_COLUMN, 'pv_kw_cum']] ba_cum_pv_kw_year = agent_cum_capacities.groupby(by=config.BA_COLUMN).sum() ba_cum_pv_kw_year[config.BA_COLUMN] = ba_cum_pv_kw_year.index ba_cum_pv_mw[year] = ba_cum_pv_kw_year['pv_kw_cum'] / 1000.0 ba_cum_pv_mw.round(3).to_csv(os.path.join(scenario_settings.out_scen_path, 'dpv_MW_by_ba_and_year.csv'), index_label=config.BA_COLUMN) #========================================================================================================== # Aggregate PV generation profiles and calculate capacity factor profiles #========================================================================================================== # DPV CF profiles are only calculated for the last year, since they change # negligibly from year-to-year. A ten-year degradation is applied, to # approximate the age of a mature fleet. 
if year==scenario_settings.model_years[-1]: pv_gen_by_agent = np.vstack(agent_df['solar_cf_profile']).astype(np.float) / 1e3 * np.array(agent_df['pv_kw_cum'].fillna(0)).reshape(len(agent_df), 1) # Sum each agent's profile into a total dispatch in each BA pv_gen_by_ba = np.zeros([len(ba_list), 8760]) for ba_n, ba in enumerate(ba_list): list_of_agent_indicies = np.array(agents_grouped.loc[ba, 'index']) pv_gen_by_ba[ba_n, :] = np.sum(pv_gen_by_agent[list_of_agent_indicies, :], axis=0) # Apply ten-year degradation pv_deg_rate = agent_df.loc[agent_df.index[0], 'pv_deg'] pv_gen_by_ba = pv_gen_by_ba * (1-pv_deg_rate)**10 # Change the numpy array into pandas dataframe pv_gen_by_ba_df = pd.DataFrame(pv_gen_by_ba, columns=hour_list) # print pv_gen_by_ba_df # print 'pv_gen_by_ba_df' pv_gen_by_ba_df.index = ba_list # Convert generation into capacity factor by diving by total capacity pv_cf_by_ba = pv_gen_by_ba_df[hour_list].divide(ba_cum_pv_mw[year]*1000.0, 'index') pv_cf_by_ba[config.BA_COLUMN] = ba_list # write output pv_cf_by_ba = pv_cf_by_ba[[config.BA_COLUMN] + hour_list] pv_cf_by_ba.round(3).to_csv(scenario_settings.out_scen_path + '/dpv_cf_by_ba.csv', index=False) interyear_results_aggregations = {'ba_cum_pv_mw':ba_cum_pv_mw} #========================================================================================================== # Package interyear results #========================================================================================================== return interyear_results_aggregations #%% @decorators.fn_timer(logger=logger, tab_level=2, prefix='') def apply_elec_price_multiplier_and_escalator(dataframe, year, elec_price_change_traj): """ Obtain a single scalar multiplier for each agent, that is the cost of electricity relative to 2016 (when the tariffs were curated). Also calculate the average increase in the price of electricity over the past ten years, which will be the escalator that they use to project electricity changes in their bill calculations. Parameters ---------- dataframe : pandas.DataFrame agent attributes for the given year year : int modelled year elec_price_change_traj : pandas.DataFrame contains elec_price_multiplier field by country, control region, and sector Returns ------- pandas.DataFrame agent attributes with new attributes Note ---- That many customers will not differentiate between real and nomianl, and therefore many would overestimate the real escalation of electriicty prices. 
""" dataframe = dataframe.reset_index() elec_price_multiplier = elec_price_change_traj[elec_price_change_traj['year']==year].reset_index() horizon_year = year-10 elec_price_escalator_df = elec_price_multiplier.copy() if horizon_year in elec_price_change_traj.year.values: elec_price_escalator_df['historical'] = elec_price_change_traj[elec_price_change_traj['year']==horizon_year]['elec_price_multiplier'].values else: first_year = np.min(elec_price_change_traj['year']) first_year_df = elec_price_change_traj[elec_price_change_traj['year']==first_year].reset_index() missing_years = first_year - horizon_year elec_price_escalator_df['historical'] = first_year_df['elec_price_multiplier']*0.99**missing_years elec_price_escalator_df['elec_price_escalator'] = (elec_price_escalator_df['elec_price_multiplier'] / elec_price_escalator_df['historical'])**(1.0/10) - 1.0 # Set lower bound of escalator at 0, assuming that potential customers would not evaluate declining electricity costs elec_price_escalator_df['elec_price_escalator'] = np.maximum(elec_price_escalator_df['elec_price_escalator'], 0) dataframe = pd.merge(dataframe, elec_price_multiplier[['elec_price_multiplier', config.BA_COLUMN, 'sector_abbr']], how='left', on=[config.BA_COLUMN, 'sector_abbr']) dataframe = pd.merge(dataframe, elec_price_escalator_df[[config.BA_COLUMN, 'sector_abbr', 'elec_price_escalator']], how='left', on=[config.BA_COLUMN, 'sector_abbr']) dataframe = dataframe.set_index('agent_id') return dataframe #%% @decorators.fn_timer(logger=logger, tab_level=2, prefix='') def apply_export_tariff_params(dataframe, net_metering_df): """ Add net metering system size limitation to each agent Parameters ---------- dataframe : pandas.DataFrame agent attributes for the given year year : int modelled year net_metering_df : pandas.DataFrame Attributes ---------- net_metering_df.nem_system_size_limit_kw net_metering_df.year_end_excess_sell_rate_usd_per_kwh net_metering_df.hourly_excess_sell_rate_usd_per_kwh Returns ------- pandas.DataFrame agent attributes with new attributes """ dataframe = dataframe.reset_index() dataframe = pd.merge(dataframe, net_metering_df[[config.BA_COLUMN, 'sector_abbr', 'nem_system_size_limit_kw']], how='left', on=[config.BA_COLUMN, 'sector_abbr']) dataframe = dataframe.set_index('agent_id') return dataframe #%% @decorators.fn_timer(logger=logger, tab_level=2, prefix='') def apply_wholesale_elec_prices(dataframe, df): """ Add control region and sector specific wholesale electricity prices to each agent Parameters ---------- dataframe : pandas.DataFrame agent attributes for the given year df : pandas.DataFrame includes joinable wholesale_elec_usd_per_kwh field Returns ------- pandas.DataFrame agent attributes with new attributes """ dataframe = dataframe.reset_index() dataframe = pd.merge(dataframe, df[[config.BA_COLUMN, 'sector_abbr','wholesale_elec_usd_per_kwh','year']], how='left', on=['year',config.BA_COLUMN, 'sector_abbr']) dataframe = dataframe.set_index('agent_id') return dataframe #%% @decorators.fn_timer(logger=logger, tab_level=2, prefix='') def apply_pv_specs(dataframe, pv_specs): """ Add the year's PV specifications, including pv capitial and OM costs, degredation, and power density by year and sector Parameters ---------- dataframe : pandas.DataFrame agent attributes for the given year pv_specs : pandas.DataFrame Attributes ---------- pv_specs.pv_power_density_w_per_sqft pv_specs.pv_deg pv_specs.pv_price_per_kw pv_specs.pv_om_per_kw pv_specs.pv_variable_om_per_kw Returns ------- pandas.DataFrame agent 
attributes with new attributes """ dataframe = dataframe.reset_index() dataframe = pd.merge(dataframe, pv_specs, how='left', on=['sector_abbr', 'year']) #========================================================================================================== # apply the capital cost multipliers #========================================================================================================== dataframe['pv_price_per_kw'] = (dataframe['pv_price_per_kw'] * dataframe['cap_cost_multiplier']) dataframe = dataframe.set_index('agent_id') return dataframe #%% @decorators.fn_timer(logger = logger, tab_level = 2, prefix = '') def apply_storage_specs(dataframe, batt_price_traj, year, scenario_settings): """ Add the year's Battery specifications Parameters ---------- dataframe : pandas.DataFrame agent attributes for the given year year : int modelled year scenario_settings : :class:`python.settings.ScenarioSettings` scenario settings loaded from input sheet and csv's batt_price_traj : pandas.DataFrame Attributes ---------- batt_price_traj.batt_price_per_kwh batt_price_traj.batt_price_per_kw batt_price_traj.batt_om_per_kw batt_price_traj.batt_om_per_kwh Returns ------- pandas.DataFrame agent attributes with new attributes """ dataframe = dataframe.reset_index() dataframe = pd.merge(dataframe, batt_price_traj, how = 'left', on = ['sector_abbr', 'year']) del dataframe['batt_om_per_kwh'] del dataframe['batt_om_per_kw'] #========================================================================================================== # Add replacement cost payments to base O&M #========================================================================================================== storage_replace_values = batt_price_traj[batt_price_traj['year']==year+scenario_settings.storage_options['batt_replacement_yr']] storage_replace_values['kw_replace_price'] = storage_replace_values['batt_price_per_kw'] * scenario_settings.storage_options['batt_replacement_frac_kw'] storage_replace_values['kwh_replace_price'] = storage_replace_values['batt_price_per_kwh'] * scenario_settings.storage_options['batt_replacement_frac_kwh'] #========================================================================================================== # Calculate the present value of the replacements #========================================================================================================== replace_discount = 0.08 # Use a different discount rate to represent the discounting of the third party doing the replacing replace_fraction = 1 / (1.0+replace_discount)**scenario_settings.storage_options['batt_replacement_yr'] storage_replace_values['kw_replace_present'] = storage_replace_values['kw_replace_price'] * replace_fraction storage_replace_values['kwh_replace_present'] = storage_replace_values['kwh_replace_price'] * replace_fraction #========================================================================================================== # Calculate the level of annual payments whose present value equals the present value of a replacement #========================================================================================================== storage_replace_values['batt_om_per_kw'] += storage_replace_values['kw_replace_present'] * (replace_discount*(1+replace_discount)**20) / ((1+replace_discount)**20 - 1) storage_replace_values['batt_om_per_kwh'] += storage_replace_values['kwh_replace_present'] * (replace_discount*(1+replace_discount)**20) / ((1+replace_discount)**20 - 1) dataframe = pd.merge(dataframe, 
storage_replace_values[['sector_abbr', 'batt_om_per_kwh', 'batt_om_per_kw']], how='left', on=['sector_abbr']) #========================================================================================================== # Apply battery replacement year #========================================================================================================== dataframe['batt_replace_yr'] = scenario_settings.storage_options['batt_replacement_yr'] dataframe = dataframe.set_index('agent_id') return dataframe #%% @decorators.fn_timer(logger=logger, tab_level=2, prefix='') def apply_financial_params(dataframe, financing_terms, inflation_rate): """ apply_financial_params Add the year's financial parameters including depreciation schedule (array for years 0,1,2,3,4,5), Solar ITC fraction, Solar ITC min size kw, Solar ITC max size kw, years of loan term, loan rate, down payment percent, real discount percent, tax rate and economic lifetime Parameters ---------- dataframe : pandas.DataFrame agent attributes for the given year inflation_rate : float inflation rate percent financing_terms : pandas.DataFrame Attributes ---------- financing_terms.deprec_sch financing_terms.itc_fraction financing_terms.min_size_kw financing_terms.max_size_kw financing_terms.loan_term financing_terms.loan_rate financing_terms.down_payment financing_terms.real_discount financing_terms.tax_rate financing_terms.economic_lifetime Returns ------- pandas.DataFrame agent attributes with new attributes joined on """ dataframe = dataframe.reset_index() dataframe = dataframe.merge(financing_terms, how='left', on=['year', 'sector_abbr']) dataframe['inflation'] = inflation_rate dataframe = dataframe.set_index('agent_id') return dataframe #%% @decorators.fn_timer(logger=logger, tab_level=2, prefix='') def apply_load_growth(dataframe, load_growth_df): """ Apply load growth trajactories by country, control region and year Parameters ---------- dataframe : pandas.DataFrame agent attributes for the given year load_growth_df : pandas.DataFrame Attributes ---------- load_growth_df.load_multiplier Returns ------- pandas.DataFrame agent attributes with new attributes """ dataframe = dataframe.reset_index() del dataframe['year'] dataframe = pd.merge(dataframe, load_growth_df, how='left', on=[config.BA_COLUMN, 'sector_abbr']) #========================================================================================================== # for res, load growth translates to kwh_per_customer change #========================================================================================================== dataframe['load_per_customer_in_bin_kwh'] = np.where(dataframe['sector_abbr']=='res', dataframe['load_per_customer_in_bin_kwh_initial'] * dataframe['load_multiplier'], dataframe['load_per_customer_in_bin_kwh_initial']) #========================================================================================================== # for C&I, load growth translates to customer count change #========================================================================================================== dataframe['customers_in_bin'] = np.where(dataframe['sector_abbr']!='res', dataframe['customers_in_bin_initial'] * dataframe['load_multiplier'], dataframe['customers_in_bin_initial']) #========================================================================================================== # for all sectors, total kwh_in_bin changes #========================================================================================================== dataframe['load_in_bin_kwh'] = 
dataframe['load_in_bin_kwh_initial'] * dataframe['load_multiplier'] dataframe = dataframe.set_index('agent_id') return dataframe #%% @decorators.fn_timer(logger=logger, tab_level=2, prefix='') def calculate_developable_customers_and_load(dataframe): """ Calculate cumulative developebale customers and load Parameters ---------- dataframe : pandas.DataFrame agent attributes for the given year Returns ------- pandas.DataFrame agent attributes with new attributes """ dataframe = dataframe.reset_index() dataframe['developable_customers_in_bin'] = dataframe['developable_buildings_pct'] * dataframe['customers_in_bin'] dataframe['developable_load_in_bin_kwh'] = dataframe['developable_buildings_pct'] * dataframe['load_in_bin_kwh'] dataframe = dataframe.set_index('agent_id') return dataframe #%% @decorators.fn_timer(logger=logger, tab_level=2, prefix='') def apply_scale_normalized_load_profiles(dataframe): """ Scale the normalized load based on agent's per captia cumulative energy consumption Parameters ---------- dataframe : pandas.DataFrame agent attributes for the
No | No | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | pulse_voltage_level_range | Yes | Yes | Yes | Yes | Yes | No | No | No | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | transient_response | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ set_as_active_sequence (bool): Specifies that this current sequence is active. ''' vi_ctype = _visatype.ViSession(self._vi) # case S110 channel_name_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding)) # case C010 sequence_name_ctype = ctypes.create_string_buffer(sequence_name.encode(self._encoding)) # case C020 attribute_id_count_ctype = _visatype.ViInt32(0 if attribute_ids is None else len(attribute_ids)) # case S160 attribute_ids_ctype = get_ctypes_pointer_for_buffer(value=attribute_ids, library_type=_visatype.ViInt32) # case B550 set_as_active_sequence_ctype = _visatype.ViBoolean(set_as_active_sequence) # case S150 error_code = self._library.niDCPower_CreateAdvancedSequenceWithChannels(vi_ctype, channel_name_ctype, sequence_name_ctype, attribute_id_count_ctype, attribute_ids_ctype, set_as_active_sequence_ctype) errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False) return @ivi_synchronized def delete_advanced_sequence(self, sequence_name): r'''delete_advanced_sequence Deletes a previously created advanced sequence and all the advanced sequence steps in the advanced sequence. **Support for this Method** You must set the source mode to Sequence to use this method. Using the set_sequence method with Advanced Sequence methods is unsupported. **Related Topics**: `Advanced Sequence Mode <REPLACE_DRIVER_SPECIFIC_URL_1(advancedsequencemode)>`__ `Programming States <REPLACE_DRIVER_SPECIFIC_URL_1(programmingstates)>`__ Note: This method is not supported on all devices. Refer to `Supported Methods by Device <REPLACE_DRIVER_SPECIFIC_URL_2(nidcpowercref.chm',%20'supportedfunctions)>`__ for more information about supported devices. Tip: This method can be called on specific channels within your :py:class:`nidcpower.Session` instance. Use Python index notation on the repeated capabilities container channels to specify a subset, and then call this method on the result. Example: :py:meth:`my_session.channels[ ... ].delete_advanced_sequence` To call the method on all channels, you can call it directly on the :py:class:`nidcpower.Session`. Example: :py:meth:`my_session.delete_advanced_sequence` Args: sequence_name (str): specifies the name of the sequence to delete. ''' vi_ctype = _visatype.ViSession(self._vi) # case S110 channel_name_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding)) # case C010 sequence_name_ctype = ctypes.create_string_buffer(sequence_name.encode(self._encoding)) # case C020 error_code = self._library.niDCPower_DeleteAdvancedSequenceWithChannels(vi_ctype, channel_name_ctype, sequence_name_ctype) errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False) return @ivi_synchronized def create_advanced_sequence(self, sequence_name, property_names, set_as_active_sequence=True): '''create_advanced_sequence Creates an empty advanced sequence. 
Call the create_advanced_sequence_step method to add steps to the active advanced sequence. You can create multiple advanced sequences in a session. **Support for this method** You must set the source mode to Sequence to use this method. Using the set_sequence method with Advanced Sequence methods is unsupported. Use this method in the Uncommitted or Committed programming states. Refer to the `Programming States <REPLACE_DRIVER_SPECIFIC_URL_1(programmingstates)>`__ topic in the *NI DC Power Supplies and SMUs Help* for more information about NI-DCPower programming states. **Related Topics**: `Advanced Sequence Mode <REPLACE_DRIVER_SPECIFIC_URL_1(advancedsequencemode)>`__ `Programming States <REPLACE_DRIVER_SPECIFIC_URL_1(programmingstates)>`__ create_advanced_sequence_step Note: This method is not supported on all devices. Refer to `Supported Methods by Device <REPLACE_DRIVER_SPECIFIC_URL_2(nidcpowercref.chm',%20'supportedfunctions)>`__ for more information about supported devices. Tip: This method can be called on specific channels within your :py:class:`nidcpower.Session` instance. Use Python index notation on the repeated capabilities container channels to specify a subset, and then call this method on the result. Example: :py:meth:`my_session.channels[ ... ].create_advanced_sequence` To call the method on all channels, you can call it directly on the :py:class:`nidcpower.Session`. Example: :py:meth:`my_session.create_advanced_sequence` Args: sequence_name (str): Specifies the name of the sequence to create. property_names (list of str): Specifies the names of the properties you reconfigure per step in the advanced sequence. The following table lists which properties can be configured in an advanced sequence for each NI-DCPower device that supports advanced sequencing. A Yes indicates that the property can be configured in advanced sequencing. An No indicates that the property cannot be configured in advanced sequencing. 
+--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | Property | PXIe-4135 | PXIe-4136 | PXIe-4137 | PXIe-4138 | PXIe-4139 | PXIe-4140/4142/4144 | PXIe-4141/4143/4145 | PXIe-4162/4163 | +================================+===========+===========+===========+===========+===========+=====================+=====================+================+ | dc_noise_rejection | Yes | No | Yes | No | Yes | No | No | Yes | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | aperture_time | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | measure_record_length | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | sense | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | ovp_enabled | Yes | Yes | Yes | No | No | No | No | No | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | ovp_limit | Yes | Yes | Yes | No | No | No | No | No | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | pulse_bias_delay | Yes | Yes | Yes | Yes | Yes | No | No | No | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | pulse_off_time | Yes | Yes | Yes | Yes | Yes | No | No | No | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | pulse_on_time | Yes | Yes | Yes | Yes | Yes | No | No | No | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | source_delay | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | current_compensation_frequency | Yes | No | Yes | No | Yes | No | Yes | Yes | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | current_gain_bandwidth | Yes | No | Yes | No | Yes | No | Yes | Yes | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | current_pole_zero_ratio | Yes | No | Yes | No | Yes | No | Yes | Yes | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | voltage_compensation_frequency | Yes | No | Yes | No | Yes | No | Yes | Yes | 
+--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | voltage_gain_bandwidth | Yes | No | Yes | No | Yes | No | Yes | Yes | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | voltage_pole_zero_ratio | Yes | No | Yes | No | Yes | No | Yes | Yes | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | current_level | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | current_level_range | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | voltage_limit | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | voltage_limit_high | Yes | Yes | Yes | Yes | Yes | Yes | Yes | No | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | voltage_limit_low | Yes | Yes | Yes | Yes | Yes | Yes | Yes | No | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | voltage_limit_range | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | current_limit | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | current_limit_high | Yes | Yes | Yes | Yes | Yes | Yes | Yes | No | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | current_limit_low | Yes | Yes | Yes | Yes | Yes | Yes | Yes | No | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | current_limit_range | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | voltage_level | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | voltage_level_range | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | output_enabled | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | 
output_function | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | output_resistance | Yes | No | Yes | No | Yes | No | Yes | No | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | pulse_bias_current_level | Yes | Yes | Yes | Yes | Yes | No | No | No | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | pulse_bias_voltage_limit | Yes | Yes | Yes | Yes | Yes | No | No | No | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | pulse_bias_voltage_limit_high | Yes | Yes | Yes | Yes | Yes | No | No | No | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | pulse_bias_voltage_limit_low | Yes | Yes | Yes | Yes | Yes | No | No | No | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+ | pulse_current_level | Yes | Yes | Yes | Yes | Yes | No | No | No | +--------------------------------+-----------+-----------+-----------+-----------+-----------+---------------------+---------------------+----------------+
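# --- Hedged usage sketch (not part of the generated driver code above) -------
# Ties together the advanced-sequence methods documented above: create a
# sequence that sweeps voltage_level, add one step per level, run it, then
# delete it. The resource name ('PXI1Slot2'), channel ('0'), sweep values, and
# the SourceMode/Event enum members used here are assumptions for illustration.
import nidcpower

with nidcpower.Session(resource_name='PXI1Slot2', channels='0') as session:
    session.source_mode = nidcpower.SourceMode.SEQUENCE
    session.output_function = nidcpower.OutputFunction.DC_VOLTAGE

    # voltage_level is listed as configurable in advanced sequencing for all
    # devices in the table above.
    session.create_advanced_sequence(
        sequence_name='voltage_sweep',
        property_names=['voltage_level'],
        set_as_active_sequence=True)
    for level in (1.0, 2.0, 3.0):
        session.create_advanced_sequence_step(set_as_active_step=True)
        session.voltage_level = level

    with session.initiate():
        session.wait_for_event(nidcpower.Event.SEQUENCE_ENGINE_DONE)

    session.delete_advanced_sequence(sequence_name='voltage_sweep')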
<gh_stars>1000+ #!/usr/bin/env python3 import sys import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import timeit import mpld3 from mpld3 import plugins, utils # Install tabulate using "pip-install tabulate" from tabulate import tabulate import os def clear(): os.system('clear') # Function to clear screen import pymrpt # Matplotlib figure parameters mpl.rcParams['figure.figsize'] = (12.0, 8.0) plt.rcParams.update({'axes.titlesize': 30, 'axes.labelsize': 20, 'xtick.labelsize': 15, 'ytick.labelsize': 15, 'figure.titlesize': 40}) # Define number of points and camera parameters n = 10 sigma = 0.0005 n_range = list(range(5, 25)) sigma_range = np.arange(0.0001, 0.001, 0.0001) f = 1.0 cx = 0.0 cy = 0.0 cam_intrinsic = np.array([[f, 0.0, cx], [0.0, f, cy], [0.0, 0.0, 1.0]]) # Define constants for serial outputting results checkmark = '\u2713' l_progress_bar = 50 # Instantiate pnp module pnp = pymrpt.pnp(n) # Define settings for comparison module algos = [pnp.dls, pnp.epnp, pnp.p3p, pnp.rpnp, pnp.ppnp, pnp.posit, pnp.lhm] algo_names = ['dls', 'epnp', 'p3p', 'rpnp', 'ppnp', 'posit', 'lhm'] algo_ls = [':', '-', '--', '-', '--', '-', '-'] n_algos = len(algos) n_iter = 100 class HighlightLines(plugins.PluginBase): # css format for interactive d3 plots """A plugin for an interactive legend. Inspired by http://bl.ocks.org/simzou/6439398 """ JAVASCRIPT = """ mpld3.register_plugin("interactive_legend", InteractiveLegend); InteractiveLegend.prototype = Object.create(mpld3.Plugin.prototype); InteractiveLegend.prototype.constructor = InteractiveLegend; InteractiveLegend.prototype.requiredProps = ["line_ids", "labels"]; InteractiveLegend.prototype.defaultProps = {} function InteractiveLegend(fig, props){ mpld3.Plugin.call(this, fig, props); }; InteractiveLegend.prototype.draw = function(){ var labels = new Array(); for(var i=0; i<this.props.labels.length; i++){ var obj = {} obj.label = this.props.labels[i] obj.line = mpld3.get_element(this.props.line_ids[i], this.fig) obj.visible = false; labels.push(obj); } var ax = this.fig.axes[0] var legend = this.fig.canvas.append("svg:g") .attr("class", "legend"); // add the rectangles legend.selectAll("rect") .data(labels) .enter().append("rect") .attr("height",10) .attr("width", 25) .attr("x",ax.width+10+ax.position[0]) .attr("y",function(d,i) { return ax.position[1]+ i * 25 - 10;}) .attr("stroke", function(d) { return d.line.props.edgecolor}) .attr("class", "legend-box") .style("fill", "white") .on("click", click) // add the text legend.selectAll("text") .data(labels) .enter().append("text") .attr("x", function (d) { return ax.width+10+ax.position[0] + 25 + 15 }) .attr("y", function(d,i) { return ax.position[1]+ i * 25 }) .text(function(d) { return d.label }) // specify the action on click function click(d,i){ d.visible = !d.visible; d3.select(this) .style("fill",function(d, i) { console.log(d) var color = d.line.props.edgecolor return d.visible ? color : "white"; }) d3.select(d.line.path[0][0]) .style("stroke-opacity", d.visible ? 
1 : d.line.props.alpha); } }; """ def __init__(self, lines, labels, css): self.css_ = css or "" self.lines = lines self.dict_ = {"type": "interactive_legend", "line_ids": [utils.get_id(line) for line in lines], "labels": labels} css = """ .legend-box { cursor: pointer; } """ def vector2RotMat(vec, theta=0): # Function to convert from axis, angle to rotation matrix # Rodrigues rotation formula n_check = np.linalg.norm(vec) kx = vec[0] / n_check ky = vec[1] / n_check kz = vec[2] / n_check K = np.matrix([[0, -kz, ky], [kz, 0, -kx], [-ky, kx, 0]]) I = np.identity(3) R = I + K * np.sin(theta) + K * K * (1 - np.cos(theta)) R = np.array(R) return R def quatvec2RotMat(q): # Function to convert from quaternion to Rotaiton matrix qw = np.sqrt(1 - np.linalg.norm(q) * np.linalg.norm(q)) qx = q[0] qy = q[1] qz = q[2] R = [1 - 2 * qy * qy - 2 * qz * qz, 2 * qx * qy - 2 * qz * qw, 2 * qx * qz + 2 * qy * qw, 2 * qx * qy + 2 * qz * qw, 1 - 2 * qx * qx - 2 * qz * qz, 2 * qy * qz - 2 * qx * qw, 2 * qx * qz - 2 * qy * qw, 2 * qy * qz + 2 * qx * qw, 1 - 2 * qx * qx - 2 * qy * qy] R = np.reshape(R, [3, 3]) return R def RotMat2quat(R): # Function to convert from rotation matrix to Quaternion qw = np.sqrt(1 + R[0, 0] + R[1, 1] + R[2, 2]) / 2 if qw > 0.01: qx = (R[2, 1] - R[1, 2]) / 4 / qw qy = (R[0, 2] - R[2, 0]) / 4 / qw qz = (R[1, 0] - R[0, 1]) / 4 / qw else: l = np.array([R[0, 0], R[1, 1], R[2, 2]]) ind_max = np.argmax(l) if ind_max == 0: qx = np.sqrt((R[0, 0] + 1) / 2) qy = (R[1, 0] + R[0, 1]) / 4 / qx qz = (R[0, 2] + R[2, 0]) / 4 / qx elif ind_max == 1: qy = np.sqrt((R[1, 1] + 1) / 2) qx = (R[1, 0] + R[0, 1]) / 4 / qy qz = (R[2, 1] + R[1, 2]) / 4 / qy else: qz = np.sqrt((R[2, 2] + 1) / 2) qx = (R[0, 2] + R[2, 0]) / 4 / qz qy = (R[2, 1] + R[1, 2]) / 4 / qz qw = np.sqrt(1 - qx * qx - qy * qy - qz * qz) return [qw, qx, qy, qz] def display_comparison_plot(t, arr, names, line_styles, title, xtitle, ytitle, ylim, figname): f, ax = plt.subplots() lines = [] for i in np.arange(0, len(names)): l, = ax.plot(t, arr[:, i], label=names[i], lw=3, ls=line_styles[i]) lines.append(l) leg = ax.legend(fancybox=True, shadow=True) leg.get_frame().set_alpha(0.8) lined = dict() for legline, origline in zip(leg.get_lines(), lines): legline.set_picker(10) lined[legline] = origline ax.set_xlabel(xtitle) ax.set_ylabel(ytitle) ax.set_title(title) ax = plt.gca() ax.set_ylim(ylim) ax.grid() def onpick(event): legline = event.artist origline = lined[legline] vis = not origline.get_visible() origline.set_visible(vis) if vis: legline.set_alpha(1) else: legline.set_alpha(0.2) f.canvas.draw() f.canvas.mpl_connect('pick_event', onpick) plt.show() plt.savefig(figname + '.pdf') def display_comparison_plot_mpld3(t, arr, names, line_styles, title, xtitle, ytitle, ylim, figname): # Function used to generate interactive d3 plots in html f, ax = plt.subplots() lines = [] for i in np.arange(0, len(names)): l, = ax.plot(t, arr[:, i], label=names[i], lw=3, ls=line_styles[i], alpha=0.2) lines.append(l) ax.set_xlabel(xtitle) ax.set_ylabel(ytitle) ax.set_title(title) ax = plt.gca() ax.set_ylim(ylim) ax.grid() plugins.connect(f, HighlightLines(lines, names, css)) mpld3.display() #mpld3.save_html(f, figname + '.html') return mpld3.fig_to_html(f) def printProgress(iteration, total, prefix='', suffix='', decimals=1, barLength=100): """ # Print iterations progress Call in a loop to create terminal progress bar @params: iteration - Required : current iteration (Int) total - Required : total iterations (Int) prefix - Optional : prefix string (Str) suffix - 
Optional : suffix string (Str) decimals - Optional : positive number of decimals in percent complete (Int) barLength - Optional : character length of bar (Int) """ formatStr = "{0:." + str(decimals) + "f}" percents = formatStr.format(100 * (iteration / float(total))) filledLength = int(round(barLength * iteration / float(total))) bar = "\u2588" * filledLength + '-' * (barLength - filledLength) sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)), sys.stdout.flush() if iteration == total: sys.stdout.write('\n') sys.stdout.flush() def printTestStatus(test_index): # Print overall progress of all the tests head_tests = ['#No.', 'Status', 'Description'] table_tests = [[str(1), '', 'Test for 100 iterations with each algorithm'], [str(2), '', 'Varitaion of error with image pixel noise standard deviation (' + '\u03C3' + ')'], [str(3), '', ' Variation in error with number of 2d/3d correspondences (n)'], [str(4), '', 'Average computation time of each algorithm']] for i in range(0, test_index): table_tests[i][1] = checkmark print(tabulate(table_tests, head_tests, tablefmt='fancy_grid')) for i in range(0, test_index): printProgress(l_progress_bar, l_progress_bar, prefix='Test' + str(i + 1) + ' Progress:', suffix='Complete', barLength=50) def printTest1Results(vals): # Function to print results of test 1 test1_headers = ['Algo', 'Translation Mean Error', 'Translation Median Error', 'Rotation Mean Error', 'Rotation Median Error'] test1_table = np.empty([7, 5], dtype=object) test1_table[:, 1:] = vals test1_table[:, 0] = algo_names print(tabulate(test1_table, test1_headers, tablefmt='fancy_grid')) def printTest4Results(vals): # Function to print results of test 4 test4_headers = ['Algo', 'Translation Mean Error', 'Translation Median Error', 'Rotation Mean Error', 'Rotation Median Error'] vals = np.random.rand(7, 1) test4_table = np.empty([7, 2], dtype=object) test4_table[:, 1:] = vals test4_table[:, 0] = algo_names print(tabulate(test4_table, test4_headers, tablefmt='fancy_grid')) def calc_err(pose1, pose2): # FUnction to compute reprojection errors if np.any(np.isnan(pose1)) or np.linalg.norm(pose1) > 1000000: err = [0, 0] return err # Percent error in translation t_est = np.array(pose1[0:3]) t = np.array(pose2[0:3]) err_t = (np.linalg.norm(t_est - t) / np.linalg.norm(t)) * 100 # Rotation error q_est = pose1[3:6, 0] q = pose2[3:6, 0] if np.linalg.norm(q) != 0 and np.linalg.norm(q_est): val = np.dot(q_est, q) / np.linalg.norm(q_est) / np.linalg.norm(q) else: val = 1 if val > 1: val = 1 elif val < -1: val = -1 elif val == np.nan: val = 1 err_q = np.max(np.abs(np.arccos(val))) * 180 / np.pi err_q = err_q if err_q < 180
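# --- Hedged sanity check (not part of the benchmark script) ------------------
# Round-trips the rotation helpers defined above: build R with Rodrigues'
# formula (vector2RotMat), convert to a quaternion (RotMat2quat) and back
# (quatvec2RotMat), and confirm the matrices agree. The axis and angle are
# arbitrary test values.
import numpy as np

axis = np.array([1.0, 2.0, 3.0])
theta = 0.7
R = vector2RotMat(axis, theta)

qw, qx, qy, qz = RotMat2quat(R)
R_back = quatvec2RotMat(np.array([qx, qy, qz]))  # helper recovers qw from the vector part

assert np.allclose(R, R_back, atol=1e-6)
print(np.max(np.abs(R - R_back)))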
import abc import copy import logging import time import weakref import six import kafka.errors as Errors from kafka.future import Future from kafka.protocol.commit import GroupCoordinatorRequest, OffsetCommitRequest from kafka.protocol.group import (HeartbeatRequest, JoinGroupRequest, LeaveGroupRequest, SyncGroupRequest) from .heartbeat import Heartbeat log = logging.getLogger('kafka.coordinator') class BaseCoordinator(object): """ BaseCoordinator implements group management for a single group member by interacting with a designated Kafka broker (the coordinator). Group semantics are provided by extending this class. See ConsumerCoordinator for example usage. From a high level, Kafka's group management protocol consists of the following sequence of actions: 1. Group Registration: Group members register with the coordinator providing their own metadata (such as the set of topics they are interested in). 2. Group/Leader Selection: The coordinator select the members of the group and chooses one member as the leader. 3. State Assignment: The leader collects the metadata from all the members of the group and assigns state. 4. Group Stabilization: Each member receives the state assigned by the leader and begins processing. To leverage this protocol, an implementation must define the format of metadata provided by each member for group registration in group_protocols() and the format of the state assignment provided by the leader in _perform_assignment() and which becomes available to members in _on_join_complete(). """ DEFAULT_CONFIG = { 'group_id': 'kafka-python-default-group', 'session_timeout_ms': 30000, 'heartbeat_interval_ms': 3000, 'retry_backoff_ms': 100, } def __init__(self, client, **configs): """ Keyword Arguments: group_id (str): name of the consumer group to join for dynamic partition assignment (if enabled), and to use for fetching and committing offsets. Default: 'kafka-python-default-group' session_timeout_ms (int): The timeout used to detect failures when using Kafka's group managementment facilities. Default: 30000 heartbeat_interval_ms (int): The expected time in milliseconds between heartbeats to the consumer coordinator when using Kafka's group management feature. Heartbeats are used to ensure that the consumer's session stays active and to facilitate rebalancing when new consumers join or leave the group. The value must be set lower than session_timeout_ms, but typically should be set no higher than 1/3 of that value. It can be adjusted even lower to control the expected time for normal rebalances. Default: 3000 retry_backoff_ms (int): Milliseconds to backoff when retrying on errors. Default: 100. """ self.config = copy.copy(self.DEFAULT_CONFIG) for key in self.config: if key in configs: self.config[key] = configs[key] self._client = client self.generation = OffsetCommitRequest[2].DEFAULT_GENERATION_ID self.member_id = JoinGroupRequest[0].UNKNOWN_MEMBER_ID self.group_id = self.config['group_id'] self.coordinator_id = None self.rejoin_needed = True self.needs_join_prepare = True self.heartbeat = Heartbeat(**self.config) self.heartbeat_task = HeartbeatTask(weakref.proxy(self)) #self.sensors = GroupCoordinatorMetrics(metrics, metric_group_prefix, metric_tags) def __del__(self): if hasattr(self, 'heartbeat_task') and self.heartbeat_task: self.heartbeat_task.disable() @abc.abstractmethod def protocol_type(self): """ Unique identifier for the class of protocols implements (e.g. "consumer" or "connect"). 
Returns: str: protocol type name """ pass @abc.abstractmethod def group_protocols(self): """Return the list of supported group protocols and metadata. This list is submitted by each group member via a JoinGroupRequest. The order of the protocols in the list indicates the preference of the protocol (the first entry is the most preferred). The coordinator takes this preference into account when selecting the generation protocol (generally more preferred protocols will be selected as long as all members support them and there is no disagreement on the preference). Note: metadata must be type bytes or support an encode() method Returns: list: [(protocol, metadata), ...] """ pass @abc.abstractmethod def _on_join_prepare(self, generation, member_id): """Invoked prior to each group join or rejoin. This is typically used to perform any cleanup from the previous generation (such as committing offsets for the consumer) Arguments: generation (int): The previous generation or -1 if there was none member_id (str): The identifier of this member in the previous group or '' if there was none """ pass @abc.abstractmethod def _perform_assignment(self, leader_id, protocol, members): """Perform assignment for the group. This is used by the leader to push state to all the members of the group (e.g. to push partition assignments in the case of the new consumer) Arguments: leader_id (str): The id of the leader (which is this member) protocol (str): the chosen group protocol (assignment strategy) members (list): [(member_id, metadata_bytes)] from JoinGroupResponse. metadata_bytes are associated with the chosen group protocol, and the Coordinator subclass is responsible for decoding metadata_bytes based on that protocol. Returns: dict: {member_id: assignment}; assignment must either be bytes or have an encode() method to convert to bytes """ pass @abc.abstractmethod def _on_join_complete(self, generation, member_id, protocol, member_assignment_bytes): """Invoked when a group member has successfully joined a group. Arguments: generation (int): the generation that was joined member_id (str): the identifier for the local member in the group protocol (str): the protocol selected by the coordinator member_assignment_bytes (bytes): the protocol-encoded assignment propagated from the group leader. The Coordinator instance is responsible for decoding based on the chosen protocol. """ pass def coordinator_unknown(self): """Check if we know who the coordinator is and have an active connection Side-effect: reset coordinator_id to None if connection failed Returns: bool: True if the coordinator is unknown """ if self.coordinator_id is None: return True if self._client.is_disconnected(self.coordinator_id): self.coordinator_dead() return True return False def ensure_coordinator_known(self): """Block until the coordinator for this group is known (and we have an active connection -- java client uses unsent queue). """ while self.coordinator_unknown(): future = self._send_group_coordinator_request() self._client.poll(future=future) if future.failed(): if isinstance(future.exception, Errors.GroupCoordinatorNotAvailableError): continue elif future.retriable(): metadata_update = self._client.cluster.request_update() self._client.poll(future=metadata_update) else: raise future.exception # pylint: disable-msg=raising-bad-type def need_rejoin(self): """Check whether the group should be rejoined (e.g. 
if metadata changes) Returns: bool: True if it should, False otherwise """ return self.rejoin_needed def ensure_active_group(self): """Ensure that the group is active (i.e. joined and synced)""" if not self.need_rejoin(): return if self.needs_join_prepare: self._on_join_prepare(self.generation, self.member_id) self.needs_join_prepare = False while self.need_rejoin(): self.ensure_coordinator_known() future = self._send_join_group_request() self._client.poll(future=future) if future.succeeded(): member_assignment_bytes = future.value self._on_join_complete(self.generation, self.member_id, self.protocol, member_assignment_bytes) self.needs_join_prepare = True self.heartbeat_task.reset() else: assert future.failed() exception = future.exception if isinstance(exception, (Errors.UnknownMemberIdError, Errors.RebalanceInProgressError, Errors.IllegalGenerationError)): continue elif not future.retriable(): raise exception # pylint: disable-msg=raising-bad-type time.sleep(self.config['retry_backoff_ms'] / 1000.0) def _send_join_group_request(self): """Join the group and return the assignment for the next generation. This function handles both JoinGroup and SyncGroup, delegating to _perform_assignment() if elected leader by the coordinator. Returns: Future: resolves to the encoded-bytes assignment returned from the group leader """ if self.coordinator_unknown(): e = Errors.GroupCoordinatorNotAvailableError(self.coordinator_id) return Future().failure(e) # send a join group request to the coordinator log.info("(Re-)joining group %s", self.group_id) request = JoinGroupRequest[0]( self.group_id, self.config['session_timeout_ms'], self.member_id, self.protocol_type(), [(protocol, metadata if isinstance(metadata, bytes) else metadata.encode()) for protocol, metadata in self.group_protocols()]) # create the request for the coordinator log.debug("Sending JoinGroup (%s) to coordinator %s", request, self.coordinator_id) future = Future() _f = self._client.send(self.coordinator_id, request) _f.add_callback(self._handle_join_group_response, future) _f.add_errback(self._failed_request, self.coordinator_id, request, future) return future def _failed_request(self, node_id, request, future, error): log.error('Error sending %s to node %s [%s]', request.__class__.__name__, node_id, error) # Marking coordinator dead # unless the error is caused by internal client pipelining if not isinstance(error, (Errors.NodeNotReadyError, Errors.TooManyInFlightRequests)): self.coordinator_dead() future.failure(error) def _handle_join_group_response(self, future, response): error_type = Errors.for_code(response.error_code) if error_type is Errors.NoError: log.debug("Received successful JoinGroup response for group %s: %s", self.group_id, response) self.member_id = response.member_id self.generation = response.generation_id self.rejoin_needed = False self.protocol = response.group_protocol log.info("Joined group '%s' (generation %s) with member_id %s", self.group_id, self.generation, self.member_id) #self.sensors.join_latency.record(response.requestLatencyMs()) if response.leader_id == response.member_id: log.info("Elected group leader -- performing partition" " assignments using %s", self.protocol) self._on_join_leader(response).chain(future) else: self._on_join_follower().chain(future) elif error_type is Errors.GroupLoadInProgressError: log.debug("Attempt to join group %s rejected since coordinator %s" " is loading the group.", self.group_id, self.coordinator_id) # backoff and retry future.failure(error_type(response)) elif 
error_type is Errors.UnknownMemberIdError: # reset the member id and retry immediately error = error_type(self.member_id) self.member_id = JoinGroupRequest[0].UNKNOWN_MEMBER_ID log.debug("Attempt to join group %s failed due to unknown member id", self.group_id) future.failure(error) elif error_type in (Errors.GroupCoordinatorNotAvailableError, Errors.NotCoordinatorForGroupError): # re-discover the coordinator and retry with backoff self.coordinator_dead() log.debug("Attempt to join group %s failed due to obsolete " "coordinator information: %s", self.group_id, error_type.__name__) future.failure(error_type()) elif error_type in (Errors.InconsistentGroupProtocolError, Errors.InvalidSessionTimeoutError, Errors.InvalidGroupIdError): # log the error and re-throw the exception error = error_type(response) log.error("Attempt to join group %s failed due to fatal error: %s", self.group_id, error) future.failure(error) elif error_type is Errors.GroupAuthorizationFailedError: future.failure(error_type(self.group_id)) else: # unexpected error, throw the exception error = error_type() log.error("Unexpected
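# --- Hedged sketch (not part of kafka-python) ---------------------------------
# Minimal surface a BaseCoordinator subclass must provide, following the
# abstract methods documented above. The 'echo' protocol name and the empty
# metadata/assignment payloads are invented for illustration.
class EchoCoordinator(BaseCoordinator):

    def protocol_type(self):
        return 'echo'  # class of protocols this coordinator implements

    def group_protocols(self):
        # (protocol name, metadata) pairs, most preferred first; metadata must
        # be bytes or support encode()
        return [('echo-v0', b'')]

    def _on_join_prepare(self, generation, member_id):
        pass  # nothing to clean up between generations in this toy example

    def _perform_assignment(self, leader_id, protocol, members):
        # Leader only: hand every member the same (empty) assignment.
        return {member_id: b'' for member_id, _metadata in members}

    def _on_join_complete(self, generation, member_id, protocol,
                          member_assignment_bytes):
        log.info('Joined generation %s as member %s', generation, member_id)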
# -*- encoding: utf-8 -*- # Copyright 2009-2013 Yelp and Contributors # Copyright 2015-2016 Yelp # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit testing of MRJob.""" import os import os.path import sys import time from io import BytesIO from subprocess import Popen from subprocess import PIPE from mrjob.conf import combine_envs from mrjob.job import MRJob from mrjob.job import UsageError from mrjob.job import _im_func from mrjob.parse import parse_mr_job_stderr from mrjob.protocol import JSONProtocol from mrjob.protocol import JSONValueProtocol from mrjob.protocol import PickleProtocol from mrjob.protocol import RawValueProtocol from mrjob.protocol import ReprProtocol from mrjob.protocol import ReprValueProtocol from mrjob.protocol import StandardJSONProtocol from mrjob.py2 import StringIO from mrjob.step import _IDENTITY_MAPPER from mrjob.step import _IDENTITY_REDUCER from mrjob.step import JarStep from mrjob.step import MRStep from mrjob.util import log_to_stream from tests.mr_hadoop_format_job import MRHadoopFormatJob from tests.mr_sort_values import MRSortValues from tests.mr_tower_of_powers import MRTowerOfPowers from tests.mr_two_step_job import MRTwoStepJob from tests.py2 import TestCase from tests.py2 import patch from tests.quiet import logger_disabled from tests.quiet import no_handlers_for_logger from tests.sandbox import EmptyMrjobConfTestCase from tests.sandbox import SandboxedTestCase # These can't be invoked as a separate script, but they don't need to be class MRBoringJob(MRJob): """It's a boring job, but somebody had to do it.""" def mapper(self, key, value): yield(key, value) def reducer(self, key, values): yield(key, list(values)) class MRInitJob(MRJob): def __init__(self, *args, **kwargs): super(MRInitJob, self).__init__(*args, **kwargs) self.sum_amount = 0 self.multiplier = 0 self.combiner_multipler = 1 def mapper_init(self): self.sum_amount += 10 def mapper(self, key, value): yield(None, self.sum_amount) def reducer_init(self): self.multiplier += 10 def reducer(self, key, values): yield(None, sum(values) * self.multiplier) def combiner_init(self): self.combiner_multiplier = 2 def combiner(self, key, values): yield(None, sum(values) * self.combiner_multiplier) ### Test cases ### class MRInitTestCase(EmptyMrjobConfTestCase): def test_mapper(self): j = MRInitJob() j.mapper_init() self.assertEqual(next(j.mapper(None, None)), (None, j.sum_amount)) def test_init_funcs(self): num_inputs = 2 stdin = BytesIO(b"x\n" * num_inputs) mr_job = MRInitJob(['-r', 'inline', '-']) mr_job.sandbox(stdin=stdin) results = [] with mr_job.make_runner() as runner: runner.run() for line in runner.stream_output(): key, value = mr_job.parse_output_line(line) results.append(value) # these numbers should match if mapper_init, reducer_init, and # combiner_init were called as expected self.assertEqual(results[0], num_inputs * 10 * 10 * 2) class NoTzsetTestCase(TestCase): def setUp(self): self.remove_time_tzset() def tearDown(self): """Test systems without time.tzset() (e.g. Windows). 
See Issue #46.""" self.restore_time_tzset() def remove_time_tzset(self): if hasattr(time, 'tzset'): self._real_time_tzset = time.tzset del time.tzset def restore_time_tzset(self): if hasattr(self, '_real_time_tzset'): time.tzset = self._real_time_tzset def test_init_does_not_require_tzset(self): MRJob() class CountersAndStatusTestCase(TestCase): def test_counters_and_status(self): mr_job = MRJob().sandbox() mr_job.increment_counter('Foo', 'Bar') mr_job.set_status('Initializing qux gradients...') mr_job.increment_counter('Foo', 'Bar') mr_job.increment_counter('Foo', 'Baz', 20) mr_job.set_status('Sorting metasyntactic variables...') parsed_stderr = parse_mr_job_stderr(mr_job.stderr.getvalue()) self.assertEqual(parsed_stderr, {'counters': {'Foo': {'Bar': 2, 'Baz': 20}}, 'statuses': ['Initializing qux gradients...', 'Sorting metasyntactic variables...'], 'other': []}) def test_unicode_set_status(self): mr_job = MRJob().sandbox() # shouldn't raise an exception mr_job.set_status(u'💩') def test_unicode_counter(self): mr_job = MRJob().sandbox() # shouldn't raise an exception mr_job.increment_counter(u'💩', 'x', 1) def test_negative_and_zero_counters(self): mr_job = MRJob().sandbox() mr_job.increment_counter('Foo', 'Bar', -1) mr_job.increment_counter('Foo', 'Baz') mr_job.increment_counter('Foo', 'Baz', -1) mr_job.increment_counter('Qux', 'Quux', 0) parsed_stderr = parse_mr_job_stderr(mr_job.stderr.getvalue()) self.assertEqual(parsed_stderr['counters'], {'Foo': {'Bar': -1, 'Baz': 0}, 'Qux': {'Quux': 0}}) def test_bad_counter_amounts(self): mr_job = MRJob().sandbox() self.assertRaises(TypeError, mr_job.increment_counter, 'Foo', 'Bar', 'two') self.assertRaises(TypeError, mr_job.increment_counter, 'Foo', 'Bar', None) def test_commas_in_counters(self): # commas should be replaced with semicolons mr_job = MRJob().sandbox() mr_job.increment_counter('Bad items', 'a, b, c') mr_job.increment_counter('girl, interrupted', 'movie') parsed_stderr = parse_mr_job_stderr(mr_job.stderr.getvalue()) self.assertEqual(parsed_stderr['counters'], {'Bad items': {'a; b; c': 1}, 'girl; interrupted': {'movie': 1}}) class ProtocolsTestCase(TestCase): # not putting these in their own files because we're not going to invoke # it as a script anyway. 
class MRBoringJob2(MRBoringJob): INPUT_PROTOCOL = StandardJSONProtocol INTERNAL_PROTOCOL = PickleProtocol OUTPUT_PROTOCOL = ReprProtocol class MRBoringJob3(MRBoringJob): def internal_protocol(self): return ReprProtocol() class MRBoringJob4(MRBoringJob): INTERNAL_PROTOCOL = ReprProtocol class MRTrivialJob(MRJob): OUTPUT_PROTOCOL = RawValueProtocol def mapper(self, key, value): yield key, value def assertMethodsEqual(self, fs, gs): # we're going to use this to match bound against unbound methods self.assertEqual([_im_func(f) for f in fs], [_im_func(g) for g in gs]) def test_default_protocols(self): mr_job = MRBoringJob() self.assertMethodsEqual( mr_job.pick_protocols(0, 'mapper'), (RawValueProtocol.read, JSONProtocol.write)) self.assertMethodsEqual( mr_job.pick_protocols(0, 'reducer'), (StandardJSONProtocol.read, JSONProtocol.write)) def test_explicit_default_protocols(self): mr_job2 = self.MRBoringJob2().sandbox() self.assertMethodsEqual(mr_job2.pick_protocols(0, 'mapper'), (JSONProtocol.read, PickleProtocol.write)) self.assertMethodsEqual(mr_job2.pick_protocols(0, 'reducer'), (PickleProtocol.read, ReprProtocol.write)) mr_job3 = self.MRBoringJob3() self.assertMethodsEqual(mr_job3.pick_protocols(0, 'mapper'), (RawValueProtocol.read, ReprProtocol.write)) # output protocol should default to JSON self.assertMethodsEqual(mr_job3.pick_protocols(0, 'reducer'), (ReprProtocol.read, JSONProtocol.write)) mr_job4 = self.MRBoringJob4() self.assertMethodsEqual(mr_job4.pick_protocols(0, 'mapper'), (RawValueProtocol.read, ReprProtocol.write)) # output protocol should default to JSON self.assertMethodsEqual(mr_job4.pick_protocols(0, 'reducer'), (ReprProtocol.read, JSONProtocol.write)) def test_mapper_raw_value_to_json(self): RAW_INPUT = BytesIO(b'foo\nbar\nbaz\n') mr_job = MRBoringJob(['--mapper']) mr_job.sandbox(stdin=RAW_INPUT) mr_job.run_mapper() self.assertEqual(mr_job.stdout.getvalue(), b'null\t"foo"\n' + b'null\t"bar"\n' + b'null\t"baz"\n') def test_reducer_json_to_json(self): JSON_INPUT = BytesIO(b'"foo"\t"bar"\n' + b'"foo"\t"baz"\n' + b'"bar"\t"qux"\n') mr_job = MRBoringJob(args=['--reducer']) mr_job.sandbox(stdin=JSON_INPUT) mr_job.run_reducer() # ujson doesn't add whitespace to JSON self.assertEqual(mr_job.stdout.getvalue().replace(b' ', b''), (b'"foo"\t["bar","baz"]\n' + b'"bar"\t["qux"]\n')) def test_output_protocol_with_no_final_reducer(self): # if there's no reducer, the last mapper should use the # output protocol (in this case, repr) RAW_INPUT = BytesIO(b'foo\nbar\nbaz\n') mr_job = self.MRTrivialJob(['--mapper']) mr_job.sandbox(stdin=RAW_INPUT) mr_job.run_mapper() self.assertEqual(mr_job.stdout.getvalue(), RAW_INPUT.getvalue()) class StrictProtocolsTestCase(EmptyMrjobConfTestCase): class MRBoringReprAndJSONJob(MRBoringJob): # allowing reading in bytes that can't be JSON-encoded INPUT_PROTOCOL = ReprValueProtocol INTERNAL_PROTOCOL = StandardJSONProtocol OUTPUT_PROTOCOL = StandardJSONProtocol class MRBoringJSONJob(MRJob): INPUT_PROTOCOL = StandardJSONProtocol INTERNAL_PROTOCOL = StandardJSONProtocol OUTPUT_PROTOCOL = StandardJSONProtocol def reducer(self, key, values): yield(key, list(values)) BAD_JSON_INPUT = (b'BAD\tJSON\n' + b'"foo"\t"bar"\n' + b'"too"\t"many"\t"tabs"\n' + b'"notabs"\n') UNENCODABLE_REPR_INPUT = (b"'foo'\n" + b'set()\n' + b"'bar'\n") def assertJobHandlesUndecodableInput(self, job_args=()): job = self.MRBoringJSONJob(job_args) job.sandbox(stdin=BytesIO(self.BAD_JSON_INPUT)) with job.make_runner() as r: r.run() # good data should still get through 
self.assertEqual(b''.join(r.stream_output()), b'"foo"\t["bar"]\n') # exception type varies between JSON implementations, # so just make sure there were three exceptions of some sort counters = r.counters()[0] self.assertEqual(sorted(counters), ['Undecodable input']) self.assertEqual( sum(counters['Undecodable input'].values()), 3) def assertJobRaisesExceptionOnUndecodableInput(self, job_args=()): job = self.MRBoringJSONJob(job_args) job.sandbox(stdin=BytesIO(self.BAD_JSON_INPUT)) with job.make_runner() as r: self.assertRaises(Exception, r.run) def assertJobHandlesUnencodableOutput(self, job_args=()): job = self.MRBoringReprAndJSONJob(job_args) job.sandbox(stdin=BytesIO(self.UNENCODABLE_REPR_INPUT)) with job.make_runner() as r: r.run() # good data should still get through self.assertEqual(b''.join(r.stream_output()), b'null\t["bar", "foo"]\n') counters = r.counters()[0] # there should be one Unencodable output error. Exception # type may vary by json implementation self.assertEqual( list(counters), ['Unencodable output']) self.assertEqual( list(counters['Unencodable output'].values()), [1]) def assertJobRaisesExceptionOnUnencodableOutput(self, job_args=()): job = self.MRBoringReprAndJSONJob(job_args) job.sandbox(stdin=BytesIO(self.UNENCODABLE_REPR_INPUT)) with job.make_runner() as r: self.assertRaises(Exception, r.run) def test_undecodable_input(self): self.assertJobRaisesExceptionOnUndecodableInput() def test_undecodable_input_strict_protocols(self): self.assertJobRaisesExceptionOnUndecodableInput( ['--strict-protocols']) def test_undecodable_input_no_strict_protocols(self): self.assertJobHandlesUndecodableInput( ['--no-strict-protocols']) def test_unencodable_output(self): self.assertJobRaisesExceptionOnUnencodableOutput() def test_unencodable_output_strict(self): self.assertJobRaisesExceptionOnUnencodableOutput( ['--strict-protocols']) def test_unencodable_output_no_strict_protocols(self): self.assertJobHandlesUnencodableOutput( ['--no-strict-protocols']) class PickProtocolsTestCase(TestCase): def _yield_none(self, *args, **kwargs): yield None def _make_job(self, steps): class CustomJob(MRJob): INPUT_PROTOCOL = PickleProtocol INTERNAL_PROTOCOL = JSONProtocol OUTPUT_PROTOCOL = JSONValueProtocol def steps(self): return steps args = ['--no-conf'] return CustomJob(args) def _assert_script_protocols(self, steps, expected_protocols): """Given a list of (read_protocol_class, write_protocol_class) tuples for *each substep*, assert that the given _steps_desc() output for each substep matches the protocols in order """ j = self._make_job(steps) for i, step in enumerate(steps): expected_step = expected_protocols[i] step_desc = step.description(i) if step_desc['type'] == 'jar': # step_type for a non-script step is undefined self.assertIsNone(expected_step) else: for substep_key in ('mapper', 'combiner', 'reducer'): if substep_key in step_desc: self.assertIn(substep_key, expected_step) expected_substep = expected_step[substep_key] try: actual_read, actual_write = ( j._pick_protocol_instances(i, substep_key)) except ValueError: self.assertIsNone(expected_substep) else: expected_read, expected_write = expected_substep self.assertIsInstance(actual_read, expected_read) self.assertIsInstance(actual_write, expected_write) else: self.assertNotIn(substep_key, expected_step) def test_single_mapper(self): self._assert_script_protocols( [MRStep(mapper=self._yield_none)], [dict(mapper=(PickleProtocol, JSONValueProtocol))]) def test_single_reducer(self): # MRStep transparently adds mapper 
self._assert_script_protocols( [MRStep(reducer=self._yield_none)], [dict(mapper=(PickleProtocol, JSONProtocol), reducer=(JSONProtocol, JSONValueProtocol))]) def test_mapper_combiner(self): self._assert_script_protocols( [MRStep(mapper=self._yield_none, combiner=self._yield_none)], [dict(mapper=(PickleProtocol, JSONValueProtocol), combiner=(JSONValueProtocol, JSONValueProtocol))]) def test_mapper_combiner_reducer(self): self._assert_script_protocols( [MRStep( mapper=self._yield_none, combiner=self._yield_none, reducer=self._yield_none)], [dict(mapper=(PickleProtocol, JSONProtocol), combiner=(JSONProtocol, JSONProtocol), reducer=(JSONProtocol, JSONValueProtocol))]) def test_begin_jar_step(self): self._assert_script_protocols( [JarStep(jar='binks_jar.jar'), MRStep( mapper=self._yield_none, combiner=self._yield_none, reducer=self._yield_none)], [None, dict(mapper=(PickleProtocol, JSONProtocol), combiner=(JSONProtocol, JSONProtocol), reducer=(JSONProtocol, JSONValueProtocol))]) def test_end_jar_step(self): self._assert_script_protocols( [MRStep( mapper=self._yield_none, combiner=self._yield_none, reducer=self._yield_none), JarStep(jar='binks_jar.jar')], [dict(mapper=(PickleProtocol, JSONProtocol), combiner=(JSONProtocol, JSONProtocol), reducer=(JSONProtocol, JSONValueProtocol)), None]) def test_middle_jar_step(self): self._assert_script_protocols( [MRStep( mapper=self._yield_none, combiner=self._yield_none), JarStep(jar='binks_jar.jar'), MRStep(reducer=self._yield_none)], [dict(mapper=(PickleProtocol, JSONProtocol), combiner=(JSONProtocol, JSONProtocol)), None, dict(reducer=(JSONProtocol, JSONValueProtocol))]) def test_single_mapper_cmd(self): self._assert_script_protocols( [MRStep(mapper_cmd='cat')], [dict(mapper=None)]) def test_single_mapper_cmd_with_script_combiner(self): self._assert_script_protocols( [MRStep( mapper_cmd='cat', combiner=self._yield_none)], [dict(mapper=None, combiner=(RawValueProtocol, RawValueProtocol))]) def test_single_mapper_cmd_with_script_reducer(self): # reducer is only script step so it uses INPUT_PROTOCOL and # OUTPUT_PROTOCOL self._assert_script_protocols( [MRStep( mapper_cmd='cat', reducer=self._yield_none)], [dict(mapper=None, reducer=(PickleProtocol, JSONValueProtocol))]) def test_multistep(self): # reducer is only script step so it uses INPUT_PROTOCOL and # OUTPUT_PROTOCOL self._assert_script_protocols( [MRStep(mapper_cmd='cat', reducer=self._yield_none), JarStep(jar='binks_jar.jar'), MRStep(mapper=self._yield_none)], [dict(mapper=None, reducer=(PickleProtocol, JSONProtocol)), None, dict(mapper=(JSONProtocol, JSONValueProtocol))]) class JobConfTestCase(TestCase): class MRJobConfJob(MRJob): JOBCONF = {'mapred.foo': 'garply', 'mapred.bar.bar.baz': 'foo'} class MRJobConfMethodJob(MRJob): def jobconf(self): return {'mapred.baz': 'bar'} class MRBoolJobConfJob(MRJob): JOBCONF = {'true_value': True, 'false_value': False} class MRHadoopVersionJobConfJob1(MRJob): JOBCONF = {'hadoop_version': 1.0} class MRHadoopVersionJobConfJob2(MRJob): JOBCONF = {'hadoop_version': 0.20} def test_empty(self): mr_job = MRJob() self.assertEqual(mr_job.job_runner_kwargs()['jobconf'], {}) def test_cmd_line_options(self): mr_job = MRJob([ '--jobconf', 'mapred.foo=bar', '--jobconf', 'mapred.foo=baz', '--jobconf', 'mapred.qux=quux', ]) self.assertEqual(mr_job.job_runner_kwargs()['jobconf'], {'mapred.foo': 'baz', # second option
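# --- Hedged illustration (not one of the test jobs above) ---------------------
# A single-step job with explicit protocols, matching the rules exercised by
# ProtocolsTestCase and PickProtocolsTestCase: INPUT_PROTOCOL reads the raw
# input of the first mapper, INTERNAL_PROTOCOL carries key/value pairs between
# mapper and reducer, and OUTPUT_PROTOCOL is used only by the final step's
# writer. The word-counting job itself is invented for the example.
class MRWordFrequency(MRJob):

    INPUT_PROTOCOL = RawValueProtocol      # read plain lines
    INTERNAL_PROTOCOL = JSONProtocol       # key/value pairs between substeps
    OUTPUT_PROTOCOL = JSONValueProtocol    # emit bare JSON values

    def mapper(self, _, line):
        for word in line.split():
            yield word.lower(), 1

    def reducer(self, word, counts):
        # OUTPUT_PROTOCOL writes only the value, so pack the word into it.
        yield None, {word: sum(counts)}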
"""Evaluation of Wigner-d functions and spin-weighted spherical harmonic functions.""" import numpy as np import warnings from numba import njit, prange from scipy.special import binom, factorial from .utils import tri_ravel, tri_base, el_block_size __all__ = [ 'spin_spharm_goldberg', 'DeltaMatrix', 'spin_spherical_harmonic', 'wigner_d', 'get_cached_dmat', 'clear_cached_dmat', 'set_cache_mem_limit', 'get_cache_details', ] @njit(fastmath=True) def _dmat_eval(lmax, arr, lmin=0, lstart=None, arr0=None): # Evaluate the values of the Wigner-d matrices at pi/2. # arr = linear array, modified in-place if arr0 is not None: arr[:len(arr0)] = arr0 else: arr[0] = 1.0 if lstart is None: lstart = lmin offset = tri_ravel(lmin, lmin, 0) for el in range(lstart + 1, lmax + 1): if el <= lmin + 1: # Shift previous result back. elm2_size = el_block_size(el - 2) # If this is the first step, the el-1 block is # in the zeroth position. if el == lstart + 1: elm2_size = 0 elm1_size = el_block_size(el - 1) arr[:elm1_size] = arr[elm2_size:elm2_size + elm1_size].copy() offset = tri_base(el - 1) dll0 = -np.sqrt((2 * el - 1) / float(2 * el)) * \ arr[tri_ravel(el - 1, el - 1, 0) - offset] arr[tri_ravel(el, el, 0) - offset] = dll0 for m2 in range(0, el + 1): if m2 > 0: dllm = np.sqrt( (el / 2) * (2 * el - 1) / ((el + m2) * (el + m2 - 1)) ) * arr[tri_ravel(el - 1, el - 1, m2 - 1) - offset] arr[tri_ravel(el, el, m2) - offset] = dllm for m1 in range(el - 1, m2 - 1, -1): fac1 = (2 * m2) / np.sqrt((el - m1) * (el + m1 + 1)) * \ arr[tri_ravel(el, m1 + 1, m2) - offset] fac2 = 0.0 if (m1 + 2) <= el: fac2 = np.sqrt( ((el - m1 - 1) * (el + m1 + 2)) / ((el - m1) * (el + m1 + 1)) ) * arr[tri_ravel(el, m1 + 2, m2) - offset] arr[tri_ravel(el, m1, m2) - offset] = fac1 - fac2 @njit(fastmath=True) def _access_element(l, m1, m2, arr, lmin=0): # Access stored elements, or use # symmetry relations for non-stored elements. _m1, _m2 = m1, m2 fac = (-1.)**(_m2 - _m1) # For sign convention of the SSHT paper if _m1 < 0 and _m2 < 0: fac *= (-1)**(m1 - m2) m1 *= -1 m2 *= -1 elif _m1 < 0 and _m2 >= 0: fac *= (-1)**(l - _m2) m1 *= -1 elif _m1 >= 0 and _m2 < 0: fac *= (-1)**(l + _m1) m2 *= -1 # reverse if wrong order if m1 < m2: fac *= (-1.)**(m1 - m2) m1, m2 = m2, m1 val = fac * arr[tri_ravel(l, m1, m2) - tri_ravel(lmin, lmin, 0)] return val class DeltaMatrix: """ Wigner-d functions evaluated at pi/2. Only stores values for m1, m2 >= 0. Other values are returned by symmetry relations. Based on the methods in: <NAME>, and <NAME>. Acta Crystallographica Section A, vol. 62, no. 4, 2006, pp. 262–69. Parameters ---------- lmax: int Maximum multipole mode. (Optional) Defaults to sqrt(len(flm)). lmin: int Minimum multipole mode to compute. (Optional, default 0) dmat: DeltaMatrix Initialize using the data in another DeltaMatrix to start. This will speed up computation for larger el, if dmat.lmax <= lmax. dtype: type Provide a dtype for the array. This defaults to 32-bit floats, which will be good to about four decimal places. """ def __init__(self, lmax, lmin=0, dmat=None, dtype=np.float32): arrsize = self.array_size(lmin, lmax) self._arr = np.empty(arrsize, dtype=dtype) self.dtype = dtype self.lmax = lmax self.lmin = lmin self.size = arrsize self._eval(dmat) def _eval(self, old_dmat=None): arr0 = None lstart = 0 if old_dmat is not None: # Start using data from old matrix. 
oln, olx = old_dmat.lmin, old_dmat.lmax ln, lx = self.lmin, self.lmax # Case 0: [ ( ) ] if (oln <= ln) and (lx <= olx): arr0 = old_dmat._arr[tri_base(ln) - tri_base(oln):tri_base(lx + 1) - tri_base(oln)] lstart = lx # Case 1: [ ( ] ) elif (oln <= ln <= olx) and (lx > olx): arr0 = old_dmat._arr[tri_base(ln) - tri_base(oln):tri_base(olx + 1) - tri_base(oln)] lstart = olx # Case 2: [ ] ( ) elif (olx < ln): base = tri_base(olx) - tri_base(oln) arr0 = old_dmat._arr[base: base + el_block_size(olx)] lstart = olx # Start from the topmost block. _dmat_eval(self.lmax, self._arr, lstart=lstart, arr0=arr0, lmin=self.lmin) @classmethod def array_size(cls, lmin, lmax): """Estimate size of the flattened array needed to store Delta_lmm values.""" return (1 + lmax - lmin) * (6 + 5 * lmax + lmax**2 + 4 * lmin + lmax * lmin + lmin**2) // 6 @classmethod def _get_array_params(cls, lmin=None, lmax=None, arrsize=None, strict=False): # Fill in the missing parameter. # Only one input may be None at a time! if arrsize is None: arrsize = cls.array_size(lmin, lmax) if lmax is None: lmax = lmin while True: s = cls.array_size(lmin, lmax) if s > arrsize and strict: raise ValueError("Invalid combination.") if s > arrsize: lmax -= 1 break elif s == arrsize: break lmax += 1 if lmin is None: lmin = lmax while True: s = cls.array_size(lmin, lmax) if s > arrsize and strict: raise ValueError("Invalid combination.") if s == arrsize: break if s > arrsize: lmin += 1 break lmin -= 1 arrsize = cls.array_size(lmin, lmax) return (lmin, lmax, arrsize) def __getitem__(self, index): """ Access stored elements, or use symmetry relations for non-stored elements. Parameters ---------- index: tuple (l, m1, m2) delta matrix entry Returns ------- float Value of Delta[el, m1, m2] """ (l, m1, m2) = index if l < self.lmin: raise ValueError("l < lmin. Need to re-evaluate delta matrix.") if l > self.lmax: raise ValueError("l > lmax. Need to re-evaluate delta matrix.") return _access_element(l, m1, m2, self._arr, self.lmin) @njit(parallel=False, fastmath=True) def _get_matrix_elements(el, m1, m2, arr, outarr): """ Get an array of Delta[el, mp, m1] * Delta[el, mp, m2]. Results are written into outarr """ for mp in prange(0, el + 1): outarr[mp] = _access_element( el, mp, m1, arr) * _access_element(el, mp, m2, arr) @njit(fastmath=True) def _outerprod_wigner(el, m1, m2, dmats, theta, outarr): """ Performs a sum in wigner_d with speed while avoiding a bottleneck. Results are written intto outarr, which must have the same shape as theta. """ for mp in prange(1, el + 1): exp_fac = np.exp(1j * mp * theta) outarr[:] += (exp_fac + (-1.)**(m1 + m2 - 2 * el) / exp_fac) * dmats[mp] class HarmonicFunction: """ Methods for calculating Wigner-d functions and spin-weighted spherical harmonics. Caches a :class:`DeltaMatrix` to speed up subsequent calculations. Not to be instantiated. """ current_dmat = None cache_mem_limit = 500 * 2**20 # 500 MiB, lmax ~ 921 for lmin = 0 # This is the maximum el mode that whose full m1/m2 block can be # contained in the allowed memory limit. # Reset by set_cache_mem_limit() maximum_el = 10238 def __init__(self): raise Exception("HarmonicFunction class is not instantiable.") @classmethod def set_cache_mem_limit(cls, maxmem): newlim = maxmem * 2**20 if cls.current_dmat is not None and cls.current_dmat._arr.nbytes > newlim: raise ValueError(f"Cached DeltaMatrix exceeds new limit {maxmem} MiB." 
" Clear it using clear_cached_dmat() first.") cls.cache_mem_limit = newlim max_block_size = cls._est_arrsize_limit(cls.cache_mem_limit) cls.maximum_el = int(np.floor((np.sqrt(1 + 8 * max_block_size) - 3) / 2)) @classmethod def get_cache_details(cls): return_dict = { 'cache_mem_limit': cls.cache_mem_limit, 'maximum_el': cls.maximum_el } if cls.current_dmat is not None: return_dict.update({ 'lmin': cls.current_dmat.lmin, 'lmax': cls.current_dmat.lmax, 'size': cls.current_dmat.size, }) return return_dict @classmethod def _est_arrsize_limit(cls, maxmem, double=False): # maxmem = memory limit in bytes dtype = np.float32 if double: dtype = np.float64 return int(np.floor(maxmem / np.zeros(1, dtype=dtype).nbytes)) @classmethod def _limit_lmin_lmax(cls, lmin, lmax, high, dtype=None): """ Choose new lmin/lmax respecting array size limits. Parameters ---------- lmin, lmax: int Desired lmin/lmax. high: bool If the given limits are not possible under memory restrictions, this chooses whether the returned range should include lmin (False) or lmax (True). """ if dtype is np.float64: double = True else: double = False mem_lim = getattr(cls, 'cache_mem_limit') max_arrsize = cls._est_arrsize_limit(mem_lim, double=double) req_arrsize = DeltaMatrix.array_size(lmin, lmax) limited = req_arrsize > max_arrsize if limited and high: lmin,
<gh_stars>0 import numpy as np import pandas as pd import pytest from pandas.testing import assert_frame_equal # from http://imachordata.com/2016/02/05/you-complete-me/ @pytest.fixture def df1(): return pd.DataFrame( { "Year": [1999, 2000, 2004, 1999, 2004], "Taxon": [ "Saccharina", "Saccharina", "Saccharina", "Agarum", "Agarum", ], "Abundance": [4, 5, 2, 1, 8], } ) def test_empty_column(df1): """Return dataframe if `columns` is empty.""" assert_frame_equal(df1.complete(), df1) def test_MultiIndex_column(df1): """Raise ValueError if column is a MultiIndex.""" df = df1 df.columns = [["A", "B", "C"], list(df.columns)] with pytest.raises(ValueError): df1.complete(["Year", "Taxon"]) def test_column_duplicated(df1): """Raise ValueError if column is duplicated in `columns`""" with pytest.raises(ValueError): df1.complete( columns=[ "Year", "Taxon", {"Year": lambda x: range(x.min().x.max() + 1)}, ] ) def test_type_columns(df1): """Raise error if columns is not a list object.""" with pytest.raises(TypeError): df1.complete(columns="Year") @pytest.mark.xfail(reason="fill_value dropped. fillna better.") def test_fill_value_is_a_dict(df1): """Raise error if fill_value is not a dictionary""" with pytest.raises(TypeError): df1.complete(columns=["Year", "Taxon"]) @pytest.mark.xfail(reason="fill_value dropped. fillna better.") def test_wrong_column_fill_value(df1): """Raise ValueError if column in `fill_value` does not exist.""" with pytest.raises(ValueError): df1.complete(columns=["Taxon", "Year"]) def test_wrong_data_type_dict(df1): """ Raise ValueError if value in dictionary is not a 1-dimensional object. """ with pytest.raises(ValueError): df1.complete(columns=[{"Year": pd.DataFrame([2005, 2006, 2007])}]) def test_not_list_like_type_dict(df1): """ Raise ValueError if value in dictionary is not a list-like object. """ with pytest.raises(ValueError): df1.complete(columns=[{"Year": "2001, 2002, 2003"}]) def test_MultiIndex_type_dict(df1): """ Raise ValueError if value in dictionary is a MultiIndex. """ with pytest.raises(ValueError): df1.complete( columns=[ {"Year": pd.MultiIndex.from_tuples([(1, 2001), (2, 2002)])} ] ) def test_empty_type_dict(df1): """ Raise ValueError if value in dictionary is empty. """ with pytest.raises(ValueError): df1.complete(columns=[{"Year": pd.Index([])}]) frame = pd.DataFrame( { "Year": [1999, 2000, 2004, 1999, 2004], "Taxon": [ "Saccharina", "Saccharina", "Saccharina", "Agarum", "Agarum", ], "Abundance": [4, 5, 2, 1, 8], } ) wrong_columns = ( (frame, ["b", "Year"]), (frame, [{"Yayay": range(7)}]), (frame, ["Year", ["Abundant", "Taxon"]]), (frame, ["Year", ("Abundant", "Taxon")]), ) empty_sub_columns = [ (frame, ["Year", []]), (frame, ["Year", {}]), (frame, ["Year", ()]), ] @pytest.mark.parametrize("frame,wrong_columns", wrong_columns) def test_wrong_columns(frame, wrong_columns): """Test that ValueError is raised if wrong column is supplied.""" with pytest.raises(ValueError): frame.complete(columns=wrong_columns) @pytest.mark.parametrize("frame,empty_sub_cols", empty_sub_columns) def test_empty_subcols(frame, empty_sub_cols): """Raise ValueError for an empty group in columns""" with pytest.raises(ValueError): frame.complete(columns=empty_sub_cols) @pytest.mark.xfail(reason="fill_value dropped. 
fillna is better.") def test_fill_value(df1): """Test fill_value argument.""" output1 = pd.DataFrame( { "Year": [1999, 1999, 2000, 2000, 2004, 2004], "Taxon": [ "Agarum", "Saccharina", "Agarum", "Saccharina", "Agarum", "Saccharina", ], "Abundance": [1, 4.0, 0, 5, 8, 2], } ) result = df1.complete(columns=["Year", "Taxon"]).fillna({"Abundance": 0}) assert_frame_equal(result, output1) @pytest.fixture def df1_output(): return pd.DataFrame( { "Year": [ 1999, 1999, 2000, 2000, 2001, 2001, 2002, 2002, 2003, 2003, 2004, 2004, ], "Taxon": [ "Agarum", "Saccharina", "Agarum", "Saccharina", "Agarum", "Saccharina", "Agarum", "Saccharina", "Agarum", "Saccharina", "Agarum", "Saccharina", ], "Abundance": [1.0, 4, 0, 5, 0, 0, 0, 0, 0, 0, 8, 2], } ) def test_fill_value_all_years(df1, df1_output): """ Test the complete function accurately replicates for all the years from 1999 to 2004. """ result = df1.complete( columns=[{"Year": lambda x: range(x.min(), x.max() + 1)}, "Taxon"] ).fillna(0) assert_frame_equal(result, df1_output) def test_dict_series(df1, df1_output): """ Test the complete function if a dictionary containing a Series is present in `columns`. """ result = df1.complete( columns=[ {"Year": lambda x: pd.Series(range(x.min(), x.max() + 1))}, "Taxon", ] ).fillna(0) assert_frame_equal(result, df1_output) def test_dict_series_duplicates(df1, df1_output): """ Test the complete function if a dictionary containing a Series (with duplicates) is present in `columns`. """ result = df1.complete( columns=[ { "Year": pd.Series( [1999, 2000, 2000, 2001, 2002, 2002, 2002, 2003, 2004] ) }, "Taxon", ] ).fillna(0) assert_frame_equal(result, df1_output) def test_dict_values_outside_range(df1): """ Test the output if a dictionary is present, and none of the values in the dataframe, for the corresponding label, is not present in the dictionary's values. 
""" result = df1.complete( columns=[("Taxon", "Abundance"), {"Year": np.arange(2005, 2007)}] ) expected = pd.DataFrame( [ {"Taxon": "Agarum", "Abundance": 1, "Year": 1999}, {"Taxon": "Agarum", "Abundance": 1, "Year": 2005}, {"Taxon": "Agarum", "Abundance": 1, "Year": 2006}, {"Taxon": "Agarum", "Abundance": 8, "Year": 2004}, {"Taxon": "Agarum", "Abundance": 8, "Year": 2005}, {"Taxon": "Agarum", "Abundance": 8, "Year": 2006}, {"Taxon": "Saccharina", "Abundance": 2, "Year": 2004}, {"Taxon": "Saccharina", "Abundance": 2, "Year": 2005}, {"Taxon": "Saccharina", "Abundance": 2, "Year": 2006}, {"Taxon": "Saccharina", "Abundance": 4, "Year": 1999}, {"Taxon": "Saccharina", "Abundance": 4, "Year": 2005}, {"Taxon": "Saccharina", "Abundance": 4, "Year": 2006}, {"Taxon": "Saccharina", "Abundance": 5, "Year": 2000}, {"Taxon": "Saccharina", "Abundance": 5, "Year": 2005}, {"Taxon": "Saccharina", "Abundance": 5, "Year": 2006}, ] ) assert_frame_equal(result, expected) complete_parameters = [ ( pd.DataFrame( [ { "date": pd.Timestamp("2014-10-20 00:00:00"), "colour": "red", "orders": 7, }, { "date": pd.Timestamp("2014-10-21 00:00:00"), "colour": "red", "orders": 10, }, { "date": pd.Timestamp("2014-10-20 00:00:00"), "colour": "yellow", "orders": 3, }, ] ), [{"date": pd.date_range("2014-10-20", "2014-10-22")}, "colour"], pd.DataFrame( [ { "date": pd.Timestamp("2014-10-20 00:00:00"), "colour": "red", "orders": 7.0, }, { "date": pd.Timestamp("2014-10-20 00:00:00"), "colour": "yellow", "orders": 3.0, }, { "date": pd.Timestamp("2014-10-21 00:00:00"), "colour": "red", "orders": 10.0, }, { "date": pd.Timestamp("2014-10-21 00:00:00"), "colour": "yellow", "orders": np.nan, }, { "date": pd.Timestamp("2014-10-22 00:00:00"), "colour": "red", "orders": np.nan, }, { "date": pd.Timestamp("2014-10-22 00:00:00"), "colour": "yellow", "orders": np.nan, }, ] ), ), ( pd.DataFrame( { "id": [1, 2, 3], "choice": [5, 6, 7], "c": [9.0, np.nan, 11.0], "d": [ pd.NaT, pd.Timestamp("2015-09-30 00:00:00"), pd.Timestamp("2015-09-29 00:00:00"), ], } ), ["id", "c"], pd.DataFrame( [ {"id": 1, "c": 9.0, "choice": 5.0, "d": pd.NaT}, {"id": 1, "c": np.nan, "choice": np.nan, "d": pd.NaT}, {"id": 1, "c": 11.0, "choice": np.nan, "d": pd.NaT}, {"id": 2, "c": 9.0, "choice": np.nan, "d": pd.NaT}, { "id": 2, "c": np.nan, "choice": 6.0, "d": pd.Timestamp("2015-09-30 00:00:00"), }, {"id": 2, "c": 11.0, "choice": np.nan, "d": pd.NaT}, {"id": 3, "c": 9.0, "choice": np.nan, "d": pd.NaT}, {"id": 3, "c": np.nan, "choice": np.nan, "d": pd.NaT}, { "id": 3, "c": 11.0, "choice": 7.0, "d": pd.Timestamp("2015-09-29 00:00:00"), }, ] ), ), ( pd.DataFrame( { "row": { 0: "22.08.2020", 1: "22.08.2020", 2: "21.08.2020", 3: "21.08.2020", 4: "21.08.2020", 5: "21.08.2020", 6: "22.08.2020", }, "column": { 0: "B", 1: "B", 2: "A", 3: "A", 4: "B", 5: "C", 6: "A", }, "value": {0: 40, 1: 34, 2: 43, 3: 36, 4: 36, 5: 28, 6: 16}, } ), ["row", "column"], pd.DataFrame( [ {"row": "21.08.2020", "column": "A", "value": 43.0}, {"row": "21.08.2020", "column": "A", "value": 36.0}, {"row": "21.08.2020", "column": "B", "value": 36.0}, {"row": "21.08.2020", "column": "C", "value": 28.0}, {"row": "22.08.2020", "column": "A", "value": 16.0}, {"row": "22.08.2020", "column": "B", "value": 40.0}, {"row": "22.08.2020", "column": "B", "value": 34.0}, {"row": "22.08.2020", "column": "C", "value": np.nan}, ] ), ), ( pd.DataFrame( { "row": { 0: "21.08.2020", 1: "21.08.2020", 2: "21.08.2020", 3: "21.08.2020", 4: "22.08.2020", 5: "22.08.2020", 6: "22.08.2020", }, "column": { 0: "A", 1: "A", 2: "B", 3: "C", 
4: "A", 5: "B", 6: "B", }, "value": {0: 43, 1: 36, 2: 36, 3: 28, 4: 16, 5: 40, 6: 34}, } ), ["row", "column"], pd.DataFrame( [ {"row": "21.08.2020", "column": "A", "value": 43.0}, {"row": "21.08.2020", "column": "A", "value": 36.0}, {"row": "21.08.2020", "column": "B", "value": 36.0}, {"row": "21.08.2020", "column": "C", "value": 28.0}, {"row": "22.08.2020", "column": "A", "value": 16.0}, {"row": "22.08.2020", "column": "B", "value": 40.0}, {"row": "22.08.2020", "column": "B", "value": 34.0}, {"row": "22.08.2020", "column": "C", "value": np.nan}, ] ), ), ( pd.DataFrame( [ {"YEAR": 1946, "Region": 1}, {"YEAR": 1946, "Region": 2}, {"YEAR": 1946, "Region": 3}, {"YEAR": 1946, "Region": 5}, {"YEAR": 1947, "Region": 3}, {"YEAR": 1947, "Region": 4}, ] ), ["YEAR", "Region"], pd.DataFrame( [ {"YEAR": 1946, "Region": 1}, {"YEAR": 1946, "Region": 2}, {"YEAR": 1946, "Region": 3}, {"YEAR": 1946, "Region": 4}, {"YEAR": 1946, "Region": 5}, {"YEAR": 1947, "Region": 1}, {"YEAR": 1947, "Region": 2}, {"YEAR": 1947, "Region": 3}, {"YEAR": 1947, "Region": 4}, {"YEAR": 1947, "Region": 5}, ] ), ), ( pd.DataFrame( { "group": [1, 2, 1], "item_id": [1, 2, 2], "item_name": ["a", "b", "b"], "value1": [1, 2, 3], "value2": [4, 5, 6], } ), ["group", "item_id", "item_name"], pd.DataFrame( { "group": [1, 1, 1, 1, 2, 2, 2, 2], "item_id": [1, 1, 2, 2, 1, 1, 2, 2], "item_name": ["a", "b", "a", "b", "a", "b", "a", "b"], "value1": [ 1.0, np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 2.0, ], "value2": [ 4.0, np.nan, np.nan, 6.0, np.nan, np.nan, np.nan, 5.0, ], } ), ), ( pd.DataFrame( { "group": [1, 2, 1], "item_id": [1, 2, 2], "item_name": ["a", "b", "b"], "value1": [1, 2, 3], "value2": [4, 5, 6], } ), ["group", ("item_id", "item_name")], pd.DataFrame( { "group": [1, 1, 2, 2], "item_id": [1, 2, 1, 2], "item_name": ["a", "b", "a", "b"], "value1": [1.0, 3.0, np.nan, 2.0], "value2": [4.0, 6.0, np.nan, 5.0], } ), ), ] @pytest.mark.parametrize("df,columns,output", complete_parameters) def test_complete(df, columns,
""" TODO: Module docstring """ from copy import copy from decimal import Decimal from itertools import repeat from math import acos from math import copysign from math import floor from math import sqrt from typing import Iterator from typing import List from typing import Tuple from typing import Union from astrolib.util.constants import NANOSECONDS_PER_SECOND from astrolib.util.constants import SECONDS_PER_HOUR from astrolib.util.constants import SECONDS_PER_MINUTE from astrolib.util.constants import SECONDS_PER_SOLAR_DAY _DECIMAL_NANOSECONDS_PER_SECOND = Decimal(str(NANOSECONDS_PER_SECOND)) #pylint: disable=invalid-name class Matrix: """ Class represents a two-dimensional matrix of float values. """ @staticmethod def fill(num_rows: int, num_cols: int, fill_value: float) -> 'Matrix': """ TODO: Method docstring """ if (num_rows <= 0) or (num_cols <= 0): raise ValueError("The number of rows or columns must be positive and greater than 0.") fill_value = float(fill_value) return Matrix(A = [list(repeat(fill_value, num_cols)) for _ in range(num_rows)]) @staticmethod def zeros(num_rows: int, num_cols: int = None) -> 'Matrix': """ TODO: Method docstring """ if not num_cols and num_cols != 0.0: num_cols = num_rows return Matrix.fill(num_rows, num_cols, 0.0) @staticmethod def ones(num_rows: int, num_cols: int = None) -> 'Matrix': """ TODO: Method docstring """ if not num_cols and num_cols != 0.0: num_cols = num_rows return Matrix.fill(num_rows, num_cols, 1.0) @staticmethod def identity(dim: int) -> 'Matrix': """ TODO: Method docstring """ A = Matrix.zeros(dim) for i in range(dim): A[i,i] = 1.0 return A @staticmethod def empty() -> 'Matrix': """ TODO: Method docstring """ return Matrix([]) @staticmethod def from_column_matrices(matrices) -> 'Matrix': """ TODO: Method docstring """ if not isinstance(matrices, list): raise ValueError('Input collection must be a list of matrices to concatenate.') for A in matrices: if not isinstance(A, Matrix): raise ValueError('Input collection must be a list of matrices to concatenate.') if A.num_cols != 1: raise ValueError('Each matrix must be a column matrix to concatenate.') return Matrix([row for A in matrices for row in A]) def __init__(self, A: List[List[float]]): num_cols = len(A[0]) if A else 0 for row in A: if len(row) != num_cols: raise ValueError('Each row must have the same number of columns.') self._A = A def __str__(self) -> str: def stringify_row(row: List): return ", ".join([str(x) for x in row]) mat = "\n ".join(stringify_row(row) for row in self._A) return f"[{mat}]" def __repr__(self) -> str: return f"[{'; '.join(', '.join(str(x) for x in row) for row in self)}]" def __setitem__(self, indices: Union[Tuple[int, int], Tuple[int, slice], Tuple[int, slice], Tuple[slice, slice], int, slice], value: Union[int, 'Matrix'] ) -> None: if isinstance(indices, (int, slice)): if self.is_row_matrix(): indices = (0, indices) elif self.is_column_matrix(): indices = (indices, 0) else: raise ValueError("Single-index indexing only supported for row or column matrices.") if isinstance(indices[0], slice) and isinstance(indices[1], slice): # expects Matrix if not isinstance(value, Matrix): raise ValueError("When setting matrix indices using slice notation a Matrix value " "must be used.") if value.is_empty: raise ValueError("When setting matrix indices using slice notation for both the " "row and column indices an empty Matrix value cannot be used.") slice_rows_range = range((indices[0].start or 0), (indices[0].stop or self.num_rows), (indices[0].step or 1)) 
slice_cols_range = range((indices[1].start or 0), (indices[1].stop or self.num_cols), (indices[1].step or 1)) value_rows_range = range(value.num_rows) value_cols_range = range(value.num_cols) for (i,m) in zip(slice_rows_range, value_rows_range): for (j,n) in zip(slice_cols_range, value_cols_range): self._A[i][j] = value[m,n] elif isinstance(indices[0], slice): # expects Matrix if not isinstance(value, Matrix): raise ValueError("When setting matrix indices using slice notation a Matrix value " "must be used.") slice_range = range((indices[0].start or 0), (indices[0].stop or self.num_rows), (indices[0].step or 1)) if value.is_empty: self._A = [[self._A[i][j] for j in range(self.num_cols) if j != indices[1]] for i in slice_range] else: value_range = range(value.num_rows) for (i,j) in zip(slice_range, value_range): self._A[i][indices[1]] = value[j,0] elif isinstance(indices[1], slice): # expects Matrix if not isinstance(value, Matrix): raise ValueError("When setting matrix indices using slice notation a Matrix value " "must be used.") slice_range = range((indices[1].start or 0), (indices[1].stop or self.num_cols), (indices[1].step or 1)) if value.is_empty: self._A = [[self._A[i][j] for j in slice_range] for i in range(self.num_rows) if i != indices[0]] else: value_range = range(value.num_cols) for (i,j) in zip(slice_range, value_range): self._A[indices[0]][i] = value[0,j] else: # expects int or float if not isinstance(value, (float, int)): raise ValueError("When setting matrix indices using direct index notation an int " "or float value must be used.") self._A[indices[0]][indices[1]] = value def __getitem__(self, indices: Union[Tuple[int, int], Tuple[int, slice], Tuple[int, slice], Tuple[slice, slice], int, slice] ) -> Union[float, 'Matrix']: M = None if isinstance(indices, (int, slice)): if self.is_row_matrix(): indices = (0, indices) elif self.is_column_matrix(): indices = (indices, 0) else: raise ValueError("Single-index indexing only supported for row or column matrices.") if isinstance(indices[0], slice) and isinstance(indices[1], slice): # returns Matrix rows_range = range((indices[0].start or 0), (indices[0].stop or self.num_rows), (indices[0].step or 1)) cols_range = range((indices[1].start or 0), (indices[1].stop or self.num_cols), (indices[1].step or 1)) M = Matrix([[self.get_row(i)[j] for j in cols_range] for i in rows_range]) elif isinstance(indices[0], slice): # returns Matrix M = Matrix([[self.get_col(indices[1])[i]] for i in range((indices[0].start or 0), (indices[0].stop or self.num_rows), (indices[0].step or 1))]) elif isinstance(indices[1], slice): # returns Matrix M = Matrix([[self.get_row(indices[0])[i] for i in range((indices[1].start or 0), (indices[1].stop or self.num_cols), (indices[1].step or 1))]]) else: # returns float M = self._A[indices[0]][indices[1]] return M def __iter__(self) -> Iterator[List[float]]: for row in self._A: yield row @property def num_rows(self) -> int: """ TODO: Property docstring """ return len(self._A) @property def num_cols(self) -> int: """ TODO: Property docstring """ return len(self._A[0]) if self._A else 0 @property def size(self) -> Tuple[int, int]: """ TODO: Property docstring """ return self.num_rows, self.num_cols @property def is_empty(self) -> bool: """ TODO: Property docstring """ return not (self.num_rows or self.num_cols) def __eq__(self, other: Union['Matrix', float, int]) -> bool: if not isinstance(other, (Matrix, float, int)): return False if isinstance(other, (float, int)): other = Matrix.fill(*self.size, other) if self.size 
!= other.size: return False for i in range(self.num_rows): for j in range(self.num_cols): if self[i,j] != other[i,j]: return False return True def __lt__(self, other: Union['Matrix', float, int]) -> bool: if not isinstance(other, (Matrix, float, int)): return False if isinstance(other, (float, int)): other = Matrix.fill(*self.size, other) if self.size != other.size: return False for i in range(self.num_rows): for j in range(self.num_cols): if self[i,j] >= other[i,j]: return False return True def __le__(self, other: Union['Matrix', float, int]) -> bool: if not isinstance(other, (Matrix, float, int)): return False if isinstance(other, (float, int)): other = Matrix.fill(*self.size, other) if self.size != other.size: return False for i in range(self.num_rows): for j in range(self.num_cols): if self[i,j] > other[i,j]: return False return True def __add__(self, other: Union['Matrix', float, int]) -> 'Matrix': if not isinstance(other, (Matrix, float, int)): return NotImplemented if isinstance(other, (float, int)): other = Matrix.fill(*self.size, other) if self.size != other.size: raise ValueError("Matrices must be the same size to be added.") M = Matrix.zeros(*self.size) for i in range(self.num_rows): for j in range(self.num_cols): M[i,j] = self[i,j] + other[i,j] return M def __radd__(self, other: Union['Matrix', float, int]) -> 'Matrix': return self.__add__(other) def __sub__(self, other: Union['Matrix', float, int]) -> 'Matrix': return self.__add__(-1.0 * other) def __rsub__(self, other: Union['Matrix', float, int]) -> 'Matrix': return -1.0 * self.__sub__(other) def __mul__(self, other: Union['Matrix', float, int]) -> 'Matrix': if not isinstance(other, (Matrix, float, int)): return NotImplemented if isinstance(other, (float, int)): M = Matrix.zeros(*self.size) for i in range(self.num_rows): for j in range(self.num_cols): M[i,j] = other * self[i,j] elif isinstance(other, Matrix): if other.num_rows != self.num_cols: raise ValueError("Incorrect dimensions for matrix multiplication. Check that the " "number of columns in the first matrix matches the number of " "rows in the second matrix.") M = Matrix.zeros(self.num_rows, other.num_cols) # TODO: Evaluate _inner_product() local function performance vs that of nested for loop for i in range(self.num_rows): for j in range(other.num_cols): M[i,j] = sum([x * y for (x,y) in zip(self.get_row(i), other.get_col(j))]) return M def __rmul__(self, other: Union[float, int]) -> 'Matrix': if not isinstance(other, (float, int)): return NotImplemented return self.__mul__(other) def __abs__(self) -> 'Matrix': M = Matrix.zeros(*self.size) for i in range(self.num_rows): for j in range(self.num_cols): M[i,j] = abs(self[i,j]) return M def __neg__(self) -> 'Matrix': M = Matrix.zeros(*self.size) for i in range(self.num_rows): for j in range(self.num_cols): M[i,j] = -1 * self[i,j] return M def __len__(self) -> int: """ Returns the length of the calling Matrix, defined as the maximum dimension. Returns: int: The maximum dimension of the calling Matrix, i.e. max(num_rows, num_cols) """ return int(max(self.size)) def
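# A brief usage sketch of the Matrix class defined above, restricted to the
# constructors and operators shown (matrix multiplication also relies on the
# get_row/get_col helpers referenced in __mul__, which are assumed to be
# defined further down in the original module).
A = Matrix([[1.0, 2.0],
            [3.0, 4.0]])
I = Matrix.identity(2)

print(A.size)                   # (2, 2)
print(A[0, 1])                  # 2.0 -- direct indexing returns a float
print(A * I)                    # matrix product; equals A
print(2.0 * A)                  # scalar multiplication via __rmul__
print(A + Matrix.ones(2, 2))    # elementwise addition

col = Matrix([[5.0], [6.0]])
print(Matrix.from_column_matrices([col, col]))   # 4x1 stacked column matrix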
ansatz from the cost and mixer Hamiltonians def qaoa_layer(gamma, alpha): qaoa.cost_layer(gamma, cost_h) qaoa.mixer_layer(alpha, mixer_h) # Creates the actual quantum circuit for the QAOA algorithm def circuit(params, **kwargs): for w in wires: qml.Hadamard(wires=w) qml.layer(qaoa_layer, n_layers, params[0], params[1]) # Evaluates the cost Hamiltonian def hamiltonian(params, **kwargs): """Evaluate the MaxCut cost of the QAOA, given the angles for the circuit and the specified graph. Args: params (list): List of angles for the QAOA circuit, with shape (n_layers x 2). params has the shape [angles_1, ..., angles_(n_layers)], where angles_i = [alphas_i, gammas_i]. Returns: cost (function): Cost Hamiltonian evaluated with the input angles """ # We set the default.qubit.tf device for seamless integration with TensorFlow dev = qml.device("default.qubit.tf", wires=len(graph.nodes)) # ExpvalCost evaluates the expectation value of an operator cost = qml.ExpvalCost(circuit, cost_h, dev, interface="tf", diff_method="backprop") return cost(params) return hamiltonian ###################################################################### # Before moving on, let’s see how to use these functions. # # Create an instance of a QAOA circuit given a graph. cost = qaoa_from_graph(graph=graphs[0], n_layers=1) # Define some paramenters # Since we use only one layer in QAOA, params have the shape 1 x 2, in the form [[alpha, gamma]]. # If not specificed, TensorFlow converts Python floating point numbers to tf.float32. x = tf.Variable([[0.5], [0.5]], dtype=tf.float32) # Evaluate th QAOA instance just created with some angles. cost(x) ###################################################################### # Now we optimize the parameters to reduce the cost function ``cost``. We # can do this using ``tf.GradientTape()``, which works directly thanks to # PennyLane’s seamless integration with TensorFlow when using the # ``default.qubit.tf`` device. # # .. note:: # We use the ``Adam`` optimizer for its ease of use and good # performances on most optimization problems. Feel free to try other # optimizers too. # x = tf.Variable([[0.5], [0.5]], dtype=tf.float32) # Select an optimizer opt = tf.keras.optimizers.Adam(learning_rate=0.1) # Choose the number of optimization steps step = 15 # Start of optimization (or training) for _ in range(step): with tf.GradientTape() as tape: loss = cost(x) gradients = tape.gradient(loss, [x]) opt.apply_gradients(zip(gradients, [x])) print(f"Step {_+1} - Loss = {loss}") print(f"\nFinal cost function: {cost(x).numpy()}\nOptimized angles: {x.numpy()}") ###################################################################### # As you can see, the loss function is correctly minimized throughout the # optimization process, which eventually yields an optimal value for the # angles in the QAOA ansatz. # ###################################################################### # Recurrent Neural Network: LSTM # ------------------------------ # # So far, we have defined the machinery which lets us build the QAOA # algorithm for solving the MaxCut problem of a graph. # Now we wish to implement the Recurrent Neural Network architecture # explained previously. In particular, as proposed in the original # paper, we will build a custom model of an LSTM (Long-Short Term # Memory) network, capable of handling the hybrid data passing between # classical and quantum procedures. For this task, we will use ``Keras`` # and ``TensorFlow``. 
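######################################################################
# Before moving on to the LSTM cell, note that the snippets above reference
# ``graphs``, ``wires``, ``cost_h`` and ``mixer_h`` without showing how they
# are built.  A hedged sketch of one way to construct them with ``networkx``
# and PennyLane's ``qaoa`` module -- this is an assumption, and the original
# tutorial may create them differently.
#
import networkx as nx
from pennylane import qaoa

# Hypothetical batch of small random graphs to generate training instances.
graphs = [nx.gnp_random_graph(n=7, p=0.5, seed=seed) for seed in range(10)]

# For a single graph, the MaxCut cost and mixer Hamiltonians consumed by
# qaoa.cost_layer / qaoa.mixer_layer can be obtained directly:
graph = graphs[0]
cost_h, mixer_h = qaoa.maxcut(graph)
wires = range(len(graph.nodes))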
# ###################################################################### # First of all, let’s define the elemental building block of the model, # that is an LSTM cell (see `TensorFlow # documentation <https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTMCell>`__ # for further details). # # Number of layers in QAOA ansatz. The higher the better in terms of performance, # but it also gets more computationally hard. For simplicity, we will stick to the single layer case. n_layers = 1 # Define a single LSTM cell. The cell has two units per layer # since each layer in the QAOA ansatz make use of two angles (parameters). cell = tf.keras.layers.LSTMCell(2 * n_layers) ###################################################################### # Using the ``qaoa_from_graph`` function, we create a list # ``graph_cost_list`` containing the cost functions of a set of graphs. # You can see this as a preprocessing step of the data. # # We create the QAOA MaxCut cost functions of some graphs graph_cost_list = [qaoa_from_graph(g) for g in graphs] ###################################################################### # At this stage, we seek to reproduce the recurrent behavior depicted in # the picture above, outlining the functioning of RNN as a black-box # optimizer. We do so by defining two functions: # # * ``rnn_iteration``: accounts for the computations happening on a single time step in the figure, # that is it performs the calculation inside the CPU and evaluates the quantum circuit on the QPU to obtain # the loss function for the current parameters. # # * ``recurrent_loop``: as the name suggests, it accounts for the creation of the recurrent loop # of the model. In particular, it makes consecutive calls to the ``rnn_iteration`` function, # where the outputs of a previous call are fed as inputs of the next call. # def rnn_iteration(inputs, graph_cost, n_layers=1): """Perform a single time step in the computational graph of the custom RNN. The function takes as inputs, the outputs from a previous time step and a graph cost function, and then goes through the Keras' LSTM cell. Its output is used to evaluate the QAOA MaxCut cost function. Args: inputs (list): List of inputs coming from the previous timestep: cost function, parameters, first LSTM hidden state, second LSTM hidden state. graph_cost (function): Cost function of a graph coming from the qaoa_from_graph function Returns: list: A list containing new values for the cost, parameters, and hidden states. """ # Unpack the input list containing the previous cost, parameters, and hidden states (denoted as 'h' and 'c'). prev_cost = inputs[0] prev_params = inputs[1] prev_h = inputs[2] prev_c = inputs[3] # Concatenate the previous parameters and previous cost to create new input new_input = tf.keras.layers.concatenate([prev_cost, prev_params]) # Call the LSTM cell, which outputs new values for the parameters along with new internal states h and c new_params, [new_h, new_c] = cell(new_input, states=[prev_h, prev_c]) # Reshape the parameters to correctly match those expected by PennyLane _params = tf.reshape(new_params, shape=(2, n_layers)) # Evaluate the cost using new angles _cost = graph_cost(_params) # Reshape to be consistent with other tensors new_cost = tf.reshape(tf.cast(_cost, dtype=tf.float32), shape=(1, 1)) return [new_cost, new_params, new_h, new_c] def recurrent_loop(graph_cost, n_layers=1, intermediate_steps=False): """Creates the recurrent loop for the Recurrent Neural Network. 
The recurrent loops are created manually by connecting consecutive iterations calls to the custom LSTM model created in `rnn_iteration`. Args: graph_cost (function): Cost function of a graph coming from the qaoa_from_graph function intermediate_steps (bool): Boolean flag, set to True if you wish to store values from intermediate iterations Returns: loss (tf.Tensor): Loss function driving the minimization process """ # Initialize starting all inputs (cost, parameters, hidden states) as zeros. initial_cost = tf.zeros(shape=(1, 1)) initial_params = tf.zeros(shape=(1, 2 * n_layers)) initial_h = tf.zeros(shape=(1, 2 * n_layers)) initial_c = tf.zeros(shape=(1, 2 * n_layers)) # We perform five consecutive calls to 'rnn_iteration', thus creating the recurrent loop. # More iterations lead to better results, at the cost of more computationally intensive simulations. out0 = rnn_iteration([initial_cost, initial_params, initial_h, initial_c], graph_cost) out1 = rnn_iteration(out0, graph_cost) out2 = rnn_iteration(out1, graph_cost) out3 = rnn_iteration(out2, graph_cost) out4 = rnn_iteration(out3, graph_cost) # This cost function takes into account the cost from all iteration, but using different weights. loss = tf.keras.layers.average( [0.1 * out0[0], 0.2 * out1[0], 0.3 * out2[0], 0.4 * out3[0], 0.5 * out4[0]] ) if intermediate_steps: return [out0[1], out1[1], out2[1], out3[1], out4[1], loss] else: return loss ###################################################################### # The cost function # ~~~~~~~~~~~~~~~~~~~~~~ # # A key part in the ``recurrent_loop`` function, is given by the # definition of the variable ``loss``. In order to drive the learning # procedure of the weights in the LSTM cell, a cost function is needed. # While in the original paper the authors suggest to use a measure called # *observed improvement*, for simplicity here we use an easier cost # function :math:`\cal{L}(\phi)` defined as: # # .. math:: \cal{L}(\phi) = {\bf w} \cdot {\bf y}_t(\phi) # # where :math:`{\bf y}_t(\phi) = (y_1, \cdots, y_5)` contains the # Hamiltonian cost functions from all iterations, and :math:`{\bf w}` are # just some coefficients weighting the different steps in the recurrent # loop. In this case, we used :math:`{\bf w}=(0.1, 0.2, 0.3, 0.4, 0.5)`, # to give more importance to the last steps rather than the initial steps. # Intuitively in this way the RNN is more free (low coefficient) to # explore a larger portion of parameter space during the first steps of # optimization, while it is constrained (high coefficient) to select an # optimal solution towards the end of the procedure. Note that one could # also use just the final cost function from the last iteration to drive # the training procedure of the RNN. However,
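######################################################################
# The excerpt stops before showing how the LSTM cell's own weights are trained
# against this loss.  A hedged sketch of one possible outer training loop -- it
# reuses ``cell``, ``graph_cost_list`` and ``recurrent_loop`` defined above,
# and the choice of Adam and five epochs is an assumption rather than the
# original tutorial's exact procedure.
#
import tensorflow as tf

opt = tf.keras.optimizers.Adam(learning_rate=0.01)

for epoch in range(5):
    total_loss = 0.0
    for graph_cost in graph_cost_list:
        with tf.GradientTape() as tape:
            loss = recurrent_loop(graph_cost)
        # The trainable variables here are the LSTM cell's weights; the QAOA
        # angles are outputs of the cell, not directly optimized variables.
        grads = tape.gradient(loss, cell.trainable_weights)
        opt.apply_gradients(zip(grads, cell.trainable_weights))
        total_loss += loss.numpy().item()
    print(f"Epoch {epoch + 1} - mean loss = {total_loss / len(graph_cost_list):.4f}")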
<reponame>beenfhb/AlphaGo # -*- coding: utf-8 -*- """Tests for sgf.py.""" import unittest from textwrap import dedent from betago import gosgf SAMPLE_SGF = b"""\ (;AP[testsuite:0]CA[utf-8]DT[2009-06-06]FF[4]GM[1]KM[7.5]PB[Black engine] PL[B]PW[White engine]RE[W+R]SZ[9]AB[ai][bh][ee]AW[fc][gc];B[dg];W[ef]C[comment on two lines];B[];W[tt]C[Final comment]) """ SAMPLE_SGF_VAR = b"""\ (;AP[testsuite:0]CA[utf-8]DT[2009-06-06]FF[4]GM[1]KM[7.5]PB[Black engine] PL[B]RE[W+R]SZ[9]AB[ai][bh][ee]AW[fd][gc]VW[] ;B[dg] ;W[ef]C[comment on two lines] ;B[] ;C[Nonfinal comment]VW[aa:bb] (;B[ia];W[ib];B[ic]) (;B[ib];W[ic] (;B[id]) (;B[ie]) )) """ class SgfTestCase(unittest.TestCase): def test_new_sgf_game(self): g1 = gosgf.Sgf_game(9) self.assertEqual(g1.get_size(), 9) root = g1.get_root() self.assertEqual(root.get_raw(b'FF'), b'4') self.assertEqual(root.get_raw(b'GM'), b'1') self.assertEqual(root.get_raw(b'SZ'), b'9') self.assertEqual(root.get_raw_property_map(), { b'FF': [b'4'], b'GM': [b'1'], b'SZ': [b'9'], b'CA': [b'UTF-8'], }); self.assertEqual(list(root), []) self.assertEqual(root.parent, None) self.assertIs(root.owner, g1) def test_sgf_game_from_coarse_game_tree(self): class Namespace(object): pass coarse_game = Namespace() coarse_game.sequence = [{b'SZ' : [b"9"]}, {b'B' : [b"aa"]}] coarse_game.children = [] g1 = gosgf.Sgf_game.from_coarse_game_tree(coarse_game) self.assertEqual(g1.get_size(), 9) root = g1.get_root() self.assertIs(root.get_raw_property_map(), coarse_game.sequence[0]) self.assertEqual(root.parent, None) self.assertIs(root.owner, g1) self.assertEqual(len(root), 1) coarse_game2 = Namespace() coarse_game2.sequence = [{b'SZ' : [b"0"]}, {b'B' : [b"aa"]}] coarse_game2.children = [] self.assertRaisesRegexp(ValueError, "size out of range: 0", gosgf.Sgf_game.from_coarse_game_tree, coarse_game2) def test_sgf_game_from_string(self): g1 = gosgf.Sgf_game.from_string(b"(;)") self.assertEqual(g1.get_size(), 19) self.assertRaisesRegexp(ValueError, "unexpected end of SGF data", gosgf.Sgf_game.from_string, b"(;SZ[9]") g2 = gosgf.Sgf_game.from_string(b"(;SZ[9])") self.assertEqual(g2.get_size(), 9) self.assertRaisesRegexp(ValueError, "bad SZ property", gosgf.Sgf_game.from_string, b"(;SZ[a])") self.assertRaisesRegexp(ValueError, "size out of range", gosgf.Sgf_game.from_string, b"(;SZ[27])") self.assertRaisesRegexp(ValueError, "unknown encoding", gosgf.Sgf_game.from_string, b"(;CA[])") def test_node(self): sgf_game = gosgf.Sgf_game.from_string( r"(;KM[6.5]C[sample\: comment]AB[ai][bh][ee]AE[];B[dg])".encode('ascii')) node0 = sgf_game.get_root() node1 = list(sgf_game.main_sequence_iter())[1] self.assertEqual(node0.get_size(), 19) self.assertEqual(node0.get_encoding(), "ISO-8859-1") self.assertIs(node0.has_property(b'KM'), True) self.assertIs(node0.has_property(b'XX'), False) self.assertIs(node1.has_property(b'KM'), False) self.assertEqual(set(node0.properties()), set([b"KM", b"C", b"AB", b"AE"])) self.assertEqual(set(node1.properties()), set([b"B"])) self.assertEqual(node0.get_raw(b'C'), r"sample\: comment".encode('ascii')) self.assertEqual(node0.get_raw(b'AB'), b"ai") self.assertEqual(node0.get_raw(b'AE'), b"") self.assertRaises(KeyError, node0.get_raw, b'XX') self.assertEqual(node0.get_raw_list(b'KM'), [b'6.5']) self.assertEqual(node0.get_raw_list(b'AB'), [b'ai', b'bh', b'ee']) self.assertEqual(node0.get_raw_list(b'AE'), [b'']) self.assertRaises(KeyError, node0.get_raw_list, b'XX') self.assertRaises(KeyError, node0.get_raw, b'XX') def test_property_combination(self): sgf_game = 
gosgf.Sgf_game.from_string(b"(;XX[1]YY[2]XX[3]YY[4])") node0 = sgf_game.get_root() self.assertEqual(node0.get_raw_list(b"XX"), [b"1", b"3"]) self.assertEqual(node0.get_raw_list(b"YY"), [b"2", b"4"]) def test_node_get(self): sgf_game = gosgf.Sgf_game.from_string(dedent(r""" (;AP[testsuite:0]CA[utf-8]DT[2009-06-06]FF[4]GM[1]KM[7.5]PB[Black engine] PL[B]PW[White engine][xs]RE[W+R]SZ[9]AB[ai][bh][ee]AW[fd][gc]AE[]BM[2]VW[] EV[Test event] C[123:\) abc] YY[none sense] ;B[dg]KO[]AR[ab:cd][de:fg]FG[515:first move] LB[ac:lbl][bc:lbl2]) """).encode('utf-8')) root = sgf_game.get_root() node1 = list(sgf_game.main_sequence_iter())[1] self.assertRaises(KeyError, root.get, b'XX') self.assertEqual(root.get(b'C'), b"123:)\nabc") # Text self.assertEqual(root.get(b'EV'), b"Test event") # Simpletext self.assertEqual(root.get(b'BM'), 2) # Double self.assertEqual(root.get(b'YY'), b"none\nsense") # unknown (Text) self.assertIs(node1.get(b'KO'), True) # None self.assertEqual(root.get(b'KM'), 7.5) # Real self.assertEqual(root.get(b'GM'), 1) # Number self.assertEqual(root.get(b'PL'), 'b') # Color self.assertEqual(node1.get(b'B'), (2, 3)) # Point self.assertEqual(root.get(b'AB'), set([(0, 0), (1, 1), (4, 4)])) # List of Point self.assertEqual(root.get(b'VW'), set()) # Empty elist self.assertEqual(root.get(b'AP'), (b"testsuite", b"0")) # Application self.assertEqual(node1.get(b'AR'), [((7, 0), (5, 2)), ((4, 3), (2, 5))]) # Arrow self.assertEqual(node1.get(b'FG'), (515, b"first move")) # Figure self.assertEqual(node1.get(b'LB'), [((6, 0), b"lbl"), ((6, 1), b"lbl2")]) # Label # Check we (leniently) treat lists like elists on read self.assertEqual(root.get(b'AE'), set()) self.assertRaisesRegexp(ValueError, "multiple values", root.get, b'PW') def test_text_values(self): def check(s): sgf_game = gosgf.Sgf_game.from_string(s.encode('ascii')) return sgf_game.get_root().get(b"C").decode('ascii') # Round-trip check of Text values through tokeniser, parser, and # text_value(). 
self.assertEqual(check(r"(;C[abc]KO[])"), r"abc") self.assertEqual(check(r"(;C[a\\bc]KO[])"), r"a\bc") self.assertEqual(check(r"(;C[a\\bc\]KO[])"), r"a\bc]KO[") self.assertEqual(check(r"(;C[abc\\]KO[])"), r"abc" + "\\") self.assertEqual(check(r"(;C[abc\\\]KO[])"), r"abc\]KO[") self.assertEqual(check(r"(;C[abc\\\\]KO[])"), r"abc" + "\\\\") self.assertEqual(check(r"(;C[abc\\\\\]KO[])"), r"abc\\]KO[") self.assertEqual(check(r"(;C[xxx :\) yyy]KO[])"), r"xxx :) yyy") self.assertEqual(check("(;C[ab\\\nc])"), "abc") self.assertEqual(check("(;C[ab\nc])"), "ab\nc") def test_node_string(self): sgf_game = gosgf.Sgf_game.from_string(SAMPLE_SGF) node = sgf_game.get_root() self.assertMultiLineEqual(str(node), dedent("""\ AB[ai][bh][ee] AP[testsuite:0] AW[fc][gc] CA[utf-8] DT[2009-06-06] FF[4] GM[1] KM[7.5] PB[Black engine] PL[B] PW[White engine] RE[W+R] SZ[9] """)) def test_node_get_move(self): sgf_game = gosgf.Sgf_game.from_string(SAMPLE_SGF) nodes = list(sgf_game.main_sequence_iter()) self.assertEqual(nodes[0].get_move(), (None, None)) self.assertEqual(nodes[1].get_move(), ('b', (2, 3))) self.assertEqual(nodes[2].get_move(), ('w', (3, 4))) self.assertEqual(nodes[3].get_move(), ('b', None)) self.assertEqual(nodes[4].get_move(), ('w', None)) def test_node_get_setup_stones(self): sgf_game = gosgf.Sgf_game.from_string( r"(;KM[6.5]SZ[9]C[sample\: comment]AB[ai][bh][ee]AE[bb];B[dg])".encode('utf-8')) node0 = sgf_game.get_root() node1 = list(sgf_game.main_sequence_iter())[1] self.assertIs(node0.has_setup_stones(), True) self.assertIs(node1.has_setup_stones(), False) self.assertEqual(node0.get_setup_stones(), (set([(0, 0), (1, 1), (4, 4)]), set(), set([(7, 1)]))) self.assertEqual(node1.get_setup_stones(), (set(), set(), set())) def test_sgf_game(self): sgf_game = gosgf.Sgf_game.from_string(SAMPLE_SGF_VAR) nodes = list(sgf_game.main_sequence_iter()) self.assertEqual(sgf_game.get_size(), 9) self.assertEqual(sgf_game.get_komi(), 7.5) self.assertIs(sgf_game.get_handicap(), None) self.assertEqual(sgf_game.get_player_name('b'), "Black engine") self.assertIs(sgf_game.get_player_name('w'), None) self.assertEqual(sgf_game.get_winner(), 'w') self.assertEqual(nodes[2].get(b'C'), b"comment\non two lines") self.assertEqual(nodes[4].get(b'C'), b"Nonfinal comment") g2 = gosgf.Sgf_game.from_string(b"(;)") self.assertEqual(g2.get_size(), 19) self.assertEqual(g2.get_komi(), 0.0) self.assertIs(g2.get_handicap(), None) self.assertIs(g2.get_player_name('b'), None) self.assertIs(g2.get_player_name('w'), None) self.assertEqual(g2.get_winner(), None) def test_tree_view(self): sgf_game = gosgf.Sgf_game.from_string(SAMPLE_SGF_VAR) root = sgf_game.get_root() self.assertIsInstance(root, gosgf.Tree_node) self.assertIs(root.parent, None) self.assertIs(root.owner, sgf_game) self.assertEqual(len(root), 1) self.assertEqual(root[0].get_raw(b'B'), b"dg") self.assertTrue(root) self.assertEqual(root.index(root[0]), 0) branchnode = root[0][0][0][0] self.assertIsInstance(branchnode, gosgf.Tree_node) self.assertIs(branchnode.parent, root[0][0][0]) self.assertIs(branchnode.owner, sgf_game) self.assertEqual(len(branchnode), 2) self.assertIs(branchnode[1], branchnode[-1]) self.assertEqual(branchnode[:1], [branchnode[0]]) self.assertEqual([node for node in branchnode], [branchnode[0], branchnode[1]]) with self.assertRaises(IndexError): branchnode[2] self.assertEqual(branchnode[0].get_raw(b'B'), b"ia") self.assertEqual(branchnode[1].get_raw(b'B'), b"ib") self.assertEqual(branchnode.index(branchnode[0]), 0) 
self.assertEqual(branchnode.index(branchnode[1]), 1) self.assertEqual(len(branchnode[1][0]), 2) leaf = branchnode[1][0][1] self.assertIs(leaf.parent, branchnode[1][0]) self.assertEqual(len(leaf), 0) self.assertFalse(leaf) self.assertIs(sgf_game.get_last_node(), root[0][0][0][0][0][0][0]) # check nothing breaks when first retrieval is by index game2 = gosgf.Sgf_game.from_string(SAMPLE_SGF) root2 = game2.get_root() self.assertEqual(root2[0].get_raw(b'B'), b"dg") def test_serialise(self): # Doesn't cover transcoding sgf_game = gosgf.Sgf_game.from_string(SAMPLE_SGF_VAR) serialised = sgf_game.serialise() self.assertEqual(serialised, dedent("""\ (;FF[4]AB[ai][bh][ee]AP[testsuite:0]AW[fd][gc]CA[utf-8]DT[2009-06-06]GM[1] KM[7.5]PB[Black engine]PL[B]RE[W+R]SZ[9]VW[];B[dg];C[comment on two lines]W[ef] ;B[];C[Nonfinal comment]VW[aa:bb](;B[ia];W[ib];B[ic])(;B[ib];W[ic](;B[id])(; B[ie]))) """).encode('utf-8')) sgf_game2 = gosgf.Sgf_game.from_string(serialised) self.assertEqual([str(x) for x in sgf_game.get_main_sequence()], [str(x) for x in sgf_game2.get_main_sequence()]) def test_serialise_wrap(self): sgf_game = gosgf.Sgf_game.from_string(SAMPLE_SGF_VAR) serialised = sgf_game.serialise(wrap=None) self.assertEqual(serialised, dedent("""\ (;FF[4]AB[ai][bh][ee]AP[testsuite:0]AW[fd][gc]CA[utf-8]DT[2009-06-06]GM[1]KM[7.5]PB[Black engine]PL[B]RE[W+R]SZ[9]VW[];B[dg];C[comment on two lines]W[ef];B[];C[Nonfinal comment]VW[aa:bb](;B[ia];W[ib];B[ic])(;B[ib];W[ic](;B[id])(;B[ie]))) """).encode('ascii')) sgf_game2 = gosgf.Sgf_game.from_string(serialised) seq1 = [str(x) for x in sgf_game.get_main_sequence()] seq2 = [str(x) for x in sgf_game2.get_main_sequence()] self.assertEqual(seq1, seq2) def test_encoding(self): g1 = gosgf.Sgf_game(19) self.assertEqual(g1.get_charset(), "UTF-8") root = g1.get_root() self.assertEqual(root.get_encoding(), "UTF-8") root.set(b"C", u"£".encode('utf-8')) self.assertEqual(root.get(b"C"), u"£".encode('utf-8')) self.assertEqual(root.get_raw(b"C"), u"£".encode('utf-8')) self.assertEqual(g1.serialise(), dedent(u"""\ (;FF[4]C[£]CA[UTF-8]GM[1]SZ[19]) """).encode('utf-8')) g2 = gosgf.Sgf_game(19, encoding="iso-8859-1") self.assertEqual(g2.get_charset(), "ISO-8859-1") root = g2.get_root() self.assertEqual(root.get_encoding(), "ISO-8859-1") root.set(b"C", u"£".encode('utf-8')) self.assertEqual(root.get(b"C"), u"£".encode('utf-8')) self.assertEqual(root.get_raw(b"C"), b"\xa3") self.assertEqual(g2.serialise(), b"(;FF[4]C[\xa3]CA[ISO-8859-1]GM[1]SZ[19])\n") self.assertRaisesRegexp(ValueError, "unknown encoding", gosgf.Sgf_game, 19, "unknownencoding") def test_loaded_sgf_game_encoding(self): g1 = gosgf.Sgf_game.from_string(u""" (;FF[4]C[£]CA[utf-8]GM[1]SZ[19]) """.encode('utf-8')) self.assertEqual(g1.get_charset(), "UTF-8") root = g1.get_root() self.assertEqual(root.get_encoding(), "UTF-8") self.assertEqual(root.get(b"C"), u"£".encode('utf-8')) self.assertEqual(root.get_raw(b"C"), u"£".encode('utf-8')) self.assertEqual(g1.serialise(), dedent(u"""\ (;FF[4]C[£]CA[utf-8]GM[1]SZ[19]) """).encode('utf-8')) g2 = gosgf.Sgf_game.from_string(b""" (;FF[4]C[\xa3]CA[iso-8859-1]GM[1]SZ[19]) """) self.assertEqual(g2.get_charset(), "ISO-8859-1") root = g2.get_root() self.assertEqual(root.get_encoding(), "ISO-8859-1") self.assertEqual(root.get(b"C"), u"£".encode('utf-8')) self.assertEqual(root.get_raw(b"C"), b"\xa3") self.assertEqual(g2.serialise(), dedent(u"""\ (;FF[4]C[£]CA[iso-8859-1]GM[1]SZ[19]) """).encode('iso-8859-1')) g3 = gosgf.Sgf_game.from_string(b""" (;FF[4]C[\xa3]GM[1]SZ[19]) """) 
self.assertEqual(g3.get_charset(), "ISO-8859-1") root = g3.get_root() self.assertEqual(root.get_encoding(), "ISO-8859-1") self.assertEqual(root.get(b"C"), u"£".encode('utf-8')) self.assertEqual(root.get_raw(b"C"), b"\xa3") self.assertEqual(g3.serialise(), dedent(u"""\ (;FF[4]C[£]GM[1]SZ[19]) """).encode('iso-8859-1')) # This is invalidly encoded. get() notices, but serialise() doesn't care. g4 = gosgf.Sgf_game.from_string(b""" (;FF[4]C[\xa3]CA[utf-8]GM[1]SZ[19]) """) self.assertEqual(g4.get_charset(), "UTF-8") root = g4.get_root() self.assertEqual(root.get_encoding(), "UTF-8") self.assertRaises(UnicodeDecodeError, root.get, b"C") self.assertEqual(root.get_raw(b"C"), b"\xa3") self.assertEqual(g4.serialise(), b"""(;FF[4]C[\xa3]CA[utf-8]GM[1]SZ[19])\n""") self.assertRaisesRegexp( ValueError, "unknown encoding", gosgf.Sgf_game.from_string, b""" (;FF[4]CA[unknownencoding]GM[1]SZ[19]) """) def test_override_encoding(self): g1 = gosgf.Sgf_game.from_string(u""" (;FF[4]C[£]CA[iso-8859-1]GM[1]SZ[19]) """.encode('utf-8'), override_encoding="utf-8") root = g1.get_root() self.assertEqual(root.get_encoding(), "UTF-8") self.assertEqual(root.get(b"C"), u"£".encode('utf-8')) self.assertEqual(root.get_raw(b"C"), u"£".encode('utf-8')) self.assertEqual(g1.serialise(), dedent(u"""\ (;FF[4]C[£]CA[UTF-8]GM[1]SZ[19]) """).encode('utf-8')) g2 = gosgf.Sgf_game.from_string(b""" (;FF[4]C[\xa3]CA[utf-8]GM[1]SZ[19]) """, override_encoding="iso-8859-1") root = g2.get_root() self.assertEqual(root.get_encoding(), "ISO-8859-1") self.assertEqual(root.get(b"C"), u"£".encode('utf-8')) self.assertEqual(root.get_raw(b"C"), b'\xa3') self.assertEqual(g2.serialise().strip(), b"""(;FF[4]C[\xa3]CA[ISO-8859-1]GM[1]SZ[19])""") def test_serialise_transcoding(self): g1 = gosgf.Sgf_game.from_string(u""" (;FF[4]C[£]CA[utf-8]GM[1]SZ[19]) """.encode('utf-8')) self.assertEqual(g1.serialise(), dedent(u"""\ (;FF[4]C[£]CA[utf-8]GM[1]SZ[19]) """).encode('utf-8')) g1.get_root().set(b"CA", b"latin-1") self.assertEqual(g1.serialise(), dedent(u"""\ (;FF[4]C[£]CA[latin-1]GM[1]SZ[19]) """).encode('latin-1')) g1.get_root().set(b"CA", b"unknown") self.assertRaisesRegexp(ValueError, "unsupported charset", g1.serialise) # improperly-encoded from the start g2 = gosgf.Sgf_game.from_string(u""" (;FF[4]C[£]CA[ascii]GM[1]SZ[19]) """.encode('utf-8')) self.assertEqual(g2.serialise(), dedent(u"""\ (;FF[4]C[£]CA[ascii]GM[1]SZ[19]) """).encode('utf-8')) g2.get_root().set(b"CA", b"utf-8") self.assertRaises(UnicodeDecodeError, g2.serialise) g3 = gosgf.Sgf_game.from_string(u""" (;FF[4]C[Δ]CA[utf-8]GM[1]SZ[19]) """.encode('utf-8')) g3.get_root().unset(b"CA") self.assertRaises(UnicodeEncodeError, g3.serialise) def test_tree_mutation(self): sgf_game = gosgf.Sgf_game(9) root = sgf_game.get_root() n1 = root.new_child() n1.set(b"N", b"n1") n2 = root.new_child() n2.set(b"N", b"n2") n3 = n1.new_child() n3.set(b"N", b"n3") n4 = root.new_child(1) n4.set(b"N", b"n4") self.assertEqual( sgf_game.serialise(), b"(;FF[4]CA[UTF-8]GM[1]SZ[9](;N[n1];N[n3])(;N[n4])(;N[n2]))\n") self.assertEqual( [node.get_raw_property_map() for node in sgf_game.main_sequence_iter()], [node.get_raw_property_map() for node in (root, root[0], n3)]) self.assertIs(sgf_game.get_last_node(), n3) n1.delete() self.assertEqual( sgf_game.serialise(), b"(;FF[4]CA[UTF-8]GM[1]SZ[9](;N[n4])(;N[n2]))\n") self.assertRaises(ValueError, root.delete) def test_tree_mutation_from_coarse_game(self): sgf_game = gosgf.Sgf_game.from_string(b"(;SZ[9](;N[n1];N[n3])(;N[n2]))") root = sgf_game.get_root() n4 = root.new_child() 
n4.set(b"N", b"n4") n3 = root[0][0] self.assertEqual(n3.get(b"N"), b"n3") n5 = n3.new_child() n5.set(b"N", b"n5") self.assertEqual(sgf_game.serialise(), b"(;SZ[9](;N[n1];N[n3];N[n5])(;N[n2])(;N[n4]))\n") self.assertEqual( [node.get_raw_property_map() for node in sgf_game.main_sequence_iter()], [node.get_raw_property_map() for node in (root, root[0], n3, n5)]) self.assertIs(sgf_game.get_last_node(), n5) n3.delete() self.assertEqual(sgf_game.serialise(), b"(;SZ[9](;N[n1])(;N[n2])(;N[n4]))\n") self.assertRaises(ValueError, root.delete) def test_tree_new_child_with_unexpanded_root_and_index(self): sgf_game = gosgf.Sgf_game.from_string(b"(;SZ[9](;N[n1];N[n3])(;N[n2]))") root = sgf_game.get_root() n4 = root.new_child(2) n4.set(b"N", b"n4") self.assertEqual(sgf_game.serialise(), b"(;SZ[9](;N[n1];N[n3])(;N[n2])(;N[n4]))\n") def test_reparent(self): g1 = gosgf.Sgf_game.from_string(b"(;SZ[9](;N[n1];N[n3])(;N[n2]))") root = g1.get_root() # Test with unexpanded root self.assertRaisesRegexp(ValueError, "would create a loop", root.reparent, root) n1 = root[0] n2 = root[1] n3 = root[0][0] self.assertEqual(n1.get(b"N"), b"n1") self.assertEqual(n2.get(b"N"), b"n2") self.assertEqual(n3.get(b"N"), b"n3") n3.reparent(n2) self.assertEqual(g1.serialise(), b"(;SZ[9](;N[n1])(;N[n2];N[n3]))\n") n3.reparent(n2) self.assertEqual(g1.serialise(), b"(;SZ[9](;N[n1])(;N[n2];N[n3]))\n") self.assertRaisesRegexp(ValueError, "would create a loop", root.reparent, n3) self.assertRaisesRegexp(ValueError, "would create a loop", n3.reparent, n3) g2 = gosgf.Sgf_game(9) self.assertRaisesRegexp( ValueError, "new parent doesn't belong to the same game", n3.reparent, g2.get_root()) def test_reparent_index(self): g1 = gosgf.Sgf_game.from_string(b"(;SZ[9](;N[n1];N[n3])(;N[n2]))") root = g1.get_root() n1 = root[0] n2 = root[1] n3 = root[0][0] self.assertEqual(n1.get(b"N"), b"n1") self.assertEqual(n2.get(b"N"), b"n2") self.assertEqual(n3.get(b"N"), b"n3") n3.reparent(root, index=1) self.assertEqual(g1.serialise(), b"(;SZ[9](;N[n1])(;N[n3])(;N[n2]))\n") n3.reparent(root, index=1) self.assertEqual(g1.serialise(), b"(;SZ[9](;N[n1])(;N[n3])(;N[n2]))\n") n3.reparent(root, index=2) self.assertEqual(g1.serialise(), b"(;SZ[9](;N[n1])(;N[n2])(;N[n3]))\n") def test_extend_main_sequence(self): g1 = gosgf.Sgf_game(9) for i in range(6): g1.extend_main_sequence().set(b"N", ("e%d" % i).encode('ascii')) self.assertEqual( g1.serialise(), b"(;FF[4]CA[UTF-8]GM[1]SZ[9];N[e0];N[e1];N[e2];N[e3];N[e4];N[e5])\n") g2 = gosgf.Sgf_game.from_string(b"(;SZ[9](;N[n1];N[n3])(;N[n2]))") for i in range(6): g2.extend_main_sequence().set(b"N", ("e%d" % i).encode('ascii')) self.assertEqual( g2.serialise(), b"(;SZ[9](;N[n1];N[n3];N[e0];N[e1];N[e2];N[e3];N[e4];N[e5])(;N[n2]))\n") def test_get_sequence_above(self): sgf_game = gosgf.Sgf_game.from_string(SAMPLE_SGF_VAR) root = sgf_game.get_root() branchnode = root[0][0][0][0] leaf = branchnode[1][0][1] self.assertEqual(sgf_game.get_sequence_above(root), []) self.assertEqual(sgf_game.get_sequence_above(branchnode), [root, root[0], root[0][0], root[0][0][0]]) self.assertEqual(sgf_game.get_sequence_above(leaf), [root, root[0], root[0][0], root[0][0][0], branchnode, branchnode[1], branchnode[1][0]]) sgf_game2 = gosgf.Sgf_game.from_string(SAMPLE_SGF_VAR) self.assertRaisesRegexp(ValueError, "node doesn't belong to this game", sgf_game2.get_sequence_above, leaf) def test_get_main_sequence_below(self): sgf_game = gosgf.Sgf_game.from_string(SAMPLE_SGF_VAR) root = sgf_game.get_root() branchnode = root[0][0][0][0] leaf = 
branchnode[1][0][1] self.assertEqual(sgf_game.get_main_sequence_below(leaf), []) self.assertEqual(sgf_game.get_main_sequence_below(branchnode), [branchnode[0], branchnode[0][0], branchnode[0][0][0]]) self.assertEqual(sgf_game.get_main_sequence_below(root), [root[0], root[0][0], root[0][0][0], branchnode, branchnode[0], branchnode[0][0], branchnode[0][0][0]]) sgf_game2 = gosgf.Sgf_game.from_string(SAMPLE_SGF_VAR) self.assertRaisesRegexp(ValueError, "node doesn't belong to this game", sgf_game2.get_main_sequence_below, branchnode) def test_main_sequence(self): sgf_game = gosgf.Sgf_game.from_string(SAMPLE_SGF_VAR) root = sgf_game.get_root() nodes = list(sgf_game.main_sequence_iter()) self.assertEqual(len(nodes), 8) self.assertIs(root.get_raw_property_map(), nodes[0].get_raw_property_map()) # Check that main_sequence_iter() optimisation has been used. # (Have to call this before
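# A compact usage sketch consolidating the gosgf calls exercised piecemeal by
# the assertions above; it reuses the SAMPLE_SGF constant defined at the top of
# this test module, and the printed values follow directly from those tests.
from betago import gosgf

game = gosgf.Sgf_game.from_string(SAMPLE_SGF)
print(game.get_size(), game.get_komi(), game.get_winner())   # 9 7.5 w
for node in game.main_sequence_iter():
    colour, move = node.get_move()
    print(colour, move)   # e.g. ('b', (2, 3)) for B[dg]; move is None for a pass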
/ # newpar if debugging: print 'add case 1' newpar.lchild = newnd newnd.rsib = nd newnd.par = newpar nd.par = newpar tree.root = newpar #bug: should be self.root = newpar? elif nd == nd.par.lchild: # nd---x---y newnd---nd # \\ | / \\ / # \\ | / --> newpar---x-y # \\|/ \\ / / # par par if debugging: print 'add case 2' x = nd.rsib newnd.lchild = None newnd.rsib = nd newnd.par = newpar newpar.lchild = newnd newpar.rsib = x newpar.par = nd.par #nd.lchild = nd.rsib = None nd.par.lchild = newpar nd.par = newpar elif nd.rsib is None: # x---y---nd newnd--nd # \\ | / \\ / # \\ | / --> x---y--newpar # \\|/ \\ | / # par par if debugging: print 'add case 3' y = nd.par.lchild while y.rsib != nd: y = y.rsib assert y is not None y.rsib = newpar newnd.lchild = None newnd.rsib = nd newnd.par = newpar newpar.lchild = newnd newpar.rsib = None newpar.par = nd.par #nd.lchild = nd.rsib = None nd.par = newpar else: # x---nd--y newnd--nd # \\ | / \\ / # \\ | / --> x-newpar-y # \\|/ \\ | / # par par if debugging: print 'add case 4' x = nd.par.lchild while x.rsib != nd: x = x.rsib assert x is not None x.rsib = newpar y = nd.rsib newnd.lchild = None newnd.rsib = nd newnd.par = newpar newpar.lchild = newnd newpar.rsib = y newpar.par = nd.par #nd.lchild = nd.rsib = None nd.par = newpar self.calcSplits() return newnd class Description(object): def __init__(self, t): self.tree = t self.newick = None self.nodeNumbers = None self.numTips = None self.numInternals = None self.treefile = open('trees.tre', 'w') self.treefile.write('#nexus\n\nbegin trees;\n') def close(self): self.treefile.write('end;\n') self.treefile.close() def reset(self): self.nodeNumbers = [] self.numTips = 0 self.numInternals = 0 def processNode(self, nd): self.nodeNumbers.append(nd.number) if nd.lchild is None: self.numTips += 1 else: self.numInternals += 1 def describe(self, n = None, r = None): self.tree.traverse(self) self.newick = self.tree.makeNewick() if n is not None and r is not None and self.numTips == n and self.numInternals == r: self.treefile.write('tree %s = %s;\n' % ('N%d_R%d' % (self.numTips, self.numInternals), self.newick)) elif n is not None and self.numTips == n: self.treefile.write('tree %s = %s;\n' % ('N%d_R%d' % (self.numTips, self.numInternals), self.newick)) elif r is not None and self.numInternals == r: self.treefile.write('tree %s = %s;\n' % ('N%d_R%d' % (self.numTips, self.numInternals), self.newick)) else: self.treefile.write('tree %s = %s;\n' % ('N%d_R%d' % (self.numTips, self.numInternals), self.newick)) return (self.numTips, self.numInternals, self.newick) def output(s = None): if s is None: print logf.write('\n') else: print s logf.write('%s\n' % s) def reduce(t, n, r, binary_r, binary_id, binary_newick, treedict): # Take out each edge from t in turn and store the resulting tree's newick in treedict[r][id]['trees']. # Also store id in the reduced tree's treedict[r-1][id]['trees'] entry. 
global newick_lookup if r == 0: return # There are r-1 internal nodes with edges for i in range(1, r): tcopy = t.deepCopy() j = 0 for nd in tcopy.preorder: if nd.par is not None and nd.lchild is not None: j += 1 if j == i: tcopy.deleteNode(nd) tcopy.preorder = Preorder(tcopy).build() tcopy_newick = tcopy.makeNewick() tcopy_id = frozenset(tcopy.id) newick_lookup[tcopy_id] = tcopy_newick #output(' %s' % tcopy_newick) treedict[binary_r][binary_id]['trees'].add(tcopy_id) treedict[r-1][tcopy_id]['trees'].add(binary_id) reduce(tcopy, n, r-1, binary_r, binary_id, binary_newick, treedict) break def recurse(tree, scribe, nmax, create_treedict, n = None): global treedict, progress if n is None: progress = 0 # start with trees having one more tip than the current tree tmp = [nd.number for nd in tree.preorder if nd.lchild is None] n = 1 + len(tmp) if create_treedict: # create dictionary to store trees having nmax tips, with trees in each resolution # class stored in a vector with key equal to number of internal nodes treedict = {} for r in range(1,nmax): treedict[r] = {} if n <= nmax: for nd in tree.preorder: newnd = tree.addNodeBelow(nd, n) n, r, newick = scribe.describe() if n == nmax and r == nmax-1: #output('* %6d %6d %s' % (n, r, newick)) if create_treedict: treedict[r][frozenset(tree.id)] = {'newick':newick, 'count':0, 'trees':set()} else: binary_r = r binary_id = frozenset(tree.id) binary_newick = newick newick_lookup[binary_id] = binary_newick progress += 1 #print 'Reducing %d: %s' % (progress, binary_newick) reduce(tree, n, r, binary_r, binary_id, binary_newick, treedict) elif create_treedict: #output(' %6d %6d %s' % (n, r, newick)) if n == nmax: treedict[r][frozenset(tree.id)] = {'newick':newick, 'count':0, 'trees':set()} recurse(tree, scribe, nmax, create_treedict, n+1) tree.deleteNode(newnd) if nd.lchild is not None: newnd = tree.addNodeTo(nd, n) n, r, newick = scribe.describe() if create_treedict: #output(' %6d %6d %s' % (n, r, newick)) if n == nmax: treedict[r][frozenset(tree.id)] = {'newick':newick, 'count':0, 'trees':set()} recurse(tree, scribe, nmax, create_treedict, n+1) tree.deleteNode(newnd) def listTrees(r, id): s = '' for treeid in list(treedict[r][id]['trees']): newick = newick_lookup[treeid] ninternals = len(treeid) s += ' | %d: %s' % (ninternals, newick) # ' | '.join(['%s' % newick_lookup[treeid] for treeid in list(treedict[r][id]['trees'])]) return s def resolutionClassFreq(r, id): freqs = [0]*rmax for treeid in list(treedict[r][id]['trees']): ninternals = len(treeid) freqs[ninternals-1] += 1 s = ' '.join(['%3d' % n for n in freqs]) return s def processTreeFile(treefname, tree, descr, treedict, rootat): stuff = open(treefname, 'r').read() newicks = re.findall('^\s*tree\s+\S+\s*=\s*(.+?);\s*$', stuff, re.M | re.S) for newick in newicks: tree.buildFromNewick('%s;' % newick) tree.rerootAt(rootat) n, r, newick = descr.describe() treedict[r][frozenset(tree.id)]['count'] += 1 if __name__ == '__main__': # key = tree id, value = newick description newick_lookup = {} logf = open('output.txt', 'w') # start with 2 tip tree tree = Tree() tree.root = Node() tree.addNodeTo(tree.root, 2) tree.addNodeTo(tree.root, 1) #output(' %6s %6s %s' % ('N', 'R', 'newick')) descr = Description(tree) n, r, newick = descr.describe() #output(' %6d %6d %s' % (n, r, newick)) # nmax is the number of taxa in rooted trees # nmax determines which file is read (e.g. 
nmax=6 causes 7taxa-unrooted-trees.t to # be processed, and generates 6taxa-rooted-output.txt) nmax = 6 rmax = nmax-1 print 'Enumerating all possible unrooted trees for %d taxa...' % (nmax+1,) # Running recurse to create treedict. Every multifurcating tree topology will be stored # in treedict under its resolution class: e.g. treedict[r][id]['newick'] stores the newick # string for the tree with the given id in resolution class r, with id being a set of sets # in which the outer set comprises r inner sets, each storing the node numbers of leaves # above one internal node in the tree. Other keys in treedict[r][id] include 'count', # which is a count of trees having this topology, and 'trees', which is the set of all # binary (i.e. fully-resolved) trees compatible with this tree. * = binary tree topology. create_treedict = True recurse(tree, descr, nmax, create_treedict) # Initialize nresclass and nsamples dictionaries that will provide a tally of the # number of sampled trees representing each topology (nsamples) and falling in each # possible resolution class (nresclass) nresclass = {} nsamples = {} for r in range(1,rmax+1): nresclass[r] = len(treedict[r].keys()) nsamples[r] = 0 print 'Determining "spread" by finding all binary trees compatible with each polytomous tree' print ' (this could be done much faster if spread was calculated directly)...' # Running recurse to reduce binary trees. Each internal node in each binary # tree will be recursively removed and the resulting non-binary tree stored # in the 'trees' element of the binary tree. Also, the binary tree will be # added to the 'trees' element of the non-binary tree. This allows one to # see all non-binary trees that are compatible with each binary tree and # all binary trees that can be generated by resolving each non-binary tree. create_treedict = False recurse(tree, descr, nmax, create_treedict) tree_file_name = '../%dtaxa-unrooted-trees.t' % (nmax+1,) print 'Processing "%s"...' % tree_file_name # Reads trees.t and stores each tree there in the 'counts' element of the # tree's treedict record. The term 'spread' is defined as the number of distinct # fully-resolved (i.e. binary) labeled trees compatible with a particular tree. # Thus, a binary tree has spread 1, while the star tree containing just 1
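# The comments above define the "spread" of a topology as the number of binary
# labeled trees compatible with it.  For the star tree that is simply the total
# number of fully-resolved rooted labeled trees on n tips, which has the closed
# form (2n - 3)!!.  A small sketch of that count (the script itself obtains
# spreads by enumeration rather than from this formula).
def double_factorial(k):
    # k!! = k * (k - 2) * (k - 4) * ... (1 for k <= 0)
    result = 1
    while k > 1:
        result *= k
        k -= 2
    return result

def n_rooted_binary_trees(n_tips):
    # Number of fully-resolved rooted labeled trees on n_tips leaves.
    return double_factorial(2 * n_tips - 3)

# For nmax = 6 rooted tips (equivalently the 7-taxon unrooted trees referenced
# in the file names above):
print(n_rooted_binary_trees(6))   # 945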
encrypted, ``1`` othewise :rtype: int .. note:: Return-value ``1`` means that ``UndecodableRecordError`` has been implicitely raised :raises FileNotFoundError: if the specified file does not exist """ try: with open(os.path.abspath(file_path), mode='r') as _file: with contextlib.closing( mmap.mmap( _file.fileno(), 0, access=mmap.ACCESS_READ ) ) as _buffer: try: self.update(record=_buffer.read()) except UndecodableRecordError: return 1 else: return 0 except FileNotFoundError: raise def encryptFilePerLog(self, file_path): """Encrypts per log the data of the provided file into the Merkle-tree More accurately, it successively updates the Merkle-tree (cf. doc of the ``.update()`` method) with each line of the provided file in the respective order :param file_path: relative path of the file under enryption with respect to the current working directory :type file_path: str :returns: ``0`` if the provided file was successfully encrypted, ``1`` othewise :rtype: int .. note:: Return-value ``1`` means that some line of the provided log-file is undecodable with the Merkle-tree's encoding type (i.e., a ``UnicodeDecodeError`` has been implicitely raised) :raises FileNotFoundError: if the specified file does not exist """ absolute_file_path = os.path.abspath(file_path) try: with open(absolute_file_path, mode='r') as _file: buffer = mmap.mmap( _file.fileno(), 0, access=mmap.ACCESS_READ ) except FileNotFoundError: raise else: records = [] while True: _record = buffer.readline() if not _record: break else: try: _record = _record.decode(self.encoding) except UnicodeDecodeError: return 1 else: records.append(_record) tqdm.write('') # Perform line by line encryption for _record in tqdm(records, desc='Encrypting log file', total=len(records)): self.update(record=_record) tqdm.write('Encryption complete\n') return 0 def encryptObject(self, object, sort_keys=False, indent=0): """Encrypts the provided object as a single new leaf into the Merkle-tree More accurately, it updates (cf. doc of the ``.update()`` method) the Merkle-tree with *one* newly-created leaf storing the digest of the provided object's stringified version :param object: the JSON entity under encryption :type objec: dict :param sort_keys: [optional] Defaults to ``False``. If ``True``, then the object's keys get alphabetically sorted before its stringification. :type sort_keys: bool :param indent: [optional] Defaults to ``0``. Specifies key indentation upon stringification of the provided object. :type indent: int """ self.update( record=json.dumps( object, sort_keys=sort_keys, indent=indent ) ) def encryptObjectFromFile(self, file_path, sort_keys=False, indent=0): """Encrypts the object within the provided ``.json`` file as a single new leaf into the Merkle-tree More accurately, the Merkle-tree is updated with *one* newly-created leaf (cf. doc of the ``.update()`` method) storing the digest of the stringified version of the object loaded from within the provided file :param file_path: relative path of a ``.json`` file with respect to the current working directory, containing *one* JSON entity :type file_path: str :param sort_keys: [optional] Defaults to ``False``. If ``True``, then the object's keys get alphabetically sorted before its stringification :type sort_keys: bool :param indent: [optional] Defaults to ``0``. 
Specifies key indentation upon stringification of the object under encryption :type indent: int :raises FileNotFoundError: if the specified file does not exist :raises JSONDecodeError: if the specified file could not be deserialized """ try: with open(os.path.abspath(file_path), 'rb') as _file: object = json.load(_file) except (FileNotFoundError, JSONDecodeError): raise else: self.update( record=json.dumps( object, sort_keys=sort_keys, indent=indent ) ) def encryptFilePerObject(self, file_path, sort_keys=False, indent=0): """Encrypts per object the data of the provided ``.json`` file into the Merkle-tree More accurately, it successively updates the Merkle-tree (cf. doc of the ``.update()`` method) with each newly created leaf storing the digest of the respective JSON entity in the list loaded from the provided file :param file_path: relative path of a ``.json`` file with respect to the current working directory, containing a *list* of JSON entities :type file_path: str :param sort_keys: [optional] Defaults to ``False``. If ``True``, then the all objects' keys get alphabetically sorted before stringification :type sort_keys: bool :param indent: [optional] Defaults to ``0``. Specifies uniform key indentation upon stringification of objects :type indent: int :raises FileNotFoundError: if the specified file does not exist :raises JSONDecodeError: if the specified file could not be deserialized :raises WrongJSONFormat: if the JSON object loaded from within the provided file is not a list """ try: with open(os.path.abspath(file_path), 'rb') as _file: objects = json.load(_file) except (FileNotFoundError, JSONDecodeError): raise if type(objects) is not list: raise WrongJSONFormat for _object in objects: self.update( record=json.dumps( _object, sort_keys=sort_keys, indent=indent ) ) # ------------------------ Export to and load from file ------------------ def export(self, file_path): """Creates a ``.json`` file at the provided path and exports the minimum required information into it, so that the Merkle-tree can be reloaded in its current state from that file The final file will store a JSON entity with keys ``header`` (containing the parameters ``hash_type``, ``encoding``, and ``security``) and ``hashes``, mapping to the digests currently stored by the tree's leaves in respective order .. note:: If the provided path does not end with ``.json``, then this extension is appended to it before exporting .. warning:: If a file exists already for the provided path (after possibly extending with ``.json``, see above), then it gets overwritten :param file_path: relative path of the file to export to with respect to the current working directory :type file_path: str """ with open('%s.json' % file_path if not file_path.endswith('.json') else file_path, 'w') as _file: json.dump( self.serialize(), _file, indent=4 ) @staticmethod def loadFromFile(file_path): """Loads a Merkle-tree from the provided file, the latter being the result of an export (cf. the ``.export()`` method) :param file_path: relative path of the file to load from with respect to the current working directory :type file_path: str :returns: the Merkle-tree laoded from the provided file :rtype: tree.MerkleTree :raises FileNotFoundError: if the specified file does not exist :raises JSONDecodeError: if the specified file could not be deserialized :raises WrongJSONFormat: if the JSON object loaded from within is not a Merkle-tree export (cf. 
the ``.export()`` method) """ try: with open(file_path, 'r') as _file: loaded_object = json.load(_file) except (FileNotFoundError, JSONDecodeError): raise try: _header = loaded_object['header'] _tree = MerkleTree( hash_type=_header['hash_type'], encoding=_header['encoding'], security=_header['security'] ) except KeyError: raise WrongJSONFormat tqdm.write('\nFile has been loaded') for hash in tqdm(loaded_object['hashes'], desc='Retreiving tree...'): _tree.update(digest=hash) tqdm.write('Tree has been retreived') return _tree # --------------------------------- Comparison --------------------------- def __eq__(self, other): """Implements the ``==`` operator :param other: the Merkle-tree to compare with :type other: tree.MerkleTree :raises InvalidComparison: if compared with an object that is not instance of the ``tree.MerkleTree`` class """ if not isinstance(other, self.__class__): raise InvalidComparison if not other: return not self else: return True if not self else self.rootHash == other.rootHash def __ne__(self, other): """Implements the ``!=`` operator :param other: the Merkle-tree to compare with :type other: tree.MerkleTree :raises InvalidComparison: if compared with an object that is not instance of the ``tree.MerkleTree`` class """ if not isinstance(other, self.__class__): raise InvalidComparison if not other: return self.__bool__() else: return True if not self else self.rootHash != other.rootHash def __ge__(self, other): """Implements the ``>=`` operator :param other: the Merkle-tree to compare with :type other: tree.MerkleTree :raises InvalidComparison: if compared with an object that is not instance of the ``tree.MerkleTree`` class """ if not isinstance(other, self.__class__): raise InvalidComparison if not other: return True else: return False if not self else self.inclusionTest(other.rootHash, other.length) def __le__(self, other): """Implements the ``<=`` operator :param other: the Merkle-tree to compare with :type other: tree.MerkleTree :raises InvalidComparison: if compared with an object that is not instance of the ``tree.MerkleTree`` class """ if not isinstance(other, self.__class__): raise InvalidComparison else: return other.__ge__(self) def __gt__(self, other): """Implements the ``>`` operator :param other: the Merkle-tree to compare with :type other: tree.MerkleTree :raises InvalidComparison: if compared with an object that is not instance of the ``tree.MerkleTree`` class """ if not isinstance(other, self.__class__): raise InvalidComparison if not other: return self.__bool__() elif not self or self.rootHash == other.rootHash: return False else: return self.inclusionTest(other.rootHash, other.length) def __lt__(self, other): """Implements the ``<`` operator :param other: the Merkle-tree to compare with :type other: tree.MerkleTree :raises InvalidComparison: if compared with an object that is not instance of the ``tree.MerkleTree`` class """ if not isinstance(other, self.__class__): raise InvalidComparison else: return other.__gt__(self) # ------------------------------- Representation ------------------------- def __repr__(self): """Overrides the default implementation Sole purpose of this function is to easily print info about the Merkle-treee by just invoking
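A minimal usage sketch of the export/load round-trip and the ordering semantics defined above. The constructor keywords mirror those used inside ``loadFromFile``, the module path ``tree`` follows the ``:rtype: tree.MerkleTree`` annotation, and the log-file path is hypothetical; the asserts illustrate the intended semantics of ``==``, ``<=`` and ``>=`` rather than guaranteed library behaviour.

# Sketch only: import path and file names are assumptions; the methods and
# constructor keywords are the ones defined in the class above.
from tree import MerkleTree

tree_1 = MerkleTree(hash_type='sha256', encoding='utf-8', security=True)
tree_1.encryptFilePerLog('logs/access.log')      # hypothetical log file

tree_1.export('backup')                           # writes backup.json
tree_2 = MerkleTree.loadFromFile('backup.json')   # rebuilt leaf by leaf

assert tree_1 == tree_2           # identical root hashes after the round-trip

tree_2.encryptObject({'event': 'rotation'}, sort_keys=True)
assert tree_1 <= tree_2           # older root is included in the newer tree
assert not tree_1 >= tree_2       # the newer tree is a strict extension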
"""Build RESTCONF request from user-provided inputs.""" from collections import OrderedDict import os import logging import re import json log = logging.getLogger(__name__) # Translate edit-op property in a node to an HTTP method HTTP_METHODS = { 'merge': 'PATCH', 'create': 'POST', 'replace': 'PUT', 'delete': 'DELETE', 'get': 'GET', } NO_BODY_METHODS = ['GET', 'DELETE'] WITH_BODY_METHODS = ['PATCH', 'PUT', 'POST'] XPATH_CONCAT_TOKEN_RE = re.compile( r"(?:" # one of r'"([^"]*)"' # double-quoted string r"|" # or r"'([^']*)'" # single-quoted string r")" ) XPATH_KEY_RE = re.compile( r"\[([^=\[\]]+)=" # [key= r"(?:" # one of r'"([^"]*)"' # "double-quoted value" r"|" # or r"'([^']*)'" # 'single-quoted value' r"|" # or r"concat\((.*)\)" # concat() r")\]" ) XPATH_BRACKETS_CONTENT_RE = re.compile(r"\[(.*?)\]") DEVICE_RESTCONF_ENDPOINT = '/restconf/data' class RequestInputError(ValueError): """Raised on invalid input.""" def __init__(self, parameter, value, reason): self.parameter = parameter self.value = value self.reason = reason super(RequestInputError, self).__init__(str(self)) def __str__(self): return "ERROR: Invalid {0}:\n{1}\n {2}".format( self.parameter, self.value, self.reason) class XPathError(RequestInputError): """Exception raised when the XPath is malformed.""" def __init__(self, value, reason): super(XPathError, self).__init__('xpath', value, reason) class RestconfRequestBuilder(object): """Class used to build URL, and body for a ResTCONF request from provided data.""" # # Public API # def __init__(self, request_data, returns): """Create list of requests from request data""" # Nodes and XPaths from request data self.nodes = request_data.get('nodes', []) self.xpaths = [node.get('xpath') for node in self.nodes if node.get('xpath', None)] # Map XPath to its node data self.xpath_to_nodedata = {node['xpath']: node for node in self.nodes} # String present in all XPaths self.common_prefix = self.gen_common_prefix(self.xpaths) # Components for REST request self.http_method = self.get_http_method_from_nodes(self.nodes) self.url = self.gen_url(self.common_prefix, request_data, returns, self.http_method) self.body = self.gen_body(self.xpaths, self.common_prefix, request_data, self.http_method) self.json_body = json.dumps(self.body, indent=2) self.content_type = 'application/yang-data+json' def get_http_method_from_nodes(self, nodes): if not nodes or not len(nodes): raise RequestInputError('nodes', None, 'No nodes are present in data') ref_edit_op = nodes[0]['edit-op'].lower() or 'merge' if not all([node['edit-op'].lower() == ref_edit_op for node in nodes]): # Check if all edit-ops are the same, if not raise error raise RequestInputError( 'edit-op', 'Edit-op not the same across all nodes', 'Edit-op must the same in all nodes for this test' ) try: http_method = HTTP_METHODS[ref_edit_op] except KeyError: raise RequestInputError( 'edit-op', ref_edit_op, f'Invalid edit-op, must be one of the following: {", ".join(HTTP_METHODS.keys())}' ) return http_method def gen_common_prefix(self, xpaths): """Get string/xpath that is present in all xpaths""" common_prefix = os.path.commonprefix(xpaths) tokens = common_prefix.split('/') last_element_colon_tokens = tokens[-1].split(':') if tokens[-1].find(':') and not len(last_element_colon_tokens[-1]): # If the last token only have namespace and not module name, omit from common prefix return '/'.join(tokens[:-1]) return common_prefix def replace_keys_with_params(self, xpath): """Replace all keys in URL/XPath with RESTCONF-compatible parameters""" if '[' not in 
xpath or ']' not in xpath: # Keys are not present return xpath xpath_tokens = xpath.split('/') formatted_tokens = [] for token in xpath_tokens: # Get all keys in XPath token keys = XPATH_KEY_RE.findall(token) # Find the beginning open brace if keys: # If there are keys in this token, replace with params open_brace_index = token.find('[') # Retrieve token without keys inside brackets token = token[:open_brace_index] # Add in RESTCONF-friendly params for i in range(len(keys)): if i == 0: token = '{0}={1}'.format(token, keys[i][1]) elif i > 0: token = '{0},{1}={2}'.format(token, keys[i][0], keys[i][1]) formatted_tokens.append(token) # Join all tokens together after splitting by slash formatted_xpath = '/'.join(formatted_tokens) return formatted_xpath def remove_keys(self, xpath, duplicates_only=False): """Remove all or only duplicate keys in xpath/URL""" if '[' not in xpath or ']' not in xpath or not xpath: # Keys are not present or url is empty return xpath formatted_xpath = xpath if duplicates_only: # Remove duplicates from tokens and add to processed_tokens when done tokens = xpath.split('/') processed_tokens = [] for token in tokens: # Find and extract string from between square brackets brackets_content = XPATH_BRACKETS_CONTENT_RE.findall(token) if len(token) and len(brackets_content): # Map key names to raw key string in token token_keys = {} for content in brackets_content: key = content.split('=') if key[0] not in token_keys.keys() or (key[0] in token_keys.keys() and key[0] != key[1]): token_keys[key[0]] = key[1] # Remove key from token token = token.replace('[{0}={1}]'.format(key[0], key[1]), '') # Re-add valid non-duplicate keys back to token for key in token_keys: token += '[{0}={1}]'.format(key, token_keys[key]) processed_tokens.append(token) formatted_xpath = '/'.join(processed_tokens) elif not duplicates_only: # Remove all keys from url/xpath while '[' in formatted_xpath and ']' in formatted_xpath: open_brace_index = formatted_xpath.find('[') close_brace_index = formatted_xpath.find(']') formatted_xpath = '{0}{1}'.format( formatted_xpath[0:open_brace_index], formatted_xpath[close_brace_index + 1:] ) return formatted_xpath @staticmethod def replace_or_delete_namespaces(xpath, request_data, mode='replace'): """Replace/delete all namespace(s) in URL with module name(s) or """ valid_modes = ['replace', 'delete'] namespaces = request_data.get('namespace', None) formatted_xpath = xpath if namespaces and mode in valid_modes: for namespace in namespaces: if namespace in xpath: if mode == 'delete': formatted_xpath = formatted_xpath.replace('{0}:'.format(namespace), '') elif mode == 'replace': modulename = namespaces[namespace].split('/')[-1] formatted_xpath = formatted_xpath.replace(namespace, modulename) return formatted_xpath def gen_url(self, xpath, request_data, returns, http_method): """Generate a valid RESTCONF/REST URL""" # Base URL will be the start of the URL, appended by remainder URL base_url = DEVICE_RESTCONF_ENDPOINT # URL that will be appended to base URL remainder_url = xpath # Result of appending base_url and remainder_url together url = '' if http_method == 'POST': xpath_has_key = True if XPATH_KEY_RE.search(xpath) else False substring = self.get_xpath_last_key_substring(remainder_url) if xpath_has_key: # Remove last key substring from URL remainder_url = remainder_url.replace(substring, '') else: # Keep URL except cut off last slash token remainder_url = '/'.join(remainder_url.split('/')[:-1]) elif http_method == 'GET' and returns: tokens = remainder_url.split('/') 
remainder_url = f"/{tokens[0] if tokens[0] else tokens[1]}" # Remove duplicate keys remainder_url = self.remove_keys(remainder_url, duplicates_only=True) # Replace keys in xpath with RESTCONF-compatible parameters remainder_url = self.replace_keys_with_params(remainder_url) # Transform namespace into module name remainder_url = self.replace_or_delete_namespaces(remainder_url, request_data, mode='replace') # URL must start at device's RESTCONF endpoint followed by the xpath/remainder of URL url = '{0}{1}'.format(base_url, remainder_url) return url def get_xpath_last_key_substring(self, xpath): """ Returns substring of XPath starting from the last occurence of key in it """ if not XPATH_KEY_RE.search(xpath): # No keys in XPath return xpath return xpath[ xpath.index( next(( token for token in reversed(xpath.split('/')) if XPATH_KEY_RE.search(token) )) ) - 1: ] def gen_post_method_body(self, xpaths, common_prefix, request_data): """Generate a valid RESTCONF/REST body for one xpath compliant with POST method""" # Body of all generated xpath bodies merged together merged_body = OrderedDict() for xpath in xpaths: # Get targeted node data node_data = self.xpath_to_nodedata[xpath] # Body for this xpath to be merged into main body later body = OrderedDict() xpath_has_key = True if XPATH_KEY_RE.search(xpath) else False if xpath_has_key: # Get XPath beginning from the last slash token that includes a key formatted_xpath = self.get_xpath_last_key_substring(xpath) formatted_xpath = self.replace_or_delete_namespaces(formatted_xpath, request_data, mode='delete') formatted_xpath_tokens = [token for token in formatted_xpath.split('/') if len(token)] reversed_xpath_tokens = list(reversed(formatted_xpath_tokens)) # Walk through xpath elements from end to beginning and generate nested body dict for i in range(len(reversed_xpath_tokens)): token_with_key = reversed_xpath_tokens[i] token = self.remove_keys(token_with_key) # If token has a key, insert key and value into body keys = XPATH_KEY_RE.findall(token_with_key) if i == 0: # Leaf node, set value body[token] = node_data.get('value', '') elif i > 0: if keys: body[token] = [{ reversed_xpath_tokens[i - 1]: body[reversed_xpath_tokens[i - 1]] }] else: # Not leaf node, generate nested dicts body[token] = { reversed_xpath_tokens[i - 1]: body[reversed_xpath_tokens[i - 1]] } del body[reversed_xpath_tokens[i - 1]] if keys: # Insert key and values into body for key in keys: body[token][0][key[0]] = key[1] elif not xpath_has_key: # Last slash token of XPath with namespace removed key = self.replace_or_delete_namespaces(xpath.split('/')[-1], request_data, mode='delete') # Value set by user value = node_data.get('value', '') body = OrderedDict({ key: value }) merged_body = self.merge_dictionaries(merged_body, body) return merged_body def gen_body(self, xpaths, common_prefix, request_data, http_method): body = OrderedDict() if http_method in WITH_BODY_METHODS: if http_method == 'POST': body = self.gen_post_method_body(xpaths, common_prefix, request_data) else: body = self.gen_nonpost_method_body(xpaths, common_prefix, request_data) return body def gen_nonpost_method_body(self, xpaths, common_prefix, request_data): """Generate a valid RESTCONF/REST body for one xpath for methods that are not POST, DELETE, or GET""" # Body of all generated xpath bodies merged together merged_body = OrderedDict() for xpath in xpaths: # Get targeted node data node_data = self.xpath_to_nodedata[xpath] body = OrderedDict() # Format XPath to remove common prefix (except last slash token), 
namespaces, and keys formatted_xpath = xpath # Remove common prefix from xpath,
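To make the key handling above concrete, here is a small standalone illustration (not the class's exact code path) of rewriting XPath list-key predicates as RESTCONF key parameters, e.g. turning /interfaces/interface[name='eth0']/mtu into /interfaces/interface=eth0/mtu. The regex and output format follow the same convention as ``replace_keys_with_params`` above; the function name and sample XPath are mine.

import re

KEY_RE = re.compile(r"\[([^=\[\]]+)=(?:\"([^\"]*)\"|'([^']*)')\]")

def xpath_keys_to_restconf(xpath):
    """Rewrite [key='value'] predicates as RESTCONF key segments.

    The first key of a list node becomes '=value'; additional keys are
    appended as ',key=value', matching the URL form generated above.
    """
    out_tokens = []
    for token in xpath.split('/'):
        keys = KEY_RE.findall(token)
        if keys:
            base = token[:token.find('[')]
            parts = []
            for i, (name, dq, sq) in enumerate(keys):
                value = dq or sq
                parts.append(value if i == 0 else '{0}={1}'.format(name, value))
            token = '{0}={1}'.format(base, ','.join(parts))
        out_tokens.append(token)
    return '/'.join(out_tokens)

print(xpath_keys_to_restconf("/interfaces/interface[name='eth0']/mtu"))
# /interfaces/interface=eth0/mtu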
>>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.attach_addon_with_http_info(booking_id, bookingdetail_id, addon_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int booking_id: (required) :param int bookingdetail_id: (required) :param int addon_id: (required) :param int quantity: :param int packagefacade_id: :return: InlineResponse2009 If the method is called asynchronously, returns the request thread. """ all_params = ['booking_id', 'bookingdetail_id', 'addon_id', 'quantity', 'packagefacade_id'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method attach_addon" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'booking_id' is set if ('booking_id' not in params) or (params['booking_id'] is None): raise ValueError("Missing the required parameter `booking_id` when calling `attach_addon`") # verify the required parameter 'bookingdetail_id' is set if ('bookingdetail_id' not in params) or (params['bookingdetail_id'] is None): raise ValueError("Missing the required parameter `bookingdetail_id` when calling `attach_addon`") # verify the required parameter 'addon_id' is set if ('addon_id' not in params) or (params['addon_id'] is None): raise ValueError("Missing the required parameter `addon_id` when calling `attach_addon`") resource_path = '/booking/add-addon'.replace('{format}', 'json') path_params = {} query_params = {} if 'booking_id' in params: query_params['booking_id'] = params['booking_id'] if 'bookingdetail_id' in params: query_params['bookingdetail_id'] = params['bookingdetail_id'] if 'addon_id' in params: query_params['addon_id'] = params['addon_id'] if 'quantity' in params: query_params['quantity'] = params['quantity'] if 'packagefacade_id' in params: query_params['packagefacade_id'] = params['packagefacade_id'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='InlineResponse2009', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def attach_pickup(self, booking_id, location, date, time, **kwargs): """ Attach a pickup location for a booking This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.attach_pickup(booking_id, location, date, time, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int booking_id: (required) :param str location: (required) :param date date: (required) :param str time: (required) :return: InlineResponse20011 If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.attach_pickup_with_http_info(booking_id, location, date, time, **kwargs) else: (data) = self.attach_pickup_with_http_info(booking_id, location, date, time, **kwargs) return data def attach_pickup_with_http_info(self, booking_id, location, date, time, **kwargs): """ Attach a pickup location for a booking This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.attach_pickup_with_http_info(booking_id, location, date, time, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int booking_id: (required) :param str location: (required) :param date date: (required) :param str time: (required) :return: InlineResponse20011 If the method is called asynchronously, returns the request thread. """ all_params = ['booking_id', 'location', 'date', 'time'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method attach_pickup" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'booking_id' is set if ('booking_id' not in params) or (params['booking_id'] is None): raise ValueError("Missing the required parameter `booking_id` when calling `attach_pickup`") # verify the required parameter 'location' is set if ('location' not in params) or (params['location'] is None): raise ValueError("Missing the required parameter `location` when calling `attach_pickup`") # verify the required parameter 'date' is set if ('date' not in params) or (params['date'] is None): raise ValueError("Missing the required parameter `date` when calling `attach_pickup`") # verify the required parameter 'time' is set if ('time' not in params) or (params['time'] is None): raise ValueError("Missing the required parameter `time` when calling `attach_pickup`") resource_path = '/booking/add-pickup'.replace('{format}', 'json') path_params = {} query_params = {} if 'booking_id' in params: query_params['booking_id'] = params['booking_id'] if 'location' in params: query_params['location'] = params['location'] if 'date' in params: query_params['date'] = params['date'] if 'time' in params: query_params['time'] = params['time'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='InlineResponse20011', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def cancel_booking(self, booking_id, **kwargs): """ Cancel a booking This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. 
>>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.cancel_booking(booking_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int booking_id: (required) :return: InlineResponse2003 If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.cancel_booking_with_http_info(booking_id, **kwargs) else: (data) = self.cancel_booking_with_http_info(booking_id, **kwargs) return data def cancel_booking_with_http_info(self, booking_id, **kwargs): """ Cancel a booking This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.cancel_booking_with_http_info(booking_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int booking_id: (required) :return: InlineResponse2003 If the method is called asynchronously, returns the request thread. """ all_params = ['booking_id'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method cancel_booking" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'booking_id' is set if ('booking_id' not in params) or (params['booking_id'] is None): raise ValueError("Missing the required parameter `booking_id` when calling `cancel_booking`") resource_path = '/booking/cancel'.replace('{format}', 'json') path_params = {} query_params = {} if 'booking_id' in params: query_params['booking_id'] = params['booking_id'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='InlineResponse2003', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def confirm_booking(self, booking_id, **kwargs): """ Confirm a booking and all of its sessions and notify the lead customer This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.confirm_booking(booking_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int booking_id: (required) :return: InlineResponse20012 If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.confirm_booking_with_http_info(booking_id, **kwargs) else: (data) = self.confirm_booking_with_http_info(booking_id, **kwargs) return data def confirm_booking_with_http_info(self, booking_id, **kwargs): """ Confirm a booking and all of its sessions and notify the lead customer This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.confirm_booking_with_http_info(booking_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int booking_id: (required) :return: InlineResponse20012 If the method is called asynchronously, returns the request thread. """ all_params = ['booking_id'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method confirm_booking" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'booking_id' is set if ('booking_id' not in params) or (params['booking_id'] is None): raise ValueError("Missing the required parameter `booking_id` when calling `confirm_booking`") resource_path = '/booking/confirm'.replace('{format}', 'json') path_params = {} query_params = {} if 'booking_id' in params: query_params['booking_id'] =
<gh_stars>10-100 """Training script for vision models. Training, evaluation, and tuning scheme: 1. Use `python train_vision.py [options]` to build, train, and validate models. 2. Logs and model checkpoints will be stored in a unique dir for each specific run (e.g. .../{model_version}_{datetime}/), within the specified model dir. 3. To change base model parameters, either use the [options] interface, edit the param dicts in vision.py, or save the base params as json (hint: `--save-base-params`), edit the file, and supply the model directory containing the new params file to override the default base params. 4. To change training params and dataset choices, use the [options] interface of the train script (see `--help` for more info). Author: <NAME> Contact: <EMAIL> Date: August 2018 """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys import json import logging import argparse import datetime import matplotlib matplotlib.use('Agg') # use anitgrain rendering engine backend for non-GUI interfaces from sklearn import preprocessing import numpy as np import tensorflow as tf # Add upper-level 'src' directory to application path sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')) #pylint: disable=E0401 from mltoolset import training from mltoolset import data from mltoolset import nearest_neighbour from mltoolset import utils from mltoolset import TF_FLOAT, TF_INT #pylint: enable=E0401 import vision # Static names for stored log and param files MODEL_PARAMS_STORE_FN = 'model_params.json' LOG_FILENAME = 'train_vision.log' # Training options passed by command line # TODO(rpeloff) add to mltoolset training.py TRAIN_OPTION_ARGS = [ { "varname": 'n_max_epochs', "names": ['-me', '--n-max-epochs'], "help": "Maximum number of epochs to train (default: 50)", "choices": None, "default": 50 }, { "varname": 'batch_size', "names": ['-bs', '--batch-size'], "help": "Number of exemplars per mini-batch for SGD (default: 100)", "choices": None, "default": 100 }, { "varname": 'optimizer', "names": ['-op', '--optimizer'], "help": "Optimizer for gradient descent (default: adam)", "choices": ['sgd', 'momentum', 'adagrad', 'adadelta', 'adam'], "default": 'adam' }, { "varname": 'learning_rate', "names": ['-lr', '--learning-rate'], "help": "Learning rate (default: 1e-3)", "choices": None, "default": 1e-3 }, { "varname": 'decay_rate', "names": ['-dr', '--decay-rate'], "help": "Exponential decay of learning rate per epoch (default: 1.0 - no decay)", "choices": None, "default": 1.0 }, { "varname": 'random_seed', "names": ['-rs', '--random-seed'], "help": "Random seed (default: 42)", "choices": None, "default": 42 } ] def check_arguments(): """Check command line arguments for `python train_vision.py`.""" parser = argparse.ArgumentParser(description=__doc__.strip().split('\n')[0]) # -------------------------------------------------------------------------- # General script options (model type, storage, etc.): # -------------------------------------------------------------------------- parser.add_argument('--model-version', type=str, help="Type of vision model to build and train", choices=vision.MODEL_VERSIONS, required=True) parser.add_argument('--model-dir', type=os.path.abspath, help="Path to store and load vision models " "(defaults to the current working directory)", default='.') parser.add_argument('--no-unique-dir', action='store_true', help="Do not create a unique model directory") 
parser.add_argument('--data-dir', type=os.path.abspath, help="Path to store and load data" "(defaults to '{}' in the current working " "directory)".format('data'), default='data') parser.add_argument('--params-file', type=str, help="Filename of model parameters file e.g. '{0}' " "(defaults to '{0}' in model directory " "if found, else base model parameters " "are used)".format(MODEL_PARAMS_STORE_FN), default=None) parser.add_argument('--restore-checkpoint', type=str, help="Filename of model checkpoint to restore and " "continue training (defaults to None which trains " "from latest checkpoint if found)", default=None) parser.add_argument('--save-base-params', action='store_true', help="Store the base parameters of the selected " "vision model (as {}/{}) and exit".format( '{model-dir}', MODEL_PARAMS_STORE_FN)) # -------------------------------------------------------------------------- # Model and data pipeline options: # -------------------------------------------------------------------------- # Val size based on selecting first 10 out of 30 alphabets from Omniglot: # omni[0][2][np.isin(omni[0][2], np.unique(omni[0][2])[:10])].shape -> 6000 parser.add_argument('--train-set', type=str, help="Dataset to use for pre-training vision model " "(defaults to '{}')".format('omniglot'), choices=['omniglot', 'mnist'], default='omniglot') parser.add_argument('--val-size', type=int, help="Number of validation examples to select from " "the train dataset (defaults to " "{}; recommend {} for mnist)".format(6000, 5000), default=6000) parser.add_argument('--l-way', type=int, help="Number of L unique classes used in few-shot " "evaluation (defaults to {})".format(10), default=10) parser.add_argument('--k-shot', type=int, help="Number of K examples sampled per L-way label " "used in few-shot evaluation (defaults to {})" "".format(1), default=1) parser.add_argument('--n-queries', type=int, help="Number of N_queries query examples to sample per " "few-shot episode (defaults to {})".format(10), default=10) parser.add_argument('--n-train-episodes', type=int, help="Number of training episodes/batches per epoch " "(defaults to {} which uses " "train_size/batch_size)".format(None), default=None), parser.add_argument('--n-test-episodes', type=int, help="Number of few-shot validation episodes" "(defaults to {})".format(400), default=400) parser.add_argument('--balanced-batching', action='store_true', help="Sample balanced batches of P concept classes and " "K examples per class during training (defaults " "to {}, which uses normal batching; Overrides " "--batch-size with P_batch*K_batch)".format(False)) parser.add_argument('--p-batch', type=int, help="Number of P_batch unique concept labels to " "sample per balanced batch (defaults to {})" "".format(32), default=32) parser.add_argument('--k-batch', type=int, help="Number of K_batch examples to sample per unique " "concept in a balanced batch (defaults to {})" "".format(4), default=4) parser.add_argument('--max-offline-pairs', type=int, help="Maximum number of same pairs that will be sampled " "for siamese triplet (offline) model (defaults to " "100000).", default=int(100e3)) # -------------------------------------------------------------------------- # Other common training parameters: # -------------------------------------------------------------------------- for train_opt in TRAIN_OPTION_ARGS: parser.add_argument(*train_opt['names'], type=type(train_opt['default']), choices=train_opt['choices'], help=train_opt['help'], default=train_opt['default']) # 
-------------------------------------------------------------------------- # Vision model hyperparameters: # -------------------------------------------------------------------------- for model_param in vision.base_model_dict.keys(): parser.add_argument("--{}".format(model_param), default=-1) # default -1 to determine if value set return parser.parse_args() def main(): # -------------------------------------------------------------------------- # Parse script args and handle options: # -------------------------------------------------------------------------- ARGS = check_arguments() # Set numpy and tenorflow random seed np.random.seed(ARGS.random_seed) tf.set_random_seed(ARGS.random_seed) # Get specified model directory (default cwd) model_dir = ARGS.model_dir # Check if not using a previous run, and create a unique run directory if not os.path.exists(os.path.join(model_dir, LOG_FILENAME)): if not ARGS.no_unique_dir: unique_dir = "{}_{}_{}".format( 'vision', ARGS.model_version, datetime.datetime.now().strftime("%y%m%d_%Hh%Mm%Ss_%f")) model_dir = os.path.join(model_dir, unique_dir) # Create directories if required ... if not os.path.exists(model_dir): os.makedirs(model_dir) # Set logging to print to console and log to file utils.set_logger(model_dir, log_fn=LOG_FILENAME) logging.info("Training vision model: version={}".format(ARGS.model_version)) logging.info("Using model directory: {}".format(model_dir)) # Save base parameters and exit if `--save-base-params` flag encountered if ARGS.save_base_params: base_params = vision.MODEL_BASE_PARAMS[ARGS.model_version].copy() base_params['model_version'] = ARGS.model_version base_params_path = os.path.join(model_dir, MODEL_PARAMS_STORE_FN) with open(base_params_path, 'w') as fp: logging.info("Writing base model parameters to file: {}" "".format(base_params_path)) json.dump(base_params, fp, indent=4) return # exit ... # Load JSON model params from specified file or a previous run if available params_file = None model_params_store_fn = os.path.join(model_dir, MODEL_PARAMS_STORE_FN) if ARGS.params_file is not None: params_file = os.path.join(model_dir, ARGS.params_file) if not os.path.exists(params_file): logging.info("Could not find specified model parameters file: " "{}.".format(params_file)) return # exit ... else: logging.info("Using stored model parameters file: " "{}".format(params_file)) elif os.path.exists(model_params_store_fn): params_file = model_params_store_fn logging.info("Using stored model parameters file: " "{}".format(params_file)) # If a model params file is found, load JSON into a model params dict if params_file is not None: try: with open(params_file, 'r') as fp: model_params = json.load(fp) logging.info("Successfully loaded JSON model parameters!") except json.JSONDecodeError as ex: logging.info("Could not read JSON model parameters! " "Caught exception: {}".format(ex)) return # exit ... else: # Get the default base model params for the specified model version model_params = vision.MODEL_BASE_PARAMS[ARGS.model_version].copy() logging.info("No model parameters file found. 
" "Using base model parameters.") # Read and write training and model options from specified/default args train_options = {} var_args = vars(ARGS) for arg in var_args: if arg in vision.base_model_dict: if var_args[arg] != -1: # if value explicitly set for model param model_params[arg] = var_args[arg] else: train_options[arg] = getattr(ARGS, arg) logging.info("Training parameters:") for train_opt, opt_val in train_options.items(): logging.info("\t{}: {}".format(train_opt, opt_val)) train_options_path = os.path.join(model_dir, 'train_options.json') with open(train_options_path, 'w') as fp: logging.info("Writing most recent training parameters to file: {}" "".format(train_options_path)) json.dump(train_options, fp, indent=4) # -------------------------------------------------------------------------- # Add additional model parameters and save: # -------------------------------------------------------------------------- image_size = 105 if (ARGS.train_set == 'omniglot') else 28 model_params['model_version'] = ARGS.model_version # for later rebuilding model_params_path = os.path.join(model_dir, MODEL_PARAMS_STORE_FN) with open(model_params_path, 'w') as fp: print("Writing model parameters to file: {}".format(model_params_path)) json.dump(model_params, fp, indent=4) # For pixel matching model we simply want the model params, no training ... if ARGS.model_version == 'pixels': logging.info("Pure pixel matching model params ready for test!") return # -------------------------------------------------------------------------- # Load pre-train dataset: # -------------------------------------------------------------------------- if ARGS.train_set == 'omniglot': # load omniglot (default) train set logging.info("Training vision model on dataset: {}".format('omniglot')) train_data = data.load_omniglot( path=os.path.join(ARGS.data_dir, 'omniglot.npz')) inverse_data = True # inverse omniglot grayscale else: # load mnist train set logging.info("Training vision model on dataset: {}".format('mnist')) train_data = data.load_mnist() inverse_data = False # don't inverse mnist grayscale # -------------------------------------------------------------------------- # Data processing pipeline (placed on CPU so GPU is free): # -------------------------------------------------------------------------- with tf.device('/cpu:0'): # ------------------------------------ # Create (pre-)train dataset pipeline: # ------------------------------------
<filename>cheshire3/datamining/preParser.py<gh_stars>1-10 from __future__ import absolute_import import os import random import tempfile import commands import math import re import operator try: import cPickle as pickle except ImportError: import pickle from cheshire3.baseObjects import PreParser from cheshire3.document import StringDocument from cheshire3.exceptions import ConfigFileException from cheshire3.exceptions import MissingDependencyException class VectorRenumberPreParser(PreParser): _possibleSettings = {'termOffset': {'docs': "", 'type': int}} _possiblePaths = {'modelPath': {'docs': ""}} def __init__(self, session, config, parent): PreParser.__init__(self, session, config, parent) # Some settings that are needed at this stage self.offset = self.get_setting(session, 'termOffset', 0) def process_document(self, session, doc): (labels, vectors) = doc.get_raw(session) # Find max attr all = {} for v in vectors: all.update(v) keys = all.keys() keys.sort() maxattr = keys[-1] nattrs = len(keys) # Remap vectors to reduced space renumbers = range(self.offset, nattrs + self.offset) renumberhash = dict(zip(keys, renumbers)) newvectors = [] for vec in vectors: new = {} for (k, v) in vec.items(): new[renumberhash[k]] = v newvectors.append(new) # Pickle renumberhash pick = pickle.dumps(renumberhash) filename = self.get_path(session, 'modelPath', None) if not filename: dfp = self.get_path(session, 'defaultPath') filename = os.path.join(dfp, self.id + "_ATTRHASH.pickle") f = file(filename, 'w') f.write(pick) f.close() return StringDocument((labels, newvectors, nattrs)) class VectorUnRenumberPreParser(PreParser): _possibleSettings = {'termOffset': {'docs': "", 'type': int}} _possiblePaths = {'modelPath': {'docs': ""}} def __init__(self, session, config, parent): PreParser.__init__(self, session, config, parent) # Some settings that are needed at this stage self.offset = self.get_setting(session, 'termOffset', 0) filename = self.get_path(session, 'modelPath', None) if not filename: dfp = self.get_path(session, 'defaultPath') filename = os.path.join(dfp, self.id + "_ATTRHASH.pickle") self.modelPath = filename self.model = {} self.lastModTime = 0 self.load_model(session) def load_model(self, session): # Store last written time, in case we change filename = self.modelPath if os.path.exists(filename): si = os.stat(filename) lastMod = si.st_mtime if lastMod > self.lastModTime: inh = file(filename) inhash = pickle.load(inh) inh.close() # Now reverse our keys/values self.model = dict(zip(inhash.values(), inhash.keys())) si = os.stat(filename) self.lastModTime = si.st_mtime return 1 else: return 0 else: return 0 def process_document(self, session, doc): self.load_model(session) data = doc.get_raw(session) # Data should be list of list of ints to map g = self.model.get ndata = [] for d in data: n = [] for i in d: n.append(g(i)) ndata.append(n) return StringDocument(ndata) class ARMVectorPreParser(PreParser): def process_document(self, session, doc): (labels, vectors) = doc.get_raw(session)[:2] txt = [] for v in vectors: k = v.keys() if k: k.sort() txt.append(' '.join(map(str, k))) return StringDocument('\n'.join(txt)) class ARMPreParser(PreParser): _possibleSettings = { 'support': { 'docs': "Support value", 'type': float }, 'confidence': { 'docs': "Confidence value", 'type': float }, 'absoluteSupport': { 'docs': 'Number of records for supp, not %', 'type': int } } def __init__(self, session, config, parent): PreParser.__init__(self, session, config, parent) self.support = self.get_setting(session, 
'support', 10.0) self.absSupport = self.get_setting(session, 'absoluteSupport', 0) self.confidence = self.get_setting(session, 'confidence', 0.0) class TFPPreParser(ARMPreParser): _possibleSettings = { 'memory': { 'docs': "How much memory to let Java use", 'type': int } } _possiblePaths = { 'filePath': { 'docs': 'Directory where TFP lives' }, 'javaPath': { 'docs': 'Full path to java executable' } } def __init__(self, session, config, parent): ARMPreParser.__init__(self, session, config, parent) # Check we know where TFP is etc self.filePath = self.get_path(session, 'filePath', None) if not self.filePath: raise ConfigFileException("%s requires the path: filePath" "" % self.id) self.java = self.get_path(session, 'javaPath', 'java') self.memory = self.get_setting(session, 'memory', 1000) def process_document(self, session, doc): # Write out our temp file (qq, infn) = tempfile.mkstemp(".tfp") fh = file(infn, 'w') fh.write(doc.get_raw(session)) fh.close() # Go to TFP directory and run o = os.getcwd() os.chdir(self.filePath) results = commands.getoutput("%s -Xms%sm -Xmx%sm AprioriTFPapp " "-F../%s -S%s -C%s" "" % (self.java, self.memory, self.memory, infn, self.support, self.confidence) ) os.chdir(o) # Process results resultLines = results.split('\n') matches = [] for l in resultLines: m = freqRe.search(l) if m: (set, freq) = m.groups() matches.append((int(freq), set)) if not matches: # No FIS for some reason, return results?? return StringDocument(results) matches.sort(reverse=True) return StringDocument(matches) class Fimi1PreParser(ARMPreParser): _possibleSettings = { 'singleItems': { 'docs': '', 'type': int, 'options': '0|1' } } _possiblePaths = { 'filePath': { 'docs': 'Directory where fimi01 executable (apriori) lives' } } def __init__(self, session, config, parent): ARMPreParser.__init__(self, session, config, parent) # Check we know where TFP is etc self.filePath = self.get_path(session, 'filePath', None) #if not self.filePath: # raise ConfigFileException("%s requires the path: filePath" # "" % self.id) self.fisre = re.compile("([0-9 ]+) \(([0-9]+)\)") self.rulere = re.compile("([0-9 ]+) ==> ([0-9 ]+) " "\(([0-9.]+), ([0-9]+)\)") self.singleItems = self.get_setting(session, 'singleItems', 0) def process_document(self, session, doc): # write out our temp file (qq, infn) = tempfile.mkstemp(".arm") fh = file(infn, 'w') fh.write(doc.get_raw(session)) fh.close() if self.absSupport: t = len(doc.get_raw(session).split('\n')) self.support = (float(self.absSupport) / float(t)) * 100 (qq, outfn) = tempfile.mkstemp(".txt") # go to directory and run o = os.getcwd() #os.chdir(self.filePath) if self.confidence > 0: cmd = "apriori %s %s %f %s" % (infn, outfn, self.support / 100, self.confidence / 100) else: cmd = "apriori %s %s %s" % (infn, outfn, self.support / 100) results = commands.getoutput(cmd) #os.chdir(o) inh = file(outfn) fis = self.fisre rule = self.rulere singleItems = self.singleItems matches = [] rules = [] for line in inh: # Matching line looks like N N N (N) # Rules look like N N ==> N (f, N) m = fis.match(line) if m: (set, freq) = m.groups() if singleItems or set.find(' ') > -1: matches.append((int(freq), set)) elif self.confidence > 0: m = rule.match(line) if m: (ante, conc, conf, supp) = m.groups() al = map(int, ante.split(' ')) cl = map(int, conc.split(' ')) rules.append((float(conf), int(supp), al, cl)) inh.close() # Delete temp files! os.remove(outfn) os.remove(infn) if not matches: # No FIS for some reason, return results?? 
return StringDocument([results, []]) matches.sort(reverse=True) rules.sort(reverse=True) os.chdir(o) doc = StringDocument([matches, rules]) return doc class MagicFimi1PreParser(Fimi1PreParser): _possibleSettings = { 'minRules': { 'docs': "", 'type': int }, 'minItemsets': { 'docs': "", 'type': int } } def __init__(self, session, config, parent): Fimi1PreParser.__init__(self, session, config, parent) self.minRules = self.get_setting(session, 'minRules', -1) self.minFIS = self.get_setting(session, 'minItemsets', -1) if self.minRules > 0 and self.confidence <= 0: raise ConfigFileException("minRules setting not allowed without " "confidence setting on %s" % (self.id)) def process_document(self, session, doc): # try to find our best support threshold s = self.get_setting(session, 'support', 12.0) lr = -1 lf = -1 maxiters = 12 iters = 0 minRules = self.minRules minFIS = self.minFIS while True: iters += 1 if iters > maxiters: break lasts = self.support lastlr = lr lastlf = lf self.support = s d2 = Fimi1PreParser.process_document(self, session, doc) (fis, rules) = d2.get_raw(session) lr = len(rules) lf = len(fis) print "%s --> %s, %s" % (s, lr, lf) if minRules != -1: if lr == lastlr: # Keep going back, change didn't make any difference s = s * 1.5 elif lr >= minRules * 2: # go back s = (lasts + s) / 2.0 elif lr >= minRules: # Stop break elif lr * 3 < minRules: # Go forward a bit s = s / 2.0 elif lr * 7 < minRules: # Go forward a lot s = s / 3.0 else: s = s / 1.5 if minFIS != -1 and lf > minFIS: break elif minFIS != -1: if lf == lastlf: # Keep going back, change didn't make any difference s = s * 1.5 elif lf >= minFIS * 2: # Go back s = (lasts + s) / 2.0 elif lf >= minFIS: # Stop break elif lf * 3 < minFIS: # Go forward a bit s = s / 2.0 elif lf * 7 < minFIS: # Go forward a lot s = s / 3.0 else: s = s / 1.5 if minRules != -1 and lf > minRules: break self.support = s return d2 class FrequentSet(object): freq = 0 termids = [] avgs = [] avg = 0 pctg = 0 opctg = 0 ll = 0 surprise = 0 entropy = 0 gini = 0 termidFreqs = {} termidRules = {} document = None def __repr__(self): termList = [] ts = self.termidRules.items() ts.sort(key=lambda x: x[1], reverse=True) for t in ts: termList.append("%s %s" % (self.document.termHash[t[0]], t[1])) terms = " ".join(termList) return "<Rule Object: %s (%s)>" % (terms, self.freq)
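The renumber/un-renumber pre-parsers above hinge on one small transformation: collapsing the sparse attribute ids that occur across all vectors onto a dense, optionally offset range, while keeping the mapping so it can later be inverted. A minimal standalone sketch of that step (plain modern Python, not the cheshire3 API; the example vectors are made up):

def renumber_vectors(vectors, offset=0):
    """Map sparse attribute ids onto a dense range starting at ``offset``.

    Returns the renumbered vectors plus the forward mapping, so the reverse
    mapping can be rebuilt later (as the un-renumber step above does).
    """
    keys = sorted({k for vec in vectors for k in vec})
    mapping = {old: new for new, old in enumerate(keys, start=offset)}
    new_vectors = [{mapping[k]: v for k, v in vec.items()} for vec in vectors]
    return new_vectors, mapping

vectors = [{17: 1.0, 905: 2.0}, {905: 3.0, 10421: 1.0}]
new_vectors, mapping = renumber_vectors(vectors, offset=1)
print(new_vectors)    # [{1: 1.0, 2: 2.0}, {2: 3.0, 3: 1.0}]
inverse = {v: k for k, v in mapping.items()}   # used for un-renumbering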
<reponame>Mikuana/oops_fhir from pathlib import Path from fhir.resources.codesystem import CodeSystem from oops_fhir.utils import CodeSystemConcept __all__ = ["v3hl7PublishingSubSection"] _resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json")) class v3hl7PublishingSubSection: """ v3 Code System hl7PublishingSubSection Description: Codes for HL7 publishing sub-sections (business sub- categories) Status: active - Version: 2018-08-12 Copyright None http://terminology.hl7.org/CodeSystem/v3-hl7PublishingSubSection """ co = CodeSystemConcept( { "code": "CO", "definition": 'Description: Represents the HL7 V3 publishing sub-section that holds common or shared specifications within the Infrastructure Management (IM) section.\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.\r\n\n For publishing purposes, these domains are aggregated into sub-sections of related health care areas and these sub-sections are further aggregated into three major sets.', "display": "common", } ) """ common Description: Represents the HL7 V3 publishing sub-section that holds common or shared specifications within the Infrastructure Management (IM) section. UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications. For publishing purposes, these domains are aggregated into sub-sections of related health care areas and these sub-sections are further aggregated into three major sets. """ fi = CodeSystemConcept( { "code": "FI", "definition": 'Description: Represents the HL7 V3 publishing sub-section that holds specifications related to the management of financial information within the Administrative Management (AM) section.\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.\r\n\n For publishing purposes, these domains are aggregated into sub-sections of related health care areas and these sub-sections are further aggregated into three major sets.', "display": "financial information", } ) """ financial information Description: Represents the HL7 V3 publishing sub-section that holds specifications related to the management of financial information within the Administrative Management (AM) section. UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications. For publishing purposes, these domains are aggregated into sub-sections of related health care areas and these sub-sections are further aggregated into three major sets. 
""" mc = CodeSystemConcept( { "code": "MC", "definition": 'Description: Represents the HL7 V3 publishing sub-section that holds specifications related to the definition and control of interoperability messages within the Infrastructure Management (IM) section.\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.\r\n\n For publishing purposes, these domains are aggregated into sub-sections of related health care areas and these sub-sections are further aggregated into three major sets.', "display": "message control", } ) """ message control Description: Represents the HL7 V3 publishing sub-section that holds specifications related to the definition and control of interoperability messages within the Infrastructure Management (IM) section. UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications. For publishing purposes, these domains are aggregated into sub-sections of related health care areas and these sub-sections are further aggregated into three major sets. """ mf = CodeSystemConcept( { "code": "MF", "definition": 'Description: Represents the HL7 V3 publishing sub-section that holds specifications related to master file and registry management activities within the Infrastructure Management (IM) section.\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.\r\n\n For publishing purposes, these domains are aggregated into sub-sections of related health care areas and these sub-sections are further aggregated into three major sets.', "display": "master file", } ) """ master file Description: Represents the HL7 V3 publishing sub-section that holds specifications related to master file and registry management activities within the Infrastructure Management (IM) section. UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications. For publishing purposes, these domains are aggregated into sub-sections of related health care areas and these sub-sections are further aggregated into three major sets. 
""" po = CodeSystemConcept( { "code": "PO", "definition": 'Description: Represents the HL7 V3 publishing sub-section that holds specifications related to managing clinical operations within the Health and Clinical Management (HM) section.\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.\r\n\n For publishing purposes, these domains are aggregated into sub-sections of related health care areas and these sub-sections are further aggregated into three major sets.', "display": "operations", } ) """ operations Description: Represents the HL7 V3 publishing sub-section that holds specifications related to managing clinical operations within the Health and Clinical Management (HM) section. UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications. For publishing purposes, these domains are aggregated into sub-sections of related health care areas and these sub-sections are further aggregated into three major sets. """ pr = CodeSystemConcept( { "code": "PR", "definition": 'Description: Represents the HL7 V3 publishing sub-section that holds specifications related to the management of practice settings within the Administrative Management (AM) section.\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.\r\n\n For publishing purposes, these domains are aggregated into sub-sections of related health care areas and these sub-sections are further aggregated into three major sets.', "display": "practice", } ) """ practice Description: Represents the HL7 V3 publishing sub-section that holds specifications related to the management of practice settings within the Administrative Management (AM) section. UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications. For publishing purposes, these domains are aggregated into sub-sections of related health care areas and these sub-sections are further aggregated into three major sets. """ qu = CodeSystemConcept( { "code": "QU", "definition": 'Description: Represents the HL7 V3 publishing sub-section that holds specifications related to query/response activities within the Infrastructure Management (IM) section.\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.\r\n\n For publishing purposes, these domains are aggregated into sub-sections of related health care areas and these sub-sections are further aggregated into three major sets.', "display": "query", } ) """ query Description: Represents the HL7 V3 publishing sub-section that holds specifications related to query/response activities within the Infrastructure Management (IM) section. 
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications. For publishing purposes, these domains are aggregated into sub-sections of related health care areas and these sub-sections are further aggregated into three major sets. """
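# --- Hedged usage sketch (not part of oops_fhir itself) ----------------------
# The class above exposes each publishing sub-section as a class-level
# CodeSystemConcept whose attribute name is the lower-cased concept code
# (co, fi, mc, mf, po, pr, qu in this excerpt).  The hypothetical helper below
# only relies on that naming convention and plain getattr.

def find_publishing_subsection(code: str):
    """Look up one of the concepts above by its HL7 code (e.g. "MC")."""
    # Attribute names on the class mirror the lower-cased concept codes,
    # so a simple getattr with a default is enough here.
    return getattr(v3hl7PublishingSubSection, code.lower(), None)


# Example: returns the "message control" CodeSystemConcept, or None if the
# code is not defined on the class.
mc_concept = find_publishing_subsection("MC")
print(mc_concept)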
= val.bitset # pylint: disable=W0201,E0237 return tree return val def copy(self, deep=False): """Create a copy of this tree.""" if not deep: return self.__class__(self.label, self) return self.__class__.convert(self) def _frozen_class(self): """The frozen version of this class.""" return ImmutableTree def freeze(self, leaf_freezer=None): """:returns: an immutable version of this tree.""" frozen_class = self._frozen_class() if leaf_freezer is None: newcopy = frozen_class.convert(self) else: newcopy = self.copy(deep=True) for pos in newcopy.treepositions('leaves'): newcopy[pos] = leaf_freezer(newcopy[pos]) newcopy = frozen_class.convert(newcopy) hash(newcopy) # Make sure the leaves are hashable. return newcopy # === Parsing =============================================== @classmethod def parse(cls, s, parse_label=None, parse_leaf=int, label_pattern=None, leaf_pattern=None): """Parse a bracketed tree string and return the resulting tree. Trees are represented as nested bracketings, such as: ``(S (NP (NNP John)) (VP (V runs)))`` :param s: The string to parse :param parse_label, parse_leaf: If specified, these functions are applied to the substrings of s corresponding to labels and leaves (respectively) to obtain the values for those labels and leaves. They should have the following signature: parse_label(str) -> value :param label_pattern, leaf_pattern: Regular expression patterns used to find label and leaf substrings in s. By default, both label and leaf patterns are defined to match any sequence of non-whitespace non-bracket characters. :returns: A tree corresponding to the string representation s. If this class method is called using a subclass of Tree, then it will return a tree of that type.""" # Construct a regexp that will tokenize the string. open_b, close_b = '()' open_pattern, close_pattern = (re.escape(open_b), re.escape(close_b)) if label_pattern is None: label_pattern = r'[^\s%s%s]+' % (open_pattern, close_pattern) if leaf_pattern is None: leaf_pattern = r'[^\s%s%s]+' % (open_pattern, close_pattern) token_re = re.compile(r'%s\s*(%s)?|%s|(%s)' % ( open_pattern, label_pattern, close_pattern, leaf_pattern)) # Walk through each token, updating a stack of trees. stack = [(None, [])] # list of (label, children) tuples for match in token_re.finditer(s): token = match.group() if token[0] == open_b: # Beginning of a tree/subtree if len(stack) == 1 and stack[0][1]: cls._parse_error(s, match, 'end-of-string') label = token[1:].lstrip() if parse_label is not None: label = parse_label(label) stack.append((label, [])) elif token == close_b: # End of a tree/subtree if len(stack) == 1: if stack[0][1]: cls._parse_error(s, match, 'end-of-string') else: cls._parse_error(s, match, open_b) label, children = stack.pop() stack[-1][1].append(cls(label, children)) else: # Leaf node if len(stack) == 1: cls._parse_error(s, match, open_b) if parse_leaf is not None: token = parse_leaf(token) stack[-1][1].append(token) # check that we got exactly one complete tree. if len(stack) > 1: cls._parse_error(s, 'end-of-string', close_b) elif stack[0][1]: assert stack[0][0] is None and len(stack[0][1]) == 1 else: cls._parse_error(s, 'end-of-string', open_b) tree = stack[0][1][0] return tree @classmethod def _parse_error(cls, orig, match, expecting): """Display a friendly error message when parsing a tree string fails. :param orig: The string we're parsing. :param match: regexp match of the problem token. 
:param expecting: what we expected to see instead.""" # Construct a basic error message if match == 'end-of-string': pos, token = len(orig), 'end-of-string' else: pos, token = match.start(), match.group() msg = '%s.parse(): expected %r but got %r\n%sat index %d.' % ( cls.__name__, expecting, token, ' ' * 12, pos) # Add a display showing the error token itself: s = orig.replace('\n', ' ').replace('\t', ' ') offset = pos if len(s) > pos + 10: s = s[:pos + 10] + '...' if pos > 10: s = '...' + s[pos - 10:] offset = 13 msg += '\n%s"%s"\n%s^' % (' ' * 16, s, ' ' * (17 + offset)) msg += '\n%s' % orig raise ValueError(msg) # === String Representations ================================ def __repr__(self): childstr = ", ".join(repr(c) for c in self) return '%s(%r, [%s])' % (self.__class__.__name__, self.label, childstr) def __str__(self): return self._pprint_flat('()') def pprint(self, margin=70, indent=0, brackets='()'): """ :returns: A pretty-printed string representation of this tree. :param margin: The right margin at which to do line-wrapping. :param indent: The indentation level at which printing begins. This number is used to decide how far to indent subsequent lines.""" # Try writing it on one line. s = self._pprint_flat(brackets) if len(s) + indent < margin: return s # If it doesn't fit on one line, then write it on multi-lines. if isinstance(self.label, str): s = '%s%s' % (brackets[0], self.label) else: s = '%s%r' % (brackets[0], self.label) for child in self.children: if isinstance(child, Tree): s += '\n' + ' ' * (indent + 2) + child.pprint(margin, indent + 2, brackets) elif isinstance(child, tuple): s += '\n' + ' ' * (indent + 2) + '/'.join(child) elif isinstance(child, str): s += '\n' + ' ' * (indent + 2) + '%s' % child else: s += '\n' + ' ' * (indent + 2) + '%r' % child return s + brackets[1] def _pprint_flat(self, brackets): """Pretty-printing helper function.""" childstrs = [] for child in self.children: if isinstance(child, Tree): childstrs.append(child._pprint_flat(brackets)) elif isinstance(child, str) or child is None: childstrs.append(child or '') else: childstrs.append(repr(child)) if isinstance(self.label, str): return '%s%s %s%s' % (brackets[0], self.label, ' '.join(childstrs), brackets[1]) else: return '%s%r %s%s' % (brackets[0], self.label, ' '.join(childstrs), brackets[1]) def draw(self): """:returns: an ASCII art visualization of tree.""" return DrawTree(self, ['%d' % a for a in self.leaves()]).text() def _repr_svg_(self): """Return a rich representation for IPython notebook.""" return DrawTree(self, ['%d' % a for a in self.leaves()]).svg() class ImmutableTree(Tree): """A tree which may not be modified.; has a hash() value. NB: the ``label`` and ``children`` attributes should not be modified, but this is not enforced. This class has the following optimizations compared to Tree objects: - precomputed ``hash()`` value - precomputed ``leaves()`` value of each node - a bitset attribute recording the leaf indices dominated by each node """ __slots__ = ('_hash', '_leaves', 'bitset') def __init__(self, label_or_str, children=None): if children is None: return # see note in Tree.__init__() self._hash = self._leaves = None super(ImmutableTree, self).__init__(label_or_str, children) # Precompute our hash value. This ensures that we're really # immutable. It also means we only have to calculate it once. 
try: self._hash = hash((self.label, tuple(self))) except (TypeError, ValueError) as err: raise ValueError('ImmutableTree\'s label and children ' 'must be immutable:\n%s %r\n%r' % (self.label, self, err)) else: # self._leaves = Tree.leaves(self) self._addleaves() try: self.bitset = sum(1 << n for n in self._leaves) except TypeError as err: self.bitset = None def _addleaves(self): """Set leaves attribute of this node and its descendants.""" leaves = [] for child in self.children: if isinstance(child, Tree): if child._leaves is None: child._addleaves() leaves.extend(child._leaves) else: leaves.append(child) self._leaves = leaves def leaves(self): return self._leaves def __setitem__(self, _index, _value): raise ValueError('ImmutableTrees may not be modified') def __setslice__(self, _start, _stop, _value): raise ValueError('ImmutableTrees may not be modified') def __delitem__(self, _index): raise ValueError('ImmutableTrees may not be modified') def __delslice__(self, _start, _stop): raise ValueError('ImmutableTrees may not be modified') def __iadd__(self, _): raise ValueError('ImmutableTrees may not be modified') def __imul__(self, _): raise ValueError('ImmutableTrees may not be modified') def append(self, _): raise ValueError('ImmutableTrees may not be modified') def extend(self, _): raise ValueError('ImmutableTrees may not be modified') def pop(self, _=None): raise ValueError('ImmutableTrees may not be modified') def remove(self, _): raise ValueError('ImmutableTrees may not be modified') def __hash__(self): return self._hash class ParentedTree(Tree): """A Tree that maintains parent pointers for single-parented trees. The parent pointers are updated whenever any change is made to a tree's structure. The following read-only property values are automatically updated whenever the structure of a parented tree is modified: parent, parent_index, left_sibling, right_sibling, root, treeposition. Each ParentedTree may have at most one parent; i.e., subtrees may not be shared. Any attempt to reuse a single ParentedTree as a child of more than one parent (or as multiple children of the same parent) will cause a ValueError exception to be raised. ParentedTrees should never be used in the same tree as Trees or MultiParentedTrees. Mixing tree implementations may result in incorrect parent pointers and in TypeError exceptions. The ParentedTree class redefines all operations that modify a tree's structure to call two methods, which are used by subclasses to update parent information: - ``_setparent()`` is called whenever a new child is added. - ``_delparent()`` is called whenever a child is removed.""" __slots__ = () def __init__(self, label_or_str, children=None): if children is None: return # see note in Tree.__init__() self._parent = None super(ParentedTree, self).__init__(label_or_str, children) # iterate over self.children, *not* children, # because children might be an iterator. for i, child in enumerate(self.children): if isinstance(child, Tree): self._setparent(child, i, dry_run=True) for i, child in enumerate(self.children): if isinstance(child, Tree): self._setparent(child, i) def _frozen_class(self): return ImmutableParentedTree # === Properties ================================================= def _get_parent_index(self): """The index of this tree in its parent. i.e., ptree.parent[ptree.parent_index] is ptree. 
Note that ptree.parent_index is not necessarily equal to ptree.parent.index(ptree), since the index() method returns the first child that is _equal_ to its argument.""" if self._parent is None: return None for i, child in enumerate(self._parent): if child is self: return i raise ValueError('expected to find self in self._parent!') def _get_left_sibling(self): """The left sibling of this tree, or None if it has none.""" parent_index = self._get_parent_index() if self._parent and parent_index > 0: return self._parent[parent_index - 1] return None # no left sibling def _get_right_sibling(self): """The right sibling of this tree, or None if it has none.""" parent_index = self._get_parent_index() if self._parent and parent_index < (len(self._parent) - 1): return self._parent[parent_index + 1] return None # no right sibling def _get_treeposition(self): """The tree position of this tree, relative to the root of the tree. i.e., ptree.root[ptree.treeposition] is ptree.""" if self._parent is None: return () return (self._parent._get_treeposition() + (self._get_parent_index(), )) def _get_root(self): """:returns: the root of this tree.""" node = self while node._parent is not None: node = node._parent return node parent = property(lambda self: self._parent, doc="""The parent of this tree, or None if it has no parent.""") parent_index = property(_get_parent_index, doc=_get_parent_index.__doc__) left_sibling = property(_get_left_sibling, doc=_get_left_sibling.__doc__) right_sibling = property(_get_right_sibling, doc=_get_right_sibling.__doc__) root = property(_get_root, doc=_get_root.__doc__) treeposition = property(_get_treeposition, doc=_get_treeposition.__doc__) # === Parent Management ========================================== def _delparent(self, child, index): """Update child's parent pointer to not
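point to this tree."""

# --- Hedged usage sketch of the bracketed-tree API above ---------------------
# Assumes the Tree constructor and convert() defined earlier in this module,
# and integer leaves (parse_leaf=int is the default of Tree.parse); the
# example string is illustrative only.
t = Tree.parse('(S (NP 0) (VP 1 2))')
print(t.label)          # 'S'
print(t.pprint())       # '(S (NP 0) (VP 1 2))' -- fits within the default margin
frozen = t.freeze()     # ImmutableTree: hashable, with precomputed leaves/bitset
print(frozen.leaves())  # [0, 1, 2]
print(frozen.bitset)    # 7, i.e. bits 0, 1 and 2 set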
is visible,input X:") datetimeFormat = '%Y-%m-%d %H:%M:%S.%f' diff =datetime.strptime(str(f_time), datetimeFormat)\ - datetime.strptime(str(time), datetimeFormat) print("Time difference:") print(diff) tofs=TimeDelta(f_time-time) tofs = TimeDelta(np.linspace(0, 31* u.day, num=1)) # multiply Moon gravity by x so that effect is visible :) rr = propagate( initial, tofs, method=cowell, rtol=1e-6, ad=third_body, k_third=x *k_third, third_body=body_r, ) print("") print("Positions and velocity vectors are:") #print(str(rr.x)) #print([float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.x))]) r=[[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.x))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.y))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.z))][0]]* u.km v=[[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.v_x))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.v_y))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.v_z))][0]]* u.km / u.s f_orbit= Orbit.from_vectors(Earth, r, v) print(r) print(v) #f_orbit.plot() print("") print("Orbital elements:") print(f_orbit.classical()) #print("") #print(f_orbit.ecc) plotOrbit((f_orbit.a.value),(f_orbit.ecc.value),(f_orbit.inc.value),(f_orbit.raan.value),(f_orbit.argp.value),(f_orbit.nu.value)) #if(shadow_function(np.asarray(r),get_sun(Time(datetime.now(),format='datetime')),Earth.R.to(u.km).value)): #print("In shadow.") #else: #print("Not in shadow.") def no_pert_earth(initial,time,f_time): # No perturbation # parameters of a body C_D = 2.2 # dimentionless (any value would do) A = ((np.pi / 4.0) * (u.m ** 2)).to(u.km ** 2).value # km^2 m = 100 # kg B = C_D * A / m datetimeFormat = '%Y-%m-%d %H:%M:%S.%f' diff =datetime.strptime(str(f_time), datetimeFormat)\ - datetime.strptime(str(time), datetimeFormat) print("Time difference:") print(diff) tofs=TimeDelta(f_time-time) tofs = TimeDelta(np.linspace(0, 31* u.day, num=1)) rr =propagate( initial, tofs, method=cowell, ad=None ) print("") print("Positions and velocity vectors are:") #print(str(rr.x)) #print([float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.x))]) r=[[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.x))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.y))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.z))][0]]* u.km v=[[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.v_x))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.v_y))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.v_z))][0]]* u.km / u.s f_orbit= Orbit.from_vectors(Earth, r, v) print(r) print(v) #f_orbit.plot() print("") print("Orbital elements:") print(f_orbit.classical()) print("") #print("") #print(f_orbit.ecc) plotOrbit((f_orbit.a.value),(f_orbit.ecc.value),(f_orbit.inc.value),(f_orbit.raan.value),(f_orbit.argp.value),(f_orbit.nu.value)) #if(shadow_function(np.asarray(r),get_sun(Time(datetime.now(),format='datetime')),Earth.R.to(u.km).value)): #print("In shadow.") #else: #print("Not in shadow.") def j2_earth(initial,time,f_time): # perturbation for oblateness R = Earth.R.to(u.km).value k = Earth.k.to(u.km ** 3 / u.s ** 2).value #s0 = Orbit.from_classical(Earth, x[0] * u.km, x[1] * u.one, x[4] * u.deg, * u.deg, x[3] * u.deg, 0 * u.deg, epoch=Time(0, format='jd', scale='tdb')) #orbit = Orbit.circular( # Earth, 250 * u.km, epoch=Time(0.0, format="jd", scale="tdb")) # parameters of a body C_D = 2.2 # dimentionless (any value would do) A = ((np.pi / 4.0) * (u.m ** 2)).to(u.km ** 2).value # km^2 m = 100 # kg B = C_D * A / m J2=Earth.J2.value print("Use default constants 
or you want to customize?\n1.Default.\n2.Custom.") check=input() if(check=='1'): pass else: J2=input("Enter J2 constant") R=input("Enter radius of earth in km") datetimeFormat = '%Y-%m-%d %H:%M:%S.%f' diff =datetime.strptime(str(f_time), datetimeFormat)\ - datetime.strptime(str(time), datetimeFormat) print("Time difference:") print(diff) tofs=TimeDelta(f_time-time) tofs = TimeDelta(np.linspace(0, 31* u.day, num=1)) rr =propagate( initial, tofs, method=cowell, ad=J2_perturbation, J2=J2, R=R ) print("") print("Positions and velocity vectors are:") #print(str(rr.x)) #print([float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.x))]) r=[[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.x))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.y))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.z))][0]]* u.km v=[[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.v_x))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.v_y))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.v_z))][0]]* u.km / u.s f_orbit= Orbit.from_vectors(Earth, r, v) print(r) print(v) #f_orbit.plot() print("") print("Orbital elements:") print(f_orbit.classical()) print("") #print("") #print(f_orbit.ecc) plotOrbit((f_orbit.a.value),(f_orbit.ecc.value),(f_orbit.inc.value),(f_orbit.raan.value),(f_orbit.argp.value),(f_orbit.nu.value)) #if(shadow_function(np.asarray(r),get_sun(Time(datetime.now(),format='datetime')),Earth.R.to(u.km).value)): #print("In shadow.") #else: #print("Not in shadow.") def j3_earth(initial,time,f_time): # perturbation for oblateness R = Earth.R.to(u.km).value k = Earth.k.to(u.km ** 3 / u.s ** 2).value #s0 = Orbit.from_classical(Earth, x[0] * u.km, x[1] * u.one, x[4] * u.deg, * u.deg, x[3] * u.deg, 0 * u.deg, epoch=Time(0, format='jd', scale='tdb')) #orbit = Orbit.circular( # Earth, 250 * u.km, epoch=Time(0.0, format="jd", scale="tdb")) # parameters of a body C_D = 2.2 # dimentionless (any value would do) A = ((np.pi / 4.0) * (u.m ** 2)).to(u.km ** 2).value # km^2 m = 100 # kg B = C_D * A / m J3=Earth.J3.value print("Use default constants or you want to customize?\n1.Default.\n2.Custom.") check=input() if(check=='1'): pass else: J3=input("Enter J3 constant:") R=input("Enter radius of earth in km:") datetimeFormat = '%Y-%m-%d %H:%M:%S.%f' diff =datetime.strptime(str(f_time), datetimeFormat)\ - datetime.strptime(str(time), datetimeFormat) print("Time difference:") print(diff) tofs=TimeDelta(f_time-time) tofs = TimeDelta(np.linspace(0, 31* u.day, num=1)) rr =propagate( initial, tofs, method=cowell, ad=J3_perturbation, J3=J3, R=R ) print("") print("Positions and velocity vectors are:") #print(str(rr.x)) #print([float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.x))]) r=[[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.x))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.y))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.z))][0]]* u.km v=[[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.v_x))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.v_y))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.v_z))][0]]* u.km / u.s f_orbit= Orbit.from_vectors(Earth, r, v) print(r) print(v) #f_orbit.plot() print("") print("Orbital elements:") print(f_orbit.classical()) print("") #print("") #print(f_orbit.ecc) plotOrbit((f_orbit.a.value),(f_orbit.ecc.value),(f_orbit.inc.value),(f_orbit.raan.value),(f_orbit.argp.value),(f_orbit.nu.value)) #if(shadow_function(np.asarray(r),get_sun(Time(datetime.now(),format='datetime')),Earth.R.to(u.km).value)): #print("In shadow.") #else: 
#print("Not in shadow.") def atmd_earth(initial,time,f_time): # perturbation for atmospheric drag R = Earth.R.to(u.km).value k = Earth.k.to(u.km ** 3 / u.s ** 2).value #s0 = Orbit.from_classical(Earth, x[0] * u.km, x[1] * u.one, x[4] * u.deg, * u.deg, x[3] * u.deg, 0 * u.deg, epoch=Time(0, format='jd', scale='tdb')) #orbit = Orbit.circular( # Earth, 250 * u.km, epoch=Time(0.0, format="jd", scale="tdb")) # parameters of a body C_D = 2.2 # dimentionless (any value would do) A = ((np.pi / 4.0) * (u.m ** 2)).to(u.km ** 2).value # km^2 m = 100 # kg B = C_D * A / m # parameters of the atmosphere rho0 = Earth.rho0.to(u.kg / u.km ** 3).value # kg/km^3 H0 = Earth.H0.to(u.km).value print("Use default constants or you want to customize?\n1.Default.\n2.Custom.") check=input() if(check=='1'): pass else: m=input("Enter mass of the body in kg:") R=input("Enter radius of earth in km:") C_D=input("Enter C_D(dimension):") H0=input("Enter atmospheric parameter H0:") #rho0=input("Enter atmospheric parameter rho0:") R=R*u.km H0=H0*u.km #rho0=rho0*(u.kg / u.km ** 3) datetimeFormat = '%Y-%m-%d %H:%M:%S.%f' diff =datetime.strptime(str(f_time), datetimeFormat)\ - datetime.strptime(str(time), datetimeFormat) print("Time difference:") print(diff) tofs=TimeDelta(f_time-time) tofs = TimeDelta(np.linspace(0, 31* u.day, num=1)) #print("tofs:") #print(tofs) #print("ie:") #print(initial.epoch.iso) rr =propagate( initial, tofs, method=cowell, ad=atmospheric_drag, R=R, C_D=C_D, A=A, m=m, H0=H0, rho0=rho0, ) print("") print("Positions and velocity vectors are:") #print(str(rr.x)) #print([float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.x))]) r=[[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.x))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.y))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.z))][0]]* u.km v=[[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.v_x))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.v_y))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.v_z))][0]]* u.km / u.s f_orbit= Orbit.from_vectors(Earth, r, v) print(r) print(v) #f_orbit.plot() print("") print("Orbital elements:") print(f_orbit.classical()) print("") #print("") #print(f_orbit.ecc) plotOrbit((f_orbit.a.value),(f_orbit.ecc.value),(f_orbit.inc.value),(f_orbit.raan.value),(f_orbit.argp.value),(f_orbit.nu.value)) #if(shadow_function(np.asarray(r),get_sun(Time(datetime.now(),format='datetime')),Earth.R.to(u.km).value)): #print("In shadow.") #else: #print("Not in shadow.") def a_d_1(t0, state,k, J2, R,C_D, A, m, H0, rho0): # J2+atmospheric_drag return (J2_perturbation(t0, state, k, J2, R)+ atmospheric_drag(t0, state, k, R, C_D, A, m, H0, rho0)) def a_d_2(t0, state,k, J3, R,C_D, A, m, H0, rho0): # J2+atmospheric_drag return (J3_perturbation(t0, state, k, J3, R)+ atmospheric_drag(t0, state, k, R, C_D, A, m, H0, rho0)) #@njit def accel(t0, state, k): """Constant acceleration aligned with the velocity. 
""" # requires modification and validation v_vec = state[3:] norm_v = (v_vec * v_vec).sum() ** .5 return 1e-5 * v_vec / norm_v def custom(initial,choice,state,time,f_time): # requires modification and validation R = Earth.R.to(u.km).value k = Earth.k.to(u.km ** 3 / u.s ** 2).value #s0 = Orbit.from_classical(Earth, x[0] * u.km, x[1] * u.one, x[4] * u.deg, * u.deg, x[3] * u.deg, 0 * u.deg, epoch=Time(0, format='jd', scale='tdb')) #orbit = Orbit.circular( # Earth, 250 * u.km, epoch=Time(0.0, format="jd", scale="tdb")) # parameters of a body C_D = 2.2 # dimentionless (any value would do) A = ((np.pi / 4.0) * (u.m ** 2)).to(u.km ** 2).value # km^2 m = 100 # kg B = C_D * A / m # parameters of the atmosphere rho0 = Earth.rho0.to(u.kg / u.km ** 3).value # kg/km^3 H0 = Earth.H0.to(u.km).value #J2,J3 parameters J3=Earth.J3.value J2=Earth.J2.value datetimeFormat = '%Y-%m-%d %H:%M:%S.%f' diff =datetime.strptime(str(f_time), datetimeFormat)\ - datetime.strptime(str(time), datetimeFormat) print("Time difference:") print(diff) tofs=TimeDelta(f_time-time) tofs = TimeDelta(np.linspace(0, 31* u.day, num=1)) if(choice=='7'): rr =propagate( initial, tofs, method=cowell, ad=accel, #k=k, ) elif(choice=='5'): rr =propagate( initial, tofs, method=cowell, ad=a_d_1, #k=k, J2=J2, R=R, C_D=C_D, A=A, m=m, H0=H0, rho0=rho0 ) elif(choice=='6'): rr =propagate( initial, tofs, method=cowell, ad=a_d_2, #k=k, J3=J3, R=R, C_D=C_D, A=A, m=m, H0=H0, rho0=rho0 ) else: pass print("") print("Positions and velocity vectors are:") #print(str(rr.x)) #print([float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.x))]) r=[[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.x))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.y))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.z))][0]]* u.km v=[[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.v_x))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.v_y))][0],[float(s) for s in re.findall(r'-?\d+\.?\d*',str(rr.v_z))][0]]* u.km / u.s f_orbit= Orbit.from_vectors(Earth, r, v) print(r) print(v) #f_orbit.plot() print("") print("Orbital elements:") print(f_orbit.classical()) print("") #print("") #print(f_orbit.ecc) plotOrbit((f_orbit.a.value),(f_orbit.ecc.value),(f_orbit.inc.value),(f_orbit.raan.value),(f_orbit.argp.value),(f_orbit.nu.value)) #if(shadow_function(np.asarray(r),get_sun(Time(datetime.now(),format='datetime')),Earth.R.to(u.km).value)): #print("In shadow.") #else: #print("Not in shadow.") # converts iso format time to tle format, returns