ext: stringclasses (9 values)
sha: stringlengths (40 to 40)
content: stringlengths (3 to 1.04M)
py
1a4e91c71746e1f599b67ed9fe197e29cabfb105
import argparse
import os
import json
import joblib
from azureml.core import Run
from training.train_helper import split_data, train_model, get_model_metrics

dummy1 = os.path.abspath(os.curdir)
print(f"Root directory is {dummy1}")
print(f"Listing files in root directory {os.listdir(dummy1)}")

print("Create new features")

# Giving a description of this file when invoked on command line like python clean.py -h
parser = argparse.ArgumentParser("model training")
parser.add_argument("--model_name", type=str, help="Name of the model",
                    default="titanic_classifier_model.pkl")
parser.add_argument("--output_model", type=str, help="Model Output directory")

# Parse the arguments
args = parser.parse_args()
print(f"Argument 1 (Name of the model): {args.model_name}")
print(f"Argument 2 (Output Directory of model): {args.output_model}")

# Output path of this step
model_name = args.model_name
step_output_path = args.output_model

# Load the training parameters from the parameters file
with open("parameters.json") as f:
    pars = json.load(f)
try:
    train_args = pars["training"]
except KeyError:
    print("Could not load training values from file")
    train_args = {}

# Get the run context
run = Run.get_context()

# Get the feature eng data
feateng_data = run.input_datasets["feateng_data"]
feateng_df = feateng_data.to_pandas_dataframe()

# Tagging details to the run
run.input_datasets["training_data"] = feateng_data
run.parent.tag("dataset_id", value=feateng_data.id)

# Split the data into train and test
X_cols = ['Passenger_Class', 'Sex', 'SibSp', 'Parch', 'Fare']
target_col = "Survived"
data = split_data(feateng_df, X_cols, target_col)

# Train the model
model = train_model(data, train_args)

# Evaluate and log the metrics returned from the train function
metrics = get_model_metrics(model, data)
for (k, v) in metrics.items():
    run.log(k, v)
    run.parent.log(k, v)

# Pass model file to next step
os.makedirs(step_output_path, exist_ok=True)
model_output_path = os.path.join(step_output_path, model_name)
joblib.dump(value=model, filename=model_output_path)

# Also upload model file to run outputs for history
os.makedirs('outputs', exist_ok=True)
output_path = os.path.join('outputs', model_name)
joblib.dump(value=model, filename=output_path)
py
1a4e91dd35cec3ffff615a392920dfdbb802337b
from __future__ import absolute_import
from __future__ import print_function

import veriloggen
import _iter

expected_verilog = """
module blinkled
(
  input CLK,
  input RST,
  output reg [8-1:0] LED
);

  reg [32-1:0] count;

  always @(posedge CLK) begin
    if(RST) begin
      count <= 0;
    end else begin
      if(count == 1023) begin
        count <= 0;
      end else begin
        count <= count + 1;
      end
    end
  end

  always @(posedge CLK) begin
    if(RST) begin
      LED <= 1;
    end else begin
      if(count == 1023) begin
        LED[0] <= LED[7];
        LED[1] <= LED[0];
        LED[2] <= LED[1];
        LED[3] <= LED[2];
        LED[4] <= LED[3];
        LED[5] <= LED[4];
        LED[6] <= LED[5];
        LED[7] <= LED[6];
      end
    end
  end

endmodule
"""


def test():
    veriloggen.reset()
    test_module = _iter.mkLed()
    code = test_module.to_verilog()

    from pyverilog.vparser.parser import VerilogParser
    from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
    parser = VerilogParser()
    expected_ast = parser.parse(expected_verilog)
    codegen = ASTCodeGenerator()
    expected_code = codegen.visit(expected_ast)

    assert(expected_code == code)
py
1a4e91ecb6aa80b4092dd781abe9ffb3527dc855
Desc = cellDescClass("SDFFXL")
Desc.properties["cell_footprint"] = "sdff"
Desc.properties["area"] = "66.528000"
Desc.properties["cell_leakage_power"] = "1840.890240"
Desc.pinOrder = ['CK', 'D', 'IQ', 'IQN', 'Q', 'QN', 'SE', 'SI', 'next']
Desc.add_arc("CK", "SI", "setup_rising")
Desc.add_arc("CK", "SI", "hold_rising")
Desc.add_arc("CK", "SE", "setup_rising")
Desc.add_arc("CK", "SE", "hold_rising")
Desc.add_arc("CK", "D", "setup_rising")
Desc.add_arc("CK", "D", "hold_rising")
Desc.add_arc("CK", "Q", "rising_edge")
Desc.add_arc("CK", "QN", "rising_edge")
Desc.add_param("area", 66.528000)
Desc.add_pin("SI", "input")
Desc.add_pin("SE", "input")
Desc.add_pin("D", "input")
Desc.set_pin_job("CK", "clock")
Desc.add_pin("CK", "input")
Desc.add_pin("Q", "output")
Desc.add_pin_func("Q", "unknown")
Desc.add_pin("QN", "output")
Desc.add_pin_func("QN", "unknown")
Desc.add_pin("IQ", "output")
Desc.add_pin_func("IQ", "unknown")
Desc.add_pin("IQN", "output")
Desc.add_pin_func("IQN", "unknown")
Desc.add_pin("next", "output")
Desc.add_pin_func("next", "unknown")
Desc.set_job("flipflop")
CellLib["SDFFXL"] = Desc
py
1a4e92bd24710cfe90775506de08523bc9829aee
import jesse.helpers as jh
from jesse.enums import sides, order_statuses
from jesse.models import Order
from jesse.enums import order_types
from .utils import set_up, single_route_backtest


def test_cancel_order():
    set_up()

    order = Order({
        'id': jh.generate_unique_id(),
        'exchange': 'Sandbox',
        'symbol': 'BTC-USDT',
        'type': order_types.LIMIT,
        'price': 129.33,
        'qty': 10.2041,
        'side': sides.BUY,
        'status': order_statuses.ACTIVE,
        'created_at': jh.now_to_timestamp(),
    })

    assert order.is_canceled is False

    order.cancel()

    assert order.is_canceled is True
    assert order.canceled_at == jh.now_to_timestamp()


def test_execute_order():
    set_up()

    order = Order({
        'id': jh.generate_unique_id(),
        'symbol': 'BTC-USDT',
        'exchange': 'Sandbox',
        'type': order_types.LIMIT,
        'price': 129.33,
        'qty': 10.2041,
        'side': sides.BUY,
        'status': order_statuses.ACTIVE,
        'created_at': jh.now_to_timestamp(),
    })

    assert order.is_executed is False
    assert order.executed_at is None

    order.execute()

    assert order.is_executed is True
    assert order.executed_at == jh.now_to_timestamp()


def test_order_is_stop_loss_property():
    single_route_backtest('TestOrderIsStopLossProperty')


def test_order_is_take_profit_property():
    single_route_backtest('TestOrderIsTakeProfitProperty')
py
1a4e9311c4f24581b4abdb00dd130b785da8006a
#!/usr/bin/env python

'''
TnAmplicons

Analysis of Tn-Seq data: transposon insertion site detection. The initial
version processes the samples, trims primers (transposon sequence), detects
the TA genomic insertion site, and maps the resulting files to the genome.
A later version will expand on analysis.
'''

import sys

try:
    from setuptools import setup, Extension
except ImportError:
    from distutils.core import setup, Extension

editdist = Extension('editdist', sources=['lib/editdist.c'])
trim = Extension('trim', sources=['lib/trim.c'])

try:
    version_num = open("VERSION", "r+").readline().strip()
except:
    sys.stderr.write("Error retrieving version_number")

config = \
    {
        'description': 'Processing of Illumina amplicon projects - TnSeq version',
        'author': 'Matt Settles',
        'url': 'https://github.com/msettles/TnAmplicons',
        'download_url': 'https://github.com/msettles/TnAmplicons',
        'author_email': '[email protected]',
        'version': version_num,
        'install_requires': [],
        'packages': ['TnAmplicons'],
        'scripts': ['bin/TnAmplicons'],
        'name': 'TnAmplicons',
        "ext_package": 'TnAmplicons',
        'ext_modules': [editdist, trim]
    }

setup(**config)
py
1a4e941c97329fef3be9dd17fb87da73ec62c23e
import numpy as np import torch class SamplingAlgo: def __init__(self, t_prof, env_bldr, n_envs_avg, n_envs_br, br_buf2, avg_buf2, br_learner2, avg_learner2): if t_prof.nn_type == "recurrent": from PokerRL.rl.buffers.BRMemorySaverRNN import BRMemorySaverRNN from NFSP.workers.la.action_buffer.ActionBufferRNN import AvgMemorySaverRNN BR_MEM_SAVER = BRMemorySaverRNN AVG_MEM_SAVER = AvgMemorySaverRNN elif t_prof.nn_type == "feedforward": from PokerRL.rl.buffers.BRMemorySaverFLAT import BRMemorySaverFLAT from NFSP.workers.la.action_buffer.ActionBufferFLAT import AvgMemorySaverFLAT BR_MEM_SAVER = BRMemorySaverFLAT AVG_MEM_SAVER = AvgMemorySaverFLAT else: raise ValueError(t_prof.nn_type) self._t_prof = t_prof self._env_bldr = env_bldr self._antic = self._t_prof.antic_start self._br_buf2 = br_buf2 self._avg_buf2 = avg_buf2 self._br_learner2 = br_learner2 self._avg_learner2 = avg_learner2 self._avg_memory_savers = [ [ AVG_MEM_SAVER(env_bldr=self._env_bldr, buffer=self._avg_buf2[p]) for _ in range(n_envs_avg) ] for p in range(self._env_bldr.N_SEATS) ] self._br_memory_savers = [ [ BR_MEM_SAVER(env_bldr=self._env_bldr, buffer=self._br_buf2[p]) for _ in range(n_envs_br) ] for p in range(self._env_bldr.N_SEATS) ] @property def antic(self): return self._antic @antic.setter def antic(self, value): self._antic = value def play(self, nfsp_iter): raise NotImplementedError class SeatActorBase: AVG = 1 BR = 2 @staticmethod def act_mixed(owner, current_policy_tags, step_wrappers, br_learner, avg_learner, random_prob): """ play with p*eps*rnd + p*(1-eps)*br and (1-p)*avg policy """ with torch.no_grad(): # """""""""""""""""""""""" # Construct # """""""""""""""""""""""" _sw_list_AVG = [] _sw_list_BR = [] for sw in step_wrappers: if current_policy_tags[sw.env_idx] == SeatActorBase.AVG: _sw_list_AVG.append(sw) elif current_policy_tags[sw.env_idx] == SeatActorBase.BR: _sw_list_BR.append(sw) else: raise ValueError(current_policy_tags[sw.env_idx]) # """""""""""""""""""""""" # AVG actions # """""""""""""""""""""""" SeatActorBase.act_avg(owner=owner, step_wrappers=_sw_list_AVG, avg_learner=avg_learner) # """""""""""""""""""""""" # BR actions # """""""""""""""""""""""" if random_prob > 0: SeatActorBase.act_eps_greedy(owner=owner, step_wrappers=_sw_list_BR, br_learner=br_learner, random_prob=random_prob) else: SeatActorBase.act_greedy(owner=owner, step_wrappers=_sw_list_BR, br_learner=br_learner) @staticmethod def act_constant_eps_greedy(owner, step_wrappers, br_learner): """ BR + eps """ with torch.no_grad(): if len(step_wrappers) > 0: actions, was_rnd = SeatActorBase.choose_a_br(br_learner=br_learner, owner=owner, step_wrappers=step_wrappers, random_prob=br_learner.eps) for i, sw in enumerate(step_wrappers): sw.action = actions[i].item() sw.action_was_random = was_rnd @staticmethod def act_eps_greedy(owner, step_wrappers, br_learner, random_prob=None): """ BR + eps """ with torch.no_grad(): if len(step_wrappers) > 0: actions, was_rnd = SeatActorBase.choose_a_br(br_learner=br_learner, owner=owner, step_wrappers=step_wrappers, random_prob=br_learner.eps if random_prob is None else random_prob) for i, sw in enumerate(step_wrappers): sw.action = actions[i].item() sw.action_was_random = was_rnd @staticmethod def act_greedy(owner, step_wrappers, br_learner): """ BR + eps """ with torch.no_grad(): if len(step_wrappers) > 0: actions, was_rnd = SeatActorBase.choose_a_br(br_learner=br_learner, owner=owner, step_wrappers=step_wrappers, random_prob=0) for i, sw in enumerate(step_wrappers): sw.action = actions[i].item() 
sw.action_was_random = was_rnd @staticmethod def act_avg(owner, step_wrappers, avg_learner): if len(step_wrappers) > 0: a_probs = avg_learner.get_a_probs( pub_obses=[sw.obs for sw in step_wrappers], range_idxs=np.array([sw.range_idxs[owner] for sw in step_wrappers], dtype=np.int32), legal_actions_lists=[sw.legal_actions_list for sw in step_wrappers], ) _n_actions_arranged = np.arange(a_probs.shape[-1]) for i, sw in enumerate(step_wrappers): sw.action = np.random.choice( a=_n_actions_arranged, p=a_probs[i], replace=True ).item() sw.action_was_random = False @staticmethod def choose_a_br(owner, br_learner, step_wrappers, random_prob): """ TODO maybe allow some explore some BR Returns: actions, was_random?: """ pub_obses = [sw.obs for sw in step_wrappers] range_idxs = [sw.range_idxs[owner] for sw in step_wrappers] legal_actions_lists = [sw.legal_actions_list for sw in step_wrappers] # """"""""""""""""""""" # Perhaps explore # """"""""""""""""""""" if random_prob > np.random.random(): actions = np.array([ l[np.random.randint(low=0, high=len(l))] for l in legal_actions_lists ]) return actions, True with torch.no_grad(): # """"""""""""""""""""" # Play by BR # """"""""""""""""""""" actions = br_learner.select_br_a( pub_obses=pub_obses, range_idxs=range_idxs, legal_actions_lists=legal_actions_lists, ) return actions, False @staticmethod def pick_training_policy(br_prob): if br_prob < np.random.random(): return SeatActorBase.AVG return SeatActorBase.BR
py
1a4e946cb88656c1b0c68cbd5b15495e6f243c2c
#!/usr/bin/env python
"""
Transfer net parameters between Caffe versions

This (URL: https://groups.google.com/forum/#!topic/caffe-users/aeqqtyTXogY)
discussion might be helpful.

Modified from https://gist.github.com/shelhamer/ec8f96517fed5a430635

How to Use?
Let's say, we would like to transplant the caffemodel trained with Caffe of
version A to Caffe of version B. Usually, you should put this file under the
caffe/python, where the caffemodel is trained with. Here it is A.

Author: Kun Wang
"""
from __future__ import division
from argparse import ArgumentParser
import sys
import os.path as osp
import numpy as np

pycaffe_dir = osp.dirname(__file__)
if osp.join(pycaffe_dir) not in sys.path:
    sys.path.insert(0, pycaffe_dir)
import caffe


def transplant(new_net, net):
    for p in net.params:
        if p not in new_net.params:
            print 'dropping', p
            continue
        for i in range(len(net.params[p])):
            if net.params[p][i].data.shape != new_net.params[p][i].data.shape:
                print 'coercing', p, i, 'from', net.params[p][i].data.shape, 'to', new_net.params[p][i].data.shape
            else:
                print 'copying', p, i
            new_net.params[p][i].data.flat = net.params[p][i].data.flat


def main(args):
    # Set default output file names
    if args.target_model is None:
        file_name = osp.splitext(args.model)[0]
        args.target_model = file_name + '_converted.prototxt'
    if args.target_weights is None:
        file_name = osp.splitext(args.weights)[0]
        args.target_weights = file_name + '_converted.caffemodel'
    # Load source weights
    source_weights = caffe.Net(args.model, args.weights, caffe.TEST)
    target_weights = caffe.Net(args.target_model, caffe.TEST)
    transplant(target_weights, source_weights)
    # Save the caffemodel
    target_weights.save(args.target_weights)


if __name__ == '__main__':
    parser = ArgumentParser(
        description="Transfer net parameters between Caffe versions")
    parser.add_argument('model', help="The source net definition prototxt")
    parser.add_argument('weights', help="The source weights caffemodel")
    parser.add_argument('--target_model')
    parser.add_argument('--target_weights')
    args = parser.parse_args()
    # Transplanting
    main(args)
py
1a4e948b7f8212db5fa06f1bad4a05873b5b752b
import os import shutil import tempfile from datetime import datetime from typing import Text, List, Dict import requests from fastapi import File from fastapi.background import BackgroundTasks from fastapi.security import OAuth2PasswordBearer from loguru import logger from mongoengine.errors import ValidationError from rasa.shared.constants import DEFAULT_DATA_PATH from rasa.shared.nlu.constants import TEXT from rasa.shared.nlu.training_data import entities_parser from rasa.shared.nlu.training_data.formats.markdown import MarkdownReader from .constant import ALLOWED_NLU_FORMATS, ALLOWED_STORIES_FORMATS, \ ALLOWED_DOMAIN_FORMATS, ALLOWED_CONFIG_FORMATS, EVENT_STATUS, ALLOWED_RULES_FORMATS, ALLOWED_HTTP_ACTIONS_FORMATS, \ REQUIREMENTS from .constant import RESPONSE from .training_data_generation_processor import TrainingDataGenerationProcessor from ...api.models import HttpActionParametersResponse, HttpActionConfigResponse from ...exceptions import AppException from ...shared.actions.data_objects import HttpActionConfig from ...shared.models import StoryStepType from ...shared.utils import Utility class DataUtility: """Class contains logic for various utilities""" oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/api/auth/login") oauth2_scheme_non_strict = OAuth2PasswordBearer(tokenUrl="/api/auth/login", auto_error=False) markdown_reader = MarkdownReader() @staticmethod def prepare_nlu_text(example: Text, entities: List[Dict]): """ combines plain text and entities into training example format :param example: training example plain text :param entities: list of entities :return: trianing example combine with enities """ if not Utility.check_empty_string(example): if entities: from rasa.shared.nlu.training_data.formats.rasa_yaml import RasaYAMLWriter example = RasaYAMLWriter.generate_message({'text': example, "entities": entities}) return example @staticmethod async def save_uploaded_data(bot: Text, training_files: [File]): if not training_files: raise AppException("No files received!") if training_files[0].filename.endswith('.zip'): bot_data_home_dir = await DataUtility.save_training_files_as_zip(bot, training_files[0]) else: bot_data_home_dir = os.path.join('training_data', bot, str(datetime.utcnow())) data_path = os.path.join(bot_data_home_dir, DEFAULT_DATA_PATH) Utility.make_dirs(data_path) for file in training_files: if file.filename in ALLOWED_NLU_FORMATS.union(ALLOWED_STORIES_FORMATS).union(ALLOWED_RULES_FORMATS): path = os.path.join(data_path, file.filename) Utility.write_to_file(path, await file.read()) elif file.filename in ALLOWED_CONFIG_FORMATS.union(ALLOWED_DOMAIN_FORMATS).union( ALLOWED_HTTP_ACTIONS_FORMATS): path = os.path.join(bot_data_home_dir, file.filename) Utility.write_to_file(path, await file.read()) return bot_data_home_dir @staticmethod async def save_training_files_as_zip(bot: Text, training_file: File): tmp_dir = tempfile.mkdtemp() try: zipped_file = os.path.join(tmp_dir, training_file.filename) Utility.write_to_file(zipped_file, await training_file.read()) unzip_path = os.path.join('training_data', bot, str(datetime.utcnow())) shutil.unpack_archive(zipped_file, unzip_path, 'zip') return unzip_path except Exception as e: logger.error(e) raise AppException("Invalid zip") finally: Utility.delete_directory(tmp_dir) @staticmethod def validate_and_get_requirements(bot_data_home_dir: Text, delete_dir_on_exception: bool = False): """ Checks whether at least one of the required files are present and finds other files required for validation during import. 
@param bot_data_home_dir: path where data exists @param delete_dir_on_exception: whether directory needs to be deleted in case of exception. """ requirements = set() data_path = os.path.join(bot_data_home_dir, DEFAULT_DATA_PATH) if not os.path.exists(bot_data_home_dir): raise AppException("Bot data home directory not found") files_received = set(os.listdir(bot_data_home_dir)) if os.path.exists(data_path): files_received = files_received.union(os.listdir(data_path)) if ALLOWED_NLU_FORMATS.intersection(files_received).__len__() < 1: requirements.add('nlu') if ALLOWED_STORIES_FORMATS.intersection(files_received).__len__() < 1: requirements.add('stories') if ALLOWED_DOMAIN_FORMATS.intersection(files_received).__len__() < 1: requirements.add('domain') if ALLOWED_CONFIG_FORMATS.intersection(files_received).__len__() < 1: requirements.add('config') if ALLOWED_RULES_FORMATS.intersection(files_received).__len__() < 1: requirements.add('rules') if ALLOWED_HTTP_ACTIONS_FORMATS.intersection(files_received).__len__() < 1: requirements.add('http_actions') if requirements == REQUIREMENTS: if delete_dir_on_exception: Utility.delete_directory(bot_data_home_dir) raise AppException('Invalid files received') return requirements @staticmethod async def save_training_files(nlu: File, domain: File, config: File, stories: File, rules: File = None, http_action: File = None): """ convert mongo data to individual files :param nlu: nlu data :param domain: domain data :param stories: stories data :param config: config data :param rules: rules data :param http_action: http actions data :return: files path """ training_file_loc = {} tmp_dir = tempfile.mkdtemp() data_path = os.path.join(tmp_dir, DEFAULT_DATA_PATH) os.makedirs(data_path) nlu_path = os.path.join(data_path, nlu.filename) domain_path = os.path.join(tmp_dir, domain.filename) stories_path = os.path.join(data_path, stories.filename) config_path = os.path.join(tmp_dir, config.filename) Utility.write_to_file(nlu_path, await nlu.read()) Utility.write_to_file(domain_path, await domain.read()) Utility.write_to_file(stories_path, await stories.read()) Utility.write_to_file(config_path, await config.read()) training_file_loc['rules'] = await DataUtility.write_rule_data(data_path, rules) training_file_loc['http_action'] = await DataUtility.write_http_data(tmp_dir, http_action) training_file_loc['nlu'] = nlu_path training_file_loc['config'] = config_path training_file_loc['stories'] = stories_path training_file_loc['domain'] = domain_path training_file_loc['root'] = tmp_dir return training_file_loc @staticmethod async def write_rule_data(data_path: str, rules: File = None): """ writes the rule data to file and returns the file path :param data_path: path of the data files :param rules: rules data :return: rule file path """ if rules and rules.filename: rules_path = os.path.join(data_path, rules.filename) Utility.write_to_file(rules_path, await rules.read()) return rules_path else: return None @staticmethod async def write_http_data(temp_path: str, http_action: File = None): """ writes the http_actions data to file and returns the file path :param temp_path: path of the temporary directory :param http_action: http_action data :return: http_action file path """ if http_action and http_action.filename: http_path = os.path.join(temp_path, http_action.filename) Utility.write_to_file(http_path, await http_action.read()) return http_path else: return None @staticmethod def extract_text_and_entities(text: Text): """ extract entities and plain text from markdown intent example 
:param text: markdown intent example :return: plain intent, list of extracted entities """ example = entities_parser.parse_training_example(text) return example.get(TEXT), example.get('entities', None) @staticmethod def __extract_response_button(buttons: Dict): """ used to prepare ResponseButton by extracting buttons configuration from bot utterance :param buttons: button configuration in bot response :return: yields ResponseButton """ from .data_objects import ResponseButton for button in buttons: yield ResponseButton._from_son(button) @staticmethod def prepare_response(value: Dict): """ used to prepare bot utterance either Text or Custom for saving in Mongo :param value: utterance value :return: response type, response object """ from .data_objects import ResponseText, ResponseCustom if RESPONSE.Text.value in value: response_text = ResponseText() response_text.text = str(value[RESPONSE.Text.value]).strip() if RESPONSE.IMAGE.value in value: response_text.image = value[RESPONSE.IMAGE.value] if RESPONSE.CHANNEL.value in value: response_text.channel = value["channel"] if RESPONSE.BUTTONS.value in value: response_text.buttons = list( DataUtility.__extract_response_button(value[RESPONSE.BUTTONS.value]) ) data = response_text response_type = "text" elif RESPONSE.CUSTOM.value in value: data = ResponseCustom._from_son( {RESPONSE.CUSTOM.value: value[RESPONSE.CUSTOM.value]} ) response_type = "custom" else: response_type = None data = None return response_type, data @staticmethod def get_rasa_core_policies(): from rasa.core.policies import registry file1 = open(registry.__file__, 'r') Lines = file1.readlines() policy = [] for line in Lines: if line.startswith("from"): items = line.split("import")[1].split(",") for item in items: policy.append(item.strip()) return policy @staticmethod def build_http_response_object(http_action_config: HttpActionConfig, user: str, bot: str): """ Builds a new HttpActionConfigResponse object from HttpActionConfig object. 
:param http_action_config: HttpActionConfig object containing configuration for the Http action :param user: user id :param bot: bot id :return: HttpActionConfigResponse containing configuration for Http action """ http_params = [ HttpActionParametersResponse(key=param.key, value=param.value, parameter_type=param.parameter_type) for param in http_action_config.params_list] response = HttpActionConfigResponse( auth_token=http_action_config.auth_token, action_name=http_action_config.action_name, response=http_action_config.response, http_url=http_action_config.http_url, request_method=http_action_config.request_method, params_list=http_params, user=user, bot=bot ) return response @staticmethod def trigger_data_generation_event(bot: str, user: str, token: str): try: event_url = Utility.environment['data_generation']['event_url'] logger.info("Training data generator event started") response = requests.post(event_url, headers={'content-type': 'application/json'}, json={'user': user, 'token': token}) logger.info("Training data generator event completed" + response.content.decode('utf8')) except Exception as e: logger.error(str(e)) TrainingDataGenerationProcessor.set_status(bot=bot, user=user, status=EVENT_STATUS.FAIL.value, exception=str(e)) @staticmethod def get_interpreter(model_path): from rasa.model import get_model, get_model_subdirectories from rasa.core.interpreter import create_interpreter try: with get_model(model_path) as unpacked_model: _, nlu_model = get_model_subdirectories(unpacked_model) _interpreter = create_interpreter( nlu_model ) except Exception: logger.debug(f"Could not load interpreter from '{model_path}'.") _interpreter = None return _interpreter @staticmethod def train_model(background_tasks: BackgroundTasks, bot: Text, user: Text, email: Text, process_type: Text): """ train model common code when uploading files or training a model :param background_tasks: fast api background task :param bot: bot id :param user: user id :param email: user email for generating token for reload :param process_type: either upload or train """ from ...shared.data.model_processor import ModelProcessor from ...shared.auth import Authentication from ...shared.data.constant import MODEL_TRAINING_STATUS from ...train import start_training exception = process_type != 'upload' ModelProcessor.is_training_inprogress(bot, raise_exception=exception) ModelProcessor.is_daily_training_limit_exceeded(bot, raise_exception=exception) ModelProcessor.set_training_status( bot=bot, user=user, status=MODEL_TRAINING_STATUS.INPROGRESS.value, ) token = Authentication.create_access_token(data={"sub": email}, token_expire=180) background_tasks.add_task( start_training, bot, user, token.decode('utf8') ) @staticmethod def validate_flow_events(events, type, name): from rasa.shared.core.constants import RULE_SNIPPET_ACTION_NAME Utility.validate_document_list(events) if type == "STORY" and events[0].type != "user": raise ValidationError("First event should be an user") if type == "RULE": if events[0].name == RULE_SNIPPET_ACTION_NAME and events[0].type == "action": if events[1].type != "user": raise ValidationError('First event should be an user or conversation_start action') else: if events[0].type != "user": raise ValidationError('First event should be an user or conversation_start action') if events[len(events) - 1].type == "user": raise ValidationError("user event should be followed by action") intents = 0 for i, j in enumerate(range(1, len(events))): if events[i].type == "user": intents = intents + 1 if events[i].type 
== "user" and events[j].type == "user": raise ValidationError("Found 2 consecutive user events") if type == "RULE" and intents > 1: raise ValidationError( f"""Found rules '{name}' that contain more than user event.\nPlease use stories for this case""") @staticmethod def load_fallback_actions(bot: Text): from .processor import MongoProcessor mongo_processor = MongoProcessor() config = mongo_processor.load_config(bot) fallback_action = DataUtility.parse_fallback_action(config) nlu_fallback_action = MongoProcessor.fetch_nlu_fallback_action(bot) return fallback_action, nlu_fallback_action @staticmethod def parse_fallback_action(config: Dict): fallback_action = "action_default_fallback" action_fallback = next((comp for comp in config['policies'] if comp["name"] == "RulePolicy"), None) if action_fallback: fallback_action = action_fallback.get("core_fallback_action_name", fallback_action) return fallback_action @staticmethod def load_default_actions(): from kairon.importer.validator.file_validator import DEFAULT_ACTIONS return list(DEFAULT_ACTIONS - {"action_default_fallback", "action_two_stage_fallback"}) @staticmethod def get_template_type(story: Dict): steps = story['steps'] if len(steps) == 2 and steps[0]['type'] == StoryStepType.intent and steps[1]['type'] == StoryStepType.bot: template_type = 'Q&A' else: template_type = 'CUSTOM' return template_type
py
1a4e94fac1c362a7463137b9b6e7eb9d3ddb1ea3
#import math

import numpy as np
import scipy.optimize

from . import factor
from .material import char_mat_strength

__all__ = [
    "pipe_ovality",
    "pipe_char_elastic_pressure",
    "pipe_char_plastic_pressure",
    "char_collapse_pressure_num",
    "char_collapse_pressure",
    "pipe_collapse_unity",
    "pipe_collapse_all"
]

# from .pipe_collapse import pipe_char_elastic_pressure
# from .pipe_collapse import pipe_char_plastic_pressure
# from .pipe_collapse import pipe_ovality
# from .pipe_collapse import char_collapse_pressure
# from .pipe_collapse import pipe_collapse_unity

# def pipeCollapse(t,D,P_c,SMYS,nu=0.3,E=207.*10**9, f_o=None):
#     '''DNV-OS-F101:2010 Sec.5 D401, collapse due to external pressure '''
#     P_el = 2*E*(t/D)**3/(1-nu**2)
#     P_p = f_y*alpha_fab*(2*t/D)
#     if not f_o:
#         f_o = (D_max-D_min)/D
#         if f_o<0.005: f_o = 0.005
#     return (P_c-P_el)*(P_c**2-P_p**2) - P_c*P_el*P_p*f_o*D/t


def pipe_ovality(D, D_max=None, D_min=None) -> "O_0":
    """Calculate pipe ovality.

    Reference:
    DNVGL-ST-F101 (2017-12)
        sec:5.4.4.2 eq:5.14 page:96 $O_0$
    """
    if D_max is None:
        D_max = D
    if D_min is None:
        D_min = D
    O_0 = (D_max - D_min) / D
    if O_0 < 0.005:
        O_0 = 0.005
    return O_0


def pipe_char_elastic_pressure(t, D, nu=0.3, E=207.0*10**9) -> "p_el":
    """Calculate p_el.

    Reference:
    DNVGL-ST-F101 (2017-12)
        sec:5.4.4.2 eq:5.12 page:96 $p_{el}$
    """
    p_el = 2*E*(t/D)**3/(1-nu**2)
    return p_el


def pipe_char_plastic_pressure(t, D, f_y, alpha_fab) -> "p_p":
    """Calculate characteristic plastic pressure p_p.

    Reference:
    DNVGL-ST-F101 (2017-12)
        sec:5.4.4.2 eq:5.13 page:96 $p_p$
    """
    p_p = f_y*alpha_fab*(2*t/D)
    return p_p


def p_c_zerofunc(p_c, p_el, p_p, O_0, D, t):
    return (p_c-p_el)*(p_c**2-p_p**2) - p_c*p_el*p_p*O_0*D/t


def p_c_fprime(p_c, p_el, p_p, O_0, D, t):
    # derivative of p_c_zerofunc with respect to p_c
    return 3*p_c**2 - 2*p_c*p_el - p_p**2 - p_el*p_p*O_0*D/t


def char_collapse_pressure_num(p_el, p_p, O_0, D, t, p_c_0=1.e5) -> "p_c":
    """Calculate p_c numerically using Newton's method.

    Reference:
    DNVGL-ST-F101 (2017-12)
        sec:5.4.4.2 eq:5.11 page:95 $p_c$
    """
    p_c = scipy.optimize.newton(p_c_zerofunc, p_c_0, p_c_fprime,
                                args=(p_el, p_p, O_0, D, t))
    return p_c


def char_collapse_pressure(p_el, p_p, O_0, D, t) -> "p_c":
    """Calculate p_c analytically using the solution of the cubic equation
    given in DNVGL-ST-F101.

    Reference:
    DNVGL-ST-F101 (2017-12)
        sec:13.4.7 eq:13.10 page:299 $p_c$
    """
    b = -p_el
    c = -(p_p**2 + p_el*p_p*O_0*D/t)
    d = p_el * p_p**2
    u = 1/3 * (-1/3 * b**2 + c)
    v = 1/2 * (2/27 * b**3 - 1/3 * b*c + d)
    phi = np.arccos(-v / np.sqrt(-u**3))
    y = -2 * np.sqrt(-u) * np.cos(phi/3 + 60*np.pi/180)
    p_c = y - 1/3 * b
    return p_c


def pipe_collapse_unity(p_e, p_c, gamma_m, gamma_SCLB, p_min=0
                        ) -> "pipe_collapse_uty":
    """Calculate pipe collapse unity value.
    Local buckling – system collapse (external over pressure only).

    Reference:
    DNVGL-ST-F101 (2017-12)
        sec:5.4.4.1 eq:5.10 page:95 $p_{lt}$
    """
    # if gamma_m is None:
    #     gamma_m = factor.gamma_m_map[limit_state]
    # if gamma_SCLB is None:
    #     gamma_SCLB = factor.gamma_SCLB_map[SC]
    pipe_collapse_uty = (p_e - p_min) * gamma_m * gamma_SCLB / p_c
    return pipe_collapse_uty


def external_pressure(depth, rho_water, g=9.81) -> "p_e":
    p_e = abs(depth) * rho_water * g
    return p_e


def pipe_collapse_all(t, D, E, nu, SMYS, h_l, rho_water,
                      gamma_m, alpha_fab, alpha_U, gamma_SCLB,
                      material=None, T=None, f_ytemp=None,
                      D_max=None, D_min=None, p_min=0, g=9.81
                      ) -> "{}":
    O_0 = pipe_ovality(D, D_max, D_min)
    p_el = pipe_char_elastic_pressure(t, D, nu, E)
    #_alpha_U = factor.alpha_U_map(alpha_U)
    f_y = char_mat_strength(SMYS, material, T, f_ytemp, alpha_U)
    #_alpha_fab = factor.alpha_fab_map(alpha_fab)
    p_p = pipe_char_plastic_pressure(t, D, f_y, alpha_fab)
    p_c = char_collapse_pressure(p_el, p_p, O_0, D, t)
    p_e = external_pressure(abs(h_l), rho_water, g)
    pipe_collapse_uty = pipe_collapse_unity(p_e, p_c, gamma_m, gamma_SCLB, p_min)
    return {
        "O_0": O_0,
        "p_el": p_el,
        "p_p": p_p,
        "p_c": p_c,
        "p_e": p_e,
        "pipe_collapse_uty": pipe_collapse_uty,
    }


if __name__ == "__main__":
    p_c_0 = 1025*9.81*1
    t = 0.0212
    t_corr = 0.0005
    t_fab = 0.001
    t_1 = t - t_corr - t_fab
    D = 0.660
    D_max = D
    D_min = D
    SMYS = 450e6
    f_y = SMYS - 6e6
    alpha_fab = 1.00
    h_l = -410.
    rho_water = 1027.
    p_e = rho_water*9.81*abs(h_l)
    p_el = pipe_char_elastic_pressure(t_1, D, nu=0.3, E=207.*10**9)
    p_p = pipe_char_plastic_pressure(t_1, D, f_y, alpha_fab)
    O_0 = pipe_ovality(D, D_max, D_min)
    p_c = char_collapse_pressure_num(p_el, p_p, O_0, D, t_1, p_c_0=p_c_0)
    print("p_c (numerical)=", p_c)
    # placeholder unity safety factors so the demo runs; substitute project values
    pipe_collapse_uty = pipe_collapse_unity(p_e, p_c, gamma_m=1.0, gamma_SCLB=1.0)
    print("pipe_collapse_uty (numerical)=", pipe_collapse_uty)
    p_c = char_collapse_pressure(p_el, p_p, O_0, D, t_1)
    print("p_c=", p_c)
    pipe_collapse_uty = pipe_collapse_unity(p_e, p_c, gamma_m=1.0, gamma_SCLB=1.0)
    print("pipe_collapse_uty=", pipe_collapse_uty)
py
1a4e9662390e47eb7d11713a21787ec6d7fd46e2
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from ..model import GLMFit def test_GLMFit_inputs(): input_map = dict( allow_ill_cond=dict(argstr='--illcond', ), allow_repeated_subjects=dict(argstr='--allowsubjrep', ), args=dict(argstr='%s', ), calc_AR1=dict(argstr='--tar1', ), check_opts=dict(argstr='--checkopts', ), compute_log_y=dict(argstr='--logy', ), contrast=dict(argstr='--C %s...', ), cortex=dict( argstr='--cortex', xor=['label_file'], ), debug=dict(argstr='--debug', ), design=dict( argstr='--X %s', extensions=None, xor=('fsgd', 'design', 'one_sample'), ), diag=dict(argstr='--diag %d', ), diag_cluster=dict(argstr='--diag-cluster', ), environ=dict( nohash=True, usedefault=True, ), fixed_fx_dof=dict( argstr='--ffxdof %d', xor=['fixed_fx_dof_file'], ), fixed_fx_dof_file=dict( argstr='--ffxdofdat %d', extensions=None, xor=['fixed_fx_dof'], ), fixed_fx_var=dict( argstr='--yffxvar %s', extensions=None, ), force_perm=dict(argstr='--perm-force', ), fsgd=dict( argstr='--fsgd %s %s', xor=('fsgd', 'design', 'one_sample'), ), fwhm=dict(argstr='--fwhm %f', ), glm_dir=dict( argstr='--glmdir %s', genfile=True, ), hemi=dict(), in_file=dict( argstr='--y %s', copyfile=False, extensions=None, mandatory=True, ), invert_mask=dict(argstr='--mask-inv', ), label_file=dict( argstr='--label %s', extensions=None, xor=['cortex'], ), mask_file=dict( argstr='--mask %s', extensions=None, ), no_contrast_ok=dict(argstr='--no-contrasts-ok', ), no_est_fwhm=dict(argstr='--no-est-fwhm', ), no_mask_smooth=dict(argstr='--no-mask-smooth', ), no_prune=dict( argstr='--no-prune', xor=['prunethresh'], ), one_sample=dict( argstr='--osgm', xor=('one_sample', 'fsgd', 'design', 'contrast'), ), pca=dict(argstr='--pca', ), per_voxel_reg=dict(argstr='--pvr %s...', ), profile=dict(argstr='--profile %d', ), prune=dict(argstr='--prune', ), prune_thresh=dict( argstr='--prune_thr %f', xor=['noprune'], ), resynth_test=dict(argstr='--resynthtest %d', ), save_cond=dict(argstr='--save-cond', ), save_estimate=dict(argstr='--yhat-save', ), save_res_corr_mtx=dict(argstr='--eres-scm', ), save_residual=dict(argstr='--eres-save', ), seed=dict(argstr='--seed %d', ), self_reg=dict(argstr='--selfreg %d %d %d', ), sim_done_file=dict( argstr='--sim-done %s', extensions=None, ), sim_sign=dict(argstr='--sim-sign %s', ), simulation=dict(argstr='--sim %s %d %f %s', ), subject_id=dict(), subjects_dir=dict(), surf=dict( argstr='--surf %s %s %s', requires=['subject_id', 'hemi'], ), surf_geo=dict(usedefault=True, ), synth=dict(argstr='--synth', ), uniform=dict(argstr='--uniform %f %f', ), var_fwhm=dict(argstr='--var-fwhm %f', ), vox_dump=dict(argstr='--voxdump %d %d %d', ), weight_file=dict( extensions=None, xor=['weighted_ls'], ), weight_inv=dict( argstr='--w-inv', xor=['weighted_ls'], ), weight_sqrt=dict( argstr='--w-sqrt', xor=['weighted_ls'], ), weighted_ls=dict( argstr='--wls %s', extensions=None, xor=('weight_file', 'weight_inv', 'weight_sqrt'), ), ) inputs = GLMFit.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GLMFit_outputs(): output_map = dict( beta_file=dict(extensions=None, ), dof_file=dict(extensions=None, ), error_file=dict(extensions=None, ), error_stddev_file=dict(extensions=None, ), error_var_file=dict(extensions=None, ), estimate_file=dict(extensions=None, ), frame_eigenvectors=dict(extensions=None, ), ftest_file=dict(), fwhm_file=dict(extensions=None, ), gamma_file=dict(), gamma_var_file=dict(), glm_dir=dict(), 
mask_file=dict(extensions=None, ), sig_file=dict(), singular_values=dict(extensions=None, ), spatial_eigenvectors=dict(extensions=None, ), svd_stats_file=dict(extensions=None, ), ) outputs = GLMFit.output_spec() for key, metadata in list(output_map.items()): for metakey, value in list(metadata.items()): assert getattr(outputs.traits()[key], metakey) == value
py
1a4e9694a673086b1c1838fb7f2cdf39019bd30a
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-10-20 07:17
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='email_active',
            field=models.BooleanField(default=False, verbose_name='邮件激活状态'),
        ),
    ]
py
1a4e975fa78c85abb116ecd180c87191842c7004
from __future__ import division import math import torch from torch.jit.annotations import List, Tuple from torch import Tensor import torchvision # TODO: https://github.com/pytorch/pytorch/issues/26727 def zeros_like(tensor, dtype): # type: (Tensor, int) -> Tensor return torch.zeros_like(tensor, dtype=dtype, layout=tensor.layout, device=tensor.device, pin_memory=tensor.is_pinned()) @torch.jit.script class BalancedPositiveNegativeSampler(object): """ This class samples batches, ensuring that they contain a fixed proportion of positives """ def __init__(self, batch_size_per_image, positive_fraction): # type: (int, float) """ Arguments: batch_size_per_image (int): number of elements to be selected per image positive_fraction (float): percentace of positive elements per batch """ self.batch_size_per_image = batch_size_per_image self.positive_fraction = positive_fraction def __call__(self, matched_idxs): # type: (List[Tensor]) """ Arguments: matched idxs: list of tensors containing -1, 0 or positive values. Each tensor corresponds to a specific image. -1 values are ignored, 0 are considered as negatives and > 0 as positives. Returns: pos_idx (list[tensor]) neg_idx (list[tensor]) Returns two lists of binary masks for each image. The first list contains the positive elements that were selected, and the second list the negative example. """ pos_idx = [] neg_idx = [] for matched_idxs_per_image in matched_idxs: positive = torch.nonzero(matched_idxs_per_image >= 1).squeeze(1) negative = torch.nonzero(matched_idxs_per_image == 0).squeeze(1) num_pos = int(self.batch_size_per_image * self.positive_fraction) # protect against not enough positive examples num_pos = min(positive.numel(), num_pos) num_neg = self.batch_size_per_image - num_pos # protect against not enough negative examples num_neg = min(negative.numel(), num_neg) # randomly select positive and negative examples perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos] perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg] pos_idx_per_image = positive[perm1] neg_idx_per_image = negative[perm2] # create binary mask from indices pos_idx_per_image_mask = zeros_like( matched_idxs_per_image, dtype=torch.uint8 ) neg_idx_per_image_mask = zeros_like( matched_idxs_per_image, dtype=torch.uint8 ) pos_idx_per_image_mask[pos_idx_per_image] = torch.tensor(1, dtype=torch.uint8) neg_idx_per_image_mask[neg_idx_per_image] = torch.tensor(1, dtype=torch.uint8) pos_idx.append(pos_idx_per_image_mask) neg_idx.append(neg_idx_per_image_mask) return pos_idx, neg_idx @torch.jit.script def encode_boxes(reference_boxes, proposals, weights): # type: (torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor """ Encode a set of proposals with respect to some reference boxes Arguments: reference_boxes (Tensor): reference boxes proposals (Tensor): boxes to be encoded """ # perform some unpacking to make it JIT-fusion friendly wx = weights[0] wy = weights[1] ww = weights[2] wh = weights[3] proposals_x1 = proposals[:, 0].unsqueeze(1) proposals_y1 = proposals[:, 1].unsqueeze(1) proposals_x2 = proposals[:, 2].unsqueeze(1) proposals_y2 = proposals[:, 3].unsqueeze(1) reference_boxes_x1 = reference_boxes[:, 0].unsqueeze(1) reference_boxes_y1 = reference_boxes[:, 1].unsqueeze(1) reference_boxes_x2 = reference_boxes[:, 2].unsqueeze(1) reference_boxes_y2 = reference_boxes[:, 3].unsqueeze(1) # implementation starts here ex_widths = proposals_x2 - proposals_x1 ex_heights = proposals_y2 - proposals_y1 ex_ctr_x = proposals_x1 + 0.5 * ex_widths 
ex_ctr_y = proposals_y1 + 0.5 * ex_heights gt_widths = reference_boxes_x2 - reference_boxes_x1 gt_heights = reference_boxes_y2 - reference_boxes_y1 gt_ctr_x = reference_boxes_x1 + 0.5 * gt_widths gt_ctr_y = reference_boxes_y1 + 0.5 * gt_heights targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights targets_dw = ww * torch.log(gt_widths / ex_widths) targets_dh = wh * torch.log(gt_heights / ex_heights) targets = torch.cat((targets_dx, targets_dy, targets_dw, targets_dh), dim=1) return targets @torch.jit.script class BoxCoder(object): """ This class encodes and decodes a set of bounding boxes into the representation used for training the regressors. """ def __init__(self, weights, bbox_xform_clip=math.log(1000. / 16)): # type: (Tuple[float, float, float, float], float) """ Arguments: weights (4-element tuple) bbox_xform_clip (float) """ self.weights = weights self.bbox_xform_clip = bbox_xform_clip def encode(self, reference_boxes, proposals): # type: (List[Tensor], List[Tensor]) boxes_per_image = [len(b) for b in reference_boxes] reference_boxes = torch.cat(reference_boxes, dim=0) proposals = torch.cat(proposals, dim=0) targets = self.encode_single(reference_boxes, proposals) return targets.split(boxes_per_image, 0) def encode_single(self, reference_boxes, proposals): """ Encode a set of proposals with respect to some reference boxes Arguments: reference_boxes (Tensor): reference boxes proposals (Tensor): boxes to be encoded """ dtype = reference_boxes.dtype device = reference_boxes.device weights = torch.as_tensor(self.weights, dtype=dtype, device=device) targets = encode_boxes(reference_boxes, proposals, weights) return targets def decode(self, rel_codes, boxes): # type: (Tensor, List[Tensor]) assert isinstance(boxes, (list, tuple)) assert isinstance(rel_codes, torch.Tensor) boxes_per_image = [b.size(0) for b in boxes] concat_boxes = torch.cat(boxes, dim=0) box_sum = 0 for val in boxes_per_image: box_sum += val pred_boxes = self.decode_single( rel_codes.reshape(box_sum, -1), concat_boxes ) return pred_boxes.reshape(box_sum, -1, 4) def decode_single(self, rel_codes, boxes): """ From a set of original boxes and encoded relative box offsets, get the decoded boxes. Arguments: rel_codes (Tensor): encoded boxes boxes (Tensor): reference boxes. """ boxes = boxes.to(rel_codes.dtype) widths = boxes[:, 2] - boxes[:, 0] heights = boxes[:, 3] - boxes[:, 1] ctr_x = boxes[:, 0] + 0.5 * widths ctr_y = boxes[:, 1] + 0.5 * heights wx, wy, ww, wh = self.weights dx = rel_codes[:, 0::4] / wx dy = rel_codes[:, 1::4] / wy dw = rel_codes[:, 2::4] / ww dh = rel_codes[:, 3::4] / wh # Prevent sending too large values into torch.exp() dw = torch.clamp(dw, max=self.bbox_xform_clip) dh = torch.clamp(dh, max=self.bbox_xform_clip) pred_ctr_x = dx * widths[:, None] + ctr_x[:, None] pred_ctr_y = dy * heights[:, None] + ctr_y[:, None] pred_w = torch.exp(dw) * widths[:, None] pred_h = torch.exp(dh) * heights[:, None] pred_boxes1 = pred_ctr_x - torch.tensor(0.5, dtype=pred_ctr_x.dtype) * pred_w pred_boxes2 = pred_ctr_y - torch.tensor(0.5, dtype=pred_ctr_y.dtype) * pred_h pred_boxes3 = pred_ctr_x + torch.tensor(0.5, dtype=pred_ctr_x.dtype) * pred_w pred_boxes4 = pred_ctr_y + torch.tensor(0.5, dtype=pred_ctr_y.dtype) * pred_h pred_boxes = torch.stack((pred_boxes1, pred_boxes2, pred_boxes3, pred_boxes4), dim=2).flatten(1) return pred_boxes @torch.jit.script class Matcher(object): """ This class assigns to each predicted "element" (e.g., a box) a ground-truth element. 
Each predicted element will have exactly zero or one matches; each ground-truth element may be assigned to zero or more predicted elements. Matching is based on the MxN match_quality_matrix, that characterizes how well each (ground-truth, predicted)-pair match. For example, if the elements are boxes, the matrix may contain box IoU overlap values. The matcher returns a tensor of size N containing the index of the ground-truth element m that matches to prediction n. If there is no match, a negative value is returned. """ BELOW_LOW_THRESHOLD = -1 BETWEEN_THRESHOLDS = -2 __annotations__ = { 'BELOW_LOW_THRESHOLD': int, 'BETWEEN_THRESHOLDS': int, } def __init__(self, high_threshold, low_threshold, allow_low_quality_matches=False): # type: (float, float, bool) """ Args: high_threshold (float): quality values greater than or equal to this value are candidate matches. low_threshold (float): a lower quality threshold used to stratify matches into three levels: 1) matches >= high_threshold 2) BETWEEN_THRESHOLDS matches in [low_threshold, high_threshold) 3) BELOW_LOW_THRESHOLD matches in [0, low_threshold) allow_low_quality_matches (bool): if True, produce additional matches for predictions that have only low-quality match candidates. See set_low_quality_matches_ for more details. """ self.BELOW_LOW_THRESHOLD = -1 self.BETWEEN_THRESHOLDS = -2 assert low_threshold <= high_threshold self.high_threshold = high_threshold self.low_threshold = low_threshold self.allow_low_quality_matches = allow_low_quality_matches def __call__(self, match_quality_matrix): """ Args: match_quality_matrix (Tensor[float]): an MxN tensor, containing the pairwise quality between M ground-truth elements and N predicted elements. Returns: matches (Tensor[int64]): an N tensor where N[i] is a matched gt in [0, M - 1] or a negative value indicating that prediction i could not be matched. """ if match_quality_matrix.numel() == 0: # empty targets or proposals not supported during training if match_quality_matrix.shape[0] == 0: raise ValueError( "No ground-truth boxes available for one of the images " "during training") else: raise ValueError( "No proposal boxes available for one of the images " "during training") # match_quality_matrix is M (gt) x N (predicted) # Max over gt elements (dim 0) to find best gt candidate for each prediction matched_vals, matches = match_quality_matrix.max(dim=0) if self.allow_low_quality_matches: all_matches = matches.clone() else: all_matches = None # Assign candidate matches with low quality to negative (unassigned) values below_low_threshold = matched_vals < self.low_threshold between_thresholds = (matched_vals >= self.low_threshold) & ( matched_vals < self.high_threshold ) matches[below_low_threshold] = torch.tensor(self.BELOW_LOW_THRESHOLD) matches[between_thresholds] = torch.tensor(self.BETWEEN_THRESHOLDS) if self.allow_low_quality_matches: assert all_matches is not None self.set_low_quality_matches_(matches, all_matches, match_quality_matrix) return matches def set_low_quality_matches_(self, matches, all_matches, match_quality_matrix): """ Produce additional matches for predictions that have only low-quality matches. Specifically, for each ground-truth find the set of predictions that have maximum overlap with it (including ties); for each prediction in that set, if it is unmatched, then match it to the ground-truth with which it has the highest quality value. 
""" # For each gt, find the prediction with which it has highest quality highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1) # Find highest quality match available, even if it is low, including ties gt_pred_pairs_of_highest_quality = torch.nonzero( match_quality_matrix == highest_quality_foreach_gt[:, None] ) # Example gt_pred_pairs_of_highest_quality: # tensor([[ 0, 39796], # [ 1, 32055], # [ 1, 32070], # [ 2, 39190], # [ 2, 40255], # [ 3, 40390], # [ 3, 41455], # [ 4, 45470], # [ 5, 45325], # [ 5, 46390]]) # Each row is a (gt index, prediction index) # Note how gt items 1, 2, 3, and 5 each have two ties pred_inds_to_update = gt_pred_pairs_of_highest_quality[:, 1] matches[pred_inds_to_update] = all_matches[pred_inds_to_update]
py
1a4e99507236ee2b6560186f022c348b686025d3
import os
import random
from typing import Dict, NamedTuple, Optional

import numpy as np

from .file_utils import is_tf_available, is_torch_available


try:
    import wandb

    wandb.ensure_configured()
    if wandb.api.api_key is None:
        _has_wandb = False
        wandb.termwarn("W&B installed but not logged in. Run `wandb login` or set the WANDB_API_KEY env variable.")
    else:
        _has_wandb = False if os.getenv("WANDB_DISABLED") else True
except (ImportError, AttributeError):
    _has_wandb = False


def is_wandb_available():
    return _has_wandb


def set_seed(seed: int):
    """
    Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf``
    (if installed).

    Args:
        seed (:obj:`int`): The seed to set.
    """
    random.seed(seed)
    np.random.seed(seed)
    if is_torch_available():
        import torch

        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        # ^^ safe to call this function even if cuda is not available
    if is_tf_available():
        import tensorflow as tf

        tf.random.set_seed(seed)


class EvalPrediction(NamedTuple):
    """
    Evaluation output (always contains labels), to be used to compute metrics.

    Parameters:
        predictions (:obj:`np.ndarray`): Predictions of the model.
        label_ids (:obj:`np.ndarray`): Targets to be matched.
    """

    predictions: np.ndarray
    label_ids: np.ndarray


class PredictionOutput(NamedTuple):
    predictions: np.ndarray
    label_ids: Optional[np.ndarray]
    metrics: Optional[Dict[str, float]]


class TrainOutput(NamedTuple):
    global_step: int
    training_loss: float


PREFIX_CHECKPOINT_DIR = "checkpoint"
py
1a4e9c685790a47be25b4bcbc1f0b97abc700672
# Solution 1
with open("/Users/a318196/Code/AdventOfCode2020/20191202/input.txt") as file:
    inputText = file.read()

inputArray = list(map(int, inputText.split(',')))

position = 0
while position < len(inputArray):
    if (inputArray[position] == 1):
        inputArray[inputArray[position + 3]] = inputArray[inputArray[position + 1]] + inputArray[inputArray[position + 2]]
        position += 4
    if (inputArray[position] == 2):
        inputArray[inputArray[position + 3]] = inputArray[inputArray[position + 1]] * inputArray[inputArray[position + 2]]
        position += 4
    if (inputArray[position] == 99):
        position = len(inputArray)

print('Solution 1: ' + str(inputArray[0]))

# Solution 2
stopNumber = 19690720

noun = 0
while noun <= 99:
    verb = 0
    while verb <= 99:
        with open("/Users/a318196/Code/AdventOfCode2020/20191202/input.txt") as file:
            inputText = file.read()

        inputArray = list(map(int, inputText.split(',')))
        inputArray[1] = noun
        inputArray[2] = verb

        position = 0
        while position < len(inputArray):
            if (inputArray[position] == 1):
                inputArray[inputArray[position + 3]] = inputArray[inputArray[position + 1]] + inputArray[inputArray[position + 2]]
                position += 4
            if (inputArray[position] == 2):
                inputArray[inputArray[position + 3]] = inputArray[inputArray[position + 1]] * inputArray[inputArray[position + 2]]
                position += 4
            if (inputArray[position] == 99):
                position = len(inputArray)

        if (inputArray[0] == stopNumber):
            break
        verb += 1
    if (inputArray[0] == stopNumber):
        break
    noun += 1

print('Solution 2: ' + str(100 * noun + verb))
py
1a4e9d6a1dc3df16a5dd81568513e37779045219
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# author: Tang Zhuangkun

import time

import sys

sys.path.append("..")
import database.db_operator as db_operator
import log.custom_logger as custom_logger


class DataMinerCommonDBOperation:
    # Common, frequently used database operations (not fund- or stock-specific)

    def __init__(self):
        pass

    def get_the_last_trading_date(self, day):
        # Get the trading date closest to, and no later than, the given date, i.e. the previous trading day
        # day: a date such as 2021-06-09
        # return: the nearest trading date if one exists,
        #         otherwise 0000-00-00

        # Query SQL
        selecting_sql = "SELECT trading_date FROM trading_days WHERE trading_date <= '%s' ORDER BY " \
                        "ABS(DATEDIFF(trading_date, '%s')) ASC LIMIT 1" % (day, day)
        # Run the query
        selecting_result = db_operator.DBOperator().select_one("financial_data", selecting_sql)
        if selecting_result is not None:
            return str(selecting_result["trading_date"])
        else:
            # Log the failure
            log_msg = "Could not get the nearest trading date for " + day
            custom_logger.CustomLogger().log_writter(log_msg, 'error')
            return "0000-00-00"


if __name__ == '__main__':
    time_start = time.time()
    go = DataMinerCommonDBOperation()
    last_trade_day = go.get_the_last_trading_date("2022-03-20")
    print(last_trade_day)
    time_end = time.time()
    print('Time Cost: ' + str(time_end - time_start))
py
1a4e9df6f1cade0343b7240afa3513e271302e11
#%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import phd.viz
import phd.thermo
import phd.stats
colors, palette = phd.viz.phd_style()

# %%
# Load the data sets and restrict to the carbon sources
data = pd.read_csv('../../data/ch4_growth/analyzed_foldchange.csv')
stats = pd.read_csv('../../data/ch8_growth_si/DNA_binding_energy_summary.csv')
data = data[(data['strain']=='dilution') & (data['repressors'] > 0) &
            (data['fold_change'] >= 0) & (data['temp'] == 37) &
            (data['size']=='large')]
summary = data.groupby(['carbon', 'date', 'run_number',
                        'atc_ngml']).mean().reset_index()
summary = summary.groupby(['carbon', 'atc_ngml']).agg(('mean', 'sem')).reset_index()
stats = stats[(stats['temp']==37)]

# Define the constants for plotting
rep_range = np.logspace(0, 3, 100)

# %%
# Set up the figure canvas
fig, ax = plt.subplots(3, 3, figsize=(5.5, 5.5), dpi=100)
phd.viz.despine(ax.ravel())
for a in ax.ravel():
    a.set_xscale('log')
    a.set_yscale('log')
    a.set_xlim([1, 800])
    a.set_ylim([1E-2, 1.1])

for i in range(3):
    ax[-1, i].set_xlabel('repressors per cell')
    ax[i, 0].set_ylabel('fold-change')

for i in range(3):
    ax[0, i].spines['bottom'].set_visible(False)
    ax[0, i].set_xticks([])
    ax[1, i].spines['bottom'].set_visible(False)
    ax[1, i].set_xticks([])
    ax[i, 1].spines['left'].set_visible(False)
    ax[i, 1].set_yticks([])
    ax[i, 2].spines['left'].set_visible(False)
    ax[i, 2].set_yticks([])

titles = ['acetate', 'glycerol', 'glucose']
title_colors = [colors['dark_brown'], colors['dark_green'], colors['dark_purple']]
bgcolors = [colors['brown'], colors['green'], colors['purple']]
for i in range(3):
    if i > 0:
        # apply offset transform to all y ticklabels.
        dx = -13 / fig.dpi
        dy = 0
        offset = matplotlib.transforms.ScaledTranslation(dx, dy, fig.dpi_scale_trans)
        for label in ax[i, 0].yaxis.get_majorticklabels():
            label.set_transform(label.get_transform() + offset)

# Plot the predictions
for i, pred in enumerate(titles):
    # Get the binding energy values for the prediction strain
    low, high = stats[(stats['carbon']==pred) &
                      (stats['parameter']=='epRA')][['hpd_min', 'hpd_max']].values[0]

    # Compute the theory
    theo_min = phd.thermo.SimpleRepression(R=rep_range, ep_r=low, ka=139, ki=0.53,
                                           ep_ai=1000, effector_conc=0).fold_change()
    theo_max = phd.thermo.SimpleRepression(R=rep_range, ep_r=high, ka=139, ki=0.53,
                                           ep_ai=1000, effector_conc=0).fold_change()

    for j, fit in enumerate(titles):
        ax[i, j].fill_between(rep_range, theo_min, theo_max,
                              color=title_colors[i], alpha=0.25)

# Plot the data
for i, carb in enumerate(titles):
    for j in range(3):
        if i == j:
            fill = 'white'
            edge = bgcolors[i]
        else:
            fill = bgcolors[i]
            edge = colors['grey']

        # Isolate the data.
        d = summary[summary['carbon']==carb]
        ax[j, i].errorbar(d['repressors']['mean'], d['fold_change']['mean'],
                          xerr=d['repressors']['sem'], yerr=d['fold_change']['sem'],
                          fmt='o', ms=5, markerfacecolor=fill, markeredgewidth=0.5,
                          linestyle='none', capsize=1, lw=0.75, markeredgecolor=edge,
                          color=bgcolors[i])

plt.subplots_adjust(wspace=0.05, hspace=0.05)
plt.savefig('../figs/figS10_plots.svg', bbox_inches='tight')
# %%
py
1a4e9f694f30967f9fd482c835923c92fa0f7cd1
"""Locale support module. The module provides low-level access to the C lib's locale APIs and adds high level number formatting APIs as well as a locale aliasing engine to complement these. The aliasing engine includes support for many commonly used locale names and maps them to values suitable for passing to the C lib's setlocale() function. It also includes default encodings for all supported locale names. """ import sys import encodings import encodings.aliases import re import _collections_abc from builtins import str as _builtin_str import functools # Try importing the _locale module. # # If this fails, fall back on a basic 'C' locale emulation. # Yuck: LC_MESSAGES is non-standard: can't tell whether it exists before # trying the import. So __all__ is also fiddled at the end of the file. __all__ = ["getlocale", "getdefaultlocale", "getpreferredencoding", "Error", "setlocale", "resetlocale", "localeconv", "strcoll", "strxfrm", "str", "atof", "atoi", "format", "format_string", "currency", "normalize", "LC_CTYPE", "LC_COLLATE", "LC_TIME", "LC_MONETARY", "LC_NUMERIC", "LC_ALL", "CHAR_MAX"] def _strcoll(a,b): """ strcoll(string,string) -> int. Compares two strings according to the locale. """ return (a > b) - (a < b) def _strxfrm(s): """ strxfrm(string) -> string. Returns a string that behaves for cmp locale-aware. """ return s try: from _locale import * except ImportError: # Locale emulation CHAR_MAX = 127 LC_ALL = 6 LC_COLLATE = 3 LC_CTYPE = 0 LC_MESSAGES = 5 LC_MONETARY = 4 LC_NUMERIC = 1 LC_TIME = 2 Error = ValueError def localeconv(): """ localeconv() -> dict. Returns numeric and monetary locale-specific parameters. """ # 'C' locale default values return {'grouping': [127], 'currency_symbol': '', 'n_sign_posn': 127, 'p_cs_precedes': 127, 'n_cs_precedes': 127, 'mon_grouping': [], 'n_sep_by_space': 127, 'decimal_point': '.', 'negative_sign': '', 'positive_sign': '', 'p_sep_by_space': 127, 'int_curr_symbol': '', 'p_sign_posn': 127, 'thousands_sep': '', 'mon_thousands_sep': '', 'frac_digits': 127, 'mon_decimal_point': '', 'int_frac_digits': 127} def setlocale(category, value=None): """ setlocale(integer,string=None) -> string. Activates/queries locale processing. """ if value not in (None, '', 'C'): raise Error('_locale emulation only supports "C" locale') return 'C' # These may or may not exist in _locale, so be sure to set them. if 'strxfrm' not in globals(): strxfrm = _strxfrm if 'strcoll' not in globals(): strcoll = _strcoll _localeconv = localeconv # With this dict, you can override some items of localeconv's return value. # This is useful for testing purposes. 
_override_localeconv = {} @functools.wraps(_localeconv) def localeconv(): d = _localeconv() if _override_localeconv: d.update(_override_localeconv) return d ### Number formatting APIs # Author: Martin von Loewis # improved by Georg Brandl # Iterate over grouping intervals def _grouping_intervals(grouping): last_interval = None for interval in grouping: # if grouping is -1, we are done if interval == CHAR_MAX: return # 0: re-use last group ad infinitum if interval == 0: if last_interval is None: raise ValueError("invalid grouping") while True: yield last_interval yield interval last_interval = interval #perform the grouping from right to left def _group(s, monetary=False): conv = localeconv() thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep'] grouping = conv[monetary and 'mon_grouping' or 'grouping'] if not grouping: return (s, 0) if s[-1] == ' ': stripped = s.rstrip() right_spaces = s[len(stripped):] s = stripped else: right_spaces = '' left_spaces = '' groups = [] for interval in _grouping_intervals(grouping): if not s or s[-1] not in "0123456789": # only non-digit characters remain (sign, spaces) left_spaces = s s = '' break groups.append(s[-interval:]) s = s[:-interval] if s: groups.append(s) groups.reverse() return ( left_spaces + thousands_sep.join(groups) + right_spaces, len(thousands_sep) * (len(groups) - 1) ) # Strip a given amount of excess padding from the given string def _strip_padding(s, amount): lpos = 0 while amount and s[lpos] == ' ': lpos += 1 amount -= 1 rpos = len(s) - 1 while amount and s[rpos] == ' ': rpos -= 1 amount -= 1 return s[lpos:rpos+1] _percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?' r'(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]') def _format(percent, value, grouping=False, monetary=False, *additional): if additional: formatted = percent % ((value,) + additional) else: formatted = percent % value # floats and decimal ints need special action! if percent[-1] in 'eEfFgG': seps = 0 parts = formatted.split('.') if grouping: parts[0], seps = _group(parts[0], monetary=monetary) decimal_point = localeconv()[monetary and 'mon_decimal_point' or 'decimal_point'] formatted = decimal_point.join(parts) if seps: formatted = _strip_padding(formatted, seps) elif percent[-1] in 'diu': seps = 0 if grouping: formatted, seps = _group(formatted, monetary=monetary) if seps: formatted = _strip_padding(formatted, seps) return formatted def format_string(f, val, grouping=False, monetary=False): """Formats a string in the same way that the % formatting would use, but takes the current locale into account. Grouping is applied if the third parameter is true. 
Conversion uses monetary thousands separator and grouping strings if forth parameter monetary is true.""" percents = list(_percent_re.finditer(f)) new_f = _percent_re.sub('%s', f) if isinstance(val, _collections_abc.Mapping): new_val = [] for perc in percents: if perc.group()[-1]=='%': new_val.append('%') else: new_val.append(_format(perc.group(), val, grouping, monetary)) else: if not isinstance(val, tuple): val = (val,) new_val = [] i = 0 for perc in percents: if perc.group()[-1]=='%': new_val.append('%') else: starcount = perc.group('modifiers').count('*') new_val.append(_format(perc.group(), val[i], grouping, monetary, *val[i+1:i+1+starcount])) i += (1 + starcount) val = tuple(new_val) return new_f % val def format(percent, value, grouping=False, monetary=False, *additional): """Deprecated, use format_string instead.""" import warnings warnings.warn( "This method will be removed in a future version of Python. " "Use 'locale.format_string()' instead.", DeprecationWarning, stacklevel=2 ) match = _percent_re.match(percent) if not match or len(match.group())!= len(percent): raise ValueError(("format() must be given exactly one %%char " "format specifier, %s not valid") % repr(percent)) return _format(percent, value, grouping, monetary, *additional) def currency(val, symbol=True, grouping=False, international=False): """Formats val according to the currency settings in the current locale.""" conv = localeconv() # check for illegal values digits = conv[international and 'int_frac_digits' or 'frac_digits'] if digits == 127: raise ValueError("Currency formatting is not possible using " "the 'C' locale.") s = _format('%%.%if' % digits, abs(val), grouping, monetary=True) # '<' and '>' are markers if the sign must be inserted between symbol and value s = '<' + s + '>' if symbol: smb = conv[international and 'int_curr_symbol' or 'currency_symbol'] precedes = conv[val<0 and 'n_cs_precedes' or 'p_cs_precedes'] separated = conv[val<0 and 'n_sep_by_space' or 'p_sep_by_space'] if precedes: s = smb + (separated and ' ' or '') + s else: if international and smb[-1] == ' ': smb = smb[:-1] s = s + (separated and ' ' or '') + smb sign_pos = conv[val<0 and 'n_sign_posn' or 'p_sign_posn'] sign = conv[val<0 and 'negative_sign' or 'positive_sign'] if sign_pos == 0: s = '(' + s + ')' elif sign_pos == 1: s = sign + s elif sign_pos == 2: s = s + sign elif sign_pos == 3: s = s.replace('<', sign) elif sign_pos == 4: s = s.replace('>', sign) else: # the default if nothing specified; # this should be the most fitting sign position s = sign + s return s.replace('<', '').replace('>', '') def str(val): """Convert float to string, taking the locale into account.""" return _format("%.12g", val) def delocalize(string): "Parses a string as a normalized number according to the locale settings." conv = localeconv() #First, get rid of the grouping ts = conv['thousands_sep'] if ts: string = string.replace(ts, '') #next, replace the decimal point with a dot dd = conv['decimal_point'] if dd: string = string.replace(dd, '.') return string def atof(string, func=float): "Parses a string as a float according to the locale settings." return func(delocalize(string)) def atoi(string): "Converts a string to an integer according to the locale settings." 
return int(delocalize(string)) def _test(): setlocale(LC_ALL, "") #do grouping s1 = format_string("%d", 123456789,1) print(s1, "is", atoi(s1)) #standard formatting s1 = str(3.14) print(s1, "is", atof(s1)) ### Locale name aliasing engine # Author: Marc-Andre Lemburg, [email protected] # Various tweaks by Fredrik Lundh <[email protected]> # store away the low-level version of setlocale (it's # overridden below) _setlocale = setlocale def _replace_encoding(code, encoding): if '.' in code: langname = code[:code.index('.')] else: langname = code # Convert the encoding to a C lib compatible encoding string norm_encoding = encodings.normalize_encoding(encoding) #print('norm encoding: %r' % norm_encoding) norm_encoding = encodings.aliases.aliases.get(norm_encoding.lower(), norm_encoding) #print('aliased encoding: %r' % norm_encoding) encoding = norm_encoding norm_encoding = norm_encoding.lower() if norm_encoding in locale_encoding_alias: encoding = locale_encoding_alias[norm_encoding] else: norm_encoding = norm_encoding.replace('_', '') norm_encoding = norm_encoding.replace('-', '') if norm_encoding in locale_encoding_alias: encoding = locale_encoding_alias[norm_encoding] #print('found encoding %r' % encoding) return langname + '.' + encoding def _append_modifier(code, modifier): if modifier == 'euro': if '.' not in code: return code + '.ISO8859-15' _, _, encoding = code.partition('.') if encoding in ('ISO8859-15', 'UTF-8'): return code if encoding == 'ISO8859-1': return _replace_encoding(code, 'ISO8859-15') return code + '@' + modifier def normalize(localename): """ Returns a normalized locale code for the given locale name. The returned locale code is formatted for use with setlocale(). If normalization fails, the original name is returned unchanged. If the given encoding is not known, the function defaults to the default encoding for the locale code just like setlocale() does. """ # Normalize the locale name and extract the encoding and modifier code = localename.lower() if ':' in code: # ':' is sometimes used as encoding delimiter. code = code.replace(':', '.') if '@' in code: code, modifier = code.split('@', 1) else: modifier = '' if '.' in code: langname, encoding = code.split('.')[:2] else: langname = code encoding = '' # First lookup: fullname (possibly with encoding and modifier) lang_enc = langname if encoding: norm_encoding = encoding.replace('-', '') norm_encoding = norm_encoding.replace('_', '') lang_enc += '.' 
+ norm_encoding lookup_name = lang_enc if modifier: lookup_name += '@' + modifier code = locale_alias.get(lookup_name, None) if code is not None: return code #print('first lookup failed') if modifier: # Second try: fullname without modifier (possibly with encoding) code = locale_alias.get(lang_enc, None) if code is not None: #print('lookup without modifier succeeded') if '@' not in code: return _append_modifier(code, modifier) if code.split('@', 1)[1].lower() == modifier: return code #print('second lookup failed') if encoding: # Third try: langname (without encoding, possibly with modifier) lookup_name = langname if modifier: lookup_name += '@' + modifier code = locale_alias.get(lookup_name, None) if code is not None: #print('lookup without encoding succeeded') if '@' not in code: return _replace_encoding(code, encoding) code, modifier = code.split('@', 1) return _replace_encoding(code, encoding) + '@' + modifier if modifier: # Fourth try: langname (without encoding and modifier) code = locale_alias.get(langname, None) if code is not None: #print('lookup without modifier and encoding succeeded') if '@' not in code: code = _replace_encoding(code, encoding) return _append_modifier(code, modifier) code, defmod = code.split('@', 1) if defmod.lower() == modifier: return _replace_encoding(code, encoding) + '@' + defmod return localename def _parse_localename(localename): """ Parses the locale code for localename and returns the result as tuple (language code, encoding). The localename is normalized and passed through the locale alias engine. A ValueError is raised in case the locale name cannot be parsed. The language code corresponds to RFC 1766. code and encoding can be None in case the values cannot be determined or are unknown to this implementation. """ code = normalize(localename) if '@' in code: # Deal with locale modifiers code, modifier = code.split('@', 1) if modifier == 'euro' and '.' not in code: # Assume Latin-9 for @euro locales. This is bogus, # since some systems may use other encodings for these # locales. Also, we ignore other modifiers. return code, 'iso-8859-15' if '.' in code: return tuple(code.split('.')[:2]) elif code == 'C': return None, None elif code == 'UTF-8': # On macOS "LC_CTYPE=UTF-8" is a valid locale setting # for getting UTF-8 handling for text. return None, 'UTF-8' raise ValueError('unknown locale: %s' % localename) def _build_localename(localetuple): """ Builds a locale code from the given tuple (language code, encoding). No aliasing or normalizing takes place. """ try: language, encoding = localetuple if language is None: language = 'C' if encoding is None: return language else: return language + '.' + encoding except (TypeError, ValueError): raise TypeError('Locale must be None, a string, or an iterable of ' 'two strings -- language code, encoding.') from None def getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')): """ Tries to determine the default locale settings and returns them as tuple (language code, encoding). According to POSIX, a program which has not called setlocale(LC_ALL, "") runs using the portable 'C' locale. Calling setlocale(LC_ALL, "") lets it use the default locale as defined by the LANG variable. Since we don't want to interfere with the current locale setting we thus emulate the behavior in the way described above. To maintain compatibility with other platforms, not only the LANG variable is tested, but a list of variables given as envvars parameter. The first found to be defined will be used. 
envvars defaults to the search path used in GNU gettext; it must always contain the variable name 'LANG'. Except for the code 'C', the language code corresponds to RFC 1766. code and encoding can be None in case the values cannot be determined. """ try: # check if it's supported by the _locale module import _locale code, encoding = _locale._getdefaultlocale() except (ImportError, AttributeError): pass else: # make sure the code/encoding values are valid if sys.platform == "win32" and code and code[:2] == "0x": # map windows language identifier to language name code = windows_locale.get(int(code, 0)) # ...add other platform-specific processing here, if # necessary... return code, encoding # fall back on POSIX behaviour import os lookup = os.environ.get for variable in envvars: localename = lookup(variable,None) if localename: if variable == 'LANGUAGE': localename = localename.split(':')[0] break else: localename = 'C' return _parse_localename(localename) def getlocale(category=LC_CTYPE): """ Returns the current setting for the given locale category as tuple (language code, encoding). category may be one of the LC_* value except LC_ALL. It defaults to LC_CTYPE. Except for the code 'C', the language code corresponds to RFC 1766. code and encoding can be None in case the values cannot be determined. """ localename = _setlocale(category) if category == LC_ALL and ';' in localename: raise TypeError('category LC_ALL is not supported') return _parse_localename(localename) def setlocale(category, locale=None): """ Set the locale for the given category. The locale can be a string, an iterable of two strings (language code and encoding), or None. Iterables are converted to strings using the locale aliasing engine. Locale strings are passed directly to the C lib. category may be given as one of the LC_* values. """ if locale and not isinstance(locale, _builtin_str): # convert to string locale = normalize(_build_localename(locale)) return _setlocale(category, locale) def resetlocale(category=LC_ALL): """ Sets the locale for category to the default setting. The default setting is determined by calling getdefaultlocale(). category defaults to LC_ALL. """ _setlocale(category, _build_localename(getdefaultlocale())) if sys.platform.startswith("win"): # On Win32, this will return the ANSI code page def getpreferredencoding(do_setlocale = True): """Return the charset that the user is likely using.""" if sys.flags.utf8_mode: return 'UTF-8' import _bootlocale return _bootlocale.getpreferredencoding(False) else: # On Unix, if CODESET is available, use that. try: CODESET except NameError: if hasattr(sys, 'getandroidapilevel'): # On Android langinfo.h and CODESET are missing, and UTF-8 is # always used in mbstowcs() and wcstombs(). 
def getpreferredencoding(do_setlocale = True): return 'UTF-8' else: # Fall back to parsing environment variables :-( def getpreferredencoding(do_setlocale = True): """Return the charset that the user is likely using, by looking at environment variables.""" if sys.flags.utf8_mode: return 'UTF-8' res = getdefaultlocale()[1] if res is None: # LANG not set, default conservatively to ASCII res = 'ascii' return res else: def getpreferredencoding(do_setlocale = True): """Return the charset that the user is likely using, according to the system configuration.""" if sys.flags.utf8_mode: return 'UTF-8' import _bootlocale if do_setlocale: oldloc = setlocale(LC_CTYPE) try: setlocale(LC_CTYPE, "") except Error: pass result = _bootlocale.getpreferredencoding(False) if do_setlocale: setlocale(LC_CTYPE, oldloc) return result ### Database # # The following data was extracted from the locale.alias file which # comes with X11 and then hand edited removing the explicit encoding # definitions and adding some more aliases. The file is usually # available as /usr/lib/X11/locale/locale.alias. # # # The local_encoding_alias table maps lowercase encoding alias names # to C locale encoding names (case-sensitive). Note that normalize() # first looks up the encoding in the encodings.aliases dictionary and # then applies this mapping to find the correct C lib name for the # encoding. # locale_encoding_alias = { # Mappings for non-standard encoding names used in locale names '437': 'C', 'c': 'C', 'en': 'ISO8859-1', 'jis': 'JIS7', 'jis7': 'JIS7', 'ajec': 'eucJP', 'koi8c': 'KOI8-C', 'microsoftcp1251': 'CP1251', 'microsoftcp1255': 'CP1255', 'microsoftcp1256': 'CP1256', '88591': 'ISO8859-1', '88592': 'ISO8859-2', '88595': 'ISO8859-5', '885915': 'ISO8859-15', # Mappings from Python codec names to C lib encoding names 'ascii': 'ISO8859-1', 'latin_1': 'ISO8859-1', 'iso8859_1': 'ISO8859-1', 'iso8859_10': 'ISO8859-10', 'iso8859_11': 'ISO8859-11', 'iso8859_13': 'ISO8859-13', 'iso8859_14': 'ISO8859-14', 'iso8859_15': 'ISO8859-15', 'iso8859_16': 'ISO8859-16', 'iso8859_2': 'ISO8859-2', 'iso8859_3': 'ISO8859-3', 'iso8859_4': 'ISO8859-4', 'iso8859_5': 'ISO8859-5', 'iso8859_6': 'ISO8859-6', 'iso8859_7': 'ISO8859-7', 'iso8859_8': 'ISO8859-8', 'iso8859_9': 'ISO8859-9', 'iso2022_jp': 'JIS7', 'shift_jis': 'SJIS', 'tactis': 'TACTIS', 'euc_jp': 'eucJP', 'euc_kr': 'eucKR', 'utf_8': 'UTF-8', 'koi8_r': 'KOI8-R', 'koi8_t': 'KOI8-T', 'koi8_u': 'KOI8-U', 'kz1048': 'RK1048', 'cp1251': 'CP1251', 'cp1255': 'CP1255', 'cp1256': 'CP1256', # XXX This list is still incomplete. If you know more # mappings, please file a bug report. Thanks. } for k, v in sorted(locale_encoding_alias.items()): k = k.replace('_', '') locale_encoding_alias.setdefault(k, v) # # The locale_alias table maps lowercase alias names to C locale names # (case-sensitive). Encodings are always separated from the locale # name using a dot ('.'); they should only be given in case the # language name is needed to interpret the given encoding alias # correctly (CJK codes often have this need). # # Note that the normalize() function which uses this tables # removes '_' and '-' characters from the encoding part of the # locale name before doing the lookup. This saves a lot of # space in the table. # # MAL 2004-12-10: # Updated alias mapping to most recent locale.alias file # from X.org distribution using makelocalealias.py. 
# # These are the differences compared to the old mapping (Python 2.4 # and older): # # updated 'bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' # updated 'bg_bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' # updated 'bulgarian' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' # updated 'cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2' # updated 'cz_cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2' # updated 'czech' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2' # updated 'dutch' -> 'nl_BE.ISO8859-1' to 'nl_NL.ISO8859-1' # updated 'et' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15' # updated 'et_ee' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15' # updated 'fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15' # updated 'fi_fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15' # updated 'iw' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' # updated 'iw_il' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' # updated 'japanese' -> 'ja_JP.SJIS' to 'ja_JP.eucJP' # updated 'lt' -> 'lt_LT.ISO8859-4' to 'lt_LT.ISO8859-13' # updated 'lv' -> 'lv_LV.ISO8859-4' to 'lv_LV.ISO8859-13' # updated 'sl' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2' # updated 'slovene' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2' # updated 'th_th' -> 'th_TH.TACTIS' to 'th_TH.ISO8859-11' # updated 'zh_cn' -> 'zh_CN.eucCN' to 'zh_CN.gb2312' # updated 'zh_cn.big5' -> 'zh_TW.eucTW' to 'zh_TW.big5' # updated 'zh_tw' -> 'zh_TW.eucTW' to 'zh_TW.big5' # # MAL 2008-05-30: # Updated alias mapping to most recent locale.alias file # from X.org distribution using makelocalealias.py. # # These are the differences compared to the old mapping (Python 2.5 # and older): # # updated 'cs_cs.iso88592' -> 'cs_CZ.ISO8859-2' to 'cs_CS.ISO8859-2' # updated 'serbocroatian' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' # updated 'sh' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' # updated 'sh_hr.iso88592' -> 'sh_HR.ISO8859-2' to 'hr_HR.ISO8859-2' # updated 'sh_sp' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' # updated 'sh_yu' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' # updated 'sp' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5' # updated 'sp_yu' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5' # updated 'sr' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' # updated 'sr@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' # updated 'sr_sp' -> 'sr_SP.ISO8859-2' to 'sr_CS.ISO8859-2' # updated 'sr_yu' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' # updated 'sr_yu.cp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251' # updated 'sr_yu.iso88592' -> 'sr_YU.ISO8859-2' to 'sr_CS.ISO8859-2' # updated 'sr_yu.iso88595' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' # updated 'sr_yu.iso88595@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' # updated 'sr_yu.microsoftcp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251' # updated 'sr_yu.utf8@cyrillic' -> 'sr_YU.UTF-8' to 'sr_CS.UTF-8' # updated 'sr_yu@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' # # AP 2010-04-12: # Updated alias mapping to most recent locale.alias file # from X.org distribution using makelocalealias.py. 
# # These are the differences compared to the old mapping (Python 2.6.5 # and older): # # updated 'ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8' # updated 'ru_ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8' # updated 'serbocroatian' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' # updated 'sh' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' # updated 'sh_yu' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' # updated 'sr' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' # updated 'sr@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' # updated 'sr@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' # updated 'sr_cs.utf8@latn' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8@latin' # updated 'sr_cs@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' # updated 'sr_yu' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8@latin' # updated 'sr_yu.utf8@cyrillic' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8' # updated 'sr_yu@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' # # SS 2013-12-20: # Updated alias mapping to most recent locale.alias file # from X.org distribution using makelocalealias.py. # # These are the differences compared to the old mapping (Python 3.3.3 # and older): # # updated 'a3' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' # updated 'a3_az' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' # updated 'a3_az.koi8c' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' # updated 'cs_cs.iso88592' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2' # updated 'hebrew' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' # updated 'hebrew.iso88598' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' # updated 'sd' -> '[email protected]' to 'sd_IN.UTF-8' # updated 'sr@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' # updated 'sr_cs' -> 'sr_RS.UTF-8' to 'sr_CS.UTF-8' # updated 'sr_cs.utf8@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' # updated 'sr_cs@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' # # SS 2014-10-01: # Updated alias mapping with glibc 2.19 supported locales. # # SS 2018-05-05: # Updated alias mapping with glibc 2.27 supported locales. 
# # These are the differences compared to the old mapping (Python 3.6.5 # and older): # # updated 'ca_es@valencia' -> 'ca_ES.ISO8859-15@valencia' to 'ca_ES.UTF-8@valencia' # updated 'kk_kz' -> 'kk_KZ.RK1048' to 'kk_KZ.ptcp154' # updated 'russian' -> 'ru_RU.ISO8859-5' to 'ru_RU.KOI8-R' locale_alias = { 'a3': 'az_AZ.KOI8-C', 'a3_az': 'az_AZ.KOI8-C', 'a3_az.koic': 'az_AZ.KOI8-C', 'aa_dj': 'aa_DJ.ISO8859-1', 'aa_er': 'aa_ER.UTF-8', 'aa_et': 'aa_ET.UTF-8', 'af': 'af_ZA.ISO8859-1', 'af_za': 'af_ZA.ISO8859-1', 'agr_pe': 'agr_PE.UTF-8', 'ak_gh': 'ak_GH.UTF-8', 'am': 'am_ET.UTF-8', 'am_et': 'am_ET.UTF-8', 'american': 'en_US.ISO8859-1', 'an_es': 'an_ES.ISO8859-15', 'anp_in': 'anp_IN.UTF-8', 'ar': 'ar_AA.ISO8859-6', 'ar_aa': 'ar_AA.ISO8859-6', 'ar_ae': 'ar_AE.ISO8859-6', 'ar_bh': 'ar_BH.ISO8859-6', 'ar_dz': 'ar_DZ.ISO8859-6', 'ar_eg': 'ar_EG.ISO8859-6', 'ar_in': 'ar_IN.UTF-8', 'ar_iq': 'ar_IQ.ISO8859-6', 'ar_jo': 'ar_JO.ISO8859-6', 'ar_kw': 'ar_KW.ISO8859-6', 'ar_lb': 'ar_LB.ISO8859-6', 'ar_ly': 'ar_LY.ISO8859-6', 'ar_ma': 'ar_MA.ISO8859-6', 'ar_om': 'ar_OM.ISO8859-6', 'ar_qa': 'ar_QA.ISO8859-6', 'ar_sa': 'ar_SA.ISO8859-6', 'ar_sd': 'ar_SD.ISO8859-6', 'ar_ss': 'ar_SS.UTF-8', 'ar_sy': 'ar_SY.ISO8859-6', 'ar_tn': 'ar_TN.ISO8859-6', 'ar_ye': 'ar_YE.ISO8859-6', 'arabic': 'ar_AA.ISO8859-6', 'as': 'as_IN.UTF-8', 'as_in': 'as_IN.UTF-8', 'ast_es': 'ast_ES.ISO8859-15', 'ayc_pe': 'ayc_PE.UTF-8', 'az': 'az_AZ.ISO8859-9E', 'az_az': 'az_AZ.ISO8859-9E', 'az_az.iso88599e': 'az_AZ.ISO8859-9E', 'az_ir': 'az_IR.UTF-8', 'be': 'be_BY.CP1251', 'be@latin': 'be_BY.UTF-8@latin', 'be_bg.utf8': 'bg_BG.UTF-8', 'be_by': 'be_BY.CP1251', 'be_by@latin': 'be_BY.UTF-8@latin', 'bem_zm': 'bem_ZM.UTF-8', 'ber_dz': 'ber_DZ.UTF-8', 'ber_ma': 'ber_MA.UTF-8', 'bg': 'bg_BG.CP1251', 'bg_bg': 'bg_BG.CP1251', 'bhb_in.utf8': 'bhb_IN.UTF-8', 'bho_in': 'bho_IN.UTF-8', 'bho_np': 'bho_NP.UTF-8', 'bi_vu': 'bi_VU.UTF-8', 'bn_bd': 'bn_BD.UTF-8', 'bn_in': 'bn_IN.UTF-8', 'bo_cn': 'bo_CN.UTF-8', 'bo_in': 'bo_IN.UTF-8', 'bokmal': 'nb_NO.ISO8859-1', 'bokm\xe5l': 'nb_NO.ISO8859-1', 'br': 'br_FR.ISO8859-1', 'br_fr': 'br_FR.ISO8859-1', 'brx_in': 'brx_IN.UTF-8', 'bs': 'bs_BA.ISO8859-2', 'bs_ba': 'bs_BA.ISO8859-2', 'bulgarian': 'bg_BG.CP1251', 'byn_er': 'byn_ER.UTF-8', 'c': 'C', 'c-french': 'fr_CA.ISO8859-1', 'c.ascii': 'C', 'c.en': 'C', 'c.iso88591': 'en_US.ISO8859-1', 'c.utf8': 'en_US.UTF-8', 'c_c': 'C', 'c_c.c': 'C', 'ca': 'ca_ES.ISO8859-1', 'ca_ad': 'ca_AD.ISO8859-1', 'ca_es': 'ca_ES.ISO8859-1', 'ca_es@valencia': 'ca_ES.UTF-8@valencia', 'ca_fr': 'ca_FR.ISO8859-1', 'ca_it': 'ca_IT.ISO8859-1', 'catalan': 'ca_ES.ISO8859-1', 'ce_ru': 'ce_RU.UTF-8', 'cextend': 'en_US.ISO8859-1', 'chinese-s': 'zh_CN.eucCN', 'chinese-t': 'zh_TW.eucTW', 'chr_us': 'chr_US.UTF-8', 'ckb_iq': 'ckb_IQ.UTF-8', 'cmn_tw': 'cmn_TW.UTF-8', 'crh_ua': 'crh_UA.UTF-8', 'croatian': 'hr_HR.ISO8859-2', 'cs': 'cs_CZ.ISO8859-2', 'cs_cs': 'cs_CZ.ISO8859-2', 'cs_cz': 'cs_CZ.ISO8859-2', 'csb_pl': 'csb_PL.UTF-8', 'cv_ru': 'cv_RU.UTF-8', 'cy': 'cy_GB.ISO8859-1', 'cy_gb': 'cy_GB.ISO8859-1', 'cz': 'cs_CZ.ISO8859-2', 'cz_cz': 'cs_CZ.ISO8859-2', 'czech': 'cs_CZ.ISO8859-2', 'da': 'da_DK.ISO8859-1', 'da_dk': 'da_DK.ISO8859-1', 'danish': 'da_DK.ISO8859-1', 'dansk': 'da_DK.ISO8859-1', 'de': 'de_DE.ISO8859-1', 'de_at': 'de_AT.ISO8859-1', 'de_be': 'de_BE.ISO8859-1', 'de_ch': 'de_CH.ISO8859-1', 'de_de': 'de_DE.ISO8859-1', 'de_it': 'de_IT.ISO8859-1', 'de_li.utf8': 'de_LI.UTF-8', 'de_lu': 'de_LU.ISO8859-1', 'deutsch': 'de_DE.ISO8859-1', 'doi_in': 'doi_IN.UTF-8', 'dutch': 'nl_NL.ISO8859-1', 'dutch.iso88591': 
'nl_BE.ISO8859-1', 'dv_mv': 'dv_MV.UTF-8', 'dz_bt': 'dz_BT.UTF-8', 'ee': 'ee_EE.ISO8859-4', 'ee_ee': 'ee_EE.ISO8859-4', 'eesti': 'et_EE.ISO8859-1', 'el': 'el_GR.ISO8859-7', 'el_cy': 'el_CY.ISO8859-7', 'el_gr': 'el_GR.ISO8859-7', 'el_gr@euro': 'el_GR.ISO8859-15', 'en': 'en_US.ISO8859-1', 'en_ag': 'en_AG.UTF-8', 'en_au': 'en_AU.ISO8859-1', 'en_be': 'en_BE.ISO8859-1', 'en_bw': 'en_BW.ISO8859-1', 'en_ca': 'en_CA.ISO8859-1', 'en_dk': 'en_DK.ISO8859-1', 'en_dl.utf8': 'en_DL.UTF-8', 'en_gb': 'en_GB.ISO8859-1', 'en_hk': 'en_HK.ISO8859-1', 'en_ie': 'en_IE.ISO8859-1', 'en_il': 'en_IL.UTF-8', 'en_in': 'en_IN.ISO8859-1', 'en_ng': 'en_NG.UTF-8', 'en_nz': 'en_NZ.ISO8859-1', 'en_ph': 'en_PH.ISO8859-1', 'en_sc.utf8': 'en_SC.UTF-8', 'en_sg': 'en_SG.ISO8859-1', 'en_uk': 'en_GB.ISO8859-1', 'en_us': 'en_US.ISO8859-1', 'en_us@euro@euro': 'en_US.ISO8859-15', 'en_za': 'en_ZA.ISO8859-1', 'en_zm': 'en_ZM.UTF-8', 'en_zw': 'en_ZW.ISO8859-1', 'en_zw.utf8': 'en_ZS.UTF-8', 'eng_gb': 'en_GB.ISO8859-1', 'english': 'en_EN.ISO8859-1', 'english.iso88591': 'en_US.ISO8859-1', 'english_uk': 'en_GB.ISO8859-1', 'english_united-states': 'en_US.ISO8859-1', 'english_united-states.437': 'C', 'english_us': 'en_US.ISO8859-1', 'eo': 'eo_XX.ISO8859-3', 'eo.utf8': 'eo.UTF-8', 'eo_eo': 'eo_EO.ISO8859-3', 'eo_us.utf8': 'eo_US.UTF-8', 'eo_xx': 'eo_XX.ISO8859-3', 'es': 'es_ES.ISO8859-1', 'es_ar': 'es_AR.ISO8859-1', 'es_bo': 'es_BO.ISO8859-1', 'es_cl': 'es_CL.ISO8859-1', 'es_co': 'es_CO.ISO8859-1', 'es_cr': 'es_CR.ISO8859-1', 'es_cu': 'es_CU.UTF-8', 'es_do': 'es_DO.ISO8859-1', 'es_ec': 'es_EC.ISO8859-1', 'es_es': 'es_ES.ISO8859-1', 'es_gt': 'es_GT.ISO8859-1', 'es_hn': 'es_HN.ISO8859-1', 'es_mx': 'es_MX.ISO8859-1', 'es_ni': 'es_NI.ISO8859-1', 'es_pa': 'es_PA.ISO8859-1', 'es_pe': 'es_PE.ISO8859-1', 'es_pr': 'es_PR.ISO8859-1', 'es_py': 'es_PY.ISO8859-1', 'es_sv': 'es_SV.ISO8859-1', 'es_us': 'es_US.ISO8859-1', 'es_uy': 'es_UY.ISO8859-1', 'es_ve': 'es_VE.ISO8859-1', 'estonian': 'et_EE.ISO8859-1', 'et': 'et_EE.ISO8859-15', 'et_ee': 'et_EE.ISO8859-15', 'eu': 'eu_ES.ISO8859-1', 'eu_es': 'eu_ES.ISO8859-1', 'eu_fr': 'eu_FR.ISO8859-1', 'fa': 'fa_IR.UTF-8', 'fa_ir': 'fa_IR.UTF-8', 'fa_ir.isiri3342': 'fa_IR.ISIRI-3342', 'ff_sn': 'ff_SN.UTF-8', 'fi': 'fi_FI.ISO8859-15', 'fi_fi': 'fi_FI.ISO8859-15', 'fil_ph': 'fil_PH.UTF-8', 'finnish': 'fi_FI.ISO8859-1', 'fo': 'fo_FO.ISO8859-1', 'fo_fo': 'fo_FO.ISO8859-1', 'fr': 'fr_FR.ISO8859-1', 'fr_be': 'fr_BE.ISO8859-1', 'fr_ca': 'fr_CA.ISO8859-1', 'fr_ch': 'fr_CH.ISO8859-1', 'fr_fr': 'fr_FR.ISO8859-1', 'fr_lu': 'fr_LU.ISO8859-1', 'fran\xe7ais': 'fr_FR.ISO8859-1', 'fre_fr': 'fr_FR.ISO8859-1', 'french': 'fr_FR.ISO8859-1', 'french.iso88591': 'fr_CH.ISO8859-1', 'french_france': 'fr_FR.ISO8859-1', 'fur_it': 'fur_IT.UTF-8', 'fy_de': 'fy_DE.UTF-8', 'fy_nl': 'fy_NL.UTF-8', 'ga': 'ga_IE.ISO8859-1', 'ga_ie': 'ga_IE.ISO8859-1', 'galego': 'gl_ES.ISO8859-1', 'galician': 'gl_ES.ISO8859-1', 'gd': 'gd_GB.ISO8859-1', 'gd_gb': 'gd_GB.ISO8859-1', 'ger_de': 'de_DE.ISO8859-1', 'german': 'de_DE.ISO8859-1', 'german.iso88591': 'de_CH.ISO8859-1', 'german_germany': 'de_DE.ISO8859-1', 'gez_er': 'gez_ER.UTF-8', 'gez_et': 'gez_ET.UTF-8', 'gl': 'gl_ES.ISO8859-1', 'gl_es': 'gl_ES.ISO8859-1', 'greek': 'el_GR.ISO8859-7', 'gu_in': 'gu_IN.UTF-8', 'gv': 'gv_GB.ISO8859-1', 'gv_gb': 'gv_GB.ISO8859-1', 'ha_ng': 'ha_NG.UTF-8', 'hak_tw': 'hak_TW.UTF-8', 'he': 'he_IL.ISO8859-8', 'he_il': 'he_IL.ISO8859-8', 'hebrew': 'he_IL.ISO8859-8', 'hi': 'hi_IN.ISCII-DEV', 'hi_in': 'hi_IN.ISCII-DEV', 'hi_in.isciidev': 'hi_IN.ISCII-DEV', 'hif_fj': 'hif_FJ.UTF-8', 'hne': 
'hne_IN.UTF-8', 'hne_in': 'hne_IN.UTF-8', 'hr': 'hr_HR.ISO8859-2', 'hr_hr': 'hr_HR.ISO8859-2', 'hrvatski': 'hr_HR.ISO8859-2', 'hsb_de': 'hsb_DE.ISO8859-2', 'ht_ht': 'ht_HT.UTF-8', 'hu': 'hu_HU.ISO8859-2', 'hu_hu': 'hu_HU.ISO8859-2', 'hungarian': 'hu_HU.ISO8859-2', 'hy_am': 'hy_AM.UTF-8', 'hy_am.armscii8': 'hy_AM.ARMSCII_8', 'ia': 'ia.UTF-8', 'ia_fr': 'ia_FR.UTF-8', 'icelandic': 'is_IS.ISO8859-1', 'id': 'id_ID.ISO8859-1', 'id_id': 'id_ID.ISO8859-1', 'ig_ng': 'ig_NG.UTF-8', 'ik_ca': 'ik_CA.UTF-8', 'in': 'id_ID.ISO8859-1', 'in_id': 'id_ID.ISO8859-1', 'is': 'is_IS.ISO8859-1', 'is_is': 'is_IS.ISO8859-1', 'iso-8859-1': 'en_US.ISO8859-1', 'iso-8859-15': 'en_US.ISO8859-15', 'iso8859-1': 'en_US.ISO8859-1', 'iso8859-15': 'en_US.ISO8859-15', 'iso_8859_1': 'en_US.ISO8859-1', 'iso_8859_15': 'en_US.ISO8859-15', 'it': 'it_IT.ISO8859-1', 'it_ch': 'it_CH.ISO8859-1', 'it_it': 'it_IT.ISO8859-1', 'italian': 'it_IT.ISO8859-1', 'iu': 'iu_CA.NUNACOM-8', 'iu_ca': 'iu_CA.NUNACOM-8', 'iu_ca.nunacom8': 'iu_CA.NUNACOM-8', 'iw': 'he_IL.ISO8859-8', 'iw_il': 'he_IL.ISO8859-8', 'iw_il.utf8': 'iw_IL.UTF-8', 'ja': 'ja_JP.eucJP', 'ja_jp': 'ja_JP.eucJP', 'ja_jp.euc': 'ja_JP.eucJP', 'ja_jp.mscode': 'ja_JP.SJIS', 'ja_jp.pck': 'ja_JP.SJIS', 'japan': 'ja_JP.eucJP', 'japanese': 'ja_JP.eucJP', 'japanese-euc': 'ja_JP.eucJP', 'japanese.euc': 'ja_JP.eucJP', 'jp_jp': 'ja_JP.eucJP', 'ka': 'ka_GE.GEORGIAN-ACADEMY', 'ka_ge': 'ka_GE.GEORGIAN-ACADEMY', 'ka_ge.georgianacademy': 'ka_GE.GEORGIAN-ACADEMY', 'ka_ge.georgianps': 'ka_GE.GEORGIAN-PS', 'ka_ge.georgianrs': 'ka_GE.GEORGIAN-ACADEMY', 'kab_dz': 'kab_DZ.UTF-8', 'kk_kz': 'kk_KZ.ptcp154', 'kl': 'kl_GL.ISO8859-1', 'kl_gl': 'kl_GL.ISO8859-1', 'km_kh': 'km_KH.UTF-8', 'kn': 'kn_IN.UTF-8', 'kn_in': 'kn_IN.UTF-8', 'ko': 'ko_KR.eucKR', 'ko_kr': 'ko_KR.eucKR', 'ko_kr.euc': 'ko_KR.eucKR', 'kok_in': 'kok_IN.UTF-8', 'korean': 'ko_KR.eucKR', 'korean.euc': 'ko_KR.eucKR', 'ks': 'ks_IN.UTF-8', 'ks_in': 'ks_IN.UTF-8', '[email protected]': 'ks_IN.UTF-8@devanagari', 'ku_tr': 'ku_TR.ISO8859-9', 'kw': 'kw_GB.ISO8859-1', 'kw_gb': 'kw_GB.ISO8859-1', 'ky': 'ky_KG.UTF-8', 'ky_kg': 'ky_KG.UTF-8', 'lb_lu': 'lb_LU.UTF-8', 'lg_ug': 'lg_UG.ISO8859-10', 'li_be': 'li_BE.UTF-8', 'li_nl': 'li_NL.UTF-8', 'lij_it': 'lij_IT.UTF-8', 'lithuanian': 'lt_LT.ISO8859-13', 'ln_cd': 'ln_CD.UTF-8', 'lo': 'lo_LA.MULELAO-1', 'lo_la': 'lo_LA.MULELAO-1', 'lo_la.cp1133': 'lo_LA.IBM-CP1133', 'lo_la.ibmcp1133': 'lo_LA.IBM-CP1133', 'lo_la.mulelao1': 'lo_LA.MULELAO-1', 'lt': 'lt_LT.ISO8859-13', 'lt_lt': 'lt_LT.ISO8859-13', 'lv': 'lv_LV.ISO8859-13', 'lv_lv': 'lv_LV.ISO8859-13', 'lzh_tw': 'lzh_TW.UTF-8', 'mag_in': 'mag_IN.UTF-8', 'mai': 'mai_IN.UTF-8', 'mai_in': 'mai_IN.UTF-8', 'mai_np': 'mai_NP.UTF-8', 'mfe_mu': 'mfe_MU.UTF-8', 'mg_mg': 'mg_MG.ISO8859-15', 'mhr_ru': 'mhr_RU.UTF-8', 'mi': 'mi_NZ.ISO8859-1', 'mi_nz': 'mi_NZ.ISO8859-1', 'miq_ni': 'miq_NI.UTF-8', 'mjw_in': 'mjw_IN.UTF-8', 'mk': 'mk_MK.ISO8859-5', 'mk_mk': 'mk_MK.ISO8859-5', 'ml': 'ml_IN.UTF-8', 'ml_in': 'ml_IN.UTF-8', 'mn_mn': 'mn_MN.UTF-8', 'mni_in': 'mni_IN.UTF-8', 'mr': 'mr_IN.UTF-8', 'mr_in': 'mr_IN.UTF-8', 'ms': 'ms_MY.ISO8859-1', 'ms_my': 'ms_MY.ISO8859-1', 'mt': 'mt_MT.ISO8859-3', 'mt_mt': 'mt_MT.ISO8859-3', 'my_mm': 'my_MM.UTF-8', 'nan_tw': 'nan_TW.UTF-8', 'nb': 'nb_NO.ISO8859-1', 'nb_no': 'nb_NO.ISO8859-1', 'nds_de': 'nds_DE.UTF-8', 'nds_nl': 'nds_NL.UTF-8', 'ne_np': 'ne_NP.UTF-8', 'nhn_mx': 'nhn_MX.UTF-8', 'niu_nu': 'niu_NU.UTF-8', 'niu_nz': 'niu_NZ.UTF-8', 'nl': 'nl_NL.ISO8859-1', 'nl_aw': 'nl_AW.UTF-8', 'nl_be': 'nl_BE.ISO8859-1', 'nl_nl': 'nl_NL.ISO8859-1', 'nn': 
'nn_NO.ISO8859-1', 'nn_no': 'nn_NO.ISO8859-1', 'no': 'no_NO.ISO8859-1', 'no@nynorsk': 'ny_NO.ISO8859-1', 'no_no': 'no_NO.ISO8859-1', 'no_no.iso88591@bokmal': 'no_NO.ISO8859-1', 'no_no.iso88591@nynorsk': 'no_NO.ISO8859-1', 'norwegian': 'no_NO.ISO8859-1', 'nr': 'nr_ZA.ISO8859-1', 'nr_za': 'nr_ZA.ISO8859-1', 'nso': 'nso_ZA.ISO8859-15', 'nso_za': 'nso_ZA.ISO8859-15', 'ny': 'ny_NO.ISO8859-1', 'ny_no': 'ny_NO.ISO8859-1', 'nynorsk': 'nn_NO.ISO8859-1', 'oc': 'oc_FR.ISO8859-1', 'oc_fr': 'oc_FR.ISO8859-1', 'om_et': 'om_ET.UTF-8', 'om_ke': 'om_KE.ISO8859-1', 'or': 'or_IN.UTF-8', 'or_in': 'or_IN.UTF-8', 'os_ru': 'os_RU.UTF-8', 'pa': 'pa_IN.UTF-8', 'pa_in': 'pa_IN.UTF-8', 'pa_pk': 'pa_PK.UTF-8', 'pap_an': 'pap_AN.UTF-8', 'pap_aw': 'pap_AW.UTF-8', 'pap_cw': 'pap_CW.UTF-8', 'pd': 'pd_US.ISO8859-1', 'pd_de': 'pd_DE.ISO8859-1', 'pd_us': 'pd_US.ISO8859-1', 'ph': 'ph_PH.ISO8859-1', 'ph_ph': 'ph_PH.ISO8859-1', 'pl': 'pl_PL.ISO8859-2', 'pl_pl': 'pl_PL.ISO8859-2', 'polish': 'pl_PL.ISO8859-2', 'portuguese': 'pt_PT.ISO8859-1', 'portuguese_brazil': 'pt_BR.ISO8859-1', 'posix': 'C', 'posix-utf2': 'C', 'pp': 'pp_AN.ISO8859-1', 'pp_an': 'pp_AN.ISO8859-1', 'ps_af': 'ps_AF.UTF-8', 'pt': 'pt_PT.ISO8859-1', 'pt_br': 'pt_BR.ISO8859-1', 'pt_pt': 'pt_PT.ISO8859-1', 'quz_pe': 'quz_PE.UTF-8', 'raj_in': 'raj_IN.UTF-8', 'ro': 'ro_RO.ISO8859-2', 'ro_ro': 'ro_RO.ISO8859-2', 'romanian': 'ro_RO.ISO8859-2', 'ru': 'ru_RU.UTF-8', 'ru_ru': 'ru_RU.UTF-8', 'ru_ua': 'ru_UA.KOI8-U', 'rumanian': 'ro_RO.ISO8859-2', 'russian': 'ru_RU.KOI8-R', 'rw': 'rw_RW.ISO8859-1', 'rw_rw': 'rw_RW.ISO8859-1', 'sa_in': 'sa_IN.UTF-8', 'sat_in': 'sat_IN.UTF-8', 'sc_it': 'sc_IT.UTF-8', 'sd': 'sd_IN.UTF-8', 'sd_in': 'sd_IN.UTF-8', '[email protected]': 'sd_IN.UTF-8@devanagari', 'sd_pk': 'sd_PK.UTF-8', 'se_no': 'se_NO.UTF-8', 'serbocroatian': 'sr_RS.UTF-8@latin', 'sgs_lt': 'sgs_LT.UTF-8', 'sh': 'sr_RS.UTF-8@latin', 'sh_ba.iso88592@bosnia': 'sr_CS.ISO8859-2', 'sh_hr': 'sh_HR.ISO8859-2', 'sh_hr.iso88592': 'hr_HR.ISO8859-2', 'sh_sp': 'sr_CS.ISO8859-2', 'sh_yu': 'sr_RS.UTF-8@latin', 'shn_mm': 'shn_MM.UTF-8', 'shs_ca': 'shs_CA.UTF-8', 'si': 'si_LK.UTF-8', 'si_lk': 'si_LK.UTF-8', 'sid_et': 'sid_ET.UTF-8', 'sinhala': 'si_LK.UTF-8', 'sk': 'sk_SK.ISO8859-2', 'sk_sk': 'sk_SK.ISO8859-2', 'sl': 'sl_SI.ISO8859-2', 'sl_cs': 'sl_CS.ISO8859-2', 'sl_si': 'sl_SI.ISO8859-2', 'slovak': 'sk_SK.ISO8859-2', 'slovene': 'sl_SI.ISO8859-2', 'slovenian': 'sl_SI.ISO8859-2', 'sm_ws': 'sm_WS.UTF-8', 'so_dj': 'so_DJ.ISO8859-1', 'so_et': 'so_ET.UTF-8', 'so_ke': 'so_KE.ISO8859-1', 'so_so': 'so_SO.ISO8859-1', 'sp': 'sr_CS.ISO8859-5', 'sp_yu': 'sr_CS.ISO8859-5', 'spanish': 'es_ES.ISO8859-1', 'spanish_spain': 'es_ES.ISO8859-1', 'sq': 'sq_AL.ISO8859-2', 'sq_al': 'sq_AL.ISO8859-2', 'sq_mk': 'sq_MK.UTF-8', 'sr': 'sr_RS.UTF-8', 'sr@cyrillic': 'sr_RS.UTF-8', 'sr@latn': 'sr_CS.UTF-8@latin', 'sr_cs': 'sr_CS.UTF-8', 'sr_cs.iso88592@latn': 'sr_CS.ISO8859-2', 'sr_cs@latn': 'sr_CS.UTF-8@latin', 'sr_me': 'sr_ME.UTF-8', 'sr_rs': 'sr_RS.UTF-8', 'sr_rs@latn': 'sr_RS.UTF-8@latin', 'sr_sp': 'sr_CS.ISO8859-2', 'sr_yu': 'sr_RS.UTF-8@latin', 'sr_yu.cp1251@cyrillic': 'sr_CS.CP1251', 'sr_yu.iso88592': 'sr_CS.ISO8859-2', 'sr_yu.iso88595': 'sr_CS.ISO8859-5', 'sr_yu.iso88595@cyrillic': 'sr_CS.ISO8859-5', 'sr_yu.microsoftcp1251@cyrillic': 'sr_CS.CP1251', 'sr_yu.utf8': 'sr_RS.UTF-8', 'sr_yu.utf8@cyrillic': 'sr_RS.UTF-8', 'sr_yu@cyrillic': 'sr_RS.UTF-8', 'ss': 'ss_ZA.ISO8859-1', 'ss_za': 'ss_ZA.ISO8859-1', 'st': 'st_ZA.ISO8859-1', 'st_za': 'st_ZA.ISO8859-1', 'sv': 'sv_SE.ISO8859-1', 'sv_fi': 'sv_FI.ISO8859-1', 'sv_se': 
'sv_SE.ISO8859-1', 'sw_ke': 'sw_KE.UTF-8', 'sw_tz': 'sw_TZ.UTF-8', 'swedish': 'sv_SE.ISO8859-1', 'szl_pl': 'szl_PL.UTF-8', 'ta': 'ta_IN.TSCII-0', 'ta_in': 'ta_IN.TSCII-0', 'ta_in.tscii': 'ta_IN.TSCII-0', 'ta_in.tscii0': 'ta_IN.TSCII-0', 'ta_lk': 'ta_LK.UTF-8', 'tcy_in.utf8': 'tcy_IN.UTF-8', 'te': 'te_IN.UTF-8', 'te_in': 'te_IN.UTF-8', 'tg': 'tg_TJ.KOI8-C', 'tg_tj': 'tg_TJ.KOI8-C', 'th': 'th_TH.ISO8859-11', 'th_th': 'th_TH.ISO8859-11', 'th_th.tactis': 'th_TH.TIS620', 'th_th.tis620': 'th_TH.TIS620', 'thai': 'th_TH.ISO8859-11', 'the_np': 'the_NP.UTF-8', 'ti_er': 'ti_ER.UTF-8', 'ti_et': 'ti_ET.UTF-8', 'tig_er': 'tig_ER.UTF-8', 'tk_tm': 'tk_TM.UTF-8', 'tl': 'tl_PH.ISO8859-1', 'tl_ph': 'tl_PH.ISO8859-1', 'tn': 'tn_ZA.ISO8859-15', 'tn_za': 'tn_ZA.ISO8859-15', 'to_to': 'to_TO.UTF-8', 'tpi_pg': 'tpi_PG.UTF-8', 'tr': 'tr_TR.ISO8859-9', 'tr_cy': 'tr_CY.ISO8859-9', 'tr_tr': 'tr_TR.ISO8859-9', 'ts': 'ts_ZA.ISO8859-1', 'ts_za': 'ts_ZA.ISO8859-1', 'tt': 'tt_RU.TATAR-CYR', 'tt_ru': 'tt_RU.TATAR-CYR', 'tt_ru.tatarcyr': 'tt_RU.TATAR-CYR', 'tt_ru@iqtelif': 'tt_RU.UTF-8@iqtelif', 'turkish': 'tr_TR.ISO8859-9', 'ug_cn': 'ug_CN.UTF-8', 'uk': 'uk_UA.KOI8-U', 'uk_ua': 'uk_UA.KOI8-U', 'univ': 'en_US.utf', 'universal': 'en_US.utf', 'universal.utf8@ucs4': 'en_US.UTF-8', 'unm_us': 'unm_US.UTF-8', 'ur': 'ur_PK.CP1256', 'ur_in': 'ur_IN.UTF-8', 'ur_pk': 'ur_PK.CP1256', 'uz': 'uz_UZ.UTF-8', 'uz_uz': 'uz_UZ.UTF-8', 'uz_uz@cyrillic': 'uz_UZ.UTF-8', 've': 've_ZA.UTF-8', 've_za': 've_ZA.UTF-8', 'vi': 'vi_VN.TCVN', 'vi_vn': 'vi_VN.TCVN', 'vi_vn.tcvn': 'vi_VN.TCVN', 'vi_vn.tcvn5712': 'vi_VN.TCVN', 'vi_vn.viscii': 'vi_VN.VISCII', 'vi_vn.viscii111': 'vi_VN.VISCII', 'wa': 'wa_BE.ISO8859-1', 'wa_be': 'wa_BE.ISO8859-1', 'wae_ch': 'wae_CH.UTF-8', 'wal_et': 'wal_ET.UTF-8', 'wo_sn': 'wo_SN.UTF-8', 'xh': 'xh_ZA.ISO8859-1', 'xh_za': 'xh_ZA.ISO8859-1', 'yi': 'yi_US.CP1255', 'yi_us': 'yi_US.CP1255', 'yo_ng': 'yo_NG.UTF-8', 'yue_hk': 'yue_HK.UTF-8', 'yuw_pg': 'yuw_PG.UTF-8', 'zh': 'zh_CN.eucCN', 'zh_cn': 'zh_CN.gb2312', 'zh_cn.big5': 'zh_TW.big5', 'zh_cn.euc': 'zh_CN.eucCN', 'zh_hk': 'zh_HK.big5hkscs', 'zh_hk.big5hk': 'zh_HK.big5hkscs', 'zh_sg': 'zh_SG.GB2312', 'zh_sg.gbk': 'zh_SG.GBK', 'zh_tw': 'zh_TW.big5', 'zh_tw.euc': 'zh_TW.eucTW', 'zh_tw.euctw': 'zh_TW.eucTW', 'zu': 'zu_ZA.ISO8859-1', 'zu_za': 'zu_ZA.ISO8859-1', } # # This maps Windows language identifiers to locale strings. # # This list has been updated from # http://msdn.microsoft.com/library/default.asp?url=/library/en-us/intl/nls_238z.asp # to include every locale up to Windows Vista. # # NOTE: this mapping is incomplete. If your language is missing, please # submit a bug report to the Python bug tracker at http://bugs.python.org/ # Make sure you include the missing language identifier and the suggested # locale code. 
# windows_locale = { 0x0436: "af_ZA", # Afrikaans 0x041c: "sq_AL", # Albanian 0x0484: "gsw_FR",# Alsatian - France 0x045e: "am_ET", # Amharic - Ethiopia 0x0401: "ar_SA", # Arabic - Saudi Arabia 0x0801: "ar_IQ", # Arabic - Iraq 0x0c01: "ar_EG", # Arabic - Egypt 0x1001: "ar_LY", # Arabic - Libya 0x1401: "ar_DZ", # Arabic - Algeria 0x1801: "ar_MA", # Arabic - Morocco 0x1c01: "ar_TN", # Arabic - Tunisia 0x2001: "ar_OM", # Arabic - Oman 0x2401: "ar_YE", # Arabic - Yemen 0x2801: "ar_SY", # Arabic - Syria 0x2c01: "ar_JO", # Arabic - Jordan 0x3001: "ar_LB", # Arabic - Lebanon 0x3401: "ar_KW", # Arabic - Kuwait 0x3801: "ar_AE", # Arabic - United Arab Emirates 0x3c01: "ar_BH", # Arabic - Bahrain 0x4001: "ar_QA", # Arabic - Qatar 0x042b: "hy_AM", # Armenian 0x044d: "as_IN", # Assamese - India 0x042c: "az_AZ", # Azeri - Latin 0x082c: "az_AZ", # Azeri - Cyrillic 0x046d: "ba_RU", # Bashkir 0x042d: "eu_ES", # Basque - Russia 0x0423: "be_BY", # Belarusian 0x0445: "bn_IN", # Begali 0x201a: "bs_BA", # Bosnian - Cyrillic 0x141a: "bs_BA", # Bosnian - Latin 0x047e: "br_FR", # Breton - France 0x0402: "bg_BG", # Bulgarian # 0x0455: "my_MM", # Burmese - Not supported 0x0403: "ca_ES", # Catalan 0x0004: "zh_CHS",# Chinese - Simplified 0x0404: "zh_TW", # Chinese - Taiwan 0x0804: "zh_CN", # Chinese - PRC 0x0c04: "zh_HK", # Chinese - Hong Kong S.A.R. 0x1004: "zh_SG", # Chinese - Singapore 0x1404: "zh_MO", # Chinese - Macao S.A.R. 0x7c04: "zh_CHT",# Chinese - Traditional 0x0483: "co_FR", # Corsican - France 0x041a: "hr_HR", # Croatian 0x101a: "hr_BA", # Croatian - Bosnia 0x0405: "cs_CZ", # Czech 0x0406: "da_DK", # Danish 0x048c: "gbz_AF",# Dari - Afghanistan 0x0465: "div_MV",# Divehi - Maldives 0x0413: "nl_NL", # Dutch - The Netherlands 0x0813: "nl_BE", # Dutch - Belgium 0x0409: "en_US", # English - United States 0x0809: "en_GB", # English - United Kingdom 0x0c09: "en_AU", # English - Australia 0x1009: "en_CA", # English - Canada 0x1409: "en_NZ", # English - New Zealand 0x1809: "en_IE", # English - Ireland 0x1c09: "en_ZA", # English - South Africa 0x2009: "en_JA", # English - Jamaica 0x2409: "en_CB", # English - Caribbean 0x2809: "en_BZ", # English - Belize 0x2c09: "en_TT", # English - Trinidad 0x3009: "en_ZW", # English - Zimbabwe 0x3409: "en_PH", # English - Philippines 0x4009: "en_IN", # English - India 0x4409: "en_MY", # English - Malaysia 0x4809: "en_IN", # English - Singapore 0x0425: "et_EE", # Estonian 0x0438: "fo_FO", # Faroese 0x0464: "fil_PH",# Filipino 0x040b: "fi_FI", # Finnish 0x040c: "fr_FR", # French - France 0x080c: "fr_BE", # French - Belgium 0x0c0c: "fr_CA", # French - Canada 0x100c: "fr_CH", # French - Switzerland 0x140c: "fr_LU", # French - Luxembourg 0x180c: "fr_MC", # French - Monaco 0x0462: "fy_NL", # Frisian - Netherlands 0x0456: "gl_ES", # Galician 0x0437: "ka_GE", # Georgian 0x0407: "de_DE", # German - Germany 0x0807: "de_CH", # German - Switzerland 0x0c07: "de_AT", # German - Austria 0x1007: "de_LU", # German - Luxembourg 0x1407: "de_LI", # German - Liechtenstein 0x0408: "el_GR", # Greek 0x046f: "kl_GL", # Greenlandic - Greenland 0x0447: "gu_IN", # Gujarati 0x0468: "ha_NG", # Hausa - Latin 0x040d: "he_IL", # Hebrew 0x0439: "hi_IN", # Hindi 0x040e: "hu_HU", # Hungarian 0x040f: "is_IS", # Icelandic 0x0421: "id_ID", # Indonesian 0x045d: "iu_CA", # Inuktitut - Syllabics 0x085d: "iu_CA", # Inuktitut - Latin 0x083c: "ga_IE", # Irish - Ireland 0x0410: "it_IT", # Italian - Italy 0x0810: "it_CH", # Italian - Switzerland 0x0411: "ja_JP", # Japanese 0x044b: "kn_IN", # Kannada - India 0x043f: "kk_KZ", # 
Kazakh 0x0453: "kh_KH", # Khmer - Cambodia 0x0486: "qut_GT",# K'iche - Guatemala 0x0487: "rw_RW", # Kinyarwanda - Rwanda 0x0457: "kok_IN",# Konkani 0x0412: "ko_KR", # Korean 0x0440: "ky_KG", # Kyrgyz 0x0454: "lo_LA", # Lao - Lao PDR 0x0426: "lv_LV", # Latvian 0x0427: "lt_LT", # Lithuanian 0x082e: "dsb_DE",# Lower Sorbian - Germany 0x046e: "lb_LU", # Luxembourgish 0x042f: "mk_MK", # FYROM Macedonian 0x043e: "ms_MY", # Malay - Malaysia 0x083e: "ms_BN", # Malay - Brunei Darussalam 0x044c: "ml_IN", # Malayalam - India 0x043a: "mt_MT", # Maltese 0x0481: "mi_NZ", # Maori 0x047a: "arn_CL",# Mapudungun 0x044e: "mr_IN", # Marathi 0x047c: "moh_CA",# Mohawk - Canada 0x0450: "mn_MN", # Mongolian - Cyrillic 0x0850: "mn_CN", # Mongolian - PRC 0x0461: "ne_NP", # Nepali 0x0414: "nb_NO", # Norwegian - Bokmal 0x0814: "nn_NO", # Norwegian - Nynorsk 0x0482: "oc_FR", # Occitan - France 0x0448: "or_IN", # Oriya - India 0x0463: "ps_AF", # Pashto - Afghanistan 0x0429: "fa_IR", # Persian 0x0415: "pl_PL", # Polish 0x0416: "pt_BR", # Portuguese - Brazil 0x0816: "pt_PT", # Portuguese - Portugal 0x0446: "pa_IN", # Punjabi 0x046b: "quz_BO",# Quechua (Bolivia) 0x086b: "quz_EC",# Quechua (Ecuador) 0x0c6b: "quz_PE",# Quechua (Peru) 0x0418: "ro_RO", # Romanian - Romania 0x0417: "rm_CH", # Romansh 0x0419: "ru_RU", # Russian 0x243b: "smn_FI",# Sami Finland 0x103b: "smj_NO",# Sami Norway 0x143b: "smj_SE",# Sami Sweden 0x043b: "se_NO", # Sami Northern Norway 0x083b: "se_SE", # Sami Northern Sweden 0x0c3b: "se_FI", # Sami Northern Finland 0x203b: "sms_FI",# Sami Skolt 0x183b: "sma_NO",# Sami Southern Norway 0x1c3b: "sma_SE",# Sami Southern Sweden 0x044f: "sa_IN", # Sanskrit 0x0c1a: "sr_SP", # Serbian - Cyrillic 0x1c1a: "sr_BA", # Serbian - Bosnia Cyrillic 0x081a: "sr_SP", # Serbian - Latin 0x181a: "sr_BA", # Serbian - Bosnia Latin 0x045b: "si_LK", # Sinhala - Sri Lanka 0x046c: "ns_ZA", # Northern Sotho 0x0432: "tn_ZA", # Setswana - Southern Africa 0x041b: "sk_SK", # Slovak 0x0424: "sl_SI", # Slovenian 0x040a: "es_ES", # Spanish - Spain 0x080a: "es_MX", # Spanish - Mexico 0x0c0a: "es_ES", # Spanish - Spain (Modern) 0x100a: "es_GT", # Spanish - Guatemala 0x140a: "es_CR", # Spanish - Costa Rica 0x180a: "es_PA", # Spanish - Panama 0x1c0a: "es_DO", # Spanish - Dominican Republic 0x200a: "es_VE", # Spanish - Venezuela 0x240a: "es_CO", # Spanish - Colombia 0x280a: "es_PE", # Spanish - Peru 0x2c0a: "es_AR", # Spanish - Argentina 0x300a: "es_EC", # Spanish - Ecuador 0x340a: "es_CL", # Spanish - Chile 0x380a: "es_UR", # Spanish - Uruguay 0x3c0a: "es_PY", # Spanish - Paraguay 0x400a: "es_BO", # Spanish - Bolivia 0x440a: "es_SV", # Spanish - El Salvador 0x480a: "es_HN", # Spanish - Honduras 0x4c0a: "es_NI", # Spanish - Nicaragua 0x500a: "es_PR", # Spanish - Puerto Rico 0x540a: "es_US", # Spanish - United States # 0x0430: "", # Sutu - Not supported 0x0441: "sw_KE", # Swahili 0x041d: "sv_SE", # Swedish - Sweden 0x081d: "sv_FI", # Swedish - Finland 0x045a: "syr_SY",# Syriac 0x0428: "tg_TJ", # Tajik - Cyrillic 0x085f: "tmz_DZ",# Tamazight - Latin 0x0449: "ta_IN", # Tamil 0x0444: "tt_RU", # Tatar 0x044a: "te_IN", # Telugu 0x041e: "th_TH", # Thai 0x0851: "bo_BT", # Tibetan - Bhutan 0x0451: "bo_CN", # Tibetan - PRC 0x041f: "tr_TR", # Turkish 0x0442: "tk_TM", # Turkmen - Cyrillic 0x0480: "ug_CN", # Uighur - Arabic 0x0422: "uk_UA", # Ukrainian 0x042e: "wen_DE",# Upper Sorbian - Germany 0x0420: "ur_PK", # Urdu 0x0820: "ur_IN", # Urdu - India 0x0443: "uz_UZ", # Uzbek - Latin 0x0843: "uz_UZ", # Uzbek - Cyrillic 0x042a: "vi_VN", # Vietnamese 0x0452: 
"cy_GB", # Welsh 0x0488: "wo_SN", # Wolof - Senegal 0x0434: "xh_ZA", # Xhosa - South Africa 0x0485: "sah_RU",# Yakut - Cyrillic 0x0478: "ii_CN", # Yi - PRC 0x046a: "yo_NG", # Yoruba - Nigeria 0x0435: "zu_ZA", # Zulu } def _print_locale(): """ Test function. """ categories = {} def _init_categories(categories=categories): for k,v in globals().items(): if k[:3] == 'LC_': categories[k] = v _init_categories() del categories['LC_ALL'] print('Locale defaults as determined by getdefaultlocale():') print('-'*72) lang, enc = getdefaultlocale() print('Language: ', lang or '(undefined)') print('Encoding: ', enc or '(undefined)') print() print('Locale settings on startup:') print('-'*72) for name,category in categories.items(): print(name, '...') lang, enc = getlocale(category) print(' Language: ', lang or '(undefined)') print(' Encoding: ', enc or '(undefined)') print() print() print('Locale settings after calling resetlocale():') print('-'*72) resetlocale() for name,category in categories.items(): print(name, '...') lang, enc = getlocale(category) print(' Language: ', lang or '(undefined)') print(' Encoding: ', enc or '(undefined)') print() try: setlocale(LC_ALL, "") except: print('NOTE:') print('setlocale(LC_ALL, "") does not support the default locale') print('given in the OS environment variables.') else: print() print('Locale settings after calling setlocale(LC_ALL, ""):') print('-'*72) for name,category in categories.items(): print(name, '...') lang, enc = getlocale(category) print(' Language: ', lang or '(undefined)') print(' Encoding: ', enc or '(undefined)') print() ### try: LC_MESSAGES except NameError: pass else: __all__.append("LC_MESSAGES") if __name__=='__main__': print('Locale aliasing:') print() _print_locale() print() print('Number formatting:') print() _test()
py
1a4e9fd8ad51b40a67c29abef0527f00a6de88e1
""" Augmenter that apply operation (word level) to textual input based on contextual word embeddings. """ import string import os import re import logging from nlpaug.augmenter.word import WordAugmenter import nlpaug.model.lang_models as nml from nlpaug.util import Action, Doc CONTEXT_WORD_EMBS_MODELS = {} def init_context_word_embs_model(model_path, model_type, device, force_reload=False, batch_size=32, top_k=None, silence=True, use_custom_api=False): global CONTEXT_WORD_EMBS_MODELS model_name = '_'.join([os.path.basename(model_path), model_type, str(device)]) if model_name in CONTEXT_WORD_EMBS_MODELS and not force_reload: CONTEXT_WORD_EMBS_MODELS[model_name].top_k = top_k CONTEXT_WORD_EMBS_MODELS[model_name].batch_size = batch_size CONTEXT_WORD_EMBS_MODELS[model_name].silence = silence return CONTEXT_WORD_EMBS_MODELS[model_name] if use_custom_api: if model_type == 'distilbert': model = nml.DistilBert(model_path, device=device, top_k=top_k, silence=silence, batch_size=batch_size) elif model_type == 'roberta': model = nml.Roberta(model_path, device=device, top_k=top_k, silence=silence, batch_size=batch_size) elif model_type == 'bert': model = nml.Bert(model_path, device=device, top_k=top_k, silence=silence, batch_size=batch_size) else: raise ValueError('Model type value is unexpected. Only support bert and roberta models.') else: if model_type in ['distilbert', 'bert', 'roberta', 'bart']: model = nml.FmTransformers(model_path, model_type=model_type, device=device, batch_size=batch_size, top_k=top_k, silence=silence) else: raise ValueError('Model type value is unexpected. Only support bert and roberta models.') CONTEXT_WORD_EMBS_MODELS[model_name] = model return model class ContextualWordEmbsAug(WordAugmenter): # https://arxiv.org/pdf/1805.06201.pdf, https://arxiv.org/pdf/2003.02245.pdf """ Augmenter that leverage contextual word embeddings to find top n similar word for augmentation. :param str model_path: Model name or model path. It used transformers to load the model. Tested 'bert-base-uncased', 'bert-base-cased', 'distilbert-base-uncased', 'roberta-base', 'distilroberta-base', 'facebook/bart-base', 'squeezebert/squeezebert-uncased'. :param str model_type: Type of model. For BERT model, use 'bert'. For RoBERTa/LongFormer model, use 'roberta'. For BART model, use 'bart'. If no value is provided, will determine from model name. :param str action: Either 'insert or 'substitute'. If value is 'insert', a new word will be injected to random position according to contextual word embeddings calculation. If value is 'substitute', word will be replaced according to contextual embeddings calculation :param int top_k: Controlling lucky draw pool. Top k score token will be used for augmentation. Larger k, more token can be used. Default value is 100. If value is None which means using all possible tokens. :param float aug_p: Percentage of word will be augmented. :param int aug_min: Minimum number of word will be augmented. :param int aug_max: Maximum number of word will be augmented. If None is passed, number of augmentation is calculated via aup_p. If calculated result from aug_p is smaller than aug_max, will use calculated result from aug_p. Otherwise, using aug_max. :param list stopwords: List of words which will be skipped from augment operation. Do NOT include the UNKNOWN word. UNKNOWN word of BERT is [UNK]. UNKNOWN word of RoBERTa and BART is <unk>. :param str stopwords_regex: Regular expression for matching words which will be skipped from augment operation. 
:param str device: Default value is CPU. If value is CPU, it uses CPU for processing. If value is CUDA, it uses GPU for processing. Possible values include 'cuda' and 'cpu'. (May able to use other options) :param int batch_size: Batch size. :param bool force_reload: Force reload the contextual word embeddings model to memory when initialize the class. Default value is False and suggesting to keep it as False if performance is the consideration. :param bool silence: Default is True. transformers library will print out warning message when leveraing pre-trained model. Set True to disable the expected warning message. :param str name: Name of this augmenter >>> import nlpaug.augmenter.word as naw >>> aug = naw.ContextualWordEmbsAug() """ def __init__(self, model_path='bert-base-uncased', model_type='', action="substitute", top_k=100, name='ContextualWordEmbs_Aug', aug_min=1, aug_max=10, aug_p=0.3, stopwords=None, batch_size=32, device='cpu', force_reload=False, stopwords_regex=None, verbose=0, silence=True, use_custom_api=True): super().__init__( action=action, name=name, aug_p=aug_p, aug_min=aug_min, aug_max=aug_max, tokenizer=None, device=device, stopwords=stopwords, verbose=verbose, stopwords_regex=stopwords_regex, include_detail=False) self.model_path = model_path self.model_type = model_type if model_type != '' else self.check_model_type() self.silence = silence # TODO: Slow when switching to HuggingFace pipeline. #https://github.com/makcedward/nlpaug/issues/248 self.use_custom_api = use_custom_api self.model = self.get_model( model_path=model_path, model_type=self.model_type, device=device, force_reload=force_reload, batch_size=batch_size, top_k=top_k, silence=silence, use_custom_api=use_custom_api) # Override stopwords # if stopwords and self.model_type in ['xlnet', 'roberta']: # stopwords = [self.stopwords] # lower case all stopwords if stopwords and 'uncased' in model_path: self.stopwords = [s.lower() for s in self.stopwords] self.stopword_reg = None self.reserve_word_reg = None self._build_stop_words(stopwords) self.device = self.model.device """ TODO: Reserve 2 spaces (e.g. [CLS], [SEP]) is not enough as it hit CUDA error in batch processing mode. Therefore, forcing to reserve 5 times of reserved spaces (i.e. 5) """ self.max_num_token = self.model.get_max_num_token() def _build_stop_words(self, stopwords): if stopwords: prefix_reg = '(?<=\s|\W)' suffix_reg = '(?=\s|\W)' stopword_reg = '('+')|('.join([prefix_reg + re.escape(s) + suffix_reg for s in stopwords])+')' self.stopword_reg = re.compile(stopword_reg) unknown_token = self.model.get_unknown_token() or self.model.UNKNOWN_TOKEN reserve_word_reg = '(' + prefix_reg + re.escape(unknown_token) + suffix_reg + ')' self.reserve_word_reg = re.compile(reserve_word_reg) def check_model_type(self): # if 'xlnet' in self.model_path.lower(): # return 'xlnet' if 'longformer' in self.model_path.lower(): return 'roberta' elif 'roberta' in self.model_path.lower(): return 'roberta' elif 'distilbert' in self.model_path.lower(): return 'bert' elif 'squeezebert' in self.model_path.lower(): return 'bert' elif 'bert' in self.model_path.lower(): return 'bert' elif 'bart' in self.model_path.lower(): return 'bart' # 'google/electra-small-discriminator', # 'google/reformer-enwik8', # 'funnel-transformer/small-base', # 'google/tapas-base', # 'microsoft/deberta-base' return '' def is_stop_words(self, token): # Will execute before any tokenization. 
No need to handle prefix processing if self.stopwords: unknown_token = self.model.get_unknown_token() or self.model.UNKNOWN_TOKEN if token == unknown_token: return True return token.lower() in self.stopwords else: return False def skip_aug(self, token_idxes, tokens): results = [] for token_idx in token_idxes: token = tokens[token_idx] # Do not augment subword if self.model_type in ['bert', 'electra'] \ and token.startswith(self.model.get_subword_prefix()): continue # Do not augment tokens if len is less than aug_min if (self.model.get_subword_prefix() in token and len(token) < self.aug_min+1) \ or (self.model.get_subword_prefix() not in token and len(token) < self.aug_min): continue if self.model_type in ['xlnet', 'roberta', 'bart']: # xlent may tokenize word incorrectly. For example, 'fox', will be tokeinzed as ['_', 'fox'] if token == self.model.get_subword_prefix(): continue # subword if not token.startswith(self.model.get_subword_prefix()): continue results.append(token_idx) return results def split_text(self, data): # Expect to have waring for "Token indices sequence length is longer than the specified maximum sequence length for this model" # Handle stopwords first #https://github.com/makcedward/nlpaug/issues/247 if self.stopwords: unknown_token = self.model.get_unknown_token() or self.model.UNKNOWN_TOKEN preprocessed_data, reserved_stopwords = self.replace_stopword_by_reserved_word(data, self.stopword_reg, unknown_token) else: preprocessed_data, reserved_stopwords = data, None orig_log_level = logging.getLogger('transformers.' + 'tokenization_utils_base').getEffectiveLevel() logging.getLogger('transformers.' + 'tokenization_utils_base').setLevel(logging.ERROR) tokens = self.model.get_tokenizer().tokenize(preprocessed_data) logging.getLogger('transformers.' + 'tokenization_utils_base').setLevel(orig_log_level) if self.model.get_model().config.max_position_embeddings == -1: # e.g. No max length restriction for XLNet return (preprocessed_data, None, tokens, None), reserved_stopwords # (Head text, tail text, head token, tail token), reserved_stopwords ids = self.model.get_tokenizer().convert_tokens_to_ids(tokens[:self.max_num_token]) head_text = self.model.get_tokenizer().decode(ids).strip() # head_text = self.model.get_tokenizer().convert_tokens_to_string(tokens[:self.max_num_token]).strip() tail_text = None if len(tokens) >= self.max_num_token: # tail_text = self.model.get_tokenizer().convert_tokens_to_string(tokens[self.max_num_token:]).strip() ids = self.model.get_tokenizer().convert_tokens_to_ids(tokens[self.max_num_token:]) tail_text = self.model.get_tokenizer().decode(ids).strip() return (head_text, tail_text, tokens[:self.max_num_token], tokens[self.max_num_token:]), reserved_stopwords def insert(self, data): if not data: return data if isinstance(data, list): all_data = data else: if data.strip() == '': return data all_data = [data] # If length of input is larger than max allowed input, only augment heading part split_results = [] # head_text, tail_text, head_tokens, tail_tokens reserved_stopwords = [] for d in all_data: split_result, reserved_stopword = self.split_text(d) split_results.append(split_result) reserved_stopwords.append(reserved_stopword) change_seq = 0 # Pick target word for augmentation for i, (split_result, reserved_stopword_tokens) in enumerate(zip(split_results, reserved_stopwords)): head_text, tail_text, head_tokens, tail_tokens = split_result if self.model_type in ['xlnet', 'roberta', 'bart']: # xlent and roberta tokens include prefix (e.g. 
▁ or Ġ') cleaned_head_tokens = [t.replace(self.model.get_subword_prefix(), '') for t in head_tokens] else: cleaned_head_tokens = head_tokens head_doc = Doc(head_text, head_tokens) aug_idxes = self._get_aug_idxes(head_tokens) aug_idxes.sort(reverse=True) if reserved_stopword_tokens: head_doc, change_seq = self.substitute_back_reserved_stopwords( head_doc, reserved_stopword_tokens, change_seq) split_results[i] += (cleaned_head_tokens, head_doc, aug_idxes, ) # Pad aug_idxes max_aug_size = max([len(split_result[6]) for split_result in split_results]) for split_result in split_results: aug_idxes = split_result[6] for _ in range(max_aug_size - len(aug_idxes)): aug_idxes.append(-1) token_placeholder = self.model.get_mask_token() if self.model_type in ['xlnet', 'roberta', 'bart']: token_placeholder = self.model.get_subword_prefix() + token_placeholder # Adding prefix for # Augment same index of aug by batch for i in range(max_aug_size): masked_texts = [] aug_input_poses = [] # store which input augmented. No record if padding change_seq += 1 for j, split_result in enumerate(split_results): head_doc, aug_idx = split_result[5], split_result[6][i] # -1 if it is padding if aug_idx == -1: continue head_doc.add_token(aug_idx, token=token_placeholder, action=Action.INSERT, change_seq=self.parent_change_seq+change_seq) aug_input_poses.append(j) # some tokenizers handle special charas (e.g. don't can merge after decode) if self.model_type in ['bert', 'electra']: ids = self.model.get_tokenizer().convert_tokens_to_ids(head_doc.get_augmented_tokens()) masked_text = self.model.get_tokenizer().decode(ids).strip() elif self.model_type in ['xlnet', 'roberta', 'bart']: masked_text = self.model.get_tokenizer().convert_tokens_to_string(head_doc.get_augmented_tokens()).strip() masked_texts.append(masked_text) if not len(masked_texts): continue outputs = self.model.predict(masked_texts, target_words=None, n=2) # Update doc for aug_input_pos, output, masked_text in zip(aug_input_poses, outputs, masked_texts): split_result = split_results[aug_input_pos] head_doc = split_result[5] aug_idx = split_result[6][i] # augment position in text # TODO: Alternative method better than dropout candidate = '' if len(output) == 0: # TODO: no result? pass elif len(output) == 1: candidate = output[0] elif len(output) > 1: candidate = self.sample(output, 1)[0] # # In XLNet, it can be the first word of sentence which does not come with space. E.g. Zombine (ID:29110) # if self.model_type in ['xlnet']: # if candidate != '' and not candidate.startswith(self.model.get_subword_prefix()): # candidate = self.model.get_subword_prefix() + candidate # if self.model_type in ['roberta', 'bart']: # if candidate != '' and not candidate.startswith(self.model.get_subword_prefix()) and candidate.strip() != candidate: # candidate = self.model.get_subword_prefix() + candidate.strip() # no candidate if candidate == '': head_doc.add_change_log(aug_idx, new_token='', action=Action.DELETE, change_seq=self.parent_change_seq+change_seq) continue head_doc.update_change_log(aug_idx, token=candidate) # Early stop if number of token exceed max number if head_doc.size() > self.max_num_token: for j in range(i+1, max_aug_size): split_results[aug_input_pos][6][j] = -1 augmented_texts = [] for split_result, reserved_stopword_tokens in zip(split_results, reserved_stopwords): tail_text, head_doc = split_result[1], split_result[5] head_tokens = head_doc.get_augmented_tokens() # if self.model_type in ['xlnet', 'roberta']: # # xlent and roberta tokens include prefix (e.g. 
▁ or Ġ') # head_tokens = [self.model.get_subword_prefix() + t if self.model.get_subword_prefix() not in t and i != 0 else t for i, t in enumerate(head_tokens)] ids = self.model.get_tokenizer().convert_tokens_to_ids(head_tokens) augmented_text = self.model.get_tokenizer().decode(ids) if tail_text: augmented_text += ' ' + tail_text augmented_texts.append(augmented_text) if isinstance(data, list): return augmented_texts else: return augmented_texts[0] def substitute(self, data): if not data: return data if isinstance(data, list): all_data = data else: if data.strip() == '': return data all_data = [data] # If length of input is larger than max allowed input, only augment heading part split_results = [] # head_text, tail_text, head_tokens, tail_tokens reserved_stopwords = [] for d in all_data: split_result, reserved_stopword = self.split_text(d) split_results.append(split_result) reserved_stopwords.append(reserved_stopword) change_seq = 0 # Pick target word for augmentation for i, (split_result, reserved_stopword_tokens) in enumerate(zip(split_results, reserved_stopwords)): head_text, tail_text, head_tokens, tail_tokens = split_result if self.model_type in ['xlnet', 'roberta', 'bart']: # xlent and roberta tokens include prefix (e.g. ▁ or Ġ') cleaned_head_tokens = [t.replace(self.model.get_subword_prefix(), '') for t in head_tokens] else: cleaned_head_tokens = head_tokens head_doc = Doc(head_text, head_tokens) aug_idxes = self._get_aug_idxes(head_tokens) aug_idxes.sort(reverse=True) if reserved_stopword_tokens: head_doc, change_seq = self.substitute_back_reserved_stopwords( head_doc, reserved_stopword_tokens, change_seq) head_tokens = head_doc.get_augmented_tokens() split_results[i] += (cleaned_head_tokens, head_doc, aug_idxes, ) # Pad aug_idxes max_aug_size = max([len(split_result[6]) for split_result in split_results]) for split_result in split_results: aug_idxes = split_result[6] for _ in range(max_aug_size - len(aug_idxes)): aug_idxes.append(-1) token_placeholder = self.model.get_mask_token() if self.model_type in ['xlnet', 'roberta', 'bart']: token_placeholder = self.model.get_subword_prefix() + token_placeholder # Adding prefix for # Augment same index of aug by batch for i in range(max_aug_size): original_tokens = [] masked_texts = [] aug_input_poses = [] # store which input augmented. No record if padding change_seq += 1 for j, split_result in enumerate(split_results): head_doc, aug_idx = split_result[5], split_result[6][i] # -1 if it is padding if aug_idx == -1: continue original_tokens.append(head_doc.get_token(aug_idx).get_latest_token().token) head_doc.add_change_log(aug_idx, new_token=token_placeholder, action=Action.SUBSTITUTE, change_seq=self.parent_change_seq+change_seq) # remove continuous sub-word to_remove_idxes = [] for k in range(aug_idx+1, head_doc.size()): subword_token = head_doc.get_token(k).orig_token.token if subword_token in string.punctuation: break if self.model_type in ['bert', 'electra'] and self.model.get_subword_prefix() in subword_token: to_remove_idxes.append(k) elif self.model_type in ['xlnet', 'roberta', 'bart'] and self.model.get_subword_prefix() not in subword_token: to_remove_idxes.append(k) else: break for k in reversed(to_remove_idxes): head_doc.add_change_log(k, new_token='', action=Action.SUBSTITUTE, change_seq=self.parent_change_seq+change_seq) aug_input_poses.append(j) # some tokenizers handle special charas (e.g. 
don't can merge after decode) if self.model_type in ['bert', 'electra']: ids = self.model.get_tokenizer().convert_tokens_to_ids(head_doc.get_augmented_tokens()) masked_text = self.model.get_tokenizer().decode(ids).strip() elif self.model_type in ['xlnet', 'roberta', 'bart']: masked_text = self.model.get_tokenizer().convert_tokens_to_string(head_doc.get_augmented_tokens()).strip() masked_texts.append(masked_text) if not len(masked_texts): continue outputs = self.model.predict(masked_texts, target_words=original_tokens, n=2) # Update doc for original_token, aug_input_pos, output, masked_text in zip(original_tokens, aug_input_poses, outputs, masked_texts): split_result = split_results[aug_input_pos] head_doc = split_result[5] aug_idx = split_result[6][i] # augment position in text # TODO: Alternative method better than dropout candidate = '' if len(output) == 0: # TODO: no result? pass elif len(output) == 1: candidate = output[0] elif len(output) > 1: candidate = self.sample(output, 1)[0] # # In XLNet, it can be the first word of sentence which does not come with space. E.g. Zombine (ID:29110) # if self.model_type in ['xlnet']: # if candidate != '' and not candidate.startswith(self.model.get_subword_prefix()): # candidate = self.model.get_subword_prefix() + candidate # if self.model_type in ['roberta', 'bart']: # if candidate != '' and not candidate.startswith(self.model.get_subword_prefix()) and candidate.strip() != candidate: # candidate = self.model.get_subword_prefix() + candidate.strip() # Fallback to original token if no candidate is appropriate if candidate == '': candidate = original_token head_doc.update_change_log(aug_idx, token=candidate, action=Action.SUBSTITUTE, change_seq=self.parent_change_seq+change_seq) # Early stop if number of token exceed max number if head_doc.size() > self.max_num_token: for j in range(i+1, max_aug_size): split_results[aug_input_pos][6][j] = -1 augmented_texts = [] for split_result in split_results: tail_text, head_doc = split_result[1], split_result[5] head_tokens = head_doc.get_augmented_tokens() # if self.model_type in ['xlnet', 'roberta']: # # xlent and roberta tokens include prefix (e.g. ▁ or Ġ') # head_tokens = [self.model.get_subword_prefix() + t if self.model.get_subword_prefix() not in t and i != 0 else t for i, t in enumerate(head_tokens)] ids = self.model.get_tokenizer().convert_tokens_to_ids(head_tokens) augmented_text = self.model.get_tokenizer().decode(ids) if tail_text is not None: augmented_text += ' ' + tail_text augmented_texts.append(augmented_text) if isinstance(data, list): return augmented_texts else: return augmented_texts[0] @classmethod def get_model(cls, model_path, model_type, device='cuda', force_reload=False, batch_size=32, top_k=None, silence=True, use_custom_api=False): return init_context_word_embs_model(model_path, model_type, device, force_reload, batch_size, top_k, silence, use_custom_api) def substitute_back_reserved_stopwords(self, doc, reserved_stopword_tokens, change_seq): unknown_token = self.model.get_unknown_token() or self.model.UNKNOWN_TOKEN reserved_pos = len(reserved_stopword_tokens) - 1 for token_i, token in enumerate(doc.get_augmented_tokens()): if token == unknown_token: change_seq += 1 doc.update_change_log(token_i, token=reserved_stopword_tokens[reserved_pos], action=Action.SUBSTITUTE, change_seq=self.parent_change_seq+change_seq) reserved_pos -= 1 return doc, change_seq
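# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the class above is
# nlpaug's ContextualWordEmbsAug. Assuming the nlpaug package and a
# transformers checkpoint such as 'bert-base-uncased' are available, the
# augment() entry point inherited from the base augmenter can be exercised
# roughly as below. The sample sentence and parameter values are illustrative
# only; augment() returns the augmented text (a list in recent releases).
if __name__ == '__main__':
    import nlpaug.augmenter.word as naw

    text = 'The quick brown fox jumps over the lazy dog'

    # Substitute words using the masked-language-model predictions
    aug_sub = naw.ContextualWordEmbsAug(
        model_path='bert-base-uncased', action='substitute', aug_p=0.3)
    print(aug_sub.augment(text))

    # Insert new words predicted from the surrounding context
    aug_ins = naw.ContextualWordEmbsAug(
        model_path='bert-base-uncased', action='insert')
    print(aug_ins.augment(text))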
py
1a4ea08e1f8050f81cfcd15a7bc5b4ffe456d6fc
from django.db import models from django.contrib.auth.models import BaseUserManager from django.contrib.auth.models import AbstractBaseUser from django.contrib.auth.models import PermissionsMixin from django.conf import settings class UserProfileManager(BaseUserManager): """Manager for user profiles""" def create_user(self, email, name, password=None): """Create a new user profile""" if not email: raise ValueError('Users must have an email address') email = self.normalize_email(email) user = self.model(email=email, name=name) user.set_password(password) user.save(using=self._db) return user def create_superuser(self, email, name, password): """Create and save a new superuser with given details""" user = self.create_user(email, name, password) user.is_superuser = True user.is_staff = True user.save(using=self._db) return user class UserProfile(AbstractBaseUser, PermissionsMixin): """Database model for users in the system""" email = models.EmailField(max_length=255, unique=True) name = models.CharField(max_length=255) is_active = models.BooleanField(default=True) is_staff = models.BooleanField(default=False) objects = UserProfileManager() USERNAME_FIELD = 'email' REQUIRED_FIELDS = ['name'] def get_full_name(self): """Retrieve full name of user""" return self.name def get_short_name(self): """Retrieve short name of user""" return self.name def __str__(self): """Return string representation of our user""" return self.email class ProfileFeedItem(models.Model): """Profile status update""" # Using django settings for user profile # because we may use django default user model later user_profile = models.ForeignKey( settings.AUTH_USER_MODEL, on_delete=models.CASCADE ) status_text = models.CharField(max_length=255) created_on = models.DateTimeField(auto_now_add=True) def __str__(self): """Return model as string""" return self.status_text
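# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): assuming this app is
# installed and AUTH_USER_MODEL points at UserProfile, the custom manager and
# the feed model above would typically be exercised as below (e.g. from a
# Django shell or a test). The email/name/password values are placeholders.
def _example_usage():
    """Illustrative only; requires a configured Django project and database."""
    user = UserProfile.objects.create_user(
        email='[email protected]', name='Jane Doe', password='s3cret')
    admin = UserProfile.objects.create_superuser(
        email='[email protected]', name='Admin', password='s3cret')

    item = ProfileFeedItem.objects.create(
        user_profile=user, status_text='Hello world')

    assert user.get_full_name() == 'Jane Doe'
    assert admin.is_staff and admin.is_superuser
    assert str(item) == 'Hello world'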
py
1a4ea0c32016a34d26b7c6dcaa5afc68c06cc262
#!/usr/bin/env python2 import rospy from std_msgs.msg import Bool from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport from geometry_msgs.msg import TwistStamped from twist_controller import Controller ''' You can build this node only after you have built (or partially built) the `waypoint_updater` node. You will subscribe to `/twist_cmd` message which provides the proposed linear and angular velocities. You can subscribe to any other message that you find important or refer to the document for list of messages subscribed to by the reference implementation of this node. One thing to keep in mind while building this node and the `twist_controller` class is the status of `dbw_enabled`. While in the simulator, its enabled all the time, in the real car, that will not be the case. This may cause your PID controller to accumulate error because the car could temporarily be driven by a human instead of your controller. We have provided two launch files with this node. Vehicle specific values (like vehicle_mass, wheel_base) etc should not be altered in these files. We have also provided some reference implementations for PID controller and other utility classes. You are free to use them or build your own. Once you have the proposed throttle, brake, and steer values, publish it on the various publishers that we have created in the `__init__` function. ''' class DBWNode(object): def __init__(self): rospy.init_node('dbw_node') args = {'min_speed': 0.1, 'vehicle_mass': rospy.get_param('~vehicle_mass', 1736.35), 'fuel_capacity': rospy.get_param('~fuel_capacity', 13.5), 'brake_deadband': rospy.get_param('~brake_deadband', .1), 'decel_limit': rospy.get_param('~decel_limit', -5), 'accel_limit': rospy.get_param('~accel_limit', 1.), 'wheel_radius': rospy.get_param('~wheel_radius', 0.2413), 'wheel_base': rospy.get_param('~wheel_base', 2.8498), 'steer_ratio': rospy.get_param('~steer_ratio', 14.8), 'max_lat_accel': rospy.get_param('~max_lat_accel', 3.), 'max_steer_angle': rospy.get_param('~max_steer_angle', 8.)} self.steer_pub = rospy.Publisher('/vehicle/steering_cmd', SteeringCmd, queue_size=1) self.throttle_pub = rospy.Publisher('/vehicle/throttle_cmd', ThrottleCmd, queue_size=1) self.brake_pub = rospy.Publisher('/vehicle/brake_cmd', BrakeCmd, queue_size=1) # Create `Controller` object self.controller = Controller(args) # Subscribe to all the topics you need to rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb) rospy.Subscriber('/twist_cmd', TwistStamped, self.twist_cb) rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.dbw_cb) self.current_vel = None self.curr_ang_vel = None self.angular_vel = None self.linear_vel = None self.dbw = True self.twist = None self.loop() def loop(self): rate = rospy.Rate(50) # 50Hz # print('RosPy:', rospy.is_shutdown()) while not rospy.is_shutdown(): # Get predicted throttle, brake, and steering using `twist_controller` # You should only publish the control commands if dbw is enabled # print('Status (VAL):', self.current_vel, self.angular_vel, self.linear_vel) if None not in (self.current_vel, self.angular_vel, self.linear_vel): throttle, brake, steer = self.controller.control(self.current_vel, self.angular_vel, self.linear_vel, self.dbw) # print('Prediction (TBS):', throttle, brake, steer) if self.dbw: self.publish(throttle, brake, steer) rate.sleep() def publish(self, throttle, brake, steer): tcmd = ThrottleCmd() tcmd.enable = True tcmd.pedal_cmd_type = ThrottleCmd.CMD_PERCENT tcmd.pedal_cmd = throttle 
        self.throttle_pub.publish(tcmd)

        scmd = SteeringCmd()
        scmd.enable = True
        scmd.steering_wheel_angle_cmd = steer
        self.steer_pub.publish(scmd)

        bcmd = BrakeCmd()
        bcmd.enable = True
        bcmd.pedal_cmd_type = BrakeCmd.CMD_TORQUE
        bcmd.pedal_cmd = brake
        self.brake_pub.publish(bcmd)

    def velocity_cb(self, msg):
        self.current_vel = msg.twist.linear.x

    def twist_cb(self, msg):
        self.linear_vel = msg.twist.linear.x
        self.angular_vel = msg.twist.angular.z

    def dbw_cb(self, msg):
        # msg is a std_msgs/Bool; store the boolean payload rather than the
        # message object (which is always truthy), so `if self.dbw:` behaves
        # as intended when drive-by-wire is toggled off.
        self.dbw = msg.data


if __name__ == '__main__':
    DBWNode()
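# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original node): twist_controller.py is not
# shown here, so this is only an assumed minimal shape of the Controller class
# matching the call sites above -- Controller(args) at construction and
# control(current_vel, angular_vel, linear_vel, dbw_enabled) returning
# (throttle, brake, steer). The trivial proportional logic is a placeholder,
# not the project's actual control law.
class _ControllerSketch(object):
    def __init__(self, args):
        self.decel_limit = args['decel_limit']
        self.vehicle_mass = args['vehicle_mass']
        self.wheel_radius = args['wheel_radius']

    def control(self, current_vel, angular_vel, linear_vel, dbw_enabled):
        if not dbw_enabled:
            return 0.0, 0.0, 0.0
        vel_error = linear_vel - current_vel
        throttle = max(0.0, min(1.0, 0.3 * vel_error))  # crude proportional term
        brake = 0.0
        if vel_error < 0.0:
            throttle = 0.0
            decel = max(vel_error, self.decel_limit)
            # braking torque in N*m ~ |decel| * vehicle mass * wheel radius
            brake = abs(decel) * self.vehicle_mass * self.wheel_radius
        steer = angular_vel  # placeholder; a yaw controller would go here
        return throttle, brake, steer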
py
1a4ea26e40c4c3563194fae4bb21f5dc078d7f54
class No:
    """Node ("No") of a doubly linked list: holds a value (dado) plus links to
    the previous (anterior) and next (proximo) nodes."""

    def __init__(self, dado):
        self.dado = dado        # stored value
        self.anterior = None    # previous node
        self.proximo = None     # next node
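# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): wiring a few No nodes
# into a small doubly linked list by hand and walking it in both directions.
if __name__ == '__main__':
    primeiro = No(1)
    segundo = No(2)
    terceiro = No(3)

    # link the nodes forwards (proximo) and backwards (anterior)
    primeiro.proximo = segundo
    segundo.anterior = primeiro
    segundo.proximo = terceiro
    terceiro.anterior = segundo

    # forward traversal: prints 1, 2, 3
    atual = primeiro
    while atual is not None:
        print(atual.dado)
        atual = atual.proximo

    # backward traversal: prints 3, 2, 1
    atual = terceiro
    while atual is not None:
        print(atual.dado)
        atual = atual.anterior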
py
1a4ea3dd2efa1ae3021c85e533ab653520b2227d
from django.core.management.base import BaseCommand, CommandError

from workshops.models import Person


class Command(BaseCommand):
    args = 'no arguments'
    help = 'Create a superuser called "admin" with password "admin".'

    def handle(self, *args, **options):
        try:
            Person.objects.create_superuser(username='admin', personal='admin',
                                            family='admin',
                                            email='[email protected]',
                                            password='admin')
        except Exception as e:
            raise CommandError('Failed to create admin: {0}'.format(str(e)))
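# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): Django discovers a
# management command by its file name, which is not shown here. Assuming the
# file lives at workshops/management/commands/create_admin.py, it could be run
# from the command line as `python manage.py create_admin`, or invoked
# programmatically as below; 'create_admin' is an assumed name, substitute the
# real file name of this module.
def _example_invocation():
    """Illustrative only; requires a configured Django project."""
    from django.core.management import call_command
    call_command('create_admin')  # assumed command name (= this file's name)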
py
1a4ea6bd32a394430d17069e761a5f03a2d02561
from tensorflow.keras.models import load_model
from tensorflow import get_logger
import logging
import pickle
import numpy as np

logger = get_logger()
logger.setLevel(logging.CRITICAL)

CATEGORIES = ['Abyssian', 'American_bulldog', 'American_pit_bull', 'Basset_hound',
              'Beagle', 'Bengal', 'Birdman', 'Bombay', 'Boxer', 'British_Shorthair',
              'Chihuahua', 'Egyptian_Mau', 'English_Cocker_Spaniel', 'English_Setter',
              'German_Shorthaired', 'Great_Pyrenees', 'Havanese', 'Japanese_Chin',
              'Keeshond', 'Leonberger', 'Maine_Coon', 'Miniature_Pinscher',
              'Newfoundland', 'Persian', 'Pomeranian', 'Pug', 'Ragdoll',
              'Russian_Blue', 'Saint_Bernard', 'Samoyed', 'Scottish_Terrier',
              'Shiba_Inu', 'Siamese', 'Sphynx', 'Staffordshire_Bull_Terrier',
              'Wheaten_Terrier', 'Yorkshire_Terrier']

# Load the model
model = load_model('98.1581807136535620200213231827_model.h5')
# Get the label
label = pickle.load(open("y.pickle", "rb"))[2]
# Get one image from the normalized dataset
img = pickle.load(open("x.pickle", "rb"))[2]

# Keras models are optimized to make predictions on a batch (a collection).
# Add the image to a batch that has a single member.
img = (np.expand_dims(img, 0))

# Make the prediction
predictions_single = model.predict(img)

# Prediction for the only image in the batch:
predicao = np.argmax(predictions_single[0])

print(f"Prediction: {predicao} -> {CATEGORIES[int(predicao)]}")
print(f"label: {label} -> {CATEGORIES[label]}")
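# ---------------------------------------------------------------------------
# Hedged extension (not part of the original script): besides the argmax
# above, the same probability vector can be ranked to show the top-3 classes.
# This reuses predictions_single and CATEGORIES already defined above.
top3 = np.argsort(predictions_single[0])[::-1][:3]
for rank, idx in enumerate(top3, start=1):
    print(f"Top {rank}: {CATEGORIES[int(idx)]} "
          f"(score={predictions_single[0][int(idx)]:.4f})")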
py
1a4ea734d15d922e994b8029052b0e03a4fa7a5c
import re
import requests
import subprocess
import time
from time import sleep

from log import log, log_add, now, RED, WHT, GRN, YEL

# Named Constants
# A local host that should always be up (ideally, the gateway router)
Local_Host = "192.168.1.1"
# An internet host that should always be up (Google DNS, for example)
Internet_Host = "8.8.8.8"
smallest_outage_to_report = 30  # seconds
# Twitter Account of your Internet Provider.
# Tweet_To = "@ask_spectrum"
My_City = "SomeCity"
# replace 'K' sequence by your API_KEY of ThingTweet
Api_Key = 'KKKKKKKKKKKKKKKK'
# replace 'W' sequence by your WriteAPIKey (from your thingSpeak channel settings)
Write_Api_Key = 'WWWWWWWWWWWWWWWW'
Thingspeak_Host = "api.thingspeak.com"
Tweet_Path = "apps/thingtweet/1/statuses/update"
Thingspeak_Path = "/update"
Report_File = "netmon.log"

# Delete the following line if you put your keys above
from my_api_keys import Tweet_To, My_City, Api_Key, Write_Api_Key


def send_tweet(message):
    payload = {'api_key': Api_Key, 'status': message}
    try:
        r = requests.post(f"https://{Thingspeak_Host}/{Tweet_Path}", params=payload)
    except Exception as e:
        log(f"Couldn't send tweet \"{message}\". Continuing. ({e})")
        return  # r is undefined when the request itself failed
    if r.status_code != 200:
        log(f"Tweet fail {repr(r)}.")


def send_down_tweet(duration):
    send_tweet(f"{Tweet_To}, internet was down: {str(duration)} s. "
               f"I'm in {My_City}. #DownTimeDetected")


def send_start_tweet():
    send_tweet(f"Downtime monitor started {now()}.")


def send_thingspeak(duration):
    args = {'field1': str(duration), 'key': Write_Api_Key}
    headers = {"Content-type": "application/x-www-form-urlencoded",
               "Accept": "text/plain"}
    try:
        r = requests.post(f"https://{Thingspeak_Host}/{Thingspeak_Path}",
                          params=args, headers=headers)
    except Exception as e:
        log(f"Couldn't post to thingspeak \"{duration}\". Continuing. ({e})")


def host_down(host):
    """Return (True, '') if host is down, (False, latency) if up."""
    latency = ''
    try:
        pipe = subprocess.PIPE
        r = subprocess.run(f"ping -c 1 {host}", shell=True, stdout=pipe, stderr=pipe)
    except subprocess.CalledProcessError as err:
        log(f'{RED}FATAL ERROR. Exiting: {err}')
        exit(1)
    else:
        output = r.stdout.decode('utf-8')
        if len(r.stderr) > 0:
            err = r.stderr.decode('utf-8')
            log(f"Surprising error: {YEL}{err}")
        ms_match = re.search("time=([0-9.]+) ms", output)
        if ms_match:
            latency = ms_match.group(1)
        down = r.returncode > 0
    return (down, latency)


def check_down(local, internet):
    """Return (True, '') if the internet is down, (False, latency) if up,
    and (None, None) if it's not possible to check because the local net
    is down.
    """
    inet_down, inet_latency = host_down(internet)
    if inet_down:
        # 2nd chance check in case it was just one lost packet
        inet_down, inet_latency = host_down(internet)
        if not inet_down:
            log("hiccup")
            return (inet_down, inet_latency)
        # Internet seems down, but are we even locally connected?
        local_down, local_latency = host_down(local)
        if local_down:
            # Locally disconnected, can't tell anything
            return (None, None)
    return (inet_down, inet_latency)


# MAIN LOOP
if __name__ == "__main__":
    send_start_tweet()
    log(f"{WHT} -- DownTime Monitor --\n")
    attempt_num = 0
    was_offline = False
    start_of_outage = 0
    outage_count = 0
    long_outage_count = 0

    while True:
        attempt_num += 1
        latency = ''
        log(f"{YEL}#{attempt_num}, {long_outage_count}/{outage_count} "
            "short/long outage(s). ", end="")
        try:
            is_down, latency = check_down(Local_Host, Internet_Host)
            sleep(5)
        except Exception as e:
            log(f"{RED}Fail:{e}")
            sleep(5)
            continue
        except KeyboardInterrupt:
            log(f"{WHT}Goodbye!")
            exit(1)

        if is_down is None:
            log_add(f"{now()}: Disconnect on local network.")
            continue
        elif is_down:
            log_add(f"{WHT}Internet is {RED}down...")
        else:
            log_add(f"{WHT}Internet is up. (latency {GRN}{latency}{WHT})")

        # Internet went down after previous check
        if is_down and not was_offline:
            start_of_outage = time.time()
            was_offline = True
            continue

        # Internet came up after previous check
        if not is_down and was_offline:
            # 2 digits after decimal makes tweet slightly less likely
            # to be duplicate. Twitter blocks duplicate tweets.
            downtime = round(time.time() - start_of_outage, 2)
            outage_count += 1
            was_offline = False
            send_thingspeak(downtime)

            if downtime > smallest_outage_to_report:
                long_outage_count += 1
                dt_str = log(f"Outage above {smallest_outage_to_report} s: {downtime} s\n")
                with open(Report_File, "a") as TxtFile:
                    TxtFile.write(dt_str)
                send_down_tweet(downtime)
py
1a4ea7799219bff0d7c2bc66265936b60ddd8367
from django.test import TestCase # write some tests!
py
1a4ea80f8b4c18aa4d85927f9989ed98d1890472
# befor eimporting anything! import sys import os sys.path.insert(1, os.path.realpath('/work/dev-box/blender-2.79-linux-glibc219-x86_64/2.79/python/lib/python3.5/site-packages/')) from enum import Enum class LogColor: INFO = '\033[94m' WARNING = '\033[93m' ERROR = '\033[91m\033[1m' ENDC = '\033[0m' class LogLevel(Enum): INFO = 1 WARNING = 2 ERROR = 3 def log(output,level=LogLevel.INFO): if level == LogLevel.INFO: sys.stderr.write(LogColor.INFO) elif level == LogLevel.WARNING: sys.stderr.write(LogColor.WARNING) elif level == LogLevel.ERROR: sys.stderr.write(LogColor.ERROR) sys.stderr.write(str(output)) sys.stderr.write(LogColor.ENDC) sys.stderr.write("\n") sys.stderr.flush() import bpy import bmesh import math import numpy as np import binvox_rw import import_off import_off.register() from bpy_extras.io_utils import axis_conversion from bpy.props import EnumProperty sphere_base_mesh = None cube_base_mesh = None circle_base_mesh = None def initialize(width=512, height=448): bpy.ops.mesh.primitive_ico_sphere_add() global sphere_base_mesh sphere_base_mesh = bpy.context.scene.objects.active.data.copy() for face in sphere_base_mesh.polygons: face.use_smooth = True bpy.ops.mesh.primitive_cube_add() global cube_base_mesh cube_base_mesh = bpy.context.scene.objects.active.data.copy() bpy.ops.mesh.primitive_circle_add(vertices=1024, radius=1, fill_type='NGON') global circle_base_mesh circle_base_mesh = bpy.context.scene.objects.active.data.copy() # Delete the scene, except for the camera and the lamp for obj in bpy.data.objects: if str(obj.name) in ['Camera']: continue obj.select = True bpy.ops.object.delete() scene = bpy.context.scene # set the camera and its constraint cam = scene.objects['Camera'] cam.location = (0, 3.0, 1.0) cam.data.lens = 35 cam.data.sensor_width = 32 cam.data.sensor_height = 32 cam_constraint = cam.constraints.new(type='TRACK_TO') cam_constraint.track_axis = 'TRACK_NEGATIVE_Z' cam_constraint.up_axis = 'UP_Y' def parent_obj_to_camera(b_camera): origin = (0, 0, 0) b_empty = bpy.data.objects.new('Empty', None) b_empty.location = origin b_camera.parent = b_empty # setup parenting scn = bpy.context.scene scn.objects.link(b_empty) scn.objects.active = b_empty return b_empty camera_target = parent_obj_to_camera(cam) cam_constraint.target = camera_target locations = [ (-0.98382, 0.445997, 0.526505), (-0.421806, -0.870784, 0.524944), (0.075576, -0.960128, 0.816464), (0.493553, -0.57716, 0.928208), (0.787275, -0.256822, 0.635172), (1.01032, 0.148764, 0.335078) ] for i in range(len(locations)): lamp_data = bpy.data.lamps.new(name='Point Lamp ' + str(i), type='POINT') lamp_data.shadow_method = 'RAY_SHADOW' lamp_data.shadow_ray_sample_method = 'CONSTANT_QMC' lamp_data.use_shadow = True lamp_data.shadow_soft_size = 1e6 lamp_data.distance = 2 lamp_data.energy = 0.1 lamp_data.use_diffuse = True lamp_data.use_specular = True lamp_data.falloff_type = 'CONSTANT' lamp_object = bpy.data.objects.new(name='Spot Lamp ' + str(i), object_data=lamp_data) scene.objects.link(lamp_object) lamp_object.location[0] = locations[i][0] lamp_object.location[1] = locations[i][1] lamp_object.location[2] = locations[i][2] lamp_object.rotation_euler[0] = 0 lamp_object.rotation_euler[1] = 0 lamp_object.rotation_euler[2] = 0 lamp_object.parent = camera_target try: if (2, 78, 0) <= bpy.app.version: # https://blender.stackexchange.com/questions/5281/blender-sets-compute-device-cuda-but-doesnt-use-it-for-actual-render-on-ec2 bpy.context.user_preferences.addons['cycles'].preferences.compute_device_type = 'CUDA' 
bpy.context.user_preferences.addons['cycles'].preferences.devices[0].use = True else: bpy.context.user_preferences.system.compute_device_type = 'CUDA' except TypeError: pass scene.render.use_file_extension = False scene.render.resolution_x = width scene.render.resolution_y = height scene.render.resolution_percentage = 100 scene.render.use_antialiasing = True scene.render.use_shadows = True world = bpy.context.scene.world world.zenith_color = [1.0, 1.0, 1.0] world.horizon_color = [1.0, 1.0, 1.0] scene.render.alpha_mode = 'SKY' world.light_settings.use_environment_light = True world.light_settings.environment_color = 'PLAIN' world.light_settings.environment_energy = 0.5 return camera_target def make_material(name, diffuse, alpha, shadow=False): material = bpy.data.materials.new(name) material.diffuse_color = diffuse material.diffuse_shader = 'LAMBERT' material.diffuse_intensity = 1 material.specular_color = (1, 1, 1) material.specular_shader = 'COOKTORR' material.specular_intensity = 2 material.alpha = alpha material.use_transparency = True material.ambient = 1.0 material.use_cast_shadows = shadow material.use_shadows = shadow return material def shadow_plane(material, offset = (0, 0, 0), scale = 1): global circle_base_mesh ob = bpy.data.objects.new("BRC_Shadow_Plane", circle_base_mesh) ob.location = offset ob.scale = (scale, scale, scale) bpy.context.scene.objects.link(ob) mat = material mat.use_shadows = True mat.use_transparent_shadows = True mat.use_only_shadow = True mat.use_raytrace = True mat.ambient = 0 ob.data.materials.append(mat) ob.active_material_index = 0 ob.active_material = mat def _load_mesh(name, vertices, faces): # vertices should be list of lists # faces should be list of lists edges = [] mesh = bpy.data.meshes.new(name=name) mesh.from_pydata(vertices, edges, faces) # mesh.vertices.add(len(verts)) # mesh.vertices.foreach_set("co", unpack_list(verts)) # mesh.faces.add(len(facets)) # mesh.faces.foreach_set("vertices", unpack_face_list(facets)) mesh.validate() mesh.update() scene = bpy.context.scene obj = bpy.data.objects.new(mesh.name, mesh) scene.objects.link(obj) scene.objects.active = obj obj.select = True axis_forward = EnumProperty( name="Forward", items=(('X', "X Forward", ""), ('Y', "Y Forward", ""), ('Z', "Z Forward", ""), ('-X', "-X Forward", ""), ('-Y', "-Y Forward", ""), ('-Z', "-Z Forward", ""), ), default='Y', ) axis_up = EnumProperty( name="Up", items=(('X', "X Up", ""), ('Y', "Y Up", ""), ('Z', "Z Up", ""), ('-X', "-X Up", ""), ('-Y', "-Y Up", ""), ('-Z', "-Z Up", ""), ), default='Z', ) global_matrix = axis_conversion(from_forward=axis_forward, from_up=axis_up).to_4x4() obj.matrix_world = global_matrix scene.update() return mesh def load_mesh(name, vertices, faces, material, offset=(0, 0, 0), scale=1, axes='xyz'): _load_mesh(name, vertices, faces) assert len(offset) == 3 assert scale > 0 assert len(axes) == 3 x_index = axes.find('x') y_index = axes.find('y') z_index = axes.find('z') assert x_index >= 0 and x_index < 3 assert y_index >= 0 and y_index < 3 assert z_index >= 0 and z_index < 3 assert x_index != y_index and x_index != z_index and y_index != z_index for obj in bpy.context.scene.objects: # obj.name contains the group name of a group of faces, see http://paulbourke.net/dataformats/obj/ # every mesh is of type 'MESH', this works not only for ShapeNet but also for 'simple' # obj files if obj.type == 'MESH' and not 'BRC' in obj.name: # change color # this is based on https://stackoverflow.com/questions/4644650/blender-how-do-i-add-a-color-to-an-object # 
but needed changing a lot of attributes according to documentation obj.data.materials.append(material) for vertex in obj.data.vertices: # make a copy, otherwise axes switching does not work vertex_copy = (vertex.co[0], vertex.co[1], vertex.co[2]) vertex.co[0] = vertex_copy[x_index] vertex.co[1] = vertex_copy[y_index] vertex.co[2] = vertex_copy[z_index] vertex.co[0] = vertex.co[0] * scale + offset[0] vertex.co[1] = vertex.co[1] * scale + offset[1] vertex.co[2] = vertex.co[2] * scale + offset[2] obj.name = 'BRC_' + obj.name def load_off(off_file, material, offset=(0, 0, 0), scale=1, axes='xyz'): bpy.ops.import_mesh.off(filepath=off_file) assert len(offset) == 3 assert scale > 0 assert len(axes) == 3 x_index = axes.find('x') y_index = axes.find('y') z_index = axes.find('z') assert x_index >= 0 and x_index < 3 assert y_index >= 0 and y_index < 3 assert z_index >= 0 and z_index < 3 assert x_index != y_index and x_index != z_index and y_index != z_index for obj in bpy.context.scene.objects: # obj.name contains the group name of a group of faces, see http://paulbourke.net/dataformats/obj/ # every mesh is of type 'MESH', this works not only for ShapeNet but also for 'simple' # obj files if obj.type == 'MESH' and not 'BRC' in obj.name: # change color # this is based on https://stackoverflow.com/questions/4644650/blender-how-do-i-add-a-color-to-an-object # but needed changing a lot of attributes according to documentation obj.data.materials.append(material) for vertex in obj.data.vertices: # make a copy, otherwise axes switching does not work vertex_copy = (vertex.co[0], vertex.co[1], vertex.co[2]) vertex.co[0] = vertex_copy[x_index] vertex.co[1] = vertex_copy[y_index] vertex.co[2] = vertex_copy[z_index] vertex.co[0] = vertex.co[0] * scale + offset[0] vertex.co[1] = vertex.co[1] * scale + offset[1] vertex.co[2] = vertex.co[2] * scale + offset[2] obj.name = 'BRC_' + obj.name def load_txt(txt_file, radius, material, offset=(0, 0, 0), scale=1, axes='xyz'): global sphere_base_mesh assert len(offset) == 3 assert scale > 0 assert len(axes) == 3 x_index = axes.find('x') y_index = axes.find('y') z_index = axes.find('z') assert x_index >= 0 and x_index < 3 assert y_index >= 0 and y_index < 3 assert z_index >= 0 and z_index < 3 assert x_index != y_index and x_index != z_index and y_index != z_index voxel_file = open(txt_file, 'r') voxel_lines = voxel_file.readlines() voxel_file.close() mesh = bmesh.new() for line in voxel_lines: vals = line.split(' ') if not line.startswith('#') and line.strip() != '' and len(vals) >= 3: location = ( float(vals[x_index]) * scale + offset[0], float(vals[y_index]) * scale + offset[1], float(vals[z_index]) * scale + offset[2] ) m = sphere_base_mesh.copy() for vertex in m.vertices: vertex.co[0] = vertex.co[0] * radius + location[0] vertex.co[1] = vertex.co[1] * radius + location[1] vertex.co[2] = vertex.co[2] * radius + location[2] mesh.from_mesh(m) mesh2 = bpy.data.meshes.new('Mesh') mesh.to_mesh(mesh2) obj = bpy.data.objects.new('BRC_Point_Cloud', mesh2) obj.data.materials.append(material) obj.active_material_index = 0 obj.active_material = material bpy.context.scene.objects.link(obj) def load_binvox(binvox_file, radius, material, offset, scale, axes): global cube_base_mesh assert len(offset) == 3 assert len(scale) == 3 assert len(axes) == 3 x_index = axes.find("x") y_index = axes.find("y") z_index = axes.find("z") assert x_index >= 0 and x_index < 3 assert y_index >= 0 and y_index < 3 assert z_index >= 0 and z_index < 3 assert x_index != y_index and x_index != z_index and 
y_index != z_index with open(binvox_file, 'rb') as f: model = binvox_rw.read_as_3d_array(f) points = np.where(model.data) locations = np.zeros((points[0].shape[0], 3), dtype=float) locations[:, 0] = (points[x_index][:] + 0.5) / model.data.shape[x_index] locations[:, 1] = (points[y_index][:] + 0.5) / model.data.shape[y_index] locations[:, 2] = (points[z_index][:] + 0.5) / model.data.shape[z_index] locations[:, 0] -= 0.5 locations[:, 1] -= 0.5 locations[:, 2] -= 0.5 locations[:, 0] = locations[:, 0] * scale[0] + offset[0] locations[:, 1] = locations[:, 1] * scale[1] + offset[1] locations[:, 2] = locations[:, 2] * scale[2] + offset[2] mesh = bmesh.new() for i in range(locations.shape[0]): m = cube_base_mesh.copy() for vertex in m.vertices: vertex.co[0] = vertex.co[0] * radius + locations[i, 0] vertex.co[1] = vertex.co[1] * radius + locations[i, 1] vertex.co[2] = vertex.co[2] * radius + locations[i, 2] mesh.from_mesh(m) mesh2 = bpy.data.meshes.new('Mesh') mesh.to_mesh(mesh2) obj = bpy.data.objects.new('BRC_Occupancy', mesh2) obj.data.materials.append(material) obj.active_material_index = 0 obj.active_material = material bpy.context.scene.objects.link(obj) def load_volume(volume, radius, material, offset, scale, axes): global cube_base_mesh assert len(offset) == 3 assert len(scale) == 3 assert len(axes) == 3 x_index = axes.find("x") y_index = axes.find("y") z_index = axes.find("z") assert x_index >= 0 and x_index < 3 assert y_index >= 0 and y_index < 3 assert z_index >= 0 and z_index < 3 assert x_index != y_index and x_index != z_index and y_index != z_index points = np.where(volume > 0) locations = np.zeros((points[0].shape[0], 3), dtype=float) locations[:, 0] = (points[x_index][:] + 0.5) / volume.shape[x_index] locations[:, 1] = (points[y_index][:] + 0.5) / volume.shape[y_index] locations[:, 2] = (points[z_index][:] + 0.5) / volume.shape[z_index] locations[:, 0] -= 0.5 locations[:, 1] -= 0.5 locations[:, 2] -= 0.5 locations[:, 0] = locations[:, 0] * scale[0] + offset[0] locations[:, 1] = locations[:, 1] * scale[1] + offset[1] locations[:, 2] = locations[:, 2] * scale[2] + offset[2] mesh = bmesh.new() for i in range(locations.shape[0]): m = cube_base_mesh.copy() for vertex in m.vertices: vertex.co[0] = vertex.co[0] * radius + locations[i, 0] vertex.co[1] = vertex.co[1] * radius + locations[i, 1] vertex.co[2] = vertex.co[2] * radius + locations[i, 2] mesh.from_mesh(m) mesh2 = bpy.data.meshes.new('Mesh') mesh.to_mesh(mesh2) obj = bpy.data.objects.new('BRC_Occupancy', mesh2) obj.data.materials.append(material) obj.active_material_index = 0 obj.active_material = material bpy.context.scene.objects.link(obj) def render(camera_target, output_file, rotation, distance): bpy.context.scene.render.filepath = output_file camera_target.rotation_euler[0] = math.radians(rotation[0]) camera_target.rotation_euler[1] = math.radians(rotation[1]) camera_target.rotation_euler[2] = math.radians(rotation[2]) cam = bpy.context.scene.objects['Camera'] cam.location = (0, 3.0 * distance, 1.0 * distance) bpy.ops.render.render(animation=False, write_still=True)
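# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): these helpers import
# bpy, so they are meant to run inside Blender's bundled Python, e.g. via
# `blender --background --python <script>.py`. File paths, colors and camera
# parameters below are placeholders, not values from the original project.
def _example_render():
    """Illustrative only; assumes an existing OFF mesh on disk."""
    camera_target = initialize(width=512, height=448)

    mesh_material = make_material('BRC_Mesh_Material', (0.7, 0.3, 0.3), 1.0)
    shadow_material = make_material('BRC_Shadow_Material', (1.0, 1.0, 1.0), 1.0,
                                    shadow=True)

    load_off('/path/to/mesh.off', mesh_material,
             offset=(0, 0, 0), scale=1, axes='xzy')
    shadow_plane(shadow_material, offset=(0, 0, -0.5), scale=2)

    # the camera orbit is controlled through the empty returned by initialize()
    render(camera_target, '/tmp/render_0.png', rotation=(0, 0, 45), distance=1.0)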
py
1a4ea880d42d314320d5173d72bf47d236ff0163
# Authors: Alexandre Gramfort <[email protected]> # Matti Hamalainen <[email protected]> # Martin Luessi <[email protected]> # # License: BSD (3-clause) from .externals.six import string_types import os import copy from math import ceil import numpy as np from scipy import linalg, sparse from scipy.sparse import csr_matrix, coo_matrix import warnings from .filter import resample from .fiff.evoked import _get_peak from .parallel import parallel_func from .surface import (read_surface, _get_ico_surface, read_morph_map, _compute_nearest) from .utils import (get_subjects_dir, _check_subject, _check_pandas_index_arguments, _check_pandas_installed, logger, verbose) from .viz import plot_source_estimates from .fixes import in1d from .externals.six.moves import zip def _read_stc(filename): """ Aux Function """ fid = open(filename, 'rb') stc = dict() fid.seek(0, 2) # go to end of file file_length = fid.tell() fid.seek(0, 0) # go to beginning of file # read tmin in ms stc['tmin'] = float(np.fromfile(fid, dtype=">f4", count=1)) stc['tmin'] /= 1000.0 # read sampling rate in ms stc['tstep'] = float(np.fromfile(fid, dtype=">f4", count=1)) stc['tstep'] /= 1000.0 # read number of vertices/sources vertices_n = int(np.fromfile(fid, dtype=">u4", count=1)) # read the source vector stc['vertices'] = np.fromfile(fid, dtype=">u4", count=vertices_n) # read the number of timepts data_n = int(np.fromfile(fid, dtype=">u4", count=1)) if (vertices_n and # vertices_n can be 0 (empty stc) ((file_length / 4 - 4 - vertices_n) % (data_n * vertices_n)) != 0): raise ValueError('incorrect stc file size') # read the data matrix stc['data'] = np.fromfile(fid, dtype=">f4", count=vertices_n * data_n) stc['data'] = stc['data'].reshape([data_n, vertices_n]).T # close the file fid.close() return stc def _write_stc(filename, tmin, tstep, vertices, data): """Write an STC file Parameters ---------- filename : string The name of the STC file. tmin : float The first time point of the data in seconds. tstep : float Time between frames in seconds. vertices : array of integers Vertex indices (0 based). data : 2D array The data matrix (nvert * ntime). """ fid = open(filename, 'wb') # write start time in ms fid.write(np.array(1000 * tmin, dtype='>f4').tostring()) # write sampling rate in ms fid.write(np.array(1000 * tstep, dtype='>f4').tostring()) # write number of vertices fid.write(np.array(vertices.shape[0], dtype='>u4').tostring()) # write the vertex indices fid.write(np.array(vertices, dtype='>u4').tostring()) # write the number of timepts fid.write(np.array(data.shape[1], dtype='>u4').tostring()) # # write the data # fid.write(np.array(data.T, dtype='>f4').tostring()) # close the file fid.close() def _read_3(fid): """ Read 3 byte integer from file """ data = np.fromfile(fid, dtype=np.uint8, count=3).astype(np.int32) out = np.left_shift(data[0], 16) + np.left_shift(data[1], 8) + data[2] return out def _read_w(filename): """Read a w file and return as dict w files contain activations or source reconstructions for a single time point. Parameters ---------- filename : string The name of the w file. Returns ------- data: dict The w structure. 
It has the following keys: vertices vertex indices (0 based) data The data matrix (nvert long) """ with open(filename, 'rb', buffering=0) as fid: # buffering=0 for np bug # skip first 2 bytes fid.read(2) # read number of vertices/sources (3 byte integer) vertices_n = int(_read_3(fid)) vertices = np.zeros((vertices_n), dtype=np.int32) data = np.zeros((vertices_n), dtype=np.float32) # read the vertices and data for i in range(vertices_n): vertices[i] = _read_3(fid) data[i] = np.fromfile(fid, dtype='>f4', count=1)[0] w = dict() w['vertices'] = vertices w['data'] = data return w def _write_3(fid, val): """ Write 3 byte integer to file """ f_bytes = np.zeros((3), dtype=np.uint8) f_bytes[0] = (val >> 16) & 255 f_bytes[1] = (val >> 8) & 255 f_bytes[2] = val & 255 fid.write(f_bytes.tostring()) def _write_w(filename, vertices, data): """Read a w file w files contain activations or source reconstructions for a single time point. Parameters ---------- filename: string The name of the w file. vertices: array of int Vertex indices (0 based). data: 1D array The data array (nvert). """ assert(len(vertices) == len(data)) fid = open(filename, 'wb') # write 2 zero bytes fid.write(np.zeros((2), dtype=np.uint8).tostring()) # write number of vertices/sources (3 byte integer) vertices_n = len(vertices) _write_3(fid, vertices_n) # write the vertices and data for i in range(vertices_n): _write_3(fid, vertices[i]) #XXX: without float() endianness is wrong, not sure why fid.write(np.array(float(data[i]), dtype='>f4').tostring()) # close the file fid.close() def read_source_estimate(fname, subject=None): """Read a soure estimate object Parameters ---------- fname : str Path to (a) source-estimate file(s). subject : str | None Name of the subject the source estimate(s) is (are) from. It is good practice to set this attribute to avoid combining incompatible labels and SourceEstimates (e.g., ones from other subjects). Note that due to file specification limitations, the subject name isn't saved to or loaded from files written to disk. Returns ------- stc : SourceEstimate | VolSourceEstimate The soure estimate object loaded from file. Notes ----- - for volume source estimates, ``fname`` should provide the path to a single file named '*-vl.stc` or '*-vol.stc' - for surface source estimates, ``fname`` should either provide the path to the file corresponding to a single hemisphere ('*-lh.stc', '*-rh.stc') or only specify the asterisk part in these patterns. In any case, the function expects files for both hemisphere with names following this pattern. - for single time point .w files, ``fname`` should follow the same pattern as for surface estimates, except that files are named '*-lh.w' and '*-rh.w'. 
""" fname_arg = fname # make sure corresponding file(s) can be found ftype = None if os.path.exists(fname): if fname.endswith('-vl.stc') or fname.endswith('-vol.stc') or \ fname.endswith('-vl.w') or fname.endswith('-vol.w'): ftype = 'volume' elif fname.endswith('.stc'): ftype = 'surface' if fname.endswith(('-lh.stc', '-rh.stc')): fname = fname[:-7] else: err = ("Invalid .stc filename: %r; needs to end with " "hemisphere tag ('...-lh.stc' or '...-rh.stc')" % fname) raise IOError(err) elif fname.endswith('.w'): ftype = 'w' if fname.endswith(('-lh.w', '-rh.w')): fname = fname[:-5] else: err = ("Invalid .w filename: %r; needs to end with " "hemisphere tag ('...-lh.w' or '...-rh.w')" % fname) raise IOError(err) if ftype is not 'volume': stc_exist = [os.path.exists(f) for f in [fname + '-rh.stc', fname + '-lh.stc']] w_exist = [os.path.exists(f) for f in [fname + '-rh.w', fname + '-lh.w']] if all(stc_exist) and (ftype is not 'w'): ftype = 'surface' elif all(w_exist): ftype = 'w' elif any(stc_exist) or any(w_exist): raise IOError("Hemisphere missing for %r" % fname_arg) else: raise IOError("SourceEstimate File(s) not found for: %r" % fname_arg) # read the files if ftype == 'volume': # volume source space if fname.endswith('.stc'): kwargs = _read_stc(fname) elif fname.endswith('.w'): kwargs = _read_w(fname) kwargs['data'] = kwargs['data'][:, np.newaxis] kwargs['tmin'] = 0.0 kwargs['tstep'] = 0.0 else: raise IOError('Volume source estimate must end with .stc or .w') elif ftype == 'surface': # stc file with surface source spaces lh = _read_stc(fname + '-lh.stc') rh = _read_stc(fname + '-rh.stc') assert lh['tmin'] == rh['tmin'] assert lh['tstep'] == rh['tstep'] kwargs = lh.copy() kwargs['data'] = np.r_[lh['data'], rh['data']] kwargs['vertices'] = [lh['vertices'], rh['vertices']] elif ftype == 'w': # w file with surface source spaces lh = _read_w(fname + '-lh.w') rh = _read_w(fname + '-rh.w') kwargs = lh.copy() kwargs['data'] = np.atleast_2d(np.r_[lh['data'], rh['data']]).T kwargs['vertices'] = [lh['vertices'], rh['vertices']] # w files only have a single time point kwargs['tmin'] = 0.0 kwargs['tstep'] = 1.0 if ftype != 'volume': # Make sure the vertices are ordered vertices = kwargs['vertices'] if any([np.any(np.diff(v.astype(int)) <= 0) for v in vertices]): sidx = [np.argsort(verts) for verts in vertices] vertices = [verts[idx] for verts, idx in zip(vertices, sidx)] data = kwargs['data'][np.r_[sidx[0], len(sidx[0]) + sidx[1]]] kwargs['vertices'] = vertices kwargs['data'] = data kwargs['subject'] = subject if ftype == 'volume': stc = VolSourceEstimate(**kwargs) else: stc = SourceEstimate(**kwargs) return stc def _make_stc(data, vertices, tmin=None, tstep=None, subject=None): """Helper function to generate either a surface or volume source estimate """ if isinstance(vertices, list) and len(vertices) == 2: # make a surface source estimate stc = SourceEstimate(data, vertices=vertices, tmin=tmin, tstep=tstep, subject=subject) elif isinstance(vertices, np.ndarray) or isinstance(vertices, list)\ and len(vertices) == 1: stc = VolSourceEstimate(data, vertices=vertices, tmin=tmin, tstep=tstep, subject=subject) else: raise ValueError('vertices has to be either a list with one or two ' 'arrays or an array') return stc def _verify_source_estimate_compat(a, b): """Make sure two SourceEstimates are compatible for arith. 
operations""" compat = False if len(a.vertno) == len(b.vertno): if all([np.array_equal(av, vv) for av, vv in zip(a.vertno, b.vertno)]): compat = True if not compat: raise ValueError('Cannot combine SourceEstimates that do not have the ' 'same vertices. Consider using stc.expand().') if a.subject != b.subject: raise ValueError('source estimates do not have the same subject ' 'names, "%s" and "%s"' % (a.name, b.name)) class _BaseSourceEstimate(object): """Abstract base class for source estimates Parameters ---------- data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data) The data in source space. The data can either be a single array or a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and "sens_data" shape (n_sensors, n_times). In this case, the source space data corresponds to "numpy.dot(kernel, sens_data)". vertices : array | list of two arrays Vertex numbers corresponding to the data. tmin : scalar Time point of the first sample in data. tstep : scalar Time step between successive samples in data. subject : str | None The subject name. While not necessary, it is safer to set the subject parameter to avoid analysis errors. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Attributes ---------- subject : str | None The subject name. times : array of shape (n_times,) The time vector. vertices : array or list of arrays of shape (n_dipoles,) The indices of the dipoles in the different source spaces. Can be an array if there is only one source space (e.g., for volumes). data : array of shape (n_dipoles, n_times) The data in source space. shape : tuple The shape of the data. A tuple of int (n_dipoles, n_times). """ @verbose def __init__(self, data, vertices=None, tmin=None, tstep=None, subject=None, verbose=None): kernel, sens_data = None, None if isinstance(data, tuple): if len(data) != 2: raise ValueError('If data is a tuple it has to be length 2') kernel, sens_data = data data = None if kernel.shape[1] != sens_data.shape[0]: raise ValueError('kernel and sens_data have invalid ' 'dimensions') if isinstance(vertices, list): if not (len(vertices) == 2 or len(vertices) == 1) or \ not all([isinstance(v, np.ndarray) for v in vertices]): raise ValueError('Vertices, if a list, must contain one or ' 'two numpy arrays') if any([np.any(np.diff(v.astype(int)) <= 0) for v in vertices]): raise ValueError('Vertices must be ordered in increasing ' 'order.') n_src = sum([len(v) for v in vertices]) if len(vertices) == 1: vertices = vertices[0] elif isinstance(vertices, np.ndarray): n_src = len(vertices) else: raise ValueError('Vertices must be a list or numpy array') # safeguard the user against doing something silly if data is not None and data.shape[0] != n_src: raise ValueError('Number of vertices (%i) and stc.shape[0] (%i) ' 'must match' % (n_src, data.shape[0])) self._data = data self.tmin = tmin self.tstep = tstep self.vertno = vertices self.verbose = verbose self._kernel = kernel self._sens_data = sens_data self._kernel_removed = False self.times = None self._update_times() self.subject = _check_subject(None, subject, False) def _remove_kernel_sens_data_(self): """Remove kernel and sensor space data and compute self._data """ if self._kernel is not None or self._sens_data is not None: self._kernel_removed = True self._data = np.dot(self._kernel, self._sens_data) self._kernel = None self._sens_data = None def crop(self, tmin=None, tmax=None): """Restrict SourceEstimate to a time interval Parameters ---------- tmin : float or 
None The first time point in seconds. If None the first present is used. tmax : float or None The last time point in seconds. If None the last present is used. """ mask = np.ones(len(self.times), dtype=np.bool) if tmax is not None: mask = mask & (self.times <= tmax) if tmin is not None: mask = mask & (self.times >= tmin) self.tmin = tmin if self._kernel is not None and self._sens_data is not None: self._sens_data = self._sens_data[:, mask] else: self._data = self._data[:, mask] self._update_times() return self # return self for chaining methods @verbose def resample(self, sfreq, npad=100, window='boxcar', n_jobs=1, verbose=None): """Resample data Parameters ---------- sfreq : float New sample rate to use. npad : int Amount to pad the start and end of the data. window : string or tuple Window to use in resampling. See scipy.signal.resample. n_jobs : int Number of jobs to run in parallel. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Defaults to self.verbose. Notes ----- For some data, it may be more accurate to use npad=0 to reduce artifacts. This is dataset dependent -- check your data! Note that the sample rate of the original data is inferred from tstep. """ # resampling in sensor instead of source space gives a somewhat # different result, so we don't allow it self._remove_kernel_sens_data_() o_sfreq = 1.0 / self.tstep self._data = resample(self._data, sfreq, o_sfreq, npad, n_jobs=n_jobs) # adjust indirectly affected variables self.tstep = 1.0 / sfreq self._update_times() @property def data(self): if self._data is None: # compute the solution the first time the data is accessed and # remove the kernel and sensor data self._remove_kernel_sens_data_() return self._data @property def shape(self): if self._data is not None: return self._data.shape return (self._kernel.shape[0], self._sens_data.shape[1]) def _update_times(self): """Update the times attribute after changing tmin, tmax, or tstep""" self.times = self.tmin + (self.tstep * np.arange(self.shape[1])) def __add__(self, a): stc = copy.deepcopy(self) stc += a return stc def __iadd__(self, a): self._remove_kernel_sens_data_() if isinstance(a, _BaseSourceEstimate): _verify_source_estimate_compat(self, a) self._data += a.data else: self._data += a return self def mean(self): """Make a summary stc file with mean power between tmin and tmax. Returns ------- stc : instance of SourceEstimate The modified stc (note: method operates inplace). """ data = self.data tmax = self.tmin + self.tstep * data.shape[1] tmin = (self.tmin + tmax) / 2. 
tstep = tmax - self.tmin mean_stc = SourceEstimate(self.data.mean(axis=1)[:, np.newaxis], vertices=self.vertno, tmin=tmin, tstep=tstep, subject=self.subject) return mean_stc def __sub__(self, a): stc = copy.deepcopy(self) stc -= a return stc def __isub__(self, a): self._remove_kernel_sens_data_() if isinstance(a, _BaseSourceEstimate): _verify_source_estimate_compat(self, a) self._data -= a.data else: self._data -= a return self def __truediv__(self, a): return self.__div__(a) def __div__(self, a): stc = copy.deepcopy(self) stc /= a return stc def __itruediv__(self, a): return self.__idiv__(a) def __idiv__(self, a): self._remove_kernel_sens_data_() if isinstance(a, _BaseSourceEstimate): _verify_source_estimate_compat(self, a) self._data /= a.data else: self._data /= a return self def __mul__(self, a): stc = copy.deepcopy(self) stc *= a return stc def __imul__(self, a): self._remove_kernel_sens_data_() if isinstance(a, _BaseSourceEstimate): _verify_source_estimate_compat(self, a) self._data *= a.data else: self._data *= a return self def __pow__(self, a): stc = copy.deepcopy(self) stc **= a return stc def __ipow__(self, a): self._remove_kernel_sens_data_() self._data **= a return self def __radd__(self, a): return self + a def __rsub__(self, a): return self - a def __rmul__(self, a): return self * a def __rdiv__(self, a): return self / a def __neg__(self): stc = copy.deepcopy(self) stc._remove_kernel_sens_data_() stc._data *= -1 return stc def __pos__(self): return self def sqrt(self): """Return copy of SourceEstimate with sqrt(data).""" return self ** (0.5) def copy(self): """Return copy of SourceEstimate instance""" return copy.deepcopy(self) def bin(self, width, tstart=None, tstop=None, func=np.mean): """Returns a SourceEstimate object with data summarized over time bins Time bins of ``width`` seconds. This method is intended for visualization only. No filter is applied to the data before binning, making the method inappropriate as a tool for downsampling data. Parameters ---------- width : scalar Width of the individual bins in seconds. func : callable Function that is applied to summarize the data. Needs to accept a numpy.array as first input and an ``axis`` keyword argument. tstart : scalar | None Time point where the first bin starts. The default is the first time point of the stc. tstop : scalar | None Last possible time point contained in a bin (if the last bin would be shorter than width it is dropped). The default is the last time point of the stc. Returns ------- stc : instance of SourceEstimate The binned SourceEstimate. """ if tstart is None: tstart = self.tmin if tstop is None: tstop = self.times[-1] times = np.arange(tstart, tstop + self.tstep, width) nv, _ = self.shape nt = len(times) - 1 data = np.empty((nv, nt), dtype=self.data.dtype) for i in range(nt): idx = (self.times >= times[i]) & (self.times < times[i + 1]) data[:, i] = func(self.data[:, idx], axis=1) tmin = times[0] + width / 2. stc = _make_stc(data, vertices=self.vertno, tmin=tmin, tstep=width, subject=self.subject) return stc def transform_data(self, func, idx=None, tmin_idx=None, tmax_idx=None): """Get data after a linear (time) transform has been applied The transorm is applied to each source time course independently. Parameters ---------- func : callable The transform to be applied, including parameters (see, e.g., mne.fixes.partial). The first parameter of the function is the input data. The first return value is the transformed data, remaining outputs are ignored. 
The first dimension of the transformed data has to be the same as the first dimension of the input data. idx : array | None Indicices of source time courses for which to compute transform. If None, all time courses are used. tmin_idx : int | None Index of first time point to include. If None, the index of the first time point is used. tmax_idx : int | None Index of the first time point not to include. If None, time points up to (and including) the last time point are included. Returns ------- data_t : ndarray The transformed data. .. note:: Applying transforms can be significantly faster if the SourceEstimate object was created using "(kernel, sens_data)", for the "data" parameter as the transform is applied in sensor space. Inverse methods, e.g., "apply_inverse_epochs", or "lcmv_epochs" do this automatically (if possible). """ if idx is None: # use all time courses by default idx = slice(None, None) if self._kernel is None and self._sens_data is None: if self._kernel_removed: warnings.warn('Performance can be improved by not accessing ' 'the data attribute before calling this method.') # transform source space data directly data_t = func(self.data[idx, tmin_idx:tmax_idx]) if isinstance(data_t, tuple): # use only first return value data_t = data_t[0] else: # apply transform in sensor space sens_data_t = func(self._sens_data[:, tmin_idx:tmax_idx]) if isinstance(sens_data_t, tuple): # use only first return value sens_data_t = sens_data_t[0] # apply inverse data_shape = sens_data_t.shape if len(data_shape) > 2: # flatten the last dimensions sens_data_t = sens_data_t.reshape(data_shape[0], np.prod(data_shape[1:])) data_t = np.dot(self._kernel[idx, :], sens_data_t) # restore original shape if necessary if len(data_shape) > 2: data_t = data_t.reshape(data_t.shape[0], *data_shape[1:]) return data_t def transform(self, func, idx=None, tmin=None, tmax=None, copy=False): """Apply linear transform The transform is applied to each source time course independently. Parameters ---------- func : callable The transform to be applied, including parameters (see, e.g., mne.fixes.partial). The first parameter of the function is the input data. The first two dimensions of the transformed data should be (i) vertices and (ii) time. Transforms which yield 3D output (e.g. time-frequency transforms) are valid, so long as the first two dimensions are vertices and time. In this case, the copy parameter (see below) must be True and a list of SourceEstimates, rather than a single instance of SourceEstimate, will be returned, one for each index of the 3rd dimension of the transformed data. In the case of transforms yielding 2D output (e.g. filtering), the user has the option of modifying the input inplace (copy = False) or returning a new instance of SourceEstimate (copy = True) with the transformed data. idx : array | None Indices of source time courses for which to compute transform. If None, all time courses are used. tmin : float | int | None First time point to include (ms). If None, self.tmin is used. tmax : float | int | None Last time point to include (ms). If None, self.tmax is used. copy : bool If True, return a new instance of SourceEstimate instead of modifying the input inplace. Returns ------- stcs : instance of SourceEstimate | list The transformed stc or, in the case of transforms which yield N-dimensional output (where N > 2), a list of stcs. For a list, copy must be True. 
Notes ----- Applying transforms can be significantly faster if the SourceEstimate object was created using "(kernel, sens_data)", for the "data" parameter as the transform is applied in sensor space. Inverse methods, e.g., "apply_inverse_epochs", or "lcmv_epochs" do this automatically (if possible). """ # min and max data indices to include times = np.round(1000 * self.times) if tmin is None: tmin_idx = None else: tmin = float(tmin) tmin_idx = np.where(times >= tmin)[0][0] if tmax is None: tmax_idx = None else: tmax = float(tmax) tmax_idx = np.where(times <= tmax)[0][-1] data_t = self.transform_data(func, idx=idx, tmin_idx=tmin_idx, tmax_idx=tmax_idx) # account for change in n_vertices if idx is not None: idx_lh = idx[idx < len(self.lh_vertno)] idx_rh = idx[idx >= len(self.lh_vertno)] - len(self.lh_vertno) verts_lh = self.lh_vertno[idx_lh] verts_rh = self.rh_vertno[idx_rh] else: verts_lh = self.lh_vertno verts_rh = self.rh_vertno verts = [verts_lh, verts_rh] tmin_idx = 0 if tmin_idx is None else tmin_idx tmax_idx = -1 if tmax_idx is None else tmax_idx tmin = self.times[tmin_idx] times = np.arange(self.times[tmin_idx], self.times[tmax_idx] + self.tstep / 2, self.tstep) if data_t.ndim > 2: # return list of stcs if transformed data has dimensionality > 2 if copy: stcs = [SourceEstimate(data_t[:, :, a], verts, tmin, self.tstep, self.subject) for a in range(data_t.shape[-1])] else: raise ValueError('copy must be True if transformed data has ' 'more than 2 dimensions') else: # return new or overwritten stc stcs = self if not copy else self.copy() stcs._data, stcs.vertno = data_t, verts stcs.tmin, stcs.times = tmin, times return stcs def as_data_frame(self, index=None, scale_time=1e3, copy=True): """Represent source estimates as Pandas DataFrame Export source estimates in tabular structure with vertices as columns and two additional info columns 'subject' and 'time'. This function is useful to visualize and analyse source time courses with external statistical software such as statsmodels or R. Parameters ---------- index : tuple of str | None Column to be used as index for the data. Valid string options are 'subject' and 'time'. If None, both info columns will be included in the table as categorial data. If stc.subject is None, only time will be included. scale_time : float Scaling to be applied to time units. copy : bool If true, data will be copied. Else data may be modified in place. Returns ------- df : instance of DataFrame Source estimates exported into tabular data structure. 
""" pd = _check_pandas_installed() default_index = ['subject', 'time'] if index is not None: _check_pandas_index_arguments(index, default_index) else: index = default_index if self.subject is None: index.remove('subject') data = self.data.T shape = data.shape mindex = list() mindex.append(('time', self.times * scale_time)) mindex.append(('subject', np.repeat(self.subject, shape[0]))) if copy: data = data.copy() assert all(len(mdx) == len(mindex[0]) for mdx in mindex) if isinstance(self.vertno, list): # surface source estimates v_names = [i for e in [['%s %i' % ('LH' if ii < 1 else 'RH', vert) for vert in vertno] for ii, vertno in enumerate(self.vertno)] for i in e] else: # volume source estimates v_names = ['VOL %d' % vert for vert in self.vertno] df = pd.DataFrame(data, columns=v_names) [df.insert(i, k, v) for i, (k, v) in enumerate(mindex)] if index is not None: if 'time' in index: df['time'] = df['time'].astype(np.int64) with warnings.catch_warnings(record=True): df.set_index(index, inplace=True) return df class SourceEstimate(_BaseSourceEstimate): """Container for surface source estimates Parameters ---------- data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data) The data in source space. The data can either be a single array or a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and "sens_data" shape (n_sensors, n_times). In this case, the source space data corresponds to "numpy.dot(kernel, sens_data)". vertices : list of two arrays Vertex numbers corresponding to the data. tmin : scalar Time point of the first sample in data. tstep : scalar Time step between successive samples in data. subject : str | None The subject name. While not necessary, it is safer to set the subject parameter to avoid analysis errors. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Attributes ---------- subject : str | None The subject name. times : array of shape (n_times,) The time vector. vertno : list of two arrays of shape (n_dipoles,) The indices of the dipoles in the left and right source space. data : array of shape (n_dipoles, n_times) The data in source space. shape : tuple The shape of the data. A tuple of int (n_dipoles, n_times). """ @verbose def __init__(self, data, vertices=None, tmin=None, tstep=None, subject=None, verbose=None): if not (isinstance(vertices, list) and len(vertices) == 2): raise ValueError('Vertices, if a list, must contain two ' 'numpy arrays') _BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin, tstep=tstep, subject=subject, verbose=verbose) @verbose def save(self, fname, ftype='stc', verbose=None): """Save the source estimates to a file Parameters ---------- fname : string The stem of the file name. The file names used for surface source spaces are obtained by adding "-lh.stc" and "-rh.stc" (or "-lh.w" and "-rh.w") to the stem provided, for the left and the right hemisphere, respectively. ftype : string File format to use. Allowed values are "stc" (default) and "w". The "w" format only supports a single time point. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Defaults to self.verbose. 
""" if ftype not in ['stc', 'w']: raise ValueError('ftype must be "stc" or "w", not "%s"' % ftype) lh_data = self.data[:len(self.lh_vertno)] rh_data = self.data[-len(self.rh_vertno):] if ftype == 'stc': logger.info('Writing STC to disk...') _write_stc(fname + '-lh.stc', tmin=self.tmin, tstep=self.tstep, vertices=self.lh_vertno, data=lh_data) _write_stc(fname + '-rh.stc', tmin=self.tmin, tstep=self.tstep, vertices=self.rh_vertno, data=rh_data) elif ftype == 'w': if self.shape[1] != 1: raise ValueError('w files can only contain a single time ' 'point') logger.info('Writing STC to disk (w format)...') _write_w(fname + '-lh.w', vertices=self.lh_vertno, data=lh_data[:, 0]) _write_w(fname + '-rh.w', vertices=self.rh_vertno, data=rh_data[:, 0]) logger.info('[done]') def __repr__(self): if isinstance(self.vertno, list): nv = sum([len(v) for v in self.vertno]) else: nv = self.vertno.size s = "%d vertices" % nv if self.subject is not None: s += ", subject : %s" % self.subject s += ", tmin : %s (ms)" % (1e3 * self.tmin) s += ", tmax : %s (ms)" % (1e3 * self.times[-1]) s += ", tstep : %s (ms)" % (1e3 * self.tstep) s += ", data size : %s x %s" % self.shape return "<SourceEstimate | %s>" % s @property def lh_data(self): return self.data[:len(self.lh_vertno)] @property def rh_data(self): return self.data[len(self.lh_vertno):] @property def lh_vertno(self): return self.vertno[0] @property def rh_vertno(self): return self.vertno[1] def _hemilabel_stc(self, label): if label.hemi == 'lh': stc_vertices = self.vertno[0] else: stc_vertices = self.vertno[1] # find index of the Label's vertices idx = np.nonzero(in1d(stc_vertices, label.vertices))[0] # find output vertices vertices = stc_vertices[idx] # find data if label.hemi == 'rh': values = self.data[idx + len(self.vertno[0])] else: values = self.data[idx] return vertices, values def in_label(self, label): """Returns a SourceEstimate object restricted to a label SourceEstimate contains the time course of activation of all sources inside the label. Parameters ---------- label : Label | BiHemiLabel The label (as created for example by mne.read_label). If the label does not match any sources in the SourceEstimate, a ValueError is raised. """ # make sure label and stc are compatible if label.subject is not None and self.subject is not None \ and label.subject != self.subject: raise RuntimeError('label and stc must have same subject names, ' 'currently "%s" and "%s"' % (label.subject, self.subject)) if label.hemi == 'both': lh_vert, lh_val = self._hemilabel_stc(label.lh) rh_vert, rh_val = self._hemilabel_stc(label.rh) vertices = [lh_vert, rh_vert] values = np.vstack((lh_val, rh_val)) elif label.hemi == 'lh': lh_vert, values = self._hemilabel_stc(label) vertices = [lh_vert, np.array([])] elif label.hemi == 'rh': rh_vert, values = self._hemilabel_stc(label) vertices = [np.array([]), rh_vert] else: raise TypeError("Expected Label or BiHemiLabel; got %r" % label) if sum([len(v) for v in vertices]) == 0: raise ValueError('No vertices match the label in the stc file') label_stc = SourceEstimate(values, vertices=vertices, tmin=self.tmin, tstep=self.tstep, subject=self.subject) return label_stc def expand(self, vertno): """Expand SourceEstimate to include more vertices This will add rows to stc.data (zero-filled) and modify stc.vertno to include all vertices in stc.vertno and the input vertno. Parameters ---------- vertno : list of array New vertices to add. Can also contain old values. 
Returns ------- stc : instance of SourceEstimate The modified stc (note: method operates inplace). """ if not isinstance(vertno, list): raise TypeError('vertno must be a list') if not len(self.vertno) == len(vertno): raise ValueError('vertno must have the same length as stc.vertno') # can no longer use kernel and sensor data self._remove_kernel_sens_data_() inserters = list() offsets = [0] for vi, (v_old, v_new) in enumerate(zip(self.vertno, vertno)): v_new = np.setdiff1d(v_new, v_old) inds = np.searchsorted(v_old, v_new) # newer numpy might overwrite inds after np.insert, copy here inserters += [inds.copy()] offsets += [len(v_old)] self.vertno[vi] = np.insert(v_old, inds, v_new) inds = [ii + offset for ii, offset in zip(inserters, offsets[:-1])] inds = np.concatenate(inds) new_data = np.zeros((len(inds), self._data.shape[1])) self._data = np.insert(self._data, inds, new_data, axis=0) return self @verbose def extract_label_time_course(self, labels, src, mode='mean_flip', allow_empty=False, verbose=None): """Extract label time courses for lists of labels This function will extract one time course for each label. The way the time courses are extracted depends on the mode parameter. Valid values for mode are: 'mean': Average within each label. 'mean_flip': Average within each label with sign flip depending on source orientation. 'pca_flip': Apply an SVD to the time courses within each label and use the scaled and sign-flipped first right-singular vector as the label time course. The scaling is performed such that the power of the label time course is the same as the average per-vertex time course power within the label. The sign of the resulting time course is adjusted by multiplying it with "sign(dot(u, flip))" where u is the first left-singular vector, and flip is a sing-flip vector based on the vertex normals. This procedure assures that the phase does not randomly change by 180 degrees from one stc to the next. See also mne.extract_label_time_course to extract time courses for a list of SourceEstimate more efficiently. Parameters ---------- labels : Label | list of Label The labels for which to extract the time courses. src : list Source spaces for left and right hemisphere. mode : str Extraction mode, see explanation above. allow_empty : bool Instead of emitting an error, return all-zero time course for labels that do not have any vertices in the source estimate. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- label_tc : array, shape=(len(labels), n_times) Extracted time course for each label. """ label_tc = extract_label_time_course(self, labels, src, mode=mode, return_generator=False, allow_empty=allow_empty, verbose=verbose) return label_tc def center_of_mass(self, subject=None, hemi=None, restrict_vertices=False, subjects_dir=None): """Return the vertex on a given surface that is at the center of mass of the activity in stc. Note that all activity must occur in a single hemisphere, otherwise an error is returned. The "mass" of each point in space for computing the spatial center of mass is computed by summing across time, and vice-versa for each point in time in computing the temporal center of mass. This is useful for quantifying spatio-temporal cluster locations, especially when combined with the function mne.source_space.vertex_to_mni(). Parameters ---------- subject : string | None The subject the stc is defined for. hemi : int, or None Calculate the center of mass for the left (0) or right (1) hemisphere. 
If None, one of the hemispheres must be all zeroes, and the center of mass will be calculated for the other hemisphere (useful for getting COM for clusters). restrict_vertices : bool, or array of int If True, returned vertex will be one from stc. Otherwise, it could be any vertex from surf. If an array of int, the returned vertex will come from that array. For most accuruate estimates, do not restrict vertices. subjects_dir : str, or None Path to the SUBJECTS_DIR. If None, the path is obtained by using the environment variable SUBJECTS_DIR. Returns ------- vertex : int Vertex of the spatial center of mass for the inferred hemisphere, with each vertex weighted by the sum of the stc across time. For a boolean stc, then, this would be weighted purely by the duration each vertex was active. hemi : int Hemisphere the vertex was taken from. t : float Time of the temporal center of mass (weighted by the sum across source vertices). References: Used in Larson and Lee, "The cortical dynamics underlying effective switching of auditory spatial attention", NeuroImage 2012. """ subject = _check_subject(self.subject, subject) values = np.sum(self.data, axis=1) # sum across time vert_inds = [np.arange(len(self.vertno[0])), np.arange(len(self.vertno[1])) + len(self.vertno[0])] if hemi is None: hemi = np.where(np.array([np.sum(values[vi]) for vi in vert_inds]))[0] if not len(hemi) == 1: raise ValueError('Could not infer hemisphere') hemi = hemi[0] if not hemi in [0, 1]: raise ValueError('hemi must be 0 or 1') subjects_dir = get_subjects_dir(subjects_dir) values = values[vert_inds[hemi]] hemis = ['lh', 'rh'] surf = os.path.join(subjects_dir, subject, 'surf', hemis[hemi] + '.sphere') if isinstance(surf, string_types): # read in surface surf = read_surface(surf) if restrict_vertices is False: restrict_vertices = np.arange(surf[0].shape[0]) elif restrict_vertices is True: restrict_vertices = self.vertno[hemi] if np.any(self.data < 0): raise ValueError('Cannot compute COM with negative values') pos = surf[0][self.vertno[hemi], :].T c_o_m = np.sum(pos * values, axis=1) / np.sum(values) # Find the vertex closest to the COM vertex = np.argmin(np.sqrt(np.mean((surf[0][restrict_vertices, :] - c_o_m) ** 2, axis=1))) vertex = restrict_vertices[vertex] # do time center of mass by using the values across space masses = np.sum(self.data, axis=0).astype(float) t_ind = np.sum(masses * np.arange(self.shape[1])) / np.sum(masses) t = self.tmin + self.tstep * t_ind return vertex, hemi, t def plot(self, subject=None, surface='inflated', hemi='lh', colormap='hot', time_label='time=%0.2f ms', smoothing_steps=10, fmin=5., fmid=10., fmax=15., transparent=True, alpha=1.0, time_viewer=False, config_opts={}, subjects_dir=None, figure=None, views='lat', colorbar=True): """Plot SourceEstimates with PySurfer Note: PySurfer currently needs the SUBJECTS_DIR environment variable, which will automatically be set by this function. Plotting multiple SourceEstimates with different values for subjects_dir will cause PySurfer to use the wrong FreeSurfer surfaces when using methods of the returned Brain object. It is therefore recommended to set the SUBJECTS_DIR environment variable or always use the same value for subjects_dir (within the same Python session). Parameters ---------- stc : SourceEstimates The source estimates to plot. subject : str | None The subject name corresponding to FreeSurfer environment variable SUBJECT. If None stc.subject will be used. If that is None, the environment will be used. 
surface : str The type of surface (inflated, white etc.). hemi : str, 'lh' | 'rh' | 'split' | 'both' The hemisphere to display. Using 'both' or 'split' requires PySurfer version 0.4 or above. colormap : str The type of colormap to use. time_label : str How to print info about the time instant visualized. smoothing_steps : int The amount of smoothing. fmin : float The minimum value to display. fmid : float The middle value on the colormap. fmax : float The maximum value for the colormap. transparent : bool If True, use a linear transparency between fmin and fmid. alpha : float Alpha value to apply globally to the overlay. time_viewer : bool Display time viewer GUI. config_opts : dict Keyword arguments for Brain initialization. See pysurfer.viz.Brain. subjects_dir : str The path to the FreeSurfer subjects reconstructions. It corresponds to FreeSurfer environment variable SUBJECTS_DIR. figure : instance of mayavi.core.scene.Scene | None If None, the last figure will be cleaned and a new figure will be created. views : str | list View to use. See surfer.Brain(). colorbar : bool If True, display colorbar on scene. Returns ------- brain : Brain A instance of surfer.viz.Brain from PySurfer. """ brain = plot_source_estimates(self, subject, surface=surface, hemi=hemi, colormap=colormap, time_label=time_label, smoothing_steps=smoothing_steps, fmin=fmin, fmid=fmid, fmax=fmax, transparent=transparent, alpha=alpha, time_viewer=time_viewer, config_opts=config_opts, subjects_dir=subjects_dir, figure=figure, views=views, colorbar=colorbar) return brain @verbose def morph(self, subject_to, grade=5, smooth=None, subjects_dir=None, buffer_size=64, n_jobs=1, subject_from=None, verbose=None): """Morph a source estimate from one subject to another Parameters ---------- subject_to : string Name of the subject on which to morph as named in the SUBJECTS_DIR stc_from : SourceEstimate Source estimates for subject "from" to morph grade : int, list (of two arrays), or None Resolution of the icosahedral mesh (typically 5). If None, all vertices will be used (potentially filling the surface). If a list, then values will be morphed to the set of vertices specified in in grade[0] and grade[1]. Note that specifying the vertices (e.g., grade=[np.arange(10242), np.arange(10242)] for fsaverage on a standard grade 5 source space) can be substantially faster than computing vertex locations. Note that if subject='fsaverage' and 'grade=5', this set of vertices will automatically be used (instead of computed) for speed, since this is a common morph. smooth : int or None Number of iterations for the smoothing of the surface data. If None, smooth is automatically defined to fill the surface with non-zero values. subjects_dir : string, or None Path to SUBJECTS_DIR if it is not set in the environment. buffer_size : int Morph data in chunks of `buffer_size` time instants. Saves memory when morphing long time intervals. n_jobs : int Number of jobs to run in parallel. subject_from : string Name of the original subject as named in the SUBJECTS_DIR. If None, self.subject will be used. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- stc_to : SourceEstimate Source estimate for the destination subject. 
""" subject_from = _check_subject(self.subject, subject_from) return morph_data(subject_from, subject_to, self, grade, smooth, subjects_dir, buffer_size, n_jobs, verbose) def morph_precomputed(self, subject_to, vertices_to, morph_mat, subject_from=None): """Morph source estimate between subjects using a precomputed matrix Parameters ---------- subject_to : string Name of the subject on which to morph as named in the SUBJECTS_DIR. vertices_to : list of array of int The vertices on the destination subject's brain. morph_mat : sparse matrix The morphing matrix, usually from compute_morph_matrix. subject_from : string | None Name of the original subject as named in the SUBJECTS_DIR. If None, self.subject will be used. Returns ------- stc_to : SourceEstimate Source estimate for the destination subject. """ subject_from = _check_subject(self.subject, subject_from) return morph_data_precomputed(subject_from, subject_to, self, vertices_to, morph_mat) def get_peak(self, hemi=None, tmin=None, tmax=None, mode='abs', vert_as_index=False, time_as_index=False): """Get location and latency of peak amplitude hemi : {'lh', 'rh', None} The hemi to be considered. If None, the entire source space is considered. tmin : float | None The minimum point in time to be considered for peak getting. tmax : float | None The maximum point in time to be considered for peak getting. mode : {'pos', 'neg', 'abs'} How to deal with the sign of the data. If 'pos' only positive values will be considered. If 'neg' only negative values will be considered. If 'abs' absolute values will be considered. Defaults to 'abs'. vert_as_index : bool whether to return the vertex index instead of of its ID. Defaults to False. time_as_index : bool Whether to return the time index instead of the latency. Defaults to False. Returns ------- pos : int The vertex exhibiting the maximum response, either ID or index. latency : float | int The time point of the maximum response, either latency in seconds or index. """ data = {'lh': self.lh_data, 'rh': self.rh_data, None: self.data}[hemi] vertno = {'lh': self.lh_vertno, 'rh': self.rh_vertno, None: np.concatenate(self.vertno)}[hemi] vert_idx, time_idx = _get_peak(data, self.times, tmin, tmax, mode) return (vert_idx if vert_as_index else vertno[vert_idx], time_idx if time_as_index else self.times[time_idx]) class VolSourceEstimate(_BaseSourceEstimate): """Container for volume source estimates Parameters ---------- data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data) The data in source space. The data can either be a single array or a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and "sens_data" shape (n_sensors, n_times). In this case, the source space data corresponds to "numpy.dot(kernel, sens_data)". vertices : array Vertex numbers corresponding to the data. tmin : scalar Time point of the first sample in data. tstep : scalar Time step between successive samples in data. subject : str | None The subject name. While not necessary, it is safer to set the subject parameter to avoid analysis errors. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Attributes ---------- subject : str | None The subject name. times : array of shape (n_times,) The time vector. vertno : array of shape (n_dipoles,) The indices of the dipoles in the source space. data : array of shape (n_dipoles, n_times) The data in source space. shape : tuple The shape of the data. A tuple of int (n_dipoles, n_times). 
""" @verbose def __init__(self, data, vertices=None, tmin=None, tstep=None, subject=None, verbose=None): if not (isinstance(vertices, np.ndarray) or isinstance(vertices, list) and len(vertices) == 1): raise ValueError('Vertices must be a numpy array or a list with ' 'one array') _BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin, tstep=tstep, subject=subject, verbose=verbose) @verbose def save(self, fname, ftype='stc', verbose=None): """Save the source estimates to a file Parameters ---------- fname : string The stem of the file name. The stem is extended with "-vl.stc" or "-vl.w". ftype : string File format to use. Allowed values are "stc" (default) and "w". The "w" format only supports a single time point. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Defaults to self.verbose. """ if ftype not in ['stc', 'w']: raise ValueError('ftype must be "stc" or "w", not "%s"' % ftype) if ftype == 'stc': logger.info('Writing STC to disk...') if not (fname.endswith('-vl.stc') or fname.endswith('-vol.stc')): fname += '-vl.stc' _write_stc(fname, tmin=self.tmin, tstep=self.tstep, vertices=self.vertno, data=self.data) elif ftype == 'w': logger.info('Writing STC to disk (w format)...') if not (fname.endswith('-vl.w') or fname.endswith('-vol.w')): fname += '-vl.w' _write_w(fname, vertices=self.vertno, data=self.data) logger.info('[done]') def save_as_volume(self, fname, src, dest='mri', mri_resolution=False): """Save a volume source estimate in a nifti file Parameters ---------- fname : string The name of the generated nifti file. src : list The list of source spaces (should actually be of length 1) dest : 'mri' | 'surf' If 'mri' the volume is defined in the coordinate system of the original T1 image. If 'surf' the coordinate system of the FreeSurfer surface is used (Surface RAS). mri_resolution: bool It True the image is saved in MRI resolution. WARNING: if you have many time points the file produced can be huge. Returns ------- img : instance Nifti1Image The image object. """ save_stc_as_volume(fname, self, src, dest=dest, mri_resolution=mri_resolution) def as_volume(self, src, dest='mri', mri_resolution=False): """Export volume source estimate as a nifti object Parameters ---------- src : list The list of source spaces (should actually be of length 1) dest : 'mri' | 'surf' If 'mri' the volume is defined in the coordinate system of the original T1 image. If 'surf' the coordinate system of the FreeSurfer surface is used (Surface RAS). mri_resolution: bool It True the image is saved in MRI resolution. WARNING: if you have many time points the file produced can be huge. Returns ------- img : instance Nifti1Image The image object. """ return save_stc_as_volume(None, self, src, dest=dest, mri_resolution=mri_resolution) def __repr__(self): if isinstance(self.vertno, list): nv = sum([len(v) for v in self.vertno]) else: nv = self.vertno.size s = "%d vertices" % nv if self.subject is not None: s += ", subject : %s" % self.subject s += ", tmin : %s (ms)" % (1e3 * self.tmin) s += ", tmax : %s (ms)" % (1e3 * self.times[-1]) s += ", tstep : %s (ms)" % (1e3 * self.tstep) s += ", data size : %s x %s" % self.shape return "<VolSourceEstimate | %s>" % s def get_peak(self, tmin=None, tmax=None, mode='abs', vert_as_index=False, time_as_index=False): """Get location and latency of peak amplitude tmin : float | None The minimum point in time to be considered for peak getting. tmax : float | None The maximum point in time to be considered for peak getting. 
mode : {'pos', 'neg', 'abs'} How to deal with the sign of the data. If 'pos' only positive values will be considered. If 'neg' only negative values will be considered. If 'abs' absolute values will be considered. Defaults to 'abs'. vert_as_index : bool whether to return the vertex index instead of of its ID. Defaults to False. time_as_index : bool Whether to return the time index instead of the latency. Defaults to False. Returns ------- pos : int The vertex exhibiting the maximum response, either ID or index. latency : float The latency in seconds. """ vert_idx, time_idx = _get_peak(self.data, self.times, tmin, tmax, mode) return (vert_idx if vert_as_index else self.vertno[vert_idx], time_idx if time_as_index else self.times[time_idx]) ############################################################################### # Morphing def mesh_edges(tris): """Returns sparse matrix with edges as an adjacency matrix Parameters ---------- tris : array of shape [n_triangles x 3] The triangles. Returns ------- edges : sparse matrix The adjacency matrix. """ npoints = np.max(tris) + 1 ones_ntris = np.ones(3 * len(tris)) a, b, c = tris.T x = np.concatenate((a, b, c)) y = np.concatenate((b, c, a)) edges = coo_matrix((ones_ntris, (x, y)), shape=(npoints, npoints)) edges = edges.tocsr() edges = edges + edges.T return edges def mesh_dist(tris, vert): """Compute adjacency matrix weighted by distances It generates an adjacency matrix where the entries are the distances between neighboring vertices. Parameters ---------- tris : array (n_tris x 3) Mesh triangulation vert : array (n_vert x 3) Vertex locations Returns ------- dist_matrix : scipy.sparse.csr_matrix Sparse matrix with distances between adjacent vertices """ edges = mesh_edges(tris).tocoo() # Euclidean distances between neighboring vertices dist = np.sqrt(np.sum((vert[edges.row, :] - vert[edges.col, :]) ** 2, axis=1)) dist_matrix = csr_matrix((dist, (edges.row, edges.col)), shape=edges.shape) return dist_matrix @verbose def _morph_buffer(data, idx_use, e, smooth, n_vertices, nearest, maps, verbose=None): """Morph data from one subject's source space to another Parameters ---------- data : array, or csr sparse matrix A n_vertices x n_times (or other dimension) dataset to morph. idx_use : array of int Vertices from the original subject's data. e : sparse matrix The mesh edges of the "from" subject. smooth : int Number of smoothing iterations to perform. A hard limit of 100 is also imposed. n_vertices : int Number of vertices. nearest : array of int Vertices on the destination surface to use. maps : sparse matrix Morph map from one subject to the other. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- data_morphed : array, or csr sparse matrix The morphed data (same type as input). 
""" n_iter = 99 # max nb of smoothing iterations (minus one) if smooth is not None: if smooth <= 0: raise ValueError('The number of smoothing operations ("smooth") ' 'has to be at least 1.') smooth -= 1 # make sure we're in CSR format e = e.tocsr() if sparse.issparse(data): use_sparse = True if not isinstance(data, sparse.csr_matrix): data = data.tocsr() else: use_sparse = False done = False # do the smoothing for k in range(n_iter + 1): # get the row sum mult = np.zeros(e.shape[1]) mult[idx_use] = 1 idx_use_data = idx_use data_sum = e * mult # new indices are non-zero sums idx_use = np.where(data_sum)[0] # typically want to make the next iteration have these indices idx_out = idx_use # figure out if this is the last iteration if smooth is None: if k == n_iter or len(idx_use) >= n_vertices: # stop when vertices filled idx_out = None done = True elif k == smooth: idx_out = None done = True # do standard smoothing multiplication data = _morph_mult(data, e, use_sparse, idx_use_data, idx_out) if done is True: break # do standard normalization if use_sparse: data.data /= data_sum[idx_use].repeat(np.diff(data.indptr)) else: data /= data_sum[idx_use][:, None] # do special normalization for last iteration if use_sparse: data_sum[data_sum == 0] = 1 data.data /= data_sum.repeat(np.diff(data.indptr)) else: data[idx_use, :] /= data_sum[idx_use][:, None] logger.info(' %d smooth iterations done.' % (k + 1)) data_morphed = maps[nearest, :] * data return data_morphed def _morph_mult(data, e, use_sparse, idx_use_data, idx_use_out=None): """Helper for morphing Equivalent to "data = (e[:, idx_use_data] * data)[idx_use_out]" but faster. """ if len(idx_use_data) < e.shape[1]: if use_sparse: data = e[:, idx_use_data] * data else: # constructing a new sparse matrix is faster than sub-indexing # e[:, idx_use_data]! col, row = np.meshgrid(np.arange(data.shape[1]), idx_use_data) d_sparse = sparse.csr_matrix((data.ravel(), (row.ravel(), col.ravel())), shape=(e.shape[1], data.shape[1])) data = e * d_sparse data = np.asarray(data.todense()) else: data = e * data # trim data if idx_use_out is not None: data = data[idx_use_out] return data def _get_subject_sphere_tris(subject, subjects_dir): spheres = [os.path.join(subjects_dir, subject, 'surf', xh + '.sphere.reg') for xh in ['lh', 'rh']] tris = [read_surface(s)[1] for s in spheres] return tris @verbose def morph_data(subject_from, subject_to, stc_from, grade=5, smooth=None, subjects_dir=None, buffer_size=64, n_jobs=1, verbose=None): """Morph a source estimate from one subject to another Parameters ---------- subject_from : string Name of the original subject as named in the SUBJECTS_DIR subject_to : string Name of the subject on which to morph as named in the SUBJECTS_DIR stc_from : SourceEstimate Source estimates for subject "from" to morph grade : int, list (of two arrays), or None Resolution of the icosahedral mesh (typically 5). If None, all vertices will be used (potentially filling the surface). If a list, then values will be morphed to the set of vertices specified in in grade[0] and grade[1]. Note that specifying the vertices (e.g., grade=[np.arange(10242), np.arange(10242)] for fsaverage on a standard grade 5 source space) can be substantially faster than computing vertex locations. Note that if subject='fsaverage' and 'grade=5', this set of vertices will automatically be used (instead of computed) for speed, since this is a common morph. smooth : int or None Number of iterations for the smoothing of the surface data. 
If None, smooth is automatically defined to fill the surface with non-zero values. subjects_dir : string, or None Path to SUBJECTS_DIR if it is not set in the environment. buffer_size : int Morph data in chunks of `buffer_size` time instants. Saves memory when morphing long time intervals. n_jobs : int Number of jobs to run in parallel verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- stc_to : SourceEstimate Source estimate for the destination subject. """ if not isinstance(stc_from, SourceEstimate): raise ValueError('Morphing is only possible with surface source ' 'estimates') logger.info('Morphing data...') subjects_dir = get_subjects_dir(subjects_dir) nearest = grade_to_vertices(subject_to, grade, subjects_dir, n_jobs) tris = _get_subject_sphere_tris(subject_from, subjects_dir) maps = read_morph_map(subject_from, subject_to, subjects_dir) # morph the data data = [stc_from.lh_data, stc_from.rh_data] data_morphed = [None, None] n_chunks = ceil(stc_from.data.shape[1] / float(buffer_size)) parallel, my_morph_buffer, _ = parallel_func(_morph_buffer, n_jobs) for hemi in [0, 1]: e = mesh_edges(tris[hemi]) e.data[e.data == 2] = 1 n_vertices = e.shape[0] e = e + sparse.eye(n_vertices, n_vertices) idx_use = stc_from.vertno[hemi] if len(idx_use) == 0: continue data_morphed[hemi] = np.concatenate( parallel(my_morph_buffer(data_buffer, idx_use, e, smooth, n_vertices, nearest[hemi], maps[hemi]) for data_buffer in np.array_split(data[hemi], n_chunks, axis=1)), axis=1) vertices = [nearest[0], nearest[1]] if data_morphed[0] is None: if data_morphed[1] is None: data = np.r_[[], []] vertices = [np.array([], dtype=int), np.array([], dtype=int)] else: data = data_morphed[1] vertices = [np.array([], dtype=int), vertices[1]] elif data_morphed[1] is None: data = data_morphed[0] vertices = [vertices[0], np.array([], dtype=int)] else: data = np.r_[data_morphed[0], data_morphed[1]] stc_to = SourceEstimate(data, vertices, stc_from.tmin, stc_from.tstep, subject=subject_to, verbose=stc_from.verbose) logger.info('[done]') return stc_to @verbose def compute_morph_matrix(subject_from, subject_to, vertices_from, vertices_to, smooth=None, subjects_dir=None, verbose=None): """Get a matrix that morphs data from one subject to another Parameters ---------- subject_from : string Name of the original subject as named in the SUBJECTS_DIR subject_to : string Name of the subject on which to morph as named in the SUBJECTS_DIR vertices_from : list of arrays of int Vertices for each hemisphere (LH, RH) for subject_from vertices_to : list of arrays of int Vertices for each hemisphere (LH, RH) for subject_to smooth : int or None Number of iterations for the smoothing of the surface data. If None, smooth is automatically defined to fill the surface with non-zero values. subjects_dir : string Path to SUBJECTS_DIR is not set in the environment verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). 
Returns ------- morph_matrix : sparse matrix matrix that morphs data from subject_from to subject_to """ logger.info('Computing morph matrix...') subjects_dir = get_subjects_dir(subjects_dir) tris = _get_subject_sphere_tris(subject_from, subjects_dir) maps = read_morph_map(subject_from, subject_to, subjects_dir) morpher = [None] * 2 for hemi in [0, 1]: e = mesh_edges(tris[hemi]) e.data[e.data == 2] = 1 n_vertices = e.shape[0] e = e + sparse.eye(n_vertices, n_vertices) idx_use = vertices_from[hemi] if len(idx_use) == 0: morpher[hemi] = [] continue m = sparse.eye(len(idx_use), len(idx_use), format='csr') morpher[hemi] = _morph_buffer(m, idx_use, e, smooth, n_vertices, vertices_to[hemi], maps[hemi]) # be careful about zero-length arrays if isinstance(morpher[0], list): morpher = morpher[1] elif isinstance(morpher[1], list): morpher = morpher[0] else: morpher = sparse_block_diag(morpher, format='csr') logger.info('[done]') return morpher @verbose def grade_to_vertices(subject, grade, subjects_dir=None, n_jobs=1, verbose=None): """Convert a grade to source space vertices for a given subject Parameters ---------- subject : str Name of the subject grade : int Resolution of the icosahedral mesh (typically 5). If None, all vertices will be used (potentially filling the surface). If a list, then values will be morphed to the set of vertices specified in in grade[0] and grade[1]. Note that specifying the vertices (e.g., grade=[np.arange(10242), np.arange(10242)] for fsaverage on a standard grade 5 source space) can be substantially faster than computing vertex locations. Note that if subject='fsaverage' and 'grade=5', this set of vertices will automatically be used (instead of computed) for speed, since this is a common morph. subjects_dir : string, or None Path to SUBJECTS_DIR if it is not set in the environment n_jobs : int Number of jobs to run in parallel verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- vertices : list of arrays of int Vertex numbers for LH and RH """ # add special case for fsaverage for speed if subject == 'fsaverage' and grade == 5: return [np.arange(10242), np.arange(10242)] subjects_dir = get_subjects_dir(subjects_dir) spheres_to = [os.path.join(subjects_dir, subject, 'surf', xh + '.sphere.reg') for xh in ['lh', 'rh']] lhs, rhs = [read_surface(s)[0] for s in spheres_to] if grade is not None: # fill a subset of vertices if isinstance(grade, list): if not len(grade) == 2: raise ValueError('grade as a list must have two elements ' '(arrays of output vertices)') vertices = grade else: # find which vertices to use in "to mesh" ico = _get_ico_tris(grade, return_surf=True) lhs /= np.sqrt(np.sum(lhs ** 2, axis=1))[:, None] rhs /= np.sqrt(np.sum(rhs ** 2, axis=1))[:, None] # Compute nearest vertices in high dim mesh parallel, my_compute_nearest, _ = \ parallel_func(_compute_nearest, n_jobs) lhs, rhs, rr = [a.astype(np.float32) for a in [lhs, rhs, ico['rr']]] vertices = parallel(my_compute_nearest(xhs, rr) for xhs in [lhs, rhs]) # Make sure the vertices are ordered vertices = [np.sort(verts) for verts in vertices] else: # potentially fill the surface vertices = [np.arange(lhs.shape[0]), np.arange(rhs.shape[0])] return vertices def morph_data_precomputed(subject_from, subject_to, stc_from, vertices_to, morph_mat): """Morph source estimate between subjects using a precomputed matrix Parameters ---------- subject_from : string Name of the original subject as named in the SUBJECTS_DIR. 
subject_to : string Name of the subject on which to morph as named in the SUBJECTS_DIR. stc_from : SourceEstimate Source estimates for subject "from" to morph. vertices_to : list of array of int The vertices on the destination subject's brain. morph_mat : sparse matrix The morphing matrix, typically from compute_morph_matrix. Returns ------- stc_to : SourceEstimate Source estimate for the destination subject. """ if not sparse.issparse(morph_mat): raise ValueError('morph_mat must be a sparse matrix') if not isinstance(vertices_to, list) or not len(vertices_to) == 2: raise ValueError('vertices_to must be a list of length 2') if not sum(len(v) for v in vertices_to) == morph_mat.shape[0]: raise ValueError('number of vertices in vertices_to must match ' 'morph_mat.shape[0]') if not stc_from.data.shape[0] == morph_mat.shape[1]: raise ValueError('stc_from.data.shape[0] must be the same as ' 'morph_mat.shape[0]') if stc_from.subject is not None and stc_from.subject != subject_from: raise ValueError('stc_from.subject and subject_from must match') data = morph_mat * stc_from.data stc_to = SourceEstimate(data, vertices_to, stc_from.tmin, stc_from.tstep, verbose=stc_from.verbose, subject=subject_to) return stc_to @verbose def spatio_temporal_src_connectivity(src, n_times, dist=None, verbose=None): """Compute connectivity for a source space activation over time Parameters ---------- src : source space The source space. n_times : int Number of time instants. dist : float, or None Maximal geodesic distance (in m) between vertices in the source space to consider neighbors. If None, immediate neighbors are extracted from an ico surface. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- connectivity : sparse COO matrix The connectivity matrix describing the spatio-temporal graph structure. If N is the number of vertices in the source space, the N first nodes in the graph are the vertices are time 1, the nodes from 2 to 2N are the vertices during time 2, etc. """ if dist is None: if src[0]['use_tris'] is None: raise Exception("The source space does not appear to be an ico " "surface. Connectivity cannot be extracted from " "non-ico source spaces.") used_verts = [np.unique(s['use_tris']) for s in src] lh_tris = np.searchsorted(used_verts[0], src[0]['use_tris']) rh_tris = np.searchsorted(used_verts[1], src[1]['use_tris']) tris = np.concatenate((lh_tris, rh_tris + np.max(lh_tris) + 1)) connectivity = spatio_temporal_tris_connectivity(tris, n_times) # deal with source space only using a subset of vertices masks = [in1d(u, s['vertno']) for s, u in zip(src, used_verts)] if sum(u.size for u in used_verts) != connectivity.shape[0] / n_times: raise ValueError('Used vertices do not match connectivity shape') if [np.sum(m) for m in masks] != [len(s['vertno']) for s in src]: raise ValueError('Vertex mask does not match number of vertices') masks = np.concatenate(masks) missing = 100 * float(len(masks) - np.sum(masks)) / len(masks) if missing: warnings.warn('%0.1f%% of original source space vertices have been' ' omitted, tri-based connectivity will have holes.\n' 'Consider using distance-based connectivity or ' 'morphing data to all source space vertices.' 
% missing) masks = np.tile(masks, n_times) masks = np.where(masks)[0] connectivity = connectivity.tocsr() connectivity = connectivity[masks] connectivity = connectivity[:, masks] # return to original format connectivity = connectivity.tocoo() return connectivity else: # use distances computed and saved in the source space file return spatio_temporal_dist_connectivity(src, n_times, dist) @verbose def grade_to_tris(grade, verbose=None): """Get tris defined for a certain grade Parameters ---------- grade : int Grade of an icosahedral mesh. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- tris : list 2-element list containing Nx3 arrays of tris, suitable for use in spatio_temporal_tris_connectivity. """ a = _get_ico_tris(grade, None, False) tris = np.concatenate((a, a + (np.max(a) + 1))) return tris @verbose def spatio_temporal_tris_connectivity(tris, n_times, verbose=None): """Compute connectivity from triangles and time instants Parameters ---------- tris : array N x 3 array defining triangles. n_times : int Number of time points verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- connectivity : sparse COO matrix The connectivity matrix describing the spatio-temporal graph structure. If N is the number of vertices in the source space, the N first nodes in the graph are the vertices are time 1, the nodes from 2 to 2N are the vertices during time 2, etc. """ edges = mesh_edges(tris).tocoo() return _get_connectivity_from_edges(edges, n_times) @verbose def spatio_temporal_dist_connectivity(src, n_times, dist, verbose=None): """Compute connectivity from distances in a source space and time instants Parameters ---------- src : source space The source space must have distances between vertices computed, such that src['dist'] exists and is useful. This can be obtained using MNE with a call to mne_add_patch_info with the --dist option. n_times : int Number of time points dist : float Maximal geodesic distance (in m) between vertices in the source space to consider neighbors. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- connectivity : sparse COO matrix The connectivity matrix describing the spatio-temporal graph structure. If N is the number of vertices in the source space, the N first nodes in the graph are the vertices are time 1, the nodes from 2 to 2N are the vertices during time 2, etc. """ if src[0]['dist'] is None: raise RuntimeError('src must have distances included, consider using\n' 'mne_add_patch_info with --dist argument') edges = sparse_block_diag([s['dist'][s['vertno'], :][:, s['vertno']] for s in src]) edges.data[:] = np.less_equal(edges.data, dist) # clean it up and put it in coo format edges = edges.tocsr() edges.eliminate_zeros() edges = edges.tocoo() return _get_connectivity_from_edges(edges, n_times) @verbose def spatial_src_connectivity(src, dist=None, verbose=None): """Compute connectivity for a source space activation Parameters ---------- src : source space The source space. dist : float, or None Maximal geodesic distance (in m) between vertices in the source space to consider neighbors. If None, immediate neighbors are extracted from an ico surface. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- connectivity : sparse COO matrix The connectivity matrix describing the spatial graph structure. 
""" return spatio_temporal_src_connectivity(src, 1, dist) @verbose def spatial_tris_connectivity(tris, verbose=None): """Compute connectivity from triangles Parameters ---------- tris : array N x 3 array defining triangles. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- connectivity : sparse COO matrix The connectivity matrix describing the spatial graph structure. """ return spatio_temporal_tris_connectivity(tris, 1) def spatial_dist_connectivity(src, dist, verbose=None): """Compute connectivity from distances in a source space Parameters ---------- src : source space The source space must have distances between vertices computed, such that src['dist'] exists and is useful. This can be obtained using MNE with a call to mne_add_patch_info with the --dist option. dist : float Maximal geodesic distance (in m) between vertices in the source space to consider neighbors. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- connectivity : sparse COO matrix The connectivity matrix describing the spatial graph structure. """ return spatio_temporal_dist_connectivity(src, 1, dist) def sparse_block_diag(mats, format=None, dtype=None): """An implementation of scipy.sparse.block_diag since old versions of scipy don't have it. Forms a sparse matrix by stacking matrices in block diagonal form. Parameters ---------- mats : list of matrices Input matrices. format : str, optional The sparse format of the result (e.g. "csr"). If not given, the matrix is returned in "coo" format. dtype : dtype specifier, optional The data-type of the output matrix. If not given, the dtype is determined from that of blocks. Returns ------- res : sparse matrix """ try: return sparse.block_diag(mats, format=format, dtype=dtype) except AttributeError: nmat = len(mats) rows = [] for ia, a in enumerate(mats): row = [None] * nmat row[ia] = a rows.append(row) return sparse.bmat(rows, format=format, dtype=dtype) @verbose def _get_connectivity_from_edges(edges, n_times, verbose=None): """Given edges sparse matrix, create connectivity matrix""" n_vertices = edges.shape[0] logger.info("-- number of connected vertices : %d" % n_vertices) nnz = edges.col.size aux = n_vertices * np.arange(n_times)[:, None] * np.ones((1, nnz), np.int) col = (edges.col[None, :] + aux).ravel() row = (edges.row[None, :] + aux).ravel() if n_times > 1: # add temporal edges o = (n_vertices * np.arange(n_times - 1)[:, None] + np.arange(n_vertices)[None, :]).ravel() d = (n_vertices * np.arange(1, n_times)[:, None] + np.arange(n_vertices)[None, :]).ravel() row = np.concatenate((row, o, d)) col = np.concatenate((col, d, o)) data = np.ones(edges.data.size * n_times + 2 * n_vertices * (n_times - 1), dtype=np.int) connectivity = coo_matrix((data, (row, col)), shape=(n_times * n_vertices, ) * 2) return connectivity @verbose def _get_ico_tris(grade, verbose=None, return_surf=False): """Get triangles for ico surface.""" ico = _get_ico_surface(grade) if not return_surf: return ico['tris'] else: return ico def save_stc_as_volume(fname, stc, src, dest='mri', mri_resolution=False): """Save a volume source estimate in a nifti file Parameters ---------- fname : string | None The name of the generated nifti file. If None, the image is only returned and not saved. 
stc : instance of VolSourceEstimate The source estimate src : list The list of source spaces (should actually be of length 1) dest : 'mri' | 'surf' If 'mri' the volume is defined in the coordinate system of the original T1 image. If 'surf' the coordinate system of the FreeSurfer surface is used (Surface RAS). mri_resolution: bool It True the image is saved in MRI resolution. WARNING: if you have many time points the file produced can be huge. Returns ------- img : instance Nifti1Image The image object. """ if not isinstance(stc, VolSourceEstimate): raise Exception('Only volume source estimates can be saved as ' 'volumes') n_times = stc.data.shape[1] shape = src[0]['shape'] shape3d = (shape[2], shape[1], shape[0]) shape = (n_times, shape[2], shape[1], shape[0]) vol = np.zeros(shape) mask3d = src[0]['inuse'].reshape(shape3d).astype(np.bool) if mri_resolution: mri_shape3d = (src[0]['mri_height'], src[0]['mri_depth'], src[0]['mri_width']) mri_shape = (n_times, src[0]['mri_height'], src[0]['mri_depth'], src[0]['mri_width']) mri_vol = np.zeros(mri_shape) interpolator = src[0]['interpolator'] for k, v in enumerate(vol): v[mask3d] = stc.data[:, k] if mri_resolution: mri_vol[k] = (interpolator * v.ravel()).reshape(mri_shape3d) if mri_resolution: vol = mri_vol vol = vol.T if mri_resolution: affine = src[0]['vox_mri_t']['trans'].copy() else: affine = src[0]['src_mri_t']['trans'].copy() if dest == 'mri': affine = np.dot(src[0]['mri_ras_t']['trans'], affine) affine[:3] *= 1e3 try: import nibabel as nib # lazy import to avoid dependency except ImportError: raise ImportError("nibabel is required to save volume images.") header = nib.nifti1.Nifti1Header() header.set_xyzt_units('mm', 'msec') header['pixdim'][4] = 1e3 * stc.tstep img = nib.Nifti1Image(vol, affine, header=header) if fname is not None: nib.save(img, fname) return img def _get_label_flip(labels, label_vertidx, src): """Helper function to get sign-flip for labels""" # do the import here to avoid circular dependency from .label import label_sign_flip # get the sign-flip vector for every label label_flip = list() for label, vertidx in zip(labels, label_vertidx): if label.hemi == 'both': raise ValueError('BiHemiLabel not supported when using sign-flip') if vertidx is not None: flip = label_sign_flip(label, src)[:, None] else: flip = None label_flip.append(flip) return label_flip @verbose def _gen_extract_label_time_course(stcs, labels, src, mode='mean', allow_empty=False, verbose=None): """Generator for extract_label_time_course""" n_labels = len(labels) # get vertno from source space, they have to be the same as in the stcs vertno = [s['vertno'] for s in src] nvert = [len(vn) for vn in vertno] # do the initialization label_vertidx = list() for label in labels: if label.hemi == 'both': # handle BiHemiLabel sub_labels = [label.lh, label.rh] else: sub_labels = [label] this_vertidx = list() for slabel in sub_labels: if slabel.hemi == 'lh': this_vertno = np.intersect1d(vertno[0], slabel.vertices) vertidx = np.searchsorted(vertno[0], this_vertno) elif slabel.hemi == 'rh': this_vertno = np.intersect1d(vertno[1], slabel.vertices) vertidx = nvert[0] + np.searchsorted(vertno[1], this_vertno) else: raise ValueError('label %s has invalid hemi' % label.name) this_vertidx.append(vertidx) # convert it to an array this_vertidx = np.concatenate(this_vertidx) if len(this_vertidx) == 0: msg = ('source space does not contain any vertices for label %s' % label.name) if not allow_empty: raise ValueError(msg) else: logger.warning(msg + '. 
Assigning all-zero time series to ' 'label.') this_vertidx = None # to later check if label is empty label_vertidx.append(this_vertidx) # mode-dependent initalization if mode == 'mean': pass # we have this here to catch invalid values for mode elif mode == 'mean_flip': # get the sign-flip vector for every label label_flip = _get_label_flip(labels, label_vertidx, src) elif mode == 'pca_flip': # get the sign-flip vector for every label label_flip = _get_label_flip(labels, label_vertidx, src) else: raise ValueError('%s is an invalid mode' % mode) # loop through source estimates and extract time series for stc in stcs: # make sure the stc is compatible with the source space if len(stc.vertno[0]) != nvert[0] or len(stc.vertno[1]) != nvert[1]: raise ValueError('stc not compatible with source space') if any([np.any(svn != vn) for svn, vn in zip(stc.vertno, vertno)]): raise ValueError('stc not compatible with source space') logger.info('Extracting time courses for %d labels (mode: %s)' % (n_labels, mode)) # do the extraction label_tc = np.zeros((n_labels, stc.data.shape[1]), dtype=stc.data.dtype) if mode == 'mean': for i, vertidx in enumerate(label_vertidx): if vertidx is not None: label_tc[i] = np.mean(stc.data[vertidx, :], axis=0) elif mode == 'mean_flip': for i, (vertidx, flip) in enumerate(zip(label_vertidx, label_flip)): if vertidx is not None: label_tc[i] = np.mean(flip * stc.data[vertidx, :], axis=0) elif mode == 'pca_flip': for i, (vertidx, flip) in enumerate(zip(label_vertidx, label_flip)): if vertidx is not None: U, s, V = linalg.svd(stc.data[vertidx, :], full_matrices=False) # determine sign-flip sign = np.sign(np.dot(U[:, 0], flip)) # use average power in label for scaling scale = linalg.norm(s) / np.sqrt(len(vertidx)) label_tc[i] = sign * scale * V[0] else: raise ValueError('%s is an invalid mode' % mode) # this is a generator! yield label_tc @verbose def extract_label_time_course(stcs, labels, src, mode='mean_flip', allow_empty=False, return_generator=False, verbose=None): """Extract label time course for lists of labels and source estimates This function will extract one time course for each label and source estimate. The way the time courses are extracted depends on the mode parameter. Valid values for mode are: 'mean': Average within each label. 'mean_flip': Average within each label with sign flip depending on source orientation. 'pca_flip': Apply an SVD to the time courses within each label and use the scaled and sign-flipped first right-singular vector as the label time course. The scaling is performed such that the power of the label time course is the same as the average per-vertex time course power within the label. The sign of the resulting time course is adjusted by multiplying it with "sign(dot(u, flip))" where u is the first left-singular vector, and flip is a sing-flip vector based on the vertex normals. This procedure assures that the phase does not randomly change by 180 degrees from one stc to the next. Parameters ---------- stcs : SourceEstimate | list (or generator) of SourceEstimate The source estimates from which to extract the time course. labels : Label | list of Label The labels for which to extract the time course. src : list Source spaces for left and right hemisphere. mode : str Extraction mode, see explanation above. allow_empty : bool Instead of emitting an error, return all-zero time courses for labels that do not have any vertices in the source estimate. return_generator : bool If True, a generator instead of a list is returned. 
verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- label_tc : array | list (or generator) of array, shape=(len(labels), n_times) Extracted time course for each label and source estimate. """ # convert inputs to lists if isinstance(stcs, SourceEstimate): stcs = [stcs] return_several = False return_generator = False else: return_several = True if not isinstance(labels, list): labels = [labels] label_tc = _gen_extract_label_time_course(stcs, labels, src, mode=mode, allow_empty=allow_empty) if not return_generator: # do the extraction and return a list label_tc = list(label_tc) if not return_several: # input was a single SoureEstimate, return single array label_tc = label_tc[0] return label_tc
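# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for documentation; not part of the original
# module). The file names below are hypothetical placeholders, and the reader
# functions (read_source_estimate, read_labels_from_annot, read_source_spaces)
# are assumed to be available from the top-level mne package shipping with
# this module.
#
#   import mne
#
#   stc = mne.read_source_estimate('sample_audvis-meg')  # loads *-lh.stc / *-rh.stc
#   labels = mne.read_labels_from_annot('sample', parc='aparc')
#   src = mne.read_source_spaces('sample-oct-6-src.fif')
#
#   # One time course per label, averaged with sign flips within each label:
#   label_tc = extract_label_time_course(stc, labels, src, mode='mean_flip')
#
#   # Morph the estimate to fsaverage on the standard grade-5 mesh:
#   stc_fsaverage = stc.morph('fsaverage', grade=5, smooth=5)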
py
1a4ea9b174c911c7032299c990ef560493c2dcc7
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ TridentNet Training Script. This script is a simplified version of the training script in detectron2/tools. """ import os from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch from detectron2.evaluation import COCOEvaluator from tridentnet import add_tridentnet_config class Trainer(DefaultTrainer): @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") return COCOEvaluator(dataset_name, cfg, True, output_folder) def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() add_tridentnet_config(cfg) cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) cfg.freeze() default_setup(cfg, args) return cfg def main(args): cfg = setup(args) if args.eval_only: model = Trainer.build_model(cfg) DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load( cfg.MODEL.WEIGHTS, resume=args.resume ) res = Trainer.test(cfg, model) return res trainer = Trainer(cfg) trainer.resume_or_load(resume=args.resume) return trainer.train() if __name__ == "__main__": args = default_argument_parser().parse_args() print("Command Line Args:", args) launch( main, args.num_gpus, num_machines=args.num_machines, machine_rank=args.machine_rank, dist_url=args.dist_url, args=(args,), )
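# Hypothetical invocations of this launcher (the config path is a placeholder
# assumed to live under the TridentNet project; the flags come from
# detectron2's default_argument_parser):
#
#     python train_net.py --config-file configs/tridentnet_fast_R_50_C4_1x.yaml \
#         --num-gpus 8
#     python train_net.py --config-file configs/tridentnet_fast_R_50_C4_1x.yaml \
#         --eval-only MODEL.WEIGHTS /path/to/model.pth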
py
1a4ea9b8ec94c1cf5b524ebae03844e3f712c783
# This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _Network(_AWS): _type = "network" _icon_dir = "resources/aws/network" class APIGateway(_Network): _icon = "api-gateway.png" class AppMesh(_Network): _icon = "app-mesh.png" class ClientVpn(_Network): _icon = "client-vpn.png" class CloudMap(_Network): _icon = "cloud-map.png" class CloudFront(_Network): _icon = "cloudfront.png" class DirectConnect(_Network): _icon = "direct-connect.png" class ElasticLoadBalancing(_Network): _icon = "elastic-load-balancing.png" class Endpoint(_Network): _icon = "endpoint.png" class GlobalAccelerator(_Network): _icon = "global-accelerator.png" class InternetGateway(_Network): _icon = "internet-gateway.png" class NATGateway(_Network): _icon = "nat-gateway.png" class NetworkingAndContentDelivery(_Network): _icon = "networking-and-content-delivery.png" class Privatelink(_Network): _icon = "privatelink.png" class Route53(_Network): _icon = "route-53.png" class SiteToSiteVpn(_Network): _icon = "site-to-site-vpn.png" class TransitGateway(_Network): _icon = "transit-gateway.png" class VPCRouter(_Network): _icon = "vpc-router.png" class VPC(_Network): _icon = "vpc.png" # Aliases CF = CloudFront ELB = ElasticLoadBalancing GAX = GlobalAccelerator
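# Minimal usage sketch for these node classes with the `diagrams` package
# (assumes Graphviz is installed; the diagram name and topology are arbitrary):
#
#     from diagrams import Diagram
#     from diagrams.aws.network import Route53, ELB, VPC
#
#     with Diagram("edge", show=False):
#         Route53("dns") >> ELB("lb") >> VPC("vpc")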
py
1a4ea9d135d86f35d40e996a87c55d79479b7a38
def print_nums(n):
    # Print the integers 1..n on one line, separated by spaces.
    i = 1
    while i <= n:
        print(i, end=' ')
        i += 1


# Note: print_nums has no return statement, so each call below also prints
# "None" after its number sequence.
print(print_nums(3))
print(print_nums(7))
print(print_nums(8))
py
1a4eaa1058fdbbe01f015dac9a51a73e090b4f83
"""Tests for the resource module""" from django.db import models from django.test import TestCase from django.utils.translation import ugettext_lazy from djangorestframework.serializer import Serializer import datetime import decimal class TestObjectToData(TestCase): """ Tests for the Serializer class. """ def setUp(self): self.serializer = Serializer() self.serialize = self.serializer.serialize def test_decimal(self): """Decimals need to be converted to a string representation.""" self.assertEquals(self.serialize(decimal.Decimal('1.5')), decimal.Decimal('1.5')) def test_function(self): """Functions with no arguments should be called.""" def foo(): return 1 self.assertEquals(self.serialize(foo), 1) def test_method(self): """Methods with only a ``self`` argument should be called.""" class Foo(object): def foo(self): return 1 self.assertEquals(self.serialize(Foo().foo), 1) def test_datetime(self): """datetime objects are left as-is.""" now = datetime.datetime.now() self.assertEquals(self.serialize(now), now) def test_dict_method_name_collision(self): """dict with key that collides with dict method name""" self.assertEquals(self.serialize({'items': 'foo'}), {'items': u'foo'}) self.assertEquals(self.serialize({'keys': 'foo'}), {'keys': u'foo'}) self.assertEquals(self.serialize({'values': 'foo'}), {'values': u'foo'}) def test_ugettext_lazy(self): self.assertEquals(self.serialize(ugettext_lazy('foobar')), u'foobar') class TestFieldNesting(TestCase): """ Test nesting the fields in the Serializer class """ def setUp(self): self.serializer = Serializer() self.serialize = self.serializer.serialize class M1(models.Model): field1 = models.CharField(max_length=256) field2 = models.CharField(max_length=256) class M2(models.Model): field = models.OneToOneField(M1) class M3(models.Model): field = models.ForeignKey(M1) self.m1 = M1(field1='foo', field2='bar') self.m2 = M2(field=self.m1) self.m3 = M3(field=self.m1) def test_tuple_nesting(self): """ Test tuple nesting on `fields` attr """ class SerializerM2(Serializer): fields = (('field', ('field1',)),) class SerializerM3(Serializer): fields = (('field', ('field2',)),) self.assertEqual(SerializerM2().serialize(self.m2), {'field': {'field1': u'foo'}}) self.assertEqual(SerializerM3().serialize(self.m3), {'field': {'field2': u'bar'}}) def test_serializer_class_nesting(self): """ Test related model serialization """ class NestedM2(Serializer): fields = ('field1', ) class NestedM3(Serializer): fields = ('field2', ) class SerializerM2(Serializer): fields = [('field', NestedM2)] class SerializerM3(Serializer): fields = [('field', NestedM3)] self.assertEqual(SerializerM2().serialize(self.m2), {'field': {'field1': u'foo'}}) self.assertEqual(SerializerM3().serialize(self.m3), {'field': {'field2': u'bar'}}) # def test_serializer_no_fields(self): # """ # Test related serializer works when the fields attr isn't present. Fix for # #178. 
# """ # class NestedM2(Serializer): # fields = ('field1', ) # class NestedM3(Serializer): # fields = ('field2', ) # class SerializerM2(Serializer): # include = [('field', NestedM2)] # exclude = ('id', ) # class SerializerM3(Serializer): # fields = [('field', NestedM3)] # self.assertEqual(SerializerM2().serialize(self.m2), {'field': {'field1': u'foo'}}) # self.assertEqual(SerializerM3().serialize(self.m3), {'field': {'field2': u'bar'}}) def test_serializer_classname_nesting(self): """ Test related model serialization """ class SerializerM2(Serializer): fields = [('field', 'NestedM2')] class SerializerM3(Serializer): fields = [('field', 'NestedM3')] class NestedM2(Serializer): fields = ('field1', ) class NestedM3(Serializer): fields = ('field2', ) self.assertEqual(SerializerM2().serialize(self.m2), {'field': {'field1': u'foo'}}) self.assertEqual(SerializerM3().serialize(self.m3), {'field': {'field2': u'bar'}}) def test_serializer_overridden_hook_method(self): """ Test serializing a model instance which overrides a class method on the serializer. Checks for correct behaviour in odd edge case. """ class SerializerM2(Serializer): fields = ('overridden', ) def overridden(self): return False self.m2.overridden = True self.assertEqual(SerializerM2().serialize_model(self.m2), {'overridden': True})
py
1a4eaa29f056fcd893655754453b8d8364bfaa57
#!/usr/bin/env python3 import argparse import configparser import logging import logging.handlers import os.path import subprocess import sys import threading import time import traceback from collections import Counter, defaultdict from io import StringIO # Global variables config = None email_log = None def tee_log(infile, out_lines, log_level): """ Create a thread that saves all the output on infile to out_lines and logs every line with log_level """ def tee_thread(): for line in iter(infile.readline, ""): logging.log(log_level, line.rstrip()) out_lines.append(line) infile.close() t = threading.Thread(target=tee_thread) t.daemon = True t.start() return t def snapraid_command(command, args={}, *, allow_statuscodes=[]): """ Run snapraid command Raises subprocess.CalledProcessError if errorlevel != 0 """ arguments = ["--quiet"] for (k, v) in args.items(): arguments.extend(["--" + k, str(v)]) p = subprocess.Popen( [config["snapraid"]["executable"], command] + arguments, stdout=subprocess.PIPE, stderr=subprocess.PIPE, # Snapraid always outputs utf-8 on windows. On linux, utf-8 # also seems a sensible assumption. encoding="utf-8", errors="replace") out = [] threads = [ tee_log(p.stdout, out, logging.OUTPUT), tee_log(p.stderr, [], logging.OUTERR)] for t in threads: t.join() ret = p.wait() # sleep for a while to make pervent output mixup time.sleep(0.3) if ret == 0 or ret in allow_statuscodes: return out else: raise subprocess.CalledProcessError(ret, "snapraid " + command) def send_email(success): import smtplib from email.mime.text import MIMEText from email import charset if len(config["smtp"]["host"]) == 0: logging.error("Failed to send email because smtp host is not set") return # use quoted-printable instead of the default base64 charset.add_charset("utf-8", charset.SHORTEST, charset.QP) if success: body = "SnapRAID job completed successfully:\n\n\n" else: body = "Error during SnapRAID job:\n\n\n" log = email_log.getvalue() maxsize = config['email'].get('maxsize', 500) * 1024 if maxsize and len(log) > maxsize: cut_lines = log.count("\n", maxsize // 2, -maxsize // 2) log = ( "NOTE: Log was too big for email and was shortened\n\n" + log[:maxsize // 2] + "[...]\n\n\n --- LOG WAS TOO BIG - {} LINES REMOVED --\n\n\n[...]".format( cut_lines) + log[-maxsize // 2:]) body += log msg = MIMEText(body, "plain", "utf-8") msg["Subject"] = config["email"]["subject"] + \ (" SUCCESS" if success else " ERROR") msg["From"] = config["email"]["from"] msg["To"] = config["email"]["to"] smtp = {"host": config["smtp"]["host"]} if config["smtp"]["port"]: smtp["port"] = config["smtp"]["port"] if config["smtp"]["ssl"]: server = smtplib.SMTP_SSL(**smtp) else: server = smtplib.SMTP(**smtp) if config["smtp"]["tls"]: server.starttls() if config["smtp"]["user"]: server.login(config["smtp"]["user"], config["smtp"]["password"]) server.sendmail( config["email"]["from"], [config["email"]["to"]], msg.as_string()) server.quit() def finish(is_success): if ("error", "success")[is_success] in config["email"]["sendon"]: try: send_email(is_success) except Exception: logging.exception("Failed to send email") if is_success: logging.info("Run finished successfully") else: logging.error("Run failed") sys.exit(0 if is_success else 1) def load_config(args): global config parser = configparser.RawConfigParser() parser.read(args.conf) sections = ["snapraid", "logging", "email", "smtp", "scrub"] config = dict((x, defaultdict(lambda: "")) for x in sections) for section in parser.sections(): for (k, v) in parser.items(section): 
config[section][k] = v.strip() int_options = [ ("snapraid", "deletethreshold"), ("logging", "maxsize"), ("scrub", "older-than"), ("email", "maxsize"), ] for section, option in int_options: try: config[section][option] = int(config[section][option]) except ValueError: config[section][option] = 0 config["smtp"]["ssl"] = (config["smtp"]["ssl"].lower() == "true") config["smtp"]["tls"] = (config["smtp"]["tls"].lower() == "true") config["scrub"]["enabled"] = (config["scrub"]["enabled"].lower() == "true") config["email"]["short"] = (config["email"]["short"].lower() == "true") config["snapraid"]["touch"] = (config["snapraid"]["touch"].lower() == "true") # Migration if config["scrub"]["percentage"]: config["scrub"]["plan"] = config["scrub"]["percentage"] if args.scrub is not None: config["scrub"]["enabled"] = args.scrub if args.ignore_deletethreshold: config["snapraid"]["deletethreshold"] = -1 def setup_logger(): log_format = logging.Formatter( "%(asctime)s [%(levelname)-6.6s] %(message)s") root_logger = logging.getLogger() logging.OUTPUT = 15 logging.addLevelName(logging.OUTPUT, "OUTPUT") logging.OUTERR = 25 logging.addLevelName(logging.OUTERR, "OUTERR") root_logger.setLevel(logging.OUTPUT) console_logger = logging.StreamHandler(sys.stdout) console_logger.setFormatter(log_format) root_logger.addHandler(console_logger) if config["logging"]["file"]: max_log_size = max(config["logging"]["maxsize"], 0) * 1024 file_logger = logging.handlers.RotatingFileHandler( config["logging"]["file"], maxBytes=max_log_size, backupCount=9) file_logger.setFormatter(log_format) root_logger.addHandler(file_logger) if config["email"]["sendon"]: global email_log email_log = StringIO() email_logger = logging.StreamHandler(email_log) email_logger.setFormatter(log_format) if config["email"]["short"]: # Don't send programm stdout in email email_logger.setLevel(logging.INFO) root_logger.addHandler(email_logger) def main(): parser = argparse.ArgumentParser() parser.add_argument("-c", "--conf", default="snapraid-runner.conf", metavar="CONFIG", help="Configuration file (default: %(default)s)") parser.add_argument("--no-scrub", action='store_false', dest='scrub', default=None, help="Do not scrub (overrides config)") parser.add_argument("--ignore-deletethreshold", action='store_true', help="Sync even if configured delete threshold is exceeded") args = parser.parse_args() if not os.path.exists(args.conf): print("snapraid-runner configuration file not found") parser.print_help() sys.exit(2) try: load_config(args) except Exception: print("unexpected exception while loading config") print(traceback.format_exc()) sys.exit(2) try: setup_logger() except Exception: print("unexpected exception while setting up logging") print(traceback.format_exc()) sys.exit(2) try: run() except Exception: logging.exception("Run failed due to unexpected exception:") finish(False) def run(): logging.info("=" * 60) logging.info("Run started") logging.info("=" * 60) if not os.path.isfile(config["snapraid"]["executable"]): logging.error("The configured snapraid executable \"{}\" does not " "exist or is not a file".format( config["snapraid"]["executable"])) finish(False) if config["snapraid"]["touch"]: logging.info("Running touch...") snapraid_command("touch") logging.info("*" * 60) logging.info("Running diff...") diff_out = snapraid_command("diff", allow_statuscodes=[2]) logging.info("*" * 60) diff_results = Counter(line.split(" ")[0] for line in diff_out) diff_results = dict((x, diff_results[x]) for x in ["add", "remove", "move", "update"]) logging.info(("Diff 
results: {add} added, {remove} removed, " + "{move} moved, {update} modified").format(**diff_results)) if (config["snapraid"]["deletethreshold"] >= 0 and diff_results["remove"] > config["snapraid"]["deletethreshold"]): logging.error( "Deleted files exceed delete threshold of {}, aborting".format( config["snapraid"]["deletethreshold"])) logging.error("Run again with --ignore-deletethreshold to sync anyways") finish(False) if (diff_results["remove"] + diff_results["add"] + diff_results["move"] + diff_results["update"] == 0): logging.info("No changes detected, no sync required") else: logging.info("Running sync...") try: snapraid_command("sync") except subprocess.CalledProcessError as e: logging.error(e) finish(False) logging.info("*" * 60) if config["scrub"]["enabled"]: logging.info("Running scrub...") try: # Check if a percentage plan was given int(config["scrub"]["plan"]) except ValueError: scrub_args = {"plan": config["scrub"]["plan"]} else: scrub_args = { "plan": config["scrub"]["plan"], "older-than": config["scrub"]["older-than"], } try: snapraid_command("scrub", scrub_args) except subprocess.CalledProcessError as e: logging.error(e) finish(False) logging.info("*" * 60) logging.info("All done") finish(True) main()
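# Hedged sketch of a snapraid-runner.conf covering the keys this script reads
# (all values below are placeholders):
#
#     [snapraid]
#     executable = /usr/bin/snapraid
#     deletethreshold = 40
#     touch = false
#
#     [logging]
#     file = snapraid-runner.log
#     maxsize = 5000
#
#     [email]
#     sendon = error,success
#     short = true
#     subject = [SnapRAID]
#     from = snapraid@example.com
#     to = admin@example.com
#     maxsize = 500
#
#     [smtp]
#     host = smtp.example.com
#     port = 587
#     ssl = false
#     tls = true
#     user = snapraid@example.com
#     password = secret
#
#     [scrub]
#     enabled = true
#     plan = 12
#     older-than = 10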
py
1a4eaa6d1d27bd4ccdbfba6fcf94a9b4dda6d421
#!/usr/bin/env python3 # Copyright (c) 2020The PIVX developers # Copyright (c) 2021- The ELONCOIN developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' Tests importprivkey and importaddress with staking keys/addresses. Node0 generates staking addresses and sends delegations to them. Node1 imports and rescans. The test checks that cold utxos and staking balance is updated. ''' from time import sleep from test_framework.test_framework import ElonCoinTestFramework from test_framework.util import ( assert_equal, DecimalAmt, ) class ImportStakingTest(ElonCoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.extra_args = [[]] * self.num_nodes def log_title(self): title = "*** Starting %s ***" % self.__class__.__name__ underline = "-" * len(title) description = "Tests importprivkey and importaddress with staking keys/addresses." self.log.info("\n\n%s\n%s\n%s\n", title, underline, description) def run_test(self): NUM_OF_DELEGATIONS = 4 # Create 2*NUM_OF_DELEGATIONS staking addresses self.log_title() # Create cold staking addresses and delegations self.log.info("Creating new staking addresses and sending delegations") staking_addresses = [self.nodes[0].getnewstakingaddress("label %d" % i) for i in range(2 * NUM_OF_DELEGATIONS)] delegations = [] for i, sa in enumerate(staking_addresses): # delegate 10 EMC delegations.append(self.nodes[0].delegatestake(sa, 10)['txid']) # mine a block and check staking balance self.nodes[0].generate(1) assert_equal(self.nodes[0].getdelegatedbalance(), DecimalAmt(10 * (i+1))) self.sync_blocks() # Export keys self.log.info("Exporting keys and importing in node 1") priv_keys = [self.nodes[0].dumpprivkey(x) for x in staking_addresses] # Import keys of addresses 0-(NUM_OF_DELEGATIONS-1) (and rescan) assert_equal(self.nodes[1].getcoldstakingbalance(), DecimalAmt(0)) for i, pk in enumerate(priv_keys[:NUM_OF_DELEGATIONS]): self.nodes[1].importprivkey(pk, "label %d" % i, True, True) val = self.nodes[1].validateaddress(staking_addresses[i]) assert_equal(val['ismine'], True) assert_equal(val['isstaking'], True) assert_equal(val['iswatchonly'], False) assert_equal(self.nodes[1].getcoldstakingbalance(), DecimalAmt(10 * (i + 1))) self.log.info("Balance of node 1 checks out") coldutxos = [x['txid'] for x in self.nodes[1].listcoldutxos()] assert_equal(len(coldutxos), NUM_OF_DELEGATIONS) assert_equal(len([x for x in coldutxos if x in delegations]), NUM_OF_DELEGATIONS) self.log.info("Delegation list of node 1 checks out") # Import remaining addresses as watch-only (and rescan again) self.log.info("Importing addresses (watch-only)") for i, sa in enumerate(staking_addresses[NUM_OF_DELEGATIONS:]): self.nodes[1].importaddress(sa, "label %d" % i, True) # !TODO: add watch-only support in the core (balance and txes) # Currently the only way to check the addressbook without the key here # is to verify the label with validateaddress val = self.nodes[1].validateaddress(sa) assert_equal(val['ismine'], False) assert_equal(val['isstaking'], True) assert_equal(val['iswatchonly'], True) assert_equal(self.nodes[1].getcoldstakingbalance(), DecimalAmt(10 * NUM_OF_DELEGATIONS)) self.log.info("Balance of node 1 checks out") if __name__ == '__main__': ImportStakingTest().main()
py
1a4eaacf45b5d8fa354841415f85078d5fa38aaa
# Solution of;
#   Project Euler Problem 384: Rudin-Shapiro sequence
#   https://projecteuler.net/problem=384
#
# Define the sequence a(n) as the number of adjacent pairs of ones in the
# binary expansion of n (possibly overlapping), e.g. a(5) = a(101_2) = 0,
# a(6) = a(110_2) = 1, a(7) = a(111_2) = 2. Define the sequence
# b(n) = (-1)^a(n). This sequence is called the Rudin-Shapiro sequence.
# Also consider the summatory sequence of b(n):
# $s(n) = \sum \limits_{i = 0}^{n} {b(i)}$.
# The first couple of values of these sequences are:
#   n     0  1  2  3  4  5  6  7
#   a(n)  0  0  0  1  0  0  1  2
#   b(n)  1  1  1 -1  1  1 -1  1
#   s(n)  1  2  3  2  3  4  3  4
# The sequence s(n) has the remarkable property that all elements are
# positive and every positive integer k occurs exactly k times. Define
# g(t,c), with 1 ≤ c ≤ t, as the index in s(n) for which t occurs for the
# c'th time in s(n), e.g. g(3,3) = 6, g(4,2) = 7 and
# g(54321,12345) = 1220847710. Let F(n) be the Fibonacci sequence defined by
# F(0) = F(1) = 1 and F(n) = F(n-1) + F(n-2) for n > 1. Define
# GF(t) = g(F(t), F(t-1)). Find $\sum$ GF(t) for 2 ≤ t ≤ 45.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed


def dummy(n):
    pass


if __name__ == '__main__':
    n = 1000
    i = 10000
    prob_id = 384
    timed.caller(dummy, n, i, prob_id)
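# Hedged illustration of the sequence definitions above (brute force only, far
# too slow for the actual problem; the helper names are our own, not part of
# the original solution):
def rs_a(n):
    """Number of (possibly overlapping) adjacent '11' pairs in bin(n)."""
    bits = bin(n)[2:]
    return sum(1 for j in range(len(bits) - 1) if bits[j] == bits[j + 1] == '1')


def rs_b(n):
    return (-1) ** rs_a(n)


def rs_s(n):
    return sum(rs_b(j) for j in range(n + 1))


# Reproduces the table in the header comment:
assert [rs_s(j) for j in range(8)] == [1, 2, 3, 2, 3, 4, 3, 4]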
py
1a4eab006673f8b7cb2570e8dd76142314019fa5
from .odbcmanager import OdbcManager
py
1a4ead003b31d0f315a0e5e9248bf869c0680526
# # Copyright 2017 Quantopian, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import abstractmethod import math import numpy as np from pandas import isnull from toolz import merge from zipline.assets import Equity, Future from zipline.errors import HistoryWindowStartsBeforeData from zipline.finance.constants import ROOT_SYMBOL_TO_ETA, DEFAULT_ETA from zipline.finance.shared import AllowedAssetMarker, FinancialModelMeta from zipline.finance.transaction import create_transaction from zipline.utils.cache import ExpiringCache from zipline.utils.dummy import DummyMapping from zipline.utils.input_validation import ( expect_bounded, expect_strictly_bounded, ) SELL = 1 << 0 BUY = 1 << 1 STOP = 1 << 2 LIMIT = 1 << 3 SQRT_252 = math.sqrt(252) DEFAULT_EQUITY_VOLUME_SLIPPAGE_BAR_LIMIT = 0.025 DEFAULT_FUTURE_VOLUME_SLIPPAGE_BAR_LIMIT = 0.05 class LiquidityExceeded(Exception): pass def fill_price_worse_than_limit_price(fill_price, order): """ Checks whether the fill price is worse than the order's limit price. Parameters ---------- fill_price: float The price to check. order: zipline.finance.order.Order The order whose limit price to check. Returns ------- bool: Whether the fill price is above the limit price (for a buy) or below the limit price (for a sell). """ if order.limit: # this is tricky! if an order with a limit price has reached # the limit price, we will try to fill the order. do not fill # these shares if the impacted price is worse than the limit # price. return early to avoid creating the transaction. # buy order is worse if the impacted price is greater than # the limit price. sell order is worse if the impacted price # is less than the limit price if (order.direction > 0 and fill_price > order.limit) or ( order.direction < 0 and fill_price < order.limit ): return True return False class SlippageModel(metaclass=FinancialModelMeta): """ Abstract base class for slippage models. Slippage models are responsible for the rates and prices at which orders fill during a simulation. To implement a new slippage model, create a subclass of :class:`~zipline.finance.slippage.SlippageModel` and implement :meth:`process_order`. Methods ------- process_order(data, order) Attributes ---------- volume_for_bar : int Number of shares that have already been filled for the currently-filling asset in the current minute. This attribute is maintained automatically by the base class. It can be used by subclasses to keep track of the total amount filled if there are multiple open orders for a single asset. Notes ----- Subclasses that define their own constructors should call ``super(<subclass name>, self).__init__()`` before performing other initialization. """ # Asset types that are compatible with the given model. allowed_asset_types = (Equity, Future) def __init__(self): self._volume_for_bar = 0 @property def volume_for_bar(self): return self._volume_for_bar @abstractmethod def process_order(self, data, order): """ Compute the number of shares and price to fill for ``order`` in the current minute. 
Parameters ---------- data : zipline.protocol.BarData The data for the given bar. order : zipline.finance.order.Order The order to simulate. Returns ------- execution_price : float The price of the fill. execution_volume : int The number of shares that should be filled. Must be between ``0`` and ``order.amount - order.filled``. If the amount filled is less than the amount remaining, ``order`` will remain open and will be passed again to this method in the next minute. Raises ------ zipline.finance.slippage.LiquidityExceeded May be raised if no more orders should be processed for the current asset during the current bar. Notes ----- Before this method is called, :attr:`volume_for_bar` will be set to the number of shares that have already been filled for ``order.asset`` in the current minute. :meth:`process_order` is not called by the base class on bars for which there was no historical volume. """ raise NotImplementedError("process_order") def simulate(self, data, asset, orders_for_asset): self._volume_for_bar = 0 volume = data.current(asset, "volume") if volume == 0: return # can use the close price, since we verified there's volume in this # bar. price = data.current(asset, "close") # BEGIN # # Remove this block after fixing data to ensure volume always has # corresponding price. if isnull(price): return # END dt = data.current_dt for order in orders_for_asset: if order.open_amount == 0: continue order.check_triggers(price, dt) if not order.triggered: continue txn = None try: execution_price, execution_volume = self.process_order( data, order ) if execution_price is not None: txn = create_transaction( order, data.current_dt, execution_price, execution_volume, ) except LiquidityExceeded: break if txn: self._volume_for_bar += abs(txn.amount) yield order, txn def asdict(self): return self.__dict__ class NoSlippage(SlippageModel): """A slippage model where all orders fill immediately and completely at the current close price. Notes ----- This is primarily used for testing. """ @staticmethod def process_order(data, order): return ( data.current(order.asset, "close"), order.amount, ) class EquitySlippageModel(SlippageModel, metaclass=AllowedAssetMarker): """ Base class for slippage models which only support equities. """ allowed_asset_types = (Equity,) class FutureSlippageModel(SlippageModel, metaclass=AllowedAssetMarker): """ Base class for slippage models which only support futures. """ allowed_asset_types = (Future,) class VolumeShareSlippage(SlippageModel): """ Model slippage as a quadratic function of percentage of historical volume. Orders to buy will be filled at:: price * (1 + price_impact * (volume_share ** 2)) Orders to sell will be filled at:: price * (1 - price_impact * (volume_share ** 2)) where ``price`` is the close price for the bar, and ``volume_share`` is the percentage of minutely volume filled, up to a max of ``volume_limit``. Parameters ---------- volume_limit : float, optional Maximum percent of historical volume that can fill in each bar. 0.5 means 50% of historical volume. 1.0 means 100%. Default is 0.025 (i.e., 2.5%). price_impact : float, optional Scaling coefficient for price impact. Larger values will result in more simulated price impact. Smaller values will result in less simulated price impact. Default is 0.1. 
""" def __init__( self, volume_limit=DEFAULT_EQUITY_VOLUME_SLIPPAGE_BAR_LIMIT, price_impact=0.1, ): super(VolumeShareSlippage, self).__init__() self.volume_limit = volume_limit self.price_impact = price_impact def __repr__(self): return """ {class_name}( volume_limit={volume_limit}, price_impact={price_impact}) """.strip().format( class_name=self.__class__.__name__, volume_limit=self.volume_limit, price_impact=self.price_impact, ) def process_order(self, data, order): volume = data.current(order.asset, "volume") max_volume = self.volume_limit * volume # price impact accounts for the total volume of transactions # created against the current minute bar remaining_volume = max_volume - self.volume_for_bar if remaining_volume < 1: # we can't fill any more transactions raise LiquidityExceeded() # the current order amount will be the min of the # volume available in the bar or the open amount. cur_volume = int(min(remaining_volume, abs(order.open_amount))) if cur_volume < 1: return None, None # tally the current amount into our total amount ordered. # total amount will be used to calculate price impact total_volume = self.volume_for_bar + cur_volume volume_share = min(total_volume / volume, self.volume_limit) price = data.current(order.asset, "close") # BEGIN # # Remove this block after fixing data to ensure volume always has # corresponding price. if isnull(price): return # END simulated_impact = ( volume_share ** 2 * math.copysign(self.price_impact, order.direction) * price ) impacted_price = price + simulated_impact if fill_price_worse_than_limit_price(impacted_price, order): return None, None return (impacted_price, math.copysign(cur_volume, order.direction)) class FixedSlippage(SlippageModel): """ Simple model assuming a fixed-size spread for all assets. Parameters ---------- spread : float, optional Size of the assumed spread for all assets. Orders to buy will be filled at ``close + (spread / 2)``. Orders to sell will be filled at ``close - (spread / 2)``. Notes ----- This model does not impose limits on the size of fills. An order for an asset will always be filled as soon as any trading activity occurs in the order's asset, even if the size of the order is greater than the historical volume. """ def __init__(self, spread=0.0): super(FixedSlippage, self).__init__() self.spread = spread def __repr__(self): return "{class_name}(spread={spread})".format( class_name=self.__class__.__name__, spread=self.spread, ) def process_order(self, data, order): price = data.current(order.asset, "close") return (price * (1 + self.spread / 2.0 * order.direction), order.amount) class MarketImpactBase(SlippageModel): """ Base class for slippage models which compute a simulated price impact according to a history lookback. """ NO_DATA_VOLATILITY_SLIPPAGE_IMPACT = 10.0 / 10000 def __init__(self): super(MarketImpactBase, self).__init__() self._window_data_cache = ExpiringCache() @abstractmethod def get_txn_volume(self, data, order): """ Return the number of shares we would like to order in this minute. Parameters ---------- data : BarData order : Order Return ------ int : the number of shares """ raise NotImplementedError("get_txn_volume") @abstractmethod def get_simulated_impact( self, order, current_price, current_volume, txn_volume, mean_volume, volatility, ): """ Calculate simulated price impact. Parameters ---------- order : The order being processed. current_price : Current price of the asset being ordered. current_volume : Volume of the asset being ordered for the current bar. 
txn_volume : Number of shares/contracts being ordered. mean_volume : Trailing ADV of the asset. volatility : Annualized daily volatility of returns. Return ------ int : impact on the current price. """ raise NotImplementedError("get_simulated_impact") def process_order(self, data, order): if order.open_amount == 0: return None, None minute_data = data.current(order.asset, ["volume", "high", "low"]) mean_volume, volatility = self._get_window_data(data, order.asset, 20) # Price to use is the average of the minute bar's open and close. price = np.mean([minute_data["high"], minute_data["low"]]) volume = minute_data["volume"] if not volume: return None, None txn_volume = int( min(self.get_txn_volume(data, order), abs(order.open_amount)) ) # If the computed transaction volume is zero or a decimal value, 'int' # will round it down to zero. In that case just bail. if txn_volume == 0: return None, None if mean_volume == 0 or np.isnan(volatility): # If this is the first day the contract exists or there is no # volume history, default to a conservative estimate of impact. simulated_impact = price * self.NO_DATA_VOLATILITY_SLIPPAGE_IMPACT else: simulated_impact = self.get_simulated_impact( order=order, current_price=price, current_volume=volume, txn_volume=txn_volume, mean_volume=mean_volume, volatility=volatility, ) impacted_price = price + math.copysign( simulated_impact, order.direction ) if fill_price_worse_than_limit_price(impacted_price, order): return None, None return impacted_price, math.copysign(txn_volume, order.direction) def _get_window_data(self, data, asset, window_length): """ Internal utility method to return the trailing mean volume over the past 'window_length' days, and volatility of close prices for a specific asset. Parameters ---------- data : The BarData from which to fetch the daily windows. asset : The Asset whose data we are fetching. window_length : Number of days of history used to calculate the mean volume and close price volatility. Returns ------- (mean volume, volatility) """ try: values = self._window_data_cache.get(asset, data.current_session) except KeyError: try: # Add a day because we want 'window_length' complete days, # excluding the current day. volume_history = data.history( asset, "volume", window_length + 1, "1d", ) close_history = data.history( asset, "close", window_length + 1, "1d", ) except HistoryWindowStartsBeforeData: # If there is not enough data to do a full history call, return # values as if there was no data. return 0, np.NaN # Exclude the first value of the percent change array because it is # always just NaN. close_volatility = ( close_history[:-1] .pct_change()[1:] .std( skipna=False, ) ) values = { "volume": volume_history[:-1].mean(), "close": close_volatility * SQRT_252, } self._window_data_cache.set(asset, values, data.current_session) return values["volume"], values["close"] class VolatilityVolumeShare(MarketImpactBase): """ Model slippage for futures contracts according to the following formula: new_price = price + (price * MI / 10000), where 'MI' is market impact, which is defined as: MI = eta * sigma * sqrt(psi) - ``eta`` is a constant which varies by root symbol. - ``sigma`` is 20-day annualized volatility. - ``psi`` is the volume traded in the given bar divided by 20-day ADV. Parameters ---------- volume_limit : float Maximum percentage (as a decimal) of a bar's total volume that can be traded. eta : float or dict Constant used in the market impact formula. If given a float, the eta for all futures contracts is the same. 
If given a dictionary, it must map root symbols to the eta for contracts of that symbol. """ NO_DATA_VOLATILITY_SLIPPAGE_IMPACT = 7.5 / 10000 allowed_asset_types = (Future,) def __init__(self, volume_limit, eta=ROOT_SYMBOL_TO_ETA): super(VolatilityVolumeShare, self).__init__() self.volume_limit = volume_limit # If 'eta' is a constant, use a dummy mapping to treat it as a # dictionary that always returns the same value. # NOTE: This dictionary does not handle unknown root symbols, so it may # be worth revisiting this behavior. if isinstance(eta, (int, float)): self._eta = DummyMapping(float(eta)) else: # Eta is a dictionary. If the user's dictionary does not provide a # value for a certain contract, fall back on the pre-defined eta # values per root symbol. self._eta = merge(ROOT_SYMBOL_TO_ETA, eta) def __repr__(self): if isinstance(self._eta, DummyMapping): # Eta is a constant, so extract it. eta = self._eta["dummy key"] else: eta = "<varies>" return "{class_name}(volume_limit={volume_limit}, eta={eta})".format( class_name=self.__class__.__name__, volume_limit=self.volume_limit, eta=eta, ) def get_simulated_impact( self, order, current_price, current_volume, txn_volume, mean_volume, volatility, ): try: eta = self._eta[order.asset.root_symbol] except Exception: eta = DEFAULT_ETA psi = txn_volume / mean_volume market_impact = eta * volatility * math.sqrt(psi) # We divide by 10,000 because this model computes to basis points. # To convert from bps to % we need to divide by 100, then again to # convert from % to fraction. return (current_price * market_impact) / 10000 def get_txn_volume(self, data, order): volume = data.current(order.asset, "volume") return volume * self.volume_limit class FixedBasisPointsSlippage(SlippageModel): """ Model slippage as a fixed percentage difference from historical minutely close price, limiting the size of fills to a fixed percentage of historical minutely volume. Orders to buy are filled at:: historical_price * (1 + (basis_points * 0.0001)) Orders to sell are filled at:: historical_price * (1 - (basis_points * 0.0001)) Fill sizes are capped at:: historical_volume * volume_limit Parameters ---------- basis_points : float, optional Number of basis points of slippage to apply for each fill. Default is 5 basis points. volume_limit : float, optional Fraction of trading volume that can be filled each minute. Default is 10% of trading volume. Notes ----- - A basis point is one one-hundredth of a percent. - This class, default-constructed, is zipline's default slippage model for equities. 
""" @expect_bounded( basis_points=(0, None), __funcname="FixedBasisPointsSlippage", ) @expect_strictly_bounded( volume_limit=(0, None), __funcname="FixedBasisPointsSlippage", ) def __init__(self, basis_points=5.0, volume_limit=0.1): super(FixedBasisPointsSlippage, self).__init__() self.basis_points = basis_points self.percentage = self.basis_points / 10000.0 self.volume_limit = volume_limit def __repr__(self): return """ {class_name}( basis_points={basis_points}, volume_limit={volume_limit}, ) """.strip().format( class_name=self.__class__.__name__, basis_points=self.basis_points, volume_limit=self.volume_limit, ) def process_order(self, data, order): volume = data.current(order.asset, "volume") max_volume = int(self.volume_limit * volume) price = data.current(order.asset, "close") shares_to_fill = min( abs(order.open_amount), max_volume - self.volume_for_bar ) if shares_to_fill == 0: raise LiquidityExceeded() return ( price + price * (self.percentage * order.direction), shares_to_fill * order.direction, ) if __name__ == "__main__": f = EquitySlippageModel() # print(f.__meta__) print(f.__class__)
py
1a4ead54d12bb045f022ca752345c39a2164db05
# Adapted from ZJULearning/resa # Better to use a decoupled implementation, # costs more codes, but clear. # Diff from RESA official code: # 1. we use BN+ReLU in channel reducer # 2. we always use the BUSD decoder in the paper (official code does not use BUSD in CULane) # 3. we always use 5 RESA iterations (4 in official code) # 4. we use a higher capacity lane existence classifier (same as ERFNet/ENet baseline) # 5. we use the SCNN sqrt(5) init trick for RESA, which # 5.1. enables fewer warmup steps # 5.2. combined with 4, produces slightly better performance # 6. we do not use horizontal flip or cutting height in loading, in which # 6.1. flip does not help performance (at least on the val set) # 6.2. w.o. cutting height trick probably is the main reason for our lower performance, but we can't use it since # other pytorch-auto-drive models do not use it. import torch.nn as nn from ..common_models import RESA, RESAReducer, BUSD, RESALaneExist, EDLaneExist, PlainDecoder from .._utils import IntermediateLayerGetter from .. import resnet class RESANet(nn.Module): def __init__(self, num_classes, backbone_name, flattened_size, channel_reduce, pretrained_backbone=True): super(RESANet, self).__init__() backbone = resnet.__dict__[backbone_name]( pretrained=pretrained_backbone, replace_stride_with_dilation=[False, True, True]) return_layers = {'layer3': 'out'} self.backbone = IntermediateLayerGetter(backbone, return_layers=return_layers) in_channels = 1024 if backbone_name == 'resnet50' or backbone_name == 'resnet101' else 256 # self.channel_reducer = RESAReducer(in_channels=in_channels, reduce=channel_reduce, bn_relu=False) self.channel_reducer = RESAReducer(in_channels=in_channels, reduce=channel_reduce) self.spatial_conv = RESA() self.decoder = BUSD(num_classes=num_classes) # self.decoder = PlainDecoder(num_classes=num_classes) self.lane_classifier = EDLaneExist(num_output=num_classes - 1, flattened_size=flattened_size) # self.lane_classifier = RESALaneExist(num_output=num_classes - 1, flattened_size=flattened_size) def forward(self, x): x = self.backbone(x)['out'] x = self.channel_reducer(x) x = self.spatial_conv(x) res = {'out': self.decoder(x), 'lane': self.lane_classifier(x)} return res
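# Hypothetical smoke test for the wrapper above (all numbers are placeholders;
# in particular `flattened_size` depends on the input resolution used by the
# training config and is not defined in this file):
#
#     import torch
#     net = RESANet(num_classes=5, backbone_name='resnet18',
#                   flattened_size=4400, channel_reduce=128,
#                   pretrained_backbone=False)
#     res = net(torch.zeros(1, 3, 288, 800))
#     # res['out']  -> per-pixel segmentation logits from the BUSD decoder
#     # res['lane'] -> per-lane existence logits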
py
1a4eae3e6100ef7084f3c5715da8706f9d156dcb
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function from ... import core from ... import layers from ... import framework def append_cast_op(i, o, prog): """ Append a cast op in a given Program to cast input `i` to data type `o.dtype`. Args: i (Variable): The input Variable. o (Variable): The output Variable. prog (Program): The Program to append cast op. """ prog.global_block().append_op( type="cast", inputs={"X": i}, outputs={"Out": o}, attrs={"in_dtype": i.dtype, "out_dtype": o.dtype}) def _rename_arg(op, old_name, new_name): """ If an op has old_name input and output, rename these input args new_name. Args: op (Operator): Current operator. old_name (str): The old name of input args. new_name (str): The new name of input args. """ op_desc = op.desc if isinstance(op_desc, tuple): op_desc = op_desc[0] op_desc._rename_input(old_name, new_name) op_desc._rename_output(old_name, new_name) def _dtype_to_str(dtype): """ Convert specific variable type to its corresponding string. Args: dtype (VarType): Variable type. """ if dtype == core.VarDesc.VarType.FP16: return 'fp16' else: return 'fp32' def _insert_cast_op(block, op, idx, src_dtype, dest_dtype): """ Insert cast op and rename args of input and output. Args: block (Program): The block in which the operator is. op (Operator): The operator to insert cast op. idx (int): The index of current operator. src_dtype (VarType): The input variable dtype of cast op. desr_dtype (VarType): The output variable dtype of cast op. Returns: num_cast_op (int): The number of cast ops that have been inserted. 
""" num_cast_ops = 0 valid_types = [ core.VarDesc.VarType.LOD_TENSOR, core.VarDesc.VarType.SELECTED_ROWS, core.VarDesc.VarType.LOD_TENSOR_ARRAY ] for in_name in op.input_names: if src_dtype == core.VarDesc.VarType.FP32 and op.type == 'batch_norm': if in_name != 'X': continue for in_var_name in op.input(in_name): in_var = block.var(in_var_name) if in_var.type not in valid_types: continue if in_var.dtype == src_dtype: cast_name = in_var.name + '.cast_' + _dtype_to_str(dest_dtype) out_var = block.vars.get(cast_name) if out_var is None or out_var.dtype != dest_dtype: out_var = block.create_var( name=cast_name, dtype=dest_dtype, persistable=False, stop_gradient=False) block._insert_op( idx, type="cast", inputs={"X": in_var}, outputs={"Out": out_var}, attrs={ "in_dtype": in_var.dtype, "out_dtype": out_var.dtype }) num_cast_ops += 1 _rename_arg(op, in_var.name, out_var.name) else: if op.has_attr('in_dtype'): op._set_attr('in_dtype', dest_dtype) if src_dtype == core.VarDesc.VarType.FP32: for out_name in op.output_names: if op.type == 'batch_norm' and out_name != 'Y': continue for out_var_name in op.output(out_name): out_var = block.var(out_var_name) if out_var.type not in valid_types: continue if out_var.dtype == core.VarDesc.VarType.FP32: out_var.desc.set_dtype(core.VarDesc.VarType.FP16) if op.has_attr('out_dtype'): op._set_attr('out_dtype', core.VarDesc.VarType.FP16) return num_cast_ops def find_true_prev_op(ops, cur_op, var_name): """ Find the true prev op that outputs var_name variable. Args: ops (list): A list of ops. cur_op (Operator): Current operator which has var_name variable. var_name (string): Variable name. """ prev_op = [] for op in ops: if op == cur_op: break for out_name in op.output_names: for out_var_name in op.output(out_name): if out_var_name == var_name: prev_op.append(op) if prev_op: if not len(prev_op) == 1: raise ValueError("There must be only one previous op " "that outputs {0} variable".format(var_name)) else: return prev_op[0] return None def _is_in_black_varnames(op, amp_lists): for in_name in op.input_arg_names: if in_name in amp_lists.black_varnames: return True for out_name in op.output_arg_names: if out_name in amp_lists.black_varnames: return True return False def rewrite_program(main_prog, amp_lists): """ Traverse all ops in current block and insert cast op according to which set current op belongs to. 1. When an op belongs to the black list, add it to black set 2. When an op belongs to the white list, add it to white set 3. When an op belongs to the gray list. If one of its inputs is the output of black set op or black list op, add it to black set. If all of its previous ops are not black op and one of its inputs is the output of white set op or white list op, add it to white set. 4. When an op isn't in the lists, add it to black op set. 5. Add necessary cast ops to make sure that black set op will be computed in fp32 mode, while white set op will be computed in fp16 mode. Args: main_prog (Program): The main program for training. 
""" block = main_prog.global_block() ops = block.ops white_op_set = set() black_op_set = set() for op in ops: if amp_lists.black_varnames is not None and _is_in_black_varnames( op, amp_lists): black_op_set.add(op) continue if op.type in amp_lists.black_list: black_op_set.add(op) elif op.type in amp_lists.white_list: white_op_set.add(op) elif op.type in amp_lists.gray_list: is_black_op = False is_white_op = False for in_name in op.input_names: # if this op has inputs if in_name: for in_var_name in op.input(in_name): in_var = block.var(in_var_name) # this in_var isn't the output of other op if in_var.op is None: continue elif in_var.op is op: prev_op = find_true_prev_op(ops, op, in_var_name) if prev_op is None: continue else: prev_op = in_var.op # if it's one of inputs if prev_op in black_op_set or \ prev_op.type in amp_lists.black_list: is_black_op = True elif prev_op in white_op_set or \ prev_op.type in amp_lists.white_list: is_white_op = True if is_black_op: black_op_set.add(op) elif is_white_op: white_op_set.add(op) else: pass else: # For numerical safe, we apply fp32 computation on ops that # are not determined which list they should stay. black_op_set.add(op) idx = 0 while idx < len(ops): op = ops[idx] num_cast_ops = 0 if op in black_op_set: num_cast_ops = _insert_cast_op(block, op, idx, core.VarDesc.VarType.FP16, core.VarDesc.VarType.FP32) elif op in white_op_set: num_cast_ops = _insert_cast_op(block, op, idx, core.VarDesc.VarType.FP32, core.VarDesc.VarType.FP16) else: pass idx += num_cast_ops + 1 def update_role_var_grad(main_prog, params_grads): """ Update op_role_var attr for some ops to make sure the gradients transfered across gpus is FP16. 1. Check whether the op that outputs gradient is cast or not. 2. If op is cast and gradient is FP32, remove the op_role_var and find the prev op which outputs FP16 gradient 3. Update the op_role_var of the prev op. Args: main_prog (Program): The main program for training. params_grads (list): A list of params and grads. """ block = main_prog.global_block() BACKWARD = core.op_proto_and_checker_maker.OpRole.Backward OPTIMIZE = core.op_proto_and_checker_maker.OpRole.Optimize for p, g in params_grads: op = g.op if g.dtype == core.VarDesc.VarType.FP32 and op.type == 'cast': role = op.attr('op_role') if role & int(BACKWARD) and op.has_attr('op_role_var'): op.desc.remove_attr("op_role_var") else: raise ValueError("The cast op {0} must be in BACKWARD role " "and have op_role_var attr.".format(op)) fp16_grad_name = op.input(op.input_names[0])[0] op_for_fp16_grad = find_true_prev_op(block.ops, op, fp16_grad_name) op_role_var_attr_name = \ core.op_proto_and_checker_maker.kOpRoleVarAttrName() attr_val = [p.name, fp16_grad_name] if op_for_fp16_grad.has_attr(op_role_var_attr_name): attr_val.extend(op_for_fp16_grad.attr(op_role_var_attr_name)) op_for_fp16_grad._set_attr(op_role_var_attr_name, attr_val) # maximize the allreduce overlap op._set_attr('op_role', OPTIMIZE) def update_loss_scaling(is_overall_finite, prev_loss_scaling, num_good_steps, num_bad_steps, incr_every_n_steps, decr_every_n_nan_or_inf, incr_ratio, decr_ratio): """ Update loss scaling according to overall gradients. If all gradients is finite after incr_every_n_steps, loss scaling will increase by incr_ratio. Otherwisw, loss scaling will decrease by decr_ratio after decr_every_n_nan_or_inf steps and each step some gradients are infinite. Args: is_overall_finite (Variable): A boolean variable indicates whether all gradients are finite. prev_loss_scaling (Variable): Previous loss scaling. 
num_good_steps (Variable): A variable accumulates good steps in which all gradients are finite. num_bad_steps (Variable): A variable accumulates bad steps in which some gradients are infinite. incr_every_n_steps (Variable): A variable represents increasing loss scaling every n consecutive steps with finite gradients. decr_every_n_nan_or_inf (Variable): A variable represents decreasing loss scaling every n accumulated steps with nan or inf gradients. incr_ratio(float): The multiplier to use when increasing the loss scaling. decr_ratio(float): The less-than-one-multiplier to use when decreasing loss scaling. """ zero_steps = layers.fill_constant(shape=[1], dtype='int32', value=0) with layers.Switch() as switch: with switch.case(is_overall_finite): should_incr_loss_scaling = layers.less_than(incr_every_n_steps, num_good_steps + 1) with layers.Switch() as switch1: with switch1.case(should_incr_loss_scaling): new_loss_scaling = prev_loss_scaling * incr_ratio loss_scaling_is_finite = layers.isfinite(new_loss_scaling) with layers.Switch() as switch2: with switch2.case(loss_scaling_is_finite): layers.assign(new_loss_scaling, prev_loss_scaling) with switch2.default(): pass layers.assign(zero_steps, num_good_steps) layers.assign(zero_steps, num_bad_steps) with switch1.default(): layers.increment(num_good_steps) layers.assign(zero_steps, num_bad_steps) with switch.default(): should_decr_loss_scaling = layers.less_than(decr_every_n_nan_or_inf, num_bad_steps + 1) with layers.Switch() as switch3: with switch3.case(should_decr_loss_scaling): new_loss_scaling = prev_loss_scaling * decr_ratio static_loss_scaling = \ layers.fill_constant(shape=[1], dtype='float32', value=1.0) less_than_one = layers.less_than(new_loss_scaling, static_loss_scaling) with layers.Switch() as switch4: with switch4.case(less_than_one): layers.assign(static_loss_scaling, prev_loss_scaling) with switch4.default(): layers.assign(new_loss_scaling, prev_loss_scaling) layers.assign(zero_steps, num_good_steps) layers.assign(zero_steps, num_bad_steps) with switch3.default(): layers.assign(zero_steps, num_good_steps) layers.increment(num_bad_steps)
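import math


# Plain-Python sketch of the dynamic loss-scaling rule that
# update_loss_scaling() assembles from fluid layers above. This is an
# illustration only: the real version manipulates framework Variables inside
# layers.Switch, while this mirror works on ordinary numbers.
def _update_loss_scaling_py(all_finite, scaling, good, bad,
                            incr_every_n_steps, decr_every_n_nan_or_inf,
                            incr_ratio, decr_ratio):
    if all_finite:
        if good + 1 > incr_every_n_steps:
            new_scaling = scaling * incr_ratio
            if math.isfinite(new_scaling):
                scaling = new_scaling
            good, bad = 0, 0
        else:
            good, bad = good + 1, 0
    else:
        if bad + 1 > decr_every_n_nan_or_inf:
            # Never let the loss scaling drop below 1.0, mirroring the check above.
            scaling = max(scaling * decr_ratio, 1.0)
            good, bad = 0, 0
        else:
            good, bad = 0, bad + 1
    return scaling, good, bad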
py
1a4eaee7af010ce9c7c0c9afad37e67ef40f8975
"""soma URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.10/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf import settings from django.conf.urls import url from django.conf.urls.static import static from django.contrib import admin urlpatterns = [ url(r'^admin/', admin.site.urls), ] # Static and media files. if settings.DEBUG: urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
py
1a4eafa932dc5f993e6821aeac2d267eb08b2fe5
import requests import re import lxml.html import MySQLdb conn = MySQLdb.connect(db='Crawler', user='cloud', passwd='1111', charset='utf8mb4') c=conn.cursor() delete_sql = 'DELETE FROM re_info WHERE site_name = "인크루트"' c.execute(delete_sql) def crawling(page_count): front_url="http://job.incruit.com/entry/searchjob.asp?ct=12&ty=1&cd=1&page=" for i in range(1, page_count+1): url = front_url+str(i) list_page=requests.get(url) root=lxml.html.fromstring(list_page.content) for everything in root.cssselect('tbody'): for thing in everything.cssselect('tr'): t = 0 companies = thing.cssselect('th > div > .check_list_r > .links > a') if not companies: company = ' ' elif companies: company = companies[0].text.strip() titles = thing.cssselect('td > .subjects > .accent > a') if not titles: title = ' ' title_url = ' ' if titles: title = titles[0].text_content() title_url = titles[0].get('href') site_name = '인크루트' field1 = thing.cssselect('td > .subjects > .details_txts.firstChild > em') if not field1: field1 = ' ' elif field1: field1 = field1[0].text if title_url != ' ': #title_url = "https://"+title_url detail_page = requests.get(title_url) detail = lxml.html.fromstring(detail_page.content) careers = detail.cssselect('.jobpost_sider_jbinfo > div:nth-child(3) > dl:nth-child(2) > dd > div > div > em') if not careers: career = ' ' elif careers: career = careers[0].text academics = detail.cssselect('.jobpost_sider_jbinfo > div:nth-child(3) > dl:nth-child(3) > dd > div > div > em') if not academics: academic = ' ' elif academics: academic = academics[0].text working = detail.cssselect('.jobpost_sider_jbinfo > div.jobpost_sider_jbinfo_inlay.jobpost_sider_jbinfo_inlay_last > dl:nth-child(2) > dd > div > div.tooltip_layer_warp > ul > li') if not working: workingcondition = '' elif working: workingcondition = working[0].text areas = detail.cssselect('.jobpost_sider_jbinfo > div.jobpost_sider_jbinfo_inlay.jobpost_sider_jbinfo_inlay_last > dl:nth-child(3) > dd > div > div.inset_ely_lay') if not areas: area = ' ' if areas: area = areas[0].text area = area.split('> ')[0] deadlines = thing.cssselect('.ddays') if not deadlines: deadline = ' ' if deadlines: deadline = deadlines[0].text insert_sql = 'INSERT INTO re_info(company, title, title_url, site_name, field1, career, academic, area, workingcondition, deadline) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)' insert_val = company, title, title_url, site_name, field1, career, academic, area, workingcondition, deadline c.execute(insert_sql, insert_val) conn.commit() def main(): page_count = 6 crawling(page_count) conn.close() main()
py
1a4eb06d78366f109c5788ae6a7e25c64317c627
""" WSGI config for django-react-redux-jwt-base project. """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoreactredux.settings.dev") application = get_wsgi_application()
py
1a4eb0c01c281585f8261c52dd389408d7db337d
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Tree level output for python

This module defines functions that generate python code to make local
predictions
"""

from bigml.tree_utils import INDENT, COMPOSED_FIELDS

from bigml.predict_utils.common import missing_branch, \
    none_value, get_node, get_predicate, mintree_split

from bigml.generators.tree_common import value_to_print, map_data, \
    missing_prefix_code, filter_nodes, split_condition_code

MISSING_OPERATOR = {
    "=": "is",
    "!=": "is not"
}


def missing_check_code(tree, offsets, fields, objective_id, field,
                       depth, input_map, cmv, metric):
    """Builds the code to predict when the field is missing

    """
    code = "%sif (%s is None):\n" % \
        (INDENT * depth,
         map_data(fields[field]['slug'], input_map, True))
    node = get_node(tree)
    value = value_to_print(node[offsets["output"]],
                           fields[objective_id]['optype'])
    code += "%sreturn {\"prediction\": %s," \
        " \"%s\": %s}\n" % \
        (INDENT * (depth + 1), value, metric, node[offsets["confidence"]])
    cmv.append(fields[field]['slug'])
    return code


def plug_in_body(tree, offsets, fields, objective_id, regression,
                 depth=1, cmv=None, input_map=False,
                 ids_path=None, subtree=True):
    """Translate the model into a set of "if" python statements.

    `depth` controls the size of indentation. As soon as a value is
    missing that node is returned without further evaluation.

    """
    # label for the confidence measure and initialization
    metric = "error" if regression else "confidence"
    if cmv is None:
        cmv = []
    body = ""
    term_analysis_fields = []
    item_analysis_fields = []

    node = get_node(tree)
    children = [] if node[offsets["children#"]] == 0 else \
        node[offsets["children"]]
    children = filter_nodes(children, offsets, ids=ids_path,
                            subtree=subtree)
    if children:

        # field used in the split
        field = mintree_split(children)

        has_missing_branch = (missing_branch(children) or
                              none_value(children))
        # the missing is singled out as a special case only when there's
        # no missing branch in the children list
        one_branch = not has_missing_branch or \
            fields[field]['optype'] in COMPOSED_FIELDS
        if (one_branch and
                not fields[field]['slug'] in cmv):
            body += missing_check_code(tree, offsets, fields, objective_id,
                                       field, depth, input_map, cmv, metric)

        for child in children:
            [_, field, value, _, _] = get_predicate(child)
            pre_condition = ""
            # code when missing_splits has been used
            if has_missing_branch and value is not None:
                pre_condition = missing_prefix_code(child, fields, field,
                                                    input_map, cmv)

            # complete split condition code
            body += split_condition_code( \
                child, fields, depth, input_map, pre_condition,
                term_analysis_fields, item_analysis_fields, cmv)

            # value to be determined in next node
            next_level = plug_in_body(child, offsets, fields, objective_id,
                                      regression, depth + 1, cmv=cmv[:],
                                      input_map=input_map, ids_path=ids_path,
                                      subtree=subtree)
            body += next_level[0]
            term_analysis_fields.extend(next_level[1])
            item_analysis_fields.extend(next_level[2])
    else:
        value = value_to_print(node[offsets["output"]],
                               fields[objective_id]['optype'])
        body = "%sreturn {\"prediction\":%s, \"%s\":%s}\n" % ( \
            INDENT * depth, value, metric, node[offsets["confidence"]])

    return body, term_analysis_fields, item_analysis_fields
py
1a4eb1280b271c74046defe7a8c169593761ac52
from datetime import datetime
from typing import Union

from dateutil import tz


def format_iso_string(iso_string: str) -> str:
    utc_time = datetime.fromisoformat(iso_string)
    local_time = utc_time.astimezone(tz.tzlocal())
    return local_time.strftime("%Y-%m-%d %H:%M:%S")


def auto_unit(number: Union[int, float]) -> str:
    """
    Returns a human-readable formatted size
    credit: glances
    """
    if number is None:
        return "-"

    units = [
        (1208925819614629174706176, "Y"),
        (1180591620717411303424, "Z"),
        (1152921504606846976, "E"),
        (1125899906842624, "P"),
        (1099511627776, "T"),
        (1073741824, "G"),
        (1048576, "M"),
        (1024, "K"),
    ]

    for unit, suffix in units:
        value = float(number) / unit
        if value > 1:
            precision = 0
            if value < 10:
                precision = 2
            elif value < 100:
                precision = 1
            if suffix == "K":
                precision = 0
            return "{:.{decimal}f}{suffix}".format(
                value, decimal=precision, suffix=suffix)
    return "{!s}".format(number)
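# Illustrative usage, not part of the original module: auto_unit() walks the
# unit table above and uses the first suffix whose value exceeds 1, with the
# precision rules shown; format_iso_string() output depends on the local
# timezone of the machine running it.
if __name__ == "__main__":
    print(auto_unit(1572864))  # "1.50M"  (value < 10, so 2 decimals)
    print(auto_unit(2048))     # "2K"     (K is always printed with 0 decimals)
    print(auto_unit(512))      # "512"    (below 1 KiB, returned unchanged)
    print(format_iso_string("2021-06-01T12:00:00+00:00"))  # local wall-clock time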
py
1a4eb261f291b7d157f9cfc289d40b3a43b40d3f
#!/usr/bin/env python

import numpy as np
from misc import *
import matplotlib.pyplot as plt
from lalapps import pulsarpputils as pppu

"""
Script to try a toy ROQ model for a simple CW source
"""

def signalmodel(t, A, phi0, psi, ra, dec, det):
  """
  A time domain signal modulated by the antenna pattern.
  """
  fps = np.zeros(len(t))
  fcs = np.zeros(len(t))
  for i in range(len(t)):
    fp, fc = pppu.antenna_response(t[i], ra, dec, psi, det)
    fps[i] = fp
    fcs[i] = fc

  #return A*(fps + fcs)
  return A*(fps*np.sin(phi0) + fcs*np.cos(phi0))

# a time series
t0 = 900000000.
#tend = 900086400.
tend = t0 + 10.*86400.
N = (tend-t0)/(2*1440.)
ts = np.linspace(t0, tend, N)
dt = ts[1]-ts[0]

ra = 0.
dec = 0.
det = 'H1'

# number of training waveforms
TS_size = 1000
psis = np.random.rand(TS_size)*(np.pi/2.)-(np.pi/4.)
#psis = np.linspace(-np.pi/4., np.pi/4., TS_size)
phi0s = np.random.rand(TS_size)*(2.*np.pi)

# allocate memory and create training set
TS = np.zeros(TS_size*len(ts)).reshape(TS_size, len(ts)) # store training space in TS_size X len(ts) array

A = 1.
for i in range(TS_size):
  TS[i] = signalmodel(ts, A, phi0s[i], psis[i], ra, dec, det)
  # print TS[i]

  # normalize
  TS[i] /= np.sqrt(abs(dot_product(dt, TS[i], TS[i])))

# check for orthonormality
#idx1 = 23
#idx2 = 109
#orth = np.sum(np.multiply(TS[idx1]*dt, TS[idx2]))
#print orth

# Allocate storage for projection coefficients of training space waveforms onto the reduced basis elements
proj_coefficients = np.zeros(TS_size*TS_size).reshape(TS_size, TS_size)

# Allocate matrix to store the projection of training space waveforms onto the reduced basis
projections = np.zeros(TS_size*len(ts)).reshape(TS_size, len(ts))

rb_errors = []

#### Begin greedy: see Field et al. arXiv:1308.3565v2 ####
tolerance = 1e-12 # set maximum RB projection error
sigma = 1 # (2) of Algorithm 1. (projection error at 0th iteration)
rb_errors.append(sigma)
RB_matrix = [TS[0]] # (3) of Algorithm 1. (seed greedy algorithm (arbitrary))

iter = 0
#print TS

while sigma >= tolerance: # (5) of Algorithm 1.
  # project the whole training set onto the reduced basis set
  projections = project_onto_basis(dt, RB_matrix, TS, projections, proj_coefficients, iter)
  residual = TS - projections

  # Find projection errors
  projection_errors = [dot_product(dt, residual[i], residual[i]) for i in range(len(residual))]
  #print projection_errors
  sigma = abs(max(projection_errors)) # (7) of Algorithm 1. (Find largest projection error)
  if sigma < tolerance:
    break
  print sigma, iter
  index = np.argmax(projection_errors) # Find Training-space index of waveform with largest proj. error
  rb_errors.append(sigma)

  #Gram-Schmidt to get the next basis and normalize
  print index
  next_basis = TS[index] - projections[index] # (9) of Algorithm 1. (Gram-Schmidt)
  print next_basis.shape
  next_basis /= np.sqrt(abs(dot_product(dt, next_basis, next_basis))) #(10) of Alg 1. (normalize)

  RB_matrix.append(next_basis) # (11) of Algorithm 1. (append reduced basis set)
  print RB_matrix[iter].shape

  iter += 1

#print TS

#### Error check ####
TS_rand_size = 100
TS_rand = np.zeros(TS_rand_size*len(ts)).reshape(TS_rand_size, len(ts)) # Allocate random training space

psis_rand = np.random.rand(TS_rand_size)*(np.pi/2.)-(np.pi/4.)
phi0s_rand = np.random.rand(TS_rand_size)*(2.*np.pi)

for i in range(TS_rand_size):
  TS_rand[i] = signalmodel(ts, A, phi0s_rand[i], psis_rand[i], ra, dec, det)

  # normalize
  TS_rand[i] /= np.sqrt(abs(dot_product(dt, TS_rand[i], TS_rand[i])))

### find projection errors ###
iter = 0
proj_rand = np.zeros(len(ts))
proj_error = []

for h in TS_rand:
  while iter < len(RB_matrix):
    proj_coefficients_rand = dot_product(dt, RB_matrix[iter], h)
    proj_rand += proj_coefficients_rand*RB_matrix[iter]
    iter += 1

  residual = h - proj_rand
  projection_errors = abs(dot_product(dt, residual, residual))
  proj_error.append(projection_errors)

  proj_rand = np.zeros(len(ts))
  iter = 0

plt.scatter(np.linspace(0, len(proj_error), len(proj_error)), np.log10(proj_error))
plt.ylabel('log10 projection error')
plt.show()
py
1a4eb2870fe4663031b2a3b251171bd057787908
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import warnings from typing import Callable, Dict, Optional, Sequence, Tuple from google.api_core import grpc_helpers # type: ignore from google.api_core import gapic_v1 # type: ignore from google import auth # type: ignore from google.auth import credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore import grpc # type: ignore from google.ads.googleads.v4.resources.types import campaign_label from google.ads.googleads.v4.services.types import campaign_label_service from .base import CampaignLabelServiceTransport, DEFAULT_CLIENT_INFO class CampaignLabelServiceGrpcTransport(CampaignLabelServiceTransport): """gRPC backend transport for CampaignLabelService. Service to manage labels on campaigns. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. It sends protocol buffers over the wire using gRPC (which is built on top of HTTP/2); the ``grpcio`` package must be installed. """ def __init__( self, *, host: str = "googleads.googleapis.com", credentials: credentials.Credentials = None, credentials_file: str = None, scopes: Sequence[str] = None, channel: grpc.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if ``channel`` is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. channel (Optional[grpc.Channel]): A ``Channel`` instance through which to make calls. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from ``client_cert_source`` or applicatin default SSL credentials. client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. 
client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ self._ssl_channel_credentials = ssl_channel_credentials if channel: # Sanity check: Ensure that channel and credentials are not both # provided. credentials = False # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None elif api_mtls_endpoint: warnings.warn( "api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning, ) host = ( api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443" ) if credentials is None: credentials, _ = auth.default( scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id ) # Create SSL credentials with client_cert_source or application # default SSL credentials. if client_cert_source: cert, key = client_cert_source() ssl_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: ssl_credentials = SslCredentials().ssl_credentials # create a new channel. The provided one is ignored. self._grpc_channel = type(self).create_channel( host, credentials=credentials, credentials_file=credentials_file, ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._ssl_channel_credentials = ssl_credentials else: host = host if ":" in host else host + ":443" if credentials is None: credentials, _ = auth.default(scopes=self.AUTH_SCOPES) # create a new channel. The provided one is ignored. self._grpc_channel = type(self).create_channel( host, credentials=credentials, ssl_credentials=ssl_channel_credentials, scopes=self.AUTH_SCOPES, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._stubs = {} # type: Dict[str, Callable] # Run the base constructor. super().__init__( host=host, credentials=credentials, client_info=client_info, ) @classmethod def create_channel( cls, host: str = "googleads.googleapis.com", credentials: credentials.Credentials = None, scopes: Optional[Sequence[str]] = None, **kwargs, ) -> grpc.Channel: """Create and return a gRPC channel object. Args: address (Optionsl[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: grpc.Channel: A gRPC channel object. """ return grpc_helpers.create_channel( host, credentials=credentials, scopes=scopes or cls.AUTH_SCOPES, **kwargs, ) @property def grpc_channel(self) -> grpc.Channel: """Return the channel designed to connect to this service. 
""" return self._grpc_channel @property def get_campaign_label( self, ) -> Callable[ [campaign_label_service.GetCampaignLabelRequest], campaign_label.CampaignLabel, ]: r"""Return a callable for the get campaign label method over gRPC. Returns the requested campaign-label relationship in full detail. Returns: Callable[[~.GetCampaignLabelRequest], ~.CampaignLabel]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_campaign_label" not in self._stubs: self._stubs["get_campaign_label"] = self.grpc_channel.unary_unary( "/google.ads.googleads.v4.services.CampaignLabelService/GetCampaignLabel", request_serializer=campaign_label_service.GetCampaignLabelRequest.serialize, response_deserializer=campaign_label.CampaignLabel.deserialize, ) return self._stubs["get_campaign_label"] @property def mutate_campaign_labels( self, ) -> Callable[ [campaign_label_service.MutateCampaignLabelsRequest], campaign_label_service.MutateCampaignLabelsResponse, ]: r"""Return a callable for the mutate campaign labels method over gRPC. Creates and removes campaign-label relationships. Operation statuses are returned. Returns: Callable[[~.MutateCampaignLabelsRequest], ~.MutateCampaignLabelsResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "mutate_campaign_labels" not in self._stubs: self._stubs[ "mutate_campaign_labels" ] = self.grpc_channel.unary_unary( "/google.ads.googleads.v4.services.CampaignLabelService/MutateCampaignLabels", request_serializer=campaign_label_service.MutateCampaignLabelsRequest.serialize, response_deserializer=campaign_label_service.MutateCampaignLabelsResponse.deserialize, ) return self._stubs["mutate_campaign_labels"] __all__ = ("CampaignLabelServiceGrpcTransport",)
py
1a4eb2d8c19645535ec97113d0b8442fa1d9d280
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities, _tables from . import outputs __all__ = [ 'CertificateAuthorityCertificateAuthorityConfiguration', 'CertificateAuthorityCertificateAuthorityConfigurationSubject', 'CertificateAuthorityRevocationConfiguration', 'CertificateAuthorityRevocationConfigurationCrlConfiguration', 'CertificateValidity', 'GetCertificateAuthorityRevocationConfigurationResult', 'GetCertificateAuthorityRevocationConfigurationCrlConfigurationResult', ] @pulumi.output_type class CertificateAuthorityCertificateAuthorityConfiguration(dict): def __init__(__self__, *, key_algorithm: str, signing_algorithm: str, subject: 'outputs.CertificateAuthorityCertificateAuthorityConfigurationSubject'): """ :param str key_algorithm: Type of the public key algorithm and size, in bits, of the key pair that your key pair creates when it issues a certificate. Valid values can be found in the [ACM PCA Documentation](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CertificateAuthorityConfiguration.html). :param str signing_algorithm: Name of the algorithm your private CA uses to sign certificate requests. Valid values can be found in the [ACM PCA Documentation](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CertificateAuthorityConfiguration.html). :param 'CertificateAuthorityCertificateAuthorityConfigurationSubjectArgs' subject: Nested argument that contains X.500 distinguished name information. At least one nested attribute must be specified. """ pulumi.set(__self__, "key_algorithm", key_algorithm) pulumi.set(__self__, "signing_algorithm", signing_algorithm) pulumi.set(__self__, "subject", subject) @property @pulumi.getter(name="keyAlgorithm") def key_algorithm(self) -> str: """ Type of the public key algorithm and size, in bits, of the key pair that your key pair creates when it issues a certificate. Valid values can be found in the [ACM PCA Documentation](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CertificateAuthorityConfiguration.html). """ return pulumi.get(self, "key_algorithm") @property @pulumi.getter(name="signingAlgorithm") def signing_algorithm(self) -> str: """ Name of the algorithm your private CA uses to sign certificate requests. Valid values can be found in the [ACM PCA Documentation](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CertificateAuthorityConfiguration.html). """ return pulumi.get(self, "signing_algorithm") @property @pulumi.getter def subject(self) -> 'outputs.CertificateAuthorityCertificateAuthorityConfigurationSubject': """ Nested argument that contains X.500 distinguished name information. At least one nested attribute must be specified. 
""" return pulumi.get(self, "subject") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class CertificateAuthorityCertificateAuthorityConfigurationSubject(dict): def __init__(__self__, *, common_name: Optional[str] = None, country: Optional[str] = None, distinguished_name_qualifier: Optional[str] = None, generation_qualifier: Optional[str] = None, given_name: Optional[str] = None, initials: Optional[str] = None, locality: Optional[str] = None, organization: Optional[str] = None, organizational_unit: Optional[str] = None, pseudonym: Optional[str] = None, state: Optional[str] = None, surname: Optional[str] = None, title: Optional[str] = None): """ :param str common_name: Fully qualified domain name (FQDN) associated with the certificate subject. Must be less than or equal to 64 characters in length. :param str country: Two digit code that specifies the country in which the certificate subject located. Must be less than or equal to 2 characters in length. :param str distinguished_name_qualifier: Disambiguating information for the certificate subject. Must be less than or equal to 64 characters in length. :param str generation_qualifier: Typically a qualifier appended to the name of an individual. Examples include Jr. for junior, Sr. for senior, and III for third. Must be less than or equal to 3 characters in length. :param str given_name: First name. Must be less than or equal to 16 characters in length. :param str initials: Concatenation that typically contains the first letter of the `given_name`, the first letter of the middle name if one exists, and the first letter of the `surname`. Must be less than or equal to 5 characters in length. :param str locality: The locality (such as a city or town) in which the certificate subject is located. Must be less than or equal to 128 characters in length. :param str organization: Legal name of the organization with which the certificate subject is affiliated. Must be less than or equal to 64 characters in length. :param str organizational_unit: A subdivision or unit of the organization (such as sales or finance) with which the certificate subject is affiliated. Must be less than or equal to 64 characters in length. :param str pseudonym: Typically a shortened version of a longer `given_name`. For example, Jonathan is often shortened to John. Elizabeth is often shortened to Beth, Liz, or Eliza. Must be less than or equal to 128 characters in length. :param str state: State in which the subject of the certificate is located. Must be less than or equal to 128 characters in length. :param str surname: Family name. In the US and the UK for example, the surname of an individual is ordered last. In Asian cultures the surname is typically ordered first. Must be less than or equal to 40 characters in length. :param str title: A title such as Mr. or Ms. which is pre-pended to the name to refer formally to the certificate subject. Must be less than or equal to 64 characters in length. 
""" if common_name is not None: pulumi.set(__self__, "common_name", common_name) if country is not None: pulumi.set(__self__, "country", country) if distinguished_name_qualifier is not None: pulumi.set(__self__, "distinguished_name_qualifier", distinguished_name_qualifier) if generation_qualifier is not None: pulumi.set(__self__, "generation_qualifier", generation_qualifier) if given_name is not None: pulumi.set(__self__, "given_name", given_name) if initials is not None: pulumi.set(__self__, "initials", initials) if locality is not None: pulumi.set(__self__, "locality", locality) if organization is not None: pulumi.set(__self__, "organization", organization) if organizational_unit is not None: pulumi.set(__self__, "organizational_unit", organizational_unit) if pseudonym is not None: pulumi.set(__self__, "pseudonym", pseudonym) if state is not None: pulumi.set(__self__, "state", state) if surname is not None: pulumi.set(__self__, "surname", surname) if title is not None: pulumi.set(__self__, "title", title) @property @pulumi.getter(name="commonName") def common_name(self) -> Optional[str]: """ Fully qualified domain name (FQDN) associated with the certificate subject. Must be less than or equal to 64 characters in length. """ return pulumi.get(self, "common_name") @property @pulumi.getter def country(self) -> Optional[str]: """ Two digit code that specifies the country in which the certificate subject located. Must be less than or equal to 2 characters in length. """ return pulumi.get(self, "country") @property @pulumi.getter(name="distinguishedNameQualifier") def distinguished_name_qualifier(self) -> Optional[str]: """ Disambiguating information for the certificate subject. Must be less than or equal to 64 characters in length. """ return pulumi.get(self, "distinguished_name_qualifier") @property @pulumi.getter(name="generationQualifier") def generation_qualifier(self) -> Optional[str]: """ Typically a qualifier appended to the name of an individual. Examples include Jr. for junior, Sr. for senior, and III for third. Must be less than or equal to 3 characters in length. """ return pulumi.get(self, "generation_qualifier") @property @pulumi.getter(name="givenName") def given_name(self) -> Optional[str]: """ First name. Must be less than or equal to 16 characters in length. """ return pulumi.get(self, "given_name") @property @pulumi.getter def initials(self) -> Optional[str]: """ Concatenation that typically contains the first letter of the `given_name`, the first letter of the middle name if one exists, and the first letter of the `surname`. Must be less than or equal to 5 characters in length. """ return pulumi.get(self, "initials") @property @pulumi.getter def locality(self) -> Optional[str]: """ The locality (such as a city or town) in which the certificate subject is located. Must be less than or equal to 128 characters in length. """ return pulumi.get(self, "locality") @property @pulumi.getter def organization(self) -> Optional[str]: """ Legal name of the organization with which the certificate subject is affiliated. Must be less than or equal to 64 characters in length. """ return pulumi.get(self, "organization") @property @pulumi.getter(name="organizationalUnit") def organizational_unit(self) -> Optional[str]: """ A subdivision or unit of the organization (such as sales or finance) with which the certificate subject is affiliated. Must be less than or equal to 64 characters in length. 
""" return pulumi.get(self, "organizational_unit") @property @pulumi.getter def pseudonym(self) -> Optional[str]: """ Typically a shortened version of a longer `given_name`. For example, Jonathan is often shortened to John. Elizabeth is often shortened to Beth, Liz, or Eliza. Must be less than or equal to 128 characters in length. """ return pulumi.get(self, "pseudonym") @property @pulumi.getter def state(self) -> Optional[str]: """ State in which the subject of the certificate is located. Must be less than or equal to 128 characters in length. """ return pulumi.get(self, "state") @property @pulumi.getter def surname(self) -> Optional[str]: """ Family name. In the US and the UK for example, the surname of an individual is ordered last. In Asian cultures the surname is typically ordered first. Must be less than or equal to 40 characters in length. """ return pulumi.get(self, "surname") @property @pulumi.getter def title(self) -> Optional[str]: """ A title such as Mr. or Ms. which is pre-pended to the name to refer formally to the certificate subject. Must be less than or equal to 64 characters in length. """ return pulumi.get(self, "title") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class CertificateAuthorityRevocationConfiguration(dict): def __init__(__self__, *, crl_configuration: Optional['outputs.CertificateAuthorityRevocationConfigurationCrlConfiguration'] = None): """ :param 'CertificateAuthorityRevocationConfigurationCrlConfigurationArgs' crl_configuration: Nested argument containing configuration of the certificate revocation list (CRL), if any, maintained by the certificate authority. Defined below. """ if crl_configuration is not None: pulumi.set(__self__, "crl_configuration", crl_configuration) @property @pulumi.getter(name="crlConfiguration") def crl_configuration(self) -> Optional['outputs.CertificateAuthorityRevocationConfigurationCrlConfiguration']: """ Nested argument containing configuration of the certificate revocation list (CRL), if any, maintained by the certificate authority. Defined below. """ return pulumi.get(self, "crl_configuration") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class CertificateAuthorityRevocationConfigurationCrlConfiguration(dict): def __init__(__self__, *, expiration_in_days: int, custom_cname: Optional[str] = None, enabled: Optional[bool] = None, s3_bucket_name: Optional[str] = None): """ :param int expiration_in_days: Number of days until a certificate expires. Must be between 1 and 5000. :param str custom_cname: Name inserted into the certificate CRL Distribution Points extension that enables the use of an alias for the CRL distribution point. Use this value if you don't want the name of your S3 bucket to be public. Must be less than or equal to 253 characters in length. :param bool enabled: Boolean value that specifies whether certificate revocation lists (CRLs) are enabled. Defaults to `false`. :param str s3_bucket_name: Name of the S3 bucket that contains the CRL. If you do not provide a value for the `custom_cname` argument, the name of your S3 bucket is placed into the CRL Distribution Points extension of the issued certificate. You must specify a bucket policy that allows ACM PCA to write the CRL to your bucket. Must be less than or equal to 255 characters in length. 
""" pulumi.set(__self__, "expiration_in_days", expiration_in_days) if custom_cname is not None: pulumi.set(__self__, "custom_cname", custom_cname) if enabled is not None: pulumi.set(__self__, "enabled", enabled) if s3_bucket_name is not None: pulumi.set(__self__, "s3_bucket_name", s3_bucket_name) @property @pulumi.getter(name="expirationInDays") def expiration_in_days(self) -> int: """ Number of days until a certificate expires. Must be between 1 and 5000. """ return pulumi.get(self, "expiration_in_days") @property @pulumi.getter(name="customCname") def custom_cname(self) -> Optional[str]: """ Name inserted into the certificate CRL Distribution Points extension that enables the use of an alias for the CRL distribution point. Use this value if you don't want the name of your S3 bucket to be public. Must be less than or equal to 253 characters in length. """ return pulumi.get(self, "custom_cname") @property @pulumi.getter def enabled(self) -> Optional[bool]: """ Boolean value that specifies whether certificate revocation lists (CRLs) are enabled. Defaults to `false`. """ return pulumi.get(self, "enabled") @property @pulumi.getter(name="s3BucketName") def s3_bucket_name(self) -> Optional[str]: """ Name of the S3 bucket that contains the CRL. If you do not provide a value for the `custom_cname` argument, the name of your S3 bucket is placed into the CRL Distribution Points extension of the issued certificate. You must specify a bucket policy that allows ACM PCA to write the CRL to your bucket. Must be less than or equal to 255 characters in length. """ return pulumi.get(self, "s3_bucket_name") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class CertificateValidity(dict): def __init__(__self__, *, type: str, value: str): """ :param str type: Determines how `value` is interpreted. Valid values: `DAYS`, `MONTHS`, `YEARS`, `ABSOLUTE`, `END_DATE`. :param str value: If `type` is `DAYS`, `MONTHS`, or `YEARS`, the relative time until the certificate expires. If `type` is `ABSOLUTE`, the date in seconds since the Unix epoch. If `type` is `END_DATE`, the date in RFC 3339 format. """ pulumi.set(__self__, "type", type) pulumi.set(__self__, "value", value) @property @pulumi.getter def type(self) -> str: """ Determines how `value` is interpreted. Valid values: `DAYS`, `MONTHS`, `YEARS`, `ABSOLUTE`, `END_DATE`. """ return pulumi.get(self, "type") @property @pulumi.getter def value(self) -> str: """ If `type` is `DAYS`, `MONTHS`, or `YEARS`, the relative time until the certificate expires. If `type` is `ABSOLUTE`, the date in seconds since the Unix epoch. If `type` is `END_DATE`, the date in RFC 3339 format. 
""" return pulumi.get(self, "value") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class GetCertificateAuthorityRevocationConfigurationResult(dict): def __init__(__self__, *, crl_configurations: Sequence['outputs.GetCertificateAuthorityRevocationConfigurationCrlConfigurationResult']): pulumi.set(__self__, "crl_configurations", crl_configurations) @property @pulumi.getter(name="crlConfigurations") def crl_configurations(self) -> Sequence['outputs.GetCertificateAuthorityRevocationConfigurationCrlConfigurationResult']: return pulumi.get(self, "crl_configurations") @pulumi.output_type class GetCertificateAuthorityRevocationConfigurationCrlConfigurationResult(dict): def __init__(__self__, *, custom_cname: str, enabled: bool, expiration_in_days: int, s3_bucket_name: str): pulumi.set(__self__, "custom_cname", custom_cname) pulumi.set(__self__, "enabled", enabled) pulumi.set(__self__, "expiration_in_days", expiration_in_days) pulumi.set(__self__, "s3_bucket_name", s3_bucket_name) @property @pulumi.getter(name="customCname") def custom_cname(self) -> str: return pulumi.get(self, "custom_cname") @property @pulumi.getter def enabled(self) -> bool: return pulumi.get(self, "enabled") @property @pulumi.getter(name="expirationInDays") def expiration_in_days(self) -> int: return pulumi.get(self, "expiration_in_days") @property @pulumi.getter(name="s3BucketName") def s3_bucket_name(self) -> str: return pulumi.get(self, "s3_bucket_name")
py
1a4eb3de618904871250a1222d9f5547e4c97294
from restfly.iterator import APIIterator
from box import BoxList
from copy import copy


class OTIterator(APIIterator):
    _path = None
    limit = 500
    offset = 0

    def __init__(self, api, **kwargs):
        self._path = kwargs.pop('path')
        self._payload = kwargs.pop('payload', {})
        self.limit = kwargs.get('limit', self.limit)
        self.offset = kwargs.get('offset', self.offset)
        super(OTIterator, self).__init__(api, **kwargs)

    def _get_page(self):
        '''
        Retrieves the next page of data
        '''
        # if the size of the page is less than the limit, then we will simply
        # bail and let iterator stop.
        if self.num_pages > 0 and len(self.page) < self.limit:
            raise StopIteration()

        # make a copy of the payload (so not to pollute it) and then set the
        # offset and limits.
        p = copy(self._payload)
        p['offset'] = self.offset
        p['limit'] = self.limit

        # make the call and update the offset.
        self.page = self._api.post(self._path, json=p, box=BoxList)
        self.offset += self.limit
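# Hypothetical usage sketch, not part of the original module: `api` is assumed
# to be the restfly APISession subclass this iterator belongs to, and the
# 'assets' path, payload keys and `id` field are made-up examples.
#
# assets = OTIterator(api, path='assets', payload={'filters': []}, limit=100)
# for item in assets:
#     print(item.id)   # pages come back as BoxList, so items allow attribute access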
py
1a4eb435b5cf3bb8c20cf730609b4bf272e9d73b
#!/usr/bin/env python
import os
import sys

if __name__ == '__main__':
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'intered.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
py
1a4eb4c8944e8289c199638cc789d37d539e0ff8
# # Metrix++, Copyright 2009-2013, Metrix++ Project # Link: http://metrixplusplus.sourceforge.net # # This file is a part of Metrix++ Tool. # # Metrix++ is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, version 3 of the License. # # Metrix++ is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Metrix++. If not, see <http://www.gnu.org/licenses/>. # import logging import re import mpp.api import mpp.utils import mpp.cout class Plugin(mpp.api.Plugin, mpp.api.IConfigurable, mpp.api.IRunable): MODE_NEW = 0x01 MODE_TREND = 0x03 MODE_TOUCHED = 0x07 MODE_ALL = 0x15 def declare_configuration(self, parser): self.parser = parser parser.add_option("--hotspots", "--hs", default=None, help="If not set (none), all exceeded limits are printed." " If set, exceeded limits are sorted (the worst is the first) and only first HOTSPOTS limits are printed." " [default: %default]", type=int) parser.add_option("--disable-suppressions", "--ds", action="store_true", default=False, help = "If not set (none), all suppressions are ignored" " and associated warnings are printed. [default: %default]") parser.add_option("--warn-mode", "--wm", default='all', choices=['new', 'trend', 'touched', 'all'], help="Defines the warnings mode. " "'all' - all warnings active, " "'new' - warnings for new regions/files only, " "'trend' - warnings for new regions/files and for bad trend of modified regions/files, " "'touched' - warnings for new and modified regions/files " "[default: %default]") parser.add_option("--min-limit", "--min", action="multiopt", help="A threshold per 'namespace:field' metric in order to select regions, " "which have got metric value less than the specified limit. " "This option can be specified multiple times, if it is necessary to apply several limits. " "Should be in the format: <namespace>:<field>:<limit-value>, for example: " "'std.code.lines:comments:1'.") parser.add_option("--max-limit", "--max", action="multiopt", help="A threshold per 'namespace:field' metric in order to select regions, " "which have got metric value more than the specified limit. " "This option can be specified multiple times, if it is necessary to apply several limits. 
" "Should be in the format: <namespace>:<field>:<limit-value>, for example: " "'std.code.complexity:cyclomatic:7'.") def configure(self, options): self.hotspots = options.__dict__['hotspots'] self.no_suppress = options.__dict__['disable_suppressions'] if options.__dict__['warn_mode'] == 'new': self.mode = self.MODE_NEW elif options.__dict__['warn_mode'] == 'trend': self.mode = self.MODE_TREND elif options.__dict__['warn_mode'] == 'touched': self.mode = self.MODE_TOUCHED elif options.__dict__['warn_mode'] == 'all': self.mode = self.MODE_ALL if self.mode != self.MODE_ALL and options.__dict__['db_file_prev'] == None: self.parser.error("option --warn-mode: The mode '" + options.__dict__['warn_mode'] + "' requires '--db-file-prev' option set") class Limit(object): def __init__(self, limit_type, limit, namespace, field, db_filter): self.type = limit_type self.limit = limit self.namespace = namespace self.field = field self.filter = db_filter def __repr__(self): return "namespace '" + self.namespace + "', filter '" + str(self.filter) + "'" self.limits = [] pattern = re.compile(r'''([^:]+)[:]([^:]+)[:]([-+]?[0-9]+(?:[.][0-9]+)?)''') if options.__dict__['max_limit'] != None: for each in options.__dict__['max_limit']: match = re.match(pattern, each) if match == None: self.parser.error("option --max-limit: Invalid format: " + each) limit = Limit("max", float(match.group(3)), match.group(1), match.group(2), (match.group(2), '>', float(match.group(3)))) self.limits.append(limit) if options.__dict__['min_limit'] != None: for each in options.__dict__['min_limit']: match = re.match(pattern, each) if match == None: self.parser.error("option --min-limit: Invalid format: " + each) limit = Limit("min", float(match.group(3)), match.group(1), match.group(2), (match.group(2), '<', float(match.group(3)))) self.limits.append(limit) def initialize(self): super(Plugin, self).initialize() db_loader = self.get_plugin('mpp.dbf').get_loader() self._verify_namespaces(db_loader.iterate_namespace_names()) for each in db_loader.iterate_namespace_names(): self._verify_fields(each, db_loader.get_namespace(each).iterate_field_names()) def _verify_namespaces(self, valid_namespaces): valid = [] for each in valid_namespaces: valid.append(each) for each in self.limits: if each.namespace not in valid: self.parser.error("option --{0}-limit: metric '{1}:{2}' is not available in the database file.". format(each.type, each.namespace, each.field)) def _verify_fields(self, namespace, valid_fields): valid = [] for each in valid_fields: valid.append(each) for each in self.limits: if each.namespace == namespace: if each.field not in valid: self.parser.error("option --{0}-limit: metric '{1}:{2}' is not available in the database file.". 
format(each.type, each.namespace, each.field)) def iterate_limits(self): for each in self.limits: yield each def is_mode_matched(self, limit, value, diff, is_modified): if is_modified == None: # means new region, True in all modes return True if self.mode == self.MODE_ALL: return True if self.mode == self.MODE_TOUCHED and is_modified == True: return True if self.mode == self.MODE_TREND and is_modified == True: if limit < value and diff > 0: return True if limit > value and diff < 0: return True return False def run(self, args): return main(self, args) def main(plugin, args): exit_code = 0 loader_prev = plugin.get_plugin('mpp.dbf').get_loader_prev() loader = plugin.get_plugin('mpp.dbf').get_loader() paths = None if len(args) == 0: paths = [""] else: paths = args # Try to optimise iterative change scans modified_file_ids = None if plugin.mode != plugin.MODE_ALL: modified_file_ids = get_list_of_modified_files(loader, loader_prev) for path in paths: path = mpp.utils.preprocess_path(path) for limit in plugin.iterate_limits(): logging.info("Applying limit: " + str(limit)) filters = [limit.filter] if modified_file_ids != None: filters.append(('file_id', 'IN', modified_file_ids)) sort_by = None limit_by = None limit_warnings = None if plugin.hotspots != None: sort_by = limit.field if limit.type == "max": sort_by = "-" + sort_by if plugin.mode == plugin.MODE_ALL: # if it is not ALL mode, the tool counts number of printed warnings below limit_by = plugin.hotspots limit_warnings = plugin.hotspots selected_data = loader.load_selected_data(limit.namespace, fields = [limit.field], path=path, filters = filters, sort_by=sort_by, limit_by=limit_by) if selected_data == None: mpp.utils.report_bad_path(path) exit_code += 1 continue for select_data in selected_data: if limit_warnings != None and limit_warnings <= 0: break is_modified = None diff = None file_data = loader.load_file_data(select_data.get_path()) file_data_prev = loader_prev.load_file_data(select_data.get_path()) if file_data_prev != None: if file_data.get_checksum() == file_data_prev.get_checksum(): diff = 0 is_modified = False else: matcher = mpp.utils.FileRegionsMatcher(file_data, file_data_prev) prev_id = matcher.get_prev_id(select_data.get_region().get_id()) if matcher.is_matched(select_data.get_region().get_id()): if matcher.is_modified(select_data.get_region().get_id()): is_modified = True else: is_modified = False diff = mpp.api.DiffData(select_data, file_data_prev.get_region(prev_id)).get_data(limit.namespace, limit.field) if (plugin.is_mode_matched(limit.limit, select_data.get_data(limit.namespace, limit.field), diff, is_modified) == False): continue is_sup = is_metric_suppressed(limit.namespace, limit.field, loader, select_data) if is_sup == True and plugin.no_suppress == False: continue exit_code += 1 region_cursor = 0 region_name = None if select_data.get_region() != None: region_cursor = select_data.get_region().cursor region_name = select_data.get_region().name report_limit_exceeded(select_data.get_path(), region_cursor, limit.namespace, limit.field, region_name, select_data.get_data(limit.namespace, limit.field), diff, limit.limit, is_modified, is_sup) if limit_warnings != None: limit_warnings -= 1 return exit_code def get_list_of_modified_files(loader, loader_prev): logging.info("Identifying changed files...") old_files_map = {} for each in loader_prev.iterate_file_data(): old_files_map[each.get_path()] = each.get_checksum() if len(old_files_map) == 0: return None modified_file_ids = [] for each in loader.iterate_file_data(): if 
len(modified_file_ids) > 1000: # If more than 1000 files changed, skip optimisation return None if (each.get_path() not in old_files_map.keys()) or old_files_map[each.get_path()] != each.get_checksum(): modified_file_ids.append(str(each.get_id())) old_files_map = None if len(modified_file_ids) != 0: modified_file_ids = " , ".join(modified_file_ids) modified_file_ids = "(" + modified_file_ids + ")" return modified_file_ids return None def is_metric_suppressed(metric_namespace, metric_field, loader, select_data): data = loader.load_file_data(select_data.get_path()) if select_data.get_region() != None: data = data.get_region(select_data.get_region().get_id()) sup_data = data.get_data('std.suppress', 'list') else: sup_data = data.get_data('std.suppress.file', 'list') if sup_data != None and sup_data.find('[' + metric_namespace + ':' + metric_field + ']') != -1: return True return False def report_limit_exceeded(path, cursor, namespace, field, region_name, stat_level, trend_value, stat_limit, is_modified, is_suppressed): if region_name != None: message = "Metric '" + namespace + ":" + field + "' for region '" + region_name + "' exceeds the limit." else: message = "Metric '" + namespace + ":" + field + "' exceeds the limit." details = [("Metric name", namespace + ":" + field), ("Region name", region_name), ("Metric value", stat_level), ("Modified", is_modified), ("Change trend", '{0:{1}}'.format(trend_value, '+' if trend_value else '')), ("Limit", stat_limit), ("Suppressed", is_suppressed)] mpp.cout.notify(path, cursor, mpp.cout.SEVERITY_WARNING, message, details)
py
1a4eb64e4468c59965665718b7798d1d9f655002
# -*- coding: utf-8 -*-

import datapackage
import os

# filenames
FD_DIR = "../data/2020-02-21_fd/"
RAW_DIR = "../data/2020-02-21/"

files = os.listdir(FD_DIR)
fd_files = [os.path.join(FD_DIR, f) for f in files]
raw_files = []
for file in files:
    base = os.path.splitext(file)[0]
    path = os.path.join(RAW_DIR, base + ".txt")
    raw_files.append(path)

# compute number of spikes
for raw, fd in zip(raw_files, fd_files):
    with open(raw) as f:
        lines = f.readlines()

    # +1 might not be needed on Unix
    active_channels_raw = 0
    for i in range(len(lines) - 1):
        if lines[i] == "[ms] \t[µV] \t \n" and lines[i+1] != "\n":
            active_channels_raw += 1
    spikes_raw = (len(lines) - 60 * 4 - 2 + 1 + active_channels_raw) / 76

    package = datapackage.Package(fd)
    spikes_fd = len(package.get_resource("spikes").read())
    active_channels_fd = len(package.get_resource("spike-trains").read())

    assert spikes_raw == spikes_fd, \
        "Difference in number of spikes in file " + raw
    assert active_channels_raw == active_channels_fd, \
        "Difference in number of active channels in file " + raw
py
1a4eb6a2042360261de957c5ed500d6fccaf0edd
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # TopicDB documentation build configuration file, created by # sphinx-quickstart on Sat Dec 24 10:06:55 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation ROOT, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of STRING: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = 'TopicDB' copyright = '2016, Brett Alistair Kromkamp' author = 'Brett Alistair Kromkamp' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.1.0' # The full version, including alpha/beta/rc tags. release = '0.1.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'TopicDBdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). 
# # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'TopicDB.tex', 'TopicDB Documentation', 'Brett Alistair Kromkamp', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'topicdb', 'TopicDB Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'TopicDB', 'TopicDB Documentation', author, 'TopicDB', 'One line description of project.', 'Miscellaneous'), ]
py
1a4eb6bb6d21540bf2e4f0eafd2ca7015e31c910
import sys
sys.path.append("/")

import time
import os

from celery import Celery
from celery.utils.log import get_task_logger

from app.models.discount_code import DiscountCode
from app.manage import app as web_app
from app.worker.utils import generate_code

logger = get_task_logger(__name__)

app = Celery("tasks", broker=os.environ["CELERY_BROKER_URL"], backend="rpc://")


@app.task()
def generate_discount_codes(data):
    # use lazy %s formatting; passing `data` as a bare extra argument would
    # trigger a logging format error
    logger.info("Generating codes - starting work: %s", data)
    count = data.get("count", 0)
    brand_id = data["brand_id"]
    with web_app.app_context():
        for i in range(count):
            code = DiscountCode(code=generate_code(6), brand_id=brand_id)
            code.save()
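# Illustrative call site, not part of the original module: with the broker from
# CELERY_BROKER_URL reachable and a worker running (e.g. `celery -A tasks worker`),
# the task above can be queued asynchronously like this (brand id 1 is a
# placeholder).
#
# generate_discount_codes.delay({"brand_id": 1, "count": 100})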
py
1a4eb8fa7039716a6fb466db08cb567e40df6711
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import six import numpy as np import time import os import math import paddle import paddle.fluid as fluid import paddle.fluid.core as core import paddle.fluid.framework as framework from paddle.fluid.executor import Executor import data from args import * import lm_model import logging logging.basicConfig() import pickle def prepare_batch_input(batch, args): x = batch['token_ids'] x_r = batch['token_ids_reverse'] y = batch['next_token_id'] y_r = batch['next_token_id_reverse'] inst = [] for i in range(len(x)): if args.use_custom_samples: custom_samples_array = np.zeros( (args.num_steps, args.n_negative_samples_batch + 1), dtype='int64') custom_samples_array_r = np.zeros( (args.num_steps, args.n_negative_samples_batch + 1), dtype='int64') custom_probabilities_array = np.zeros( (args.num_steps, args.n_negative_samples_batch + 1), dtype='float32') for j in range(args.num_steps): for k in range(args.n_negative_samples_batch + 1): custom_samples_array[j][k] = k custom_samples_array_r[j][k] = k custom_probabilities_array[j][k] = 1.0 custom_samples_array[j][0] = y[i][j] custom_samples_array_r[j][0] = y_r[i][j] inst.append([ x[i], y[i], x_r[i], y_r[i], custom_samples_array, custom_samples_array_r, custom_probabilities_array ]) else: inst.append([x[i], y[i], x_r[i], y_r[i]]) return inst def batch_reader(batch_list, args): res = [] for batch in batch_list: res.append(prepare_batch_input(batch, args)) return res def read_multiple(reader, batch_size, count, clip_last=True): """ Stack data from reader for multi-devices. 
""" def __impl__(): # one time read batch_size * count data for rnn for data in reader(): inst_num_per_part = batch_size split_data = {} len_check = True for k in data.keys(): if data[k] is not None: if len(data[k]) != batch_size * count: len_check = False print("data check error!!, data=" + data[k] + ", k=" + k) break if len_check: res = [] for i in range(count): split_data = {} for k in data.keys(): if data[k] is not None: split_data[k] = data[k][inst_num_per_part * i:inst_num_per_part * (i + 1)] res.append(split_data) yield res return __impl__ def LodTensor_Array(lod_tensor): lod = lod_tensor.lod() array = np.array(lod_tensor) new_array = [] for i in range(len(lod[0]) - 1): new_array.append(array[lod[0][i]:lod[0][i + 1]]) return new_array def get_current_model_para(train_prog, train_exe): param_list = train_prog.block(0).all_parameters() param_name_list = [p.name for p in param_list] vals = {} for p_name in param_name_list: p_array = np.array(fluid.global_scope().find_var(p_name).get_tensor()) vals[p_name] = p_array return vals def save_para_npz(train_prog, train_exe): logger.info("begin to save model to model_base") param_list = train_prog.block(0).all_parameters() param_name_list = [p.name for p in param_list] vals = {} for p_name in param_name_list: p_array = np.array(fluid.global_scope().find_var(p_name).get_tensor()) vals[p_name] = p_array emb = vals["embedding_para"] logger.info("begin to save model to model_base") np.savez("mode_base", **vals) def prepare_input(batch, epoch_id=0, with_lr=True): x, y = batch inst = [] for i in range(len(x)): inst.append([x[i], y[i]]) return inst def eval(vocab, infer_progs, dev_count, logger, args): infer_prog, infer_startup_prog, infer_model = infer_progs feed_order = infer_model.feed_order loss = infer_model.loss # prepare device place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace() exe = Executor(place) if not args.use_gpu: place = fluid.CPUPlace() import multiprocessing dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count())) else: place = fluid.CUDAPlace(0) dev_count = fluid.core.get_cuda_device_count() total_loss = 0.0 total_cnt = 0 n_batch_cnt = 0 n_batch_loss = 0.0 val_feed_list = [ infer_prog.global_block().var(var_name) for var_name in feed_order ] val_feeder = fluid.DataFeeder(val_feed_list, place) dev_data = data.BidirectionalLMDataset( args.test_path, vocab, test=True, shuffle_on_load=False) dev_data_iter = lambda: dev_data.iter_batches(args.batch_size * dev_count, args.num_steps) dev_reader = read_multiple(dev_data_iter, args.batch_size, dev_count) last_hidden_values = np.zeros( (dev_count, args.num_layers * 2 * args.batch_size * args.embed_size), dtype='float32') last_cell_values = np.zeros( (dev_count, args.num_layers * 2 * args.batch_size * args.hidden_size), dtype='float32') for batch_id, batch_list in enumerate(dev_reader(), 1): feed_data = batch_reader(batch_list, args) feed = list(val_feeder.feed_parallel(feed_data, dev_count)) for i in range(dev_count): init_hidden_tensor = fluid.core.LoDTensor() if args.use_gpu: placex = fluid.CUDAPlace(i) else: placex = fluid.CPUPlace() init_hidden_tensor.set(last_hidden_values[i], placex) init_cell_tensor = fluid.core.LoDTensor() init_cell_tensor.set(last_cell_values[i], placex) feed[i]['init_hiddens'] = init_hidden_tensor feed[i]['init_cells'] = init_cell_tensor last_hidden_values = [] last_cell_values = [] for i in range(dev_count): val_fetch_outs = exe.run( program=infer_prog, feed=feed[i], fetch_list=[ infer_model.loss.name, infer_model.last_hidden.name, 
infer_model.last_cell.name ], return_numpy=False) last_hidden_values.append(np.array(val_fetch_outs[1])) last_cell_values.append(np.array(val_fetch_outs[2])) total_loss += np.array(val_fetch_outs[0]).sum() n_batch_cnt += len(np.array(val_fetch_outs[0])) total_cnt += len(np.array(val_fetch_outs[0])) n_batch_loss += np.array(val_fetch_outs[0]).sum() last_hidden_values = np.array(last_hidden_values).reshape(( dev_count, args.num_layers * 2 * args.batch_size * args.embed_size)) last_cell_values = np.array(last_cell_values).reshape( (dev_count, args.num_layers * 2 * args.batch_size * args.hidden_size)) log_every_n_batch = args.log_interval if log_every_n_batch > 0 and batch_id % log_every_n_batch == 0: logger.info('Average dev loss from batch {} to {} is {}'.format( batch_id - log_every_n_batch + 1, batch_id, "%.10f" % ( n_batch_loss / n_batch_cnt))) n_batch_loss = 0.0 n_batch_cnt = 0 batch_offset = 0 ppl = np.exp(total_loss / total_cnt) return ppl def train(): args = parse_args() if args.random_seed == 0: args.random_seed = None print("random seed is None") if args.enable_ce: random.seed(args.random_seed) np.random.seed(args.random_seed) logger = logging.getLogger("lm") logger.setLevel(logging.INFO) formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s') console_handler = logging.StreamHandler() console_handler.setLevel(logging.INFO) console_handler.setFormatter(formatter) logger.info('Running with args : {}'.format(args)) logger.info('Running paddle : {}'.format(paddle.version.commit)) hidden_size = args.hidden_size batch_size = args.batch_size data_path = args.data_path logger.info("begin to load vocab") vocab = data.Vocabulary(args.vocab_path, validate_file=True) vocab_size = vocab.size logger.info("finished load vocab") logger.info('build the model...') # build model train_prog = fluid.Program() train_startup_prog = fluid.Program() if args.enable_ce: train_prog.random_seed = args.random_seed train_startup_prog.random_seed = args.random_seed # build infer model infer_prog = fluid.Program() infer_startup_prog = fluid.Program() with fluid.program_guard(infer_prog, infer_startup_prog): with fluid.unique_name.guard(): # Infer process infer_model = lm_model.LanguageModel( args, vocab_size, test_mode=True) infer_model.build() infer_progs = infer_prog, infer_startup_prog, infer_model with fluid.program_guard(train_prog, train_startup_prog): with fluid.unique_name.guard(): # Training process train_model = lm_model.LanguageModel( args, vocab_size, test_mode=False) train_model.build() fluid.clip.set_gradient_clip( clip=fluid.clip.GradientClipByGlobalNorm( clip_norm=args.max_grad_norm)) # build optimizer if args.optim == 'adagrad': optimizer = fluid.optimizer.Adagrad( learning_rate=args.learning_rate, epsilon=0.0, initial_accumulator_value=1.0) elif args.optim == 'sgd': optimizer = fluid.optimizer.SGD( learning_rate=args.learning_rate) elif args.optim == 'adam': optimizer = fluid.optimizer.Adam( learning_rate=args.learning_rate) elif args.optim == 'rprop': optimizer = fluid.optimizer.RMSPropOptimizer( learning_rate=args.learning_rate) else: logger.error('Unsupported optimizer: {}'.format(args.optim)) exit(-1) optimizer.minimize(train_model.loss * args.num_steps) # initialize parameters place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace() exe = Executor(place) train_progs = train_prog, train_startup_prog, train_model if args.local: logger.info("local start_up:") train_loop(args, logger, vocab, train_progs, infer_progs, optimizer) else: if args.update_method == 
"nccl2": trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0")) if args.test_nccl: worker_endpoints_env = os.getenv("PADDLE_WORK_ENDPOINTS") worker_endpoints = worker_endpoints_env.split(',') trainers_num = len(worker_endpoints) current_endpoint = worker_endpoints[trainer_id] else: port = os.getenv("PADDLE_PORT") worker_ips = os.getenv("PADDLE_TRAINERS") worker_endpoints = [] for ip in worker_ips.split(","): worker_endpoints.append(':'.join([ip, port])) worker_endpoints_env = ','.join(worker_endpoints) trainers_num = len(worker_endpoints) current_endpoint = os.getenv("POD_IP") + ":" + port if trainer_id == 0: logger.info("train_id == 0, sleep 60s") time.sleep(60) logger.info("trainers_num:{}".format(trainers_num)) logger.info("worker_endpoints:{}".format(worker_endpoints)) logger.info("current_endpoint:{}".format(current_endpoint)) config = fluid.DistributeTranspilerConfig() config.mode = "nccl2" t = fluid.DistributeTranspiler(config=config) t.transpile( trainer_id, trainers=worker_endpoints_env, current_endpoint=current_endpoint, program=train_prog, startup_program=train_startup_prog) train_progs = train_prog, train_startup_prog, train_model train_loop(args, logger, vocab, train_progs, infer_progs, optimizer, trainers_num, trainer_id, worker_endpoints) else: port = os.getenv("PADDLE_PORT", "6174") pserver_ips = os.getenv("PADDLE_PSERVERS") eplist = [] for ip in pserver_ips.split(","): eplist.append(':'.join([ip, port])) pserver_endpoints = ",".join(eplist) trainers = int(os.getenv("PADDLE_TRAINERS_NUM", "0")) current_endpoint = os.getenv("POD_IP") + ":" + port trainer_id = int(os.getenv("PADDLE_TRAINER_ID")) logger.info("pserver_endpoints:{}".format(pserver_endpoints)) logger.info("current_endpoint:{}".format(current_endpoint)) logger.info("trainer_id:{}".format(trainer_id)) logger.info("pserver_ips:{}".format(pserver_ips)) logger.info("port:{}".format(port)) t = fluid.DistributeTranspiler() t.transpile( trainer_id, pservers=pserver_endpoints, trainers=trainers, program=train_prog, startup_program=startup_prog) if training_role == "PSERVER": logger.info("distributed: pserver started") current_endpoint = os.getenv("POD_IP") + ":" + os.getenv( "PADDLE_PORT") if not current_endpoint: logger.critical("need env SERVER_ENDPOINT") exit(1) pserver_prog = t.get_pserver_program(current_endpoint) pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) exe.run(pserver_startup) exe.run(pserver_prog) elif training_role == "TRAINER": logger.info("distributed: trainer started") trainer_prog = t.get_trainer_program() train_loop(args, logger, vocab, train_progs, infer_progs, optimizer) else: logger.critical( "environment var TRAINER_ROLE should be TRAINER os PSERVER") exit(1) def train_loop(args, logger, vocab, train_progs, infer_progs, optimizer, nccl2_num_trainers=1, nccl2_trainer_id=0, worker_endpoints=None): train_prog, train_startup_prog, train_model = train_progs infer_prog, infer_startup_prog, infer_model = infer_progs # prepare device place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace() exe = Executor(place) if not args.use_gpu: place = fluid.CPUPlace() import multiprocessing dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count())) else: place = fluid.CUDAPlace(0) dev_count = fluid.core.get_cuda_device_count() if args.load_dir: logger.info('load pretrained checkpoints from {}'.format(args.load_dir)) fluid.io.load_persistables(exe, args.load_dir, main_program=train_prog) elif args.load_pretraining_params: logger.info('load pretrained params from 
{}'.format(args.load_pretraining_params)) exe.run(train_startup_prog) init_pretraining_params(exe, args.load_pretraining_params, main_program=train_prog) else: exe.run(train_startup_prog) # prepare data feed_list = [ train_prog.global_block().var(var_name) for var_name in train_model.feed_order ] feeder = fluid.DataFeeder(feed_list, place) logger.info('Training the model...') exe_strategy = fluid.parallel_executor.ExecutionStrategy() parallel_executor = fluid.ParallelExecutor( loss_name=train_model.loss.name, main_program=train_prog, use_cuda=bool(args.use_gpu), exec_strategy=exe_strategy, num_trainers=nccl2_num_trainers, trainer_id=nccl2_trainer_id) logger.info("begin to load data") train_data = data.BidirectionalLMDataset( args.train_path, vocab, test=(not args.shuffle), shuffle_on_load=args.shuffle) logger.info("finished load vocab") # get train epoch size log_interval = args.log_interval total_time = 0.0 batch_size = args.batch_size hidden_size = args.hidden_size custom_samples_array = np.zeros( (batch_size, args.num_steps, args.n_negative_samples_batch + 1), dtype='int64') custom_probabilities_array = np.zeros( (batch_size, args.num_steps, args.n_negative_samples_batch + 1), dtype='float32') for i in range(batch_size): for j in range(0, args.num_steps): for k in range(0, args.n_negative_samples_batch + 1): custom_samples_array[i][j][k] = k custom_probabilities_array[i][j][k] = 1.0 start_time = time.time() train_data_iter = lambda: train_data.iter_batches(batch_size * dev_count, args.num_steps) train_reader = read_multiple(train_data_iter, batch_size, dev_count) total_num = 0 n_batch_loss = 0.0 n_batch_cnt = 0 last_hidden_values = np.zeros( (dev_count, args.num_layers * 2 * batch_size * args.embed_size), dtype='float32') last_cell_values = np.zeros( (dev_count, args.num_layers * 2 * batch_size * hidden_size), dtype='float32') n_tokens_per_batch = args.batch_size * args.num_steps n_batches_per_epoch = int(args.all_train_tokens / n_tokens_per_batch) n_batches_total = args.max_epoch * n_batches_per_epoch begin_time = time.time() for batch_id, batch_list in enumerate(train_reader(), 1): if batch_id > n_batches_total: break feed_data = batch_reader(batch_list, args) feed = list(feeder.feed_parallel(feed_data, dev_count)) for i in range(dev_count): init_hidden_tensor = fluid.core.LoDTensor() if args.use_gpu: placex = fluid.CUDAPlace(i) else: placex = fluid.CPUPlace() init_hidden_tensor.set(last_hidden_values[i], placex) init_cell_tensor = fluid.core.LoDTensor() init_cell_tensor.set(last_cell_values[i], placex) feed[i]['init_hiddens'] = init_hidden_tensor feed[i]['init_cells'] = init_cell_tensor fetch_outs = parallel_executor.run( feed=feed, fetch_list=[ train_model.loss.name, train_model.last_hidden.name, train_model.last_cell.name ], return_numpy=False) cost_train = np.array(fetch_outs[0]).mean() last_hidden_values = np.array(fetch_outs[1]) last_hidden_values = last_hidden_values.reshape( (dev_count, args.num_layers * 2 * batch_size * args.embed_size)) last_cell_values = np.array(fetch_outs[2]) last_cell_values = last_cell_values.reshape(( dev_count, args.num_layers * 2 * batch_size * args.hidden_size)) total_num += args.batch_size * dev_count n_batch_loss += np.array(fetch_outs[0]).sum() n_batch_cnt += len(np.array(fetch_outs[0])) if batch_id > 0 and batch_id % log_interval == 0: smoothed_ppl = np.exp(n_batch_loss / n_batch_cnt) ppl = np.exp( np.array(fetch_outs[0]).sum() / len(np.array(fetch_outs[0]))) used_time = time.time() - begin_time speed = log_interval / used_time logger.info( 
"[train] step:{}, loss:{:.3f}, ppl:{:.3f}, smoothed_ppl:{:.3f}, speed:{:.3f}". format(batch_id, n_batch_loss / n_batch_cnt, ppl, smoothed_ppl, speed)) n_batch_loss = 0.0 n_batch_cnt = 0 begin_time = time.time() if batch_id > 0 and batch_id % args.dev_interval == 0: valid_ppl = eval(vocab, infer_progs, dev_count, logger, args) logger.info("valid ppl {}".format(valid_ppl)) if batch_id > 0 and batch_id % args.save_interval == 0: model_path = os.path.join(args.para_save_dir, str(batch_id + epoch_id)) if not os.path.isdir(model_path): os.makedirs(model_path) fluid.io.save_persistables( executor=exe, dirname=model_path, main_program=train_prog) end_time = time.time() total_time += end_time - start_time epoch_id = int(batch_id/n_batches_per_epoch) model_path = os.path.join(args.para_save_dir, str(epoch_id)) if not os.path.isdir(model_path): os.makedirs(model_path) fluid.io.save_persistables( executor=exe, dirname=model_path, main_program=train_prog) valid_ppl = eval(vocab, infer_progs, dev_count, logger, args) logger.info("valid ppl {}".format(valid_ppl)) test_ppl = eval(vocab, infer_progs, dev_count, logger, args) if __name__ == '__main__': train()
py
1a4eb8fdf6daaa5df85eb3171c539d21013f4399
import copy import gym import os import sys import random import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from gym import wrappers from datetime import datetime from scipy.misc import imresize ##### testing only MAX_EXPERIENCES = 10000 MIN_EXPERIENCES = 1000 #MAX_EXPERIENCES = 500000 #MIN_EXPERIENCES = 50000 TARGET_UPDATE_PERIOD = 10000 IM_SIZE = 80 K = 4 #env.action_space.n def downsample_image(A): B = A[31:195] # select the important parts of the image B = B.mean(axis=2) # convert to grayscale # downsample image # changing aspect ratio doesn't significantly distort the image # nearest neighbor interpolation produces a much sharper image # than default bilinear B = imresize(B, size=(IM_SIZE, IM_SIZE), interp='nearest') return B def update_state(state, obs): obs_small = downsample_image(obs) return np.append(state[1:], np.expand_dims(obs_small, 0), axis=0) class DQN: def __init__(self, K, conv_layer_sizes, hidden_layer_sizes, gamma, scope): self.K = K self.scope = scope with tf.variable_scope(scope): # inputs and targets self.X = tf.placeholder(tf.float32, shape=(None, 4, IM_SIZE, IM_SIZE), name='X') # tensorflow convolution needs the order to be: # (num_samples, height, width, "color") # so we need to tranpose later self.G = tf.placeholder(tf.float32, shape=(None,), name='G') self.actions = tf.placeholder(tf.int32, shape=(None,), name='actions') # calculate output and cost # convolutional layers # these built-in layers are faster and don't require us to # calculate the size of the output of the final conv layer! Z = self.X / 255.0 Z = tf.transpose(Z, [0, 2, 3, 1]) for num_output_filters, filtersz, poolsz in conv_layer_sizes: Z = tf.contrib.layers.conv2d( Z, num_output_filters, filtersz, poolsz, activation_fn=tf.nn.relu ) # fully connected layers Z = tf.contrib.layers.flatten(Z) for M in hidden_layer_sizes: Z = tf.contrib.layers.fully_connected(Z, M) # final output layer self.predict_op = tf.contrib.layers.fully_connected(Z, K) selected_action_values = tf.reduce_sum( self.predict_op * tf.one_hot(self.actions, K), reduction_indices=[1] ) cost = tf.reduce_mean(tf.square(self.G - selected_action_values)) # self.train_op = tf.train.AdamOptimizer(1e-2).minimize(cost) # self.train_op = tf.train.AdagradOptimizer(1e-2).minimize(cost) # self.train_op = tf.train.RMSPropOptimizer(2.5e-4, decay=0.99, epsilon=1e-3).minimize(cost) self.train_op = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6).minimize(cost) # self.train_op = tf.train.MomentumOptimizer(1e-3, momentum=0.9).minimize(cost) # self.train_op = tf.train.GradientDescentOptimizer(1e-4).minimize(cost) self.cost = cost def copy_from(self, other): mine = [t for t in tf.trainable_variables() if t.name.startswith(self.scope)] mine = sorted(mine, key=lambda v: v.name) theirs = [t for t in tf.trainable_variables() if t.name.startswith(other.scope)] theirs = sorted(theirs, key=lambda v: v.name) ops = [] for p, q in zip(mine, theirs): actual = self.session.run(q) op = p.assign(actual) ops.append(op) self.session.run(ops) def set_session(self, session): self.session = session def predict(self, states): return self.session.run(self.predict_op, feed_dict={self.X: states}) def update(self, states, actions, targets): c, _ = self.session.run( [self.cost, self.train_op], feed_dict={ self.X: states, self.G: targets, self.actions: actions } ) return c def sample_action(self, x, eps): if np.random.random() < eps: return np.random.choice(self.K) else: return np.argmax(self.predict([x])[0]) def learn(model, target_model, 
experience_replay_buffer, gamma, batch_size): # Sample experiences samples = random.sample(experience_replay_buffer, batch_size) states, actions, rewards, next_states, dones = map(np.array, zip(*samples)) # Calculate targets next_Qs = target_model.predict(next_states) next_Q = np.amax(next_Qs, axis=1) targets = rewards + np.invert(dones).astype(np.float32) * gamma * next_Q # Update model loss = model.update(states, actions, targets) return loss def play_one( env, total_t, experience_replay_buffer, model, target_model, gamma, batch_size, epsilon, epsilon_change, epsilon_min): t0 = datetime.now() # Reset the environment obs = env.reset() obs_small = downsample_image(obs) state = np.stack([obs_small] * 4, axis=0) assert(state.shape == (4, 80, 80)) loss = None total_time_training = 0 num_steps_in_episode = 0 episode_reward = 0 done = False while not done: # Update target network if total_t % TARGET_UPDATE_PERIOD == 0: target_model.copy_from(model) print("Copied model parameters to target network. total_t = %s, period = %s" % (total_t, TARGET_UPDATE_PERIOD)) # Take action action = model.sample_action(state, epsilon) obs, reward, done, _ = env.step(action) obs_small = downsample_image(obs) next_state = np.append(state[1:], np.expand_dims(obs_small, 0), axis=0) # assert(state.shape == (4, 80, 80)) episode_reward += reward # Remove oldest experience if replay buffer is full if len(experience_replay_buffer) == MAX_EXPERIENCES: experience_replay_buffer.pop(0) # Save the latest experience experience_replay_buffer.append((state, action, reward, next_state, done)) # Train the model, keep track of time t0_2 = datetime.now() loss = learn(model, target_model, experience_replay_buffer, gamma, batch_size) dt = datetime.now() - t0_2 total_time_training += dt.total_seconds() num_steps_in_episode += 1 state = next_state total_t += 1 epsilon = max(epsilon - epsilon_change, epsilon_min) return total_t, episode_reward, (datetime.now() - t0), num_steps_in_episode, total_time_training/num_steps_in_episode, epsilon if __name__ == '__main__': # hyperparams and initialize stuff conv_layer_sizes = [(32, 8, 4), (64, 4, 2), (64, 3, 1)] hidden_layer_sizes = [512] gamma = 0.99 batch_sz = 32 num_episodes = 2 total_t = 0 experience_replay_buffer = [] episode_rewards = np.zeros(num_episodes) # epsilon # decays linearly until 0.1 epsilon = 1.0 epsilon_min = 0.1 epsilon_change = (epsilon - epsilon_min) / 500000 # Create environment env = gym.envs.make("Breakout-v0") # Create models model = DQN( K=K, conv_layer_sizes=conv_layer_sizes, hidden_layer_sizes=hidden_layer_sizes, gamma=gamma, scope="model") target_model = DQN( K=K, conv_layer_sizes=conv_layer_sizes, hidden_layer_sizes=hidden_layer_sizes, gamma=gamma, scope="target_model" ) with tf.Session() as sess: model.set_session(sess) target_model.set_session(sess) sess.run(tf.global_variables_initializer()) print("Populating experience replay buffer...") obs = env.reset() obs_small = downsample_image(obs) state = np.stack([obs_small] * 4, axis=0) # assert(state.shape == (4, 80, 80)) for i in range(MIN_EXPERIENCES): action = np.random.choice(K) obs, reward, done, _ = env.step(action) next_state = update_state(state, obs) # assert(state.shape == (4, 80, 80)) experience_replay_buffer.append((state, action, reward, next_state, done)) if done: obs = env.reset() obs_small = downsample_image(obs) state = np.stack([obs_small] * 4, axis=0) # assert(state.shape == (4, 80, 80)) else: state = next_state # Play a number of episodes and learn! 
for i in range(num_episodes): total_t, episode_reward, duration, num_steps_in_episode, time_per_step, epsilon = play_one( env, total_t, experience_replay_buffer, model, target_model, gamma, batch_sz, epsilon, epsilon_change, epsilon_min, ) episode_rewards[i] = episode_reward last_100_avg = episode_rewards[max(0, i - 100):i + 1].mean() print("Episode:", i, "Duration:", duration, "Num steps:", num_steps_in_episode, "Reward:", episode_reward, "Training time per step:", "%.3f" % time_per_step, "Avg Reward (Last 100):", "%.3f" % last_100_avg, "Epsilon:", "%.3f" % epsilon ) sys.stdout.flush()
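# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original script: a tiny numpy-only
# walk-through of the Q-learning target used in learn() above. The rewards,
# done flags and next-state Q values are made-up numbers; terminal
# transitions (done=True) contribute only their immediate reward, which is
# exactly what the np.invert(dones) factor does.
def _demo_q_targets():
    gamma = 0.99
    rewards = np.array([1.0, 0.0, 1.0], dtype=np.float32)
    dones = np.array([False, False, True])
    next_Qs = np.array([[0.2, 0.5],   # per-action Q values from the target net
                        [1.0, 0.3],
                        [0.7, 0.9]], dtype=np.float32)
    next_Q = np.amax(next_Qs, axis=1)  # greedy next-state value: [0.5, 1.0, 0.9]
    targets = rewards + np.invert(dones).astype(np.float32) * gamma * next_Q
    print(targets)  # -> [1.495, 0.99, 1.0]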
py
1a4eb91e39d561ae37229b6415feb7b1bc108321
import os os.environ['dev'] = '1' from dash_mess2 import dev_server if __name__ == '__main__': dev_server(debug=True)
py
1a4eba4b77661f8e83936a192304168ba7651863
class Error(Exception):
    """BlobDB error"""


class AmbiguousBlobStorageError(Error):
    """Ambiguous blob storage backend error"""


class BadName(Error):
    """Blob name error"""


class InvalidContext(Error):
    """Raised when code is executed outside a valid context"""


class NotFound(Error):
    """Raised when an attachment cannot be found"""
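# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module: because every
# exception above derives from Error, callers can catch the base class to
# handle any BlobDB failure uniformly while treating NotFound specially.
# fetch_blob() is a hypothetical callable used only for this illustration.
def _demo_error_handling(fetch_blob, name):
    try:
        return fetch_blob(name)
    except NotFound:
        return None  # a missing attachment is not fatal in this sketch
    except Error as exc:
        raise RuntimeError("BlobDB failure for %r" % name) from exc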
py
1a4eba70d3d0e7f89a579dec7b406f7884355f9f
import FWCore.ParameterSet.Config as cms genericTriggerEventFlag4fullTracker = cms.PSet( andOr = cms.bool( False ), dcsInputTag = cms.InputTag( "scalersRawToDigi" ), dcsPartitions = cms.vint32 ( 24, 25, 26, 27, 28, 29 ), andOrDcs = cms.bool( False ), errorReplyDcs = cms.bool( True ), ) genericTriggerEventFlag4onlyStrip = cms.PSet( andOr = cms.bool( False ), dcsInputTag = cms.InputTag( "scalersRawToDigi" ), dcsPartitions = cms.vint32 ( 24, 25, 26, 27 ), andOrDcs = cms.bool( False ), errorReplyDcs = cms.bool( True ), ) genericTriggerEventFlag4fullTrackerAndHLTdb = cms.PSet( andOr = cms.bool( False ), dcsInputTag = cms.InputTag( "scalersRawToDigi" ), dcsPartitions = cms.vint32 ( 24, 25, 26, 27, 28, 29 ), andOrDcs = cms.bool( False ), errorReplyDcs = cms.bool( True ), dbLabel = cms.string("SiStripDQMTrigger"), #("TrackerDQMTrigger"), andOrHlt = cms.bool(True),# True:=OR; False:=AND hltInputTag = cms.InputTag( "TriggerResults::HLT" ), hltPaths = cms.vstring(""), # HLT_ZeroBias_v* hltDBKey = cms.string("Tracking_HLT"), errorReplyHlt = cms.bool( False ), verbosityLevel = cms.uint32(1) ) genericTriggerEventFlag4fullTrackerAndHLTnoHIPnoOOTdb = cms.PSet( andOr = cms.bool( False ), dcsInputTag = cms.InputTag( "scalersRawToDigi" ), dcsPartitions = cms.vint32 ( 24, 25, 26, 27, 28, 29 ), andOrDcs = cms.bool( False ), errorReplyDcs = cms.bool( True ), dbLabel = cms.string("SiStripDQMTrigger"), #("TrackerDQMTrigger"), andOrHlt = cms.bool(True),# True:=OR; False:=AND hltInputTag = cms.InputTag( "TriggerResults::HLT" ), hltPaths = cms.vstring(""), # HLT_ZeroBias_FirstCollisionAfterAbortGap_* hltDBKey = cms.string("Tracking_HLT_noHIP_noOOT"), errorReplyHlt = cms.bool( False ), verbosityLevel = cms.uint32(1) ) genericTriggerEventFlag4fullTrackerAndHLTHIPnoOOTdb = cms.PSet( andOr = cms.bool( False ), dcsInputTag = cms.InputTag( "scalersRawToDigi" ), dcsPartitions = cms.vint32 ( 24, 25, 26, 27, 28, 29 ), andOrDcs = cms.bool( False ), errorReplyDcs = cms.bool( True ), dbLabel = cms.string("SiStripDQMTrigger"), #("TrackerDQMTrigger"), andOrHlt = cms.bool(True),# True:=OR; False:=AND hltInputTag = cms.InputTag( "TriggerResults::HLT" ), hltPaths = cms.vstring(""), # HLT_ZeroBias_FirstCollisionInTrain_* hltDBKey = cms.string("Tracking_HLT_HIP_noOOT"), errorReplyHlt = cms.bool( False ), verbosityLevel = cms.uint32(1) ) genericTriggerEventFlag4fullTrackerAndHLTHIPOOTdb = cms.PSet( andOr = cms.bool( False ), dcsInputTag = cms.InputTag( "scalersRawToDigi" ), dcsPartitions = cms.vint32 ( 24, 25, 26, 27, 28, 29 ), andOrDcs = cms.bool( False ), errorReplyDcs = cms.bool( True ), dbLabel = cms.string("SiStripDQMTrigger"), #("TrackerDQMTrigger"), andOrHlt = cms.bool(True),# True:=OR; False:=AND hltInputTag = cms.InputTag( "TriggerResults::HLT" ), hltPaths = cms.vstring(""), # HLT_ZeroBias_FirstBXAfterTrain_* hltDBKey = cms.string("Tracking_HLT_HIP_OOT"), errorReplyHlt = cms.bool( False ), verbosityLevel = cms.uint32(1) )
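# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original config: the PSets above leave
# hltPaths empty so that the actual paths come either from the DB key or from
# an override at the point of use. Assuming the usual PSet.clone() interface
# from FWCore.ParameterSet.Config, a consumer could specialise one of them
# like this (the clone name and the chosen path are examples only):
#
# genericTriggerEventFlag4fullTrackerAndHLTZeroBias = genericTriggerEventFlag4fullTrackerAndHLTdb.clone(
#     hltPaths = cms.vstring("HLT_ZeroBias_v*")
# )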
py
1a4eba98e047296c114ab10d3d58f566a7c64a12
# The actual simulation goes here # This is the main application framework for the Race Simulation which contains the MainWindow, # based on PyQt, and spawns a Qthread SimulationThread thread. Qt signals/slots are used to # communicate in both directions between them to control (start/pause/stop) and report results # between them. # # # To Execute: python3 simulation.py # # Dependencies: python3, PyQt5 etc. # # Description: MainWindow is created by the app, which in turn starts a SimulationThread thread. o # Note: the MainWindow is not a QMainWindow, rather a QWidget which allows for more flexibility # in placing controls, plots, etc. # The MainWindow contains user controls such push button (QPushButton) that when pressed, # emits a signal that is captured but the "slot" on the SimulationThread thread which acts on it # (thread_start_calculating). # Likewise, the SimulationThread thread emits various signals which are captured by associated slots # in the MainWindow and acted upon. # In either direction data (e.g. input parameters to the SimulationThread thread or results of # calculation from the SimulationThread thread) passed with emitted signal is then displayed on the # PushButton. # # This is based on : # https://stackoverflow.com/questions/52993677/how-do-i-setup-signals-and-slots-in-pyqt-with-qthreads-in-both-directions # Author: RMH 10/28/2020 # # Status: # 11/25/20 This version does NO simulating and provides only the very basic GUI framework # with a simple placeholder graph/plot, threading, and signalling between the thread and # the main window. # 12/1/20 Adding a data storage area to share between the SimulationThread and MainWindow thread # which incorporates a mutex mechanism (QReadWriteLock) to allow coordinating sharing of the # data which MainWindow will be consuming (reading). # 12/52/20 Manual merge in branch 'one-lock-rules-them-all' simulation code with the QThread # architecture framed in from the previous versions of this branch # USE ONLY SI UNITS import sys import time import logging from PyQt5.QtCore import * from PyQt5.QtGui import * from PyQt5.QtWidgets import * import pyqtgraph as pg import cProfile from datastore import (DataStore, RacingSimulationResults) from logging_config import configure_logging from physics_equations import (max_negative_power_physics_simulation, max_positive_power_physics_simulation, constrained_velocity_physics_simulation, ) from electric_car_properties import ElectricCarProperties from track_properties import (TrackProperties, high_plains_raceway) logger = logging.getLogger(__name__) class MainWindow(QWidget): # define the SIGNALs that MainWindow will send to other threads mainWindowStartCalculatingSignal = pyqtSignal(int) def __init__(self, *args, **kwargs): QWidget.__init__(self, parent=None) self.data_store = DataStore() logger.info("MainWindow: DataStore initialized", extra={'sim_index': self.data_store.get_simulation_index()}) # Create GUI related resources self.setWindowTitle('Race Simulation') # create the user play controls and data results graphs to run the simulation self.createUserDisplayControls() # create placeholders for the plots MainWindow will delivering (updating) # data into. 
self.graphs = pg.GraphicsLayoutWidget(show=True, title="Race Sim plots") self.graphs.resize(1000, 540) self.p1 = self.graphs.addPlot(name="Plot1", title="Time (s)") self.p2 = self.graphs.addPlot(name="Plot2", title="Distance (m)") self.p2.hide() self.p3 = self.graphs.addPlot(name="Plot3", title="Velocity (m/s)") self.p3.hide() self.p4 = self.graphs.addPlot(name="Plot4", title="Acceleration (m/s^2)") self.p4.hide() self.p5 = self.graphs.addPlot(name="Plot5", title="Motor Power") self.p5.hide() self.p6 = self.graphs.addPlot(name="Plot6", title="Battery Power") self.p6.hide() self.p7 = self.graphs.addPlot(name="Plot7", title="Battery Energy (joules)") self.p7.hide() # Links user X-coordinate movements of all plots together. Practically, there has # to be one plot they all link to, and in this case it's self.p1 (Time) b self.p2.setXLink(self.p1) self.p3.setXLink(self.p1) self.p4.setXLink(self.p1) self.p5.setXLink(self.p1) self.p6.setXLink(self.p1) self.p7.setXLink(self.p1) # Layout the major GUI components #self.layout = QtGui.QVBoxLayout() self.layout = QHBoxLayout() self.layout.addWidget(self.userDisplayControlsGroup) self.layout.addWidget(self.graphs) self.setLayout(self.layout) # Create the instances of our worker threads self.simulationThread = SimulationThread(self.data_store) self.plotRefreshTimingThread = PlotRefreshTimingThread() # Setup the SIGNALs to be received from the worker threads self.simulationThread.simulationThreadSignal.connect(self.signalRcvFromSimulationThread) self.plotRefreshTimingThread.plotRefreshTimingSignal.connect(self.signalPlotRefresh) # TODO - what mechanism and what to do when SimulationThread or dies like # refresh GUI and save/close results file?? #self.simulationThread.finished.connect(self.simulationThreadFinished) #self.simulationThread.terminated.connect(self.simulationThreadTerminated) # Now that the SimulationThread has been created (but not yet running), connect the # Button clicked in MainWindow - call a SimulationThread method to do something self.buttonRun.clicked.connect(self.createStartCalculatingSignal) self.buttonStop.clicked.connect(self.simulationThread.thread_stop_calculating) self.checkboxDistanceBreakpoint.clicked.connect(self.enableBreakpointSpinbox) self.simulationThread.start() self.plotRefreshTimingThread.start() def enableBreakpointSpinbox(self): if self.checkboxDistanceBreakpoint.isChecked() == True: self.spinboxDistanceBreakpoint.setEnabled(True) self.spinboxDistanceBreakpoint.setReadOnly(False) else: self.spinboxDistanceBreakpoint.setEnabled(False) self.spinboxDistanceBreakpoint.setReadOnly(True) def createStartCalculatingSignal(self): """ Send a SIGNAL to the simulation thread to start the simulation calculations. Based on the user's control settings in the GUI, figure out what "distance" value to send with the signal to Simulation Thread to start/continue simulation "distance" value sent to the SimulationThread is overload with these meanings: >0 distance in meters from the start on the track... 
=0 singlestep, <0 whole track, """ if self.checkboxDistanceBreakpoint.isChecked() == True: distance = self.spinboxDistanceBreakpoint.value() else: # No breakpoint indicated on GUI so run the whole track or # until user hits "pause" button distance = -1 # signal the thread self.simulationThread.thread_start_calculating(distance) def createUserDisplayControls(self): self.labelDisplayControl = QLabel("Display Control") # Note - FYI - created in the order the controls appear on screen self.labelStatus = QLabel("Status") self.textboxStatus = QLineEdit("Initialized", self) self.textboxStatus.setReadOnly(True) self.buttonRun = QPushButton('Run/Continue', self) self.buttonRun.setEnabled(True) self.buttonStop = QPushButton('Pause', self) self.buttonStop.setEnabled(True) self.checkboxDistanceBreakpoint = QCheckBox('Distance Breakpoint (m)', self) self.checkboxDistanceBreakpoint.setChecked(False) self.spinboxDistanceBreakpoint = QDoubleSpinBox() self.spinboxDistanceBreakpoint.setReadOnly(True) self.spinboxDistanceBreakpoint.setRange(0,999999) #outputs of simulation self.labelSimulationIndex = QLabel("Current Sim. Index") self.textboxSimulationIndex = QLineEdit("0",self) self.textboxSimulationIndex.setReadOnly(False) self.checkboxTime = QCheckBox('Time (s)', self) self.checkboxTime.setChecked(False) self.spinboxTime = QDoubleSpinBox() self.spinboxTime.setReadOnly(True) self.spinboxTime.setRange(0, 999999) self.checkboxDistance = QCheckBox('Distance (m)', self) self.checkboxDistance.setChecked(False) self.spinboxDistance = QDoubleSpinBox() self.spinboxDistance.setReadOnly(True) self.spinboxDistance.setRange(0,999999) self.checkboxVelocity = QCheckBox('Velocity (m/s)', self) self.checkboxVelocity.setChecked(False) self.spinboxVelocity = QDoubleSpinBox() self.spinboxVelocity.setReadOnly(True) self.spinboxVelocity.setRange(0,999999) self.checkboxAcceleration = QCheckBox('Acceleration (m/s^2)', self) self.checkboxAcceleration.setChecked(False) self.spinboxAcceleration = QDoubleSpinBox() self.spinboxAcceleration.setReadOnly(True) self.checkboxMotorPower = QCheckBox('Motor Power', self) self.checkboxMotorPower.setChecked(False) self.spinboxMotorPower = QDoubleSpinBox() self.spinboxMotorPower.setReadOnly(True) self.spinboxMotorPower.setRange(0,999999) self.checkboxBatteryPower = QCheckBox('Battery Power', self) self.checkboxBatteryPower.setChecked(False) self.spinboxBatteryPower = QDoubleSpinBox() self.spinboxBatteryPower.setReadOnly(True) self.spinboxBatteryPower.setRange(0,999999) self.checkboxBatteryEnergy = QCheckBox('Battery Energy (j)', self) self.checkboxBatteryEnergy.setChecked(False) self.spinboxBatteryEnergy = QDoubleSpinBox() self.spinboxBatteryEnergy.setReadOnly(True) self.spinboxBatteryEnergy.setRange(0,999999) #self.userDisplayControlsGroup = QtGui.QGroupBox('User Display Controls') self.userDisplayControlsGroup = QGroupBox('User Display Controls') #self.userDisplayControlsLayout= QtGui.QGridLayout() self.userDisplayControlsLayout= QGridLayout() self.userDisplayControlsLayout.addWidget(self.labelStatus, 0, 0) self.userDisplayControlsLayout.addWidget(self.textboxStatus, 0, 1) self.userDisplayControlsLayout.addWidget(self.buttonRun, 1, 0) self.userDisplayControlsLayout.addWidget(self.buttonStop, 1, 1) self.userDisplayControlsLayout.addWidget(self.checkboxDistanceBreakpoint, 2, 0) self.userDisplayControlsLayout.addWidget(self.spinboxDistanceBreakpoint, 2, 1) self.userDisplayControlsLayout.addWidget(self.labelSimulationIndex, 3, 0) 
self.userDisplayControlsLayout.addWidget(self.textboxSimulationIndex, 3, 1) self.userDisplayControlsLayout.addWidget(self.checkboxTime, 4, 0) self.userDisplayControlsLayout.addWidget(self.spinboxTime, 4, 1) self.userDisplayControlsLayout.addWidget(self.checkboxDistance, 5, 0) self.userDisplayControlsLayout.addWidget(self.spinboxDistance, 5, 1) self.userDisplayControlsLayout.addWidget(self.checkboxVelocity, 6, 0) self.userDisplayControlsLayout.addWidget(self.spinboxVelocity, 6, 1) self.userDisplayControlsLayout.addWidget(self.checkboxAcceleration, 7, 0) self.userDisplayControlsLayout.addWidget(self.spinboxAcceleration, 7, 1) self.userDisplayControlsLayout.addWidget(self.checkboxMotorPower, 8, 0) self.userDisplayControlsLayout.addWidget(self.spinboxMotorPower, 8, 1) self.userDisplayControlsLayout.addWidget(self.checkboxBatteryPower, 9, 0) self.userDisplayControlsLayout.addWidget(self.spinboxBatteryPower, 9, 1) self.userDisplayControlsLayout.addWidget(self.checkboxBatteryEnergy, 10, 0) self.userDisplayControlsLayout.addWidget(self.spinboxBatteryEnergy, 10, 1) self.userDisplayControlsGroup.setLayout(self.userDisplayControlsLayout) def simulationThreadResultsDataDisplay(self): # TODO placeholder for real work to be done when the SimulationThread (a simulationThread thread) # SIGNALs MainWindow new data is available in shared memory print('Window SIGNAL from SimulationThread: Results_data_ready') def simulationThreadFinished(self): # TODO placeholder for SimulationThread SIGNALs ??exiting # data is available in shared memory print('Window: SIGNAL From SimulationThread: Finished') def simulationThreadTerminated(self): # TODO placeholder for SimulationThread SIGNALs terminated print('Window: SIGNAL From SimulationThread: Terminated') """ Slots routines to handle SIGNALs sent to MainWindow from other threads """ @pyqtSlot(str) def signalRcvFromSimulationThread(self, text): #self.buttonRun.setText(text) self.textboxStatus.setText(text) @pyqtSlot() def signalPlotRefresh(self): #Display/update the window to display computation status, data, and plots selected by the user # This is called periodically because of the signal emitted from PlotRefreshTimingThread current_sim_index = (self.data_store.get_simulation_index()) logger.info("MainWindow:", extra={'sim_index': current_sim_index}) self.textboxSimulationIndex.setText("{}".format(current_sim_index)) """ Only refresh data if the simulations calculations have begun, indicated by current_sim-index > 0 Note: current_sim_index is descremented "-1" for the following calls because the lap_velocity_simulation calculations may be incomplete for the index when this "plot" signal was received and interrupted it. That is, the SimulationThread is/could be still updating a DataStore data (lists) records simulation_index and not all lists # have been calculated, so we should just plot upto the last complete record. 
""" if current_sim_index > 0 : # Get the current data values and update the corresponding display field textbox time = self.data_store.get_time_at_index(current_sim_index-1) self.spinboxTime.setValue(time) distance = self.data_store.get_distance_at_index(current_sim_index-1) self.spinboxDistance.setValue(distance) velocity = self.data_store.get_velocity_at_index(current_sim_index-1) self.spinboxVelocity.setValue(velocity) acceleration = self.data_store.get_acceleration_at_index(current_sim_index-1) self.spinboxAcceleration.setValue(acceleration) motor_power = self.data_store.get_motor_power_at_index(current_sim_index-1) self.spinboxMotorPower.setValue(motor_power) battery_power = self.data_store.get_battery_power_at_index(current_sim_index-1) self.spinboxBatteryPower.setValue(battery_power) # TBD not yet implemented in physics_equations #battery_energy = self.data_store.get_battery_energy_at_index(current_sim_index-1) #self.spinboxBatteryEnergy.setValue(battery_energy) # Display the data values # create a new plot for every point simulated so far x = [z for z in range(current_sim_index)] _time = [] _distance = [] _velocity = [] _max_velocity = [] _acceleration = [] _motor_power = [] _battery_power = [] _battery_energy = [] _time = self.data_store.get_time_list(current_sim_index) _distance = self.data_store.get_distance_list(current_sim_index) _velocity = self.data_store.get_velocity_list(current_sim_index) _max_velocity = self.data_store.get_track_max_velocity_list(current_sim_index) _acceleration = self.data_store.get_acceleration_list(current_sim_index) _motor_power = self.data_store.get_motor_power_list(current_sim_index) _battery_power = self.data_store.get_battery_power_list(current_sim_index) #TODO not yet implemented #_battery_energy = self.data_store.get_battery_energy_list(current_sim_index) self.p1.plot(x=x, y=_time, name="Plot1", title="Time") # selectively display the plots based on the checkboxes if self.checkboxDistance.isChecked() == True : self.p2.show() self.p2.plot(x=x, y=_distance, name="Plot2", title="Distance (m)") else: self.p2.hide() if self.checkboxVelocity.isChecked() == True : self.p3.show() self.p3.plot(x=x, y=_max_velocity, name="Plot3", title="Max Velocity (m/sec)", pen='r') self.p3.plot(x=x, y=_velocity, name="Plot3", title="Velocity (m/sec)") else: self.p3.hide() if self.checkboxAcceleration.isChecked() == True : self.p4.show() self.p4.plot(x=x, y=_acceleration, name="Plot4", title="Acceleration (m/sec^2)") else: self.p4.hide() if self.checkboxMotorPower.isChecked() == True : self.p5.show() self.p5.plot(x=x, y=_motor_power, name="Plot5", title="Motor Power") else: self.p5.hide() if self.checkboxBatteryPower.isChecked() == True : self.p6.show() self.p6.plot(x=x, y=_battery_power, name="Plot6", title="Battery Power") else: self.p6.hide() """TBD - to be added once Battery Energy is working in physics_equations if self.checkboxBatteryEnergy.isChecked() == True : self.p7.show() self.p7.plot(x=x, y=_battery_energy, name="Plot7", title="Battery Energy (joules)") else: self.p7.hide() """ class SimulationThread(QThread): # Define the Signals we'll be emitting to the MainWindow simulationThreadSignal = pyqtSignal(str) simulationThreadPlotSignal = pyqtSignal(int) breakpointDistance = 0 def __init__(self, passed_data_store, parent=None): QThread.__init__(self, parent) self.exiting = False self.setObjectName("SimulationThread") """ SimulationComputing is used for staring/stopping loop control logic which is controlled ( signalled) from the MainWindow. 
Start without compution in the simulationThread running """ self.simulationComputing = False self.breakpointDistance = 0 # Initialize the simulation universe self._data_store = passed_data_store self.initialize_race() #print('SimulationThread: __init()__') #print("SimulationThread: Simulation Index = {}".format(self._data_store.get_simulation_index())) #connect some signals from the main window to us #self.connect(self, QtCore.SIGNAL('To_End',self.processToEnd) def __del__(self): # Before a SimulationThread object is destroyed, we need to ensure that it stops processing. # For this reason, we implement the following method in a way that indicates to # the part of the object that performs the processing that it must stop, and waits # until it does so. self.exiting = True self.wait() # rotational inertia estimation: http://www.hpwizard.com/rotational-inertia.html def initialize_race(self): segment_distance = 0.005 # meters, this must be very very small battery_power = 40000 # 40kW motor_efficiency = 0.8 wheel_radius = 0.25 # m, ~20 in OD on tires rotational_inertia = 10 # kg*m^2 mass = 1000 # kg drag_coefficient = 0.4 frontal_area = 7 # m^2 air_density = 1 # kg/m^3 wheel_pressure_bar = 3 # bar track = TrackProperties() track.set_air_density(air_density) for distance in high_plains_raceway: track.add_critical_point(distance, high_plains_raceway[distance], track.FREE_ACCELERATION) track.generate_track_list(segment_distance) car = ElectricCarProperties() car.set_car_parameters(mass=mass, rotational_inertia=rotational_inertia, motor_power=battery_power, motor_efficiency=motor_efficiency, battery_capacity=10, drag_coefficient=drag_coefficient, frontal_area=frontal_area, wheel_radius=wheel_radius, wheel_pressure_bar=wheel_pressure_bar) self._data_store.initialize_lap_lists(len(track.distance_list)) self._data_store.set_car_properties(car) self._data_store.set_track_properties(track) """ SimulationThread signal handling routines. This is the collection of SLOTS that get signaled (emitted) from the MainWindow and tell the SimulationThread what to do, like change states and start calculating, pause, etc. """ @pyqtSlot() def thread_start_calculating(self, distance_value): """ This signal (slot) handler takes the distance value and updates SimulationThread computing state and interprets the distance_value into appropriate values for "breakpoints" to, if necessary, to stop computing. 
""" print("Breakpoint Distance value:{}".format(distance_value)) logger.info('Slot:thread_start_calculating :', extra={'sim_index': self._data_store.get_simulation_index()}) if distance_value == 0: logger.info('Slot:thread_start_calculating SINGLE STEP NOT IMPLEMENTED:', extra={'sim_index': self._data_store.get_simulation_index()}) #TODO - finish this breakpoint case self.simulationComputing = False elif distance_value == -1: logger.info('Slot:thread_start_calculating RUN TO COMPLETION :', extra={'sim_index': self._data_store.get_simulation_index()}) # set the breakpoint to be a very large number to indicate run to completion self.breakpointDistance = 9999999 self.simulationComputing = True else: # run to the distance value point in the track sim_index = self._data_store.get_simulation_index() if distance_value > self._data_store.get_distance_at_index(sim_index) : logger.info('Slot:thread_start_calculating RUN TO DISTANCE :', extra={'sim_index': sim_index}) # requested breakpoint is further down the track self.breakpointDistance = distance_value # Start computing and acknowledge to MainWindow by sending a signal back self.simulationThreadSignal.emit("Calculating...") # "state" variable indicating thread should be calculating self.simulationComputing = True else: logger.info('Slot:thread_start_calculating PAST REQUESTED DISTANCE :', extra={'sim_index': sim_index}) # simulation has already past this point in the track, don't proceed self.simulationComputing = False @pyqtSlot() def thread_stop_calculating(self): logger.info('Slot:thread_stop_calculating :', extra={'sim_index': self._data_store.get_simulation_index()}) # Now send a signal back to the main window self.simulationThreadSignal.emit("Paused") # "state" variable indicating thread should stop calculating self.simulationComputing = False def racing_simulation(self): """Function accepts a car and a track and executes a simulation to ouput critical metrics related to battery life and track speed. Args: Nothing, all required vars are defined in class Returns: Nothing, all required vars are defined in class """ results = RacingSimulationResults() self.lap_velocity_simulation() # only calculate results if the simulation ran through without an interruption if not self._data_store.exit_event.is_set(): lap_results = self._data_store.get_lap_results() # TODO fix this #results.laps_per_pit_stop = car["battery_capacity"] / lap_results.motor_energy_list[-1] results.lap_time = lap_results.end_velocity results.lap_results = lap_results self._data_store.set_race_results(results) def lap_velocity_simulation(self): """Function calculates the velocity profile of a car with car_properties on a track with track_properties. The car starts with an ititial velocity of initial_velocity. 
Args: data_store (DataStore): Thread safe storage for all simulation data Returns: Nothing (all data saved in the datastore) """ # performance increases by assigning local functions # https://towardsdatascience.com/10-techniques-to-speed-up-python-runtime-95e213e925dc add_physics_result_to_datastore = self._data_store.add_physics_results_to_lap_results get_velocity = self._data_store.get_velocity_at_index track = self._data_store.get_track_properties() air_density = track.get_air_density() car = self._data_store.get_car_properties() # need to populate the time profile be the same length as the distance list # to complete a lap of simulation list_len = len(track.distance_list) logger.debug('track.distance_list length={}'.format(list_len), extra={'sim_index': self._data_store.get_simulation_index()}) # TODO - Add self.simulationComputing to loop control to while while self._data_store.get_simulation_index() < list_len: # get the new index we are going to calculate sim_index = self._data_store.get_simulation_index() if self._data_store.exit_event.is_set(): break distance_of_travel = (track.distance_list[sim_index] - track.distance_list[sim_index - 1]) # only continue simulation computing if the GUI says to do so. if (self.simulationComputing == True and self.breakpointDistance > track.distance_list[sim_index]): velocity = get_velocity(sim_index - 1) physics_results = max_positive_power_physics_simulation(velocity, distance_of_travel, car, air_density) add_physics_result_to_datastore(physics_results, sim_index) # check if velocity constraints are violated if get_velocity(sim_index) > track.max_velocity_list[sim_index]: # velocity constraint violated!! # start walking back until velocity constraint at sim_index is met logger.info("velocity constraint violated starting walk back, current v: {}, max: {}" .format(physics_results.final_velocity, track.max_velocity_list[sim_index]), extra={'sim_index': self._data_store.get_simulation_index()}) max_velocity_constraint = track.max_velocity_list[sim_index] while get_velocity(sim_index) > max_velocity_constraint: """This while loop's purpose is to recalculate a portion of the car's car profile because the car ended up going too fast at a point on the track. To recalculate the following happens: 1. a "walk back" index is used to track how far back the recalculation occurs 2. from the index (sim_index - walk_back_index) to (sim_index - 1) the results are calculated as a maximum regeneration effort by the motor 3. 
at the sim_index the results are calculated as a constrained velocity - if the results of the calculation are realistic then the walk back is done - if the results are not realistic then increment the walk back counter and recalculate """ walk_back_counter = self._data_store.get_walk_back_counter() recalculation_start_index = sim_index - walk_back_counter logger.debug("starting and ending walkback index: {}, {}" .format(recalculation_start_index, sim_index), extra={'sim_index': self._data_store.get_simulation_index()}) for i in range(recalculation_start_index, sim_index): velocity = get_velocity(i - 1) logger.debug("velocity: {}" .format(velocity), extra={'sim_index': i}) # recalculate with negative motor power physics_results = max_negative_power_physics_simulation(velocity, distance_of_travel, car, air_density) logger.debug("next velocity: {}" .format(physics_results.final_velocity), extra={'sim_index': i}) add_physics_result_to_datastore(physics_results, i) velocity = get_velocity(sim_index - 1) # last deceleration will be a constrained velocity because # it will be neither max positive or negative motor power physics_results = \ constrained_velocity_physics_simulation(velocity, max_velocity_constraint, distance_of_travel, car, air_density) logger.debug("velocity start, end, max: {} {} {}" .format(velocity, physics_results.final_velocity, max_velocity_constraint), extra={'sim_index': sim_index}) # check if constrained velocity calculation is realistic # TODO other checks here can be on acceleration or wheel force if physics_results.motor_power < -car["motor_power"]: logger.debug( "velocity constraint still violated, calculated power: {}, max power: {}" .format(physics_results.motor_power, car["motor_power"]), extra={'sim_index': sim_index}) logger.debug("sim_index, walkback: {} {}, incrementing walk back" .format(sim_index, walk_back_counter), extra={'sim_index': sim_index}) self._data_store.increment_walk_back_counter() else: logger.info( "velocity constraint accepted, calculated power: {}, max power: {}" .format(physics_results.motor_power, car["motor_power"]), extra={'sim_index': sim_index}) logger.info("constrained velocity equation accepted", extra={'sim_index': sim_index}) add_physics_result_to_datastore(physics_results, sim_index) #end of while while get_velocity(sim_index) > max_velocity_constraint: # walk back complete, reset walk back index for next time self._data_store.reset_walk_back_counter() # completed calculation for the latest simulation index, self._data_store.increment_simulation_index() else: # self.simulationComputing is False or we've reached a breakpoint, # so wait for GUI user to indicate proceed if self.simulationComputing == True : # if we're computing and got here, we must have hit a breakpoint, therefore pause # Now send a signal back to the main window self.simulationThreadSignal.emit("Paused") # "state" variable indicating thread should stop calculating self.simulationComputing = False #else: # we've began not computing or a breakpoint already has sent us there # so do nothing more than waitk # in any case, wait until user gives us a new condition to continue computing time.sleep(1.0) logger.debug("waiting for simulationComputing==True", extra={'sim_index': sim_index}) # end of while data_store.get_simulation_index() < list_len: logger.info("SIMULATION COMPLETE!", extra={'sim_index': 'N/A'}) self.simulationThreadSignal.emit("Finished!") self._data_store.exit_event.set() def run(self): # Note: This is never called directly. 
It is called by Qt once the
        # thread environment with the thread's start() method has been setup,
        # and then runs "continuously"
        logger.info("SimulationThread: entering cProfile.runctx() ",
                    extra={'sim_index': 'N/A'})

        # profiling tool, look at results with runsnake:
        # https://kupczynski.info/2015/01/16/profiling-python-scripts.html
        # this has relatively little overhead for the overall runtime of the program
        # I have only been able to get the runsnake files to work on linux
        # alternative profile results viewer for windows (untried): https://sourceforge.net/projects/qcachegrindwin/
        cProfile.runctx("self.racing_simulation()", globals(), locals(), 'profile-simulation.out')


class PlotRefreshTimingThread(QThread):
    # Thread responsible for a periodic signal to the MainWindow which, when received, causes
    # MainWindow to refresh its plots.

    # Define the Signals we'll be emitting to the MainWindow
    plotRefreshTimingSignal = pyqtSignal()

    # start without computation in the simulationThread running
    def __init__(self, parent=None):
        QThread.__init__(self, parent)
        self.exiting = False

        logger.info("PlotRefreshTimingThread: __init__()", extra={'sim_index': 'N/A'})

        # TODO connect some signals from the main window to us
        #self.connect(self, QtCore.SIGNAL('To_End',self.processToEnd)

    def __del__(self):
        # Before a PlotRefreshTimingThread object is destroyed, we need to ensure that it stops
        # processing. For this reason, we implement the following method in a way that
        # indicates to the part of the object that performs the processing that it must stop,
        # and waits until it does so.
        self.exiting = True
        self.wait()

    def run(self):
        # Note: This is never called directly. It is called by Qt once the
        # thread environment with the thread's start() method has been setup,
        # and then runs "continuously" to do the work of the thread as its main
        # processing loop

        logger.info("PlotRefreshTimingThread: entering while()", extra={'sim_index': 'N/A'})

        while True:
            time.sleep(5.0)
            self.plotRefreshTimingSignal.emit()


if __name__ == "__main__":
    MainApp = QApplication(sys.argv)
    configure_logging()
    window = MainWindow()
    window.show()
    sys.exit(cProfile.runctx("MainApp.exec_()", globals(), locals(), 'profile-display.out'))
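# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: a self-contained toy
# version of the "walk back" idea used in lap_velocity_simulation() above.
# It ignores the real physics_equations and DataStore and only uses
# constant-acceleration kinematics; the segment length, acceleration and
# braking values below are made-up numbers.
def _demo_walk_back(max_velocity, d=5.0, a_accel=3.0, a_brake=-6.0, v0=0.0):
    import math
    v = [v0]
    for i in range(1, len(max_velocity)):
        # accelerate as hard as possible into the next point
        v.append(math.sqrt(v[i - 1] ** 2 + 2 * a_accel * d))
        if v[i] > max_velocity[i]:
            # constraint violated: cap this point, then walk back and apply
            # maximum braking to earlier points until the profile is feasible
            v[i] = max_velocity[i]
            j = i
            while j > 0:
                v_brake = math.sqrt(v[j] ** 2 - 2 * a_brake * d)
                if v[j - 1] <= v_brake:
                    break  # the earlier speed can already brake down in time
                v[j - 1] = v_brake
                j -= 1
    return v

# Example (hypothetical 10-point track with a slow corner at index 5):
#   _demo_walk_back([60, 60, 60, 60, 60, 5, 60, 60, 60, 60])
# ramps the speed up, caps it at the corner, and lowers the point just before
# the corner so the car can brake down to the 5 m/s limit in time.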
py
1a4ebb037c1061b54644719c677a42c48c3e33e0
""" Plot a geodesic of SO(3) equipped with its left-invariant canonical METRIC. """ import matplotlib.pyplot as plt import numpy as np import os import geomstats.visualization as visualization from geomstats.special_orthogonal_group import SpecialOrthogonalGroup SO3_GROUP = SpecialOrthogonalGroup(n=3) METRIC = SO3_GROUP.bi_invariant_metric def main(): initial_point = SO3_GROUP.identity initial_tangent_vec = [0.5, 0.5, 0.8] geodesic = METRIC.geodesic(initial_point=initial_point, initial_tangent_vec=initial_tangent_vec) n_steps = 10 t = np.linspace(0, 1, n_steps) points = geodesic(t) visualization.plot(points, space='SO3_GROUP') plt.show() if __name__ == "__main__": if os.environ['GEOMSTATS_BACKEND'] == 'tensorflow': print('Examples with visualizations are only implemented ' 'with numpy backend.\n' 'To change backend, write: ' 'export GEOMSTATS_BACKEND = \'numpy\'.') else: main()
py
1a4ebb07399da0d9bbf065faae79a040bd0de2db
from __future__ import division import random import pprint import sys import time import numpy as np import cv2 from optparse import OptionParser import pickle import os import traceback from keras import backend as K from keras.optimizers import Adam, SGD, RMSprop from keras.layers import Input from keras.models import Model, load_model, model_from_json from keras_frcnn import config, data_generators from keras_frcnn import losses as losses import keras_frcnn.roi_helpers as roi_helpers from keras.utils import generic_utils if 'tensorflow' == K.backend(): import tensorflow as tf from keras.backend.tensorflow_backend import set_session config2 = tf.ConfigProto() config2.gpu_options.allow_growth = True set_session(tf.Session(config=config2)) sys.setrecursionlimit(40000) def kl_div(P, Q): return np.nansum([p * np.log2(p / (q + 1e-8)) for p, q in zip(P, Q) if p != 0]) def js_distance(P, Q): M = 0.5 * (P + Q) return np.sqrt(0.5 * kl_div(P, M) + 0.5 * kl_div(Q, M)) def get_optimal_alpha(p_img, p_curr, rule_mode = "max"): js_dist_list = [js_distance(p_img[0,i,:], p_curr[0,i,:]) for i in range(p_img.shape[1])] if rule_mode == "max": dist_diff = np.nanmax(js_dist_list) elif rule_mode == "min": dist_diff = np.nanmin(js_dist_list) else: dist_diff = np.nanmean(js_dist_list) return np.max([alpha_final, dist_diff / (1 - dist_diff + 1e-8)]) def make_target_probas(p_img, p_curr, alpha, constrain_hard_examples = False): target_probas = (np.log(p_curr[0] + 1e-8) + alpha * np.log(p_img[0] + 1e-8)) / (1 + alpha) target_probas = np.exp(target_probas) / np.exp(target_probas).sum(axis = 1)[:, None] idx = [] if constrain_hard_examples: # Confident predictions in img_classifier idx_conf = np.where(p_img[0] >= 0.90) target_probas[idx_conf[0],:] = 0 target_probas[idx_conf] = 1 # Easy predictions (agreement between img and current) idx_agree = np.where((p_img[0].argmax(1) == p_curr[0].argmax(1)) & (p_curr[0].max(1) >= 0.50))[0] cols_agree = p_curr[0].argmax(1)[idx_agree] target_probas[idx_agree,:] = 0 target_probas[idx_agree, cols_agree] = 1 idx = np.unique(idx_conf[0].tolist() + idx_agree.tolist()).tolist() return np.expand_dims(target_probas, axis = 0), idx def make_target_bbs(bb_curr, bb_phase1, alpha): target_bbs = (bb_curr + alpha * bb_phase1) / (1 + alpha) return target_bbs def get_img_probas(img_path, P_cls, P_regr, ROIs, C, f): img = cv2.imread(img_path) new_height = 299 new_width = 299 img_probas = np.zeros((P_cls.shape[1], len(class_mapping))) for ii in range(P_cls.shape[1]): (x, y, w, h) = ROIs[0, ii, :] cls_num = np.argmax(P_cls[0, ii, :]) try: (tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)] tx /= C.classifier_regr_std[0] ty /= C.classifier_regr_std[1] tw /= C.classifier_regr_std[2] th /= C.classifier_regr_std[3] x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th) except: pass # Get the true BB coordinates x1, y1, x2, y2 = C.rpn_stride * x, C.rpn_stride * y, C.rpn_stride * (x + w), C.rpn_stride * (y + h) x1, y1, x2, y2 = data_generators.get_real_coordinates(f, x1, y1, x2, y2) # Get the probabilities from the image classifier cropped_img = img[y1:y2, x1:x2, :] x_resized = cv2.resize(np.copy(cropped_img), (int(new_width), int(new_height)), interpolation = cv2.INTER_CUBIC) x_resized = x_resized / 255. 
x_resized = np.expand_dims(x_resized, axis = 0) img_probas[ii, :] = img_classifier.predict(x_resized)[0] return np.expand_dims(img_probas, axis = 0) def rpn_to_class_inputs(X, img_data, C, mode = "source", eps = 0.05): [Y1, Y2] = model_rpn.predict(X) R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), use_regr = True, overlap_thresh = 0.4, max_boxes = 300) X2, Y1, Y2, _ = roi_helpers.calc_iou(R, img_data, C, class_mapping, mode, eps) if X2 is None: rpn_accuracy_rpn_monitor.append(0) rpn_accuracy_for_epoch.append(0) raise NameError('No quality ROIs in X2. Training on another sample') neg_samples = np.where(Y1[0, :, :].argmax(1) == len(class_mapping) - 1) pos_samples = np.where(Y1[0, :, :].argmax(1) != len(class_mapping) - 1) if len(neg_samples) > 0: neg_samples = neg_samples[0] else: neg_samples = [] if len(pos_samples) > 0: pos_samples = pos_samples[0] else: pos_samples = [] rpn_accuracy_rpn_monitor.append(len(pos_samples)) rpn_accuracy_for_epoch.append((len(pos_samples))) if C.num_rois > 1: if len(pos_samples) < C.num_rois//2: selected_pos_samples = pos_samples.tolist() else: selected_pos_samples = np.random.choice(pos_samples, C.num_rois//2, replace=False).tolist() try: selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=False).tolist() except: selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=True).tolist() sel_samples = selected_pos_samples + selected_neg_samples else: # In the extreme case where num_rois = 1, we pick a random pos or neg sample selected_pos_samples = pos_samples.tolist() selected_neg_samples = neg_samples.tolist() if np.random.randint(0, 2): sel_samples = random.choice(neg_samples) else: sel_samples = random.choice(pos_samples) X2 = X2[:, sel_samples, :] Y1 = Y1[:, sel_samples, :] Y2 = Y2[:, sel_samples, :] return X2, Y1, Y2, len(selected_pos_samples) def get_target_img_data(X_target, img_data, alpha, constrain_hard_examples = False, use_optimal_alpha = False): [Y1, Y2, F] = phase1_rpn.predict(X_target) R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh = 0.7) # convert from (x1,y1,x2,y2) to (x,y,w,h) R[:, 2] -= R[:, 0] R[:, 3] -= R[:, 1] # apply the spatial pyramid pooling to the proposed regions bboxes = {} probs = {} all_probs = {} for jk in range(R.shape[0] // C.num_rois + 1): ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis = 0) if ROIs.shape[1] == 0: break if jk == R.shape[0] // C.num_rois: # Pad R curr_shape = ROIs.shape target_shape = (curr_shape[0], C.num_rois, curr_shape[2]) ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype) ROIs_padded[:, :curr_shape[1], :] = ROIs ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :] ROIs = ROIs_padded # Make predictions with current FRCNN and phase 1 detector [_, P_regr_phase1] = phase1_classifier.predict([F, ROIs]) [P_cls_curr, P_regr_curr] = model_classifier.predict([X_target, ROIs]) # <- This returns a (1, n_ROIs, n_class) and (1, n_ROIs, 4) tensors # Get the probabilities from the image classifier img_probas = get_img_probas(filepath, P_cls_curr, P_regr_curr, ROIs, C, f) # Optional re-computation of the alpha parameter if use_optimal_alpha: alpha = get_optimal_alpha(img_probas, P_cls_curr, "mean") # Get the target probabilities P_cls, no_change_bb_idx = make_target_probas(img_probas, P_cls_curr, alpha, constrain_hard_examples) for ii in range(P_cls.shape[1]): # If the detected object is bg skip if np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1): continue 
cls_name = inv_map[np.argmax(P_cls[0, ii, :])] if cls_name not in bboxes: bboxes[cls_name] = [] probs[cls_name] = [] all_probs[cls_name] = [] cls_num = np.argmax(P_cls[0, ii, :]) (x1, y1, w1, h1) = ROIs[0, ii, :] (x2, y2, w2, h2) = ROIs[0, ii, :] try: (tx, ty, tw, th) = P_regr_phase1[0, ii, 4 * cls_num:4 * (cls_num + 1)] tx /= C.classifier_regr_std[0] ty /= C.classifier_regr_std[1] tw /= C.classifier_regr_std[2] th /= C.classifier_regr_std[3] x1, y1, w1, h1 = roi_helpers.apply_regr(x1, y1, w1, h1, tx, ty, tw, th) except: pass try: (tx, ty, tw, th) = P_regr_curr[0, ii, 4 * cls_num:4 * (cls_num + 1)] tx /= C.classifier_regr_std[0] ty /= C.classifier_regr_std[1] tw /= C.classifier_regr_std[2] th /= C.classifier_regr_std[3] x2, y2, w2, h2 = roi_helpers.apply_regr(x2, y2, w2, h2, tx, ty, tw, th) except: pass if ii in no_change_bb_idx: x, y, w, h = x2, y2, w2, h2 else: x, y, w, h = make_target_bbs(np.array([x2, y2, w2, h2]), np.array([x1, y1, w1, h1]), alpha) bboxes[cls_name].append([C.rpn_stride * x, C.rpn_stride * y, C.rpn_stride * (x + w), C.rpn_stride * (y + h)]) probs[cls_name].append(np.max(P_cls[0, ii, :])) all_probs[cls_name].append(P_cls[0, ii, :]) for key in bboxes: new_boxes, _, chosen_idx = roi_helpers.non_max_suppression_fast(np.array(bboxes[key]), np.array(probs[key]), overlap_thresh = 0.1) probas = np.array(all_probs[key])[chosen_idx, :] # img_data = {"filepath" : filepath, "width" : width, "height" : height, "bboxes" : []} # all_imgs[filename]['bboxes'].append({'class': class_name, 'x1': int(x1), 'x2': int(x2), 'y1': int(y1), 'y2': int(y2)}) for jk in range(new_boxes.shape[0]): (x1, y1, x2, y2) = new_boxes[jk, :] (x1, y1, x2, y2) = data_generators.get_real_coordinates(f, x1, y1, x2, y2) img_data["bboxes"].append({'class': key, 'x1': int(x1), 'x2': int(x2), 'y1': int(y1), 'y2': int(y2), 'probas': probas[jk, :]}) return img_data parser = OptionParser() parser.add_option("-s", "--source_path", dest="source_path", help="Path to source training txt file.") parser.add_option("-t", "--target_path", dest="target_path", help="Path to target training detections txt file.") parser.add_option("-p", "--parser", dest="parser", help="Parser to use. One of general or pascal_voc", default="general") parser.add_option("-r", "--num_rois", type="int", dest="num_rois", help="Number of ROIs to process at once.", default=32) parser.add_option("--num_epochs", type="int", dest="num_epochs", help="Number of epochs.", default=50) parser.add_option("--elen", dest="epoch_length", help="Set the epoch length. def=1000", default=1000) parser.add_option("--opt", dest="optimizers", help="Set the optimizer to use", default="SGD") parser.add_option("--lr", dest="lr", help="Initial learning rate", type=float, default=1e-3) parser.add_option("--load_checkpoint", dest="load_checkpoint", help="Path to model weights from past checkpoint. Used to resume training.", default=None) parser.add_option("--alpha_init", type=float, dest="alpha_init", help="Starting alpha value.", default=100.) 
parser.add_option("--alpha_final", type=float, dest="alpha_final", help="Final/smallest alpha value.", default=0.5) parser.add_option("--hard_constraints", dest="hard_constraints", help="Set hard thresholds on confident predictions", action="store_true", default=False) parser.add_option("--recompute_alpha", dest="recompute_alpha", help="Recompute alpha automatically using Hausdorf distance.", action="store_true", default=False) parser.add_option("--phase1_config_file", dest="phase1_config", help="Path of the config file of phase 1 F-RCNN.", default="config.pickle") parser.add_option("--phase1_weights", dest="phase1_weights", help="Path to .hdf5 file with phase 1 F-RCNN model weights") parser.add_option("--img_json", dest="img_json_path", help="Path to JSON file with phase 2 img model architecture") parser.add_option("--img_weights", dest="img_weight_path", help="Path to .hdf5 file with phase 2 img model weights") parser.add_option("--output_config_file", dest="output_config", help="Path to save final phase 3 config file (for testing)", default="config_phase3.pickle") parser.add_option("--output_weight_path", dest="output_weight_path", help="Output path for weights.", default='models/phase3/phase3_weights.hdf5') (options, args) = parser.parse_args() # Check for user errors if not options.phase1_weights: parser.error('Error: path to phase 1 weights must be specified. Pass --phase1_weights to command line') if not options.img_json_path: parser.error('Error: path to phase 2 JSON file must be specified. Pass --img_json to command line') if not options.img_weight_path: parser.error('Error: path to phase 2 weights must be specified. Pass --img_weights to command line') if not options.source_path: parser.error('Error: path to source training data must be specified. Pass --source_path to command line') if not options.target_path: parser.error('Error: path to target training data must be specified. Pass --target_path to command line') # Loading the selected parser if options.parser == 'pascal_voc': from keras_frcnn.pascal_voc_parser import get_data elif options.parser == "general": from keras_frcnn.general_parser import get_data else: raise ValueError("Command line option parser must be a valid one") # mkdir to save models. 
if not os.path.isdir("models"): os.mkdir("models") if not os.path.isdir("models/phase3"): os.mkdir("models/phase3") # Loading the config file from phase 1 with open(options.phase1_config, 'rb') as f_in: C = pickle.load(f_in) C.num_rois = int(options.num_rois) C.model_path = options.output_weight_path # Select the proper backbone configuration if C.network == 'vgg16': from keras_frcnn import vgg as nn feature_dim = 512 elif C.network == 'resnet50': from keras_frcnn import resnet as nn feature_dim = 1024 elif C.network == 'vgg19': from keras_frcnn import vgg19 as nn feature_dim = 512 elif C.network == 'mobilenetv1': from keras_frcnn import mobilenetv1 as nn feature_dim = 512 elif C.network == 'mobilenetv2': from keras_frcnn import mobilenetv2 as nn feature_dim = 320 elif C.network == 'densenet': from keras_frcnn import densenet as nn feature_dim = 1024 else: print('Check network name in phase 1 config file.') raise ValueError # Load source and target data and creating the generators source_imgs, classes_count, _ = get_data(options.source_path) target_imgs, _, _ = get_data(options.target_path) class_mapping = C.class_mapping if 'bg' not in classes_count: classes_count['bg'] = 0 if 'bg' not in class_mapping: class_mapping['bg'] = len(class_mapping) inv_map = {v: k for k, v in class_mapping.items()} print('Source training images per class:') pprint.pprint(classes_count) print('Num source classes (including bg) = {}'.format(len(classes_count))) with open(options.output_config, 'wb') as config_f: pickle.dump(C, config_f) print('Config has been written to {}, and can be loaded when testing to ensure correct results'.format(options.output_config)) source_train_imgs = [s for s in source_imgs if s['imageset'] == 'train'] target_train_imgs = [s for s in target_imgs if s['imageset'] == 'train'] source_val_imgs = [s for s in source_imgs if s['imageset'] == 'test'] # Feeling pretty, might delete later random.shuffle(source_train_imgs) random.shuffle(source_val_imgs) random.shuffle(target_train_imgs) print('Num source train images {}'.format(len(source_train_imgs))) #print('Num source val images {}'.format(len(source_val_imgs))) print('Num target train images {}'.format(len(target_train_imgs))) data_gen_source_train = data_generators.get_anchor_gt(source_train_imgs, classes_count, C, nn.get_img_output_length, K.image_dim_ordering(), mode = 'train') #data_gen_source_val = data_generators.get_anchor_gt(source_val_imgs, classes_count, C, nn.get_img_output_length, K.image_dim_ordering(), mode = 'val') data_gen_target_train = data_generators.get_anchor_gt(target_train_imgs, classes_count, C, nn.get_img_output_length, K.image_dim_ordering(), mode = 'val') if K.image_dim_ordering() == 'th': input_shape_img = (3, None, None) input_shape_features = (feature_dim, None, None) else: input_shape_img = (None, None, 3) input_shape_features = (None, None, feature_dim) # Loading the phase 1 detector img_input = Input(shape = input_shape_img) roi_input = Input(shape = (C.num_rois, 4)) feature_map_input = Input(shape = input_shape_features) shared_layers = nn.nn_base(img_input, trainable = True) num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios) rpn_layers = nn.rpn(shared_layers, num_anchors) classifier = nn.classifier(feature_map_input, roi_input, C.num_rois, nb_classes = len(class_mapping), trainable = True) phase1_rpn = Model(img_input, rpn_layers) phase1_classifier = Model([feature_map_input, roi_input], classifier) phase1_rpn.load_weights(options.phase1_weights, by_name = True) 
phase1_classifier.load_weights(options.phase1_weights, by_name = True) phase1_rpn.compile(optimizer = 'sgd', loss = 'mse') phase1_classifier.compile(optimizer = 'sgd', loss = 'mse') print("Loaded phase 1 Faster R-CNN detector") # Loading the image classifier # load json and create model json_file = open(options.img_json_path, 'r') img_classifier = model_from_json(json_file.read()) json_file.close() # load weights into new model img_classifier.load_weights(options.img_weight_path) print("Loaded phase 2 image classifier") # Creating the phase 3 detector img_input = Input(shape = input_shape_img) roi_input = Input(shape = (None, 4)) shared_layers = nn.nn_base(img_input, trainable = True) num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios) rpn = nn.rpn(shared_layers, num_anchors) classifier = nn.classifier(shared_layers, roi_input, C.num_rois, nb_classes = len(classes_count), trainable = True) model_rpn = Model(img_input, rpn[:2]) model_classifier = Model([img_input, roi_input], classifier) # This is a model that holds both the RPN and the classifier, used to load/save weights for the models model_all = Model([img_input, roi_input], rpn[:2] + classifier) # Load pretrained Imagenet weights try: print('Loading weights from {}'.format(C.base_net_weights)) model_rpn.load_weights(C.base_net_weights, by_name = True) model_classifier.load_weights(C.base_net_weights, by_name = True) except: print('Could not load pretrained model weights. Weights can be found in the keras application folder \ https://github.com/fchollet/keras/tree/master/keras/applications') # Use this to resume from previous training. Specify the frcnn model to load if options.load_checkpoint is not None: print("Loading previous model from", options.load_checkpoint) model_rpn.load_weights(options.load_checkpoint, by_name = True) model_classifier.load_weights(options.load_checkpoint, by_name = True) else: print("No previous model checkpoint was loaded") # Optimizer setup clipnorm_val = 1e-5 lr_val = options.lr if options.optimizers == "SGD": optimizer = SGD(lr = lr_val, momentum = 0.9, clipnorm = clipnorm_val) optimizer_classifier = SGD(lr = lr_val, momentum = 0.9, clipnorm = clipnorm_val) else: optimizer = Adam(lr = lr_val, clipnorm = clipnorm_val) optimizer_classifier = Adam(lr = lr_val, clipnorm = clipnorm_val / 1) # Compile the model AFTER loading weights! 
model_rpn.compile(optimizer=optimizer, loss=[losses.rpn_loss_cls(num_anchors), losses.rpn_loss_regr(num_anchors)]) model_classifier.compile(optimizer=optimizer_classifier, loss=[losses.class_loss_cls, losses.class_loss_regr(len(classes_count)-1)], metrics={'dense_class_{}'.format(len(classes_count)): 'accuracy'}) model_all.compile(optimizer = 'sgd', loss = 'mae') epoch_length = int(options.epoch_length) num_epochs = int(options.num_epochs) iter_num = 0 losses = np.zeros((epoch_length, 5)) rpn_accuracy_rpn_monitor = [] rpn_accuracy_for_epoch = [] best_loss = np.Inf class_mapping_inv = {v: k for k, v in class_mapping.items()} # Hyperparameters of the robust F-RCNN eps = 0.05 alpha_init = float(options.alpha_init) alpha_final = float(options.alpha_final) constant_thresh = int(5 / 7 * epoch_length * num_epochs) iter_count = 0 print('Starting training') for epoch_num in range(num_epochs): start_time = time.time() progbar = generic_utils.Progbar(epoch_length, stateful_metrics = ["rpn_cls", "rpn_regr", "detector_cls", "detector_regr", "avg nb of objects"]) print('Epoch {} / {}'.format(epoch_num + 1, num_epochs)) # if epoch_num > 0 and epoch_num < 45: # clipnorm_val = np.array(clipnorm_val * 0.95) # lr_val = lr_val * 0.95 # K.set_value(model_rpn.optimizer.lr, lr_val) # K.set_value(model_classifier.optimizer.lr, lr_val) # K.set_value(model_rpn.optimizer.clipnorm, clipnorm_val) # K.set_value(model_classifier.optimizer.clipnorm, clipnorm_val) while True: try: if iter_count <= constant_thresh: alpha = alpha_init - iter_count * (alpha_init - alpha_final) / constant_thresh if iter_count == constant_thresh and options.load_checkpoint is None: lr_val = lr_val * 0.1 K.set_value(model_rpn.optimizer.lr, lr_val) K.set_value(model_classifier.optimizer.lr, lr_val) if len(rpn_accuracy_rpn_monitor) == epoch_length and C.verbose: mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor))/len(rpn_accuracy_rpn_monitor) rpn_accuracy_rpn_monitor = [] print('\nAverage number of overlapping bounding boxes from RPN = {} for {} previous iterations'.format(mean_overlapping_bboxes, epoch_length)) if mean_overlapping_bboxes == 0: print('RPN is not producing bounding boxes that overlap the ground truth boxes. 
Check RPN settings or keep training.') # Get next batch samples X, Y, img_data = next(data_gen_source_train) #X, Y, img_data = next(data_gen_source_val) # Unaltered RPN training with source data loss_rpn = model_rpn.train_on_batch(X, Y) # NOTE: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format # Y1 is the output with the one-hot hard labels [0,0,0,0,1,0] # X2 is the 1 x R x 4 tensor with the ROI coordinates to be trained, they're already in (x1,y1,w,h) format X2, Y1, Y2, n_pos_samples_1 = rpn_to_class_inputs(X, img_data, C, mode = "source") loss_class_1 = model_classifier.train_on_batch([X, X2], [Y1, Y2]) # VERY IMPORTANT: This loop guarantees that there will always be one target step per source step while True: try: X_target, filepath, width, height, f = next(data_gen_target_train) img_data = {"filepath" : filepath, "width" : width, "height" : height, "bboxes" : []} img_data = get_target_img_data(X_target, img_data, alpha, options.hard_constraints, options.recompute_alpha) X2, Y1, Y2, n_pos_samples_2 = rpn_to_class_inputs(X_target, img_data, C, mode = "target", eps = eps) loss_class_2 = model_classifier.train_on_batch([X_target, X2], [Y1, Y2]) break except Exception as e: #print(traceback.format_exc()) #print('Exception: {} at line {}'.format(e, sys.exc_info()[2].tb_lineno)) continue losses[iter_num, 0] = loss_rpn[1] losses[iter_num, 1] = loss_rpn[2] losses[iter_num, 2] = loss_class_1[1] + loss_class_2[1] losses[iter_num, 3] = loss_class_1[2] + loss_class_2[2] losses[iter_num, 4] = np.mean([loss_class_1[3], loss_class_2[3]]) progbar.update(iter_num, [('rpn_cls', losses[iter_num, 0]), ('rpn_regr', losses[iter_num, 1]), ('detector_cls', losses[iter_num, 2]), ('detector_regr', losses[iter_num, 3]), ("avg nb of objects", np.mean([n_pos_samples_1, n_pos_samples_2]))]) iter_num += 1 iter_count += 1 if iter_num == epoch_length: loss_rpn_cls = np.mean(losses[:, 0]) loss_rpn_regr = np.mean(losses[:, 1]) loss_class_cls = np.mean(losses[:, 2]) loss_class_regr = np.mean(losses[:, 3]) class_acc = np.mean(losses[:, 4]).round(1) curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch)) / len(rpn_accuracy_for_epoch) rpn_accuracy_for_epoch = [] if C.verbose: print('\nMean number of bounding boxes from RPN overlapping ground truth boxes: {}'.format(mean_overlapping_bboxes)) print('Classifier accuracy for bounding boxes from RPN: {}'.format(class_acc)) print('Loss RPN classifier: {}'.format(loss_rpn_cls)) print('Loss RPN regression: {}'.format(loss_rpn_regr)) print('Loss Detector classifier: {}'.format(loss_class_cls)) print('Loss Detector regression: {}'.format(loss_class_regr)) print('Total Loss: {}'.format(curr_loss)) print('Elapsed time: {}'.format(time.time() - start_time)) iter_num = 0 if curr_loss < best_loss: if C.verbose: print('Total loss decreased from {} to {}, saving weights'.format(best_loss, curr_loss)) best_loss = curr_loss model_all.save_weights(C.model_path) break except Exception as e: #print(traceback.format_exc()) #print('Exception: {} at line {}'.format(e, sys.exc_info()[2].tb_lineno)) continue print('Training complete, exiting.')
py
1a4ebb0b80ed5e9575454446ec029f2052da4e85
# This file contains various useful constants for py3status GENERAL_DEFAULTS = { "color_bad": "#FF0000", "color_degraded": "#FFFF00", "color_good": "#00FF00", "color_separator": "#333333", "colors": False, "interval": 5, "output_format": "i3bar", } MAX_NESTING_LEVELS = 4 TIME_FORMAT = "%Y-%m-%d %H:%M:%S" TZTIME_FORMAT = "%Y-%m-%d %H:%M:%S %Z" TIME_MODULES = ["time", "tztime"] I3S_INSTANCE_MODULES = [ "battery", "cpu_temperature", "disk", "ethernet", "memory", "path_exists", "run_watch", "tztime", "volume", "wireless", ] I3S_SINGLE_NAMES = ["cpu_usage", "ddate", "ipv6", "load", "time"] I3S_ALLOWED_COLORS = ["color_bad", "color_good", "color_degraded"] # i3status modules that allow colors to be passed. # general section also allows colors so is included. I3S_COLOR_MODULES = ["general", "battery", "cpu_temperature", "disk", "load"] I3S_MODULE_NAMES = I3S_SINGLE_NAMES + I3S_INSTANCE_MODULES CONFIG_FILE_SPECIAL_SECTIONS = ["general", "py3status"] ERROR_CONFIG = """ general {colors = true interval = 60} order += "static_string py3status" order += "tztime local" order += "group error" static_string py3status {format = "py3status"} tztime local {format = "%c"} group error{ button_next = 1 button_prev = 0 fixed_width = False format = "{output}" static_string error_min {format = "CONFIG ERROR" color = "#FF0000"} static_string error {format = "$error" color = "#FF0000"} } """ COLOR_NAMES_EXCLUDED = ["good", "bad", "degraded", "separator", "threshold", "None"] COLOR_NAMES = { "aliceblue": "#F0F8FF", "antiquewhite": "#FAEBD7", "aqua": "#00FFFF", "aquamarine": "#7FFFD4", "azure": "#F0FFFF", "beige": "#F5F5DC", "bisque": "#FFE4C4", "black": "#000000", "blanchedalmond": "#FFEBCD", "blue": "#0000FF", "blueviolet": "#8A2BE2", "brown": "#A52A2A", "burlywood": "#DEB887", "cadetblue": "#5F9EA0", "chartreuse": "#7FFF00", "chocolate": "#D2691E", "coral": "#FF7F50", "cornflowerblue": "#6495ED", "cornsilk": "#FFF8DC", "crimson": "#DC143C", "cyan": "#00FFFF", "darkblue": "#00008B", "darkcyan": "#008B8B", "darkgoldenrod": "#B8860B", "darkgray": "#A9A9A9", "darkgrey": "#A9A9A9", "darkgreen": "#006400", "darkkhaki": "#BDB76B", "darkmagenta": "#8B008B", "darkolivegreen": "#556B2F", "darkorange": "#FF8C00", "darkorchid": "#9932CC", "darkred": "#8B0000", "darksalmon": "#E9967A", "darkseagreen": "#8FBC8F", "darkslateblue": "#483D8B", "darkslategray": "#2F4F4F", "darkslategrey": "#2F4F4F", "darkturquoise": "#00CED1", "darkviolet": "#9400D3", "deeppink": "#FF1493", "deepskyblue": "#00BFFF", "dimgray": "#696969", "dimgrey": "#696969", "dodgerblue": "#1E90FF", "firebrick": "#B22222", "floralwhite": "#FFFAF0", "forestgreen": "#228B22", "fuchsia": "#FF00FF", "gainsboro": "#DCDCDC", "ghostwhite": "#F8F8FF", "gold": "#FFD700", "goldenrod": "#DAA520", "gray": "#808080", "grey": "#808080", "green": "#008000", "greenyellow": "#ADFF2F", "honeydew": "#F0FFF0", "hotpink": "#FF69B4", "indianred": "#CD5C5C", "indigo": "#4B0082", "ivory": "#FFFFF0", "khaki": "#F0E68C", "lavender": "#E6E6FA", "lavenderblush": "#FFF0F5", "lawngreen": "#7CFC00", "lemonchiffon": "#FFFACD", "lightblue": "#ADD8E6", "lightcoral": "#F08080", "lightcyan": "#E0FFFF", "lightgoldenrodyellow": "#FAFAD2", "lightgray": "#D3D3D3", "lightgrey": "#D3D3D3", "lightgreen": "#90EE90", "lightpink": "#FFB6C1", "lightsalmon": "#FFA07A", "lightseagreen": "#20B2AA", "lightskyblue": "#87CEFA", "lightslategray": "#778899", "lightslategrey": "#778899", "lightsteelblue": "#B0C4DE", "lightyellow": "#FFFFE0", "lime": "#00FF00", "limegreen": "#32CD32", "linen": "#FAF0E6", "magenta": 
"#FF00FF", "maroon": "#800000", "mediumaquamarine": "#66CDAA", "mediumblue": "#0000CD", "mediumorchid": "#BA55D3", "mediumpurple": "#9370DB", "mediumseagreen": "#3CB371", "mediumslateblue": "#7B68EE", "mediumspringgreen": "#00FA9A", "mediumturquoise": "#48D1CC", "mediumvioletred": "#C71585", "midnightblue": "#191970", "mintcream": "#F5FFFA", "mistyrose": "#FFE4E1", "moccasin": "#FFE4B5", "navajowhite": "#FFDEAD", "navy": "#000080", "oldlace": "#FDF5E6", "olive": "#808000", "olivedrab": "#6B8E23", "orange": "#FFA500", "orangered": "#FF4500", "orchid": "#DA70D6", "palegoldenrod": "#EEE8AA", "palegreen": "#98FB98", "paleturquoise": "#AFEEEE", "palevioletred": "#DB7093", "papayawhip": "#FFEFD5", "peachpuff": "#FFDAB9", "peru": "#CD853F", "pink": "#FFC0CB", "plum": "#DDA0DD", "powderblue": "#B0E0E6", "purple": "#800080", "rebeccapurple": "#663399", "red": "#FF0000", "rosybrown": "#BC8F8F", "royalblue": "#4169E1", "saddlebrown": "#8B4513", "salmon": "#FA8072", "sandybrown": "#F4A460", "seagreen": "#2E8B57", "seashell": "#FFF5EE", "sienna": "#A0522D", "silver": "#C0C0C0", "skyblue": "#87CEEB", "slateblue": "#6A5ACD", "slategray": "#708090", "slategrey": "#708090", "snow": "#FFFAFA", "springgreen": "#00FF7F", "steelblue": "#4682B4", "tan": "#D2B48C", "teal": "#008080", "thistle": "#D8BFD8", "tomato": "#FF6347", "turquoise": "#40E0D0", "violet": "#EE82EE", "wheat": "#F5DEB3", "white": "#FFFFFF", "whitesmoke": "#F5F5F5", "yellow": "#FFFF00", "yellowgreen": "#9ACD32", } ON_TRIGGER_ACTIONS = ["refresh", "refresh_and_freeze"] POSITIONS = ["left", "center", "right"] RETIRED_MODULES = { "nvidia_temp": { "new": ["nvidia_smi"], "msg": "Module {old} has been replaced with a module {new}.", }, "scratchpad_async": { "new": ["scratchpad"], "msg": "Module {old} has been replaced with a consolidated module {new}.", }, "scratchpad_counter": { "new": ["scratchpad"], "msg": "Module {old} has been replaced with a consolidated module {new}.", }, "window_title": { "new": ["window"], "msg": "Module {old} has been replaced with a consolidated module {new}.", }, "window_title_async": { "new": ["window"], "msg": "Module {old} has been replaced with a consolidated module {new}.", }, "weather_yahoo": { "new": ["weather_owm"], "msg": "Module {old} is no longer available due to retired Yahoo Weather APIs and new Oath requirements. You can try a different module {new}.", }, "xkb_layouts": { "new": ["xkb_input"], "msg": "Module {old} has been replaced with a module {new} to support sway too.", }, } MARKUP_LANGUAGES = ["pango", "none"]
py
1a4ebb372cbf942e1e433681d5d277538999a9b7
from sympy import Basic, S, Symbol, Wild, Real, Integer, Rational, \ sin, cos, exp, log, oo, sqrt, symbols, Integral, sympify, \ WildFunction, Poly, Function, Derivative, Number, pi, var, \ NumberSymbol, zoo, Piecewise, Mul, Pow, nsimplify, ratsimp, trigsimp, \ radsimp, powsimp, simplify, together, separate, collect, \ apart, combsimp, factor, refine, cancel, invert from sympy.core.cache import clear_cache from sympy.utilities.pytest import XFAIL, raises class DummyNumber(object): """ Minimal implementation of a number that works with SymPy. If one has a Number class (e.g. Sage Integer, or some other custom class) that one wants to work well with SymPy, one has to implement at least the methods of this class DummyNumber, resp. its subclasses I5 and F1_1. Basically, one just needs to implement either __int__() or __float__() and then one needs to make sure that the class works with Python integers and with itself. """ def __radd__(self, a): if isinstance(a, (int, float)): return a + self.number return NotImplemented def __truediv__(a, b): return a.__div__(b) def __rtruediv__(a, b): return a.__rdiv__(b) def __add__(self, a): if isinstance(a, (int, float, DummyNumber)): return self.number + a return NotImplemented def __rsub__(self, a): if isinstance(a, (int, float)): return a - self.number return NotImplemented def __sub__(self, a): if isinstance(a, (int, float, DummyNumber)): return self.number - a return NotImplemented def __rmul__(self, a): if isinstance(a, (int, float)): return a * self.number return NotImplemented def __mul__(self, a): if isinstance(a, (int, float, DummyNumber)): return self.number * a return NotImplemented def __rdiv__(self, a): if isinstance(a, (int, float)): return a / self.number return NotImplemented def __div__(self, a): if isinstance(a, (int, float, DummyNumber)): return self.number / a return NotImplemented def __rpow__(self, a): if isinstance(a, (int, float)): return a ** self.number return NotImplemented def __pow__(self, a): if isinstance(a, (int, float, DummyNumber)): return self.number ** a return NotImplemented def __pos__(self): return self.number def __neg__(self): return - self.number class I5(DummyNumber): number = 5 def __int__(self): return self.number class F1_1(DummyNumber): number = 1.1 def __float__(self): return self.number x,y,z,t = symbols('xyzt') i5 = I5() f1_1 = F1_1() # basic sympy objects basic_objs = [ Rational(2), Real("1.3"), x, y, pow(x,y)*y, ] # all supported objects all_objs = basic_objs + [ 5, 5.5, i5, f1_1 ] def dotest(s): for x in all_objs: for y in all_objs: s(x,y) def test_basic(): def s(a,b): x = a x = +a x = -a x = a+b x = a-b x = a*b x = a/b x = a**b dotest(s) def test_ibasic(): def s(a,b): x = a x += b x = a x -= b x = a x *= b x = a x /= b dotest(s) def test_relational(): assert (pi < 3) == False assert (pi <= 3) == False assert (pi > 3) == True assert (pi >= 3) == True assert (-pi < 3) == True assert (-pi <= 3) == True assert (-pi > 3) == False assert (-pi >= 3) == False assert (x - 2 < x - 3) == False def test_relational_noncommutative(): from sympy import Lt, Gt, Le, Ge a, b = symbols('a b', commutative=False) assert (a < b) == Lt(a, b) assert (a <= b) == Le(a, b) assert (a > b) == Gt(a, b) assert (a >= b) == Ge(a, b) def test_basic_nostr(): for obj in basic_objs: for op in ['+','-','*','/','**']: raises(TypeError, "obj %s '1'" % op) def test_leadterm(): assert (3+2*x**(log(3)/log(2)-1)).leadterm(x) == (3,0) assert (1/x**2+1+x+x**2).leadterm(x)[1] == -2 assert (1/x+1+x+x**2).leadterm(x)[1] == -1 assert 
(x**2+1/x).leadterm(x)[1] == -1 assert (1+x**2).leadterm(x)[1] == 0 assert (x+1).leadterm(x)[1] == 0 assert (x+x**2).leadterm(x)[1] == 1 assert (x**2).leadterm(x)[1] == 2 def test_as_leading_term(): assert (3+2*x**(log(3)/log(2)-1)).as_leading_term(x) == 3 assert (1/x**2+1+x+x**2).as_leading_term(x) == 1/x**2 assert (1/x+1+x+x**2).as_leading_term(x) == 1/x assert (x**2+1/x).as_leading_term(x) == 1/x assert (1+x**2).as_leading_term(x) == 1 assert (x+1).as_leading_term(x) == 1 assert (x+x**2).as_leading_term(x) == x assert (x**2).as_leading_term(x) == x**2 def test_leadterm2(): assert (x*cos(1)*cos(1 + sin(1)) + sin(1 + sin(1))).leadterm(x) == \ (sin(1 + sin(1)), 0) def test_leadterm3(): assert (y+z+x).leadterm(x) == (y+z, 0) def test_as_leading_term2(): assert (x*cos(1)*cos(1 + sin(1)) + sin(1 + sin(1))).as_leading_term(x) == \ sin(1 + sin(1)) def test_as_leading_term3(): assert (2+pi+x).as_leading_term(x) == 2 + pi assert (2*x+pi*x+x**2).as_leading_term(x) == 2*x + pi*x def test_atoms(): assert sorted(list(x.atoms())) == [x] assert sorted(list((1+x).atoms())) == sorted([1, x]) assert sorted(list((1+2*cos(x)).atoms(Symbol))) == [x] assert sorted(list((1+2*cos(x)).atoms(Symbol,Number))) == sorted([1, 2, x]) assert sorted(list((2*(x**(y**x))).atoms())) == sorted([2, x, y]) assert sorted(list(Rational(1,2).atoms())) == [S.Half] assert sorted(list(Rational(1,2).atoms(Symbol))) == [] assert sorted(list(sin(oo).atoms(oo))) == [oo] assert sorted(list(Poly(0, x).atoms())) == [S.Zero] assert sorted(list(Poly(1, x).atoms())) == [S.One] assert sorted(list(Poly(x, x).atoms())) == [x] assert sorted(list(Poly(x, x, y).atoms())) == [x] assert sorted(list(Poly(x + y, x, y).atoms())) == sorted([x, y]) assert sorted(list(Poly(x + y, x, y, z).atoms())) == sorted([x, y]) assert sorted(list(Poly(x + y*t, x, y, z).atoms())) == sorted([t, x, y]) I = S.ImaginaryUnit assert list((I*pi).atoms(NumberSymbol)) == [pi] assert sorted((I*pi).atoms(NumberSymbol, I)) == \ sorted((I*pi).atoms(I,NumberSymbol)) == [pi, I] I = S.ImaginaryUnit assert list((I*pi).atoms(NumberSymbol)) == [pi] assert sorted((I*pi).atoms(NumberSymbol, I)) == \ sorted((I*pi).atoms(I,NumberSymbol)) == [pi, I] def test_is_polynomial(): z = Symbol('z') k = Symbol('k', nonnegative=True, integer=True) assert Rational(2).is_polynomial(x, y, z) == True assert (S.Pi).is_polynomial(x, y, z) == True assert x.is_polynomial(x) == True assert x.is_polynomial(y) == True assert (x**2).is_polynomial(x) == True assert (x**2).is_polynomial(y) == True assert (x**(-2)).is_polynomial(x) == False assert (x**(-2)).is_polynomial(y) == True assert (2**x).is_polynomial(x) == False assert (2**x).is_polynomial(y) == True assert (x**k).is_polynomial(x) == True assert (x**k).is_polynomial(k) == False assert (x**x).is_polynomial(x) == False assert (k**k).is_polynomial(k) == False assert (k**x).is_polynomial(k) == None assert (x**(-k)).is_polynomial(x) == None assert ((2*x)**k).is_polynomial(x) == True assert (x**2 + 3*x - 8).is_polynomial(x) == True assert (x**2 + 3*x - 8).is_polynomial(y) == True assert (x**2 + 3*x - 8).is_polynomial() == True assert sqrt(x).is_polynomial(x) == False assert (x**S.Half).is_polynomial(x) == False assert (x**Rational(3,2)).is_polynomial(x) == False assert (x**2 + 3*x*sqrt(y) - 8).is_polynomial(x) == True assert (x**2 + 3*x*sqrt(y) - 8).is_polynomial(y) == False assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(2)).is_polynomial() == True assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(x)).is_polynomial() == False assert ((x**2)*(y**2) + x*(y**2) + y*x + 
exp(2)).is_polynomial(x, y) == True assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(x)).is_polynomial(x, y) == False def test_is_rational_function(): x,y = symbols('xy') assert Integer(1).is_rational_function() == True assert Integer(1).is_rational_function(x) == True assert Rational(17,54).is_rational_function() == True assert Rational(17,54).is_rational_function(x) == True assert (12/x).is_rational_function() == True assert (12/x).is_rational_function(x) == True assert (x/y).is_rational_function() == True assert (x/y).is_rational_function(x) == True assert (x/y).is_rational_function(x, y) == True assert (x**2+1/x/y).is_rational_function() == True assert (x**2+1/x/y).is_rational_function(x) == True assert (x**2+1/x/y).is_rational_function(x, y) == True assert (sin(y)/x).is_rational_function() == False assert (sin(y)/x).is_rational_function(y) == False assert (sin(y)/x).is_rational_function(x) == True assert (sin(y)/x).is_rational_function(x, y) == False def test_SAGE1(): #see http://code.google.com/p/sympy/issues/detail?id=247 class MyInt: def _sympy_(self): return Integer(5) m = MyInt() e = Rational(2)*m assert e == 10 raises(TypeError, "Rational(2)*MyInt") def test_SAGE2(): class MyInt(object): def __int__(self): return 5 assert sympify(MyInt()) == 5 e = Rational(2)*MyInt() assert e == 10 raises(TypeError, "Rational(2)*MyInt") def test_SAGE3(): class MySymbol: def __rmul__(self, other): return ('mys', other, self) o = MySymbol() e = x*o assert e == ('mys', x, o) def test_len(): x, y, z = symbols("xyz") e = x*y assert len(e.args) == 2 e = x+y+z assert len(e.args) == 3 def test_doit(): a = Integral(x**2, x) assert isinstance(a.doit(), Integral) == False assert isinstance(a.doit(integrals=True), Integral) == False assert isinstance(a.doit(integrals=False), Integral) == True assert (2*Integral(x, x)).doit() == x**2 def test_attribute_error(): raises(AttributeError, "x.cos()") raises(AttributeError, "x.sin()") raises(AttributeError, "x.exp()") def test_args(): assert (x*y).args in ((x, y), (y, x)) assert (x+y).args in ((x, y), (y, x)) assert (x*y+1).args in ((x*y, 1), (1, x*y)) assert sin(x*y).args == (x*y,) assert sin(x*y).args[0] == x*y assert (x**y).args == (x,y) assert (x**y).args[0] == x assert (x**y).args[1] == y def test_iter_basic_args(): assert list(sin(x*y).iter_basic_args()) == [x*y] assert list((x**y).iter_basic_args()) == [x, y] def test_noncommutative_expand_issue658(): A, B, C = symbols('ABC', commutative=False) assert A*B - B*A != 0 assert (A*(A+B)*B).expand() == A**2*B + A*B**2 assert (A*(A+B+C)*B).expand() == A**2*B + A*B**2 + A*C*B def test_as_numer_denom(): assert oo.as_numer_denom() == (1, 0) assert (-oo).as_numer_denom() == (-1, 0) assert zoo.as_numer_denom() == (zoo, 1) assert (-zoo).as_numer_denom() == (zoo, 1) assert (1/x).as_numer_denom() == (1, x) assert x.as_numer_denom() == (x, 1) assert (x/y).as_numer_denom() == (x, y) def test_as_independent(): assert (2*x*sin(x)+y+x).as_independent(x) == (y, x + 2*x*sin(x)) assert (2*x*sin(x)+y+x).as_independent(y) == (x + 2*x*sin(x), y) assert (2*x*sin(x)+y+x).as_independent(x, y) == (0, y + x + 2*x*sin(x)) assert (x*sin(x)*cos(y)).as_independent(x) == (cos(y), x*sin(x)) assert (x*sin(x)*cos(y)).as_independent(y) == (x*sin(x), cos(y)) assert (x*sin(x)*cos(y)).as_independent(x, y) == (1, x*sin(x)*cos(y)) assert (sin(x)).as_independent(x) == (1, sin(x)) assert (sin(x)).as_independent(y) == (sin(x), 1) assert (2*sin(x)).as_independent(x) == (2, sin(x)) assert (2*sin(x)).as_independent(y) == (2*sin(x), 1) def test_subs_dict(): 
a,b,c,d,e = symbols('abcde') assert (sin(x))._subs_dict({ x : 1, sin(x) : 2}) == 2 assert (sin(x))._subs_dict([(x, 1), (sin(x), 2)]) == 2 expr = sqrt(sin(2*x))*sin(exp(x)*x)*cos(2*x) + sin(2*x) seq = [ (sqrt(sin(2*x)),a), (cos(2*x),b), (sin(2*x),c), (x,d), (exp(x),e) ] assert expr._subs_dict(seq) == c + a*b*sin(d*e) seq = [ (sqrt(sin(2*x)),a), (sin(2*x),c), (cos(2*x),b), (x,d), (exp(x),e) ] assert expr._subs_dict(seq) == c + a*b*sin(d*e) def test_subs_list(): x,y = symbols('xy') assert (sin(x))._subs_list([(sin(x), 2), (x, 1)]) == 2 assert (sin(x))._subs_list([(x, 1), (sin(x), 2)]) == sin(1) assert (x+y)._subs_list([(x, 3), (y, x**2)]) == 3 + x**2 assert (x+y)._subs_list([(y, x**2), (x, 3)]) == 12 def test_call(): a,b,c,d,e = symbols('abcde') assert sin(x)({ x : 1, sin(x) : 2}) == 2 expr = sqrt(sin(2*x))*sin(exp(x)*x)*cos(2*x) + sin(2*x) assert expr({ sqrt(sin(2*x)) : a, cos(2*x) : b, sin(2*x) : c, x : d, exp(x) : e}) == c + a*b*sin(d*e) def test_has(): x, y = symbols("xy") f = Function("f") g = Function("g") p = Wild('p') assert sin(x).has(x) assert sin(x).has(sin) assert not sin(x).has(y) assert not sin(x).has(cos) assert f(x).has(x) assert f(x).has(f) assert not f(x).has(y) assert not f(x).has(g) assert f(x).diff(x).has(x) assert f(x).diff(x).has(f) assert f(x).diff(x).has(Derivative) assert not f(x).diff(x).has(y) assert not f(x).diff(x).has(g) assert not f(x).diff(x).has(sin) assert (x**2).has(Symbol) assert not (x**2).has(Wild) assert (2*p).has(Wild) def test_has_any_symbols(): x,y,z,t,u = symbols('xyztu') i = Integer(4400) assert i.has_any_symbols(x) == False assert (i*x**i).has_any_symbols(x) == True assert (i*y**i).has_any_symbols(x) == False assert (i*y**i).has_any_symbols(x, y) == True expr = x**2*y + sin(2**t + log(z)) assert expr.has_any_symbols(u) == False assert expr.has_any_symbols(x) == True assert expr.has_any_symbols(y) == True assert expr.has_any_symbols(z) == True assert expr.has_any_symbols(t) == True assert expr.has_any_symbols(x, y, z, t) == True assert expr.has_any_symbols(x, y, z, t, u) == True from sympy.physics.units import m, s assert (x*m/s).has_any_symbols(x) == True assert (x*m/s).has_all_symbols(x) == True assert (x*m/s).has_any_symbols(y, z) == False assert (x*m/s).has_all_symbols(x, y) == False poly = Poly(x**2 + x*y*sin(z), x, y, t) assert poly.has_any_symbols(x) == True assert poly.has_any_symbols(x, y, z) == True assert poly.has_any_symbols(x, y, z, t) == True assert poly.has_all_symbols(x, y, z) == True assert poly.has_all_symbols(x, y, z, t) == False def test_has_all_symbols(): x,y,z,t,u = symbols('xyztu') i = Integer(4400) assert i.has_all_symbols(x) == False assert (i*x**i).has_all_symbols(x) == True assert (i*y**i).has_all_symbols(x) == False expr = x**2*y + sin(2**t + log(z)) assert expr.has_all_symbols(y, z, t) == True assert expr.has_all_symbols(x, z, t) == True assert expr.has_all_symbols(x, y, t) == True assert expr.has_all_symbols(x, y, z) == True assert expr.has_all_symbols(y, u, t) == False assert expr.has_all_symbols(x, z, u) == False assert expr.has_all_symbols(u, y, z) == False assert expr.has_all_symbols(x, y, z, t) == True assert expr.has_all_symbols(x, y, z, t, u) == False def test_as_poly_basic(): x, y = symbols('xy') f = x**2 + 2*x*y assert f.as_poly().as_basic() == f assert f.as_poly(x, y).as_basic() == f assert (f + sin(x)).as_poly(x, y) is None p = Poly(f, x, y) assert p.as_poly() == p def test_nonzero(): assert bool(S.Zero) == False assert bool(S.One) == True assert bool(x) == True assert bool(x+y) == True assert bool(x-x) == 
False assert bool(x*y) == True assert bool(x*1) == True assert bool(x*0) == False def test_is_number(): x, y = symbols('xy') g = WildFunction('g') assert Real(3.14).is_number == True assert Integer(737).is_number == True assert Rational(3, 2).is_number == True assert Rational(8).is_number == True assert x.is_number == False assert (2*x).is_number == False assert (x + y).is_number == False assert log(2).is_number == True assert log(x).is_number == False assert (2 + log(2)).is_number == True assert (8+log(2)).is_number == True assert (2 + log(x)).is_number == False assert (8+log(2)+x).is_number == False assert (2*g).is_number == False assert (1+x**2/x-x).is_number == True # test extensibility of .is_number # on subinstances of Basic class A(Basic): pass a = A() assert a.is_number == False # TODO write more tests for as_coeff_factors def test_as_coeff_factors(): x = Symbol('x') assert x .as_coeff_factors() == ( 0, (x,)) assert (-1+x).as_coeff_factors() == (-1, (x,)) assert ( 2+x).as_coeff_factors() == ( 2, (x,)) assert ( 1+x).as_coeff_factors() == ( 1, (x,)) def test_as_coeff_exponent(): x, y = symbols("xy") assert (3*x**4).as_coeff_exponent(x) == (3, 4) assert (2*x**3).as_coeff_exponent(x) == (2, 3) assert (4*x**2).as_coeff_exponent(x) == (4, 2) assert (6*x**1).as_coeff_exponent(x) == (6, 1) assert (3*x**0).as_coeff_exponent(x) == (3, 0) assert (2*x**0).as_coeff_exponent(x) == (2, 0) assert (1*x**0).as_coeff_exponent(x) == (1, 0) assert (0*x**0).as_coeff_exponent(x) == (0, 0) assert (-1*x**0).as_coeff_exponent(x) == (-1, 0) assert (-2*x**0).as_coeff_exponent(x) == (-2, 0) assert (2*x**3+pi*x**3).as_coeff_exponent(x) == (2+pi, 3) assert (x*log(2)/(2*x + pi*x)).as_coeff_exponent(x) == \ (log(2)/(2+pi), 0) # 1685 D = Derivative f = Function('f') fx = D(f(x), x) assert fx.as_coeff_exponent(f(x)) == (fx ,0) def test_extractions(): x, y = symbols("xy") n = Symbol("n", integer=True) assert ((x*y)**3).extract_multiplicatively(x**2 * y) == x*y**2 assert ((x*y)**3).extract_multiplicatively(x**4 * y) == None assert (2*x).extract_multiplicatively(2) == x assert (2*x).extract_multiplicatively(3) == None assert (2*x).extract_multiplicatively(-1) == None assert (Rational(1,2)*x).extract_multiplicatively(3) == x/6 assert (x**(Rational(1,2))).extract_multiplicatively(x) == None assert (x**(Rational(1,2))).extract_multiplicatively(1/x) == x**(Rational(3,2)) assert ((x*y)**3).extract_additively(1) == None assert (x+1).extract_additively(x) == 1 assert (x+1).extract_additively(2*x) == None assert (x+1).extract_additively(-x) == 1+2*x assert (-x+1).extract_additively(2*x) == 1-3*x assert (Integer(-3)).could_extract_minus_sign() == True assert (-n*x+x).could_extract_minus_sign() != (n*x-x).could_extract_minus_sign() assert (x-y).could_extract_minus_sign() != (-x+y).could_extract_minus_sign() assert (1-x-y).could_extract_minus_sign() == True assert (1-x+y).could_extract_minus_sign() == False assert ((-x-x*y)/y).could_extract_minus_sign() == True assert (-(x+x*y)/y).could_extract_minus_sign() == True assert ((x+x*y)/(-y)).could_extract_minus_sign() == True assert ((x+x*y)/y).could_extract_minus_sign() == False assert (x*(-x-x**3)).could_extract_minus_sign() == True # used to give inf recurs assert ((-x-y)/(x+y)).could_extract_minus_sign() == True # is_Mul odd case # The results of each of these will vary on different machines, e.g. # the first one might be False and the other (then) is true or vice versa, # so both are included. 
assert ((-x-y)/(x-y)).could_extract_minus_sign() == False or\ ((-x-y)/(y-x)).could_extract_minus_sign() == False # is_Mul even case def test_coeff(): assert (3+2*x+4*x**2).coeff(1) == None assert (3+2*x+4*x**2).coeff(-1) == None assert (3+2*x+4*x**2).coeff(x) == 2 assert (3+2*x+4*x**2).coeff(x**2) == 4 assert (3+2*x+4*x**2).coeff(x**3) == None assert (-x/8 + x*y).coeff(x) == -S(1)/8 + y assert (-x/8 + x*y).coeff(-x) == S(1)/8 - y assert (-x/8 + x*y).coeff(2*x) == -S(1)/16 + y/2 assert (x/8 + x*y).coeff(2*y*x) == S(1)/2 assert (x/8 + x*y).coeff(y*x/2) == 2 f = Function('f') assert (2*f(x) + 3*f(x).diff(x)).coeff(f(x)) == 2 def test_coeff2(): var('r, kappa') psi = Function("psi") g = 1/r**2 * (2*r*psi(r).diff(r, 1) + r**2 * psi(r).diff(r, 2)) g = g.expand() assert g.coeff((psi(r).diff(r))) == 2/r def test_coeff2_0(): var('r, kappa') psi = Function("psi") g = 1/r**2 * (2*r*psi(r).diff(r, 1) + r**2 * psi(r).diff(r, 2)) g = g.expand() assert g.coeff(psi(r).diff(r, 2)) == 1 def test_coeff_expand(): x, y, z = symbols('x y z') expr = z*(x+y)**2 expr2 = z*(x+y)**2 + z*(2*x + 2*y)**2 assert expr.coeff(z) == 2*x*y + x**2 + y**2 assert expr.coeff(z, expand=False) == (x+y)**2 assert expr2.coeff(z) == 10*x*y + 5*x**2 + 5*y**2 assert expr2.coeff(z, expand=False) == (x+y)**2 + (2*x + 2*y)**2 def test_integrate(): assert x.integrate(x) == x**2/2 assert x.integrate((x, 0, 1)) == S(1)/2 def test_count_ops(): f = (x*y + 3/y)**(3 + 2) assert f.count_ops() == Symbol('ADD') + 2*Symbol('MUL') + 2*Symbol('POW') assert f.count_ops(symbolic=False) == 5 def test_contains(): f = (x*y + 3/y)**(3 + 2) g = Function('g') h = Function('h') p = Piecewise( (g, x<-1), (1, x<=1), (f, True)) assert x in p assert y in p assert not z in p assert 1 in p assert 3 in p assert not 4 in p assert f in p assert g in p assert not h in p def test_as_Something(): assert x.as_Add() == [x] assert x.as_Mul() == [x] assert x.as_Pow() == (x, S.One) assert (x*y*z).as_Add() == [x*y*z] assert sorted((x*y*z).as_Mul()) == [x, y, z] assert (x*y*z).as_Pow() == (x*y*z, S.One) assert sorted((x+y+z).as_Add()) == [x, y, z] assert (x+y+z).as_Mul() == [x+y+z] assert (x+y+z).as_Pow() == (x+y+z, S.One) assert ((x+y)**z).as_Add() == [(x+y)**z] assert ((x+y)**z).as_Mul() == [(x+y)**z] assert ((x+y)**z).as_Pow() == (x+y, z) def test_Basic_keep_sign(): Basic.keep_sign = True assert Mul(x - 1, x + 1) == (x - 1)*(x + 1) assert (1/(x - 1)).as_coeff_terms()[0] == +1 clear_cache() Basic.keep_sign = False assert Mul(x - 1, x + 1) == -(1 - x)*(1 + x) assert (1/(x - 1)).as_coeff_terms()[0] == -1 def test_issue1864(): assert hasattr(Mul(x, y), "is_commutative") assert hasattr(Mul(x, y, evaluate=False), "is_commutative") assert hasattr(Pow(x, y), "is_commutative") assert hasattr(Pow(x, y, evaluate=False), "is_commutative") expr = Mul(Pow(2, 2, evaluate=False), 3, evaluate=False) + 1 assert hasattr(expr, "is_commutative") def test_action_verbs(): a,b,c,d = symbols('abcd') assert nsimplify((1/(exp(3*pi*x/5)+1))) == (1/(exp(3*pi*x/5)+1)).nsimplify() assert ratsimp(1/x + 1/y) == (1/x + 1/y).ratsimp() assert trigsimp(log(x), deep=True) == (log(x)).trigsimp(deep = True) assert radsimp(1/(2+sqrt(2))) == (1/(2+sqrt(2))).radsimp() assert powsimp(x**y*x**z*y**z, combine='all') == (x**y*x**z*y**z).powsimp(combine='all') assert simplify(x**y*x**z*y**z) == (x**y*x**z*y**z).simplify() assert together(1/x + 1/y) == (1/x + 1/y).together() assert separate((x*(y*z)**3)**2) == ((x*(y*z)**3)**2).separate() assert collect(a*x**2 + b*x**2 + a*x - b*x + c, x) == (a*x**2 + b*x**2 + a*x - b*x + 
c).collect(x) assert apart(y/(y+2)/(y+1), y) == (y/(y+2)/(y+1)).apart(y) assert combsimp(y/(x+2)/(x+1)) == (y/(x+2)/(x+1)).combsimp() assert factor(x**2+5*x+6) == (x**2+5*x+6).factor() assert refine(sqrt(x**2)) == sqrt(x**2).refine() assert cancel((x**2+5*x+6)/(x+2)) == ((x**2+5*x+6)/(x+2)).cancel()
py
1a4ebd06d570f788526deb529a7b5fbda2134d8b
import Root
import Chain
import Pelvis
import Chest
import Leg
import Foot
import Arm
import Hand
import Head
py
1a4ebd7b0c4d8a1a8faf768c016779d46ed4d9f6
import os import numpy as np import sys BASE_DIR = os.path.dirname(os.path.abspath(__file__)) ROOT_DIR = os.path.dirname(BASE_DIR) sys.path.append(BASE_DIR) sys.path.append(os.path.join(ROOT_DIR, 'utils')) import data_prep_util import indoor3d_util # Constants data_dir = os.path.join(ROOT_DIR, 'data') indoor3d_data_dir = os.path.join(data_dir, 'stanford_indoor3d') NUM_POINT = 4096 H5_BATCH_SIZE = 1000 data_dim = [NUM_POINT, 9] label_dim = [NUM_POINT] data_dtype = 'float32' label_dtype = 'uint8' # Set paths filelist = os.path.join(BASE_DIR, 'meta/all_data_label.txt') data_label_files = [os.path.join(indoor3d_data_dir, line.rstrip()) for line in open(filelist)] output_dir = os.path.join(data_dir, 'indoor3d_sem_seg_hdf5_data') if not os.path.exists(output_dir): os.mkdir(output_dir) output_filename_prefix = os.path.join(output_dir, 'ply_data_all') output_room_filelist = os.path.join(output_dir, 'room_filelist.txt') fout_room = open(output_room_filelist, 'w') # -------------------------------------- # ----- BATCH WRITE TO HDF5 ----- # -------------------------------------- batch_data_dim = [H5_BATCH_SIZE] + data_dim batch_label_dim = [H5_BATCH_SIZE] + label_dim h5_batch_data = np.zeros(batch_data_dim, dtype=np.float32) h5_batch_label = np.zeros(batch_label_dim, dtype=np.uint8) buffer_size = 0 # state: record how many samples are currently in buffer h5_index = 0 # state: the next h5 file to save def insert_batch(data, label, last_batch=False): global h5_batch_data, h5_batch_label global buffer_size, h5_index data_size = data.shape[0] # If there is enough space, just insert if buffer_size + data_size <= h5_batch_data.shape[0]: h5_batch_data[buffer_size:buffer_size + data_size, ...] = data h5_batch_label[buffer_size:buffer_size + data_size] = label buffer_size += data_size else: # not enough space capacity = h5_batch_data.shape[0] - buffer_size assert (capacity >= 0) if capacity > 0: h5_batch_data[buffer_size:buffer_size + capacity, ...] = data[0:capacity, ...] h5_batch_label[buffer_size:buffer_size + capacity, ...] = label[0:capacity, ...] # Save batch data and label to h5 file, reset buffer_size h5_filename = output_filename_prefix + '_' + str(h5_index) + '.h5' data_prep_util.save_h5(h5_filename, h5_batch_data, h5_batch_label, data_dtype, label_dtype) print('Stored {0} with size {1}'.format(h5_filename, h5_batch_data.shape[0])) h5_index += 1 buffer_size = 0 # recursive call insert_batch(data[capacity:, ...], label[capacity:, ...], last_batch) if last_batch and buffer_size > 0: h5_filename = output_filename_prefix + '_' + str(h5_index) + '.h5' data_prep_util.save_h5(h5_filename, h5_batch_data[0:buffer_size, ...], h5_batch_label[0:buffer_size, ...], data_dtype, label_dtype) print('Stored {0} with size {1}'.format(h5_filename, buffer_size)) h5_index += 1 buffer_size = 0 return sample_cnt = 0 for i, data_label_filename in enumerate(data_label_files): print(data_label_filename) data, label = indoor3d_util.room2blocks_wrapper_normalized(data_label_filename, NUM_POINT, block_size=1.0, stride=0.5, random_sample=False, sample_num=None) print('{0}, {1}'.format(data.shape, label.shape)) for _ in range(data.shape[0]): fout_room.write(os.path.basename(data_label_filename)[0:-4] + '\n') sample_cnt += data.shape[0] insert_batch(data, label, i == len(data_label_files) - 1) fout_room.close() print("Total samples: {0}".format(sample_cnt))
py
1a4ebda8e7e322755d1eb7797ce6a0899837679d
import base64 import pytest from h.models.auth_client import GrantType class TestUpdateGroup: def test_it_returns_http_200_with_valid_payload_and_user_token( self, app, token_auth_header, first_party_group ): group = {"name": "Rename My Group"} res = app.patch_json( "/api/groups/{id}".format(id=first_party_group.pubid), group, headers=token_auth_header, ) assert res.status_code == 200 assert res.json_body["name"] == "Rename My Group" assert res.json_body["groupid"] is None def test_it_does_not_update_group_if_empty_payload_and_user_token( self, app, token_auth_header, first_party_group ): payload = {} res = app.patch_json( "/api/groups/{id}".format(id=first_party_group.pubid), payload, headers=token_auth_header, ) assert res.status_code == 200 assert res.json_body["name"] == "My First Group" assert res.json_body["groupid"] is None def test_it_ignores_non_whitelisted_fields_in_payload_and_user_token( self, app, token_auth_header, first_party_group ): group = { "id": "fbdzzz", "name": "My Group", "organization": "foobar", "joinable_by": "whoever", } res = app.patch_json( "/api/groups/{id}".format(id=first_party_group.pubid), group, headers=token_auth_header, ) assert res.status_code == 200 assert res.json_body["id"] != group["id"] assert res.json_body["organization"] is None def test_it_returns_http_400_with_invalid_payload_and_user_token( self, app, token_auth_header, first_party_group ): group = { "name": "Oooopoooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" } res = app.patch_json( "/api/groups/{id}".format(id=first_party_group.pubid), group, headers=token_auth_header, expect_errors=True, ) assert res.status_code == 400 def test_it_returns_http_400_if_groupid_set_on_default_authority_and_user_token( self, app, token_auth_header, first_party_group ): group = {"groupid": "3434kjkjk"} res = app.patch_json( "/api/groups/{id}".format(id=first_party_group.pubid), group, headers=token_auth_header, expect_errors=True, ) assert res.status_code == 400 def test_it_returns_http_404_if_no_authenticated_user(self, app, first_party_group): group = {"name": "My Group"} res = app.patch_json( "/api/groups/{id}".format(id=first_party_group.pubid), group, expect_errors=True, ) assert res.status_code == 404 def test_it_returns_http_404_if_token_user_unauthorized( self, app, token_auth_header, factories, db_session ): # Not created by user represented by token_auth_header group = factories.Group() db_session.commit() group_payload = {"name": "My Group"} res = app.patch_json( "/api/groups/{id}".format(id=group.pubid), group_payload, headers=token_auth_header, expect_errors=True, ) assert res.status_code == 404 def test_it_allows_auth_client_with_valid_forwarded_user( self, app, auth_client_header, third_party_user, factories, db_session ): group = factories.Group( creator=third_party_user, authority=third_party_user.authority ) db_session.commit() headers = auth_client_header headers["X-Forwarded-User"] = third_party_user.userid group_payload = {"name": "My Group"} path = "/api/groups/{id}".format(id=group.pubid) res = app.patch_json(path, group_payload, headers=headers) assert res.status_code == 200 assert res.json_body["name"] == "My Group" def test_it_allows_auth_client_with_matching_authority( self, app, auth_client_header, third_party_user, factories, db_session ): group = factories.Group( creator=third_party_user, authority=third_party_user.authority ) db_session.commit() group_payload = {"name": "My Group"} path = "/api/groups/{id}".format(id=group.pubid) res = app.patch_json(path, 
group_payload, headers=auth_client_header) assert res.status_code == 200 assert res.json_body["name"] == "My Group" def test_it_does_not_allow_auth_client_with_mismatched_authority( self, app, auth_client_header, factories, db_session ): group = factories.Group(authority="rando.biz") db_session.commit() group_payload = {"name": "My Group"} path = "/api/groups/{id}".format(id=group.pubid) res = app.patch_json( path, group_payload, headers=auth_client_header, expect_errors=True ) assert res.status_code == 404 def test_it_allows_groupid_from_auth_client_with_forwarded_user( self, app, auth_client_header, third_party_user, factories, db_session ): group = factories.Group( creator=third_party_user, authority=third_party_user.authority ) db_session.commit() headers = auth_client_header headers["X-Forwarded-User"] = third_party_user.userid group_payload = { "name": "My Group", "groupid": "group:[email protected]", } path = "/api/groups/{id}".format(id=group.pubid) res = app.patch_json(path, group_payload, headers=headers) assert res.status_code == 200 assert "groupid" in res.json_body assert res.json_body["groupid"] == "group:[email protected]" def test_it_returns_HTTP_Conflict_if_groupid_is_duplicate( self, app, auth_client_header, third_party_user, factories, db_session ): group1 = factories.Group( creator=third_party_user, authority=third_party_user.authority, groupid="group:[email protected]", ) group2 = factories.Group( creator=third_party_user, authority=third_party_user.authority, groupid="group:[email protected]", ) db_session.commit() headers = auth_client_header headers["X-Forwarded-User"] = third_party_user.userid group_payload = {"groupid": "group:[email protected]"} # Attempting to set group2's `groupid` to one already taken by group1 path = "/api/groups/{id}".format(id=group2.pubid) res = app.patch_json(path, group_payload, headers=headers, expect_errors=True) assert group1.groupid in res.json_body["reason"] assert res.status_code == 409 @pytest.fixture def first_party_user(db_session, factories): user = factories.User() db_session.commit() return user @pytest.fixture def first_party_group(db_session, factories, first_party_user): group = factories.Group( name="My First Group", description="Original description", creator=first_party_user, authority=first_party_user.authority, ) db_session.commit() return group @pytest.fixture def user_with_token(db_session, factories, first_party_user): token = factories.DeveloperToken(userid=first_party_user.userid) db_session.add(token) db_session.commit() return (first_party_user, token) @pytest.fixture def token_auth_header(user_with_token): user, token = user_with_token return {"Authorization": "Bearer {}".format(token.value)} @pytest.fixture def third_party_user(factories, db_session): user = factories.User(authority="thirdparty.com") db_session.commit() return user @pytest.fixture def auth_client(db_session, factories): auth_client = factories.ConfidentialAuthClient( authority="thirdparty.com", grant_type=GrantType.client_credentials ) db_session.commit() return auth_client @pytest.fixture def auth_client_header(auth_client): user_pass = "{client_id}:{secret}".format( client_id=auth_client.id, secret=auth_client.secret ) encoded = base64.standard_b64encode(user_pass.encode("utf-8")) return {"Authorization": "Basic {creds}".format(creds=encoded.decode("ascii"))}
py
1a4ebdd190c14f8e8f7debf0cab8d8b94fb00b4e
from dataclasses import dataclass

from bindings.gmd.code_list_value_type import CodeListValueType

__NAMESPACE__ = "http://www.isotc211.org/2005/gmd"


@dataclass
class MdScopeCode(CodeListValueType):
    class Meta:
        name = "MD_ScopeCode"
        namespace = "http://www.isotc211.org/2005/gmd"
py
1a4ebdd99caeec368d100e383fe68a93771e8901
from typing import Optional
from typing import Union

import pytest
from _pytest.config import Config
from _pytest.config import ExitCode
from _pytest.config.argparsing import Parser
from _pytest.fixtures import FixtureDef
from _pytest.fixtures import SubRequest


def pytest_addoption(parser: Parser) -> None:
    group = parser.getgroup("debugconfig")
    group.addoption(
        "--setupplan",
        "--setup-plan",
        action="store_true",
        help="Show what fixtures and tests would be executed but "
        "don't execute anything",
    )


@pytest.hookimpl(tryfirst=True)
def pytest_fixture_setup(
    fixturedef: FixtureDef[object], request: SubRequest
) -> Optional[object]:
    # Will return a dummy fixture if the setuponly option is provided.
    if request.config.option.setupplan:
        my_cache_key = fixturedef.cache_key(request)
        fixturedef.cached_result = (None, my_cache_key, None)
        return fixturedef.cached_result
    return None


@pytest.hookimpl(tryfirst=True)
def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]:
    if config.option.setupplan:
        config.option.setuponly = True
        config.option.setupshow = True
    return None
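# A minimal usage sketch for the plugin above, based only on the option and
# hooks it registers: with it installed, the flag below lists the fixtures and
# tests that would run without executing anything, because pytest_fixture_setup
# short-circuits fixture setup with a dummy cached result and pytest_cmdline_main
# forces the setup-only/setup-show reporting paths.
#
#     pytest --setup-plan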
py
1a4ebec92765bb1b1e4db4b37c46d419b86b878d
"""HTML slide show Exporter class""" #----------------------------------------------------------------------------- # Copyright (c) 2013, the IPython Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from IPython.utils.traitlets import Unicode from IPython.nbconvert import preprocessors from IPython.config import Config from .html import HTMLExporter #----------------------------------------------------------------------------- # Classes #----------------------------------------------------------------------------- class SlidesExporter(HTMLExporter): """Exports HTML slides with reveal.js""" file_extension = Unicode( 'slides.html', config=True, help="Extension of the file that should be written to disk" ) output_mimetype = 'text/html' default_template = Unicode('reveal', config=True, help="""Template of the data format to use. I.E. 'reveal'""") @property def default_config(self): c = Config({ 'RevealHelpPreprocessor': { 'enabled': True, }, }) c.merge(super(SlidesExporter,self).default_config) return c
py
1a4ebef9c1ea202f4ea476a6e06c0a92b3032031
import hashlib
from Crypto.Hash import SHA3_256
import time
import os

# Read files in 64 KiB chunks so the digest is streamed rather than loaded at once.
block = 65536
onemb = '/Users/dev/PycharmProjects/ComputerSecurity/Test1mb.txt'
onekb = '/Users/dev/PycharmProjects/ComputerSecurity/Test1kb.txt'


def sha256_hash(in_filename):
    fsz = os.path.getsize(in_filename)
    hashtime = time.time()
    hashobj = hashlib.sha256()
    with open(in_filename, 'rb') as afile:
        buff = afile.read(block)
        while len(buff) > 0:
            hashobj.update(buff)
            buff = afile.read(block)
    print(hashobj.hexdigest())
    elapsed = time.time() - hashtime
    print("Time elapsed : {:.10f}s".format(elapsed))
    print("Seconds per byte: {:.10f}".format(elapsed / fsz))


def sha512_hash(in_filename):
    fsz = os.path.getsize(in_filename)
    hashtime = time.time()
    hashobj = hashlib.sha512()
    with open(in_filename, 'rb') as afile:
        buff = afile.read(block)
        while len(buff) > 0:
            hashobj.update(buff)
            buff = afile.read(block)
    print(hashobj.hexdigest())
    elapsed = time.time() - hashtime
    print("Time elapsed : {:.10f}s".format(elapsed))
    print("Seconds per byte: {:.10f}".format(elapsed / fsz))


def sha3256_hash(in_filename):
    fsz = os.path.getsize(in_filename)
    hashtime = time.time()
    hashobj = SHA3_256.new()
    with open(in_filename, 'rb') as afile:
        buff = afile.read(block)
        while len(buff) > 0:
            hashobj.update(buff)
            buff = afile.read(block)
    print(hashobj.hexdigest())
    elapsed = time.time() - hashtime
    print("Time elapsed : {:.10f}s".format(elapsed))
    print("Seconds per byte: {:.10f}".format(elapsed / fsz))


def compute_hash():
    print("Hash Algorithm Choices: (1/2/3) ")
    print("1. SHA 256")
    print("2. SHA 512")
    print("3. SHA 3-256")
    choice = input("Please enter the choice: ").strip()
    if choice == '1':
        print("The hash of the 1KB file is: ")
        sha256_hash(onekb)
        print(" ")
        print("The hash of the 1MB file is: ")
        sha256_hash(onemb)
    elif choice == '2':
        print("The hash of the 1KB file is: ")
        sha512_hash(onekb)
        print(" ")
        print("The hash of the 1MB file is: ")
        sha512_hash(onemb)
    elif choice == '3':
        print("The hash of the 1KB file is: ")
        sha3256_hash(onekb)
        print(" ")
        print("The hash of the 1MB file is: ")
        sha3256_hash(onemb)
    else:
        print("Invalid choice. Exiting program")
        return 0


compute_hash()
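def one_shot_sha256(in_filename):
    # Illustrative cross-check (not invoked by the menu above): hashing the whole
    # file in a single call should yield the same digest as the streamed version.
    with open(in_filename, 'rb') as afile:
        return hashlib.sha256(afile.read()).hexdigest()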
py
1a4ebf9c3855019d20cd9482a2f17a4a83695ed3
import abc from sandbox.location.location import Location class LocationSelector: @abc.abstractmethod def check(self, location: Location) -> bool: raise NotImplementedError
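# Illustrative sketch of a concrete selector. The attribute read from `Location`
# (here `name`) is an assumption for the example and may not exist on the real
# class.
class NamedLocationSelector(LocationSelector):
    def __init__(self, name: str) -> None:
        self._name = name

    def check(self, location: Location) -> bool:
        return getattr(location, "name", None) == self._name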
py
1a4ec01d145004767d43bdc0ee4f2d0c088469a2
import os from collections import OrderedDict from itertools import chain import torch from torch import nn as nn from models.alexnet import Id from models.model_utils import ReverseLayerF from torch.autograd import Variable import numpy.random as npr import numpy as np import torch.nn.functional as F import random class AlexNetCaffe(nn.Module): def __init__(self, jigsaw_classes=1000, n_classes=100, domains=3, dropout=True): super(AlexNetCaffe, self).__init__() print("Using Caffe AlexNet") self.features = nn.Sequential(OrderedDict([ ("conv1", nn.Conv2d(3, 96, kernel_size=11, stride=4)), ("relu1", nn.ReLU(inplace=True)), ("pool1", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)), ("norm1", nn.LocalResponseNorm(5, 1.e-4, 0.75)), ("conv2", nn.Conv2d(96, 256, kernel_size=5, padding=2, groups=2)), ("relu2", nn.ReLU(inplace=True)), ("pool2", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)), ("norm2", nn.LocalResponseNorm(5, 1.e-4, 0.75)), ("conv3", nn.Conv2d(256, 384, kernel_size=3, padding=1)), ("relu3", nn.ReLU(inplace=True)), ("conv4", nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=2)), ("relu4", nn.ReLU(inplace=True)), ("conv5", nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2)), ("relu5", nn.ReLU(inplace=True)), ("pool5", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)), ])) self.classifier = nn.Sequential(OrderedDict([ ("fc6", nn.Linear(256 * 6 * 6, 4096)), ("relu6", nn.ReLU(inplace=True)), ("drop6", nn.Dropout() if dropout else Id()), ("fc7", nn.Linear(4096, 4096)), ("relu7", nn.ReLU(inplace=True)), ("drop7", nn.Dropout() if dropout else Id())])) self.jigsaw_classifier = nn.Linear(4096, jigsaw_classes) self.class_classifier = nn.Linear(4096, n_classes) # self.domain_classifier = nn.Sequential( # nn.Linear(256 * 6 * 6, 1024), # nn.ReLU(), # nn.Dropout(), # nn.Linear(1024, 1024), # nn.ReLU(), # nn.Dropout(), # nn.Linear(1024, domains)) def get_params(self, base_lr): return [{"params": self.features.parameters(), "lr": 0.}, {"params": chain(self.classifier.parameters(), self.jigsaw_classifier.parameters() , self.class_classifier.parameters()#, self.domain_classifier.parameters() ), "lr": base_lr}] def is_patch_based(self): return False def forward(self, x, lambda_val=0): x = self.features(x*57.6) #57.6 is the magic number needed to bring torch data back to the range of caffe data, based on used std x = x.view(x.size(0), -1) #d = ReverseLayerF.apply(x, lambda_val) x = self.classifier(x) return self.jigsaw_classifier(x), self.class_classifier(x)#, self.domain_classifier(d) class Flatten(nn.Module): def forward(self, x): return x.view(x.size(0), -1) class AlexNetCaffeAvgPool(AlexNetCaffe): def __init__(self, jigsaw_classes=1000, n_classes=100): super().__init__() print("Global Average Pool variant") self.features = nn.Sequential(OrderedDict([ ("conv1", nn.Conv2d(3, 96, kernel_size=11, stride=4)), ("relu1", nn.ReLU(inplace=True)), ("pool1", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)), ("norm1", nn.LocalResponseNorm(5, 1.e-4, 0.75)), ("conv2", nn.Conv2d(96, 256, kernel_size=5, padding=2, groups=2)), ("relu2", nn.ReLU(inplace=True)), ("pool2", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)), ("norm2", nn.LocalResponseNorm(5, 1.e-4, 0.75)), ("conv3", nn.Conv2d(256, 384, kernel_size=3, padding=1)), ("relu3", nn.ReLU(inplace=True)), ("conv4", nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=2)), ("relu4", nn.ReLU(inplace=True)), ("conv5", nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2)), # ("relu5", nn.ReLU(inplace=True)), # ("pool5", 
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)), ])) self.classifier = nn.Sequential( nn.BatchNorm2d(256), nn.ReLU(inplace=True), nn.Conv2d(256, 512, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(512), nn.ReLU(inplace=True), nn.Conv2d(512, 1024, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(1024), nn.ReLU(inplace=True)) self.jigsaw_classifier = nn.Sequential( nn.Conv2d(1024, 128, kernel_size=3, stride=2, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), Flatten(), nn.Linear(128 * 6 * 6, jigsaw_classes) ) self.class_classifier = nn.Sequential( nn.Conv2d(1024, n_classes, kernel_size=3, padding=1, bias=False), nn.AvgPool2d(13), Flatten(), # nn.Linear(1024, n_classes) ) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') nn.init.constant_(m.bias, 0.) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) class AlexNetCaffeFC7(AlexNetCaffe): def __init__(self, jigsaw_classes=1000, n_classes=100, dropout=True): super(AlexNetCaffeFC7, self).__init__() print("FC7 branching variant") self.features = nn.Sequential(OrderedDict([ ("conv1", nn.Conv2d(3, 96, kernel_size=11, stride=4)), ("relu1", nn.ReLU(inplace=True)), ("pool1", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)), ("norm1", nn.LocalResponseNorm(5, 1.e-4, 0.75)), ("conv2", nn.Conv2d(96, 256, kernel_size=5, padding=2, groups=2)), ("relu2", nn.ReLU(inplace=True)), ("pool2", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)), ("norm2", nn.LocalResponseNorm(5, 1.e-4, 0.75)), ("conv3", nn.Conv2d(256, 384, kernel_size=3, padding=1)), ("relu3", nn.ReLU(inplace=True)), ("conv4", nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=2)), ("relu4", nn.ReLU(inplace=True)), ("conv5", nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2)), ("relu5", nn.ReLU(inplace=True)), ("pool5", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)), ])) self.classifier = nn.Sequential(OrderedDict([ ("fc6", nn.Linear(256 * 6 * 6, 4096)), ("relu6", nn.ReLU(inplace=True)), ("drop6", nn.Dropout() if dropout else Id())])) self.jigsaw_classifier = nn.Sequential(OrderedDict([ ("fc7", nn.Linear(4096, 4096)), ("relu7", nn.ReLU(inplace=True)), ("drop7", nn.Dropout()), ("fc8", nn.Linear(4096, jigsaw_classes))])) self.class_classifier = nn.Sequential(OrderedDict([ ("fc7", nn.Linear(4096, 4096)), ("relu7", nn.ReLU(inplace=True)), ("drop7", nn.Dropout()), ("fc8", nn.Linear(4096, n_classes))])) def caffenet(jigsaw_classes, classes): model = AlexNetCaffe(jigsaw_classes, classes) for m in model.modules(): if isinstance(m, nn.Linear): nn.init.xavier_uniform_(m.weight, .1) nn.init.constant_(m.bias, 0.) 
state_dict = torch.load(os.path.join(os.path.dirname(__file__), "pretrained/alexnet_caffe.pth.tar")) del state_dict["classifier.fc8.weight"] del state_dict["classifier.fc8.bias"] model.load_state_dict(state_dict, strict=False) return model def caffenet_gap(jigsaw_classes, classes): model = AlexNetCaffe(jigsaw_classes, classes) state_dict = torch.load(os.path.join(os.path.dirname(__file__), "pretrained/alexnet_caffe.pth.tar")) del state_dict["classifier.fc6.weight"] del state_dict["classifier.fc6.bias"] del state_dict["classifier.fc7.weight"] del state_dict["classifier.fc7.bias"] del state_dict["classifier.fc8.weight"] del state_dict["classifier.fc8.bias"] model.load_state_dict(state_dict, strict=False) # weights are initialized in the constructor return model def caffenet_fc7(jigsaw_classes, classes): model = AlexNetCaffeFC7(jigsaw_classes, classes) state_dict = torch.load("models/pretrained/alexnet_caffe.pth.tar") state_dict["jigsaw_classifier.fc7.weight"] = state_dict["classifier.fc7.weight"] state_dict["jigsaw_classifier.fc7.bias"] = state_dict["classifier.fc7.bias"] state_dict["class_classifier.fc7.weight"] = state_dict["classifier.fc7.weight"] state_dict["class_classifier.fc7.bias"] = state_dict["classifier.fc7.bias"] del state_dict["classifier.fc8.weight"] del state_dict["classifier.fc8.bias"] del state_dict["classifier.fc7.weight"] del state_dict["classifier.fc7.bias"] model.load_state_dict(state_dict, strict=False) nn.init.xavier_uniform_(model.jigsaw_classifier.fc8.weight, .1) nn.init.constant_(model.jigsaw_classifier.fc8.bias, 0.) nn.init.xavier_uniform_(model.class_classifier.fc8.weight, .1) nn.init.constant_(model.class_classifier.fc8.bias, 0.) return model class AlexNetCaffeRSC(nn.Module): def __init__(self, n_classes=100, percent=6, dropout=True): super(AlexNetCaffeRSC, self).__init__() print("Using Caffe AlexNet") self.percent = percent print("Using Total Percent Sample: 1 / {}".format(self.percent)) self.features = nn.Sequential(OrderedDict([ ("conv1", nn.Conv2d(3, 96, kernel_size=11, stride=4)), ("relu1", nn.ReLU(inplace=True)), ("pool1", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)), ("norm1", nn.LocalResponseNorm(5, 1.e-4, 0.75)), ("conv2", nn.Conv2d(96, 256, kernel_size=5, padding=2, groups=2)), ("relu2", nn.ReLU(inplace=True)), ("pool2", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)), ("norm2", nn.LocalResponseNorm(5, 1.e-4, 0.75)), ("conv3", nn.Conv2d(256, 384, kernel_size=3, padding=1)), ("relu3", nn.ReLU(inplace=True)), ("conv4", nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=2)), ("relu4", nn.ReLU(inplace=True)), ("conv5", nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2)), ("relu5", nn.ReLU(inplace=True)), ("pool5", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)), ])) self.classifier = nn.Sequential(OrderedDict([ ("fc6", nn.Linear(256 * 6 * 6, 4096)), ("relu6", nn.ReLU(inplace=True)), ("drop6", nn.Dropout() if dropout else Id()), ("fc7", nn.Linear(4096, 4096)), ("relu7", nn.ReLU(inplace=True)), ("drop7", nn.Dropout() if dropout else Id())])) # self.jigsaw_classifier = nn.Linear(4096, jigsaw_classes) self.class_classifier = nn.Linear(4096, n_classes) # self.domain_classifier = nn.Sequential( # nn.Linear(256 * 6 * 6, 1024), # nn.ReLU(), # nn.Dropout(), # nn.Linear(1024, 1024), # nn.ReLU(), # nn.Dropout(), # nn.Linear(1024, domains)) # def get_params(self, base_lr): # return [{"params": self.features.parameters(), "lr": 0.}, # {"params": chain(self.classifier.parameters() # , self.class_classifier.parameters()#, 
self.domain_classifier.parameters() # ), "lr": base_lr}] def is_patch_based(self): return False def forward(self, x, gt=None, flag=None): # x = self.features(x*57.6) #57.6 is the magic number needed to bring torch data back to the range of caffe data, based on used std # x = x.view(x.size(0), -1) # #d = ReverseLayerF.apply(x, lambda_val) # x = self.classifier(x) # return self.class_classifier(x)#, self.domain_classifier(d) # ------------------------------------------------------------------- x = self.features(x * 57.6) # x = self.features.conv1(x * 57.6) # x = self.features.relu1(x) # x = self.features.pool1(x) # x = self.features.norm1(x) # x = self.features.conv2(x) # x = self.features.relu2(x) # x = self.features.pool2(x) # x = self.features.norm2(x) # x = self.features.conv3(x) # x = self.features.relu3(x) # x = self.features.conv4(x) # x = self.features.relu4(x) # x = self.features.conv5(x) # x = self.features.relu5(x) # x = self.features.pool5(x) if flag: self.eval() x_new = x.clone().detach() # x_new = self.features.conv4(x_new) # x_new = self.features.relu4(x_new) # x_new = self.features.conv5(x_new) # x_new = self.features.relu5(x_new) # x_new = self.features.pool5(x_new) x_new = Variable(x_new.data, requires_grad=True) x_new_view = x_new.view(x_new.size(0), -1) x_new_view = self.classifier(x_new_view) output = self.class_classifier(x_new_view) class_num = output.shape[1] index = gt num_rois = x_new.shape[0] num_channel = x_new.shape[1] H = x_new.shape[2] HW = x_new.shape[2] * x_new.shape[3] one_hot = torch.zeros((1), dtype=torch.float32).cuda() one_hot = Variable(one_hot, requires_grad=False) sp_i = torch.ones([2, num_rois]).long() sp_i[0, :] = torch.arange(num_rois) sp_i[1, :] = index sp_v = torch.ones([num_rois]) one_hot_sparse = torch.sparse.FloatTensor(sp_i, sp_v, torch.Size([num_rois, class_num])).to_dense().cuda() one_hot_sparse = Variable(one_hot_sparse, requires_grad=False) # [256, 21] one_hot = torch.sum(output * one_hot_sparse) self.zero_grad() one_hot.backward() grads_val = x_new.grad.clone().detach() grad_channel_mean = torch.mean(grads_val.view(num_rois, num_channel, -1), dim=2) channel_mean = grad_channel_mean spatial_mean = torch.mean(grads_val, dim=1) spatial_mean = spatial_mean.view(num_rois, H, H).view(num_rois, HW) self.zero_grad() choose_one = random.randint(0, 9) if choose_one <= 4: # ---------------------------- spatial ----------------------- spatial_drop_num = int(HW * 1 / 3.0) th_mask_value = torch.sort(spatial_mean, dim=1, descending=True)[0][:, spatial_drop_num] th_mask_value = th_mask_value.view(num_rois, 1).expand(num_rois, HW) mask_all_cuda = torch.where(spatial_mean >= th_mask_value, torch.zeros(spatial_mean.shape).cuda(), torch.ones(spatial_mean.shape).cuda()) mask_all = mask_all_cuda.detach().cpu().numpy() for q in range(num_rois): mask_all_temp = np.ones((HW), dtype=np.float32) zero_index = np.where(mask_all[q, :] == 0)[0] num_zero_index = zero_index.size if num_zero_index >= spatial_drop_num: dumy_index = npr.choice(zero_index, size=spatial_drop_num, replace=False) else: zero_index = np.arange(HW) dumy_index = npr.choice(zero_index, size=spatial_drop_num, replace=False) mask_all_temp[dumy_index] = 0 mask_all[q, :] = mask_all_temp mask_all = torch.from_numpy(mask_all.reshape(num_rois, 7, 7)).cuda() mask_all = mask_all.view(num_rois, 1, 7, 7) else: # -------------------------- channel ---------------------------- mask_all = torch.zeros((num_rois, num_channel, 1, 1)).cuda() vector_thresh_percent = int(num_channel * 1 / 3.0) vector_thresh_value = 
torch.sort(channel_mean, dim=1, descending=True)[0][:, vector_thresh_percent] vector_thresh_value = vector_thresh_value.view(num_rois, 1).expand(num_rois, num_channel) vector = torch.where(channel_mean > vector_thresh_value, torch.zeros(channel_mean.shape).cuda(), torch.ones(channel_mean.shape).cuda()) vector_all = vector.detach().cpu().numpy() channel_drop_num = int(num_channel * 1 / 3.2) vector_all_new = np.ones((num_rois, num_channel), dtype=np.float32) for q in range(num_rois): vector_all_temp = np.ones((num_channel), dtype=np.float32) zero_index = np.where(vector_all[q, :] == 0)[0] num_zero_index = zero_index.size if num_zero_index >= channel_drop_num: dumy_index = npr.choice(zero_index, size=channel_drop_num, replace=False) else: zero_index = np.arange(num_channel) dumy_index = npr.choice(zero_index, size=channel_drop_num, replace=False) vector_all_temp[dumy_index] = 0 vector_all_new[q, :] = vector_all_temp vector = torch.from_numpy(vector_all_new).cuda() for m in range(num_rois): index_channel = vector[m, :].nonzero()[:, 0].long() index_channel = index_channel.detach().cpu().numpy().tolist() mask_all[m, index_channel, :, :] = 1 # ----------------------------------- batch ---------------------------------------- cls_prob_before = F.softmax(output, dim=1) x_new_view_after = x_new * mask_all x_new_view_after = x_new_view_after.view(x_new_view_after.size(0), -1) x_new_view_after = self.classifier(x_new_view_after) x_new_view_after = self.class_classifier(x_new_view_after) cls_prob_after = F.softmax(x_new_view_after, dim=1) sp_i = torch.ones([2, num_rois]).long() sp_i[0, :] = torch.arange(num_rois) sp_i[1, :] = index sp_v = torch.ones([num_rois]) one_hot_sparse = torch.sparse.FloatTensor(sp_i, sp_v, torch.Size([num_rois, class_num])).to_dense().cuda() before_vector = torch.sum(one_hot_sparse * cls_prob_before, dim=1) after_vector = torch.sum(one_hot_sparse * cls_prob_after, dim=1) change_vector = before_vector - after_vector - 0.0001 change_vector = torch.where(change_vector > 0, change_vector, torch.zeros(change_vector.shape).cuda()) th_fg_value = torch.sort(change_vector, dim=0, descending=True)[0][int(round(float(num_rois) * 1 / 3.0))] drop_index_fg = change_vector.gt(th_fg_value) ignore_index_fg = 1 - drop_index_fg not_01_ignore_index_fg = ignore_index_fg.nonzero()[:, 0] mask_all[not_01_ignore_index_fg.long(), :] = 1 self.train() mask_all = Variable(mask_all, requires_grad=True) x = x * mask_all x = x.view(x.size(0), -1) x = self.classifier(x) return self.class_classifier(x) # , self.domain_classifier(d) def caffenetRSC(classes, percent): model = AlexNetCaffeRSC(classes, percent) for m in model.modules(): if isinstance(m, nn.Linear): nn.init.xavier_uniform_(m.weight, .1) nn.init.constant_(m.bias, 0.) state_dict = torch.load(os.path.join(os.path.dirname(__file__), "pretrained/alexnet_caffe.pth.tar")) del state_dict["classifier.fc8.weight"] del state_dict["classifier.fc8.bias"] model.load_state_dict(state_dict, strict=False) return model
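# Illustrative usage sketch for the plain (non-RSC) model defined above. The
# batch size, class counts and the 227x227 input resolution are assumptions for
# the example and are not taken from the original training code.
#
#   model = AlexNetCaffe(jigsaw_classes=31, n_classes=7)
#   images = torch.randn(8, 3, 227, 227)
#   jigsaw_logits, class_logits = model(images)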
py
1a4ec0d1c6cc898d42cf63c83560b52d285bc0d5
from flask import render_template
from app import app
from .request import get_articles, get_sources

# Views
@app.route('/')
def index():
    '''
    View root page function that returns the index page and its data
    '''
    all_sources = get_sources()
    title = 'Trending Now in the World | Ready News'
    return render_template('index.html', title=title, sources=all_sources)

@app.route('/source/<string:id>')
def source(id):
    '''
    View articles from one source
    '''
    source = get_articles(id)
    title = 'Trending Now in the World | Get More'
    return render_template('source.html', title=title, source=source)
py
1a4ec149ebc8c8f75929ddc082c1985322f5d67b
# -*- coding: utf-8 -*- # # Copyright (C) 2019 Chris Caron <[email protected]> # All rights reserved. # # This code is licensed under the MIT License. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files(the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and / or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions : # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from __future__ import absolute_import from __future__ import print_function from .NotifyBase import NotifyBase from ..common import NotifyImageSize from ..common import NotifyType from ..utils import parse_bool from ..AppriseLocale import gettext_lazy as _ # Default our global support flag NOTIFY_GNOME_SUPPORT_ENABLED = False try: # 3rd party modules (Gnome Only) import gi # require_version() call is required otherwise we generate a warning gi.require_version("Notify", "0.7") # We can import the actual libraries we care about now: from gi.repository import Notify from gi.repository import GdkPixbuf # We're good to go! NOTIFY_GNOME_SUPPORT_ENABLED = True except (ImportError, ValueError, AttributeError): # No problem; we just simply can't support this plugin; we could # be in microsoft windows, or we just don't have the python-gobject # library available to us (or maybe one we don't support)? 
# Alternativey A ValueError will get thrown upon calling # gi.require_version() if the requested Notify namespace isn't available pass # Urgencies class GnomeUrgency(object): LOW = 0 NORMAL = 1 HIGH = 2 GNOME_URGENCIES = { GnomeUrgency.LOW: 'low', GnomeUrgency.NORMAL: 'normal', GnomeUrgency.HIGH: 'high', } GNOME_URGENCY_MAP = { # Maps against string 'low' 'l': GnomeUrgency.LOW, # Maps against string 'moderate' 'm': GnomeUrgency.LOW, # Maps against string 'normal' 'n': GnomeUrgency.NORMAL, # Maps against string 'high' 'h': GnomeUrgency.HIGH, # Maps against string 'emergency' 'e': GnomeUrgency.HIGH, # Entries to additionally support (so more like Gnome's API) '0': GnomeUrgency.LOW, '1': GnomeUrgency.NORMAL, '2': GnomeUrgency.HIGH, } class NotifyGnome(NotifyBase): """ A wrapper for local Gnome Notifications """ # Set our global enabled flag enabled = NOTIFY_GNOME_SUPPORT_ENABLED requirements = { # Define our required packaging in order to work 'details': _('A local Gnome environment is required.') } # The default descriptive name associated with the Notification service_name = _('Gnome Notification') # The service URL service_url = 'https://www.gnome.org/' # The default protocol protocol = 'gnome' # A URL that takes you to the setup/help of the specific protocol setup_url = 'https://github.com/caronc/apprise/wiki/Notify_gnome' # Allows the user to specify the NotifyImageSize object image_size = NotifyImageSize.XY_128 # Disable throttle rate for Gnome requests since they are normally # local anyway request_rate_per_sec = 0 # Limit results to just the first 10 line otherwise there is just to much # content to display body_max_line_count = 10 # A title can not be used for Gnome Messages. Setting this to zero will # cause any title (if defined) to get placed into the message body. title_maxlen = 0 # Define object templates templates = ( '{schema}://', ) # Define our template arguments template_args = dict(NotifyBase.template_args, **{ 'urgency': { 'name': _('Urgency'), 'type': 'choice:int', 'values': GNOME_URGENCIES, 'default': GnomeUrgency.NORMAL, }, 'priority': { # Apprise uses 'priority' everywhere; it's just a nice consistent # feel to be able to use it here as well. Just map the # value back to 'priority' 'alias_of': 'urgency', }, 'image': { 'name': _('Include Image'), 'type': 'bool', 'default': True, 'map_to': 'include_image', }, }) def __init__(self, urgency=None, include_image=True, **kwargs): """ Initialize Gnome Object """ super(NotifyGnome, self).__init__(**kwargs) # The urgency of the message self.urgency = int( NotifyGnome.template_args['urgency']['default'] if urgency is None else next(( v for k, v in GNOME_URGENCY_MAP.items() if str(urgency).lower().startswith(k)), NotifyGnome.template_args['urgency']['default'])) # Track whether or not we want to send an image with our notification # or not. 
self.include_image = include_image return def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs): """ Perform Gnome Notification """ try: # App initialization Notify.init(self.app_id) # image path icon_path = None if not self.include_image \ else self.image_path(notify_type, extension='.ico') # Build message body notification = Notify.Notification.new(body) # Assign urgency notification.set_urgency(self.urgency) # Always call throttle before any remote server i/o is made self.throttle() if icon_path: try: # Use Pixbuf to create the proper image type image = GdkPixbuf.Pixbuf.new_from_file(icon_path) # Associate our image to our notification notification.set_icon_from_pixbuf(image) notification.set_image_from_pixbuf(image) except Exception as e: self.logger.warning( "Could not load Gnome notification icon ({}): {}" .format(icon_path, e)) notification.show() self.logger.info('Sent Gnome notification.') except Exception: self.logger.warning('Failed to send Gnome notification.') self.logger.exception('Gnome Exception') return False return True def url(self, privacy=False, *args, **kwargs): """ Returns the URL built dynamically based on specified arguments. """ # Define any URL parameters params = { 'image': 'yes' if self.include_image else 'no', 'urgency': GNOME_URGENCIES[self.template_args['urgency']['default']] if self.urgency not in GNOME_URGENCIES else GNOME_URGENCIES[self.urgency], } # Extend our parameters params.update(self.url_parameters(privacy=privacy, *args, **kwargs)) return '{schema}://?{params}'.format( schema=self.protocol, params=NotifyGnome.urlencode(params), ) @staticmethod def parse_url(url): """ There are no parameters nessisary for this protocol; simply having gnome:// is all you need. This function just makes sure that is in place. """ results = NotifyBase.parse_url(url, verify_host=False) # Include images with our message results['include_image'] = \ parse_bool(results['qsd'].get('image', True)) # Gnome supports urgency, but we we also support the keyword priority # so that it is consistent with some of the other plugins if 'priority' in results['qsd'] and len(results['qsd']['priority']): # We intentionally store the priority in the urgency section results['urgency'] = \ NotifyGnome.unquote(results['qsd']['priority']) if 'urgency' in results['qsd'] and len(results['qsd']['urgency']): results['urgency'] = \ NotifyGnome.unquote(results['qsd']['urgency']) return results
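# Illustrative usage sketch (requires a local Gnome session with the GObject
# introspection bindings installed; the notification text is made up):
#
#   import apprise
#
#   apobj = apprise.Apprise()
#   apobj.add('gnome://?urgency=high&image=no')
#   apobj.notify(body='Backup completed', title='')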
py
1a4ec165fa00506cbc25be497e539fe9115eeaac
# -*- coding: utf-8 -*- """ WSGI """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rg.settings") # pylint: disable=invalid-name application = get_wsgi_application()
py
1a4ec1dd4f9f1a5c03a292194d427aea17ca70eb
import abc import collections import copy import datetime import functools import logging from typing import Any, Callable, Dict, List, Optional, Tuple, Union import pandas as pd import core.dataflow.core as cdc import core.dataflow.utils as cdu import helpers.dbg as dbg _LOG = logging.getLogger(__name__) # TODO(*): Create a dataflow types file. _COL_TYPE = Union[int, str] _PANDAS_DATE_TYPE = Union[str, pd.Timestamp, datetime.datetime] _TO_LIST_MIXIN_TYPE = Union[List[_COL_TYPE], Callable[[], List[_COL_TYPE]]] # ############################################################################# # Abstract node classes with sklearn-style interfaces # ############################################################################# class FitPredictNode(cdc.Node, abc.ABC): """ Class with abstract sklearn-style `fit` and `predict` functions. The class contains an optional state that can be serialized/deserialized with `get_fit_state()` and `set_fit_state()`. Nodes may store a dictionary of information for each method following the method's invocation. """ def __init__( self, nid: str, inputs: Optional[List[str]] = None, outputs: Optional[List[str]] = None, ) -> None: if inputs is None: inputs = ["df_in"] if outputs is None: outputs = ["df_out"] super().__init__(nid=nid, inputs=inputs, outputs=outputs) self._info = collections.OrderedDict() @abc.abstractmethod def fit(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]: pass @abc.abstractmethod def predict(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]: pass def get_fit_state(self) -> Dict[str, Any]: return {} def set_fit_state(self, fit_state: Dict[str, Any]) -> None: pass def get_info( self, method: str ) -> Optional[Union[str, collections.OrderedDict]]: # TODO(Paul): Add a dassert_getattr function to use here and in core. dbg.dassert_isinstance(method, str) dbg.dassert(getattr(self, method)) if method in self._info.keys(): return self._info[method] # TODO(Paul): Maybe crash if there is no info. _LOG.warning("No info found for nid=%s, method=%s", self.nid, method) return None def _set_info(self, method: str, values: collections.OrderedDict) -> None: dbg.dassert_isinstance(method, str) dbg.dassert(getattr(self, method)) dbg.dassert_isinstance(values, collections.OrderedDict) # Save the info in the node: we make a copy just to be safe. self._info[method] = copy.copy(values) class DataSource(FitPredictNode, abc.ABC): """ A source node that generates data for cross-validation from the passed data frame. Derived classes inject the data as a DataFrame in this class at construction time (e.g., from a passed DataFrame, reading from a file) This node implements the interface of `FitPredictNode` allowing to filter data for fitting and predicting based on intervals. """ def __init__(self, nid: str, outputs: Optional[List[str]] = None) -> None: if outputs is None: outputs = ["df_out"] # TODO(gp): This seems a common function. We can factor it out in a # `validate_string_list()`. # Do not allow any empty list, repetition, or empty strings. dbg.dassert(outputs) dbg.dassert_no_duplicates(outputs) for output in outputs: dbg.dassert_ne(output, "") super().__init__(nid, inputs=[], outputs=outputs) # This data is initialized by the derived classes depending on their semantics. self.df = None self._fit_intervals = None self._predict_intervals = None self._predict_idxs = None def set_fit_intervals(self, intervals: List[Tuple[Any, Any]]) -> None: """ Set the intervals to be used to generate data for the fit stage. 
:param intervals: a list of closed time intervals like [start1, end1], [start2, end2]. `None` boundary is interpreted as data start/end """ self._validate_intervals(intervals) self._fit_intervals = intervals # `DataSource` uses data passed at construction time, so it does not need a # `df_in` in either `fit()` or `predict()` as a typical `FitPredictNode` does. # For this reason the function signature is different. # pylint: disable=arguments-differ def fit(self) -> Dict[str, pd.DataFrame]: """ :return: training set as df """ if self._fit_intervals is not None: idx_slices = [ self.df.loc[interval[0] : interval[1]].index for interval in self._fit_intervals ] idx = functools.reduce(lambda x, y: x.union(y), idx_slices) fit_df = self.df.loc[idx] else: fit_df = self.df fit_df = fit_df.copy() dbg.dassert(not fit_df.empty, "`fit_df` is empty") # Update `info`. info = collections.OrderedDict() info["fit_df_info"] = cdu.get_df_info_as_string(fit_df) self._set_info("fit", info) return {self.output_names[0]: fit_df} def set_predict_intervals(self, intervals: List[Tuple[Any, Any]]) -> None: """ Same as `set_fit_intervals()`, but for the predict stage. """ # TODO(*): Warn if intervals overlap with `fit` intervals. # TODO(*): Maybe enforce that the intervals be ordered. self._validate_intervals(intervals) self._predict_intervals = intervals # pylint: disable=arguments-differ def predict(self) -> Dict[str, pd.DataFrame]: """ :return: test set as df """ if self._predict_intervals is not None: idx_slices = [ self.df.loc[interval[0] : interval[1]].index for interval in self._predict_intervals ] idx = functools.reduce(lambda x, y: x.union(y), idx_slices) predict_df = self.df.loc[idx].copy() else: predict_df = self.df.copy() dbg.dassert(not predict_df.empty) # Update `info`. info = collections.OrderedDict() info["predict_df_info"] = cdu.get_df_info_as_string(predict_df) self._set_info("predict", info) return {self.output_names[0]: predict_df} def get_df(self) -> pd.DataFrame: dbg.dassert_is_not(self.df, None, "No DataFrame found!") return self.df # TODO(gp): This is a nice function to move to `dataflow/utils.py`. @staticmethod def _validate_intervals(intervals: List[Tuple[Any, Any]]) -> None: dbg.dassert_isinstance(intervals, list) for interval in intervals: dbg.dassert_eq(len(interval), 2) if interval[0] is not None and interval[1] is not None: dbg.dassert_lte(interval[0], interval[1]) class Transformer(FitPredictNode, abc.ABC): """ Single-input single-output node calling a stateless transformation. The transformation is user-defined and called before `fit()` and `predict()`. """ # TODO(Paul): Consider giving users the option of renaming the single # input and single output (but verify there is only one of each). def __init__(self, nid: str) -> None: super().__init__(nid) def fit(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]: dbg.dassert_no_duplicates(df_in.columns) # Transform the input df. df_out, info = self._transform(df_in) dbg.dassert_no_duplicates(df_out.columns) # Update `info`. self._set_info("fit", info) return {"df_out": df_out} def predict(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]: dbg.dassert_no_duplicates(df_in.columns) # Transform the input df. df_out, info = self._transform(df_in) dbg.dassert_no_duplicates(df_out.columns) # Update `info`. 
self._set_info("predict", info) return {"df_out": df_out} @abc.abstractmethod def _transform( self, df: pd.DataFrame ) -> Tuple[pd.DataFrame, collections.OrderedDict]: """ :return: df, info """ # ############################################################################# # Plumbing nodes # ############################################################################# class YConnector(FitPredictNode): """ Create an output dataframe from two input dataframes. """ # TODO(Paul): Support different input/output names. def __init__( self, nid: str, connector_func: Callable[..., pd.DataFrame], connector_kwargs: Optional[Any] = None, ) -> None: """ :param nid: unique node id :param connector_func: * Merge ``` connector_func = lambda df_in1, df_in2, **connector_kwargs: df_in1.merge(df_in2, **connector_kwargs) ``` * Reindexing ``` connector_func = lambda df_in1, df_in2, connector_kwargs: df_in1.reindex(index=df_in2.index, **connector_kwargs) ``` * User-defined functions ``` # my_func(df_in1, df_in2, **connector_kwargs) connector_func = my_func ``` :param connector_kwargs: kwargs associated with `connector_func` """ super().__init__(nid, inputs=["df_in1", "df_in2"]) self._connector_func = connector_func self._connector_kwargs = connector_kwargs or {} self._df_in1_col_names = None self._df_in2_col_names = None def get_df_in1_col_names(self) -> List[str]: """ Allow introspection on column names of input dataframe #1. """ return self._get_col_names(self._df_in1_col_names) def get_df_in2_col_names(self) -> List[str]: """ Allow introspection on column names of input dataframe #2. """ return self._get_col_names(self._df_in2_col_names) # pylint: disable=arguments-differ def fit( self, df_in1: pd.DataFrame, df_in2: pd.DataFrame ) -> Dict[str, pd.DataFrame]: df_out, info = self._apply_connector_func(df_in1, df_in2) self._set_info("fit", info) return {"df_out": df_out} # pylint: disable=arguments-differ def predict( self, df_in1: pd.DataFrame, df_in2: pd.DataFrame ) -> Dict[str, pd.DataFrame]: df_out, info = self._apply_connector_func(df_in1, df_in2) self._set_info("predict", info) return {"df_out": df_out} def _apply_connector_func( self, df_in1: pd.DataFrame, df_in2: pd.DataFrame ) -> Tuple[pd.DataFrame, Dict[str, Any]]: self._df_in1_col_names = df_in1.columns.tolist() self._df_in2_col_names = df_in2.columns.tolist() # TODO(Paul): Add meaningful info. df_out = self._connector_func(df_in1, df_in2, **self._connector_kwargs) info = collections.OrderedDict() info["df_merged_info"] = cdu.get_df_info_as_string(df_out) return df_out, info @staticmethod def _get_col_names(col_names: List[str]) -> List[str]: dbg.dassert_is_not( col_names, None, "No column names. This may indicate " "an invocation prior to graph execution.", ) return col_names class ColModeMixin: """ Select columns to propagate in output dataframe. TODO(*): Refactor this so that it has clear pre and post processing stages. """ def _apply_col_mode( self, df_in: pd.DataFrame, df_out: pd.DataFrame, cols: Optional[List[Any]] = None, col_rename_func: Optional[Callable[[Any], Any]] = None, col_mode: Optional[str] = None, ) -> pd.DataFrame: """ Merge transformed dataframe with original dataframe. :param df_in: original dataframe :param df_out: transformed dataframe :param cols: columns in `df_in` that were transformed to obtain `df_out` - `None` defaults to all columns in `df_out` :param col_mode: Determines what columns are propagated. 
- "merge_all" (default): perform an outer merge between the - "replace_selected": - "replace_all": all columns are propagated :param col_rename_func: function for naming transformed columns, e.g., `lambda x: "zscore_" + x` - `None` defaults to identity transform :return: dataframe with columns selected by `col_mode` """ dbg.dassert_isinstance(df_in, pd.DataFrame) dbg.dassert_isinstance(df_out, pd.DataFrame) dbg.dassert(cols is None or isinstance(cols, list)) cols = cols or df_out.columns.tolist() # col_rename_func = col_rename_func or (lambda x: x) dbg.dassert_isinstance(col_rename_func, collections.Callable) # col_mode = col_mode or "merge_all" # Rename transformed columns. df_out = df_out.rename(columns=col_rename_func) self._transformed_col_names = df_out.columns.tolist() # Select columns to return. if col_mode == "merge_all": shared_columns = df_out.columns.intersection(df_in.columns) dbg.dassert( shared_columns.empty, "Transformed column names `%s` conflict with existing column " "names `%s`.", df_out.columns, df_in.columns, ) df_out = df_in.merge( df_out, how="outer", left_index=True, right_index=True ) elif col_mode == "replace_selected": df_in_not_transformed_cols = df_in.columns.drop(cols) dbg.dassert( df_in_not_transformed_cols.intersection(df_out.columns).empty, "Transformed column names `%s` conflict with existing column " "names `%s`.", df_out.columns, df_in_not_transformed_cols, ) df_out = df_in.drop(columns=cols).merge( df_out, left_index=True, right_index=True ) elif col_mode == "replace_all": pass else: dbg.dfatal("Unsupported column mode `%s`", col_mode) dbg.dassert_no_duplicates(df_out.columns.tolist()) return df_out # ############################################################################# # Column processing helpers # ############################################################################# class GroupedColDfToDfColProcessor: """ Provides dataflow processing wrappers for dataframe-to-dataframe functions. Examples: 1. Suppose we want to learn one model per instrument given a dataframe `df` with multilevel columns ``` feat1 feat2 y MN0 MN1 MN2 MN3 MN0 MN1 MN2 MN3 MN0 MN1 MN2 MN3 ``` Then, to `preprocess()` we pass in a list of tuples, i.e., `col_groups = [("feat1",), ("feat2",), ("y",)]. The function `preprocess()` returns a dictionary keyed by `MN0`, ..., `MN3`, with values consisting of dataframes with columns ``` feat1 feat2 y ``` Suppose the learning step returns a dataframe with column "y_hat" (one dataframe for each input dataframe). We then apply `postprocess()` to the dictionary of results, taking `col_group = (,)`, to obtain a single dataframe with multilevel columns ``` y_hat MN0 MN1 MN2 MN3 ``` """ @staticmethod def preprocess( df: pd.DataFrame, col_groups: List[Tuple[_COL_TYPE]], ) -> Dict[_COL_TYPE, pd.DataFrame]: """ Provides wrappers for transformations operating on many columns. :param df: a dataframe with multilevel columns :param col_groups: a collection of tuples specifying all but column leaves. All tuples provided should provide access to the same set of leaf values (the leaf column names are the same). All tuples should have the same length. :return: a dictionary of single-column-level dataframes indexed by the selected leaf column names of `df`. To the single-column-level dataframe has column names generated from the last tuple positions of the tuples in `col_groups`. """ # The list `col_groups` should be nonempty and not contain any # duplicates. 
dbg.dassert_isinstance(col_groups, list) dbg.dassert_lt( 0, len(col_groups), msg="Tuple `col_group` must be nonempty." ) dbg.dassert_no_duplicates(col_groups) # This is an implementation requirement that we may be able to relax. dbg.dassert_lte(1, len(col_groups)) # dbg.dassert_isinstance(df, pd.DataFrame) # Sanity check each column group tuple. for col_group in col_groups: dbg.dassert_isinstance(col_group, tuple) dbg.dassert_eq( len(col_group), df.columns.nlevels - 1, f"Dataframe multiindex column depth incompatible with {col_group}", ) # Determine output dataframe column names. out_col_names = [col_group[-1] for col_group in col_groups] _LOG.debug("out_col_names=%s", out_col_names) dbg.dassert_no_duplicates(out_col_names) # Determine keys (i.e., leaf column names). keys = df[col_groups[0]].columns.to_list() _LOG.debug("keys=%s", keys) # Ensure all groups have the same keys. for col_group in col_groups: col_group_keys = df[col_group].columns.to_list() dbg.dassert_set_eq(keys, col_group_keys) # Swap levels in `df` so that keys are top level. df_out = df.swaplevel(i=-2, j=-1, axis=1) # Sort by keys for faster selection. df_out.sort_index(axis=1, level=-2, inplace=True) # To generate a dataframe for each key, generate tuples that key # up to the last two levels. roots = [col_group[:-1] for col_group in col_groups] # Get rid of any duplicates. roots = list(set(roots)) _LOG.debug("col group roots=%s", roots) # Generate one dataframe per key. dfs = {} for key in keys: local_dfs = [] for root in roots: local_df = df_out[root + (key,)] local_df = local_df[out_col_names] local_dfs.append(local_df) local_df = pd.concat(local_dfs, axis=1) # Ensure that there is no column name ambiguity. dbg.dassert_no_duplicates(local_df.columns.to_list()) dfs[key] = local_df return dfs @staticmethod def postprocess( dfs: Dict[_COL_TYPE, pd.DataFrame], col_group: Tuple[_COL_TYPE], ) -> pd.DataFrame: """ As in `_postprocess_dataframe_dict()`. """ return _postprocess_dataframe_dict(dfs, col_group) class CrossSectionalDfToDfColProcessor: """ Provides dataflow processing wrappers for cross-sectional transformations. These helpers are useful when we want to apply an operation such as principal component projection or residualization to a family of instruments. Examples: 1. Suppose we want to perform a principal component projection of `MN0`, ..., `MN3` of the `ret_0` group of a dataframe `df` with multilevel columns as follows: ``` ret_0 close MN0 MN1 MN2 MN3 MN0 MN1 MN2 MN3 ``` Then we invoke `preprocess()` with `col_group = "ret_0"`. The principal component projection operates on a dataframe with columns ``` MN0 MN1 MN2 MN3 ``` and returns a dataframe with columns ``` 0 1 2 3 ``` We apply `postprocess()` to this dataframe with `col_group = "pca'` to obtain ``` pca 0 1 2 3 ``` 2. If we perform residualization on `df` as given above instead of principcal component projection, then column names are preserved after the residualization, and we may apply `postprocess()` with `col_group = "residual"` to obtian ``` residual MN0 MN1 MN2 MN3 ``` """ @staticmethod def preprocess( df: pd.DataFrame, col_group: Tuple[_COL_TYPE], ) -> pd.DataFrame: """ As in `preprocess_multiindex_cols()`. """ return preprocess_multiindex_cols(df, col_group) @staticmethod def postprocess( df: pd.DataFrame, col_group: Tuple[_COL_TYPE], ) -> pd.DataFrame: """ Create a multi-indexed column dataframe from a single-indexed one. 
:param df: a single-level column dataframe :param col_group: a tuple of indices to insert :return: a multi-indexed column dataframe. If `df` has columns `MN0 MN1 MN2 MN3` and `col_group = "pca"`, then the output dataframe has columns ``` pca MN0 MN1 MN2 MN3 ``` """ # Perform sanity checks on dataframe. dbg.dassert_isinstance(df, pd.DataFrame) dbg.dassert_no_duplicates(df.columns) dbg.dassert_eq( 1, df.columns.nlevels, ) # dbg.dassert_isinstance(col_group, tuple) # if col_group: df = pd.concat([df], axis=1, keys=[col_group]) return df class SeriesToDfColProcessor: """ Provides dataflow processing wrappers for series-to-dataframe functions. Examples of functions to wrap include: - series decompositions (e.g., STL, Fourier coefficients, wavelet levels) - multiple lags - volatility modeling Examples: 1. Suppose we want to add two lags of the columns `MN0`, ..., `MN3` of the `ret_0` group of a dataframe `df` with multilevel columns as follows: ``` ret_0 close MN0 MN1 MN2 MN3 MN0 MN1 MN2 MN3 ``` Then we invoke `preprocess()` with `col_group = "ret_0"`. Two lags are computed for each column of the dataframe with columns ``` MN0 MN1 MN2 MN3 ``` The results of the lag computation are represented by a dictionary with keys `MN0`, ..., `MN3` and values consisting of dataframes with columns ``` lag_1 lag_2 ``` We apply `postprocess()` to this dataframe with `col_group = ()` (an empty tuple) to obtain ``` lag_1 lag_2 MN0 MN1 MN2 MN3 MN0 MN1 MN2 MN3 ``` """ @staticmethod def preprocess( df: pd.DataFrame, col_group: Tuple[_COL_TYPE], ) -> pd.DataFrame: """ As in `preprocess_multiindex_cols()`. """ return preprocess_multiindex_cols(df, col_group) @staticmethod def postprocess( dfs: Dict[_COL_TYPE, pd.DataFrame], col_group: Tuple[_COL_TYPE], ) -> pd.DataFrame: """ As in `_postprocess_dataframe_dict()`. """ return _postprocess_dataframe_dict(dfs, col_group) class SeriesToSeriesColProcessor: """ Provides dataflow processing wrappers for series-to-series functions. Examples of functions to wrap include: - signal filters (e.g., smooth moving averages, z-scoring, outlier processing) - rolling features (e.g., moments, centered moments) """ @staticmethod def preprocess( df: pd.DataFrame, col_group: Tuple[_COL_TYPE], ) -> pd.DataFrame: """ As in `preprocess_multiindex_cols()`. """ return preprocess_multiindex_cols(df, col_group) @staticmethod def postprocess( srs: List[pd.Series], col_group: Tuple[_COL_TYPE], ) -> pd.DataFrame: """ Create a multi-indexed column dataframe from `srs` and `col_group`. :param srs: a list of symbols uniquely named (by symbol) :param col_group: column levels to add :return: multi-indexed column dataframe with series names as leaf columns """ # Perform basic type checks. dbg.dassert_isinstance(srs, list) for series in srs: dbg.dassert_isinstance(series, pd.Series) dbg.dassert_isinstance(col_group, tuple) # Create dataframe from series. df = pd.concat(srs, axis=1) # Ensure that there are no duplicates. dbg.dassert_no_duplicates(df.columns) if col_group: df = pd.concat([df], axis=1, keys=[col_group]) return df def preprocess_multiindex_cols( df: pd.DataFrame, col_group: Tuple[_COL_TYPE], ) -> pd.DataFrame: """ Extract a single-level column dataframe from a multi-indexed one. Typically, the last column index level corresponds to an instrument. :param df: multi-indexed column dataframe, e.g., ``` ret_0 close MN0 MN1 MN2 MN3 MN0 MN1 MN2 MN3 ``` :param col_group: tuple specifying all but leaf instruments, which are selected implicitly. 
E.g., `col_group = "ret_0"` extracts `(ret_0, MN0)` through `(ret_0, MN3)`. :return: a single-level column dataframe. E.g., a dataframe with columns ``` MN0 MN1 MN2 MN3 ``` extracted from the `ret_0` group. """ # Perform `col_group` sanity checks. dbg.dassert_isinstance(col_group, tuple) # TODO(Paul): Consider whether we want to allow the "degenerate case". dbg.dassert_lt(0, len(col_group), msg="Tuple `col_group` must be nonempty.") # dbg.dassert_isinstance(df, pd.DataFrame) # Do not allow duplicate columns. dbg.dassert_no_duplicates(df.columns) # Ensure compatibility between dataframe column levels and col groups. dbg.dassert_eq( len(col_group), df.columns.nlevels - 1, "Dataframe multiindex column depth incompatible with config.", ) # Select single-column-level dataframe and return. df = df[col_group].copy() return df def _postprocess_dataframe_dict( dfs: Dict[_COL_TYPE, pd.DataFrame], col_group: Tuple[_COL_TYPE], ) -> pd.DataFrame: """ Create a multi-indexed column dataframe from keys, values, `col_group`. :param dfs: dataframes indexed by symbol. :param col_group: column levels to prefix `df` columns with :return: multi-level column dataframe - leaf columns are symbols - the next column level is defined by the columns of the dataframes in `dfs` (which are to be the same). - the initial levels are given by `col_group` """ dbg.dassert_isinstance(dfs, dict) # Ensure that the dictionary is not empty. dbg.dassert(dfs) # Perform sanity checks on dataframe. for symbol, df in dfs.items(): # Ensure that each values of `dfs` is a nonempty dataframe. dbg.dassert_isinstance(df, pd.DataFrame) dbg.dassert(not df.empty) # Ensure that `df` columns do not have duplicates and are single-level. dbg.dassert_no_duplicates(df.columns) dbg.dassert_eq( 1, df.columns.nlevels, ) # Ensure that `col_group` is a (possibly empty) tuple. dbg.dassert_isinstance(col_group, tuple) # Insert symbols as a column level. df = pd.concat(dfs.values(), axis=1, keys=dfs.keys()) # Swap column levels so that symbols are leaves. df = df.swaplevel(i=0, j=1, axis=1) df.sort_index(axis=1, level=0, inplace=True) if col_group: df = pd.concat([df], axis=1, keys=[col_group]) return df
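# Minimal illustration of the column handling described in the docstrings above.
# The instrument ("MN0", "MN1") and feature ("ret_0", "close") names are made up
# for the example; `preprocess_multiindex_cols()` performs essentially the
# selection shown here, plus sanity checks.
if __name__ == "__main__":
    _cols = pd.MultiIndex.from_product([["ret_0", "close"], ["MN0", "MN1"]])
    _df = pd.DataFrame([[0.1, 0.2, 10.0, 11.0]], columns=_cols)
    # Single-level frame with columns ["MN0", "MN1"], taken from the "ret_0" group.
    print(_df["ret_0"])
    # The same selection via the helper defined above.
    print(preprocess_multiindex_cols(_df, ("ret_0",)))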
py
1a4ec1f7eb0580c0305cde2303c8720baec5993d
""" Given two binary strings, return their sum (also a binary string). The input strings are both non-empty and contains only characters 1 or 0. Example 1: Input: a = "11", b = "1" Output: "100" Example 2: Input: a = "1010", b = "1011" Output: "10101" """ # 2018-6-23 # Add Binary class Solution: def addBinary(self, a, b): """ :type a: str :type b: str :rtype: str """ res = '' carry = '0' i = 0 lena = len(a) lenb = len(b) while i < max(lena, lenb) or carry == '1': aa = a[-1 - i] if i < lena else '0' bb = b[-1 - i] if i < lenb else '0' sums = int(aa) + int(bb) + int(carry) res = str(sums%2) + res carry = '1' if sums//2 > 0 else '0' i += 1 return res # test a = "1010" b = "1011" test = Solution() res = test.addBinary(a,b) print(res)
py
1a4ec22048d766256e7699c4e0be040f63aeb443
""" Module for handling atomic elements. """ class Element: """ Class for storing and accessing atomic elements. Args: input_value: The atomic number, symbol, or name of the element """ def __init__(self, input_value): self.input = input_value # list with atomic number z, short name, full name, valence, # valence electrons, covalent radius, vdW radius, metallic radius self.elements_list = [ (1, 'H', 'Hydrogen', 1.0, 1, 0.31, 1.20, None), (2, 'He', 'Helium', 0.5, 2, 0.28, 1.40, None), (3, 'Li', 'Lithium', 1.0, 1, 1.28, 1.82, 1.52), (4, 'Be', 'Beryllium', 2.0, 2, 0.96, 1.53, 1.12), (5, 'B', 'Boron', 3.0, 3, 0.84, 1.92, None), (6, 'C', 'Carbon', 4.0, 4, 0.70, 1.70, None), (7, 'N', 'Nitrogen', 3.0, 5, 0.71, 1.55, None), (8, 'O', 'Oxygen', 2.0, 6, 0.66, 1.52, None), (9, 'F', 'Fluorine', 1.0, 7, 0.57, 1.47, None), (10, 'Ne', 'Neon', 0.5, 8, 0.58, 1.54, None), (11, 'Na', 'Sodium', 1.0, 1, 1.66, 2.27, 1.86), (12, 'Mg', 'Magnesium', 2.0, 2, 1.41, 1.73, 1.60), (13, 'Al', 'Aluminium', 3.0, 3, 1.21, 1.84, 1.43), (14, 'Si', 'Silicon', 4.0, 4, 1.11, 2.10, None), (15, 'P', 'Phosphorus', 3.0, 5, 1.07, 1.80, None), (16, 'S', 'Sulfur', 2.0, 6, 1.05, 1.80, None), (17, 'Cl', 'Chlorine', 1.0, 7, 1.02, 1.75, None), (18, 'Ar', 'Argon', 0.5, 8, 1.06, 1.88, None), (19, 'K', 'Potassium', 1.0, 1, 2.03, 2.75, 2.27), (20, 'Ca', 'Calcium', 2.0, 2, 1.76, 2.31, 1.97), (21, 'Sc', 'Scandium', 3.0, 3, 1.70, 2.11, 1.62), (22, 'Ti', 'Titanium', 4.0, 4, 1.60, 2.00, 1.47), (23, 'V', 'Vanadium', 4.0, 5, 1.53, 2.00, 1.34), (24, 'Cr', 'Chromium', 3.0, 6, 1.39, 2.00, 1.28), (25, 'Mn', 'Manganese', 4.0, 5, 1.39, 2.00, 1.27), (26, 'Fe', 'Iron', 3.0, 3, 1.32, 2.00, 1.26), (27, 'Co', 'Cobalt', 3.0, 3, 1.26, 2.00, 1.25), (28, 'Ni', 'Nickel', 2.0, 3, 1.24, 1.63, 1.24), (29, 'Cu', 'Copper', 2.0, 2, 1.32, 1.40, 1.28), (30, 'Zn', 'Zinc', 2.0, 2, 1.22, 1.39, 1.34), (31, 'Ga', 'Gallium', 3.0, 3, 1.22, 1.87, 1.35), (32, 'Ge', 'Germanium', 4.0, 4, 1.20, 2.11, None), (33, 'As', 'Arsenic', 3.0, 5, 1.19, 1.85, None), (34, 'Se', 'Selenium', 2.0, 6, 1.20, 1.90, None), (35, 'Br', 'Bromine', 1.0, 7, 1.20, 1.85, None), (36, 'Kr', 'Krypton', 0.5, 8, 1.16, 2.02, None), (37, 'Rb', 'Rubidium', 1.0, 1, 2.20, 3.03, 2.48), (38, 'Sr', 'Strontium', 2.0, 2, 1.95, 2.49, 2.15), (39, 'Y', 'Yttrium', 3.0, 3, 1.90, 2.00, 1.80), (40, 'Zr', 'Zirconium', 4.0, 4, 1.75, 2.00, 1.60), (41, 'Nb', 'Niobium', 5.0, 5, 1.64, 2.00, 1.46), (42, 'Mo', 'Molybdenum',4.0, 6, 1.54, 2.00, 1.39), (43, 'Tc', 'Technetium',4.0, 5, 1.47, 2.00, 1.36), (44, 'Ru', 'Ruthenium', 4.0, 3, 1.46, 2.00, 1.34), (45, 'Rh', 'Rhodium', 4.0, 3, 1.42, 1.63, 1.34), (46, 'Pd', 'Palladium', 4.0, 3, 1.39, 1.72, 1.37), (47, 'Ag', 'Silver', 1.0, 2, 1.45, 1.58, 1.44), (48, 'Cd', 'Cadmium', 2.0, 2, 1.44, 1.93, 1.51), (49, 'In', 'Indium', 3.0, 3, 1.42, 2.17, 1.67), (50, 'Sn', 'Tin', 4.0, 4, 1.39, 2.06, None), (51, 'Sb', 'Antimony', 3.0, 5, 1.39, 2.06, None), (52, 'Te', 'Tellurium', 2.0, 6, 1.38, 2.06, None), (53, 'I', 'Iodine', 1.0, 7, 1.39, 1.98, None), (54, 'Xe', 'Xenon', 0.5, 8, 1.40, 2.16, None), (55, 'Cs', 'Caesium', 1.0, 1, 2.44, 3.43, 2.65), (56, 'Ba', 'Barium', 2.0, 2, 2.15, 2.68, 2.22), (57, 'La', 'Lanthanum', 3.0, 3, 2.07, 2.10, 1.87), (58, 'Ce', 'Cerium', 4.0, 3, 2.04, 2.10, 1.818), (59,'Pr','Praseodymium',3.0, 3, 2.03, 2.10, 1.824), (60, 'Nd', 'Neodymium', 3.0, 3, 2.01, 2.10, 1.814), (61, 'Pm', 'Promethium',3.0, 3, 1.99, 2.10, 1.834), (62, 'Sm', 'Samarium', 3.0, 3, 1.98, 2.10, 1.804), (63, 'Eu', 'Europium', 3.0, 3, 1.98, 2.10, 1.804), (64, 'Gd', 'Gadolinium',3.0, 3, 1.96, 2.10, 1.804), (65, 'Tb', 
'Terbium', 3.0, 3, 1.94, 2.10, 1.773), (66, 'Dy', 'Dysprosium',3.0, 3, 1.92, 2.10, 1.781), (67, 'Ho', 'Holmium', 3.0, 3, 1.92, 2.10, 1.762), (68, 'Er', 'Erbium', 3.0, 3, 1.89, 2.10, 1.761), (69, 'Tm', 'Thulium', 3.0, 3, 1.90, 2.10, 1.759), (70, 'Yb', 'Ytterbium', 3.0, 3, 1.87, 2.10, 1.76), (71, 'Lu', 'Lutetium', 3.0, 3, 1.87, 2.10, 1.738), (72, 'Hf', 'Hafnium', 4.0, 3, 1.75, 2.10, 1.59), (73, 'Ta', 'Tantalum', 5.0, 3, 1.70, 2.10, 1.46), (74, 'W', 'Tungsten', 4.0, 3, 1.62, 2.10, 1.39), (75, 'Re', 'Rhenium', 4.0, 3, 1.51, 2.10, 1.37), (76, 'Os', 'Osmium', 4.0, 3, 1.44, 2.10, 1.35), (77, 'Ir', 'Iridium', 4.0, 3, 1.41, 2.10, 1.355), (78, 'Pt', 'Platinum', 4.0, 3, 1.36, 1.75, 1.385), (79, 'Au', 'Gold', 1.0, 3, 1.36, 1.66, 1.44), (80, 'Hg', 'Mercury', 2.0, 3, 1.32, 1.55, 1.51), (81, 'Tl', 'Thallium', 3.0, 3, 1.45, 1.96, 1.70), (82, 'Pb', 'Lead', 4.0, 4, 1.46, 2.02, None), (83, 'Bi', 'Bismuth', 3.0, 5, 1.48, 2.07, None), (84, 'Po', 'Polonium', 2.0, 6, 1.40, 1.97, None), (85, 'At', 'Astatine', 1.0, 7, 1.50, 2.02, None), (86, 'Rn', 'Radon', 0.5, 8, 1.50, 2.20, None), (87, 'Fr', 'Francium', 1.0, 1, 2.60, 3.48, None), (88, 'Ra', 'Radium', 2.0, 2, 2.21, 2.83, None), (89, 'Ac', 'Actinium', 3.0, 3, 2.15, 2.20, None), (90, 'Th', 'Thorium', 4.0, 3, 2.06, 2.20, 1.79), (91,'Pa','Protactinium',4.0, 3, 2.00, 2.20, 1.63), (92, 'U', 'Uranium', 4.0, 3, 1.96, 2.20, 1.56), (93, 'Np', 'Neptunium', 4.0, 3, 1.90, 2.20, 1.55), (94, 'Pu', 'Plutonium', 4.0, 3, 1.87, 2.20, 1.59), (95, 'Am', 'Americium', 4.0, 3, 1.80, 2.20, 1.73), (96, 'Cm', 'Curium', 4.0, 3, 1.69, 2.20, 1.74), (97, 'Bk', 'Berkelium', 4.0, 3, None, None, 1.70), (98,'Cf','Californium', 4.0, 3, None, None, 1.86), (99,'Es','Einsteinium', 4.0, 3, None, None, 1.86), (100, 'Fm', 'Fermium', 4.0, 3, None, None, None), (101,'Md','Mendelevium',4.0, 3, None, None, None), (102, 'No', 'Nobelium', 4.0, 3, None, None, None), (103, 'Lr','Lawrencium',4.0, 3, None, None, None), (104,'Rf','Rutherfordium',4.0,3,None, None, None), (105, 'Db', 'Dubnium', 2.0, 3, None, None, None), ] """A list of atomic numbers, symbols, names, and other information, up to atomic number 105""" #scatter factor self.sf=[ [ 0.493, 0.323, 0.140, 0.041, 10.511, 26.126, 3.142, 57.800, 0.003], [ 0.873, 0.631, 0.311, 0.178, 9.104, 3.357, 22.928, 0.982, 0.006], [ 1.128, 0.751, 0.618, 0.465, 3.955, 1.052, 85.391,168.261, 0.038], [ 1.592, 1.128, 0.539, 0.703, 43.643, 1.862,103.483, 0.542, 0.038], [ 2.055, 1.333, 1.098, 0.707, 23.219, 1.021, 60.350, 0.140, -0.193], [ 2.310, 1.020, 1.589, 0.865, 20.844, 10.208, 0.569, 51.651, 0.216], [ 12.213, 3.132, 2.013, 1.166, 0.006, 9.893, 28.997, 0.583,-11.529], [ 3.049, 2.287, 1.546, 0.867, 13.277, 5.701, 0.324, 32.909, 0.251], [ 3.539, 2.641, 1.517, 1.024, 10.283, 4.294, 0.262, 26.148, 0.278], [ 3.955, 3.112, 1.455, 1.125, 8.404, 3.426, 0.231, 21.718, 0.352], [ 4.763, 3.174, 1.267, 1.113, 3.285, 8.842, 0.314,129.424, 0.676], [ 5.420, 2.174, 1.227, 2.307, 2.828, 79.261, 0.381, 7.194, 0.858], [ 6.420, 1.900, 1.594, 1.965, 3.039, 0.743, 31.547, 85.089, 1.115], [ 6.292, 3.035, 1.989, 1.541, 2.439, 32.334, 0.678, 81.694, 1.141], [ 6.435, 4.179, 1.780, 1.491, 1.907, 27.157, 0.526, 68.164, 1.115], [ 6.905, 5.203, 1.438, 1.586, 1.468, 22.215, 0.254, 56.172, 0.867], [ 11.460, 7.196, 6.256, 1.645, 0.010, 1.166, 18.519, 47.778, -9.557], [ 7.484, 6.772, 0.654, 1.644, 0.907, 14.841, 43.898, 33.393, 1.444], [ 8.219, 7.440, 1.052, 0.866, 12.795, 0.775,213.187, 41.684, 1.423], [ 8.627, 7.387, 1.590, 1.021, 10.442, 0.660, 85.748,178.437, 1.375], [ 9.189, 7.368, 1.641, 1.468, 9.021, 
0.573,136.108, 51.353, 1.333], [ 9.759, 7.356, 1.699, 1.902, 7.851, 0.500, 35.634,116.105, 1.281], [ 10.297, 7.351, 2.070, 2.057, 6.866, 0.438, 26.894,102.478, 1.220], [ 10.641, 7.354, 3.324, 1.492, 6.104, 0.392, 20.263, 98.740, 1.183], [ 11.282, 7.357, 3.019, 2.244, 5.341, 0.343, 17.867, 83.754, 1.090], [ 11.769, 7.357, 3.522, 2.305, 4.761, 0.307, 15.354, 76.881, 1.037], [ 12.284, 7.341, 4.003, 2.349, 4.279, 0.278, 13.536, 71.169, 1.012], [ 12.838, 7.292, 4.444, 2.380, 3.878, 0.257, 12.176, 66.342, 1.034], [ 13.338, 7.168, 5.616, 1.673, 3.583, 0.247, 11.397, 64.831, 1.191], [ 14.074, 7.032, 5.165, 2.410, 3.266, 0.233, 10.316, 58.710, 1.304], [ 15.235, 6.701, 4.359, 2.962, 3.067, 0.241, 10.781, 61.414, 1.719], [ 16.082, 6.375, 3.707, 3.683, 2.851, 0.252, 11.447, 54.763, 2.131], [ 16.672, 6.070, 3.431, 4.278, 2.635, 0.265, 12.948, 47.797, 2.531], [ 17.001, 5.820, 3.973, 4.354, 2.410, 0.273, 15.237, 43.816, 2.841], [ 17.179, 5.236, 5.638, 3.985, 2.172, 16.580, 0.261, 41.433, 2.956], [ 17.355, 6.729, 5.549, 3.537, 1.938, 16.562, 0.226, 39.397, 2.825], [ 17.178, 9.644, 5.140, 1.529, 1.789, 17.315, 0.275,164.934, 3.487], [ 17.566, 9.818, 5.422, 2.669, 1.556, 14.099, 0.166,132.376, 2.506], [ 17.776, 10.295, 5.726, 3.266, 1.403, 12.801, 0.261,104.354, 1.912], [ 17.876, 10.948, 5.417, 3.657, 1.276, 11.916, 0.118, 87.663, 2.069], [ 17.614, 12.014, 4.042, 3.533, 1.189, 11.766, 0.205, 69.796, 3.756], [ 3.703, 17.236, 12.888, 3.743, 0.277, 1.096, 11.004, 61.658, 4.387], [ 19.130, 11.095, 4.649, 2.713, 0.864, 8.145, 21.571, 86.847, 5.404], [ 19.267, 12.918, 4.863, 1.568, 0.809, 8.435, 24.800, 94.293, 5.379], [ 19.296, 14.350, 4.734, 1.289, 0.752, 8.218, 25.875, 98.606, 5.328], [ 19.332, 15.502, 5.295, 0.606, 0.699, 7.989, 25.205, 76.899, 5.266], [ 19.281, 16.688, 4.805, 1.046, 0.645, 7.473, 24.660, 99.816, 5.179], [ 19.221, 17.644, 4.461, 1.603, 0.595, 6.909, 24.701, 87.482, 5.069], [ 19.162, 18.560, 4.295, 2.040, 0.548, 6.378, 25.850, 92.803, 4.939], [ 19.189, 19.101, 4.458, 2.466, 5.830, 0.503, 26.891, 83.957, 4.782], [ 19.642, 19.045, 5.037, 2.683, 5.303, 0.461, 27.907, 75.283, 4.591], [ 19.964, 19.014, 6.145, 2.524, 4.817, 0.421, 28.528, 70.840, 4.352], [ 20.147, 18.995, 7.514, 2.273, 4.347, 0.381, 27.766, 66.878, 4.071], [ 20.293, 19.030, 8.977, 1.990, 3.928, 0.344, 26.466, 64.266, 3.712], [ 20.389, 19.106, 10.662, 1.495, 3.569, 0.311, 24.388,213.904, 3.335], [ 20.336, 19.297, 10.888, 2.696, 3.216, 0.276, 20.207,167.202, 2.773], [ 20.578, 19.599, 11.373, 3.287, 2.948, 0.244, 18.773,133.124, 2.147], [ 21.167, 19.770, 11.851, 3.330, 2.812, 0.227, 17.608,127.113, 1.863], [ 22.044, 19.670, 12.386, 2.824, 2.774, 0.222, 16.767,143.644, 2.058], [ 22.684, 19.685, 12.774, 2.851, 2.662, 0.211, 15.885,137.903, 1.985], [ 23.340, 19.610, 13.123, 2.875, 2.563, 0.202, 15.101,132.721, 2.029], [ 24.004, 19.426, 13.440, 2.896, 2.473, 0.196, 14.400,128.007, 2.210], [ 24.627, 19.089, 13.760, 2.293, 2.388, 0.194, 13.755,123.174, 2.575], [ 25.071, 19.080, 13.852, 3.545, 2.253, 0.182, 12.933,101.398, 2.420], [ 25.898, 18.219, 14.317, 2.954, 2.243, 0.196, 12.665,115.362, 3.583], [ 26.507, 17.638, 14.560, 2.966, 2.180, 0.202, 12.190,111.874, 4.297], [ 26.905, 17.294, 14.558, 3.638, 2.071, 0.198, 11.441, 92.657, 4.568], [ 27.656, 16.428, 14.978, 2.982, 2.074, 0.224, 11.360,105.703, 5.920], [ 28.182, 15.885, 15.154, 2.987, 2.029, 0.239, 10.998,102.961, 6.756], [ 28.664, 15.434, 15.309, 2.990, 1.989, 0.257, 10.665,100.417, 7.567], [ 28.948, 15.221, 15.100, 3.716, 1.902, 9.985, 0.261, 84.330, 7.976], [ 29.144, 15.173, 14.759, 
4.300, 1.833, 9.600, 0.275, 72.029, 8.582], [ 29.202, 15.229, 14.514, 4.765, 1.773, 9.370, 0.296, 63.364, 9.244], [ 0.000, 0.000, 0.000, 0.000, 1.722, 9.231, 0.323, 57.725, 9.858], [ 28.762, 15.719, 14.556, 5.442, 1.672, 9.092, 0.350, 52.086, 10.472], [ 28.189, 16.155, 14.931, 5.676, 1.629, 8.979, 0.383, 48.165, 11.000], [ 27.305, 16.730, 15.611, 5.834, 1.593, 8.866, 0.418, 45.001, 11.472], [ 27.006, 17.764, 15.713, 5.784, 1.513, 8.812, 0.425, 38.610, 11.688], [ 16.882, 18.591, 25.558, 5.860, 0.461, 8.622, 1.483, 36.396, 12.066], [ 20.681, 19.042, 21.657, 5.968, 0.545, 8.448, 1.573, 38.325, 12.609], [ 27.545, 19.158, 15.538, 5.526, 0.655, 8.708, 1.963, 45.815, 13.175], [ 31.062, 13.064, 18.442, 5.970, 0.690, 2.358, 8.618, 47.258, 13.412], [ 33.369, 12.951, 16.588, 6.469, 0.704, 2.924, 8.794, 48.009, 13.578], [ 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [ 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [ 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [ 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [ 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [ 35.660, 23.103, 12.598, 4.087, 0.589, 3.652, 18.599,117.020, 13.527], [ 35.564, 23.422, 12.747, 4.807, 0.563, 3.462, 17.831, 99.172, 13.431], [ 35.885, 23.295, 14.189, 4.173, 0.548, 3.415, 16.924,105.251, 13.429], [ 0.000, 0.000, 0.000, 0.000, 0.530, 3.335, 16.143,101.371, 13.393], [ 36.187, 23.596, 15.640, 4.186, 0.512, 3.254, 15.362, 97.491, 13.357], [ 36.526, 23.808, 16.771, 3.479, 0.499, 3.264, 14.946,105.980, 13.381]] """A list of scatter factors for the elements""" self.z = None """atomic number""" self.short_name = None """atomic symbol""" self.long_name = None """atomic name""" self.valence = None """valence value""" self.valence_electrons = None """number of valence electrons""" self.covalent_radius = None """atomic radius used for distance checking within crystals""" self.vdw_radius = None """atomic radius used for volume estimation within crystals""" self.metallic_radius = None """atomic radius used for distance checking within metallic crystals""" pos = None try: int(self.input) self.z = self.input for i, el in enumerate(self.elements_list): if el[0] == self.z: pos = i self.short_name = el[1] self.long_name = el[2] break except ValueError: self.short_name = self.input for i, el in enumerate(self.elements_list): if el[1] == self.short_name: pos = i self.z = el[0] self.long_name = el[2] break if not self.z: self.short_name = None self.long_name = self.input for i, el in enumerate(self.elements_list): if el[2] == self.long_name: pos = i self.z = el[0] self.short_name = el[1] break if not self.z: self.long_name = None if pos is not None: self.valence = self.elements_list[pos][3] self.valence_electrons = self.elements_list[pos][4] self.covalent_radius = self.elements_list[pos][5] self.vdw_radius = self.elements_list[pos][6] self.metallic_radius = self.elements_list[pos][7] self.scatter = self.sf[pos] def get_all(self, pos): """ Return all [pos] elements in the full element list Args: pos: the index of the elements to retrieve Returns: a list containing only the [pos] elements of self.elements_list """ els = [] for el in self.elements_list: els.append(el[pos]) return els #TODO: add docstrings for miscellaneous functions def all_z(self): return self.get_all(0) def all_short_names(self): return self.get_all(1) def all_long_names(self): return self.get_all(2) def all_valences(self): return self.get_all(3) def all_valence_electrons(self): return 
self.get_all(4) def all_covalent_radii(self): return self.get_all(5) def all_vdw_radii(self): return self.get_all(6) def all_metallic_radii(self): return self.get_all(7) def get_sf(self, pos): fp = resource_filename("pyxtal_xrd", "database/atomic_scattering_params.json") with open(fp, 'r') as f: ATOMIC_SCATTERING_PARAMS = json.load(f) els = ATOMIC_SCATTERING_PARAMS[pos] return els def number_from_specie(specie): if type(specie) == int or type(specie) == float: if specie <= 105 and specie >= 1: index = int(specie) else: print(specie) print("Error: Atomic number must be between 1 and 105.") return elif type(specie) == str: try: el = Element(specie) index = el.z except: print("Error: Invalid atomic symbol, name, or number.") return elif type(specie) == Element: try: index = specie.z except: print("Error: Element object has no atomic number 'z'.") return else: try: el = Element(specie.number) index = el.z except: print("Error: Invalid atomic symbol, name, or number.") return return index
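A small, hedged usage sketch for the Element class whose methods end above; it assumes the surrounding class is named Element and is constructed from a single symbol/name/number argument (as the number_from_specie helper suggests), and it uses only attributes defined in the file. The chosen element is arbitrary and the module path is not shown here.

# Hedged usage sketch; Element and number_from_specie come from the file above,
# everything else (the chosen element, the prints) is illustrative only.
fe = Element('Fe')
print(fe.z, fe.long_name, fe.covalent_radius, fe.vdw_radius)

# Lookup by atomic number, symbol, or full name resolves to the same record.
assert Element(26).short_name == Element('Iron').short_name == 'Fe'

# number_from_specie normalizes any of these inputs to an atomic number.
assert number_from_specie('Iron') == number_from_specie(26) == 26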
py
1a4ec263cfaf19c79837b14101b60dfe2c9f3a44
# Copyright (C) 2002, Thomas Hamelryck ([email protected])
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Map residues of two structures to each other based on a FASTA alignment."""

from __future__ import print_function

from Bio.Data import SCOPData

from Bio.PDB import Selection
from Bio.PDB.Polypeptide import is_aa


class StructureAlignment(object):
    """Class to align two structures based on an alignment of their sequences."""

    def __init__(self, fasta_align, m1, m2, si=0, sj=1):
        """Initialize.

        Attributes:
         - fasta_align - Alignment object
         - m1, m2 - two models
         - si, sj - the sequences in the Alignment object that correspond
           to the structures
        """
        length = fasta_align.get_alignment_length()
        # Get the residues in the models
        rl1 = Selection.unfold_entities(m1, "R")
        rl2 = Selection.unfold_entities(m2, "R")
        # Residue positions
        p1 = 0
        p2 = 0
        # Map equivalent residues to each other
        map12 = {}
        map21 = {}
        # List of residue pairs (None if -)
        duos = []
        for i in range(length):
            column = fasta_align[:, i]
            aa1 = column[si]
            aa2 = column[sj]
            if aa1 != "-":
                # Position in seq1 is not -
                while True:
                    # Loop until an aa is found
                    r1 = rl1[p1]
                    p1 = p1 + 1
                    if is_aa(r1):
                        break
                self._test_equivalence(r1, aa1)
            else:
                r1 = None
            if aa2 != "-":
                # Position in seq2 is not -
                while True:
                    # Loop until an aa is found
                    r2 = rl2[p2]
                    p2 = p2 + 1
                    if is_aa(r2):
                        break
                self._test_equivalence(r2, aa2)
            else:
                r2 = None
            if r1:
                # Map residue in seq1 to its equivalent in seq2
                map12[r1] = r2
            if r2:
                # Map residue in seq2 to its equivalent in seq1
                map21[r2] = r1
            # Append aligned pair (r is None if gap)
            duos.append((r1, r2))
        self.map12 = map12
        self.map21 = map21
        self.duos = duos

    def _test_equivalence(self, r1, aa1):
        """Test if aa in sequence fits aa in structure (PRIVATE)."""
        resname = r1.get_resname()
        resname = SCOPData.protein_letters_3to1[resname]
        assert aa1 == resname

    def get_maps(self):
        """Map residues between the structures.

        Return two dictionaries that map a residue in one structure to
        the equivalent residue in the other structure.
        """
        return self.map12, self.map21

    def get_iterator(self):
        """Create an iterator over all residue pairs."""
        for i in range(0, len(self.duos)):
            yield self.duos[i]
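A brief usage sketch for the StructureAlignment class above, added for illustration. It assumes two local PDB files and a two-record FASTA alignment; the file names are placeholders, while the Bio.PDB, Bio.AlignIO, and StructureAlignment calls follow the standard Biopython API.

# Hedged usage sketch: "struct1.pdb", "struct2.pdb" and "pair.fasta" are
# hypothetical file names, not part of the original module.
from Bio import AlignIO
from Bio.PDB import PDBParser
from Bio.PDB.StructureAlignment import StructureAlignment

parser = PDBParser(QUIET=True)
m1 = parser.get_structure("s1", "struct1.pdb")[0]  # first model of structure 1
m2 = parser.get_structure("s2", "struct2.pdb")[0]  # first model of structure 2

# Alignment whose records si=0 and sj=1 correspond to the two structures.
fasta_align = AlignIO.read("pair.fasta", "fasta")

sa = StructureAlignment(fasta_align, m1, m2, si=0, sj=1)
map12, map21 = sa.get_maps()
for r1, r2 in sa.get_iterator():
    print(r1, r2)  # residue pair; either side is None at a gap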
py
1a4ec2ff586a04ff9d827c840e9f597c79aff8b5
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import json import os import re import unittest from io import StringIO import google.auth import mock import tenacity from google.auth.environment_vars import CREDENTIALS from google.auth.exceptions import GoogleAuthError from google.cloud.exceptions import Forbidden from airflow import version from airflow.exceptions import AirflowException from airflow.providers.google.cloud.utils.credentials_provider import _DEFAULT_SCOPES from airflow.providers.google.common.hooks import base_google as hook from tests.providers.google.cloud.utils.base_gcp_mock import mock_base_gcp_hook_default_project_id default_creds_available = True default_project = None try: _, default_project = google.auth.default(scopes=_DEFAULT_SCOPES) except GoogleAuthError: default_creds_available = False MODULE_NAME = "airflow.providers.google.common.hooks.base_google" class NoForbiddenAfterCount: """Holds counter state for invoking a method several times in a row.""" def __init__(self, count, **kwargs): self.counter = 0 self.count = count self.kwargs = kwargs def __call__(self): """ Raise an Forbidden until after count threshold has been crossed. Then return True. 
""" if self.counter < self.count: self.counter += 1 raise Forbidden(**self.kwargs) return True @hook.GoogleBaseHook.quota_retry(wait=tenacity.wait_none()) def _retryable_test_with_temporary_quota_retry(thing): return thing() class QuotaRetryTestCase(unittest.TestCase): # ptlint: disable=invalid-name def test_do_nothing_on_non_error(self): result = _retryable_test_with_temporary_quota_retry(lambda: 42) self.assertTrue(result, 42) def test_retry_on_exception(self): message = "POST https://translation.googleapis.com/language/translate/v2: User Rate Limit Exceeded" errors = [ mock.MagicMock(details=mock.PropertyMock(return_value='userRateLimitExceeded')) ] custom_fn = NoForbiddenAfterCount( count=5, message=message, errors=errors ) _retryable_test_with_temporary_quota_retry(custom_fn) self.assertEqual(5, custom_fn.counter) def test_raise_exception_on_non_quota_exception(self): with self.assertRaisesRegex(Forbidden, "Daily Limit Exceeded"): message = "POST https://translation.googleapis.com/language/translate/v2: Daily Limit Exceeded" errors = [ mock.MagicMock(details=mock.PropertyMock(return_value='dailyLimitExceeded')) ] _retryable_test_with_temporary_quota_retry( NoForbiddenAfterCount(5, message=message, errors=errors) ) class FallbackToDefaultProjectIdFixtureClass: def __init__(self, project_id): self.mock = mock.Mock() self.fixture_project_id = project_id @hook.GoogleBaseHook.fallback_to_default_project_id def method(self, project_id=None): self.mock(project_id=project_id) @property def project_id(self): return self.fixture_project_id class TestFallbackToDefaultProjectId(unittest.TestCase): def test_no_arguments(self): gcp_hook = FallbackToDefaultProjectIdFixtureClass(321) gcp_hook.method() gcp_hook.mock.assert_called_once_with(project_id=321) def test_default_project_id(self): gcp_hook = FallbackToDefaultProjectIdFixtureClass(321) gcp_hook.method(project_id=None) gcp_hook.mock.assert_called_once_with(project_id=321) def test_provided_project_id(self): gcp_hook = FallbackToDefaultProjectIdFixtureClass(321) gcp_hook.method(project_id=123) gcp_hook.mock.assert_called_once_with(project_id=123) def test_restrict_positional_arguments(self): gcp_hook = FallbackToDefaultProjectIdFixtureClass(321) with self.assertRaises(AirflowException) as cm: gcp_hook.method(123) self.assertEqual( str(cm.exception), "You must use keyword arguments in this methods rather than positional" ) self.assertEqual(gcp_hook.mock.call_count, 0) ENV_VALUE = "/tmp/a" class TestProvideGcpCredentialFile(unittest.TestCase): def setUp(self): with mock.patch( MODULE_NAME + '.GoogleBaseHook.__init__', new=mock_base_gcp_hook_default_project_id, ): self.instance = hook.GoogleBaseHook(gcp_conn_id="google-cloud-default") def test_provide_gcp_credential_file_decorator_key_path_and_keyfile_dict(self): key_path = '/test/key-path' self.instance.extras = { 'extra__google_cloud_platform__key_path': key_path, 'extra__google_cloud_platform__keyfile_dict': '{"foo": "bar"}' } @hook.GoogleBaseHook.provide_gcp_credential_file def assert_gcp_credential_file_in_env(_): self.assertEqual(os.environ[CREDENTIALS], key_path) with self.assertRaisesRegex( AirflowException, 'The `keyfile_dict` and `key_path` fields are mutually exclusive. ' 'Please provide only one value.' 
): assert_gcp_credential_file_in_env(self.instance) def test_provide_gcp_credential_file_decorator_key_path(self): key_path = '/test/key-path' self.instance.extras = {'extra__google_cloud_platform__key_path': key_path} @hook.GoogleBaseHook.provide_gcp_credential_file def assert_gcp_credential_file_in_env(_): self.assertEqual(os.environ[CREDENTIALS], key_path) assert_gcp_credential_file_in_env(self.instance) @mock.patch('tempfile.NamedTemporaryFile') def test_provide_gcp_credential_file_decorator_key_content(self, mock_file): string_file = StringIO() file_content = '{"foo": "bar"}' file_name = '/test/mock-file' self.instance.extras = {'extra__google_cloud_platform__keyfile_dict': file_content} mock_file_handler = mock_file.return_value.__enter__.return_value mock_file_handler.name = file_name mock_file_handler.write = string_file.write @hook.GoogleBaseHook.provide_gcp_credential_file def assert_gcp_credential_file_in_env(_): self.assertEqual(os.environ[CREDENTIALS], file_name) self.assertEqual(file_content, string_file.getvalue()) assert_gcp_credential_file_in_env(self.instance) @mock.patch.dict(os.environ, {CREDENTIALS: ENV_VALUE}) def test_provide_gcp_credential_keep_environment(self): key_path = '/test/key-path' self.instance.extras = {'extra__google_cloud_platform__key_path': key_path} @hook.GoogleBaseHook.provide_gcp_credential_file def assert_gcp_credential_file_in_env(_): self.assertEqual(os.environ[CREDENTIALS], key_path) assert_gcp_credential_file_in_env(self.instance) self.assertEqual(os.environ[CREDENTIALS], ENV_VALUE) @mock.patch.dict(os.environ, {CREDENTIALS: ENV_VALUE}) def test_provide_gcp_credential_keep_environment_when_exception(self): key_path = '/test/key-path' self.instance.extras = {'extra__google_cloud_platform__key_path': key_path} @hook.GoogleBaseHook.provide_gcp_credential_file def assert_gcp_credential_file_in_env(_): raise Exception() with self.assertRaises(Exception): assert_gcp_credential_file_in_env(self.instance) self.assertEqual(os.environ[CREDENTIALS], ENV_VALUE) @mock.patch.dict(os.environ, clear=True) def test_provide_gcp_credential_keep_clear_environment(self): key_path = '/test/key-path' self.instance.extras = {'extra__google_cloud_platform__key_path': key_path} @hook.GoogleBaseHook.provide_gcp_credential_file def assert_gcp_credential_file_in_env(_): self.assertEqual(os.environ[CREDENTIALS], key_path) assert_gcp_credential_file_in_env(self.instance) self.assertNotIn(CREDENTIALS, os.environ) @mock.patch.dict(os.environ, clear=True) def test_provide_gcp_credential_keep_clear_environment_when_exception(self): key_path = '/test/key-path' self.instance.extras = {'extra__google_cloud_platform__key_path': key_path} @hook.GoogleBaseHook.provide_gcp_credential_file def assert_gcp_credential_file_in_env(_): raise Exception() with self.assertRaises(Exception): assert_gcp_credential_file_in_env(self.instance) self.assertNotIn(CREDENTIALS, os.environ) class TestProvideGcpCredentialFileAsContext(unittest.TestCase): def setUp(self): with mock.patch( 'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__', new=mock_base_gcp_hook_default_project_id, ): self.instance = hook.GoogleBaseHook(gcp_conn_id="google-cloud-default") def test_provide_gcp_credential_file_decorator_key_path(self): key_path = '/test/key-path' self.instance.extras = {'extra__google_cloud_platform__key_path': key_path} with self.instance.provide_gcp_credential_file_as_context(): self.assertEqual(os.environ[CREDENTIALS], key_path) @mock.patch('tempfile.NamedTemporaryFile') def 
test_provide_gcp_credential_file_decorator_key_content(self, mock_file): string_file = StringIO() file_content = '{"foo": "bar"}' file_name = '/test/mock-file' self.instance.extras = {'extra__google_cloud_platform__keyfile_dict': file_content} mock_file_handler = mock_file.return_value.__enter__.return_value mock_file_handler.name = file_name mock_file_handler.write = string_file.write with self.instance.provide_gcp_credential_file_as_context(): self.assertEqual(os.environ[CREDENTIALS], file_name) self.assertEqual(file_content, string_file.getvalue()) @mock.patch.dict(os.environ, {CREDENTIALS: ENV_VALUE}) def test_provide_gcp_credential_keep_environment(self): key_path = '/test/key-path' self.instance.extras = {'extra__google_cloud_platform__key_path': key_path} with self.instance.provide_gcp_credential_file_as_context(): self.assertEqual(os.environ[CREDENTIALS], key_path) self.assertEqual(os.environ[CREDENTIALS], ENV_VALUE) @mock.patch.dict(os.environ, {CREDENTIALS: ENV_VALUE}) def test_provide_gcp_credential_keep_environment_when_exception(self): key_path = '/test/key-path' self.instance.extras = {'extra__google_cloud_platform__key_path': key_path} with self.assertRaises(Exception): with self.instance.provide_gcp_credential_file_as_context(): raise Exception() self.assertEqual(os.environ[CREDENTIALS], ENV_VALUE) @mock.patch.dict(os.environ, clear=True) def test_provide_gcp_credential_keep_clear_environment(self): key_path = '/test/key-path' self.instance.extras = {'extra__google_cloud_platform__key_path': key_path} with self.instance.provide_gcp_credential_file_as_context(): self.assertEqual(os.environ[CREDENTIALS], key_path) self.assertNotIn(CREDENTIALS, os.environ) @mock.patch.dict(os.environ, clear=True) def test_provide_gcp_credential_keep_clear_environment_when_exception(self): key_path = '/test/key-path' self.instance.extras = {'extra__google_cloud_platform__key_path': key_path} with self.assertRaises(Exception): with self.instance.provide_gcp_credential_file_as_context(): raise Exception() self.assertNotIn(CREDENTIALS, os.environ) class TestGoogleBaseHook(unittest.TestCase): def setUp(self): self.instance = hook.GoogleBaseHook() @mock.patch(MODULE_NAME + '.get_credentials_and_project_id', return_value=("CREDENTIALS", "PROJECT_ID")) def test_get_credentials_and_project_id_with_default_auth(self, mock_get_creds_and_proj_id): self.instance.extras = {} result = self.instance._get_credentials_and_project_id() mock_get_creds_and_proj_id.assert_called_once_with( key_path=None, keyfile_dict=None, scopes=self.instance.scopes, delegate_to=None) self.assertEqual(('CREDENTIALS', 'PROJECT_ID'), result) @mock.patch(MODULE_NAME + '.get_credentials_and_project_id') def test_get_credentials_and_project_id_with_service_account_file( self, mock_get_creds_and_proj_id ): mock_credentials = mock.MagicMock() mock_get_creds_and_proj_id.return_value = (mock_credentials, "PROJECT_ID") self.instance.extras = { 'extra__google_cloud_platform__key_path': "KEY_PATH.json" } result = self.instance._get_credentials_and_project_id() mock_get_creds_and_proj_id.assert_called_once_with( key_path='KEY_PATH.json', keyfile_dict=None, scopes=self.instance.scopes, delegate_to=None ) self.assertEqual((mock_credentials, 'PROJECT_ID'), result) def test_get_credentials_and_project_id_with_service_account_file_and_p12_key( self ): self.instance.extras = { 'extra__google_cloud_platform__key_path': "KEY_PATH.p12" } with self.assertRaises(AirflowException): self.instance._get_credentials_and_project_id() def 
test_get_credentials_and_project_id_with_service_account_file_and_unknown_key( self ): self.instance.extras = { 'extra__google_cloud_platform__key_path': "KEY_PATH.unknown" } with self.assertRaises(AirflowException): self.instance._get_credentials_and_project_id() @mock.patch(MODULE_NAME + '.get_credentials_and_project_id') def test_get_credentials_and_project_id_with_service_account_info( self, mock_get_creds_and_proj_id ): mock_credentials = mock.MagicMock() mock_get_creds_and_proj_id.return_value = (mock_credentials, "PROJECT_ID") service_account = { 'private_key': "PRIVATE_KEY" } self.instance.extras = { 'extra__google_cloud_platform__keyfile_dict': json.dumps(service_account) } result = self.instance._get_credentials_and_project_id() mock_get_creds_and_proj_id.assert_called_once_with( key_path=None, keyfile_dict=service_account, scopes=self.instance.scopes, delegate_to=None ) self.assertEqual((mock_credentials, 'PROJECT_ID'), result) @mock.patch(MODULE_NAME + '.get_credentials_and_project_id') def test_get_credentials_and_project_id_with_default_auth_and_delegate( self, mock_get_creds_and_proj_id ): mock_credentials = mock.MagicMock() mock_get_creds_and_proj_id.return_value = (mock_credentials, "PROJECT_ID") self.instance.extras = {} self.instance.delegate_to = "USER" result = self.instance._get_credentials_and_project_id() mock_get_creds_and_proj_id.assert_called_once_with( key_path=None, keyfile_dict=None, scopes=self.instance.scopes, delegate_to="USER" ) self.assertEqual((mock_credentials, "PROJECT_ID"), result) @mock.patch('google.auth.default') def test_get_credentials_and_project_id_with_default_auth_and_unsupported_delegate( self, mock_auth_default ): self.instance.delegate_to = "TEST_DELLEGATE_TO" mock_credentials = mock.MagicMock(spec=google.auth.compute_engine.Credentials) mock_auth_default.return_value = (mock_credentials, "PROJECT_ID") with self.assertRaisesRegex(AirflowException, re.escape( "The `delegate_to` parameter cannot be used here as the current authentication method does not " "support account impersonate. Please use service-account for authorization." )): self.instance._get_credentials_and_project_id() @mock.patch( # type: ignore MODULE_NAME + '.get_credentials_and_project_id', return_value=("CREDENTIALS", "PROJECT_ID") ) def test_get_credentials_and_project_id_with_default_auth_and_overridden_project_id( self, mock_get_creds_and_proj_id ): self.instance.extras = { 'extra__google_cloud_platform__project': "SECOND_PROJECT_ID" } result = self.instance._get_credentials_and_project_id() mock_get_creds_and_proj_id.assert_called_once_with( key_path=None, keyfile_dict=None, scopes=self.instance.scopes, delegate_to=None ) self.assertEqual(("CREDENTIALS", 'SECOND_PROJECT_ID'), result) def test_get_credentials_and_project_id_with_mutually_exclusive_configuration( self, ): self.instance.extras = { 'extra__google_cloud_platform__project': "PROJECT_ID", 'extra__google_cloud_platform__key_path': "KEY_PATH", 'extra__google_cloud_platform__keyfile_dict': '{"KEY": "VALUE"}', } with self.assertRaisesRegex(AirflowException, re.escape( 'The `keyfile_dict` and `key_path` fields are mutually exclusive.' )): self.instance._get_credentials_and_project_id() def test_get_credentials_and_project_id_with_invalid_keyfile_dict( self, ): self.instance.extras = { 'extra__google_cloud_platform__keyfile_dict': 'INVALID_DICT', } with self.assertRaisesRegex(AirflowException, re.escape( 'Invalid key JSON.' 
)): self.instance._get_credentials_and_project_id() @unittest.skipIf(not default_creds_available, 'Default GCP credentials not available to run tests') def test_default_creds_with_scopes(self): self.instance.extras = { 'extra__google_cloud_platform__project': default_project, 'extra__google_cloud_platform__scope': ( ','.join( ( 'https://www.googleapis.com/auth/bigquery', 'https://www.googleapis.com/auth/devstorage.read_only', ) ) ), } credentials = self.instance._get_credentials() if not hasattr(credentials, 'scopes') or credentials.scopes is None: # Some default credentials don't have any scopes associated with # them, and that's okay. return scopes = credentials.scopes self.assertIn('https://www.googleapis.com/auth/bigquery', scopes) self.assertIn( 'https://www.googleapis.com/auth/devstorage.read_only', scopes) @unittest.skipIf( not default_creds_available, 'Default GCP credentials not available to run tests') def test_default_creds_no_scopes(self): self.instance.extras = { 'extra__google_cloud_platform__project': default_project } credentials = self.instance._get_credentials() if not hasattr(credentials, 'scopes') or credentials.scopes is None: # Some default credentials don't have any scopes associated with # them, and that's okay. return scopes = credentials.scopes self.assertEqual(tuple(_DEFAULT_SCOPES), tuple(scopes)) def test_provide_gcp_credential_file_decorator_key_path(self): key_path = '/test/key-path' self.instance.extras = {'extra__google_cloud_platform__key_path': key_path} @hook.GoogleBaseHook.provide_gcp_credential_file def assert_gcp_credential_file_in_env(hook_instance): # pylint: disable=unused-argument self.assertEqual(os.environ[CREDENTIALS], key_path) assert_gcp_credential_file_in_env(self.instance) @mock.patch('tempfile.NamedTemporaryFile') def test_provide_gcp_credential_file_decorator_key_content(self, mock_file): string_file = StringIO() file_content = '{"foo": "bar"}' file_name = '/test/mock-file' self.instance.extras = { 'extra__google_cloud_platform__keyfile_dict': file_content } mock_file_handler = mock_file.return_value.__enter__.return_value mock_file_handler.name = file_name mock_file_handler.write = string_file.write @hook.GoogleBaseHook.provide_gcp_credential_file def assert_gcp_credential_file_in_env(hook_instance): # pylint: disable=unused-argument self.assertEqual(os.environ[CREDENTIALS], file_name) self.assertEqual(file_content, string_file.getvalue()) assert_gcp_credential_file_in_env(self.instance) def test_provided_scopes(self): self.instance.extras = { 'extra__google_cloud_platform__project': default_project, 'extra__google_cloud_platform__scope': ( ','.join( ( 'https://www.googleapis.com/auth/bigquery', 'https://www.googleapis.com/auth/devstorage.read_only', ) ) ), } self.assertEqual( self.instance.scopes, [ 'https://www.googleapis.com/auth/bigquery', 'https://www.googleapis.com/auth/devstorage.read_only', ], ) def test_default_scopes(self): self.instance.extras = {'extra__google_cloud_platform__project': default_project} self.assertEqual(self.instance.scopes, ('https://www.googleapis.com/auth/cloud-platform',)) @mock.patch("airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_connection") def test_num_retries_is_not_none_by_default(self, get_con_mock): """ Verify that if 'num_retries' in extras is not set, the default value should not be None """ get_con_mock.return_value.extra_dejson = { "extra__google_cloud_platform__num_retries": None } self.assertEqual(self.instance.num_retries, 5) 
@mock.patch("airflow.providers.google.common.hooks.base_google.httplib2.Http") @mock.patch("airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials") def test_authorize_assert_user_agent_is_sent(self, mock_get_credentials, mock_http): """ Verify that if 'num_retires' in extras is not set, the default value should not be None """ request = mock_http.return_value.request response = mock.MagicMock(status_code=200) content = "CONTENT" mock_http.return_value.request.return_value = response, content new_response, new_content = self.instance._authorize().request("/test-action") request.assert_called_once_with( '/test-action', body=None, connection_type=None, headers={'user-agent': 'airflow/' + version.version}, method='GET', redirections=5 ) self.assertEqual(response, new_response) self.assertEqual(content, new_content) class TestProvideAuthorizedGcloud(unittest.TestCase): def setUp(self): with mock.patch( MODULE_NAME + '.GoogleBaseHook.__init__', new=mock_base_gcp_hook_default_project_id, ): self.instance = hook.GoogleBaseHook(gcp_conn_id="google-cloud-default") @mock.patch( 'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id', new_callable=mock.PropertyMock, return_value="PROJECT_ID" ) @mock.patch(MODULE_NAME + '.check_output') def test_provide_authorized_gcloud_key_path_and_keyfile_dict( self, mock_check_output, mock_default ): key_path = '/test/key-path' self.instance.extras = { 'extra__google_cloud_platform__key_path': key_path, 'extra__google_cloud_platform__keyfile_dict': '{"foo": "bar"}' } with self.assertRaisesRegex( AirflowException, 'The `keyfile_dict` and `key_path` fields are mutually exclusive. ' 'Please provide only one value.' ): with self.instance.provide_authorized_gcloud(): self.assertEqual(os.environ[CREDENTIALS], key_path) @mock.patch( 'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id', new_callable=mock.PropertyMock, return_value="PROJECT_ID" ) @mock.patch(MODULE_NAME + '.check_output') def test_provide_authorized_gcloud_key_path(self, mock_check_output, mock_project_id): key_path = '/test/key-path' self.instance.extras = {'extra__google_cloud_platform__key_path': key_path} with self.instance.provide_authorized_gcloud(): self.assertEqual(os.environ[CREDENTIALS], key_path) mock_check_output.has_calls( mock.call(['gcloud', 'config', 'set', 'core/project', 'PROJECT_ID']), mock.call(['gcloud', 'auth', 'activate-service-account', '--key-file=/test/key-path']) ) @mock.patch( 'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id', new_callable=mock.PropertyMock, return_value="PROJECT_ID" ) @mock.patch(MODULE_NAME + '.check_output') @mock.patch('tempfile.NamedTemporaryFile') def test_provide_authorized_gcloud_keyfile_dict(self, mock_file, mock_check_output, mock_project_id): string_file = StringIO() file_content = '{"foo": "bar"}' file_name = '/test/mock-file' self.instance.extras = {'extra__google_cloud_platform__keyfile_dict': file_content} mock_file_handler = mock_file.return_value.__enter__.return_value mock_file_handler.name = file_name mock_file_handler.write = string_file.write with self.instance.provide_authorized_gcloud(): self.assertEqual(os.environ[CREDENTIALS], file_name) mock_check_output.has_calls([ mock.call(['gcloud', 'config', 'set', 'core/project', 'PROJECT_ID']), mock.call(['gcloud', 'auth', 'activate-service-account', '--key-file=/test/mock-file']) ]) @mock.patch( 'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id', 
new_callable=mock.PropertyMock, return_value="PROJECT_ID" ) @mock.patch(MODULE_NAME + '._cloud_sdk') @mock.patch(MODULE_NAME + '.check_output') @mock.patch('tempfile.NamedTemporaryFile') def test_provide_authorized_gcloud_via_gcloud_application_default( self, mock_file, mock_check_output, mock_cloud_sdk, mock_project_id ): # This file always exists. mock_cloud_sdk.get_application_default_credentials_path.return_value = __file__ file_content = json.dumps({ "client_id": "CLIENT_ID", "client_secret": "CLIENT_SECRET", "refresh_token": "REFRESH_TOKEN", "type": "authorized_user" }) with mock.patch(MODULE_NAME + '.open', mock.mock_open(read_data=file_content)): with self.instance.provide_authorized_gcloud(): # Do nothing pass mock_check_output.has_calls( [ mock.call(['gcloud', 'config', 'set', 'auth/client_id', 'CLIENT_ID']), mock.call(['gcloud', 'config', 'set', 'auth/client_secret', 'CLIENT_SECRET']), mock.call(['gcloud', 'config', 'set', 'core/project', 'PROJECT_ID']), mock.call(['gcloud', 'auth', 'activate-refresh-token', 'CLIENT_ID', 'REFRESH_TOKEN']) ], any_order=False ) class TestNumRetry(unittest.TestCase): def test_should_return_int_when_set_int_via_connection(self): instance = hook.GoogleBaseHook(gcp_conn_id="google_cloud_default") instance.extras = { 'extra__google_cloud_platform__num_retries': 10, } self.assertIsInstance(instance.num_retries, int) self.assertEqual(10, instance.num_retries) @mock.patch.dict( 'os.environ', AIRFLOW_CONN_GOOGLE_CLOUD_DEFAULT=( 'google-cloud-platform://?extra__google_cloud_platform__num_retries=5' ) ) def test_should_return_int_when_set_via_env_var(self): instance = hook.GoogleBaseHook(gcp_conn_id="google_cloud_default") self.assertIsInstance(instance.num_retries, int) @mock.patch.dict( 'os.environ', AIRFLOW_CONN_GOOGLE_CLOUD_DEFAULT=( 'google-cloud-platform://?extra__google_cloud_platform__num_retries=cat' ) ) def test_should_raise_when_invalid_value_via_env_var(self): instance = hook.GoogleBaseHook(gcp_conn_id="google_cloud_default") with self.assertRaisesRegex( AirflowException, re.escape("The num_retries field should be a integer.") ): self.assertIsInstance(instance.num_retries, int) @mock.patch.dict( 'os.environ', AIRFLOW_CONN_GOOGLE_CLOUD_DEFAULT=( 'google-cloud-platform://?extra__google_cloud_platform__num_retries=' ) ) def test_should_fallback_when_empty_string_in_env_var(self): instance = hook.GoogleBaseHook(gcp_conn_id="google_cloud_default") self.assertIsInstance(instance.num_retries, int) self.assertEqual(5, instance.num_retries)
py
1a4ec499b12136712343d28efde2959cd9e35db9
from flask_jwt_extended import get_jwt_identity
from app.models.users_model import UserModel
from functools import wraps
from http import HTTPStatus


def verify_role_admin(func):
    @wraps(func)
    def security_func(*args, **kwargs):
        jwt_data = get_jwt_identity()
        user: UserModel = UserModel.query.filter_by(id=jwt_data['id']).first()

        if user.user_role != 'admin':
            return {"error": "Exclusive resource for admin."}, HTTPStatus.UNAUTHORIZED
        else:
            return func(*args, **kwargs)

    return security_func
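A short, hedged sketch of how a decorator like verify_role_admin is typically applied. The Flask app, route, and secret key below are illustrative assumptions rather than part of the original project, and @jwt_required() with parentheses assumes flask-jwt-extended 4.x.

# Hypothetical usage sketch: only verify_role_admin comes from the file above;
# the app, route, and secret key are placeholders for illustration.
from http import HTTPStatus
from flask import Flask
from flask_jwt_extended import JWTManager, jwt_required

app = Flask(__name__)
app.config["JWT_SECRET_KEY"] = "change-me"  # placeholder secret
jwt = JWTManager(app)


@app.route("/admin/users", methods=["GET"])
@jwt_required()          # verifies the token so get_jwt_identity() works inside
@verify_role_admin       # rejects identities whose user_role is not 'admin'
def list_users():
    return {"users": []}, HTTPStatus.OK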
py
1a4ec4ce457710a3de7e6447ebc8e965dfce5ed4
# Copyright 2019 The Dreamer Authors. Copyright 2020 Plan2Explore Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from plan2explore import tools


def cross_entropy_method(
    cell, objective, state, obs_shape, action_shape, horizon, graph,
    beams=1000, topk=100, iterations=10, min_action=-1, max_action=1):
  obs_shape, action_shape = tuple(obs_shape), tuple(action_shape)
  batch = tools.shape(tools.nested.flatten(state)[0])[0]
  initial_state = tools.nested.map(lambda tensor: tf.tile(
      tensor, [beams] + [1] * (tensor.shape.ndims - 1)), state)
  extended_batch = tools.shape(tools.nested.flatten(initial_state)[0])[0]
  use_obs = tf.zeros([extended_batch, horizon, 1], tf.bool)
  obs = tf.zeros((extended_batch, horizon) + obs_shape)

  def iteration(index, mean, stddev):
    # Sample action proposals from belief.
    normal = tf.random_normal((batch, beams, horizon) + action_shape)
    action = normal * stddev[:, None] + mean[:, None]
    action = tf.clip_by_value(action, min_action, max_action)
    # Evaluate proposal actions.
    action = tf.reshape(
        action, (extended_batch, horizon) + action_shape)
    (_, state), _ = tf.nn.dynamic_rnn(
        cell, (0 * obs, action, use_obs), initial_state=initial_state)
    return_ = objective(state)
    return_ = tf.reshape(return_, (batch, beams))
    # Re-fit belief to the best ones.
    _, indices = tf.nn.top_k(return_, topk, sorted=False)
    indices += tf.range(batch)[:, None] * beams
    best_actions = tf.gather(action, indices)
    mean, variance = tf.nn.moments(best_actions, 1)
    stddev = tf.sqrt(variance + 1e-6)
    return index + 1, mean, stddev

  mean = tf.zeros((batch, horizon) + action_shape)
  stddev = tf.ones((batch, horizon) + action_shape)
  _, mean, std = tf.while_loop(
      lambda index, mean, stddev: index < iterations, iteration,
      (0, mean, stddev), back_prop=False)
  return mean


def action_head_policy(
    cell, objective, state, obs_shape, action_shape, graph, config,
    strategy, min_action=-1, max_action=1):
  features = cell.features_from_state(state)
  policy = graph.heads.action(features)
  if strategy == 'sample':
    action = policy.sample()
  elif strategy == 'mode':
    action = policy.mode()
  elif strategy == 'curious_sample':
    curious_policy = graph.heads.curious_action(features)
    action = curious_policy.sample()
  elif strategy == 'random_sample':
    batch = tools.shape(tools.nested.flatten(features)[0])[0]
    mean = tf.zeros((batch, action_shape[0]))
    stddev = tf.ones((batch, action_shape[0]))
    normal = tf.random_normal((batch, action_shape[0]))
    action = normal * stddev + mean
    action = tf.clip_by_value(action, min_action, max_action)
  else:
    raise NotImplementedError(strategy)
  plan = action[:, None, :]
  return plan
py
1a4ec62b405c59fd6b8a91e94743987df0f97077
# -*- coding: utf-8 -*- # Copyright (C) 2012 Anaconda, Inc # SPDX-License-Identifier: BSD-3-Clause from __future__ import absolute_import, division, print_function, unicode_literals import hashlib import json import os from os.path import abspath, basename, dirname, isdir, isfile, islink, join import re import tarfile import tempfile from ..auxlib.entity import EntityEncoder from ..base.constants import CONDA_PACKAGE_EXTENSION_V1 from ..base.context import context from ..common.compat import PY3 from ..common.path import paths_equal from ..core.prefix_data import PrefixData from ..gateways.disk.delete import rmtree from ..install import PREFIX_PLACEHOLDER from ..misc import untracked def remove(prefix, files): """ Remove files for a given prefix. """ dst_dirs = set() for f in files: dst = join(prefix, f) dst_dirs.add(dirname(dst)) os.unlink(dst) for path in sorted(dst_dirs, key=len, reverse=True): try: os.rmdir(path) except OSError: # directory might not be empty pass def execute(args, parser): prefix = context.target_prefix if args.which: for path in args.which: for prec in which_package(path): print('%-50s %s' % (path, prec.dist_str())) return print('# prefix:', prefix) if args.reset: remove(prefix, untracked(prefix)) return if args.untracked: files = sorted(untracked(prefix)) print('# untracked files: %d' % len(files)) for fn in files: print(fn) return make_tarbz2(prefix, name=args.pkg_name.lower(), version=args.pkg_version, build_number=int(args.pkg_build)) def get_installed_version(prefix, name): for info in PrefixData(prefix).iter_records(): if info['name'] == name: return str(info['version']) return None def create_info(name, version, build_number, requires_py): d = dict( name=name, version=version, platform=context.platform, arch=context.arch_name, build_number=int(build_number), build=str(build_number), depends=[], ) if requires_py: d['build'] = ('py%d%d_' % requires_py) + d['build'] d['depends'].append('python %d.%d*' % requires_py) return d shebang_pat = re.compile(r'^#!.+$', re.M) def fix_shebang(tmp_dir, path): if open(path, 'rb').read(2) != '#!': return False with open(path) as fi: data = fi.read() m = shebang_pat.match(data) if not (m and 'python' in m.group()): return False data = shebang_pat.sub('#!%s/bin/python' % PREFIX_PLACEHOLDER, data, count=1) tmp_path = join(tmp_dir, basename(path)) with open(tmp_path, 'w') as fo: fo.write(data) os.chmod(tmp_path, int('755', 8)) return True def _add_info_dir(t, tmp_dir, files, has_prefix, info): info_dir = join(tmp_dir, 'info') os.mkdir(info_dir) with open(join(info_dir, 'files'), 'w') as fo: for f in files: fo.write(f + '\n') with open(join(info_dir, 'index.json'), 'w') as fo: json.dump(info, fo, indent=2, sort_keys=True, cls=EntityEncoder) if has_prefix: with open(join(info_dir, 'has_prefix'), 'w') as fo: for f in has_prefix: fo.write(f + '\n') for fn in os.listdir(info_dir): t.add(join(info_dir, fn), 'info/' + fn) def create_conda_pkg(prefix, files, info, tar_path, update_info=None): """ create a conda package with `files` (in `prefix` and `info` metadata) at `tar_path`, and return a list of warning strings """ files = sorted(files) warnings = [] has_prefix = [] tmp_dir = tempfile.mkdtemp() t = tarfile.open(tar_path, 'w:bz2') h = hashlib.new('sha1') for f in files: assert not (f.startswith('/') or f.endswith('/') or '\\' in f or f == ''), f path = join(prefix, f) if f.startswith('bin/') and fix_shebang(tmp_dir, path): path = join(tmp_dir, basename(path)) has_prefix.append(f) t.add(path, f) h.update(f.encode('utf-8')) 
h.update(b'\x00') if islink(path): link = os.readlink(path) if PY3 and isinstance(link, str): h.update(bytes(link, 'utf-8')) else: h.update(link) if link.startswith('/'): warnings.append('found symlink to absolute path: %s -> %s' % (f, link)) elif isfile(path): h.update(open(path, 'rb').read()) if path.endswith('.egg-link'): warnings.append('found egg link: %s' % f) info['file_hash'] = h.hexdigest() if update_info: update_info(info) _add_info_dir(t, tmp_dir, files, has_prefix, info) t.close() rmtree(tmp_dir) return warnings def make_tarbz2(prefix, name='unknown', version='0.0', build_number=0, files=None): if files is None: files = untracked(prefix) print("# files: %d" % len(files)) if len(files) == 0: print("# failed: nothing to do") return None if any('/site-packages/' in f for f in files): python_version = get_installed_version(prefix, 'python') assert python_version is not None requires_py = tuple(int(x) for x in python_version[:3].split('.')) else: requires_py = False info = create_info(name, version, build_number, requires_py) tarbz2_fn = ('%(name)s-%(version)s-%(build)s' % info) + CONDA_PACKAGE_EXTENSION_V1 create_conda_pkg(prefix, files, info, tarbz2_fn) print('# success') print(tarbz2_fn) return tarbz2_fn def which_package(path): """ given the path (of a (presumably) conda installed file) iterate over the conda packages the file came from. Usually the iteration yields only one package. """ path = abspath(path) prefix = which_prefix(path) if prefix is None: from ..exceptions import CondaVerificationError raise CondaVerificationError("could not determine conda prefix from: %s" % path) for prec in PrefixData(prefix).iter_records(): if any(paths_equal(join(prefix, f), path) for f in prec['files'] or ()): yield prec def which_prefix(path): """ given the path (to a (presumably) conda installed file) return the environment prefix in which the file in located """ prefix = abspath(path) while True: if isdir(join(prefix, 'conda-meta')): # we found the it, so let's return it return prefix if prefix == dirname(prefix): # we cannot chop off any more directories, so we didn't find it return None prefix = dirname(prefix)
py
1a4ec773c0929fa40162484601d60565cbf4b92b
from rest_framework import generics

from veggies.models import Veggies
from .serializers import VeggieSerializer


class VeggieList(generics.ListCreateAPIView):
    queryset = Veggies.objects.all()
    serializer_class = VeggieSerializer


class VeggieDetail(generics.RetrieveUpdateAPIView):
    queryset = Veggies.objects.all()
    serializer_class = VeggieSerializer
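A minimal, hedged sketch of the urls.py wiring these two generic views usually get; the URL patterns and route names are assumptions for illustration, not part of the original app.

# Hypothetical urls.py for the views above; path strings and route names
# are illustrative placeholders.
from django.urls import path

from .views import VeggieList, VeggieDetail

urlpatterns = [
    path("veggies/", VeggieList.as_view(), name="veggie-list"),
    path("veggies/<int:pk>/", VeggieDetail.as_view(), name="veggie-detail"),
]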
py
1a4ec78e1295a599b86714a8dca3063b953b2f53
""" Created by: Rob Mulla Sep 24 IEEE Fraud Detection Model - FE013 - Yang's Features - Raddars Features """ import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import os import sys import matplotlib.pylab as plt from sklearn.model_selection import KFold from datetime import datetime import time import logging from sklearn.metrics import roc_auc_score from catboost import CatBoostClassifier, Pool from timeit import default_timer as timer import lightgbm as lgb import gc start = timer() ################## # PARAMETERS ################### run_id = "{:%m%d_%H%M}".format(datetime.now()) KERNEL_RUN = False MODEL_NUMBER = os.path.basename(__file__).split('.')[0] if KERNEL_RUN: INPUT_DIR = '../input/champs-scalar-coupling/' FE_DIR = '../input/molecule-fe024/' FOLDS_DIR = '../input/champs-3fold-ids/' TARGET = "isFraud" N_ESTIMATORS = 100000 N_META_ESTIMATORS = 500000 LEARNING_RATE = 0.005 VERBOSE = 100 EARLY_STOPPING_ROUNDS = 100 RANDOM_STATE = 529 N_THREADS = 58 DEPTH = -1 #14 N_FOLDS = 5 SHUFFLE = False FE_SET = 'FE013' # Feature Engineering Version MODEL_TYPE = "lightgbm" ##################### ## SETUP LOGGER ##################### def get_logger(): """ credits to: https://www.kaggle.com/ogrellier/user-level-lightgbm-lb-1-4480 """ os.environ["TZ"] = "US/Eastern" time.tzset() FORMAT = "[%(levelname)s]%(asctime)s:%(name)s:%(message)s" logging.basicConfig(format=FORMAT) logger = logging.getLogger("main") logger.setLevel(logging.DEBUG) handler = logging.StreamHandler(sys.stdout) fhandler = logging.FileHandler(f'../logs/{MODEL_NUMBER}_{run_id}.log') formatter = logging.Formatter(FORMAT) handler.setFormatter(formatter) # logger.addHandler(handler) logger.addHandler(fhandler) return logger logger = get_logger() logger.info(f'Running for Model Number {MODEL_NUMBER}') ################## # PARAMETERS ################### if MODEL_TYPE == 'xgboost': EVAL_METRIC = "AUC" elif MODEL_TYPE == 'lightgbm': EVAL_METRIC = 'auc' elif MODEL_TYPE == 'catboost': EVAL_METRIC = "AUC" ################## # TRACKING FUNCTION ################### def update_tracking(run_id, field, value, csv_file="../tracking/tracking.csv", integer=False, digits=None, drop_incomplete_rows=False): """ Function to update the tracking CSV with information about the model """ try: df = pd.read_csv(csv_file, index_col=[0]) except FileNotFoundError: df = pd.DataFrame() if integer: value = round(value) elif digits is not None: value = round(value, digits) if drop_incomplete_rows: df = df.loc[~df['AUC'].isna()] df.loc[run_id, field] = value # Model number is index df.to_csv(csv_file) update_tracking(run_id, "model_number", MODEL_NUMBER, drop_incomplete_rows=True) update_tracking(run_id, "n_estimators", N_ESTIMATORS) update_tracking(run_id, "early_stopping_rounds", EARLY_STOPPING_ROUNDS) update_tracking(run_id, "random_state", RANDOM_STATE) update_tracking(run_id, "n_threads", N_THREADS) update_tracking(run_id, "learning_rate", LEARNING_RATE) update_tracking(run_id, "n_fold", N_FOLDS) update_tracking(run_id, "model_type", MODEL_TYPE) update_tracking(run_id, "eval_metric", EVAL_METRIC) update_tracking(run_id, "depth", DEPTH) update_tracking(run_id, "shuffle", SHUFFLE) update_tracking(run_id, "fe", FE_SET) ##################### # PREPARE MODEL DATA ##################### folds = KFold(n_splits=N_FOLDS, random_state=RANDOM_STATE, shuffle=SHUFFLE) logger.info('Loading Data...') train_df = pd.read_parquet(f'../data/train_{FE_SET}.parquet') test_df = pd.read_parquet(f'../data/test_{FE_SET}.parquet') 
logger.info('Done loading Data...') ########### # FEATURES ########### FEATURES = ['V1max', 'V2max', 'V3max', 'V4max', 'V5max', 'V6max', 'V7max', 'V8max', 'V9max', 'V10max', 'V11max', 'V12max', 'V13max', 'V14max', 'V15max', 'V16max', 'V17max', 'V18max', 'V19max', 'V20max', 'V21max', 'V22max', 'V23max', 'V24max', 'V25max', 'V26max', 'V27max', 'V28max', 'V29max', 'V30max', 'V31max', 'V32max', 'V33max', 'V34max', 'V35max', 'V36max', 'V37max', 'V38max', 'V39max', 'V40max', 'V41max', 'V42max', 'V43max', 'V44max', 'V45max', 'V46max', 'V47max', 'V48max', 'V49max', 'V50max', 'V51max', 'V52max', 'V53max', 'V54max', 'V55max', 'V56max', 'V57max', 'V58max', 'V59max', 'V60max', 'V61max', 'V62max', 'V63max', 'V64max', 'V65max', 'V66max', 'V67max', 'V68max', 'V69max', 'V70max', 'V71max', 'V72max', 'V73max', 'V74max', 'V75max', 'V76max', 'V77max', 'V78max', 'V79max', 'V80max', 'V81max', 'V82max', 'V83max', 'V84max', 'V85max', 'V86max', 'V87max', 'V88max', 'V89max', 'V90max', 'V91max', 'V92max', 'V93max', 'V94max', 'V95max', 'V96max', 'V97max', 'V98max', 'V99max', 'V100max', 'V101max', 'V102max', 'V103max', 'V104max', 'V105max', 'V106max', 'V107max', 'V108max', 'V109max', 'V110max', 'V111max', 'V112max', 'V113max', 'V114max', 'V115max', 'V116max', 'V117max', 'V118max', 'V119max', 'V120max', 'V121max', 'V122max', 'V123max', 'V124max', 'V125max', 'V126max', 'V127max', 'V128max', 'V129max', 'V130max', 'V131max', 'V132max', 'V133max', 'V134max', 'V135max', 'V136max', 'V137max', 'V138max', 'V139max', 'V140max', 'V141max', 'V142max', 'V143max', 'V144max', 'V145max', 'V146max', 'V147max', 'V148max', 'V149max', 'V150max', 'V151max', 'V152max', 'V153max', 'V154max', 'V155max', 'V156max', 'V157max', 'V158max', 'V159max', 'V160max', 'V161max', 'V162max', 'V163max', 'V164max', 'V165max', 'V166max', 'V167max', 'V168max', 'V169max', 'V170max', 'V171max', 'V172max', 'V173max', 'V174max', 'V175max', 'V176max', 'V177max', 'V178max', 'V179max', 'V180max', 'V181max', 'V182max', 'V183max', 'V184max', 'V185max', 'V186max', 'V187max', 'V188max', 'V189max', 'V190max', 'V191max', 'V192max', 'V193max', 'V194max', 'V195max', 'V196max', 'V197max', 'V198max', 'V199max', 'V200max', 'V201max', 'V202max', 'V203max', 'V204max', 'V205max', 'V206max', 'V207max', 'V208max', 'V209max', 'V210max', 'V211max', 'V212max', 'V213max', 'V214max', 'V215max', 'V216max', 'V217max', 'V218max', 'V219max', 'V220max', 'V221max', 'V222max', 'V223max', 'V224max', 'V225max', 'V226max', 'V227max', 'V228max', 'V229max', 'V230max', 'V231max', 'V232max', 'V233max', 'V234max', 'V235max', 'V236max', 'V237max', 'V238max', 'V239max', 'V240max', 'V241max', 'V242max', 'V243max', 'V244max', 'V245max', 'V246max', 'V247max', 'V248max', 'V249max', 'V250max', 'V251max', 'V252max', 'V253max', 'V254max', 'V255max', 'V256max', 'V257max', 'V258max', 'V259max', 'V260max', 'V261max', 'V262max', 'V263max', 'V264max', 'V265max', 'V266max', 'V267max', 'V268max', 'V269max', 'V270max', 'V271max', 'V272max', 'V273max', 'V274max', 'V275max', 'V276max', 'V277max', 'V278max', 'V279max', 'V280max', 'V281max', 'V282max', 'V283max', 'V284max', 'V285max', 'V286max', 'V287max', 'V288max', 'V289max', 'V290max', 'V291max', 'V292max', 'V293max', 'V294max', 'V295max', 'V296max', 'V297max', 'V298max', 'V299max', 'V300max', 'V301max', 'V302max', 'V303max', 'V304max', 'V305max', 'V306max', 'V307max', 'V308max', 'V309max', 'V310max', 'V311max', 'V312max', 'V313max', 'V314max', 'V315max', 'V316max', 'V317max', 'V318max', 'V319max', 'V320max', 'V321max', 'V322max', 'V323max', 'V324max', 'V325max', 
'V326max', 'V327max', 'V328max', 'V329max', 'V330max', 'V331max', 'V332max', 'V333max', 'V334max', 'V335max', 'V336max', 'V337max', 'V338max', 'V339max', 'ntrans', 'min_amt', 'mean_amt', 'max_amt', 'num_trans_ints', 'minC1', 'minC2', 'minC3', 'minC4', 'minC5', 'minC6', 'minC7', 'minC8', 'minC9', 'minC10', 'minC11', 'minC12', 'minC13', 'minC14', 'maxC1', 'maxC2', 'maxC3', 'maxC4', 'maxC5', 'maxC6', 'maxC7', 'maxC8', 'maxC9', 'maxC10', 'maxC11', 'maxC12', 'maxC13', 'maxC14', 'countC1_inc', 'countC2_inc', 'countC3_inc', 'countC4_inc', 'countC5_inc', 'countC6_inc', 'countC7_inc', 'countC8_inc', 'countC9_inc', 'countC10_inc', 'countC11_inc', 'countC12_inc', 'countC13_inc', 'countC14_inc', 'ndistM1', 'ndistM2', 'ndistM3', 'ndistM4', 'ndistM5', 'ndistM6', 'ndistM7', 'ndistM8', 'ndistM9'] CAT_FEATURES = ['ProductCD', 'card4', 'card6', 'id_12', 'id_13', 'id_14', 'id_15', 'id_16', 'id_17', 'id_18', 'id_19', 'id_20', 'id_21', 'id_22', 'id_23', 'id_24', 'id_25', 'id_26', 'id_27', 'id_28', 'id_29', 'id_32', 'id_34', 'id_35', 'id_36', 'id_37', 'id_38', 'DeviceType', 'DeviceInfo', 'M4','P_emaildomain', 'R_emaildomain', 'addr1', 'addr2', 'M1', 'M2', 'M3', 'M5', 'M6', 'M7', 'M8', 'M9', 'ProductCD_W_95cents','ProductCD_W_00cents','ProductCD_W_50cents', 'ProductCD_W_50_95_0_cents','ProductCD_W_NOT_50_95_0_cents'] CAT_FEATURES = [c for c in CAT_FEATURES if c in FEATURES] X = train_df[FEATURES].copy() y = train_df[TARGET].copy() X_test = test_df[FEATURES].copy() X = X.fillna(-9999) X_test = X_test.fillna(-9999) logger.info('Running with features...') logger.info(FEATURES) logger.info(f'Target is {TARGET}') update_tracking(run_id, "n_features", len(FEATURES), integer=True) ############################ #### TRAIN MODELS FUNCTIONS ############################ def train_catboost(X_train, y_train, X_valid, y_valid, X_test, CAT_FEATURES, fold_n, feature_importance): train_dataset = Pool(data=X_train, label=y_train, cat_features=CAT_FEATURES) valid_dataset = Pool(data=X_valid, label=y_valid, cat_features=CAT_FEATURES) test_dataset = Pool(data=X_test, cat_features=CAT_FEATURES) model = CatBoostClassifier( iterations=N_ESTIMATORS, learning_rate=LEARNING_RATE, depth=DEPTH, eval_metric=EVAL_METRIC, verbose=VERBOSE, random_state=RANDOM_STATE, thread_count=N_THREADS, task_type="GPU") model.fit( train_dataset, eval_set=valid_dataset, early_stopping_rounds=EARLY_STOPPING_ROUNDS, ) y_pred_valid = model.predict_proba(valid_dataset)[:,1] y_pred = model.predict_proba(test_dataset)[:,1] fold_importance = pd.DataFrame() fold_importance["feature"] = model.feature_names_ fold_importance["importance"] = model.get_feature_importance() fold_importance["fold"] = fold_n + 1 feature_importance = pd.concat([feature_importance, fold_importance], axis=0) best_iteration = model.best_iteration_ return y_pred, y_pred_valid, feature_importance, best_iteration lgb_params = { 'objective':'binary', 'boosting_type':'gbdt', 'metric': EVAL_METRIC, 'n_jobs':N_THREADS, 'learning_rate':LEARNING_RATE, 'num_leaves': 2**8, 'max_depth':DEPTH, 'tree_learner':'serial', 'colsample_bytree': 0.85, 'subsample_freq':1, 'subsample':0.85, 'n_estimators':N_ESTIMATORS, 'max_bin':255, 'verbose':-1, 'seed': RANDOM_STATE, #'early_stopping_rounds':EARLY_STOPPING_ROUNDS, 'reg_alpha':0.3, 'reg_lamdba':0.243, #'categorical_feature': CAT_FEATURES } # lgb_params = { # 'min_data_in_leaf': 106, # 'num_leaves': 500, # 'learning_rate': LEARNING_RATE, #0.008, # 'min_child_weight': 0.03454472573214212, # 'bagging_fraction': 0.4181193142567742, # 'feature_fraction': 
0.3797454081646243, # 'reg_lambda': 0.6485237330340494, # 'reg_alpha': 0.3899927210061127, # 'max_depth': DEPTH, #-1, # 'objective': 'binary', # 'seed': RANDOM_STATE, #13, # 'feature_fraction_seed': RANDOM_STATE, #13, # 'bagging_seed': RANDOM_STATE, #13, # 'drop_seed': RANDOM_STATE, #13, # 'data_random_seed': RANDOM_STATE, #13, # 'boosting_type': 'gbdt', # 'verbose': 1, # 'metric':'auc', # 'n_estimators':N_ESTIMATORS, # } def train_lightgbm(X_train, y_train, X_valid, y_valid, X_test, CAT_FEATURES, fold_n, feature_importance): X_train = X_train.copy() X_valid = X_valid.copy() X_test = X_test.copy() if len(CAT_FEATURES) > 0: X_train[CAT_FEATURES] = X_train[CAT_FEATURES].astype('category') X_valid[CAT_FEATURES] = X_valid[CAT_FEATURES].astype('category') X_test[CAT_FEATURES] = X_test[CAT_FEATURES].astype('category') model = lgb.LGBMClassifier(**lgb_params) model.fit(X_train, y_train, eval_set = [(X_train, y_train), (X_valid, y_valid)], verbose = VERBOSE, early_stopping_rounds=EARLY_STOPPING_ROUNDS) y_pred_valid = model.predict_proba(X_valid)[:,1] y_pred = model.predict_proba(X_test)[:,1] fold_importance = pd.DataFrame() fold_importance["feature"] = X_train.columns fold_importance["importance"] = model.feature_importances_ fold_importance["fold"] = fold_n + 1 feature_importance = pd.concat([feature_importance, fold_importance], axis=0) best_iteration = model.best_iteration_ return y_pred, y_pred_valid, feature_importance, best_iteration ################################ # Dataframes for storing results ################################# feature_importance = pd.DataFrame() oof = np.zeros(len(X)) pred = np.zeros(len(X_test)) oof_df = train_df[['isFraud']].copy() oof_df['oof'] = np.nan oof_df['fold'] = np.nan scores = [] best_iterations = [] del train_df, test_df gc.collect() for fold_n, (train_idx, valid_idx) in enumerate(folds.split(X, y)): X_train = X.iloc[train_idx] y_train = y.iloc[train_idx] X_valid = X.iloc[valid_idx] y_valid = y.iloc[valid_idx] if MODEL_TYPE == "catboost": y_pred, y_pred_valid, feature_importance, best_iteration = train_catboost(X_train, y_train, X_valid, y_valid, X_test, CAT_FEATURES, fold_n, feature_importance) if MODEL_TYPE == 'lightgbm': y_pred, y_pred_valid, feature_importance, best_iteration = train_lightgbm(X_train, y_train, X_valid, y_valid, X_test, CAT_FEATURES, fold_n, feature_importance) best_iterations.append(best_iteration) fold_score = roc_auc_score(y_valid, y_pred_valid) scores.append(fold_score) update_tracking(run_id, "AUC_f{}".format(fold_n + 1), fold_score, integer=False,) logger.info('Fold {} of {} CV mean AUC score: {:.4f}. 
Best iteration {}'.format(fold_n + 1, N_FOLDS, fold_score, best_iteration)) oof_df.iloc[valid_idx, oof_df.columns.get_loc('oof')] = y_pred_valid.reshape(-1) oof_df.iloc[valid_idx, oof_df.columns.get_loc('fold')] = fold_n + 1 pred += y_pred update_tracking(run_id, 'avg_best_iteration', np.mean(best_iterations), integer=True) ############### # Store Results ############### pred /= N_FOLDS score = np.mean(scores) sub = pd.read_csv('../input/sample_submission.csv') sub['isFraud'] = pred sub.to_csv(f'../sub/sub_{MODEL_NUMBER}_{run_id}_{score:.4f}.csv', index=False) oof_df.to_csv(f'../oof/oof_{MODEL_NUMBER}_{run_id}_{score:.4f}.csv') logger.info('CV mean AUC score: {:.4f}, std: {:.4f}.'.format(np.mean(scores), np.std(scores))) total_score = roc_auc_score(oof_df['isFraud'], oof_df['oof']) feature_importance.to_csv(f'../fi/fi_{MODEL_NUMBER}_{run_id}_{score:.4f}.csv') update_tracking(run_id, "AUC", total_score, integer=False,) logger.info('OOF AUC Score: {:.4f}'.format(total_score)) end = timer() update_tracking(run_id, "training_time", (end - start), integer=True) logger.info('Done!')
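# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original pipeline): the CV loop above
# fills out-of-fold (OOF) predictions for the validation rows of each fold and
# averages the test predictions across folds. The standalone snippet below
# reproduces that pattern on synthetic data using scikit-learn only; every
# name here (X_demo, y_demo, ...) is hypothetical and chosen for the example.
# ---------------------------------------------------------------------------
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import KFold

X_demo, y_demo = make_classification(n_samples=1000, n_features=20, random_state=0)
X_holdout, _ = make_classification(n_samples=200, n_features=20, random_state=1)

n_folds_demo = 5
oof_demo = np.zeros(len(X_demo))        # one out-of-fold prediction per training row
test_demo = np.zeros(len(X_holdout))    # accumulated test-set predictions

for trn_idx, val_idx in KFold(n_splits=n_folds_demo, shuffle=True, random_state=0).split(X_demo):
    clf = LogisticRegression(max_iter=1000).fit(X_demo[trn_idx], y_demo[trn_idx])
    oof_demo[val_idx] = clf.predict_proba(X_demo[val_idx])[:, 1]   # fill this fold's validation slice
    test_demo += clf.predict_proba(X_holdout)[:, 1]                # accumulate this fold's test prediction

test_demo /= n_folds_demo               # average across folds, as `pred /= N_FOLDS` above
print('demo OOF AUC: {:.4f}'.format(roc_auc_score(y_demo, oof_demo)))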
py
1a4ec949dfd340adee03660bb2d06b65ea87ed7a
import torch
import torch.nn as nn
import torch.nn.functional as F

use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")


class AugmentedConv(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, dk, dv, Nh, relative):
        super(AugmentedConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.dk = dk
        self.dv = dv
        self.Nh = Nh
        self.relative = relative

        self.conv_out = nn.Conv2d(self.in_channels, self.out_channels - self.dv, self.kernel_size, padding=1)
        self.qkv_conv = nn.Conv2d(self.in_channels, 2 * self.dk + self.dv, kernel_size=1)
        self.attn_out = nn.Conv2d(self.dv, self.dv, 1)

    def forward(self, x):
        # Input x
        # (batch_size, channels, height, width)
        batch, _, height, width = x.size()

        # conv_out
        # (batch_size, out_channels, height, width)
        conv_out = self.conv_out(x)

        # flat_q, flat_k, flat_v
        # (batch_size, Nh, height * width, dvh or dkh)
        # dvh = dv / Nh, dkh = dk / Nh
        # q, k, v
        # (batch_size, Nh, height, width, dv or dk)
        flat_q, flat_k, flat_v, q, k, v = self.compute_flat_qkv(x, self.dk, self.dv, self.Nh)
        logits = torch.matmul(flat_q.transpose(2, 3), flat_k)
        if self.relative:
            h_rel_logits, w_rel_logits = self.relative_logits(q)
            logits += h_rel_logits
            logits += w_rel_logits
        weights = F.softmax(logits, dim=-1)

        # attn_out
        # (batch, Nh, height * width, dvh)
        attn_out = torch.matmul(weights, flat_v.transpose(2, 3))
        attn_out = torch.reshape(attn_out, (batch, self.Nh, self.dv // self.Nh, height, width))

        # combine_heads_2d
        # (batch, out_channels, height, width)
        attn_out = self.combine_heads_2d(attn_out)
        attn_out = self.attn_out(attn_out)
        return torch.cat((conv_out, attn_out), dim=1)

    def compute_flat_qkv(self, x, dk, dv, Nh):
        N, _, H, W = x.size()
        qkv = self.qkv_conv(x)
        q, k, v = torch.split(qkv, [dk, dk, dv], dim=1)
        q = self.split_heads_2d(q, Nh)
        k = self.split_heads_2d(k, Nh)
        v = self.split_heads_2d(v, Nh)

        dkh = dk // Nh
        q *= dkh ** -0.5
        flat_q = torch.reshape(q, (N, Nh, dk // Nh, H * W))
        flat_k = torch.reshape(k, (N, Nh, dk // Nh, H * W))
        flat_v = torch.reshape(v, (N, Nh, dv // Nh, H * W))
        return flat_q, flat_k, flat_v, q, k, v

    def split_heads_2d(self, x, Nh):
        batch, channels, height, width = x.size()
        ret_shape = (batch, Nh, channels // Nh, height, width)
        split = torch.reshape(x, ret_shape)
        return split

    def combine_heads_2d(self, x):
        batch, Nh, dv, H, W = x.size()
        ret_shape = (batch, Nh * dv, H, W)
        return torch.reshape(x, ret_shape)

    def relative_logits(self, q):
        B, Nh, dk, H, W = q.size()
        q = torch.transpose(q, 2, 4).transpose(2, 3)

        key_rel_w = nn.Parameter(torch.randn((2 * W - 1, dk), requires_grad=True)).to(device)
        rel_logits_w = self.relative_logits_1d(q, key_rel_w, H, W, Nh, "w")

        key_rel_h = nn.Parameter(torch.randn((2 * H - 1, dk), requires_grad=True)).to(device)
        rel_logits_h = self.relative_logits_1d(torch.transpose(q, 2, 3), key_rel_h, W, H, Nh, "h")
        return rel_logits_h, rel_logits_w

    def relative_logits_1d(self, q, rel_k, H, W, Nh, case):
        rel_logits = torch.einsum('bhxyd,md->bhxym', q, rel_k)
        rel_logits = torch.reshape(rel_logits, (-1, Nh * H, W, 2 * W - 1))
        rel_logits = self.rel_to_abs(rel_logits)

        rel_logits = torch.reshape(rel_logits, (-1, Nh, H, W, W))
        rel_logits = torch.unsqueeze(rel_logits, dim=3)
        rel_logits = rel_logits.repeat((1, 1, 1, H, 1, 1))

        if case == "w":
            rel_logits = torch.transpose(rel_logits, 3, 4)
        elif case == "h":
            rel_logits = torch.transpose(rel_logits, 2, 4).transpose(4, 5).transpose(3, 5)
        rel_logits = torch.reshape(rel_logits, (-1, Nh, H * W, H * W))
        return rel_logits

    def rel_to_abs(self, x):
        B, Nh, L, _ = x.size()

        col_pad = torch.zeros((B, Nh, L, 1)).to(device)
        x = torch.cat((x, col_pad), dim=3)

        flat_x = torch.reshape(x, (B, Nh, L * 2 * L))
        flat_pad = torch.zeros((B, Nh, L - 1)).to(device)
        flat_x_padded = torch.cat((flat_x, flat_pad), dim=2)

        final_x = torch.reshape(flat_x_padded, (B, Nh, L + 1, 2 * L - 1))
        final_x = final_x[:, :, :L, L - 1:]
        return final_x
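# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original module): a quick shape check of
# the attention-augmented convolution defined above. The parameter values are
# arbitrary examples; dk and dv must be divisible by Nh, and out_channels must
# exceed dv, since dv output channels come from attention and the remainder
# from the regular convolution.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    x = torch.randn(2, 3, 16, 16)                        # (batch, channels, H, W)
    layer = AugmentedConv(in_channels=3, out_channels=20, kernel_size=3,
                          dk=40, dv=4, Nh=4, relative=False)
    out = layer(x)
    print(out.shape)                                     # torch.Size([2, 20, 16, 16])
    # Setting relative=True additionally adds 2D relative position logits.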
py
1a4eca0dd42aee5406755e956d413792136d6634
# Copyright 2020 - 2021 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A collection of dictionary-based wrappers around the "vanilla" transforms for utility functions defined in :py:class:`monai.transforms.utility.array`. Class names are ended with 'd' to denote dictionary-based transforms. """ import logging import re from copy import deepcopy from typing import Any, Callable, Dict, Hashable, List, Mapping, Optional, Sequence, Tuple, Union import numpy as np import torch from monai.config import DtypeLike, KeysCollection from monai.config.type_definitions import NdarrayOrTensor from monai.data.utils import no_collation from monai.transforms.inverse import InvertibleTransform from monai.transforms.transform import MapTransform, Randomizable, RandomizableTransform from monai.transforms.utility.array import ( AddChannel, AddExtremePointsChannel, AsChannelFirst, AsChannelLast, CastToType, ClassesToIndices, ConvertToMultiChannelBasedOnBratsClasses, CuCIM, DataStats, EnsureChannelFirst, EnsureType, FgBgToIndices, Identity, IntensityStats, LabelToMask, Lambda, MapLabelValue, RemoveRepeatedChannel, RepeatChannel, SimulateDelay, SplitChannel, SqueezeDim, ToCupy, ToDevice, ToNumpy, ToPIL, TorchVision, ToTensor, Transpose, ) from monai.transforms.utils import extreme_points_to_image, get_extreme_points from monai.transforms.utils_pytorch_numpy_unification import concatenate from monai.utils import convert_to_numpy, ensure_tuple, ensure_tuple_rep from monai.utils.enums import TraceKeys, TransformBackends from monai.utils.type_conversion import convert_to_dst_type __all__ = [ "AddChannelD", "AddChannelDict", "AddChanneld", "AddExtremePointsChannelD", "AddExtremePointsChannelDict", "AddExtremePointsChanneld", "AsChannelFirstD", "AsChannelFirstDict", "AsChannelFirstd", "AsChannelLastD", "AsChannelLastDict", "AsChannelLastd", "CastToTypeD", "CastToTypeDict", "CastToTyped", "ConcatItemsD", "ConcatItemsDict", "ConcatItemsd", "ConvertToMultiChannelBasedOnBratsClassesD", "ConvertToMultiChannelBasedOnBratsClassesDict", "ConvertToMultiChannelBasedOnBratsClassesd", "CopyItemsD", "CopyItemsDict", "CopyItemsd", "CuCIMd", "CuCIMD", "CuCIMDict", "DataStatsD", "DataStatsDict", "DataStatsd", "DeleteItemsD", "DeleteItemsDict", "DeleteItemsd", "EnsureChannelFirstD", "EnsureChannelFirstDict", "EnsureChannelFirstd", "EnsureTypeD", "EnsureTypeDict", "EnsureTyped", "FgBgToIndicesD", "FgBgToIndicesDict", "FgBgToIndicesd", "IdentityD", "IdentityDict", "Identityd", "IntensityStatsd", "IntensityStatsD", "IntensityStatsDict", "LabelToMaskD", "LabelToMaskDict", "LabelToMaskd", "LambdaD", "LambdaDict", "Lambdad", "MapLabelValueD", "MapLabelValueDict", "MapLabelValued", "RandCuCIMd", "RandCuCIMD", "RandCuCIMDict", "RandLambdaD", "RandLambdaDict", "RandLambdad", "RandTorchVisionD", "RandTorchVisionDict", "RandTorchVisiond", "RemoveRepeatedChannelD", "RemoveRepeatedChannelDict", "RemoveRepeatedChanneld", "RepeatChannelD", "RepeatChannelDict", "RepeatChanneld", "SelectItemsD", "SelectItemsDict", "SelectItemsd", "SimulateDelayD", 
"SimulateDelayDict", "SimulateDelayd", "SplitChannelD", "SplitChannelDict", "SplitChanneld", "SqueezeDimD", "SqueezeDimDict", "SqueezeDimd", "ToCupyD", "ToCupyDict", "ToCupyd", "ToDeviced", "ToDeviceD", "ToDeviceDict", "ToNumpyD", "ToNumpyDict", "ToNumpyd", "ToPILD", "ToPILDict", "ToPILd", "ToTensorD", "ToTensorDict", "ToTensord", "TorchVisionD", "TorchVisionDict", "TorchVisiond", "Transposed", "TransposeDict", "TransposeD", "ClassesToIndicesd", "ClassesToIndicesD", "ClassesToIndicesDict", ] class Identityd(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.Identity`. """ backend = Identity.backend def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` allow_missing_keys: don't raise exception if key is missing. """ super().__init__(keys, allow_missing_keys) self.identity = Identity() def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key in self.key_iterator(d): d[key] = self.identity(d[key]) return d class AsChannelFirstd(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.AsChannelFirst`. """ backend = AsChannelFirst.backend def __init__(self, keys: KeysCollection, channel_dim: int = -1, allow_missing_keys: bool = False) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` channel_dim: which dimension of input image is the channel, default is the last dimension. allow_missing_keys: don't raise exception if key is missing. """ super().__init__(keys, allow_missing_keys) self.converter = AsChannelFirst(channel_dim=channel_dim) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key in self.key_iterator(d): d[key] = self.converter(d[key]) return d class AsChannelLastd(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.AsChannelLast`. """ backend = AsChannelLast.backend def __init__(self, keys: KeysCollection, channel_dim: int = 0, allow_missing_keys: bool = False) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` channel_dim: which dimension of input image is the channel, default is the first dimension. allow_missing_keys: don't raise exception if key is missing. """ super().__init__(keys, allow_missing_keys) self.converter = AsChannelLast(channel_dim=channel_dim) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key in self.key_iterator(d): d[key] = self.converter(d[key]) return d class AddChanneld(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.AddChannel`. """ backend = AddChannel.backend def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` allow_missing_keys: don't raise exception if key is missing. 
""" super().__init__(keys, allow_missing_keys) self.adder = AddChannel() def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key in self.key_iterator(d): d[key] = self.adder(d[key]) return d class EnsureChannelFirstd(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.EnsureChannelFirst`. """ backend = EnsureChannelFirst.backend def __init__( self, keys: KeysCollection, meta_keys: Optional[KeysCollection] = None, meta_key_postfix: str = "meta_dict", strict_check: bool = True, ) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` meta_keys: explicitly indicate the key of the corresponding meta data dictionary. for example, for data with key `image`, the metadata by default is in `image_meta_dict`. the meta data is a dictionary object which contains: filename, original_shape, etc. it can be a sequence of string, map to the `keys`. if None, will try to construct meta_keys by `key_{meta_key_postfix}`. meta_key_postfix: if meta_keys is None and `key_{postfix}` was used to store the metadata in `LoadImaged`. So need the key to extract metadata for channel dim information, default is `meta_dict`. For example, for data with key `image`, metadata by default is in `image_meta_dict`. strict_check: whether to raise an error when the meta information is insufficient. """ super().__init__(keys) self.adjuster = EnsureChannelFirst(strict_check=strict_check) self.meta_keys = ensure_tuple_rep(meta_keys, len(self.keys)) self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys)) def __call__(self, data) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key, meta_key, meta_key_postfix in zip(self.keys, self.meta_keys, self.meta_key_postfix): d[key] = self.adjuster(d[key], d[meta_key or f"{key}_{meta_key_postfix}"]) return d class RepeatChanneld(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.RepeatChannel`. """ backend = RepeatChannel.backend def __init__(self, keys: KeysCollection, repeats: int, allow_missing_keys: bool = False) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` repeats: the number of repetitions for each element. allow_missing_keys: don't raise exception if key is missing. """ super().__init__(keys, allow_missing_keys) self.repeater = RepeatChannel(repeats) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key in self.key_iterator(d): d[key] = self.repeater(d[key]) return d class RemoveRepeatedChanneld(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.RemoveRepeatedChannel`. """ backend = RemoveRepeatedChannel.backend def __init__(self, keys: KeysCollection, repeats: int, allow_missing_keys: bool = False) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` repeats: the number of repetitions for each element. allow_missing_keys: don't raise exception if key is missing. 
""" super().__init__(keys, allow_missing_keys) self.repeater = RemoveRepeatedChannel(repeats) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key in self.key_iterator(d): d[key] = self.repeater(d[key]) return d class SplitChanneld(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.SplitChannel`. All the input specified by `keys` should be split into same count of data. """ backend = SplitChannel.backend def __init__( self, keys: KeysCollection, output_postfixes: Optional[Sequence[str]] = None, channel_dim: int = 0, allow_missing_keys: bool = False, ) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` output_postfixes: the postfixes to construct keys to store split data. for example: if the key of input data is `pred` and split 2 classes, the output data keys will be: pred_(output_postfixes[0]), pred_(output_postfixes[1]) if None, using the index number: `pred_0`, `pred_1`, ... `pred_N`. channel_dim: which dimension of input image is the channel, default to 0. allow_missing_keys: don't raise exception if key is missing. """ super().__init__(keys, allow_missing_keys) self.output_postfixes = output_postfixes self.splitter = SplitChannel(channel_dim=channel_dim) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key in self.key_iterator(d): rets = self.splitter(d[key]) postfixes: Sequence = list(range(len(rets))) if self.output_postfixes is None else self.output_postfixes if len(postfixes) != len(rets): raise AssertionError("count of split results must match output_postfixes.") for i, r in enumerate(rets): split_key = f"{key}_{postfixes[i]}" if split_key in d: raise RuntimeError(f"input data already contains key {split_key}.") d[split_key] = r return d class CastToTyped(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.CastToType`. """ backend = CastToType.backend def __init__( self, keys: KeysCollection, dtype: Union[Sequence[Union[DtypeLike, torch.dtype]], DtypeLike, torch.dtype] = np.float32, allow_missing_keys: bool = False, ) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` dtype: convert image to this data type, default is `np.float32`. it also can be a sequence of dtypes or torch.dtype, each element corresponds to a key in ``keys``. allow_missing_keys: don't raise exception if key is missing. """ MapTransform.__init__(self, keys, allow_missing_keys) self.dtype = ensure_tuple_rep(dtype, len(self.keys)) self.converter = CastToType() def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key, dtype in self.key_iterator(d, self.dtype): d[key] = self.converter(d[key], dtype=dtype) return d class ToTensord(MapTransform, InvertibleTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.ToTensor`. """ backend = ToTensor.backend def __init__( self, keys: KeysCollection, dtype: Optional[torch.dtype] = None, device: Optional[torch.device] = None, allow_missing_keys: bool = False, ) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` dtype: target data content type to convert, for example: torch.float, etc. device: specify the target device to put the Tensor data. 
allow_missing_keys: don't raise exception if key is missing. """ super().__init__(keys, allow_missing_keys) self.converter = ToTensor(dtype=dtype, device=device) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key in self.key_iterator(d): self.push_transform(d, key) d[key] = self.converter(d[key]) return d def inverse(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = deepcopy(dict(data)) for key in self.key_iterator(d): # Create inverse transform inverse_transform = ToNumpy() # Apply inverse d[key] = inverse_transform(d[key]) # Remove the applied transform self.pop_transform(d, key) return d class EnsureTyped(MapTransform, InvertibleTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.EnsureType`. Ensure the input data to be a PyTorch Tensor or numpy array, support: `numpy array`, `PyTorch Tensor`, `float`, `int`, `bool`, `string` and `object` keep the original. If passing a dictionary, list or tuple, still return dictionary, list or tuple and recursively convert every item to the expected data type. Note: Currently, we only convert tensor data to numpy array or scalar number in the inverse operation. """ backend = EnsureType.backend def __init__( self, keys: KeysCollection, data_type: str = "tensor", dtype: Optional[Union[DtypeLike, torch.dtype]] = None, device: Optional[torch.device] = None, allow_missing_keys: bool = False, ) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` data_type: target data type to convert, should be "tensor" or "numpy". dtype: target data content type to convert, for example: np.float32, torch.float, etc. device: for Tensor data type, specify the target device. allow_missing_keys: don't raise exception if key is missing. """ super().__init__(keys, allow_missing_keys) self.converter = EnsureType(data_type=data_type, dtype=dtype, device=device) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key in self.key_iterator(d): self.push_transform(d, key) d[key] = self.converter(d[key]) return d def inverse(self, data: Mapping[Hashable, Any]) -> Dict[Hashable, Any]: d = deepcopy(dict(data)) for key in self.key_iterator(d): # FIXME: currently, only convert tensor data to numpy array or scalar number, # need to also invert numpy array but it's not easy to determine the previous data type d[key] = convert_to_numpy(d[key]) # Remove the applied transform self.pop_transform(d, key) return d class ToNumpyd(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.ToNumpy`. """ backend = ToNumpy.backend def __init__(self, keys: KeysCollection, dtype: DtypeLike = None, allow_missing_keys: bool = False) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` dtype: target data type when converting to numpy array. allow_missing_keys: don't raise exception if key is missing. """ super().__init__(keys, allow_missing_keys) self.converter = ToNumpy(dtype=dtype) def __call__(self, data: Mapping[Hashable, Any]) -> Dict[Hashable, Any]: d = dict(data) for key in self.key_iterator(d): d[key] = self.converter(d[key]) return d class ToCupyd(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.ToCupy`. Args: keys: keys of the corresponding items to be transformed. 
See also: :py:class:`monai.transforms.compose.MapTransform` dtype: data type specifier. It is inferred from the input by default. allow_missing_keys: don't raise exception if key is missing. """ backend = ToCupy.backend def __init__(self, keys: KeysCollection, dtype=None, allow_missing_keys: bool = False) -> None: super().__init__(keys, allow_missing_keys) self.converter = ToCupy(dtype=dtype) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key in self.key_iterator(d): d[key] = self.converter(d[key]) return d class ToPILd(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.ToNumpy`. """ backend = ToPIL.backend def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` allow_missing_keys: don't raise exception if key is missing. """ super().__init__(keys, allow_missing_keys) self.converter = ToPIL() def __call__(self, data: Mapping[Hashable, Any]) -> Dict[Hashable, Any]: d = dict(data) for key in self.key_iterator(d): d[key] = self.converter(d[key]) return d class Transposed(MapTransform, InvertibleTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.Transpose`. """ backend = Transpose.backend def __init__( self, keys: KeysCollection, indices: Optional[Sequence[int]], allow_missing_keys: bool = False ) -> None: super().__init__(keys, allow_missing_keys) self.transform = Transpose(indices) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key in self.key_iterator(d): d[key] = self.transform(d[key]) # if None was supplied then numpy uses range(a.ndim)[::-1] indices = self.transform.indices or range(d[key].ndim)[::-1] self.push_transform(d, key, extra_info={"indices": indices}) return d def inverse(self, data: Mapping[Hashable, Any]) -> Dict[Hashable, Any]: d = deepcopy(dict(data)) for key in self.key_iterator(d): transform = self.get_most_recent_transform(d, key) # Create inverse transform fwd_indices = np.array(transform[TraceKeys.EXTRA_INFO]["indices"]) inv_indices = np.argsort(fwd_indices) inverse_transform = Transpose(inv_indices.tolist()) # Apply inverse d[key] = inverse_transform(d[key]) # Remove the applied transform self.pop_transform(d, key) return d class DeleteItemsd(MapTransform): """ Delete specified items from data dictionary to release memory. It will remove the key-values and copy the others to construct a new dictionary. """ backend = [TransformBackends.TORCH, TransformBackends.NUMPY] def __init__(self, keys: KeysCollection, sep: str = ".", use_re: Union[Sequence[bool], bool] = False) -> None: """ Args: keys: keys of the corresponding items to delete, can be "A{sep}B{sep}C" to delete key `C` in nested dictionary, `C` can be regular expression. See also: :py:class:`monai.transforms.compose.MapTransform` sep: the separator tag to define nested dictionary keys, default to ".". use_re: whether the specified key is a regular expression, it also can be a list of bool values, map the to keys. 
""" super().__init__(keys) self.sep = sep self.use_re = ensure_tuple_rep(use_re, len(self.keys)) def __call__(self, data): def _delete_item(keys, d, use_re: bool = False): key = keys[0] if len(keys) > 1: d[key] = _delete_item(keys[1:], d[key], use_re) return d return {k: v for k, v in d.items() if (use_re and not re.search(key, k)) or (not use_re and k != key)} d = dict(data) for key, use_re in zip(self.keys, self.use_re): d = _delete_item(key.split(self.sep), d, use_re) return d class SelectItemsd(MapTransform): """ Select only specified items from data dictionary to release memory. It will copy the selected key-values and construct and new dictionary. """ backend = [TransformBackends.TORCH, TransformBackends.NUMPY] def __call__(self, data): return {key: data[key] for key in self.key_iterator(data)} class SqueezeDimd(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.SqueezeDim`. """ backend = SqueezeDim.backend def __init__(self, keys: KeysCollection, dim: int = 0, allow_missing_keys: bool = False) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` dim: dimension to be squeezed. Default: 0 (the first dimension) allow_missing_keys: don't raise exception if key is missing. """ super().__init__(keys, allow_missing_keys) self.converter = SqueezeDim(dim=dim) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key in self.key_iterator(d): d[key] = self.converter(d[key]) return d class DataStatsd(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.DataStats`. """ backend = DataStats.backend def __init__( self, keys: KeysCollection, prefix: Union[Sequence[str], str] = "Data", data_type: Union[Sequence[bool], bool] = True, data_shape: Union[Sequence[bool], bool] = True, value_range: Union[Sequence[bool], bool] = True, data_value: Union[Sequence[bool], bool] = False, additional_info: Optional[Union[Sequence[Callable], Callable]] = None, logger_handler: Optional[logging.Handler] = None, allow_missing_keys: bool = False, ) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` prefix: will be printed in format: "{prefix} statistics". it also can be a sequence of string, each element corresponds to a key in ``keys``. data_type: whether to show the type of input data. it also can be a sequence of bool, each element corresponds to a key in ``keys``. data_shape: whether to show the shape of input data. it also can be a sequence of bool, each element corresponds to a key in ``keys``. value_range: whether to show the value range of input data. it also can be a sequence of bool, each element corresponds to a key in ``keys``. data_value: whether to show the raw value of input data. it also can be a sequence of bool, each element corresponds to a key in ``keys``. a typical example is to print some properties of Nifti image: affine, pixdim, etc. additional_info: user can define callable function to extract additional info from input data. it also can be a sequence of string, each element corresponds to a key in ``keys``. logger_handler: add additional handler to output data: save to file, etc. add existing python logging handlers: https://docs.python.org/3/library/logging.handlers.html the handler should have a logging level of at least `INFO`. allow_missing_keys: don't raise exception if key is missing. 
""" super().__init__(keys, allow_missing_keys) self.prefix = ensure_tuple_rep(prefix, len(self.keys)) self.data_type = ensure_tuple_rep(data_type, len(self.keys)) self.data_shape = ensure_tuple_rep(data_shape, len(self.keys)) self.value_range = ensure_tuple_rep(value_range, len(self.keys)) self.data_value = ensure_tuple_rep(data_value, len(self.keys)) self.additional_info = ensure_tuple_rep(additional_info, len(self.keys)) self.logger_handler = logger_handler self.printer = DataStats(logger_handler=logger_handler) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key, prefix, data_type, data_shape, value_range, data_value, additional_info in self.key_iterator( d, self.prefix, self.data_type, self.data_shape, self.value_range, self.data_value, self.additional_info ): d[key] = self.printer(d[key], prefix, data_type, data_shape, value_range, data_value, additional_info) return d class SimulateDelayd(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.SimulateDelay`. """ backend = SimulateDelay.backend def __init__( self, keys: KeysCollection, delay_time: Union[Sequence[float], float] = 0.0, allow_missing_keys: bool = False ) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` delay_time: The minimum amount of time, in fractions of seconds, to accomplish this identity task. It also can be a sequence of string, each element corresponds to a key in ``keys``. allow_missing_keys: don't raise exception if key is missing. """ super().__init__(keys, allow_missing_keys) self.delay_time = ensure_tuple_rep(delay_time, len(self.keys)) self.delayer = SimulateDelay() def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key, delay_time in self.key_iterator(d, self.delay_time): d[key] = self.delayer(d[key], delay_time=delay_time) return d class CopyItemsd(MapTransform): """ Copy specified items from data dictionary and save with different key names. It can copy several items together and copy several times. """ backend = [TransformBackends.TORCH, TransformBackends.NUMPY] def __init__( self, keys: KeysCollection, times: int, names: KeysCollection, allow_missing_keys: bool = False ) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` times: expected copy times, for example, if keys is "img", times is 3, it will add 3 copies of "img" data to the dictionary. names: the names corresponding to the newly copied data, the length should match `len(keys) x times`. for example, if keys is ["img", "seg"] and times is 2, names can be: ["img_1", "seg_1", "img_2", "seg_2"]. allow_missing_keys: don't raise exception if key is missing. Raises: ValueError: When ``times`` is nonpositive. ValueError: When ``len(names)`` is not ``len(keys) * times``. Incompatible values. """ super().__init__(keys, allow_missing_keys) if times < 1: raise ValueError(f"times must be positive, got {times}.") self.times = times names = ensure_tuple(names) if len(names) != (len(self.keys) * times): raise ValueError( "len(names) must match len(keys) * times, " f"got len(names)={len(names)} len(keys) * times={len(self.keys) * times}." ) self.names = names def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: """ Raises: KeyError: When a key in ``self.names`` already exists in ``data``. 
""" d = dict(data) key_len = len(self.keys) for i in range(self.times): for key, new_key in self.key_iterator(d, self.names[i * key_len : (i + 1) * key_len]): if new_key in d: raise KeyError(f"Key {new_key} already exists in data.") val = d[key] if isinstance(val, torch.Tensor): d[new_key] = val.detach().clone() else: d[new_key] = deepcopy(val) return d class ConcatItemsd(MapTransform): """ Concatenate specified items from data dictionary together on the first dim to construct a big array. Expect all the items are numpy array or PyTorch Tensor. """ backend = [TransformBackends.TORCH, TransformBackends.NUMPY] def __init__(self, keys: KeysCollection, name: str, dim: int = 0, allow_missing_keys: bool = False) -> None: """ Args: keys: keys of the corresponding items to be concatenated together. See also: :py:class:`monai.transforms.compose.MapTransform` name: the name corresponding to the key to store the concatenated data. dim: on which dimension to concatenate the items, default is 0. allow_missing_keys: don't raise exception if key is missing. """ super().__init__(keys, allow_missing_keys) self.name = name self.dim = dim def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: """ Raises: TypeError: When items in ``data`` differ in type. TypeError: When the item type is not in ``Union[numpy.ndarray, torch.Tensor]``. """ d = dict(data) output = [] data_type = None for key in self.key_iterator(d): if data_type is None: data_type = type(d[key]) elif not isinstance(d[key], data_type): raise TypeError("All items in data must have the same type.") output.append(d[key]) if len(output) == 0: return d if data_type is np.ndarray: d[self.name] = np.concatenate(output, axis=self.dim) elif data_type is torch.Tensor: d[self.name] = torch.cat(output, dim=self.dim) # type: ignore else: raise TypeError(f"Unsupported data type: {data_type}, available options are (numpy.ndarray, torch.Tensor).") return d class Lambdad(MapTransform, InvertibleTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.Lambda`. For example: .. code-block:: python :emphasize-lines: 2 input_data={'image': np.zeros((10, 2, 2)), 'label': np.ones((10, 2, 2))} lambd = Lambdad(keys='label', func=lambda x: x[:4, :, :]) print(lambd(input_data)['label'].shape) (4, 2, 2) Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` func: Lambda/function to be applied. It also can be a sequence of Callable, each element corresponds to a key in ``keys``. inv_func: Lambda/function of inverse operation if want to invert transforms, default to `lambda x: x`. It also can be a sequence of Callable, each element corresponds to a key in ``keys``. overwrite: whether to overwrite the original data in the input dictionary with lamdbda function output. default to True. it also can be a sequence of bool, each element corresponds to a key in ``keys``. allow_missing_keys: don't raise exception if key is missing. Note: The inverse operation doesn't allow to define `extra_info` or access other information, such as the image's original size. If need these complicated information, please write a new InvertibleTransform directly. 
""" backend = Lambda.backend def __init__( self, keys: KeysCollection, func: Union[Sequence[Callable], Callable], inv_func: Union[Sequence[Callable], Callable] = no_collation, overwrite: Union[Sequence[bool], bool] = True, allow_missing_keys: bool = False, ) -> None: super().__init__(keys, allow_missing_keys) self.func = ensure_tuple_rep(func, len(self.keys)) self.inv_func = ensure_tuple_rep(inv_func, len(self.keys)) self.overwrite = ensure_tuple_rep(overwrite, len(self.keys)) self._lambd = Lambda() def _transform(self, data: Any, func: Callable): return self._lambd(data, func=func) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key, func, overwrite in self.key_iterator(d, self.func, self.overwrite): ret = self._transform(data=d[key], func=func) if overwrite: d[key] = ret self.push_transform(d, key) return d def _inverse_transform(self, transform_info: Dict, data: Any, func: Callable): return self._lambd(data, func=func) def inverse(self, data): d = deepcopy(dict(data)) for key, inv_func, overwrite in self.key_iterator(d, self.inv_func, self.overwrite): transform = self.get_most_recent_transform(d, key) ret = self._inverse_transform(transform_info=transform, data=d[key], func=inv_func) if overwrite: d[key] = ret self.pop_transform(d, key) return d class RandLambdad(Lambdad, RandomizableTransform): """ Randomizable version :py:class:`monai.transforms.Lambdad`, the input `func` may contain random logic, or randomly execute the function based on `prob`. so `CacheDataset` will not execute it and cache the results. Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` func: Lambda/function to be applied. It also can be a sequence of Callable, each element corresponds to a key in ``keys``. inv_func: Lambda/function of inverse operation if want to invert transforms, default to `lambda x: x`. It also can be a sequence of Callable, each element corresponds to a key in ``keys``. overwrite: whether to overwrite the original data in the input dictionary with lamdbda function output. default to True. it also can be a sequence of bool, each element corresponds to a key in ``keys``. prob: probability of executing the random function, default to 1.0, with 100% probability to execute. note that all the data specified by `keys` will share the same random probability to execute or not. allow_missing_keys: don't raise exception if key is missing. For more details, please check :py:class:`monai.transforms.Lambdad`. Note: The inverse operation doesn't allow to define `extra_info` or access other information, such as the image's original size. If need these complicated information, please write a new InvertibleTransform directly. 
""" backend = Lambda.backend def __init__( self, keys: KeysCollection, func: Union[Sequence[Callable], Callable], inv_func: Union[Sequence[Callable], Callable] = no_collation, overwrite: Union[Sequence[bool], bool] = True, prob: float = 1.0, allow_missing_keys: bool = False, ) -> None: Lambdad.__init__( self=self, keys=keys, func=func, inv_func=inv_func, overwrite=overwrite, allow_missing_keys=allow_missing_keys, ) RandomizableTransform.__init__(self=self, prob=prob, do_transform=True) def _transform(self, data: Any, func: Callable): return self._lambd(data, func=func) if self._do_transform else data def __call__(self, data): self.randomize(data) return super().__call__(data) def _inverse_transform(self, transform_info: Dict, data: Any, func: Callable): return self._lambd(data, func=func) if transform_info[TraceKeys.DO_TRANSFORM] else data class LabelToMaskd(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.LabelToMask`. Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` select_labels: labels to generate mask from. for 1 channel label, the `select_labels` is the expected label values, like: [1, 2, 3]. for One-Hot format label, the `select_labels` is the expected channel indices. merge_channels: whether to use `np.any()` to merge the result on channel dim. if yes, will return a single channel mask with binary data. allow_missing_keys: don't raise exception if key is missing. """ backend = LabelToMask.backend def __init__( # pytype: disable=annotation-type-mismatch self, keys: KeysCollection, select_labels: Union[Sequence[int], int], merge_channels: bool = False, allow_missing_keys: bool = False, ) -> None: # pytype: disable=annotation-type-mismatch super().__init__(keys, allow_missing_keys) self.converter = LabelToMask(select_labels=select_labels, merge_channels=merge_channels) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key in self.key_iterator(d): d[key] = self.converter(d[key]) return d class FgBgToIndicesd(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.FgBgToIndices`. Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` fg_postfix: postfix to save the computed foreground indices in dict. for example, if computed on `label` and `postfix = "_fg_indices"`, the key will be `label_fg_indices`. bg_postfix: postfix to save the computed background indices in dict. for example, if computed on `label` and `postfix = "_bg_indices"`, the key will be `label_bg_indices`. image_key: if image_key is not None, use ``label == 0 & image > image_threshold`` to determine the negative sample(background). so the output items will not map to all the voxels in the label. image_threshold: if enabled image_key, use ``image > image_threshold`` to determine the valid image content area and select background only in this area. output_shape: expected shape of output indices. if not None, unravel indices to specified shape. allow_missing_keys: don't raise exception if key is missing. 
""" backend = FgBgToIndices.backend def __init__( self, keys: KeysCollection, fg_postfix: str = "_fg_indices", bg_postfix: str = "_bg_indices", image_key: Optional[str] = None, image_threshold: float = 0.0, output_shape: Optional[Sequence[int]] = None, allow_missing_keys: bool = False, ) -> None: super().__init__(keys, allow_missing_keys) self.fg_postfix = fg_postfix self.bg_postfix = bg_postfix self.image_key = image_key self.converter = FgBgToIndices(image_threshold, output_shape) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) image = d[self.image_key] if self.image_key else None for key in self.key_iterator(d): d[str(key) + self.fg_postfix], d[str(key) + self.bg_postfix] = self.converter(d[key], image) return d class ClassesToIndicesd(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.ClassesToIndices`. Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` indices_postfix: postfix to save the computed indices of all classes in dict. for example, if computed on `label` and `postfix = "_cls_indices"`, the key will be `label_cls_indices`. num_classes: number of classes for argmax label, not necessary for One-Hot label. image_key: if image_key is not None, use ``image > image_threshold`` to define valid region, and only select the indices within the valid region. image_threshold: if enabled image_key, use ``image > image_threshold`` to determine the valid image content area and select only the indices of classes in this area. output_shape: expected shape of output indices. if not None, unravel indices to specified shape. allow_missing_keys: don't raise exception if key is missing. """ backend = ClassesToIndices.backend def __init__( self, keys: KeysCollection, indices_postfix: str = "_cls_indices", num_classes: Optional[int] = None, image_key: Optional[str] = None, image_threshold: float = 0.0, output_shape: Optional[Sequence[int]] = None, allow_missing_keys: bool = False, ) -> None: super().__init__(keys, allow_missing_keys) self.indices_postfix = indices_postfix self.image_key = image_key self.converter = ClassesToIndices(num_classes, image_threshold, output_shape) def __call__(self, data: Mapping[Hashable, Any]): d = dict(data) image = d[self.image_key] if self.image_key else None for key in self.key_iterator(d): d[str(key) + self.indices_postfix] = self.converter(d[key], image) return d class ConvertToMultiChannelBasedOnBratsClassesd(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.ConvertToMultiChannelBasedOnBratsClasses`. Convert labels to multi channels based on brats18 classes: label 1 is the necrotic and non-enhancing tumor core label 2 is the the peritumoral edema label 4 is the GD-enhancing tumor The possible classes are TC (Tumor core), WT (Whole tumor) and ET (Enhancing tumor). """ backend = ConvertToMultiChannelBasedOnBratsClasses.backend def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False): super().__init__(keys, allow_missing_keys) self.converter = ConvertToMultiChannelBasedOnBratsClasses() def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key in self.key_iterator(d): d[key] = self.converter(d[key]) return d class AddExtremePointsChanneld(Randomizable, MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.AddExtremePointsChannel`. 
Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` label_key: key to label source to get the extreme points. background: Class index of background label, defaults to 0. pert: Random perturbation amount to add to the points, defaults to 0.0. sigma: if a list of values, must match the count of spatial dimensions of input data, and apply every value in the list to 1 spatial dimension. if only 1 value provided, use it for all spatial dimensions. rescale_min: minimum value of output data. rescale_max: maximum value of output data. allow_missing_keys: don't raise exception if key is missing. """ backend = AddExtremePointsChannel.backend def __init__( self, keys: KeysCollection, label_key: str, background: int = 0, pert: float = 0.0, sigma: Union[Sequence[float], float, Sequence[torch.Tensor], torch.Tensor] = 3.0, rescale_min: float = -1.0, rescale_max: float = 1.0, allow_missing_keys: bool = False, ): MapTransform.__init__(self, keys, allow_missing_keys) self.background = background self.pert = pert self.points: List[Tuple[int, ...]] = [] self.label_key = label_key self.sigma = sigma self.rescale_min = rescale_min self.rescale_max = rescale_max def randomize(self, label: NdarrayOrTensor) -> None: self.points = get_extreme_points(label, rand_state=self.R, background=self.background, pert=self.pert) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) label = d[self.label_key] if label.shape[0] != 1: raise ValueError("Only supports single channel labels!") # Generate extreme points self.randomize(label[0, :]) for key in self.key_iterator(d): img = d[key] points_image = extreme_points_to_image( points=self.points, label=label, sigma=self.sigma, rescale_min=self.rescale_min, rescale_max=self.rescale_max, ) points_image, *_ = convert_to_dst_type(points_image, img) # type: ignore d[key] = concatenate([img, points_image], axis=0) return d class TorchVisiond(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.TorchVision` for non-randomized transforms. For randomized transforms of TorchVision use :py:class:`monai.transforms.RandTorchVisiond`. Note: As most of the TorchVision transforms only work for PIL image and PyTorch Tensor, this transform expects input data to be dict of PyTorch Tensors, users can easily call `ToTensord` transform to convert Numpy to Tensor. """ backend = TorchVision.backend def __init__(self, keys: KeysCollection, name: str, allow_missing_keys: bool = False, *args, **kwargs) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` name: The transform name in TorchVision package. allow_missing_keys: don't raise exception if key is missing. args: parameters for the TorchVision transform. kwargs: parameters for the TorchVision transform. """ super().__init__(keys, allow_missing_keys) self.trans = TorchVision(name, *args, **kwargs) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key in self.key_iterator(d): d[key] = self.trans(d[key]) return d class RandTorchVisiond(Randomizable, MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.TorchVision` for randomized transforms. For deterministic non-randomized transforms of TorchVision use :py:class:`monai.transforms.TorchVisiond`. 
Note: - As most of the TorchVision transforms only work for PIL image and PyTorch Tensor, this transform expects input data to be dict of PyTorch Tensors, users can easily call `ToTensord` transform to convert Numpy to Tensor. - This class inherits the ``Randomizable`` purely to prevent any dataset caching to skip the transform computation. If the random factor of the underlying torchvision transform is not derived from `self.R`, the results may not be deterministic. See Also: :py:class:`monai.transforms.Randomizable`. """ backend = TorchVision.backend def __init__(self, keys: KeysCollection, name: str, allow_missing_keys: bool = False, *args, **kwargs) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` name: The transform name in TorchVision package. allow_missing_keys: don't raise exception if key is missing. args: parameters for the TorchVision transform. kwargs: parameters for the TorchVision transform. """ MapTransform.__init__(self, keys, allow_missing_keys) self.trans = TorchVision(name, *args, **kwargs) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key in self.key_iterator(d): d[key] = self.trans(d[key]) return d class MapLabelValued(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.MapLabelValue`. """ backend = MapLabelValue.backend def __init__( self, keys: KeysCollection, orig_labels: Sequence, target_labels: Sequence, dtype: DtypeLike = np.float32, allow_missing_keys: bool = False, ) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` orig_labels: original labels that map to others. target_labels: expected label values, 1: 1 map to the `orig_labels`. dtype: convert the output data to dtype, default to float32. allow_missing_keys: don't raise exception if key is missing. """ super().__init__(keys, allow_missing_keys) self.mapper = MapLabelValue(orig_labels=orig_labels, target_labels=target_labels, dtype=dtype) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key in self.key_iterator(d): d[key] = self.mapper(d[key]) return d class IntensityStatsd(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.IntensityStats`. Compute statistics for the intensity values of input image and store into the meta data dictionary. For example: if `ops=[lambda x: np.mean(x), "max"]` and `key_prefix="orig"`, may generate below stats: `{"orig_custom_0": 1.5, "orig_max": 3.0}`. Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` ops: expected operations to compute statistics for the intensity. if a string, will map to the predefined operations, supported: ["mean", "median", "max", "min", "std"] mapping to `np.nanmean`, `np.nanmedian`, `np.nanmax`, `np.nanmin`, `np.nanstd`. if a callable function, will execute the function on input image. key_prefix: the prefix to combine with `ops` name to generate the key to store the results in the meta data dictionary. if some `ops` are callable functions, will use "{key_prefix}_custom_{index}" as the key, where index counts from 0. mask_keys: if not None, specify the mask array for the image to extract only the interested area to compute statistics, mask must have the same shape as the image. 
it should be a sequence of strings or None, map to the `keys`. channel_wise: whether to compute statistics for every channel of input image separately. if True, return a list of values for every operation, default to False. meta_keys: explicitly indicate the key of the corresponding meta data dictionary. used to store the computed statistics to the meta dict. for example, for data with key `image`, the metadata by default is in `image_meta_dict`. the meta data is a dictionary object which contains: filename, original_shape, etc. it can be a sequence of string, map to the `keys`. if None, will try to construct meta_keys by `key_{meta_key_postfix}`. meta_key_postfix: if meta_keys is None, use `key_{postfix}` to to fetch the meta data according to the key data, default is `meta_dict`, the meta data is a dictionary object. used to store the computed statistics to the meta dict. allow_missing_keys: don't raise exception if key is missing. """ backend = IntensityStats.backend def __init__( self, keys: KeysCollection, ops: Sequence[Union[str, Callable]], key_prefix: str, mask_keys: Optional[KeysCollection] = None, channel_wise: bool = False, meta_keys: Optional[KeysCollection] = None, meta_key_postfix: str = "meta_dict", allow_missing_keys: bool = False, ) -> None: super().__init__(keys, allow_missing_keys) self.stats = IntensityStats(ops=ops, key_prefix=key_prefix, channel_wise=channel_wise) self.mask_keys = ensure_tuple_rep(None, len(self.keys)) if mask_keys is None else ensure_tuple(mask_keys) self.meta_keys = ensure_tuple_rep(None, len(self.keys)) if meta_keys is None else ensure_tuple(meta_keys) if len(self.keys) != len(self.meta_keys): raise ValueError("meta_keys should have the same length as keys.") self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys)) def __call__(self, data) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key, mask_key, meta_key, meta_key_postfix in self.key_iterator( d, self.mask_keys, self.meta_keys, self.meta_key_postfix ): meta_key = meta_key or f"{key}_{meta_key_postfix}" d[key], d[meta_key] = self.stats( img=d[key], meta_data=d.get(meta_key), mask=d.get(mask_key) if mask_key is not None else None ) return d class ToDeviced(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.ToDevice`. """ backend = ToDevice.backend def __init__( self, keys: KeysCollection, device: Union[torch.device, str], allow_missing_keys: bool = False, **kwargs ) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` device: target device to move the Tensor, for example: "cuda:1". allow_missing_keys: don't raise exception if key is missing. kwargs: other args for the PyTorch `Tensor.to()` API, for more details: https://pytorch.org/docs/stable/generated/torch.Tensor.to.html. """ super().__init__(keys, allow_missing_keys) self.converter = ToDevice(device=device, **kwargs) def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> Dict[Hashable, torch.Tensor]: d = dict(data) for key in self.key_iterator(d): d[key] = self.converter(d[key]) return d class CuCIMd(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.CuCIM` for non-randomized transforms. For randomized transforms of CuCIM use :py:class:`monai.transforms.RandCuCIMd`. Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` name: The transform name in CuCIM package. 
allow_missing_keys: don't raise exception if key is missing. args: parameters for the CuCIM transform. kwargs: parameters for the CuCIM transform. Note: CuCIM transforms only work with CuPy arrays, this transform expects input data to be `cupy.ndarray`. Users can call `ToCuPy` transform to convert a numpy array or torch tensor to cupy array. """ def __init__(self, keys: KeysCollection, name: str, allow_missing_keys: bool = False, *args, **kwargs) -> None: super().__init__(keys=keys, allow_missing_keys=allow_missing_keys) self.trans = CuCIM(name, *args, **kwargs) def __call__(self, data): """ Args: data: Dict[Hashable, `cupy.ndarray`] Returns: Dict[Hashable, `cupy.ndarray`] """ d = dict(data) for key in self.key_iterator(d): d[key] = self.trans(d[key]) return d class RandCuCIMd(CuCIMd, RandomizableTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.CuCIM` for randomized transforms. For deterministic non-randomized transforms of CuCIM use :py:class:`monai.transforms.CuCIMd`. Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` name: The transform name in CuCIM package. apply_prob: the probability to apply the transform (default=1.0) allow_missing_keys: don't raise exception if key is missing. args: parameters for the CuCIM transform. kwargs: parameters for the CuCIM transform. Note: - CuCIM transform only work with CuPy arrays, so this transform expects input data to be `cupy.ndarray`. Users can call `ToCuPy` transform to convert a numpy array or torch tensor to cupy array. - If the cuCIM transform is already randomized the `apply_prob` argument has nothing to do with the randomness of the underlying cuCIM transform. `apply_prob` defines if the transform (either randomized or non-randomized) being applied randomly, so it can apply non-randomized tranforms randomly but be careful with setting `apply_prob` to anything than 1.0 when using along with cuCIM's randomized transforms. - If the random factor of the underlying cuCIM transform is not derived from `self.R`, the results may not be deterministic. See Also: :py:class:`monai.transforms.Randomizable`. 
""" def __init__(self, apply_prob: float = 1.0, *args, **kwargs) -> None: CuCIMd.__init__(self, *args, **kwargs) RandomizableTransform.__init__(self, prob=apply_prob) def __call__(self, data): """ Args: data: Dict[Hashable, `cupy.ndarray`] Returns: Dict[Hashable, `cupy.ndarray`] """ self.randomize(data) if not self._do_transform: return dict(data) return super().__call__(data) IdentityD = IdentityDict = Identityd AsChannelFirstD = AsChannelFirstDict = AsChannelFirstd AsChannelLastD = AsChannelLastDict = AsChannelLastd AddChannelD = AddChannelDict = AddChanneld EnsureChannelFirstD = EnsureChannelFirstDict = EnsureChannelFirstd RemoveRepeatedChannelD = RemoveRepeatedChannelDict = RemoveRepeatedChanneld RepeatChannelD = RepeatChannelDict = RepeatChanneld SplitChannelD = SplitChannelDict = SplitChanneld CastToTypeD = CastToTypeDict = CastToTyped ToTensorD = ToTensorDict = ToTensord EnsureTypeD = EnsureTypeDict = EnsureTyped ToNumpyD = ToNumpyDict = ToNumpyd ToCupyD = ToCupyDict = ToCupyd ToPILD = ToPILDict = ToPILd TransposeD = TransposeDict = Transposed DeleteItemsD = DeleteItemsDict = DeleteItemsd SelectItemsD = SelectItemsDict = SelectItemsd SqueezeDimD = SqueezeDimDict = SqueezeDimd DataStatsD = DataStatsDict = DataStatsd SimulateDelayD = SimulateDelayDict = SimulateDelayd CopyItemsD = CopyItemsDict = CopyItemsd ConcatItemsD = ConcatItemsDict = ConcatItemsd LambdaD = LambdaDict = Lambdad LabelToMaskD = LabelToMaskDict = LabelToMaskd FgBgToIndicesD = FgBgToIndicesDict = FgBgToIndicesd ClassesToIndicesD = ClassesToIndicesDict = ClassesToIndicesd ConvertToMultiChannelBasedOnBratsClassesD = ( ConvertToMultiChannelBasedOnBratsClassesDict ) = ConvertToMultiChannelBasedOnBratsClassesd AddExtremePointsChannelD = AddExtremePointsChannelDict = AddExtremePointsChanneld TorchVisionD = TorchVisionDict = TorchVisiond RandTorchVisionD = RandTorchVisionDict = RandTorchVisiond RandLambdaD = RandLambdaDict = RandLambdad MapLabelValueD = MapLabelValueDict = MapLabelValued IntensityStatsD = IntensityStatsDict = IntensityStatsd ToDeviceD = ToDeviceDict = ToDeviced CuCIMD = CuCIMDict = CuCIMd RandCuCIMD = RandCuCIMDict = RandCuCIMd
py
1a4eca1b3e0609d4f68012b0804c48652deb20cc
# coding: utf-8 """ Kubernetes No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: v1.8.1 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class V1beta1Role(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'api_version': 'str', 'kind': 'str', 'metadata': 'V1ObjectMeta', 'rules': 'list[V1beta1PolicyRule]' } attribute_map = { 'api_version': 'apiVersion', 'kind': 'kind', 'metadata': 'metadata', 'rules': 'rules' } def __init__(self, api_version=None, kind=None, metadata=None, rules=None): """ V1beta1Role - a model defined in Swagger """ self._api_version = None self._kind = None self._metadata = None self._rules = None self.discriminator = None if api_version is not None: self.api_version = api_version if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata self.rules = rules @property def api_version(self): """ Gets the api_version of this V1beta1Role. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources :return: The api_version of this V1beta1Role. :rtype: str """ return self._api_version @api_version.setter def api_version(self, api_version): """ Sets the api_version of this V1beta1Role. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources :param api_version: The api_version of this V1beta1Role. :type: str """ self._api_version = api_version @property def kind(self): """ Gets the kind of this V1beta1Role. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds :return: The kind of this V1beta1Role. :rtype: str """ return self._kind @kind.setter def kind(self, kind): """ Sets the kind of this V1beta1Role. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds :param kind: The kind of this V1beta1Role. :type: str """ self._kind = kind @property def metadata(self): """ Gets the metadata of this V1beta1Role. Standard object's metadata. :return: The metadata of this V1beta1Role. :rtype: V1ObjectMeta """ return self._metadata @metadata.setter def metadata(self, metadata): """ Sets the metadata of this V1beta1Role. Standard object's metadata. :param metadata: The metadata of this V1beta1Role. :type: V1ObjectMeta """ self._metadata = metadata @property def rules(self): """ Gets the rules of this V1beta1Role. 
Rules holds all the PolicyRules for this Role :return: The rules of this V1beta1Role. :rtype: list[V1beta1PolicyRule] """ return self._rules @rules.setter def rules(self, rules): """ Sets the rules of this V1beta1Role. Rules holds all the PolicyRules for this Role :param rules: The rules of this V1beta1Role. :type: list[V1beta1PolicyRule] """ if rules is None: raise ValueError("Invalid value for `rules`, must not be `None`") self._rules = rules def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, V1beta1Role): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
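# --- Illustrative usage sketch (not part of the generated file) ---
# Constructing a Role and round-tripping it through to_dict(). The import path
# assumes the usual layout of the generated Kubernetes Python client; adjust it
# to wherever this model lives in your package.
from kubernetes.client import V1beta1PolicyRule, V1beta1Role, V1ObjectMeta

rule = V1beta1PolicyRule(api_groups=[""], resources=["pods"], verbs=["get", "list"])
role = V1beta1Role(
    api_version="rbac.authorization.k8s.io/v1beta1",
    kind="Role",
    metadata=V1ObjectMeta(name="pod-reader", namespace="default"),
    rules=[rule],
)
print(role.to_dict())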
py
1a4eca69fc23a284a857c141e9b46315ef83c0a1
x = [0.0, 3.0, 5.0, 2.5, 3.7]  # Define a list
print(type(x))
# Remove the third element
x.pop(2)
print(x)
# Remove the element with value 2.5
x.remove(2.5)
print(x)
# Add an element at the end
x.append(1.2)
print(x)
# Get a copy
y = x.copy()
print(y)
# Count how many elements are 0.0
print(y.count(0.0))
# Print the index of the element with value 3.7
print(y.index(3.7))
# Sort the list
y.sort()
print(y)
# Reverse sort
y.reverse()
print(y)
# Remove all elements
y.clear()
print(y)
py
1a4eca95920960f73df5684126803d1736405085
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import paddle import numpy as np import unittest from paddle.jit import to_static, ProgramTranslator class NetWithParameterList(paddle.nn.Layer): def __init__(self, in_size, out_size): super(NetWithParameterList, self).__init__() weight = self.create_parameter([in_size, out_size]) bias = self.create_parameter([out_size], is_bias=True) self.params = paddle.nn.ParameterList([weight, bias]) @to_static def forward(self, x): out = paddle.matmul(x, self.params[0]) out = paddle.add(out, self.params[1]) out = paddle.tanh(out) return out class NetWithParameterListIter(NetWithParameterList): def __init__(self, in_size, out_size): super(NetWithParameterListIter, self).__init__(in_size, out_size) @to_static def forward(self, x): # NOTE: manually trigger `__iter__` logic. params = list(self.params.__iter__()) out = paddle.matmul(x, params[0]) out = paddle.add(out, params[1]) out = paddle.tanh(out) return out class TestParameterList(unittest.TestCase): def setUp(self): self.seed = 2021 self.iter_num = 5 self.prog_trans = ProgramTranslator() def train(self, is_iter, to_static): paddle.seed(self.seed) np.random.seed(self.seed) self.prog_trans.enable(to_static) if is_iter: net = NetWithParameterList(10, 3) else: net = NetWithParameterListIter(10, 3) sgd = paddle.optimizer.SGD(0.1, parameters=net.parameters()) for batch_id in range(self.iter_num): x = paddle.rand([4, 10], dtype='float32') out = net(x) loss = paddle.mean(out) loss.backward() sgd.step() sgd.clear_grad() return loss def test_parameter_list(self): static_loss = self.train(False, to_static=True) dygraph_loss = self.train(False, to_static=False) self.assertTrue(np.allclose(dygraph_loss, static_loss), msg='dygraph result is {}\nstatic result is {}'.format( dygraph_loss, static_loss)) def test_parameter_list_iter(self): static_loss = self.train(True, to_static=True) dygraph_loss = self.train(True, to_static=False) self.assertTrue(np.allclose(dygraph_loss, static_loss), msg='dygraph result is {}\nstatic result is {}'.format( dygraph_loss, static_loss)) class NetWithRawParamList(paddle.nn.Layer): def __init__(self, in_size, out_size): super(NetWithRawParamList, self).__init__() weight = self.add_parameter('w', self.create_parameter([in_size, out_size])) bias = self.add_parameter( 'b', self.create_parameter([out_size], is_bias=True)) self.params = [weight] self.bias_dict = {'b': bias} @to_static def forward(self, x): out = paddle.matmul(x, self.params[0]) out = paddle.add(out, self.bias_dict['b']) out = paddle.tanh(out) return out class TestRawParameterList(unittest.TestCase): def setUp(self): self.seed = 2021 self.iter_num = 5 self.prog_trans = ProgramTranslator() def init_net(self): self.net = NetWithRawParamList(10, 3) def train(self, to_static): paddle.seed(self.seed) np.random.seed(self.seed) self.prog_trans.enable(to_static) self.init_net() sgd = paddle.optimizer.SGD(0.1, parameters=self.net.parameters()) for batch_id in range(self.iter_num): x = 
paddle.rand([4, 10], dtype='float32') out = self.net(x) loss = paddle.mean(out) loss.backward() sgd.step() sgd.clear_grad() return loss def test_parameter_list(self): static_loss = self.train(to_static=True) dygraph_loss = self.train(to_static=False) self.assertTrue(np.allclose(dygraph_loss, static_loss), msg='dygraph result is {}\nstatic result is {}'.format( dygraph_loss, static_loss)) class NetWithSubLayerParamList(paddle.nn.Layer): def __init__(self, sub_layer): super(NetWithSubLayerParamList, self).__init__() self.sub_layer = sub_layer self.params = [sub_layer.weight] self.bias_dict = {'b': sub_layer.bias} @to_static def forward(self, x): out = paddle.matmul(x, self.params[0]) out = paddle.add(out, self.bias_dict['b']) out = paddle.tanh(out) return out class TestSubLayerParameterList(TestRawParameterList): def init_net(self): fc = paddle.nn.Linear(10, 3) self.net = NetWithSubLayerParamList(fc) if __name__ == '__main__': unittest.main()
py
1a4eccf09eb1be64689af6478b52e261531578d8
from rest_framework import generics, authentication, permissions from rest_framework.authtoken.views import ObtainAuthToken from rest_framework.settings import api_settings from .serializers import UserSerializer, AuthTokenSerializer class CreateUserView(generics.CreateAPIView): """View to create a new user in the system""" serializer_class = UserSerializer class CreateTokenView(ObtainAuthToken): """Create a new auth token for user""" serializer_class = AuthTokenSerializer renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES class ManageUserView(generics.RetrieveUpdateAPIView): """Manage the authenticated user""" serializer_class = UserSerializer authentication_classes = (authentication.TokenAuthentication,) permission_classes = (permissions.IsAuthenticated,) def get_object(self): """Retrieve and return authenticated user""" return self.request.user
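# --- Illustrative usage sketch (not part of the original module) ---
# Wiring the three views above into a urls.py. The module path "user.views" and
# the route/app names are assumptions made only for this example.
from django.urls import path

from user import views  # hypothetical app package containing the views above

app_name = 'user'

urlpatterns = [
    path('create/', views.CreateUserView.as_view(), name='create'),
    path('token/', views.CreateTokenView.as_view(), name='token'),
    path('me/', views.ManageUserView.as_view(), name='me'),
]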
py
1a4ecd20ba45464eb7dfd300df574921c7a738dd
########################### # # #118 Pandigital prime sets - Project Euler # https://projecteuler.net/problem=118 # # Code by Kevin Marciniak # ###########################
py
1a4ecd5444b78d5e9e1f453c9800c29739be4d31
import copy from django.conf import settings from olympia.constants.promoted import RECOMMENDED import olympia.core.logger from olympia import amo from olympia.amo.utils import attach_trans_dict from olympia.amo.celery import create_chunked_tasks_signatures from olympia.amo.utils import to_language from olympia.constants.search import SEARCH_LANGUAGE_TO_ANALYZER from olympia.lib.es.utils import create_index from olympia.versions.compare import version_int log = olympia.core.logger.getLogger('z.es') class AddonIndexer: """ Base Indexer class for add-ons. """ @classmethod def attach_translation_mappings(cls, mapping, field_names): """ For each field in field_names, attach a dict to the ES mapping properties making "<field_name>_translations" an object containing "string" and "lang" as non-indexed strings. Used to store non-indexed, non-analyzed translations in ES that will be sent back by the API for each item. It does not take care of the indexed content for search, it's there only to store and return raw translations. """ for field_name in field_names: # _translations is the suffix in TranslationSerializer. mapping['properties'][ '%s_translations' % field_name ] = cls.get_translations_definition() @classmethod def get_translations_definition(cls): """ Return the mapping to use for raw translations (to be returned directly by the API, not used for analysis). See attach_translation_mappings() for more information. """ return { 'type': 'object', 'properties': { 'lang': {'type': 'text', 'index': False}, 'string': {'type': 'text', 'index': False}, }, } @classmethod def get_raw_field_definition(cls): """ Return the mapping to use for the "raw" version of a field. Meant to be used as part of a 'fields': {'raw': ... } definition in the mapping of an existing field. Used for exact matches and sorting """ # It needs to be a keyword to turnoff all analysis ; that means we # don't get the lowercase filter applied by the standard & # language-specific analyzers, so we need to do that ourselves through # a custom normalizer for exact matches to work in a case-insensitive # way. return { 'type': 'keyword', 'normalizer': 'lowercase_keyword_normalizer', } @classmethod def attach_language_specific_analyzers(cls, mapping, field_names): """ For each field in field_names, attach language-specific mappings that will use specific analyzers for these fields in every language that we support. These mappings are used by the search filtering code if they exist. """ for lang, analyzer in SEARCH_LANGUAGE_TO_ANALYZER.items(): for field in field_names: property_name = '%s_l10n_%s' % (field, lang) mapping['properties'][property_name] = { 'type': 'text', 'analyzer': analyzer, } @classmethod def attach_language_specific_analyzers_with_raw_variant(cls, mapping, field_names): """ Like attach_language_specific_analyzers() but with an extra field to storethe "raw" variant of the value, for exact matches. """ for lang, analyzer in SEARCH_LANGUAGE_TO_ANALYZER.items(): for field in field_names: property_name = '%s_l10n_%s' % (field, lang) mapping['properties'][property_name] = { 'type': 'text', 'analyzer': analyzer, 'fields': { 'raw': cls.get_raw_field_definition(), }, } @classmethod def extract_field_api_translations(cls, obj, field, db_field=None): """ Returns a dict containing translations that we need to store for the API. Empty translations are skipped entirely. 
""" if db_field is None: db_field = '%s_id' % field extend_with_me = { '%s_translations' % field: [ {'lang': to_language(lang), 'string': str(string)} for lang, string in obj.translations[getattr(obj, db_field)] if string ] } return extend_with_me @classmethod def extract_field_search_translation(cls, obj, field, default_locale): """ Returns the translation for this field in the object's default locale, in the form a dict with one entry (the field being the key and the translation being the value, or an empty string if none was found). That field will be analyzed and indexed by ES *without* language-specific analyzers. """ translations = dict(obj.translations[getattr(obj, '%s_id' % field)]) default_locale = default_locale.lower() if default_locale else None value = translations.get(default_locale, getattr(obj, field)) return {field: str(value) if value else ''} @classmethod def extract_field_analyzed_translations(cls, obj, field, db_field=None): """ Returns a dict containing translations for each language that we have an analyzer for, for the given field. When no translation exist for a given language+field combo, the value returned is an empty string, to avoid storing the word "None" as the field does not understand null values. """ if db_field is None: db_field = '%s_id' % field translations = dict(obj.translations[getattr(obj, db_field)]) return { '%s_l10n_%s' % (field, lang): translations.get(lang) or '' for lang in SEARCH_LANGUAGE_TO_ANALYZER } # Fields we don't need to expose in the results, only used for filtering # or sorting. hidden_fields = ( '*.raw', 'boost', 'colors', 'hotness', # Translated content that is used for filtering purposes is stored # under 3 different fields: # - One field with all translations (e.g., "name"). # - One field for each language, using corresponding analyzer # (e.g., "name_l10n_en-us", "name_l10n_fr", etc.) # - One field with all translations in separate objects for the API # (e.g. "name_translations") # Only that last one with all translations needs to be returned. 'name', 'description', 'name_l10n_*', 'description_l10n_*', 'summary', 'summary_l10n_*', ) index_settings = { 'analysis': { 'analyzer': { 'standard_with_word_split': { # This analyzer tries to split the text into words by using # various methods. It also lowercases them and make sure # each token is only returned once. # Only use for short things with extremely meaningful # content like add-on name - it makes too many # modifications to be useful for things like descriptions, # for instance. 'tokenizer': 'standard', 'filter': [ 'custom_word_delimiter', 'lowercase', 'stop', 'custom_dictionary_decompounder', 'unique', ], }, 'trigram': { # Analyzer that splits the text into trigrams. 'tokenizer': 'ngram_tokenizer', 'filter': [ 'lowercase', ], }, }, 'tokenizer': { 'ngram_tokenizer': { 'type': 'ngram', 'min_gram': 3, 'max_gram': 3, 'token_chars': ['letter', 'digit'], } }, 'normalizer': { 'lowercase_keyword_normalizer': { # By default keywords are indexed 'as-is', but for exact # name matches we need to lowercase them before indexing, # so this normalizer does that for us. 'type': 'custom', 'filter': ['lowercase'], }, }, 'filter': { 'custom_word_delimiter': { # This filter is useful for add-on names that have multiple # words sticked together in a way that is easy to # recognize, like FooBar, which should be indexed as FooBar # and Foo Bar. (preserve_original: True makes us index both # the original and the split version.) 
'type': 'word_delimiter', 'preserve_original': True, }, 'custom_dictionary_decompounder': { # This filter is also useful for add-on names that have # multiple words sticked together, but without a pattern # that we can automatically recognize. To deal with those, # we use a small dictionary of common words. It allows us # to index 'awesometabpassword' as 'awesome tab password', # helping users looking for 'tab password' find that addon. 'type': 'dictionary_decompounder', 'word_list': [ 'all', 'auto', 'ball', 'bar', 'block', 'blog', 'bookmark', 'browser', 'bug', 'button', 'cat', 'chat', 'click', 'clip', 'close', 'color', 'context', 'cookie', 'cool', 'css', 'delete', 'dictionary', 'down', 'download', 'easy', 'edit', 'fill', 'fire', 'firefox', 'fix', 'flag', 'flash', 'fly', 'forecast', 'fox', 'foxy', 'google', 'grab', 'grease', 'html', 'http', 'image', 'input', 'inspect', 'inspector', 'iris', 'js', 'key', 'keys', 'lang', 'link', 'mail', 'manager', 'map', 'mega', 'menu', 'menus', 'monkey', 'name', 'net', 'new', 'open', 'password', 'persona', 'privacy', 'query', 'screen', 'scroll', 'search', 'secure', 'select', 'smart', 'spring', 'status', 'style', 'super', 'sync', 'tab', 'text', 'think', 'this', 'time', 'title', 'translate', 'tree', 'undo', 'upload', 'url', 'user', 'video', 'window', 'with', 'word', 'zilla', ], }, }, } } @classmethod def get_model(cls): from olympia.addons.models import Addon return Addon @classmethod def get_index_alias(cls): """Return the index alias name.""" return settings.ES_INDEXES.get('default') @classmethod def get_mapping(cls): appver_mapping = { 'properties': { 'max': {'type': 'long'}, 'min': {'type': 'long'}, 'max_human': {'type': 'keyword', 'index': False}, 'min_human': {'type': 'keyword', 'index': False}, } } version_mapping = { 'type': 'object', 'properties': { 'compatible_apps': { 'properties': {app.id: appver_mapping for app in amo.APP_USAGE} }, # Keep '<version>.id' indexed to be able to run exists queries # on it. 
'id': {'type': 'long'}, 'reviewed': {'type': 'date', 'index': False}, 'files': { 'type': 'object', 'properties': { 'id': {'type': 'long', 'index': False}, 'created': {'type': 'date', 'index': False}, 'hash': {'type': 'keyword', 'index': False}, 'filename': {'type': 'keyword', 'index': False}, 'is_mozilla_signed_extension': {'type': 'boolean'}, 'size': {'type': 'long', 'index': False}, 'strict_compatibility': {'type': 'boolean', 'index': False}, 'status': {'type': 'byte'}, 'permissions': {'type': 'keyword', 'index': False}, 'optional_permissions': {'type': 'keyword', 'index': False}, }, }, 'license': { 'type': 'object', 'properties': { 'id': {'type': 'long', 'index': False}, 'builtin': {'type': 'short', 'index': False}, 'name_translations': cls.get_translations_definition(), 'url': {'type': 'text', 'index': False}, }, }, 'release_notes_translations': cls.get_translations_definition(), 'version': {'type': 'keyword', 'index': False}, }, } mapping = { 'properties': { 'id': {'type': 'long'}, 'app': {'type': 'byte'}, 'average_daily_users': {'type': 'long'}, 'bayesian_rating': {'type': 'double'}, 'boost': {'type': 'float', 'null_value': 1.0}, 'category': {'type': 'integer'}, 'colors': { 'type': 'nested', 'properties': { 'h': {'type': 'integer'}, 's': {'type': 'integer'}, 'l': {'type': 'integer'}, 'ratio': {'type': 'double'}, }, }, 'contributions': {'type': 'text'}, 'created': {'type': 'date'}, 'current_version': version_mapping, 'default_locale': {'type': 'keyword', 'index': False}, 'description': {'type': 'text', 'analyzer': 'snowball'}, 'guid': {'type': 'keyword'}, 'has_eula': {'type': 'boolean', 'index': False}, 'has_privacy_policy': {'type': 'boolean', 'index': False}, 'hotness': {'type': 'double'}, 'icon_hash': {'type': 'keyword', 'index': False}, 'icon_type': {'type': 'keyword', 'index': False}, 'is_disabled': {'type': 'boolean'}, 'is_experimental': {'type': 'boolean'}, 'is_recommended': {'type': 'boolean'}, 'last_updated': {'type': 'date'}, 'listed_authors': { 'type': 'object', 'properties': { 'id': {'type': 'long'}, 'name': {'type': 'text'}, 'username': {'type': 'keyword'}, 'is_public': {'type': 'boolean', 'index': False}, }, }, 'modified': {'type': 'date', 'index': False}, 'name': { 'type': 'text', # Adding word-delimiter to split on camelcase, known # words like 'tab', and punctuation, and eliminate # duplicates. 'analyzer': 'standard_with_word_split', 'fields': { # Raw field for exact matches and sorting. 'raw': cls.get_raw_field_definition(), # Trigrams for partial matches. 
'trigrams': { 'type': 'text', 'analyzer': 'trigram', }, }, }, 'previews': { 'type': 'object', 'properties': { 'id': {'type': 'long', 'index': False}, 'caption_translations': cls.get_translations_definition(), 'modified': {'type': 'date', 'index': False}, 'position': {'type': 'long', 'index': False}, 'sizes': { 'type': 'object', 'properties': { 'thumbnail': {'type': 'short', 'index': False}, 'image': {'type': 'short', 'index': False}, }, }, }, }, 'promoted': { 'type': 'object', 'properties': { 'group_id': {'type': 'byte'}, 'approved_for_apps': {'type': 'byte'}, }, }, 'ratings': { 'type': 'object', 'properties': { 'count': {'type': 'short', 'index': False}, 'average': {'type': 'float'}, }, }, 'slug': {'type': 'keyword'}, 'requires_payment': {'type': 'boolean', 'index': False}, 'status': {'type': 'byte'}, 'summary': {'type': 'text', 'analyzer': 'snowball'}, 'tags': {'type': 'keyword'}, 'type': {'type': 'byte'}, 'weekly_downloads': {'type': 'long'}, }, } # Add fields that we expect to return all translations without being # analyzed/indexed. cls.attach_translation_mappings( mapping, ( 'description', 'developer_comments', 'homepage', 'name', 'summary', 'support_email', 'support_url', ), ) # Add language-specific analyzers for localized fields that are # analyzed/indexed. cls.attach_language_specific_analyzers(mapping, ('description', 'summary')) cls.attach_language_specific_analyzers_with_raw_variant(mapping, ('name',)) return mapping @classmethod def extract_version(cls, obj, version_obj): from olympia.versions.models import License, Version data = ( { 'id': version_obj.pk, 'compatible_apps': cls.extract_compatibility_info(obj, version_obj), 'files': [ { 'id': version_obj.file.id, 'created': version_obj.file.created, 'filename': version_obj.file.filename, 'hash': version_obj.file.hash, 'is_mozilla_signed_extension': ( version_obj.file.is_mozilla_signed_extension ), 'size': version_obj.file.size, 'status': version_obj.file.status, 'strict_compatibility': version_obj.file.strict_compatibility, 'permissions': version_obj.file.permissions, 'optional_permissions': version_obj.file.optional_permissions, } ], 'reviewed': version_obj.reviewed, 'version': version_obj.version, } if version_obj else None ) if data and version_obj: attach_trans_dict(Version, [version_obj]) data.update( cls.extract_field_api_translations( version_obj, 'release_notes', db_field='release_notes_id' ) ) if version_obj.license: data['license'] = { 'id': version_obj.license.id, 'builtin': version_obj.license.builtin, 'url': version_obj.license.url, } attach_trans_dict(License, [version_obj.license]) data['license'].update( cls.extract_field_api_translations(version_obj.license, 'name') ) return data @classmethod def extract_compatibility_info(cls, obj, version_obj): """Return compatibility info for the specified version_obj, as will be indexed in ES.""" compatible_apps = {} for app, appver in version_obj.compatible_apps.items(): if appver: min_, max_ = appver.min.version_int, appver.max.version_int min_human, max_human = appver.min.version, appver.max.version if not version_obj.file.strict_compatibility: # The files attached to this version are not using strict # compatibility, so the max version essentially needs to be # ignored - let's fake a super high one. We leave max_human # alone to leave the API representation intact. max_ = version_int('*') else: # Fake wide compatibility for add-ons with no info. 
We don't # want to reindex every time a new version of the app is # released, so we directly index a super high version as the # max. min_human, max_human = ( amo.DEFAULT_WEBEXT_MIN_VERSIONS.get( app, amo.DEFAULT_WEBEXT_MIN_VERSION ), amo.FAKE_MAX_VERSION, ) min_, max_ = version_int(min_human), version_int(max_human) compatible_apps[app.id] = { 'min': min_, 'min_human': min_human, 'max': max_, 'max_human': max_human, } return compatible_apps @classmethod def extract_document(cls, obj): """Extract indexable attributes from an add-on.""" from olympia.addons.models import Preview attrs = ( 'id', 'average_daily_users', 'bayesian_rating', 'contributions', 'created', 'default_locale', 'guid', 'hotness', 'icon_hash', 'icon_type', 'is_disabled', 'is_experimental', 'last_updated', 'modified', 'requires_payment', 'slug', 'status', 'type', 'weekly_downloads', ) data = {attr: getattr(obj, attr) for attr in attrs} data['colors'] = None # Extract dominant colors from static themes. if obj.type == amo.ADDON_STATICTHEME: if obj.current_previews: data['colors'] = obj.current_previews[0].colors data['app'] = [app.id for app in obj.compatible_apps.keys()] # Boost by the number of users on a logarithmic scale. data['boost'] = float(data['average_daily_users'] ** 0.2) # Quadruple the boost if the add-on is public. if ( obj.status == amo.STATUS_APPROVED and not obj.is_experimental and 'boost' in data ): data['boost'] = float(max(data['boost'], 1) * 4) # We can use all_categories because the indexing code goes through the # transformer that sets it. data['category'] = [cat.id for cat in obj.all_categories] data['current_version'] = cls.extract_version(obj, obj.current_version) data['listed_authors'] = [ { 'name': a.name, 'id': a.id, 'username': a.username, 'is_public': a.is_public, } for a in obj.listed_authors ] data['has_eula'] = bool(obj.eula) data['has_privacy_policy'] = bool(obj.privacy_policy) data['is_recommended'] = bool( obj.promoted and obj.promoted.group == RECOMMENDED ) data['previews'] = [ { 'id': preview.id, 'modified': preview.modified, 'sizes': preview.sizes, 'position': preview.position, } for preview in obj.current_previews ] data['promoted'] = ( { 'group_id': obj.promoted.group_id, # store the app approvals because .approved_applications needs it. 'approved_for_apps': [ app.id for app in obj.promoted.approved_applications ], } if obj.promoted else None ) data['ratings'] = { 'average': obj.average_rating, 'count': obj.total_ratings, 'text_count': obj.text_ratings_count, } # We can use tag_list because the indexing code goes through the # transformer that sets it (attach_tags). data['tags'] = getattr(obj, 'tag_list', []) # Handle localized fields. # First, deal with the 3 fields that need everything: for field in ('description', 'name', 'summary'): data.update(cls.extract_field_api_translations(obj, field)) data.update( cls.extract_field_search_translation(obj, field, obj.default_locale) ) data.update(cls.extract_field_analyzed_translations(obj, field)) # Then add fields that only need to be returned to the API without # contributing to search relevancy. for field in ('developer_comments', 'homepage', 'support_email', 'support_url'): data.update(cls.extract_field_api_translations(obj, field)) if obj.type != amo.ADDON_STATICTHEME: # Also do that for preview captions, which are set on each preview # object. 
attach_trans_dict(Preview, obj.current_previews) for i, preview in enumerate(obj.current_previews): data['previews'][i].update( cls.extract_field_api_translations(preview, 'caption') ) return data @classmethod def create_new_index(cls, index_name): """ Create a new index for addons in ES. Intended to be used by reindexation (and tests), generally a bad idea to call manually. """ index_settings = copy.deepcopy(cls.index_settings) config = { 'mappings': cls.get_mapping(), 'settings': { # create_index will add its own index settings like number of # shards and replicas. 'index': index_settings }, } create_index(index_name, config) @classmethod def reindex_tasks_group(cls, index_name): """ Return the group of tasks to execute for a full reindex of addons on the index called `index_name` (which is not an alias but the real index name). """ from olympia.addons.tasks import index_addons ids = cls.get_model().unfiltered.values_list('id', flat=True).order_by('id') chunk_size = 150 return create_chunked_tasks_signatures( index_addons, list(ids), chunk_size, task_kwargs={'index': index_name} )
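# --- Illustrative sketch (not part of the module) ---
# Shape of the translated "name" data that extract_document() produces for one
# add-on, following the three-way scheme described in the comments above
# (indexed field, per-language analyzed fields, raw API translations).
# The values are made up for the example.
indexed_name_fields = {
    # analyzed/indexed with the default analyzers, used for generic search
    "name": "Tab Manager",
    # analyzed with a language-specific analyzer, used for per-language search
    "name_l10n_fr": "Gestionnaire d'onglets",
    # stored as-is (not analyzed), returned verbatim by the API
    "name_translations": [
        {"lang": "en-US", "string": "Tab Manager"},
        {"lang": "fr", "string": "Gestionnaire d'onglets"},
    ],
}
print(indexed_name_fields["name_translations"][1]["string"])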
py
1a4ece54a834aa92f61e6414304b6adc45785a0f
salary = float(input('Enter the employee salary: '))
# 10% raise for salaries above 1250, 15% raise otherwise
salary_10 = salary + (10 * salary) / 100
salary_15 = salary + (15 * salary) / 100
if salary > 1250:
    print('Salary: {}, salary with a 10% raise: {}'.format(salary, salary_10))
else:
    print('Salary: {}, salary with a 15% raise: {}'.format(salary, salary_15))
py
1a4eceec97bb0864913e158ba0a2a5c5471b3ef8
import matplotlib.pyplot as plt import pydicom from pydicom.data import get_testdata_files print(__doc__) #filename = get_testdata_files('000000.dcm') dataset = pydicom.dcmread('yourfilenameorpath') # Normal mode: print() #print("Filename.........:", filename) print("Storage type.....:", dataset.SOPClassUID) print() pat_name = dataset.PatientName display_name = pat_name.family_name + ", " + pat_name.given_name print("Patient's name...:", display_name) print("Patient id.......:", dataset.PatientID) print("Modality.........:", dataset.Modality) print("Study Date.......:", dataset.StudyDate) if 'PixelData' in dataset: rows = int(dataset.Rows) cols = int(dataset.Columns) print("Image size.......: {rows:d} x {cols:d}, {size:d} bytes".format( rows=rows, cols=cols, size=len(dataset.PixelData))) if 'PixelSpacing' in dataset: print("Pixel spacing....:", dataset.PixelSpacing) # use .get() if not sure the item exists, and want a default value if missing print("Slice location...:", dataset.get('SliceLocation', "(missing)")) # plot the image using matplotlib plt.imshow(dataset.pixel_array, cmap=plt.cm.bone) plt.show()
py
1a4ecf044f63d1578e34f4a96d8c46fdf34b49e1
#Michael Astwood 2018 with help from https://apmonitor.com/wiki/index.php/Main/GekkoPythonOptimization #This program is built to control light #levels based on data taken during experiments. from gekko import GEKKO import numpy as np import matplotlib.pyplot as plt m = GEKKO() m.time = np.linspace(0,20,41) # Parameters for model mass = 500 b = m.Param(value=50) K = m.Param(value=0.8) # Manipulated variable (in our system it will be the light intensity) p = m.MV(value=50, lb=0, ub=100) #value is initial value, ub is upper bound, lb is lower bound p.STATUS = 1 # allow optimizer to change p.DCOST = 0 # smooth out changes in intensity p.DMAX = 10 # slow down changes in intensity # Controlled Variable (in our system it will be the ratio) v = m.CV(value=0) v.STATUS = 1 # add the setpoint to the objective m.options.CV_TYPE = 2 # squared error root(sum(x^2)) v.SP = 40 # set point v.TR_INIT = 0 # set point trajectory (0 = straight line at SP, 1 = starts at zero and ramps up) v.TAU = 5 # time constant of trajectory # Process model (this is a model for car acceleration) m.Equation(mass*v.dt() == -v*b + K*b*p) #a differential equation in terms of our controlled variable #linear drag vs gas pedal input m.options.IMODE = 6 #this puts the library in MPC mode m.solve(disp=True) #this finalizes our controller for this prediction cycle # get additional solution information import json with open(m.path+'//results.json') as f: results = json.load(f) #plotting the results plt.figure() plt.subplot(2,1,1) plt.plot(m.time,p.value,'b-',label='MV Optimized') plt.legend() plt.ylabel('Input') plt.subplot(2,1,2) plt.plot(m.time,results['v1.tr'],'k-',label='Reference Trajectory') plt.plot(m.time,v.value,'r--',label='CV Response') plt.ylabel('Output') plt.xlabel('Time') plt.legend(loc='best') plt.show()
bzl
1a4ecf0d91c75fc7f44e30331248e730147ff13c
load("@rules_python//python:defs.bzl", "py_binary", "py_library") def envoy_py_test(name, package, visibility): native.genrule( name = "generate_pytest_" + name, cmd = "sed s/_PACKAGE_NAME_/" + package + "/ $(location //tools/testing:base_pytest_runner.py) > \"$(@D)/pytest_" + name + ".py\"", tools = ["//tools/testing:base_pytest_runner.py"], outs = ["pytest_" + name + ".py"], ) test_deps = [ ":" + name, ] if name != "python_pytest": test_deps.append("//tools/testing:python_pytest") py_binary( name = "pytest_" + name, srcs = [ "pytest_" + name + ".py", "tests/test_" + name + ".py", ], data = [":generate_pytest_" + name], deps = test_deps, visibility = visibility, ) def envoy_py_library( name = None, deps = [], data = [], visibility = ["//visibility:public"]): _parts = name.split(".") package = ".".join(_parts[:-1]) name = _parts[-1] py_library( name = name, srcs = [name + ".py"], deps = deps, data = data, visibility = visibility, ) envoy_py_test(name, package, visibility) def envoy_py_binary( name = None, deps = [], data = [], visibility = ["//visibility:public"]): _parts = name.split(".") package = ".".join(_parts[:-1]) name = _parts[-1] py_binary( name = name, srcs = [name + ".py"], deps = deps, data = data, visibility = visibility, ) envoy_py_test(name, package, visibility)
py
1a4ecf81c8e99df3355cea4cae226122025f5717
from django.urls import include, path from event import views from rest_framework.routers import SimpleRouter from rest_framework_nested import routers router = SimpleRouter() router.register(r"event", views.EventViewSet) event_router = routers.NestedSimpleRouter(router, r"event", lookup="event") event_router.register( r"participants", views.EventParticipant, basename="event-participants" ) event_router.register( r"competitions", views.EventCompetitionListCreate, basename="competitions" ) router.register( r"competition", views.EventCompetitionRetrieveUpdateDestroy, basename="competition" ) event_participants_router = routers.NestedSimpleRouter( router, r"competition", lookup="competition" ) event_participants_router.register( r"participants", views.EventCompetitionParticipants, basename="competition-participants", ) event_participants_router.register( r"nominations", views.NominationView, basename="competition-nominations" ) router.register(r"tickets", views.TicketViewSet, basename="tickets") router.register(r"scans", views.TicketScanViewSet, basename="scans") router.register(r"quotas", views.EventQuotaViewSet, basename="quotas") app_name = "event" urlpatterns = [ path("", include(router.urls)), path("", include(event_router.urls)), path("", include(event_participants_router.urls)), ]
py
1a4ed016d2fcda1f6e484400696709de38f70110
#!/usr/bin/python2.7 """Public interface to top-level pytype functions.""" from __future__ import print_function import contextlib import logging import os import sys import tokenize import traceback from pytype import __version__ from pytype import analyze from pytype import directors from pytype import errors from pytype import load_pytd from pytype import utils from pytype.pyc import pyc from pytype.pyi import parser from pytype.pytd import optimize from pytype.pytd import pytd from pytype.pytd import pytd_utils from pytype.pytd import serialize_ast from pytype.pytd import visitors from pytype.pytd.parse import builtins as pytd_builtins import six log = logging.getLogger(__name__) # Webpage explaining the pytype error codes ERROR_DOC_URL = "https://google.github.io/pytype/errors.html" def read_source_file(input_filename): try: with open(input_filename, "r") as fi: return fi.read() except IOError: raise utils.UsageError("Could not load input file %s" % input_filename) def _call(analyze_types, input_filename, errorlog, options, loader): """Helper function to call analyze.check/infer_types.""" src = read_source_file(input_filename) # 'deep' tells the analyzer whether to analyze functions not called from main. deep = not options.main_only return analyze_types( src=src, filename=input_filename, errorlog=errorlog, options=options, loader=loader, deep=deep) def check_py(input_filename, errorlog, options, loader): """Check the types of one file.""" _call(analyze.check_types, input_filename, errorlog, options, loader) def generate_pyi(input_filename, errorlog, options, loader): """Run the inferencer on one file, producing output. Args: input_filename: name of the file to process errorlog: Where error messages go. Instance of errors.ErrorLog. options: config.Options object. loader: A load_pytd.Loader instance. Returns: A tuple, (PYI Ast as string, TypeDeclUnit). Raises: CompileError: If we couldn't parse the input file. UsageError: If the input filepath is invalid. """ mod, builtins = _call( analyze.infer_types, input_filename, errorlog, options, loader) mod.Visit(visitors.VerifyVisitor()) mod = optimize.Optimize(mod, builtins, # TODO(kramm): Add FLAGs for these lossy=False, use_abcs=False, max_union=7, remove_mutable=False) mod = pytd_utils.CanonicalOrdering(mod, sort_signatures=True) result = pytd.Print(mod) log.info("=========== pyi optimized =============") log.info("\n%s", result) log.info("========================================") if not result.endswith("\n"): result += "\n" result_prefix = "" if options.quick: result_prefix += "# (generated with --quick)\n" if result_prefix: result = result_prefix + "\n" + result return result, mod def check_or_generate_pyi(options, errorlog, loader): """Returns generated errors and result pyi or None if it's only check. Args: options: config.Options object. errorlog: errors.ErrorLog object. loader: load_pytd.Loader object. Returns: A tuple, (PYI Ast as string, AST) or None. 
""" result = pytd_builtins.DEFAULT_SRC ast = pytd_builtins.GetDefaultAst(options.python_version) try: if options.check: check_py(input_filename=options.input, errorlog=errorlog, options=options, loader=loader) return None else: result, ast = generate_pyi(input_filename=options.input, errorlog=errorlog, options=options, loader=loader) except utils.UsageError as e: raise except pyc.CompileError as e: errorlog.python_compiler_error(options.input, e.lineno, e.error) except IndentationError as e: errorlog.python_compiler_error(options.input, e.lineno, e.msg) except tokenize.TokenError as e: msg, (lineno, unused_column) = e.args # pylint: disable=unbalanced-tuple-unpacking errorlog.python_compiler_error(options.input, lineno, msg) except directors.SkipFile: result += "# skip-file found, file not analyzed" except Exception as e: # pylint: disable=broad-except if options.nofail: log.warn("***Caught exception: %s", str(e), exc_info=True) if not options.check: result += ( # pytype: disable=name-error "# Caught error in pytype: " + str(e).replace("\n", "\n#") + "\n# " + "\n# ".join(traceback.format_exc().splitlines())) else: e.args = ( str(utils.message(e)) + "\nFile: %s" % options.input,) + e.args[1:] raise return (result, ast) def _write_pyi_output(options, contents, filename): assert filename if filename == "-": sys.stdout.write(contents) else: log.info("write pyi %r => %r", options.input, filename) with open(filename, "w") as fi: fi.write(contents) def process_one_file(options): """Check a .py file or generate a .pyi for it, according to options. Args: options: config.Options object. Returns: An error code (0 means no error). """ log.info("Process %s => %s", options.input, options.output) errorlog = errors.ErrorLog() loader = load_pytd.create_loader(options) try: generated_values = check_or_generate_pyi(options, errorlog, loader) except utils.UsageError as e: logging.error("Usage error: %s\n", utils.message(e)) return 1 if not options.check: result, ast = generated_values if options.pickle_output: pyi_output = options.verify_pickle else: pyi_output = options.output # Write out the pyi file. if pyi_output: _write_pyi_output(options, result, pyi_output) # Write out the pickle file. if options.pickle_output: log.info("write pickle %r => %r", options.input, options.output) write_pickle(ast, loader, options) exit_status = handle_errors(errorlog, options) # If we have set return_success, set exit_status to 0 after the regular error # handler has been called. if options.return_success: exit_status = 0 # Touch output file upon success. 
if options.touch and not exit_status: with open(options.touch, "a"): os.utime(options.touch, None) return exit_status def write_pickle(ast, loader, options): """Dump a pickle of the ast to a file.""" try: ast = serialize_ast.PrepareForExport( options.module_name, options.python_version, ast, loader) except parser.ParseError as e: if options.nofail: ast = serialize_ast.PrepareForExport( options.module_name, options.python_version, pytd_builtins.GetDefaultAst(options.python_version), loader) log.warn("***Caught exception: %s", str(e), exc_info=True) else: raise if options.verify_pickle: ast1 = ast.Visit(visitors.LateTypeToClassType()) ast1 = ast1.Visit(visitors.ClearClassPointers()) ast2 = loader.load_file(options.module_name, options.verify_pickle) ast2 = ast2.Visit(visitors.ClearClassPointers()) if not ast1.ASTeq(ast2): raise AssertionError() serialize_ast.StoreAst(ast, options.output) def print_error_doc_url(errorlog): names = {e.name for e in errorlog} if names: doclink = "\nFor more details, see %s" % ERROR_DOC_URL if len(names) == 1: doclink += "#" + names.pop() print(doclink + ".", file=sys.stderr) def handle_errors(errorlog, options): """Handle the errorlog according to the given options.""" if not options.report_errors: return 0 if options.output_errors_csv: errorlog.print_to_csv_file(options.output_errors_csv) return 0 # Command is successful regardless of errors. errorlog.print_to_stderr() print_error_doc_url(errorlog) return 1 if errorlog.has_error() else 0 # exit code def parse_pyi(options): """Tries parsing a PYI file.""" loader = load_pytd.create_loader(options) ast = loader.load_file(options.module_name, options.input) ast = loader.finish_and_verify_ast(ast) if options.output: result = "# Internal AST parsed and postprocessed from %s\n\n%s" % ( options.input, pytd.Print(ast)) _write_pyi_output(options, result, options.output) def get_pytype_version(): return __version__.__version__ @contextlib.contextmanager def wrap_pytype_exceptions(exception_type, filename=""): """Catch pytype errors and reraise them as a single exception type. NOTE: This will also wrap non-pytype errors thrown within the body of the code block; it is therefore recommended to use this to wrap a single function call. Args: exception_type: The class to wrap exceptions in. filename: A filename to use in error messages. Yields: nothing, just calls the code block. """ try: yield except utils.UsageError as e: raise exception_type("Pytype usage error: %s" % utils.message(e)) except pyc.CompileError as e: raise exception_type("Error reading file %s at line %s: %s" % (filename, e.lineno, e.error)) except tokenize.TokenError as e: msg, (lineno, unused_column) = e.args # pylint: disable=unbalanced-tuple-unpacking raise exception_type("Error reading file %s at line %s: %s" % (filename, lineno, msg)) except directors.SkipFile: raise exception_type("Pytype could not analyze file %s: " "'# skip-file' directive found" % filename) except Exception as e: # pylint: disable=broad-except msg = "Pytype error: %s: %s" % (e.__class__.__name__, e.args[0]) # We need the version check here because six.reraise doesn't work properly # in python3 if sys.version_info[0] == 2: _, _, tb = sys.exc_info() six.reraise(exception_type, exception_type(msg), tb) else: raise exception_type(msg).with_traceback(e.__traceback__)
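# --- Illustrative usage sketch (not part of the module) ---
# Funnelling any pytype failure from a single call into one exception type via
# wrap_pytype_exceptions. `analyze_one_file` is a hypothetical caller-side
# helper; the wrapped body would normally call check_py()/generate_pyi() with
# options, errorlog and loader objects prepared by the caller.
from pytype import io

class MyToolError(Exception):
    pass

def analyze_one_file(filename):
    with io.wrap_pytype_exceptions(MyToolError, filename=filename):
        # e.g. io.check_py(filename, errorlog, options, loader); omitted to
        # keep the sketch minimal and self-contained.
        pass

analyze_one_file("example.py")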
py
1a4ed019145e4ee03243b0f71e4a34d8cf6ecbdc
''' What do we want to test? 2 types of researchers: winner and loser both researchers: - need to create a new proposal at the appropriate tick winner: - needs to send the grant funding to KnowledgeMarket (different fixed amount in this case) - increase knowledge_access by 1 loser: - needs to buy assets from KnowledgeMarket (fixed amount) - increase knowledge_access by 1 what to check and how to check it? Add 2 researchers, 1 who is always going to be the winner, 1 loser initially, check if they both create the proposal at the appropriate tick (enough to be done once) then check whether the balances they're sending to the KnowledgeMarket are correct (check the balances in their wallets) lastly, check knowledge_access index (should be the same after each tick) ''' from enforce_typing import enforce_types import random from assets.agents.opsci_agents.profit_sharing_agents.ResearcherAgent import ResearcherAgent from assets.agents.opsci_agents.profit_sharing_agents.OpscientiaDAOAgent import OpscientiaDAOAgent from engine import AgentBase, SimStateBase, SimStrategyBase from engine.AgentDict import AgentDict from util.constants import S_PER_DAY class SimStrategy(SimStrategyBase.SimStrategyBase): def __init__(self): self.TICKS_BETWEEN_PROPOSALS = 2 self.PRICE_OF_ASSETS = 1 self.RATIO_FUNDS_TO_PUBLISH = 1 self.FUNDING_BOUNDARY = 0 class SimState(SimStateBase.SimStateBase): def __init__(self, ss=None): super().__init__(ss) self.ss = SimStrategy() self.agents = AgentDict({}) self.researchers: dict = {} def takeStep(self) -> None: for agent in list(self.agents.values()): agent.takeStep(self) @enforce_types def test1(): state = SimState() class SimpleKnowledgeMarketAgent(AgentBase.AgentBase): def __init__(self, name: str, USD: float, OCEAN: float,): super().__init__(name, USD, OCEAN) def takeStep(self, state) -> None: pass state.agents["m"] = SimpleKnowledgeMarketAgent("m", 0.0, 0.0) state.agents["dao"] = OpscientiaDAOAgent("dao1", USD=0.0, OCEAN=10.0) state.agents["r0"] = ResearcherAgent(name="r0", USD=0.0, OCEAN=10.0, evaluator="dao", receiving_agents = {"m": 1.0}, proposal_setup={'grant_requested': 1, 'assets_generated': 1, 'no_researchers': 1}) state.agents["r1"] = ResearcherAgent(name="r1", USD=0.0, OCEAN=10.0, evaluator="dao", receiving_agents = {"m": 1.0}, proposal_setup={'grant_requested': 2, 'assets_generated': 1, 'no_researchers': 1}) state.researchers["r0"] = ResearcherAgent(name="r0", USD=0.0, OCEAN=10.0, evaluator="dao", receiving_agents = {"m": 1.0}, proposal_setup={'grant_requested': 1, 'assets_generated': 1, 'no_researchers': 1}) state.researchers["r1"] = ResearcherAgent(name="r1", USD=0.0, OCEAN=10.0, evaluator="dao", receiving_agents = {"m": 1.0}, proposal_setup={'grant_requested': 2, 'assets_generated': 1, 'no_researchers': 1}) assert state.agents["r0"].OCEAN() == 10.0 assert state.agents["r1"].OCEAN() == 10.0 assert state.agents["dao"].OCEAN() == 10.0 assert state.agents["m"].OCEAN() == 0.0 r0 = state.agents["r0"] r1 = state.agents["r1"] dao = state.agents["dao"] m = state.agents["m"] state.takeStep(); state.tick += 1 # create a proposal | won't be funded yet since opsci_dao is the first agent assert r0.proposal != {} assert r0.new_proposal == True assert r1.proposal != {} assert r1.new_proposal == True assert not dao.proposal_evaluation assert r0.OCEAN() == 10.0 assert r1.OCEAN() == 10.0 assert dao.OCEAN() == 10.0 state.takeStep(); state.tick += 1 # fund & publish assert r0.proposal != {} assert r0.new_proposal == False assert r1.proposal != {} assert r1.new_proposal == False 
assert dao.proposal_evaluation assert r0.OCEAN() == 10.0 # winner receives 1 OCEAN but immediatelly spends it assert r1.OCEAN() == 9.0 # loser uses one 1 OCEAN to buy from Knowledge Market assert dao.OCEAN() == 9.0 # dao sent 1 OCEAN to r1 (winner) assert r0.ticks_since_proposal == 1 assert r1.ticks_since_proposal == 1 state.takeStep(); state.tick += 1 # create a proposal | SHOULD NOT BE FUNDED UNTIL NEXT ROUND assert r0.proposal != {} assert r0.new_proposal == True assert r1.proposal != {} assert r1.new_proposal == True assert r0.OCEAN() == 10.0 # winner receives 1 OCEAN but immediatelly spends it assert r1.OCEAN() == 9.0 # loser uses one 1 OCEAN to buy from Knowledge Market assert dao.OCEAN() == 9.0 # dao sent 1 OCEAN to r1 (winner) assert r0.ticks_since_proposal == 0 assert r1.ticks_since_proposal == 0 state.takeStep(); state.tick += 1 # fund & publish assert r0.proposal != {} assert r0.new_proposal == False assert r1.proposal != {} assert r1.new_proposal == False assert r0.OCEAN() == 10.0 # winner receives 1 OCEAN but immediatelly spends it assert r1.OCEAN() == 8.0 # loser uses one 1 OCEAN to buy from Knowledge Market assert dao.OCEAN() == 8.0 # dao sent 1 OCEAN to r1 (winner) assert r0.ticks_since_proposal == 1 assert r1.ticks_since_proposal == 1 state.takeStep(); state.tick += 1 # create a proposal | SHOULD NOT BE FUNDED UNTIL NEXT ROUND assert r0.proposal != {} assert r0.new_proposal == True assert r1.proposal != {} assert r1.new_proposal == True assert r0.OCEAN() == 10.0 # winner receives 1 OCEAN but immediatelly spends it assert r1.OCEAN() == 8.0 # loser uses one 1 OCEAN to buy from Knowledge Market assert dao.OCEAN() == 8.0 # dao sent 1 OCEAN to r1 (winner) assert r0.ticks_since_proposal == 0 assert r1.ticks_since_proposal == 0 state.takeStep(); state.tick += 1 # fund & publish assert r0.proposal != {} assert r0.new_proposal == False assert r1.proposal != {} assert r1.new_proposal == False assert r0.OCEAN() == 10.0 # winner receives 1 OCEAN but immediatelly spends it assert r1.OCEAN() == 7.0 # loser uses one 1 OCEAN to buy from Knowledge Market assert dao.OCEAN() == 7.0 # dao sent 1 OCEAN to r1 (winner) assert r0.ticks_since_proposal == 1 assert r1.ticks_since_proposal == 1 state.takeStep(); state.tick += 1 # create a proposal | SHOULD NOT BE FUNDED UNTIL NEXT ROUND assert r0.proposal != {} assert r0.new_proposal == True assert r1.proposal != {} assert r1.new_proposal == True assert r0.OCEAN() == 10.0 # winner receives 1 OCEAN but immediatelly spends it assert r1.OCEAN() == 7.0 # loser uses one 1 OCEAN to buy from Knowledge Market assert dao.OCEAN() == 7.0 # dao sent 1 OCEAN to r1 (winner) assert r0.ticks_since_proposal == 0 assert r1.ticks_since_proposal == 0 state.takeStep(); state.tick += 1 # fund & publish assert r0.proposal != {} assert r0.new_proposal == False assert r1.proposal != {} assert r1.new_proposal == False assert r0.OCEAN() == 10.0 # winner receives 1 OCEAN but immediatelly spends it assert r1.OCEAN() == 6.0 # loser uses one 1 OCEAN to buy from Knowledge Market assert dao.OCEAN() == 6.0 # dao sent 1 OCEAN to r1 (winner) assert r0.ticks_since_proposal == 1 assert r1.ticks_since_proposal == 1 state.takeStep(); state.tick += 1 # create a proposal | SHOULD NOT BE FUNDED UNTIL NEXT ROUND assert r0.proposal != {} assert r0.new_proposal == True assert r1.proposal != {} assert r1.new_proposal == True assert r0.OCEAN() == 10.0 # winner receives 1 OCEAN but immediatelly spends it assert r1.OCEAN() == 6.0 # loser uses one 1 OCEAN to buy from Knowledge Market assert 
dao.OCEAN() == 6.0 # dao sent 1 OCEAN to r1 (winner) assert r0.ticks_since_proposal == 0 assert r1.ticks_since_proposal == 0 state.takeStep(); state.tick += 1 # fund & publish assert r0.proposal assert r0.new_proposal == False assert r1.proposal assert r1.new_proposal == False assert r0.OCEAN() == 10.0 # winner receives 1 OCEAN but immediatelly spends it assert r1.OCEAN() == 5.0 # loser uses one 1 OCEAN to buy from Knowledge Market assert dao.OCEAN() == 5.0 # dao sent 1 OCEAN to r1 (winner) assert r0.ticks_since_proposal == 1 assert r1.ticks_since_proposal == 1 state.takeStep(); state.tick += 1 # create a proposal | SHOULD NOT BE FUNDED UNTIL NEXT ROUND assert r0.proposal assert r0.new_proposal == True assert r1.proposal assert r1.new_proposal == True assert r0.OCEAN() == 10.0 # winner receives 1 OCEAN but immediatelly spends it assert r1.OCEAN() == 5.0 # loser uses one 1 OCEAN to buy from Knowledge Market assert dao.OCEAN() == 5.0 # dao sent 1 OCEAN to r1 (winner) assert r0.ticks_since_proposal == 0 assert r1.ticks_since_proposal == 0 state.takeStep(); state.tick += 1 # fund & publish assert r0.proposal assert r0.new_proposal == False assert r1.proposal assert r1.new_proposal == False assert r0.OCEAN() == 10.0 # winner receives 1 OCEAN but immediatelly spends it assert r1.OCEAN() == 4.0 # loser uses one 1 OCEAN to buy from Knowledge Market assert dao.OCEAN() == 4.0 # dao sent 1 OCEAN to r1 (winner) assert r0.ticks_since_proposal == 1 assert r1.ticks_since_proposal == 1 state.takeStep(); state.tick += 1 # create a proposal | SHOULD NOT BE FUNDED UNTIL NEXT ROUND assert r0.proposal assert r0.new_proposal == True assert r1.proposal assert r1.new_proposal == True assert r0.OCEAN() == 10.0 # winner receives 1 OCEAN but immediatelly spends it assert r1.OCEAN() == 4.0 # loser uses one 1 OCEAN to buy from Knowledge Market assert dao.OCEAN() == 4.0 # dao sent 1 OCEAN to r1 (winner) assert r0.ticks_since_proposal == 0 assert r1.ticks_since_proposal == 0 state.takeStep(); state.tick += 1 # fund & publish assert r0.proposal assert r0.new_proposal == False assert r1.proposal assert r1.new_proposal == False assert r0.OCEAN() == 10.0 # winner receives 1 OCEAN but immediatelly spends it assert r1.OCEAN() == 3.0 # loser uses one 1 OCEAN to buy from Knowledge Market assert dao.OCEAN() == 3.0 # dao sent 1 OCEAN to r1 (winner) assert r0.ticks_since_proposal == 1 assert r1.ticks_since_proposal == 1 state.takeStep(); state.tick += 1 # create a proposal | SHOULD NOT BE FUNDED UNTIL NEXT ROUND assert r0.proposal assert r0.new_proposal == True assert r1.proposal assert r1.new_proposal == True assert r0.OCEAN() == 10.0 # winner receives 1 OCEAN but immediatelly spends it assert r1.OCEAN() == 3.0 # loser uses one 1 OCEAN to buy from Knowledge Market assert dao.OCEAN() == 3.0 # dao sent 1 OCEAN to r1 (winner) assert r0.ticks_since_proposal == 0 assert r1.ticks_since_proposal == 0 state.takeStep(); state.tick += 1 # fund & publish assert r0.proposal assert r0.new_proposal == False assert r1.proposal assert r1.new_proposal == False assert r0.OCEAN() == 10.0 # winner receives 1 OCEAN but immediatelly spends it assert r1.OCEAN() == 2.0 # loser uses one 1 OCEAN to buy from Knowledge Market assert dao.OCEAN() == 2.0 # dao sent 1 OCEAN to r1 (winner) assert r0.ticks_since_proposal == 1 assert r1.ticks_since_proposal == 1 state.takeStep(); state.tick += 1 # create a proposal | SHOULD NOT BE FUNDED UNTIL NEXT ROUND assert r0.proposal assert r0.new_proposal == True assert r1.proposal assert r1.new_proposal == True assert 
r0.OCEAN() == 10.0 # winner receives 1 OCEAN but immediatelly spends it assert r1.OCEAN() == 2.0 # loser uses one 1 OCEAN to buy from Knowledge Market assert dao.OCEAN() == 2.0 # dao sent 1 OCEAN to r1 (winner) assert r0.ticks_since_proposal == 0 assert r1.ticks_since_proposal == 0 state.takeStep(); state.tick += 1 # fund & publish assert r0.proposal assert r0.new_proposal == False assert r1.proposal assert r1.new_proposal == False assert r0.OCEAN() == 10.0 # winner receives 1 OCEAN but immediatelly spends it assert r1.OCEAN() == 1.0 # loser uses one 1 OCEAN to buy from Knowledge Market assert dao.OCEAN() == 1.0 # dao sent 1 OCEAN to r1 (winner) assert r0.ticks_since_proposal == 1 assert r1.ticks_since_proposal == 1 state.takeStep(); state.tick += 1 # create a proposal | SHOULD NOT BE FUNDED UNTIL NEXT ROUND assert r0.proposal assert r0.new_proposal == True assert r1.proposal assert r1.new_proposal == True assert r0.OCEAN() == 10.0 # winner receives 1 OCEAN but immediatelly spends it assert r1.OCEAN() == 1.0 # loser uses one 1 OCEAN to buy from Knowledge Market assert dao.OCEAN() == 1.0 # dao sent 1 OCEAN to r1 (winner) assert r0.ticks_since_proposal == 0 assert r1.ticks_since_proposal == 0 state.takeStep(); state.tick += 1 # fund & publish assert r0.proposal assert r0.new_proposal == False assert r1.proposal assert r1.new_proposal == False assert r0.OCEAN() == 10.0 # winner receives 1 OCEAN but immediatelly spends it assert r1.OCEAN() == 0.0 # loser uses one 1 OCEAN to buy from Knowledge Market assert dao.OCEAN() == 0.0 # dao sent 1 OCEAN to r1 (winner) assert r0.ticks_since_proposal == 1 assert r1.ticks_since_proposal == 1 assert r0.total_assets_in_mrkt == 10 assert r1.total_assets_in_mrkt == 0 assert r0.knowledge_access == 11 assert r1.knowledge_access == 11 assert m.OCEAN() == 20